Updates network branch to v0.5.0

This commit is contained in:
Age Manning 2019-03-18 18:17:37 +11:00
commit 66f09e1b8e
No known key found for this signature in database
GPG Key ID: 05EED64B79E06A93
174 changed files with 7711 additions and 3924 deletions

1
.gitignore vendored
View File

@ -3,3 +3,4 @@ target/
Cargo.lock Cargo.lock
*.pk *.pk
*.sk *.sk
*.raw_keypairs

View File

@ -11,6 +11,7 @@ members = [
"eth2/utils/honey-badger-split", "eth2/utils/honey-badger-split",
"eth2/utils/merkle_proof", "eth2/utils/merkle_proof",
"eth2/utils/int_to_bytes", "eth2/utils/int_to_bytes",
"eth2/utils/serde_hex",
"eth2/utils/slot_clock", "eth2/utils/slot_clock",
"eth2/utils/ssz", "eth2/utils/ssz",
"eth2/utils/ssz_derive", "eth2/utils/ssz_derive",

View File

@ -16,3 +16,4 @@ ctrlc = { version = "3.1.1", features = ["termination"] }
tokio = "0.1.15" tokio = "0.1.15"
futures = "0.1.25" futures = "0.1.25"
exit-future = "0.1.3" exit-future = "0.1.3"
state_processing = { path = "../eth2/state_processing" }

View File

@ -1,4 +1,3 @@
use log::trace;
use ssz::TreeHash; use ssz::TreeHash;
use state_processing::per_block_processing::validate_attestation_without_signature; use state_processing::per_block_processing::validate_attestation_without_signature;
use std::collections::{HashMap, HashSet}; use std::collections::{HashMap, HashSet};
@ -86,34 +85,22 @@ impl AttestationAggregator {
free_attestation: &FreeAttestation, free_attestation: &FreeAttestation,
spec: &ChainSpec, spec: &ChainSpec,
) -> Result<Outcome, BeaconStateError> { ) -> Result<Outcome, BeaconStateError> {
let attestation_duties = match state.attestation_slot_and_shard_for_validator( let duties =
free_attestation.validator_index as usize, match state.get_attestation_duties(free_attestation.validator_index as usize, spec) {
spec, Err(BeaconStateError::EpochCacheUninitialized(e)) => {
) { panic!("Attempted to access unbuilt cache {:?}.", e)
Err(BeaconStateError::EpochCacheUninitialized(e)) => { }
panic!("Attempted to access unbuilt cache {:?}.", e) Err(BeaconStateError::EpochOutOfBounds) => invalid_outcome!(Message::TooOld),
} Err(BeaconStateError::ShardOutOfBounds) => invalid_outcome!(Message::BadShard),
Err(BeaconStateError::EpochOutOfBounds) => invalid_outcome!(Message::TooOld), Err(e) => return Err(e),
Err(BeaconStateError::ShardOutOfBounds) => invalid_outcome!(Message::BadShard), Ok(None) => invalid_outcome!(Message::BadValidatorIndex),
Err(e) => return Err(e), Ok(Some(attestation_duties)) => attestation_duties,
Ok(None) => invalid_outcome!(Message::BadValidatorIndex), };
Ok(Some(attestation_duties)) => attestation_duties,
};
let (slot, shard, committee_index) = attestation_duties; if free_attestation.data.slot != duties.slot {
trace!(
"slot: {}, shard: {}, committee_index: {}, val_index: {}",
slot,
shard,
committee_index,
free_attestation.validator_index
);
if free_attestation.data.slot != slot {
invalid_outcome!(Message::BadSlot); invalid_outcome!(Message::BadSlot);
} }
if free_attestation.data.shard != shard { if free_attestation.data.shard != duties.shard {
invalid_outcome!(Message::BadShard); invalid_outcome!(Message::BadShard);
} }
@ -143,7 +130,7 @@ impl AttestationAggregator {
if let Some(updated_attestation) = aggregate_attestation( if let Some(updated_attestation) = aggregate_attestation(
existing_attestation, existing_attestation,
&free_attestation.signature, &free_attestation.signature,
committee_index as usize, duties.committee_index as usize,
) { ) {
self.store.insert(signable_message, updated_attestation); self.store.insert(signable_message, updated_attestation);
valid_outcome!(Message::Aggregated); valid_outcome!(Message::Aggregated);
@ -154,7 +141,7 @@ impl AttestationAggregator {
let mut aggregate_signature = AggregateSignature::new(); let mut aggregate_signature = AggregateSignature::new();
aggregate_signature.add(&free_attestation.signature); aggregate_signature.add(&free_attestation.signature);
let mut aggregation_bitfield = Bitfield::new(); let mut aggregation_bitfield = Bitfield::new();
aggregation_bitfield.set(committee_index as usize, true); aggregation_bitfield.set(duties.committee_index as usize, true);
let new_attestation = Attestation { let new_attestation = Attestation {
data: free_attestation.data.clone(), data: free_attestation.data.clone(),
aggregation_bitfield, aggregation_bitfield,
@ -177,9 +164,13 @@ impl AttestationAggregator {
) -> Vec<Attestation> { ) -> Vec<Attestation> {
let mut known_attestation_data: HashSet<AttestationData> = HashSet::new(); let mut known_attestation_data: HashSet<AttestationData> = HashSet::new();
state.latest_attestations.iter().for_each(|attestation| { state
known_attestation_data.insert(attestation.data.clone()); .previous_epoch_attestations
}); .iter()
.chain(state.current_epoch_attestations.iter())
.for_each(|attestation| {
known_attestation_data.insert(attestation.data.clone());
});
self.store self.store
.values() .values()

View File

@ -15,10 +15,7 @@ use state_processing::{
per_slot_processing, BlockProcessingError, SlotProcessingError, per_slot_processing, BlockProcessingError, SlotProcessingError,
}; };
use std::sync::Arc; use std::sync::Arc;
use types::{ use types::*;
readers::{BeaconBlockReader, BeaconStateReader},
*,
};
#[derive(Debug, PartialEq)] #[derive(Debug, PartialEq)]
pub enum ValidBlock { pub enum ValidBlock {
@ -73,45 +70,30 @@ where
F: ForkChoice, F: ForkChoice,
{ {
/// Instantiate a new Beacon Chain, from genesis. /// Instantiate a new Beacon Chain, from genesis.
#[allow(clippy::too_many_arguments)] // Will be re-factored in the coming weeks. pub fn from_genesis(
pub fn genesis(
state_store: Arc<BeaconStateStore<T>>, state_store: Arc<BeaconStateStore<T>>,
block_store: Arc<BeaconBlockStore<T>>, block_store: Arc<BeaconBlockStore<T>>,
slot_clock: U, slot_clock: U,
genesis_time: u64, mut genesis_state: BeaconState,
latest_eth1_data: Eth1Data, genesis_block: BeaconBlock,
initial_validator_deposits: Vec<Deposit>,
spec: ChainSpec, spec: ChainSpec,
fork_choice: F, fork_choice: F,
) -> Result<Self, Error> { ) -> Result<Self, Error> {
if initial_validator_deposits.is_empty() {
return Err(Error::InsufficientValidators);
}
let mut genesis_state = BeaconState::genesis(
genesis_time,
initial_validator_deposits,
latest_eth1_data,
&spec,
)?;
let state_root = genesis_state.canonical_root(); let state_root = genesis_state.canonical_root();
state_store.put(&state_root, &ssz_encode(&genesis_state)[..])?; state_store.put(&state_root, &ssz_encode(&genesis_state)[..])?;
let genesis_block = BeaconBlock::genesis(state_root, &spec); let block_root = genesis_block.into_header().canonical_root();
let block_root = genesis_block.canonical_root();
block_store.put(&block_root, &ssz_encode(&genesis_block)[..])?; block_store.put(&block_root, &ssz_encode(&genesis_block)[..])?;
let finalized_head = RwLock::new(CheckPoint::new( let finalized_head = RwLock::new(CheckPoint::new(
genesis_block.clone(), genesis_block.clone(),
block_root, block_root,
// TODO: this is a memory waste; remove full clone.
genesis_state.clone(), genesis_state.clone(),
state_root, state_root,
)); ));
let canonical_head = RwLock::new(CheckPoint::new( let canonical_head = RwLock::new(CheckPoint::new(
genesis_block.clone(), genesis_block.clone(),
block_root, block_root,
// TODO: this is a memory waste; remove full clone.
genesis_state.clone(), genesis_state.clone(),
state_root, state_root,
)); ));
@ -119,7 +101,8 @@ where
genesis_state.build_epoch_cache(RelativeEpoch::Previous, &spec)?; genesis_state.build_epoch_cache(RelativeEpoch::Previous, &spec)?;
genesis_state.build_epoch_cache(RelativeEpoch::Current, &spec)?; genesis_state.build_epoch_cache(RelativeEpoch::Current, &spec)?;
genesis_state.build_epoch_cache(RelativeEpoch::Next, &spec)?; genesis_state.build_epoch_cache(RelativeEpoch::NextWithoutRegistryChange, &spec)?;
genesis_state.build_epoch_cache(RelativeEpoch::NextWithRegistryChange, &spec)?;
Ok(Self { Ok(Self {
block_store, block_store,
@ -205,10 +188,13 @@ where
/// processing applied to it. /// processing applied to it.
pub fn advance_state(&self, slot: Slot) -> Result<(), SlotProcessingError> { pub fn advance_state(&self, slot: Slot) -> Result<(), SlotProcessingError> {
let state_slot = self.state.read().slot; let state_slot = self.state.read().slot;
let head_block_root = self.head().beacon_block_root;
let latest_block_header = self.head().beacon_block.into_header();
for _ in state_slot.as_u64()..slot.as_u64() { for _ in state_slot.as_u64()..slot.as_u64() {
per_slot_processing(&mut *self.state.write(), head_block_root, &self.spec)?; per_slot_processing(&mut *self.state.write(), &latest_block_header, &self.spec)?;
} }
Ok(()) Ok(())
} }
@ -261,19 +247,15 @@ where
/// present and prior epoch is available. /// present and prior epoch is available.
pub fn block_proposer(&self, slot: Slot) -> Result<usize, BeaconStateError> { pub fn block_proposer(&self, slot: Slot) -> Result<usize, BeaconStateError> {
trace!("BeaconChain::block_proposer: slot: {}", slot); trace!("BeaconChain::block_proposer: slot: {}", slot);
let index = self let index = self.state.read().get_beacon_proposer_index(
.state slot,
.read() RelativeEpoch::Current,
.get_beacon_proposer_index(slot, &self.spec)?; &self.spec,
)?;
Ok(index) Ok(index)
} }
/// Returns the justified slot for the present state.
pub fn justified_epoch(&self) -> Epoch {
self.state.read().justified_epoch
}
/// Returns the attestation slot and shard for a given validator index. /// Returns the attestation slot and shard for a given validator index.
/// ///
/// Information is read from the current state, so only information from the present and prior /// Information is read from the current state, so only information from the present and prior
@ -286,12 +268,12 @@ where
"BeaconChain::validator_attestion_slot_and_shard: validator_index: {}", "BeaconChain::validator_attestion_slot_and_shard: validator_index: {}",
validator_index validator_index
); );
if let Some((slot, shard, _committee)) = self if let Some(attestation_duty) = self
.state .state
.read() .read()
.attestation_slot_and_shard_for_validator(validator_index, &self.spec)? .get_attestation_duties(validator_index, &self.spec)?
{ {
Ok(Some((slot, shard))) Ok(Some((attestation_duty.slot, attestation_duty.shard)))
} else { } else {
Ok(None) Ok(None)
} }
@ -300,37 +282,33 @@ where
/// Produce an `AttestationData` that is valid for the present `slot` and given `shard`. /// Produce an `AttestationData` that is valid for the present `slot` and given `shard`.
pub fn produce_attestation_data(&self, shard: u64) -> Result<AttestationData, Error> { pub fn produce_attestation_data(&self, shard: u64) -> Result<AttestationData, Error> {
trace!("BeaconChain::produce_attestation_data: shard: {}", shard); trace!("BeaconChain::produce_attestation_data: shard: {}", shard);
let justified_epoch = self.justified_epoch(); let source_epoch = self.state.read().current_justified_epoch;
let justified_block_root = *self let source_root = *self.state.read().get_block_root(
.state source_epoch.start_slot(self.spec.slots_per_epoch),
.read() &self.spec,
.get_block_root( )?;
justified_epoch.start_slot(self.spec.slots_per_epoch),
&self.spec,
)
.ok_or_else(|| Error::BadRecentBlockRoots)?;
let epoch_boundary_root = *self let target_root = *self.state.read().get_block_root(
.state self.state
.read() .read()
.get_block_root( .slot
self.state.read().current_epoch_start_slot(&self.spec), .epoch(self.spec.slots_per_epoch)
&self.spec, .start_slot(self.spec.slots_per_epoch),
) &self.spec,
.ok_or_else(|| Error::BadRecentBlockRoots)?; )?;
Ok(AttestationData { Ok(AttestationData {
slot: self.state.read().slot, slot: self.state.read().slot,
shard, shard,
beacon_block_root: self.head().beacon_block_root, beacon_block_root: self.head().beacon_block_root,
epoch_boundary_root, target_root,
crosslink_data_root: Hash256::zero(), crosslink_data_root: Hash256::zero(),
latest_crosslink: Crosslink { previous_crosslink: Crosslink {
epoch: self.state.read().slot.epoch(self.spec.slots_per_epoch), epoch: self.state.read().slot.epoch(self.spec.slots_per_epoch),
crosslink_data_root: Hash256::zero(), crosslink_data_root: Hash256::zero(),
}, },
justified_epoch, source_epoch,
justified_block_root, source_root,
}) })
} }
@ -577,66 +555,13 @@ where
} }
} }
/// Dumps the entire canonical chain, from the head to genesis to a vector for analysis.
///
/// This could be a very expensive operation and should only be done in testing/analysis
/// activities.
pub fn chain_dump(&self) -> Result<Vec<CheckPoint>, Error> {
let mut dump = vec![];
let mut last_slot = CheckPoint {
beacon_block: self.head().beacon_block.clone(),
beacon_block_root: self.head().beacon_block_root,
beacon_state: self.head().beacon_state.clone(),
beacon_state_root: self.head().beacon_state_root,
};
dump.push(last_slot.clone());
loop {
let beacon_block_root = last_slot.beacon_block.parent_root;
if beacon_block_root == self.spec.zero_hash {
break; // Genesis has been reached.
}
let beacon_block = self
.block_store
.get_deserialized(&beacon_block_root)?
.ok_or_else(|| {
Error::DBInconsistent(format!("Missing block {}", beacon_block_root))
})?;
let beacon_state_root = beacon_block.state_root;
let beacon_state = self
.state_store
.get_deserialized(&beacon_state_root)?
.ok_or_else(|| {
Error::DBInconsistent(format!("Missing state {}", beacon_state_root))
})?;
let slot = CheckPoint {
beacon_block,
beacon_block_root,
beacon_state,
beacon_state_root,
};
dump.push(slot.clone());
last_slot = slot;
}
dump.reverse();
Ok(dump)
}
/// Accept some block and attempt to add it to block DAG. /// Accept some block and attempt to add it to block DAG.
/// ///
/// Will accept blocks from prior slots, however it will reject any block from a future slot. /// Will accept blocks from prior slots, however it will reject any block from a future slot.
pub fn process_block(&self, block: BeaconBlock) -> Result<BlockProcessingOutcome, Error> { pub fn process_block(&self, block: BeaconBlock) -> Result<BlockProcessingOutcome, Error> {
debug!("Processing block with slot {}...", block.slot()); debug!("Processing block with slot {}...", block.slot);
let block_root = block.canonical_root(); let block_root = block.into_header().canonical_root();
let present_slot = self.present_slot(); let present_slot = self.present_slot();
@ -648,9 +573,9 @@ where
// Load the blocks parent block from the database, returning invalid if that block is not // Load the blocks parent block from the database, returning invalid if that block is not
// found. // found.
let parent_block_root = block.parent_root; let parent_block_root = block.previous_block_root;
let parent_block = match self.block_store.get_reader(&parent_block_root)? { let parent_block = match self.block_store.get_deserialized(&parent_block_root)? {
Some(parent_root) => parent_root, Some(previous_block_root) => previous_block_root,
None => { None => {
return Ok(BlockProcessingOutcome::InvalidBlock( return Ok(BlockProcessingOutcome::InvalidBlock(
InvalidBlock::ParentUnknown, InvalidBlock::ParentUnknown,
@ -660,23 +585,21 @@ where
// Load the parent blocks state from the database, returning an error if it is not found. // Load the parent blocks state from the database, returning an error if it is not found.
// It is an error because if know the parent block we should also know the parent state. // It is an error because if know the parent block we should also know the parent state.
let parent_state_root = parent_block.state_root(); let parent_state_root = parent_block.state_root;
let parent_state = self let parent_state = self
.state_store .state_store
.get_reader(&parent_state_root)? .get_deserialized(&parent_state_root)?
.ok_or_else(|| Error::DBInconsistent(format!("Missing state {}", parent_state_root)))? .ok_or_else(|| Error::DBInconsistent(format!("Missing state {}", parent_state_root)))?;
.into_beacon_state()
.ok_or_else(|| {
Error::DBInconsistent(format!("State SSZ invalid {}", parent_state_root))
})?;
// TODO: check the block proposer signature BEFORE doing a state transition. This will // TODO: check the block proposer signature BEFORE doing a state transition. This will
// significantly lower exposure surface to DoS attacks. // significantly lower exposure surface to DoS attacks.
// Transition the parent state to the present slot. // Transition the parent state to the present slot.
let mut state = parent_state; let mut state = parent_state;
println!("parent process state: {:?}", state.latest_block_header);
let previous_block_header = parent_block.into_header();
for _ in state.slot.as_u64()..present_slot.as_u64() { for _ in state.slot.as_u64()..present_slot.as_u64() {
if let Err(e) = per_slot_processing(&mut state, parent_block_root, &self.spec) { if let Err(e) = per_slot_processing(&mut state, &previous_block_header, &self.spec) {
return Ok(BlockProcessingOutcome::InvalidBlock( return Ok(BlockProcessingOutcome::InvalidBlock(
InvalidBlock::SlotProcessingError(e), InvalidBlock::SlotProcessingError(e),
)); ));
@ -691,6 +614,8 @@ where
)); ));
} }
println!("process state: {:?}", state.latest_block_header);
let state_root = state.canonical_root(); let state_root = state.canonical_root();
if block.state_root != state_root { if block.state_root != state_root {
@ -752,22 +677,22 @@ where
attestations.len() attestations.len()
); );
let parent_root = *state let previous_block_root = *state
.get_block_root(state.slot.saturating_sub(1_u64), &self.spec) .get_block_root(state.slot - 1, &self.spec)
.ok_or_else(|| BlockProductionError::UnableToGetBlockRootFromState)?; .map_err(|_| BlockProductionError::UnableToGetBlockRootFromState)?;
let mut block = BeaconBlock { let mut block = BeaconBlock {
slot: state.slot, slot: state.slot,
parent_root, previous_block_root,
state_root: Hash256::zero(), // Updated after the state is calculated. state_root: Hash256::zero(), // Updated after the state is calculated.
randao_reveal,
eth1_data: Eth1Data {
// TODO: replace with real data
deposit_root: Hash256::zero(),
block_hash: Hash256::zero(),
},
signature: self.spec.empty_signature.clone(), // To be completed by a validator. signature: self.spec.empty_signature.clone(), // To be completed by a validator.
body: BeaconBlockBody { body: BeaconBlockBody {
randao_reveal,
eth1_data: Eth1Data {
// TODO: replace with real data
deposit_root: Hash256::zero(),
block_hash: Hash256::zero(),
},
proposer_slashings: self.get_proposer_slashings_for_block(), proposer_slashings: self.get_proposer_slashings_for_block(),
attester_slashings: self.get_attester_slashings_for_block(), attester_slashings: self.get_attester_slashings_for_block(),
attestations, attestations,
@ -781,6 +706,8 @@ where
per_block_processing_without_verifying_block_signature(&mut state, &block, &self.spec)?; per_block_processing_without_verifying_block_signature(&mut state, &block, &self.spec)?;
println!("produce state: {:?}", state.latest_block_header);
let state_root = state.canonical_root(); let state_root = state.canonical_root();
block.state_root = state_root; block.state_root = state_root;
@ -815,6 +742,59 @@ where
Ok(()) Ok(())
} }
/// Dumps the entire canonical chain, from the head to genesis to a vector for analysis.
///
/// This could be a very expensive operation and should only be done in testing/analysis
/// activities.
pub fn chain_dump(&self) -> Result<Vec<CheckPoint>, Error> {
let mut dump = vec![];
let mut last_slot = CheckPoint {
beacon_block: self.head().beacon_block.clone(),
beacon_block_root: self.head().beacon_block_root,
beacon_state: self.head().beacon_state.clone(),
beacon_state_root: self.head().beacon_state_root,
};
dump.push(last_slot.clone());
loop {
let beacon_block_root = last_slot.beacon_block.previous_block_root;
if beacon_block_root == self.spec.zero_hash {
break; // Genesis has been reached.
}
let beacon_block = self
.block_store
.get_deserialized(&beacon_block_root)?
.ok_or_else(|| {
Error::DBInconsistent(format!("Missing block {}", beacon_block_root))
})?;
let beacon_state_root = beacon_block.state_root;
let beacon_state = self
.state_store
.get_deserialized(&beacon_state_root)?
.ok_or_else(|| {
Error::DBInconsistent(format!("Missing state {}", beacon_state_root))
})?;
let slot = CheckPoint {
beacon_block,
beacon_block_root,
beacon_state,
beacon_state_root,
};
dump.push(slot.clone());
last_slot = slot;
}
dump.reverse();
Ok(dump)
}
} }
impl From<DBError> for Error { impl From<DBError> for Error {

View File

@ -3,19 +3,20 @@
// testnet. These are examples. Also. there is code duplication which can/should be cleaned up. // testnet. These are examples. Also. there is code duplication which can/should be cleaned up.
use crate::BeaconChain; use crate::BeaconChain;
use bls;
use db::stores::{BeaconBlockStore, BeaconStateStore}; use db::stores::{BeaconBlockStore, BeaconStateStore};
use db::{DiskDB, MemoryDB}; use db::{DiskDB, MemoryDB};
use fork_choice::BitwiseLMDGhost; use fork_choice::BitwiseLMDGhost;
use slot_clock::SystemTimeSlotClock; use slot_clock::SystemTimeSlotClock;
use ssz::TreeHash;
use std::path::PathBuf; use std::path::PathBuf;
use std::sync::Arc; use std::sync::Arc;
use types::{ChainSpec, Deposit, DepositData, DepositInput, Eth1Data, Hash256, Keypair}; use types::test_utils::TestingBeaconStateBuilder;
use types::{BeaconBlock, ChainSpec, Hash256};
//TODO: Correct this for prod //TODO: Correct this for prod
//TODO: Account for historical db //TODO: Account for historical db
pub fn initialise_beacon_chain( pub fn initialise_beacon_chain(
chain_spec: &ChainSpec, spec: &ChainSpec,
db_name: Option<&PathBuf>, db_name: Option<&PathBuf>,
) -> Arc<BeaconChain<DiskDB, SystemTimeSlotClock, BitwiseLMDGhost<DiskDB>>> { ) -> Arc<BeaconChain<DiskDB, SystemTimeSlotClock, BitwiseLMDGhost<DiskDB>>> {
// set up the db // set up the db
@ -23,124 +24,71 @@ pub fn initialise_beacon_chain(
db_name.expect("Database directory must be included"), db_name.expect("Database directory must be included"),
None, None,
)); ));
let block_store = Arc::new(BeaconBlockStore::new(db.clone())); let block_store = Arc::new(BeaconBlockStore::new(db.clone()));
let state_store = Arc::new(BeaconStateStore::new(db.clone())); let state_store = Arc::new(BeaconStateStore::new(db.clone()));
let state_builder = TestingBeaconStateBuilder::from_deterministic_keypairs(8, &spec);
let (genesis_state, _keypairs) = state_builder.build();
let mut genesis_block = BeaconBlock::empty(&spec);
genesis_block.state_root = Hash256::from_slice(&genesis_state.hash_tree_root());
// Slot clock // Slot clock
let genesis_time = 1_549_935_547; // 12th Feb 2018 (arbitrary value in the past). let slot_clock = SystemTimeSlotClock::new(genesis_state.genesis_time, spec.seconds_per_slot)
let slot_clock = SystemTimeSlotClock::new(genesis_time, chain_spec.seconds_per_slot)
.expect("Unable to load SystemTimeSlotClock"); .expect("Unable to load SystemTimeSlotClock");
// Choose the fork choice // Choose the fork choice
let fork_choice = BitwiseLMDGhost::new(block_store.clone(), state_store.clone()); let fork_choice = BitwiseLMDGhost::new(block_store.clone(), state_store.clone());
/*
* Generate some random data to start a chain with.
*
* This is will need to be replace for production usage.
*/
let latest_eth1_data = Eth1Data {
deposit_root: Hash256::zero(),
block_hash: Hash256::zero(),
};
let keypairs: Vec<Keypair> = (0..10)
.collect::<Vec<usize>>()
.iter()
.map(|_| Keypair::random())
.collect();
let initial_validator_deposits = keypairs
.iter()
.map(|keypair| Deposit {
branch: vec![], // branch verification is not chain_specified.
index: 0, // index verification is not chain_specified.
deposit_data: DepositData {
amount: 32_000_000_000, // 32 ETH (in Gwei)
timestamp: genesis_time - 1,
deposit_input: DepositInput {
pubkey: keypair.pk.clone(),
withdrawal_credentials: Hash256::zero(), // Withdrawal not possible.
proof_of_possession: bls::create_proof_of_possession(&keypair),
},
},
})
.collect();
// Genesis chain // Genesis chain
// TODO:Remove the expect here. Propagate errors and handle somewhat gracefully. //TODO: Handle error correctly
Arc::new( Arc::new(
BeaconChain::genesis( BeaconChain::from_genesis(
state_store.clone(), state_store.clone(),
block_store.clone(), block_store.clone(),
slot_clock, slot_clock,
genesis_time, genesis_state,
latest_eth1_data, genesis_block,
initial_validator_deposits, spec.clone(),
chain_spec.clone(),
fork_choice, fork_choice,
) )
.expect("Cannot initialise a beacon chain. Exiting"), .expect("Terminate if beacon chain generation fails"),
) )
} }
/// Initialisation of a test beacon chain, uses an in memory db with fixed genesis time. /// Initialisation of a test beacon chain, uses an in memory db with fixed genesis time.
pub fn initialise_test_beacon_chain( pub fn initialise_test_beacon_chain(
chain_spec: &ChainSpec, spec: &ChainSpec,
_db_name: Option<&PathBuf>, _db_name: Option<&PathBuf>,
) -> Arc<BeaconChain<MemoryDB, SystemTimeSlotClock, BitwiseLMDGhost<MemoryDB>>> { ) -> Arc<BeaconChain<MemoryDB, SystemTimeSlotClock, BitwiseLMDGhost<MemoryDB>>> {
let db = Arc::new(MemoryDB::open()); let db = Arc::new(MemoryDB::open());
let block_store = Arc::new(BeaconBlockStore::new(db.clone())); let block_store = Arc::new(BeaconBlockStore::new(db.clone()));
let state_store = Arc::new(BeaconStateStore::new(db.clone())); let state_store = Arc::new(BeaconStateStore::new(db.clone()));
let state_builder = TestingBeaconStateBuilder::from_deterministic_keypairs(8, spec);
let (genesis_state, _keypairs) = state_builder.build();
let mut genesis_block = BeaconBlock::empty(spec);
genesis_block.state_root = Hash256::from_slice(&genesis_state.hash_tree_root());
// Slot clock // Slot clock
let genesis_time = 1_549_935_547; // 12th Feb 2018 (arbitrary value in the past). let slot_clock = SystemTimeSlotClock::new(genesis_state.genesis_time, spec.seconds_per_slot)
let slot_clock = SystemTimeSlotClock::new(genesis_time, chain_spec.seconds_per_slot)
.expect("Unable to load SystemTimeSlotClock"); .expect("Unable to load SystemTimeSlotClock");
// Choose the fork choice // Choose the fork choice
let fork_choice = BitwiseLMDGhost::new(block_store.clone(), state_store.clone()); let fork_choice = BitwiseLMDGhost::new(block_store.clone(), state_store.clone());
/*
* Generate some random data to start a chain with.
*
* This is will need to be replace for production usage.
*/
let latest_eth1_data = Eth1Data {
deposit_root: Hash256::zero(),
block_hash: Hash256::zero(),
};
let keypairs: Vec<Keypair> = (0..8)
.collect::<Vec<usize>>()
.iter()
.map(|_| Keypair::random())
.collect();
let initial_validator_deposits = keypairs
.iter()
.map(|keypair| Deposit {
branch: vec![], // branch verification is not chain_specified.
index: 0, // index verification is not chain_specified.
deposit_data: DepositData {
amount: 32_000_000_000, // 32 ETH (in Gwei)
timestamp: genesis_time - 1,
deposit_input: DepositInput {
pubkey: keypair.pk.clone(),
withdrawal_credentials: Hash256::zero(), // Withdrawal not possible.
proof_of_possession: bls::create_proof_of_possession(&keypair),
},
},
})
.collect();
// Genesis chain // Genesis chain
// TODO: Handle error correctly //TODO: Handle error correctly
Arc::new( Arc::new(
BeaconChain::genesis( BeaconChain::from_genesis(
state_store.clone(), state_store.clone(),
block_store.clone(), block_store.clone(),
slot_clock, slot_clock,
genesis_time, genesis_state,
latest_eth1_data, genesis_block,
initial_validator_deposits, spec.clone(),
chain_spec.clone(),
fork_choice, fork_choice,
) )
.expect("Cannot generate beacon chain"), .expect("Terminate if beacon chain generation fails"),
) )
} }

View File

@ -12,12 +12,7 @@ path = "src/bin.rs"
name = "test_harness" name = "test_harness"
path = "src/lib.rs" path = "src/lib.rs"
[[bench]]
name = "state_transition"
harness = false
[dev-dependencies] [dev-dependencies]
criterion = "0.2"
state_processing = { path = "../../../eth2/state_processing" } state_processing = { path = "../../../eth2/state_processing" }
[dependencies] [dependencies]
@ -33,12 +28,14 @@ failure = "0.1"
failure_derive = "0.1" failure_derive = "0.1"
fork_choice = { path = "../../../eth2/fork_choice" } fork_choice = { path = "../../../eth2/fork_choice" }
hashing = { path = "../../../eth2/utils/hashing" } hashing = { path = "../../../eth2/utils/hashing" }
int_to_bytes = { path = "../../../eth2/utils/int_to_bytes" }
log = "0.4" log = "0.4"
env_logger = "0.6.0" env_logger = "0.6.0"
rayon = "1.0" rayon = "1.0"
serde = "1.0" serde = "1.0"
serde_derive = "1.0" serde_derive = "1.0"
serde_json = "1.0" serde_json = "1.0"
serde_yaml = "0.8"
slot_clock = { path = "../../../eth2/utils/slot_clock" } slot_clock = { path = "../../../eth2/utils/slot_clock" }
ssz = { path = "../../../eth2/utils/ssz" } ssz = { path = "../../../eth2/utils/ssz" }
types = { path = "../../../eth2/types" } types = { path = "../../../eth2/types" }

View File

@ -1,69 +0,0 @@
use criterion::Criterion;
use criterion::{black_box, criterion_group, criterion_main, Benchmark};
// use env_logger::{Builder, Env};
use state_processing::SlotProcessable;
use test_harness::BeaconChainHarness;
use types::{ChainSpec, Hash256};
fn mid_epoch_state_transition(c: &mut Criterion) {
// Builder::from_env(Env::default().default_filter_or("debug")).init();
let validator_count = 1000;
let mut rig = BeaconChainHarness::new(ChainSpec::foundation(), validator_count);
let epoch_depth = (rig.spec.slots_per_epoch * 2) + (rig.spec.slots_per_epoch / 2);
for _ in 0..epoch_depth {
rig.advance_chain_with_block();
}
let state = rig.beacon_chain.state.read().clone();
assert!((state.slot + 1) % rig.spec.slots_per_epoch != 0);
c.bench_function("mid-epoch state transition 10k validators", move |b| {
let state = state.clone();
b.iter(|| {
let mut state = state.clone();
black_box(state.per_slot_processing(Hash256::zero(), &rig.spec))
})
});
}
fn epoch_boundary_state_transition(c: &mut Criterion) {
// Builder::from_env(Env::default().default_filter_or("debug")).init();
let validator_count = 10000;
let mut rig = BeaconChainHarness::new(ChainSpec::foundation(), validator_count);
let epoch_depth = rig.spec.slots_per_epoch * 2;
for _ in 0..(epoch_depth - 1) {
rig.advance_chain_with_block();
}
let state = rig.beacon_chain.state.read().clone();
assert_eq!((state.slot + 1) % rig.spec.slots_per_epoch, 0);
c.bench(
"routines",
Benchmark::new("routine_1", move |b| {
let state = state.clone();
b.iter(|| {
let mut state = state.clone();
black_box(black_box(
state.per_slot_processing(Hash256::zero(), &rig.spec),
))
})
})
.sample_size(5), // sample size is low because function is sloooow.
);
}
criterion_group!(
benches,
mid_epoch_state_transition,
epoch_boundary_state_transition
);
criterion_main!(benches);

View File

@ -1,7 +1,6 @@
use super::ValidatorHarness; use super::ValidatorHarness;
use beacon_chain::{BeaconChain, BlockProcessingOutcome}; use beacon_chain::{BeaconChain, BlockProcessingOutcome};
pub use beacon_chain::{BeaconChainError, CheckPoint}; pub use beacon_chain::{BeaconChainError, CheckPoint};
use bls::{create_proof_of_possession, get_withdrawal_credentials};
use db::{ use db::{
stores::{BeaconBlockStore, BeaconStateStore}, stores::{BeaconBlockStore, BeaconStateStore},
MemoryDB, MemoryDB,
@ -10,10 +9,11 @@ use fork_choice::BitwiseLMDGhost;
use log::debug; use log::debug;
use rayon::prelude::*; use rayon::prelude::*;
use slot_clock::TestingSlotClock; use slot_clock::TestingSlotClock;
use ssz::TreeHash;
use std::collections::HashSet; use std::collections::HashSet;
use std::iter::FromIterator; use std::iter::FromIterator;
use std::sync::Arc; use std::sync::Arc;
use types::*; use types::{test_utils::TestingBeaconStateBuilder, *};
/// The beacon chain harness simulates a single beacon node with `validator_count` validators connected /// The beacon chain harness simulates a single beacon node with `validator_count` validators connected
/// to it. Each validator is provided a borrow to the beacon chain, where it may read /// to it. Each validator is provided a borrow to the beacon chain, where it may read
@ -39,58 +39,24 @@ impl BeaconChainHarness {
let db = Arc::new(MemoryDB::open()); let db = Arc::new(MemoryDB::open());
let block_store = Arc::new(BeaconBlockStore::new(db.clone())); let block_store = Arc::new(BeaconBlockStore::new(db.clone()));
let state_store = Arc::new(BeaconStateStore::new(db.clone())); let state_store = Arc::new(BeaconStateStore::new(db.clone()));
let genesis_time = 1_549_935_547; // 12th Feb 2018 (arbitrary value in the past).
let slot_clock = TestingSlotClock::new(spec.genesis_slot.as_u64()); let slot_clock = TestingSlotClock::new(spec.genesis_slot.as_u64());
let fork_choice = BitwiseLMDGhost::new(block_store.clone(), state_store.clone()); let fork_choice = BitwiseLMDGhost::new(block_store.clone(), state_store.clone());
let latest_eth1_data = Eth1Data {
deposit_root: Hash256::zero(),
block_hash: Hash256::zero(),
};
debug!("Generating validator keypairs..."); let state_builder =
TestingBeaconStateBuilder::from_default_keypairs_file_if_exists(validator_count, &spec);
let (genesis_state, keypairs) = state_builder.build();
let keypairs: Vec<Keypair> = (0..validator_count) let mut genesis_block = BeaconBlock::empty(&spec);
.collect::<Vec<usize>>() genesis_block.state_root = Hash256::from_slice(&genesis_state.hash_tree_root());
.par_iter()
.map(|_| Keypair::random())
.collect();
debug!("Creating validator deposits...");
let initial_validator_deposits = keypairs
.par_iter()
.map(|keypair| Deposit {
branch: vec![], // branch verification is not specified.
index: 0, // index verification is not specified.
deposit_data: DepositData {
amount: 32_000_000_000, // 32 ETH (in Gwei)
timestamp: genesis_time - 1,
deposit_input: DepositInput {
pubkey: keypair.pk.clone(),
// Validator can withdraw using their main keypair.
withdrawal_credentials: Hash256::from_slice(
&get_withdrawal_credentials(
&keypair.pk,
spec.bls_withdrawal_prefix_byte,
)[..],
),
proof_of_possession: create_proof_of_possession(&keypair),
},
},
})
.collect();
debug!("Creating the BeaconChain...");
// Create the Beacon Chain // Create the Beacon Chain
let beacon_chain = Arc::new( let beacon_chain = Arc::new(
BeaconChain::genesis( BeaconChain::from_genesis(
state_store.clone(), state_store.clone(),
block_store.clone(), block_store.clone(),
slot_clock, slot_clock,
genesis_time, genesis_state,
latest_eth1_data, genesis_block,
initial_validator_deposits,
spec.clone(), spec.clone(),
fork_choice, fork_choice,
) )
@ -161,8 +127,8 @@ impl BeaconChainHarness {
.get_crosslink_committees_at_slot(present_slot, &self.spec) .get_crosslink_committees_at_slot(present_slot, &self.spec)
.unwrap() .unwrap()
.iter() .iter()
.fold(vec![], |mut acc, (committee, _slot)| { .fold(vec![], |mut acc, c| {
acc.append(&mut committee.clone()); acc.append(&mut c.committee.clone());
acc acc
}); });
let attesting_validators: HashSet<usize> = let attesting_validators: HashSet<usize> =
@ -267,6 +233,27 @@ impl BeaconChainHarness {
Some(Signature::new(message, domain, &validator.keypair.sk)) Some(Signature::new(message, domain, &validator.keypair.sk))
} }
/// Returns the current `Fork` of the `beacon_chain`.
pub fn fork(&self) -> Fork {
self.beacon_chain.state.read().fork.clone()
}
/// Returns the current `epoch` of the `beacon_chain`.
pub fn epoch(&self) -> Epoch {
self.beacon_chain
.state
.read()
.slot
.epoch(self.spec.slots_per_epoch)
}
/// Returns the keypair for some validator index.
pub fn validator_keypair(&self, validator_index: usize) -> Option<&Keypair> {
self.validators
.get(validator_index)
.and_then(|v| Some(&v.keypair))
}
/// Submit a deposit to the `BeaconChain` and, if given a keypair, create a new /// Submit a deposit to the `BeaconChain` and, if given a keypair, create a new
/// `ValidatorHarness` instance for this validator. /// `ValidatorHarness` instance for this validator.
/// ///

View File

@ -1,69 +1,102 @@
use clap::{App, Arg}; use clap::{App, Arg, SubCommand};
use env_logger::{Builder, Env}; use env_logger::{Builder, Env};
use std::{fs::File, io::prelude::*}; use gen_keys::gen_keys;
use test_case::TestCase; use run_test::run_test;
use yaml_rust::YamlLoader; use std::fs;
use types::test_utils::keypairs_path;
use types::ChainSpec;
mod beacon_chain_harness; mod beacon_chain_harness;
mod gen_keys;
mod run_test;
mod test_case; mod test_case;
mod validator_harness; mod validator_harness;
use validator_harness::ValidatorHarness; use validator_harness::ValidatorHarness;
fn main() { fn main() {
let validator_file_path = keypairs_path();
let _ = fs::create_dir(validator_file_path.parent().unwrap());
let matches = App::new("Lighthouse Test Harness Runner") let matches = App::new("Lighthouse Test Harness Runner")
.version("0.0.1") .version("0.0.1")
.author("Sigma Prime <contact@sigmaprime.io>") .author("Sigma Prime <contact@sigmaprime.io>")
.about("Runs `test_harness` using a YAML test_case.") .about("Runs `test_harness` using a YAML test_case.")
.arg(
Arg::with_name("yaml")
.long("yaml")
.value_name("FILE")
.help("YAML file test_case.")
.required(true),
)
.arg( .arg(
Arg::with_name("log") Arg::with_name("log")
.long("log-level") .long("log-level")
.short("l")
.value_name("LOG_LEVEL") .value_name("LOG_LEVEL")
.help("Logging level.") .help("Logging level.")
.possible_values(&["error", "warn", "info", "debug", "trace"]) .possible_values(&["error", "warn", "info", "debug", "trace"])
.default_value("debug") .default_value("debug")
.required(true), .required(true),
) )
.arg(
Arg::with_name("spec")
.long("spec")
.short("s")
.value_name("SPECIFICATION")
.help("ChainSpec instantiation.")
.possible_values(&["foundation", "few_validators"])
.default_value("foundation"),
)
.subcommand(
SubCommand::with_name("run_test")
.about("Executes a YAML test specification")
.arg(
Arg::with_name("yaml")
.long("yaml")
.value_name("FILE")
.help("YAML file test_case.")
.required(true),
)
.arg(
Arg::with_name("validators_dir")
.long("validators-dir")
.short("v")
.value_name("VALIDATORS_DIR")
.help("A directory with validator deposits and keypair YAML."),
),
)
.subcommand(
SubCommand::with_name("gen_keys")
.about("Builds a file of BLS keypairs for faster tests.")
.arg(
Arg::with_name("validator_count")
.long("validator_count")
.short("n")
.value_name("VALIDATOR_COUNT")
.help("Number of validators to generate.")
.required(true),
)
.arg(
Arg::with_name("output_file")
.long("output_file")
.short("d")
.value_name("GENESIS_TIME")
.help("Output directory for generated YAML.")
.default_value(validator_file_path.to_str().unwrap()),
),
)
.get_matches(); .get_matches();
if let Some(log_level) = matches.value_of("log") { if let Some(log_level) = matches.value_of("log") {
Builder::from_env(Env::default().default_filter_or(log_level)).init(); Builder::from_env(Env::default().default_filter_or(log_level)).init();
} }
if let Some(yaml_file) = matches.value_of("yaml") { let _spec = match matches.value_of("spec") {
let docs = { Some("foundation") => ChainSpec::foundation(),
let mut file = File::open(yaml_file).unwrap(); Some("few_validators") => ChainSpec::few_validators(),
_ => unreachable!(), // Has a default value, should always exist.
};
let mut yaml_str = String::new(); if let Some(matches) = matches.subcommand_matches("run_test") {
file.read_to_string(&mut yaml_str).unwrap(); run_test(matches);
}
YamlLoader::load_from_str(&yaml_str).unwrap() if let Some(matches) = matches.subcommand_matches("gen_keys") {
}; gen_keys(matches);
for doc in &docs {
// For each `test_cases` YAML in the document, build a `TestCase`, execute it and
// assert that the execution result matches the test_case description.
//
// In effect, for each `test_case` a new `BeaconChainHarness` is created from genesis
// and a new `BeaconChain` is built as per the test_case.
//
// After the `BeaconChain` has been built out as per the test_case, a dump of all blocks
// and states in the chain is obtained and checked against the `results` specified in
// the `test_case`.
//
// If any of the expectations in the results are not met, the process
// panics with a message.
for test_case in doc["test_cases"].as_vec().unwrap() {
let test_case = TestCase::from_yaml(test_case);
test_case.assert_result_valid(test_case.execute())
}
}
} }
} }

View File

@ -0,0 +1,21 @@
use clap::{value_t, ArgMatches};
use log::debug;
use std::path::Path;
use types::test_utils::{generate_deterministic_keypairs, KeypairsFile};
/// Creates a file containing BLS keypairs.
///
/// Reads `validator_count` and `output_file` from the parsed CLI `matches`,
/// deterministically generates that many keypairs and writes them to the
/// given path in raw form.
pub fn gen_keys(matches: &ArgMatches) {
    let n_validators = value_t!(matches.value_of("validator_count"), usize)
        .expect("Validator count is required argument");
    let out_path_str = matches
        .value_of("output_file")
        .expect("Output file has a default value.");

    let keypairs = generate_deterministic_keypairs(n_validators);

    debug!("Writing keypairs to file...");
    // `Path::new` borrows the CLI string; no allocation needed.
    keypairs
        .to_raw_file(Path::new(out_path_str), &keypairs)
        .unwrap();
}

View File

@ -0,0 +1,37 @@
use crate::test_case::TestCase;
use clap::ArgMatches;
use std::{fs::File, io::prelude::*};
use yaml_rust::YamlLoader;
/// Runs a YAML-specified test case.
///
/// Loads the YAML file named by the `yaml` CLI argument, then builds and
/// executes every entry under `test_cases` in each document, panicking if
/// any result does not match its expectation. Does nothing when no `yaml`
/// argument was supplied.
pub fn run_test(matches: &ArgMatches) {
    let yaml_file = match matches.value_of("yaml") {
        Some(path) => path,
        None => return,
    };

    // Slurp the whole file, then parse it into one or more YAML documents.
    let mut raw_yaml = String::new();
    File::open(yaml_file)
        .unwrap()
        .read_to_string(&mut raw_yaml)
        .unwrap();
    let docs = YamlLoader::load_from_str(&raw_yaml).unwrap();

    for doc in &docs {
        // For each `test_cases` entry, a fresh `BeaconChainHarness` is built
        // from genesis and a `BeaconChain` is grown as the test case
        // dictates; the resulting dump of blocks and states is then checked
        // against the `results` section. Any unmet expectation panics with a
        // descriptive message.
        for case_yaml in doc["test_cases"].as_vec().unwrap() {
            let test_case = TestCase::from_yaml(case_yaml);
            test_case.assert_result_valid(test_case.execute())
        }
    }
}

View File

@ -3,14 +3,11 @@
use crate::beacon_chain_harness::BeaconChainHarness; use crate::beacon_chain_harness::BeaconChainHarness;
use beacon_chain::CheckPoint; use beacon_chain::CheckPoint;
use bls::{create_proof_of_possession, get_withdrawal_credentials};
use log::{info, warn}; use log::{info, warn};
use ssz::SignedRoot; use ssz::SignedRoot;
use types::*; use types::*;
use types::{ use types::test_utils::*;
attester_slashing::AttesterSlashingBuilder, proposer_slashing::ProposerSlashingBuilder,
};
use yaml_rust::Yaml; use yaml_rust::Yaml;
mod config; mod config;
@ -224,27 +221,20 @@ impl TestCase {
} }
/// Builds a `Deposit` this is valid for the given `BeaconChainHarness` at its next slot. /// Builds a `Deposit` this is valid for the given `BeaconChainHarness` at its next slot.
fn build_transfer(harness: &BeaconChainHarness, from: u64, to: u64, amount: u64) -> Transfer { fn build_transfer(
harness: &BeaconChainHarness,
sender: u64,
recipient: u64,
amount: u64,
) -> Transfer {
let slot = harness.beacon_chain.state.read().slot + 1; let slot = harness.beacon_chain.state.read().slot + 1;
let mut transfer = Transfer { let mut builder = TestingTransferBuilder::new(sender, recipient, amount, slot);
from,
to,
amount,
fee: 0,
slot,
pubkey: harness.validators[from as usize].keypair.pk.clone(),
signature: Signature::empty_signature(),
};
let message = transfer.signed_root(); let keypair = harness.validator_keypair(sender as usize).unwrap();
let epoch = slot.epoch(harness.spec.slots_per_epoch); builder.sign(keypair.clone(), &harness.fork(), &harness.spec);
transfer.signature = harness builder.build()
.validator_sign(from as usize, &message[..], epoch, Domain::Transfer)
.expect("Unable to sign Transfer");
transfer
} }
/// Builds a `Deposit` this is valid for the given `BeaconChainHarness`. /// Builds a `Deposit` this is valid for the given `BeaconChainHarness`.
@ -257,29 +247,12 @@ fn build_deposit(
index_offset: u64, index_offset: u64,
) -> (Deposit, Keypair) { ) -> (Deposit, Keypair) {
let keypair = Keypair::random(); let keypair = Keypair::random();
let proof_of_possession = create_proof_of_possession(&keypair);
let index = harness.beacon_chain.state.read().deposit_index + index_offset;
let withdrawal_credentials = Hash256::from_slice(
&get_withdrawal_credentials(&keypair.pk, harness.spec.bls_withdrawal_prefix_byte)[..],
);
let deposit = Deposit { let mut builder = TestingDepositBuilder::new(keypair.pk.clone(), amount);
// Note: `branch` and `index` will need to be updated once the spec defines their builder.set_index(harness.beacon_chain.state.read().deposit_index + index_offset);
// validity. builder.sign(&keypair, harness.epoch(), &harness.fork(), &harness.spec);
branch: vec![],
index,
deposit_data: DepositData {
amount,
timestamp: 1,
deposit_input: DepositInput {
pubkey: keypair.pk.clone(),
withdrawal_credentials,
proof_of_possession,
},
},
};
(deposit, keypair) (builder.build(), keypair)
} }
/// Builds a `VoluntaryExit` this is valid for the given `BeaconChainHarness`. /// Builds a `VoluntaryExit` this is valid for the given `BeaconChainHarness`.
@ -318,7 +291,7 @@ fn build_double_vote_attester_slashing(
.expect("Unable to sign AttesterSlashing") .expect("Unable to sign AttesterSlashing")
}; };
AttesterSlashingBuilder::double_vote(validator_indices, signer) TestingAttesterSlashingBuilder::double_vote(validator_indices, signer)
} }
/// Builds an `ProposerSlashing` for some `validator_index`. /// Builds an `ProposerSlashing` for some `validator_index`.
@ -331,5 +304,5 @@ fn build_proposer_slashing(harness: &BeaconChainHarness, validator_index: u64) -
.expect("Unable to sign AttesterSlashing") .expect("Unable to sign AttesterSlashing")
}; };
ProposerSlashingBuilder::double_vote(validator_index, signer, &harness.spec) TestingProposerSlashingBuilder::double_vote(validator_index, signer, &harness.spec)
} }

View File

@ -2,7 +2,7 @@ use super::BLOCKS_DB_COLUMN as DB_COLUMN;
use super::{ClientDB, DBError}; use super::{ClientDB, DBError};
use ssz::Decodable; use ssz::Decodable;
use std::sync::Arc; use std::sync::Arc;
use types::{readers::BeaconBlockReader, BeaconBlock, Hash256, Slot}; use types::{BeaconBlock, Hash256, Slot};
#[derive(Clone, Debug, PartialEq)] #[derive(Clone, Debug, PartialEq)]
pub enum BeaconBlockAtSlotError { pub enum BeaconBlockAtSlotError {
@ -38,23 +38,6 @@ impl<T: ClientDB> BeaconBlockStore<T> {
} }
} }
/// Returns an object implementing `BeaconBlockReader`, or `None` (if hash not known).
///
/// Note: Presently, this function fully deserializes a `BeaconBlock` and returns that. In the
/// future, it would be ideal to return an object capable of reading directly from serialized
/// SSZ bytes.
pub fn get_reader(&self, hash: &Hash256) -> Result<Option<impl BeaconBlockReader>, DBError> {
match self.get(&hash)? {
None => Ok(None),
Some(ssz) => {
let (block, _) = BeaconBlock::ssz_decode(&ssz, 0).map_err(|_| DBError {
message: "Bad BeaconBlock SSZ.".to_string(),
})?;
Ok(Some(block))
}
}
}
/// Retrieve the block at a slot given a "head_hash" and a slot. /// Retrieve the block at a slot given a "head_hash" and a slot.
/// ///
/// A "head_hash" must be a block hash with a slot number greater than or equal to the desired /// A "head_hash" must be a block hash with a slot number greater than or equal to the desired
@ -72,17 +55,17 @@ impl<T: ClientDB> BeaconBlockStore<T> {
&self, &self,
head_hash: &Hash256, head_hash: &Hash256,
slot: Slot, slot: Slot,
) -> Result<Option<(Hash256, impl BeaconBlockReader)>, BeaconBlockAtSlotError> { ) -> Result<Option<(Hash256, BeaconBlock)>, BeaconBlockAtSlotError> {
let mut current_hash = *head_hash; let mut current_hash = *head_hash;
loop { loop {
if let Some(block_reader) = self.get_reader(&current_hash)? { if let Some(block) = self.get_deserialized(&current_hash)? {
if block_reader.slot() == slot { if block.slot == slot {
break Ok(Some((current_hash, block_reader))); break Ok(Some((current_hash, block)));
} else if block_reader.slot() < slot { } else if block.slot < slot {
break Ok(None); break Ok(None);
} else { } else {
current_hash = block_reader.parent_root(); current_hash = block.previous_block_root;
} }
} else { } else {
break Err(BeaconBlockAtSlotError::UnknownBeaconBlock(current_hash)); break Err(BeaconBlockAtSlotError::UnknownBeaconBlock(current_hash));
@ -198,6 +181,7 @@ mod tests {
} }
#[test] #[test]
#[ignore]
fn test_block_at_slot() { fn test_block_at_slot() {
let db = Arc::new(MemoryDB::open()); let db = Arc::new(MemoryDB::open());
let bs = Arc::new(BeaconBlockStore::new(db.clone())); let bs = Arc::new(BeaconBlockStore::new(db.clone()));
@ -227,7 +211,7 @@ mod tests {
for i in 0..block_count { for i in 0..block_count {
let mut block = BeaconBlock::random_for_test(&mut rng); let mut block = BeaconBlock::random_for_test(&mut rng);
block.parent_root = parent_hashes[i]; block.previous_block_root = parent_hashes[i];
block.slot = slots[i]; block.slot = slots[i];
let ssz = ssz_encode(&block); let ssz = ssz_encode(&block);
@ -239,12 +223,12 @@ mod tests {
// Test that certain slots can be reached from certain hashes. // Test that certain slots can be reached from certain hashes.
let test_cases = vec![(4, 4), (4, 3), (4, 2), (4, 1), (4, 0)]; let test_cases = vec![(4, 4), (4, 3), (4, 2), (4, 1), (4, 0)];
for (hashes_index, slot_index) in test_cases { for (hashes_index, slot_index) in test_cases {
let (matched_block_hash, reader) = bs let (matched_block_hash, block) = bs
.block_at_slot(&hashes[hashes_index], slots[slot_index]) .block_at_slot(&hashes[hashes_index], slots[slot_index])
.unwrap() .unwrap()
.unwrap(); .unwrap();
assert_eq!(matched_block_hash, hashes[slot_index]); assert_eq!(matched_block_hash, hashes[slot_index]);
assert_eq!(reader.slot(), slots[slot_index]); assert_eq!(block.slot, slots[slot_index]);
} }
let ssz = bs.block_at_slot(&hashes[4], Slot::new(2)).unwrap(); let ssz = bs.block_at_slot(&hashes[4], Slot::new(2)).unwrap();

View File

@ -2,7 +2,7 @@ use super::STATES_DB_COLUMN as DB_COLUMN;
use super::{ClientDB, DBError}; use super::{ClientDB, DBError};
use ssz::Decodable; use ssz::Decodable;
use std::sync::Arc; use std::sync::Arc;
use types::{readers::BeaconStateReader, BeaconState, Hash256}; use types::{BeaconState, Hash256};
pub struct BeaconStateStore<T> pub struct BeaconStateStore<T>
where where
@ -30,23 +30,6 @@ impl<T: ClientDB> BeaconStateStore<T> {
} }
} }
} }
/// Retuns an object implementing `BeaconStateReader`, or `None` (if hash not known).
///
/// Note: Presently, this function fully deserializes a `BeaconState` and returns that. In the
/// future, it would be ideal to return an object capable of reading directly from serialized
/// SSZ bytes.
pub fn get_reader(&self, hash: &Hash256) -> Result<Option<impl BeaconStateReader>, DBError> {
match self.get(&hash)? {
None => Ok(None),
Some(ssz) => {
let (state, _) = BeaconState::ssz_decode(&ssz, 0).map_err(|_| DBError {
message: "Bad State SSZ.".to_string(),
})?;
Ok(Some(state))
}
}
}
} }
#[cfg(test)] #[cfg(test)]
@ -72,8 +55,7 @@ mod tests {
store.put(&state_root, &ssz_encode(&state)).unwrap(); store.put(&state_root, &ssz_encode(&state)).unwrap();
let reader = store.get_reader(&state_root).unwrap().unwrap(); let decoded = store.get_deserialized(&state_root).unwrap().unwrap();
let decoded = reader.into_beacon_state().unwrap();
assert_eq!(state, decoded); assert_eq!(state, decoded);
} }

View File

@ -4,7 +4,7 @@ mod traits;
use slot_clock::SlotClock; use slot_clock::SlotClock;
use ssz::{SignedRoot, TreeHash}; use ssz::{SignedRoot, TreeHash};
use std::sync::Arc; use std::sync::Arc;
use types::{BeaconBlock, ChainSpec, Domain, Hash256, Proposal, Slot}; use types::{BeaconBlock, ChainSpec, Domain, Slot};
pub use self::traits::{ pub use self::traits::{
BeaconNode, BeaconNodeError, DutiesReader, DutiesReaderError, PublishOutcome, Signer, BeaconNode, BeaconNodeError, DutiesReader, DutiesReaderError, PublishOutcome, Signer,
@ -158,7 +158,7 @@ impl<T: SlotClock, U: BeaconNode, V: DutiesReader, W: Signer> BlockProducer<T, U
if self.safe_to_produce(&block) { if self.safe_to_produce(&block) {
let domain = self.spec.get_domain( let domain = self.spec.get_domain(
slot.epoch(self.spec.slots_per_epoch), slot.epoch(self.spec.slots_per_epoch),
Domain::Proposal, Domain::BeaconBlock,
&fork, &fork,
); );
if let Some(block) = self.sign_block(block, domain) { if let Some(block) = self.sign_block(block, domain) {
@ -182,16 +182,9 @@ impl<T: SlotClock, U: BeaconNode, V: DutiesReader, W: Signer> BlockProducer<T, U
fn sign_block(&mut self, mut block: BeaconBlock, domain: u64) -> Option<BeaconBlock> { fn sign_block(&mut self, mut block: BeaconBlock, domain: u64) -> Option<BeaconBlock> {
self.store_produce(&block); self.store_produce(&block);
let proposal = Proposal {
slot: block.slot,
shard: self.spec.beacon_chain_shard_number,
block_root: Hash256::from_slice(&block.signed_root()[..]),
signature: block.signature.clone(),
};
match self match self
.signer .signer
.sign_block_proposal(&proposal.signed_root()[..], domain) .sign_block_proposal(&block.signed_root()[..], domain)
{ {
None => None, None => None,
Some(signature) => { Some(signature) => {

View File

@ -28,8 +28,8 @@ impl DutiesReader for EpochMap {
fn fork(&self) -> Result<Fork, DutiesReaderError> { fn fork(&self) -> Result<Fork, DutiesReaderError> {
Ok(Fork { Ok(Fork {
previous_version: 0, previous_version: [0; 4],
current_version: 0, current_version: [0; 4],
epoch: Epoch::new(0), epoch: Epoch::new(0),
}) })
} }

View File

@ -11,8 +11,8 @@ use log::{debug, trace};
use std::collections::HashMap; use std::collections::HashMap;
use std::sync::Arc; use std::sync::Arc;
use types::{ use types::{
readers::BeaconBlockReader, validator_registry::get_active_validator_indices, BeaconBlock, validator_registry::get_active_validator_indices, BeaconBlock, ChainSpec, Hash256, Slot,
ChainSpec, Hash256, Slot, SlotHeight, SlotHeight,
}; };
//TODO: Pruning - Children //TODO: Pruning - Children
@ -255,17 +255,17 @@ impl<T: ClientDB + Sized> ForkChoice for BitwiseLMDGhost<T> {
// get the height of the parent // get the height of the parent
let parent_height = self let parent_height = self
.block_store .block_store
.get_deserialized(&block.parent_root)? .get_deserialized(&block.previous_block_root)?
.ok_or_else(|| ForkChoiceError::MissingBeaconBlock(block.parent_root))? .ok_or_else(|| ForkChoiceError::MissingBeaconBlock(block.previous_block_root))?
.slot() .slot
.height(spec.genesis_slot); .height(spec.genesis_slot);
let parent_hash = &block.parent_root; let parent_hash = &block.previous_block_root;
// add the new block to the children of parent // add the new block to the children of parent
(*self (*self
.children .children
.entry(block.parent_root) .entry(block.previous_block_root)
.or_insert_with(|| vec![])) .or_insert_with(|| vec![]))
.push(block_hash.clone()); .push(block_hash.clone());
@ -309,7 +309,7 @@ impl<T: ClientDB + Sized> ForkChoice for BitwiseLMDGhost<T> {
.block_store .block_store
.get_deserialized(&target_block_root)? .get_deserialized(&target_block_root)?
.ok_or_else(|| ForkChoiceError::MissingBeaconBlock(*target_block_root))? .ok_or_else(|| ForkChoiceError::MissingBeaconBlock(*target_block_root))?
.slot() .slot
.height(spec.genesis_slot); .height(spec.genesis_slot);
// get the height of the past target block // get the height of the past target block
@ -317,7 +317,7 @@ impl<T: ClientDB + Sized> ForkChoice for BitwiseLMDGhost<T> {
.block_store .block_store
.get_deserialized(&attestation_target)? .get_deserialized(&attestation_target)?
.ok_or_else(|| ForkChoiceError::MissingBeaconBlock(*attestation_target))? .ok_or_else(|| ForkChoiceError::MissingBeaconBlock(*attestation_target))?
.slot() .slot
.height(spec.genesis_slot); .height(spec.genesis_slot);
// update the attestation only if the new target is higher // update the attestation only if the new target is higher
if past_block_height < block_height { if past_block_height < block_height {
@ -343,8 +343,8 @@ impl<T: ClientDB + Sized> ForkChoice for BitwiseLMDGhost<T> {
.get_deserialized(&justified_block_start)? .get_deserialized(&justified_block_start)?
.ok_or_else(|| ForkChoiceError::MissingBeaconBlock(*justified_block_start))?; .ok_or_else(|| ForkChoiceError::MissingBeaconBlock(*justified_block_start))?;
let block_slot = block.slot(); let block_slot = block.slot;
let state_root = block.state_root(); let state_root = block.state_root;
let mut block_height = block_slot.height(spec.genesis_slot); let mut block_height = block_slot.height(spec.genesis_slot);
let mut current_head = *justified_block_start; let mut current_head = *justified_block_start;
@ -409,11 +409,23 @@ impl<T: ClientDB + Sized> ForkChoice for BitwiseLMDGhost<T> {
*child_votes.entry(child).or_insert_with(|| 0) += vote; *child_votes.entry(child).or_insert_with(|| 0) += vote;
} }
} }
// given the votes on the children, find the best child // check if we have votes of children, if not select the smallest hash child
current_head = self if child_votes.is_empty() {
.choose_best_child(&child_votes) current_head = *children
.ok_or(ForkChoiceError::CannotFindBestChild)?; .iter()
trace!("Best child found: {}", current_head); .min_by(|child1, child2| child1.cmp(child2))
.expect("Must be children here");
trace!(
"Children have no votes - smallest hash chosen: {}",
current_head
);
} else {
// given the votes on the children, find the best child
current_head = self
.choose_best_child(&child_votes)
.ok_or(ForkChoiceError::CannotFindBestChild)?;
trace!("Best child found: {}", current_head);
}
} }
// didn't find head yet, proceed to next iteration // didn't find head yet, proceed to next iteration
@ -422,7 +434,7 @@ impl<T: ClientDB + Sized> ForkChoice for BitwiseLMDGhost<T> {
.block_store .block_store
.get_deserialized(&current_head)? .get_deserialized(&current_head)?
.ok_or_else(|| ForkChoiceError::MissingBeaconBlock(current_head))? .ok_or_else(|| ForkChoiceError::MissingBeaconBlock(current_head))?
.slot() .slot
.height(spec.genesis_slot); .height(spec.genesis_slot);
// prune the latest votes for votes that are not part of current chosen chain // prune the latest votes for votes that are not part of current chosen chain
// more specifically, only keep votes that have head as an ancestor // more specifically, only keep votes that have head as an ancestor

View File

@ -22,6 +22,7 @@ extern crate types;
pub mod bitwise_lmd_ghost; pub mod bitwise_lmd_ghost;
pub mod longest_chain; pub mod longest_chain;
pub mod optimized_lmd_ghost;
pub mod slow_lmd_ghost; pub mod slow_lmd_ghost;
use db::stores::BeaconBlockAtSlotError; use db::stores::BeaconBlockAtSlotError;
@ -30,6 +31,7 @@ use types::{BeaconBlock, ChainSpec, Hash256};
pub use bitwise_lmd_ghost::BitwiseLMDGhost; pub use bitwise_lmd_ghost::BitwiseLMDGhost;
pub use longest_chain::LongestChain; pub use longest_chain::LongestChain;
pub use optimized_lmd_ghost::OptimizedLMDGhost;
pub use slow_lmd_ghost::SlowLMDGhost; pub use slow_lmd_ghost::SlowLMDGhost;
/// Defines the interface for Fork Choices. Each Fork choice will define their own data structures /// Defines the interface for Fork Choices. Each Fork choice will define their own data structures
@ -102,4 +104,6 @@ pub enum ForkChoiceAlgorithm {
SlowLMDGhost, SlowLMDGhost,
/// An optimised version of bitwise LMD-GHOST by Vitalik. /// An optimised version of bitwise LMD-GHOST by Vitalik.
BitwiseLMDGhost, BitwiseLMDGhost,
/// An optimised implementation of LMD ghost.
OptimizedLMDGhost,
} }

View File

@ -34,7 +34,7 @@ impl<T: ClientDB + Sized> ForkChoice for LongestChain<T> {
) -> Result<(), ForkChoiceError> { ) -> Result<(), ForkChoiceError> {
// add the block hash to head_block_hashes removing the parent if it exists // add the block hash to head_block_hashes removing the parent if it exists
self.head_block_hashes self.head_block_hashes
.retain(|hash| *hash != block.parent_root); .retain(|hash| *hash != block.previous_block_root);
self.head_block_hashes.push(*block_hash); self.head_block_hashes.push(*block_hash);
Ok(()) Ok(())
} }

View File

@ -0,0 +1,465 @@
//! The optimised bitwise LMD-GHOST fork choice rule.
extern crate bit_vec;
use crate::{ForkChoice, ForkChoiceError};
use db::{
stores::{BeaconBlockStore, BeaconStateStore},
ClientDB,
};
use log::{debug, trace};
use std::cmp::Ordering;
use std::collections::HashMap;
use std::sync::Arc;
use types::{
validator_registry::get_active_validator_indices, BeaconBlock, ChainSpec, Hash256, Slot,
SlotHeight,
};
//TODO: Pruning - Children
//TODO: Handle Syncing
// NOTE: This uses u32 to represent difference between block heights. Thus this is only
// applicable for block height differences in the range of a u32.
// This can potentially be parallelized in some parts.
/// Compute the base-2 logarithm of an integer, floored (rounded down).
///
/// `log2(0)` is mathematically undefined; this implementation maps `0` to `0`.
#[inline]
fn log2_int(x: u64) -> u32 {
    match x {
        0 => 0,
        // Position of the highest set bit: 63 minus the leading-zero count.
        n => 63 - n.leading_zeros(),
    }
}
/// Returns `2^floor(log2(x))` — for `x >= 1`, the greatest power of two not
/// exceeding `x`. By the `log2_int(0) == 0` convention, `x == 0` yields `1`.
fn power_of_2_below(x: u64) -> u64 {
    // A left shift of 1 is the same as 2^exp for exponents up to 63,
    // which is the maximum `log2_int` can return.
    1u64 << log2_int(x)
}
/// Stores the necessary data structures to run the optimised lmd ghost algorithm.
pub struct OptimizedLMDGhost<T: ClientDB + Sized> {
    /// A cache of known ancestors at given heights for a specific block.
    //TODO: Consider FnvHashMap
    cache: HashMap<CacheKey<u64>, Hash256>,
    /// Log lookup table for blocks to their ancestors, indexed by log2 of the
    /// height difference (skip-list style, allowing O(log n) ancestor hops).
    //TODO: Verify we only want/need a size 16 log lookup
    ancestors: Vec<HashMap<Hash256, Hash256>>,
    /// Stores the children for any given parent.
    children: HashMap<Hash256, Vec<Hash256>>,
    /// The latest attestation targets as a map of validator index to block hash.
    //TODO: Could this be a fixed size vec
    latest_attestation_targets: HashMap<u64, Hash256>,
    /// Block storage access.
    block_store: Arc<BeaconBlockStore<T>>,
    /// State storage access.
    state_store: Arc<BeaconStateStore<T>>,
    // Highest block height seen so far — presumably maintained where blocks
    // are added (not visible in this chunk); confirm against `add_block`.
    max_known_height: SlotHeight,
}
impl<T> OptimizedLMDGhost<T>
where
T: ClientDB + Sized,
{
pub fn new(
block_store: Arc<BeaconBlockStore<T>>,
state_store: Arc<BeaconStateStore<T>>,
) -> Self {
OptimizedLMDGhost {
cache: HashMap::new(),
ancestors: vec![HashMap::new(); 16],
latest_attestation_targets: HashMap::new(),
children: HashMap::new(),
max_known_height: SlotHeight::new(0),
block_store,
state_store,
}
}
/// Finds the latest votes weighted by validator balance. Returns a hashmap of block_hash to
/// weighted votes.
///
/// Each active validator's vote weight is
/// `min(balance, MAX_DEPOSIT_AMOUNT) / FORK_CHOICE_BALANCE_INCREMENT`,
/// accumulated onto that validator's latest attestation target.
pub fn get_latest_votes(
    &self,
    state_root: &Hash256,
    block_slot: Slot,
    spec: &ChainSpec,
) -> Result<HashMap<Hash256, u64>, ForkChoiceError> {
    let mut weighted_votes: HashMap<Hash256, u64> = HashMap::new();

    // Load the state the weights are drawn from; a missing state is an error.
    let state = self
        .state_store
        .get_deserialized(state_root)?
        .ok_or_else(|| ForkChoiceError::MissingBeaconState(*state_root))?;

    let epoch = block_slot.epoch(spec.slots_per_epoch);
    for index in get_active_validator_indices(&state.validator_registry[..], epoch) {
        let weight = std::cmp::min(state.validator_balances[index], spec.max_deposit_amount)
            / spec.fork_choice_balance_increment;
        if weight == 0 {
            continue;
        }
        // Validators with no recorded attestation target contribute nothing.
        if let Some(target) = self.latest_attestation_targets.get(&(index as u64)) {
            *weighted_votes.entry(*target).or_insert(0) += weight;
        }
    }
    trace!("Latest votes: {:?}", weighted_votes);
    Ok(weighted_votes)
}
/// Gets the ancestor at a given height `target_height` of a block specified by `block_hash`.
///
/// Results are memoised in `self.cache`; uncached lookups hop towards the
/// target via the `ancestors` log-lookup tables, recursing from each
/// intermediate ancestor. Returns `None` when the block cannot be resolved
/// or `target_height` is above the block's own height.
fn get_ancestor(
    &mut self,
    block_hash: Hash256,
    target_height: SlotHeight,
    spec: &ChainSpec,
) -> Option<Hash256> {
    // Resolve the block's own height. Both a DB error and an unknown hash
    // yield `None`. (Previously an unknown hash hit an
    // `.expect("Should have returned already if None")` and panicked, even
    // though nothing had returned earlier — `??` degrades gracefully instead.)
    let block_height = {
        let block_slot = self.block_store.get_deserialized(&block_hash).ok()??.slot;
        block_slot.height(spec.genesis_slot)
    };

    // A block has no ancestor above its own height; at its own height the
    // ancestor is the block itself.
    if target_height > block_height {
        return None;
    }
    if target_height == block_height {
        return Some(block_hash);
    }

    // Check if the result is stored in our cache.
    let cache_key = CacheKey::new(&block_hash, target_height.as_u64());
    if let Some(ancestor) = self.cache.get(&cache_key) {
        return Some(*ancestor);
    }

    // Not in the cache: recursively search for ancestors using a log-lookup.
    if let Some(ancestor) = {
        let ancestor_lookup = self.ancestors
            [log2_int((block_height - target_height - 1u64).as_u64()) as usize]
            .get(&block_hash)
            //TODO: Panic if we can't lookup and fork choice fails
            .expect("All blocks should be added to the ancestor log lookup table");
        self.get_ancestor(*ancestor_lookup, target_height, &spec)
    } {
        // Memoise the result for future queries.
        self.cache.insert(cache_key, ancestor);
        return Some(ancestor);
    }
    None
}
// looks for an obvious block winner given the latest votes for a specific height
//
// Projects every vote onto its ancestor at `block_height` and sums the
// weights per ancestor; if one ancestor holds a strict majority of the total
// weight it is returned, otherwise `None`.
fn get_clear_winner(
    &mut self,
    latest_votes: &HashMap<Hash256, u64>,
    block_height: SlotHeight,
    spec: &ChainSpec,
) -> Option<Hash256> {
    // map of vote counts for every hash at this height
    let mut current_votes: HashMap<Hash256, u64> = HashMap::new();
    let mut total_vote_count = 0;
    trace!("Clear winner at block height: {}", block_height);
    // loop through the latest votes and count all votes
    // these have already been weighted by balance
    for (hash, votes) in latest_votes.iter() {
        // Votes whose target has no ancestor at this height are ignored.
        if let Some(ancestor) = self.get_ancestor(*hash, block_height, spec) {
            // Entry API: a single hash lookup instead of `get` + `insert`.
            *current_votes.entry(ancestor).or_insert(0) += *votes;
            total_vote_count += votes;
        }
    }
    // A clear winner is an ancestor with a strict majority of all counted votes.
    current_votes
        .into_iter()
        .find(|(_, votes)| *votes > total_vote_count / 2)
        .map(|(hash, _)| hash)
}
// Finds the best child (one with highest votes)
fn choose_best_child(&self, votes: &HashMap<Hash256, u64>) -> Option<Hash256> {
if votes.is_empty() {
return None;
}
// Iterate through hashmap to get child with maximum votes
let best_child = votes.iter().max_by(|(child1, v1), (child2, v2)| {
let mut result = v1.cmp(v2);
// If votes are equal, choose smaller hash to break ties deterministically
if result == Ordering::Equal {
// Reverse so that max_by chooses smaller hash
result = child1.cmp(child2).reverse();
}
result
});
Some(*best_child.unwrap().0)
}
}
impl<T: ClientDB + Sized> ForkChoice for OptimizedLMDGhost<T> {
/// Registers `block` with the fork-choice structures: records it as a child
/// of its parent, extends the log-spaced ancestor tables, and bumps the
/// tallest height seen so far.
fn add_block(
    &mut self,
    block: &BeaconBlock,
    block_hash: &Hash256,
    spec: &ChainSpec,
) -> Result<(), ForkChoiceError> {
    // The parent must already be known; its height anchors the new entries.
    let parent_height = self
        .block_store
        .get_deserialized(&block.previous_block_root)?
        .ok_or_else(|| ForkChoiceError::MissingBeaconBlock(block.previous_block_root))?
        .slot
        .height(spec.genesis_slot);

    let parent_hash = &block.previous_block_root;

    // Record this block as a child of its parent.
    self.children
        .entry(block.previous_block_root)
        .or_insert_with(Vec::new)
        .push(*block_hash);

    // Extend the skip tables: at level `i`, blocks whose parent height is
    // `2^i`-aligned point at the parent; all others inherit the parent's entry.
    for level in 0..16 {
        if parent_height % (1 << level) == 0 {
            self.ancestors[level].insert(*block_hash, *parent_hash);
        } else {
            // TODO: This is unsafe. Will panic if parent_hash doesn't exist. Using it for debugging
            let inherited = self.ancestors[level][parent_hash];
            self.ancestors[level].insert(*block_hash, inherited);
        }
    }

    // Track the tallest block height observed so far.
    self.max_known_height = std::cmp::max(self.max_known_height, parent_height + 1);
    Ok(())
}
/// Records `target_block_root` as the latest attestation target for
/// `validator_index`, keeping any previously stored target that sits at a
/// greater or equal height.
fn add_attestation(
    &mut self,
    validator_index: u64,
    target_block_root: &Hash256,
    spec: &ChainSpec,
) -> Result<(), ForkChoiceError> {
    trace!(
        "Adding attestation of validator: {:?} for block: {}",
        validator_index,
        target_block_root
    );
    // First attestation from this validator: just store it.
    let attestation_target = self
        .latest_attestation_targets
        .entry(validator_index)
        .or_insert_with(|| *target_block_root);
    // A different, pre-existing target must be compared by height.
    if attestation_target != target_block_root {
        trace!("Old attestation found: {:?}", attestation_target);
        let new_target_height = self
            .block_store
            .get_deserialized(&target_block_root)?
            .ok_or_else(|| ForkChoiceError::MissingBeaconBlock(*target_block_root))?
            .slot
            .height(spec.genesis_slot);
        let stored_target_height = self
            .block_store
            .get_deserialized(&attestation_target)?
            .ok_or_else(|| ForkChoiceError::MissingBeaconBlock(*attestation_target))?
            .slot
            .height(spec.genesis_slot);
        // Only a strictly higher target replaces the stored one.
        if stored_target_height < new_target_height {
            trace!("Updating old attestation");
            *attestation_target = *target_block_root;
        }
    }
    Ok(())
}
/// Perform lmd_ghost on the current chain to find the head.
///
/// Starting from the justified block, each iteration:
/// 1. tries a logarithmic skip to a height where one ancestor holds a strict
///    vote majority (`get_clear_winner`);
/// 2. otherwise, if there is exactly one child, steps to it;
/// 3. otherwise tallies votes per child and takes the best (`choose_best_child`).
/// The walk terminates when the current block has no known children.
fn find_head(
    &mut self,
    justified_block_start: &Hash256,
    spec: &ChainSpec,
) -> Result<Hash256, ForkChoiceError> {
    debug!(
        "Starting optimised fork choice at block: {}",
        justified_block_start
    );
    let block = self
        .block_store
        .get_deserialized(&justified_block_start)?
        .ok_or_else(|| ForkChoiceError::MissingBeaconBlock(*justified_block_start))?;
    let block_slot = block.slot;
    let state_root = block.state_root;
    let mut block_height = block_slot.height(spec.genesis_slot);
    let mut current_head = *justified_block_start;
    let mut latest_votes = self.get_latest_votes(&state_root, block_slot, spec)?;
    // remove any votes that don't relate to our current head.
    latest_votes
        .retain(|hash, _| self.get_ancestor(*hash, block_height, spec) == Some(current_head));
    // begin searching for the head
    loop {
        debug!(
            "Iteration for block: {} with vote length: {}",
            current_head,
            latest_votes.len()
        );
        // if there are no children, we are done, return the current_head
        let children = match self.children.get(&current_head) {
            Some(children) => children.clone(),
            None => {
                debug!("Head found: {}", current_head);
                return Ok(current_head);
            }
        };
        // logarithmic lookup blocks to see if there are obvious winners, if so,
        // progress to the next iteration.
        // `step` begins at half the largest power of two that fits in the
        // distance to the tallest known block, and halves on each failed probe.
        let mut step =
            power_of_2_below(self.max_known_height.saturating_sub(block_height).as_u64()) / 2;
        while step > 0 {
            trace!("Current Step: {}", step);
            // Probe the next `step`-aligned height above `block_height`.
            if let Some(clear_winner) = self.get_clear_winner(
                &latest_votes,
                block_height - (block_height % step) + step,
                spec,
            ) {
                current_head = clear_winner;
                break;
            }
            step /= 2;
        }
        if step > 0 {
            trace!("Found clear winner: {}", current_head);
        }
        // if our skip lookup failed and we only have one child, progress to that child
        else if children.len() == 1 {
            current_head = children[0];
            trace!(
                "Lookup failed, only one child, proceeding to child: {}",
                current_head
            );
        }
        // we need to find the best child path to progress down.
        else {
            trace!("Searching for best child");
            let mut child_votes = HashMap::new();
            for (voted_hash, vote) in latest_votes.iter() {
                // if the latest votes correspond to a child
                if let Some(child) = self.get_ancestor(*voted_hash, block_height + 1, spec) {
                    // add up the votes for each child
                    *child_votes.entry(child).or_insert_with(|| 0) += vote;
                }
            }
            // check if we have votes of children, if not select the smallest hash child
            if child_votes.is_empty() {
                current_head = *children
                    .iter()
                    .min_by(|child1, child2| child1.cmp(child2))
                    .expect("Must be children here");
                trace!(
                    "Children have no votes - smallest hash chosen: {}",
                    current_head
                );
            } else {
                // given the votes on the children, find the best child
                current_head = self
                    .choose_best_child(&child_votes)
                    .ok_or(ForkChoiceError::CannotFindBestChild)?;
                trace!("Best child found: {}", current_head);
            }
        }
        // didn't find head yet, proceed to next iteration
        // update block height
        block_height = self
            .block_store
            .get_deserialized(&current_head)?
            .ok_or_else(|| ForkChoiceError::MissingBeaconBlock(current_head))?
            .slot
            .height(spec.genesis_slot);
        // prune the latest votes for votes that are not part of current chosen chain
        // more specifically, only keep votes that have head as an ancestor
        for hash in latest_votes.keys() {
            trace!(
                "Ancestor for vote: {} at height: {} is: {:?}",
                hash,
                block_height,
                self.get_ancestor(*hash, block_height, spec)
            );
        }
        latest_votes.retain(|hash, _| {
            self.get_ancestor(*hash, block_height, spec) == Some(current_head)
        });
    }
}
}
/// Type for storing blocks in a memory cache. Key is comprised of block-hash plus the height.
#[derive(PartialEq, Eq, Hash)]
pub struct CacheKey<T> {
    // Hash of the block whose ancestor lookup was cached.
    block_hash: Hash256,
    // Height component of the key (generic over the height representation).
    block_height: T,
}

impl<T> CacheKey<T> {
    /// Builds a key from a block hash and the queried height.
    pub fn new(block_hash: &Hash256, block_height: T) -> Self {
        CacheKey {
            block_hash: *block_hash,
            block_height,
        }
    }
}
#[cfg(test)]
mod tests {
    use super::*;

    /// Spot-checks `power_of_2_below` against hand-computed values.
    #[test]
    pub fn test_power_of_2_below() {
        let cases = [
            (4, 4),
            (5, 4),
            (7, 4),
            (24, 16),
            (32, 32),
            (33, 32),
            (63, 32),
        ];
        for &(input, expected) in cases.iter() {
            assert_eq!(power_of_2_below(input), expected);
        }
    }

    /// Checks the invariant `power_of_2_below(x) <= x` around a large power of two.
    #[test]
    pub fn test_power_of_2_below_large() {
        let pow: u64 = 1 << 24;
        for x in (pow - 20)..(pow + 20) {
            assert!(power_of_2_below(x) <= x, "{}", x);
        }
    }
}

View File

@ -9,8 +9,7 @@ use log::{debug, trace};
use std::collections::HashMap; use std::collections::HashMap;
use std::sync::Arc; use std::sync::Arc;
use types::{ use types::{
readers::BeaconBlockReader, validator_registry::get_active_validator_indices, BeaconBlock, validator_registry::get_active_validator_indices, BeaconBlock, ChainSpec, Hash256, Slot,
ChainSpec, Hash256, Slot,
}; };
//TODO: Pruning and syncing //TODO: Pruning and syncing
@ -95,7 +94,7 @@ where
.block_store .block_store
.get_deserialized(&block_root)? .get_deserialized(&block_root)?
.ok_or_else(|| ForkChoiceError::MissingBeaconBlock(*block_root))? .ok_or_else(|| ForkChoiceError::MissingBeaconBlock(*block_root))?
.slot(); .slot;
for (vote_hash, votes) in latest_votes.iter() { for (vote_hash, votes) in latest_votes.iter() {
let (root_at_slot, _) = self let (root_at_slot, _) = self
@ -122,7 +121,7 @@ impl<T: ClientDB + Sized> ForkChoice for SlowLMDGhost<T> {
// add the new block to the children of parent // add the new block to the children of parent
(*self (*self
.children .children
.entry(block.parent_root) .entry(block.previous_block_root)
.or_insert_with(|| vec![])) .or_insert_with(|| vec![]))
.push(block_hash.clone()); .push(block_hash.clone());
@ -155,7 +154,7 @@ impl<T: ClientDB + Sized> ForkChoice for SlowLMDGhost<T> {
.block_store .block_store
.get_deserialized(&target_block_root)? .get_deserialized(&target_block_root)?
.ok_or_else(|| ForkChoiceError::MissingBeaconBlock(*target_block_root))? .ok_or_else(|| ForkChoiceError::MissingBeaconBlock(*target_block_root))?
.slot() .slot
.height(spec.genesis_slot); .height(spec.genesis_slot);
// get the height of the past target block // get the height of the past target block
@ -163,7 +162,7 @@ impl<T: ClientDB + Sized> ForkChoice for SlowLMDGhost<T> {
.block_store .block_store
.get_deserialized(&attestation_target)? .get_deserialized(&attestation_target)?
.ok_or_else(|| ForkChoiceError::MissingBeaconBlock(*attestation_target))? .ok_or_else(|| ForkChoiceError::MissingBeaconBlock(*attestation_target))?
.slot() .slot
.height(spec.genesis_slot); .height(spec.genesis_slot);
// update the attestation only if the new target is higher // update the attestation only if the new target is higher
if past_block_height < block_height { if past_block_height < block_height {
@ -186,9 +185,9 @@ impl<T: ClientDB + Sized> ForkChoice for SlowLMDGhost<T> {
.get_deserialized(&justified_block_start)? .get_deserialized(&justified_block_start)?
.ok_or_else(|| ForkChoiceError::MissingBeaconBlock(*justified_block_start))?; .ok_or_else(|| ForkChoiceError::MissingBeaconBlock(*justified_block_start))?;
let start_state_root = start.state_root(); let start_state_root = start.state_root;
let latest_votes = self.get_latest_votes(&start_state_root, start.slot(), spec)?; let latest_votes = self.get_latest_votes(&start_state_root, start.slot, spec)?;
let mut head_hash = *justified_block_start; let mut head_hash = *justified_block_start;
@ -210,6 +209,7 @@ impl<T: ClientDB + Sized> ForkChoice for SlowLMDGhost<T> {
trace!("Children found: {:?}", children); trace!("Children found: {:?}", children);
let mut head_vote_count = 0; let mut head_vote_count = 0;
head_hash = children[0];
for child_hash in children { for child_hash in children {
let vote_count = self.get_vote_count(&latest_votes, &child_hash)?; let vote_count = self.get_vote_count(&latest_votes, &child_hash)?;
trace!("Vote count for child: {} is: {}", child_hash, vote_count); trace!("Vote count for child: {} is: {}", child_hash, vote_count);
@ -218,6 +218,12 @@ impl<T: ClientDB + Sized> ForkChoice for SlowLMDGhost<T> {
head_hash = *child_hash; head_hash = *child_hash;
head_vote_count = vote_count; head_vote_count = vote_count;
} }
// resolve ties - choose smaller hash
else if vote_count == head_vote_count {
if *child_hash < head_hash {
head_hash = *child_hash;
}
}
} }
} }
Ok(head_hash) Ok(head_hash)

View File

@ -63,3 +63,82 @@ test_cases:
- b7: 2 - b7: 2
heads: heads:
- id: 'b4' - id: 'b4'
- blocks:
- id: 'b0'
parent: 'b0'
- id: 'b1'
parent: 'b0'
- id: 'b2'
parent: 'b0'
- id: 'b3'
parent: 'b0'
- id: 'b4'
parent: 'b1'
- id: 'b5'
parent: 'b1'
- id: 'b6'
parent: 'b2'
- id: 'b7'
parent: 'b2'
- id: 'b8'
parent: 'b3'
- id: 'b9'
parent: 'b3'
weights:
- b1: 2
- b2: 1
- b3: 1
- b4: 7
- b5: 5
- b6: 2
- b7: 4
- b8: 4
- b9: 2
heads:
- id: 'b4'
- blocks:
- id: 'b0'
parent: 'b0'
- id: 'b1'
parent: 'b0'
- id: 'b2'
parent: 'b0'
- id: 'b3'
parent: 'b0'
- id: 'b4'
parent: 'b1'
- id: 'b5'
parent: 'b1'
- id: 'b6'
parent: 'b2'
- id: 'b7'
parent: 'b2'
- id: 'b8'
parent: 'b3'
- id: 'b9'
parent: 'b3'
weights:
- b1: 1
- b2: 1
- b3: 1
- b4: 7
- b5: 5
- b6: 2
- b7: 4
- b8: 4
- b9: 2
heads:
- id: 'b7'
- blocks:
- id: 'b0'
parent: 'b0'
- id: 'b1'
parent: 'b0'
- id: 'b2'
parent: 'b0'
weights:
- b1: 0
- b2: 0
heads:
- id: 'b1'

View File

@ -35,3 +35,31 @@ test_cases:
- b3: 3 - b3: 3
heads: heads:
- id: 'b1' - id: 'b1'
# equal weights children. Should choose lower hash b2
- blocks:
- id: 'b0'
parent: 'b0'
- id: 'b1'
parent: 'b0'
- id: 'b2'
parent: 'b0'
- id: 'b3'
parent: 'b0'
weights:
- b1: 5
- b2: 6
- b3: 6
heads:
- id: 'b2'
- blocks:
- id: 'b0'
parent: 'b0'
- id: 'b1'
parent: 'b0'
- id: 'b2'
parent: 'b0'
weights:
- b1: 0
- b2: 0
heads:
- id: 'b1'

View File

@ -3,7 +3,7 @@
extern crate beacon_chain; extern crate beacon_chain;
extern crate bls; extern crate bls;
extern crate db; extern crate db;
//extern crate env_logger; // for debugging // extern crate env_logger; // for debugging
extern crate fork_choice; extern crate fork_choice;
extern crate hex; extern crate hex;
extern crate log; extern crate log;
@ -12,22 +12,35 @@ extern crate types;
extern crate yaml_rust; extern crate yaml_rust;
pub use beacon_chain::BeaconChain; pub use beacon_chain::BeaconChain;
use bls::{PublicKey, Signature}; use bls::Signature;
use db::stores::{BeaconBlockStore, BeaconStateStore}; use db::stores::{BeaconBlockStore, BeaconStateStore};
use db::MemoryDB; use db::MemoryDB;
//use env_logger::{Builder, Env}; // use env_logger::{Builder, Env};
use fork_choice::{BitwiseLMDGhost, ForkChoice, ForkChoiceAlgorithm, LongestChain, SlowLMDGhost}; use fork_choice::{
BitwiseLMDGhost, ForkChoice, ForkChoiceAlgorithm, LongestChain, OptimizedLMDGhost, SlowLMDGhost,
};
use ssz::ssz_encode; use ssz::ssz_encode;
use std::collections::HashMap; use std::collections::HashMap;
use std::sync::Arc; use std::sync::Arc;
use std::{fs::File, io::prelude::*, path::PathBuf}; use std::{fs::File, io::prelude::*, path::PathBuf};
use types::{ use types::test_utils::TestingBeaconStateBuilder;
BeaconBlock, BeaconBlockBody, BeaconState, ChainSpec, Epoch, Eth1Data, Hash256, Slot, Validator, use types::{BeaconBlock, BeaconBlockBody, ChainSpec, Eth1Data, Hash256, Keypair, Slot};
};
use yaml_rust::yaml; use yaml_rust::yaml;
// Note: We Assume the block Id's are hex-encoded. // Note: We Assume the block Id's are hex-encoded.
#[test]
fn test_optimized_lmd_ghost() {
// set up logging
// Builder::from_env(Env::default().default_filter_or("trace")).init();
test_yaml_vectors(
ForkChoiceAlgorithm::OptimizedLMDGhost,
"tests/lmd_ghost_test_vectors.yaml",
100,
);
}
#[test] #[test]
fn test_bitwise_lmd_ghost() { fn test_bitwise_lmd_ghost() {
// set up logging // set up logging
@ -77,6 +90,8 @@ fn test_yaml_vectors(
let randao_reveal = Signature::empty_signature(); let randao_reveal = Signature::empty_signature();
let signature = Signature::empty_signature(); let signature = Signature::empty_signature();
let body = BeaconBlockBody { let body = BeaconBlockBody {
eth1_data,
randao_reveal,
proposer_slashings: vec![], proposer_slashings: vec![],
attester_slashings: vec![], attester_slashings: vec![],
attestations: vec![], attestations: vec![],
@ -104,14 +119,14 @@ fn test_yaml_vectors(
// default params for genesis // default params for genesis
let block_hash = id_to_hash(&block_id); let block_hash = id_to_hash(&block_id);
let mut slot = spec.genesis_slot; let mut slot = spec.genesis_slot;
let parent_root = id_to_hash(&parent_id); let previous_block_root = id_to_hash(&parent_id);
// set the slot and parent based off the YAML. Start with genesis; // set the slot and parent based off the YAML. Start with genesis;
// if not the genesis, update slot // if not the genesis, update slot
if parent_id != block_id { if parent_id != block_id {
// find parent slot // find parent slot
slot = *(block_slot slot = *(block_slot
.get(&parent_root) .get(&previous_block_root)
.expect("Parent should have a slot number")) .expect("Parent should have a slot number"))
+ 1; + 1;
} else { } else {
@ -124,10 +139,8 @@ fn test_yaml_vectors(
// build the BeaconBlock // build the BeaconBlock
let beacon_block = BeaconBlock { let beacon_block = BeaconBlock {
slot, slot,
parent_root, previous_block_root,
state_root: state_root.clone(), state_root: state_root.clone(),
randao_reveal: randao_reveal.clone(),
eth1_data: eth1_data.clone(),
signature: signature.clone(), signature: signature.clone(),
body: body.clone(), body: body.clone(),
}; };
@ -205,16 +218,18 @@ fn load_test_cases_from_yaml(file_path: &str) -> Vec<yaml_rust::Yaml> {
// initialise a single validator and state. All blocks will reference this state root. // initialise a single validator and state. All blocks will reference this state root.
fn setup_inital_state( fn setup_inital_state(
fork_choice_algo: &ForkChoiceAlgorithm, fork_choice_algo: &ForkChoiceAlgorithm,
no_validators: usize, num_validators: usize,
) -> (Box<ForkChoice>, Arc<BeaconBlockStore<MemoryDB>>, Hash256) { ) -> (Box<ForkChoice>, Arc<BeaconBlockStore<MemoryDB>>, Hash256) {
let zero_hash = Hash256::zero();
let db = Arc::new(MemoryDB::open()); let db = Arc::new(MemoryDB::open());
let block_store = Arc::new(BeaconBlockStore::new(db.clone())); let block_store = Arc::new(BeaconBlockStore::new(db.clone()));
let state_store = Arc::new(BeaconStateStore::new(db.clone())); let state_store = Arc::new(BeaconStateStore::new(db.clone()));
// the fork choice instantiation // the fork choice instantiation
let fork_choice: Box<ForkChoice> = match fork_choice_algo { let fork_choice: Box<ForkChoice> = match fork_choice_algo {
ForkChoiceAlgorithm::OptimizedLMDGhost => Box::new(OptimizedLMDGhost::new(
block_store.clone(),
state_store.clone(),
)),
ForkChoiceAlgorithm::BitwiseLMDGhost => Box::new(BitwiseLMDGhost::new( ForkChoiceAlgorithm::BitwiseLMDGhost => Box::new(BitwiseLMDGhost::new(
block_store.clone(), block_store.clone(),
state_store.clone(), state_store.clone(),
@ -225,40 +240,11 @@ fn setup_inital_state(
ForkChoiceAlgorithm::LongestChain => Box::new(LongestChain::new(block_store.clone())), ForkChoiceAlgorithm::LongestChain => Box::new(LongestChain::new(block_store.clone())),
}; };
// misc vars for setting up the state
let genesis_time = 1_550_381_159;
let latest_eth1_data = Eth1Data {
deposit_root: zero_hash.clone(),
block_hash: zero_hash.clone(),
};
let initial_validator_deposits = vec![];
let spec = ChainSpec::foundation(); let spec = ChainSpec::foundation();
// create the state let state_builder =
let mut state = BeaconState::genesis( TestingBeaconStateBuilder::from_single_keypair(num_validators, &Keypair::random(), &spec);
genesis_time, let (state, _keypairs) = state_builder.build();
initial_validator_deposits,
latest_eth1_data,
&spec,
)
.unwrap();
let default_validator = Validator {
pubkey: PublicKey::default(),
withdrawal_credentials: zero_hash,
activation_epoch: Epoch::from(0u64),
exit_epoch: spec.far_future_epoch,
withdrawable_epoch: spec.far_future_epoch,
initiated_exit: false,
slashed: false,
};
// activate the validators
for _ in 0..no_validators {
state.validator_registry.push(default_validator.clone());
state.validator_balances.push(32_000_000_000);
}
let state_root = state.canonical_root(); let state_root = state.canonical_root();
state_store state_store

View File

@ -11,9 +11,13 @@ harness = false
[dev-dependencies] [dev-dependencies]
criterion = "0.2" criterion = "0.2"
env_logger = "0.6.0" env_logger = "0.6.0"
serde = "1.0"
serde_derive = "1.0"
serde_yaml = "0.8"
[dependencies] [dependencies]
bls = { path = "../utils/bls" } bls = { path = "../utils/bls" }
fnv = "1.0"
hashing = { path = "../utils/hashing" } hashing = { path = "../utils/hashing" }
int_to_bytes = { path = "../utils/int_to_bytes" } int_to_bytes = { path = "../utils/int_to_bytes" }
integer-sqrt = "0.1" integer-sqrt = "0.1"

View File

@ -0,0 +1,270 @@
use criterion::Criterion;
use criterion::{black_box, Benchmark};
use ssz::TreeHash;
use state_processing::{
per_block_processing,
per_block_processing::{
process_attestations, process_attester_slashings, process_deposits, process_eth1_data,
process_exits, process_proposer_slashings, process_randao, process_transfers,
verify_block_signature,
},
};
use types::*;
/// Run the detailed benchmarking suite on the given `BeaconState`.
///
/// `desc` will be added to the title of each bench.
pub fn bench_block_processing(
c: &mut Criterion,
initial_block: &BeaconBlock,
initial_state: &BeaconState,
initial_spec: &ChainSpec,
desc: &str,
) {
let state = initial_state.clone();
let block = initial_block.clone();
let spec = initial_spec.clone();
c.bench(
&format!("{}/block_processing", desc),
Benchmark::new("verify_block_signature", move |b| {
b.iter_batched(
|| state.clone(),
|mut state| {
verify_block_signature(&mut state, &block, &spec).unwrap();
state
},
criterion::BatchSize::SmallInput,
)
})
.sample_size(10),
);
let state = initial_state.clone();
let block = initial_block.clone();
let spec = initial_spec.clone();
c.bench(
&format!("{}/block_processing", desc),
Benchmark::new("process_randao", move |b| {
b.iter_batched(
|| state.clone(),
|mut state| {
process_randao(&mut state, &block, &spec).unwrap();
state
},
criterion::BatchSize::SmallInput,
)
})
.sample_size(10),
);
let state = initial_state.clone();
let block = initial_block.clone();
c.bench(
&format!("{}/block_processing", desc),
Benchmark::new("process_eth1_data", move |b| {
b.iter_batched(
|| state.clone(),
|mut state| {
process_eth1_data(&mut state, &block.eth1_data).unwrap();
state
},
criterion::BatchSize::SmallInput,
)
})
.sample_size(10),
);
let state = initial_state.clone();
let block = initial_block.clone();
let spec = initial_spec.clone();
c.bench(
&format!("{}/block_processing", desc),
Benchmark::new("process_proposer_slashings", move |b| {
b.iter_batched(
|| state.clone(),
|mut state| {
process_proposer_slashings(&mut state, &block.body.proposer_slashings, &spec)
.unwrap();
state
},
criterion::BatchSize::SmallInput,
)
})
.sample_size(10),
);
let state = initial_state.clone();
let block = initial_block.clone();
let spec = initial_spec.clone();
c.bench(
&format!("{}/block_processing", desc),
Benchmark::new("process_attester_slashings", move |b| {
b.iter_batched(
|| state.clone(),
|mut state| {
process_attester_slashings(&mut state, &block.body.attester_slashings, &spec)
.unwrap();
state
},
criterion::BatchSize::SmallInput,
)
})
.sample_size(10),
);
let state = initial_state.clone();
let block = initial_block.clone();
let spec = initial_spec.clone();
c.bench(
&format!("{}/block_processing", desc),
Benchmark::new("process_attestations", move |b| {
b.iter_batched(
|| state.clone(),
|mut state| {
process_attestations(&mut state, &block.body.attestations, &spec).unwrap();
state
},
criterion::BatchSize::SmallInput,
)
})
.sample_size(10),
);
let state = initial_state.clone();
let block = initial_block.clone();
let spec = initial_spec.clone();
c.bench(
&format!("{}/block_processing", desc),
Benchmark::new("process_deposits", move |b| {
b.iter_batched(
|| state.clone(),
|mut state| {
process_deposits(&mut state, &block.body.deposits, &spec).unwrap();
state
},
criterion::BatchSize::SmallInput,
)
})
.sample_size(10),
);
let state = initial_state.clone();
let block = initial_block.clone();
let spec = initial_spec.clone();
c.bench(
&format!("{}/block_processing", desc),
Benchmark::new("process_exits", move |b| {
b.iter_batched(
|| state.clone(),
|mut state| {
process_exits(&mut state, &block.body.voluntary_exits, &spec).unwrap();
state
},
criterion::BatchSize::SmallInput,
)
})
.sample_size(10),
);
let state = initial_state.clone();
let block = initial_block.clone();
let spec = initial_spec.clone();
c.bench(
&format!("{}/block_processing", desc),
Benchmark::new("process_transfers", move |b| {
b.iter_batched(
|| state.clone(),
|mut state| {
process_transfers(&mut state, &block.body.transfers, &spec).unwrap();
state
},
criterion::BatchSize::SmallInput,
)
})
.sample_size(10),
);
let state = initial_state.clone();
let block = initial_block.clone();
let spec = initial_spec.clone();
c.bench(
&format!("{}/block_processing", desc),
Benchmark::new("per_block_processing", move |b| {
b.iter_batched(
|| state.clone(),
|mut state| {
per_block_processing(&mut state, &block, &spec).unwrap();
state
},
criterion::BatchSize::SmallInput,
)
})
.sample_size(10),
);
let mut state = initial_state.clone();
state.drop_cache(RelativeEpoch::Previous);
let spec = initial_spec.clone();
c.bench(
&format!("{}/block_processing", desc),
Benchmark::new("build_previous_state_epoch_cache", move |b| {
b.iter_batched(
|| state.clone(),
|mut state| {
state
.build_epoch_cache(RelativeEpoch::Previous, &spec)
.unwrap();
state
},
criterion::BatchSize::SmallInput,
)
})
.sample_size(10),
);
let mut state = initial_state.clone();
state.drop_cache(RelativeEpoch::Current);
let spec = initial_spec.clone();
c.bench(
&format!("{}/block_processing", desc),
Benchmark::new("build_current_state_epoch_cache", move |b| {
b.iter_batched(
|| state.clone(),
|mut state| {
state
.build_epoch_cache(RelativeEpoch::Current, &spec)
.unwrap();
state
},
criterion::BatchSize::SmallInput,
)
})
.sample_size(10),
);
let mut state = initial_state.clone();
state.drop_pubkey_cache();
c.bench(
&format!("{}/block_processing", desc),
Benchmark::new("build_pubkey_cache", move |b| {
b.iter_batched(
|| state.clone(),
|mut state| {
state.update_pubkey_cache().unwrap();
state
},
criterion::BatchSize::SmallInput,
)
})
.sample_size(10),
);
let block = initial_block.clone();
c.bench(
&format!("{}/block_processing", desc),
Benchmark::new("tree_hash_block", move |b| {
b.iter(|| black_box(block.hash_tree_root()))
})
.sample_size(10),
);
}

View File

@ -0,0 +1,263 @@
use criterion::Criterion;
use criterion::{black_box, Benchmark};
use ssz::TreeHash;
use state_processing::{
per_epoch_processing,
per_epoch_processing::{
clean_attestations, initialize_validator_statuses, process_crosslinks, process_eth1_data,
process_justification, process_rewards_and_penalities, process_validator_registry,
update_active_tree_index_roots, update_latest_slashed_balances,
},
};
use types::test_utils::TestingBeaconStateBuilder;
use types::*;
// Default number of measurement samples taken for each benchmark.
pub const BENCHING_SAMPLE_SIZE: usize = 10;
// Sample size for the slowest benchmarks.
// NOTE(review): currently equal to `BENCHING_SAMPLE_SIZE`; presumably kept
// separate so it can be tuned independently — confirm before merging them.
pub const SMALL_BENCHING_SAMPLE_SIZE: usize = 10;
/// Run the benchmarking suite on a foundation spec with `validator_count` validators.
///
/// Builds a state positioned just before an epoch transition (end of the
/// fourth epoch after genesis) with all caches built and one
/// full-participation attestation per includable committee, then delegates to
/// `bench_epoch_processing`.
///
/// # Panics
///
/// Panics if the constructed state does not carry the expected number of
/// attestations, or if epochs-since-finality is not 4 (which would exercise a
/// different arm of `process_rewards_and_penalities` than intended).
pub fn bench_epoch_processing_n_validators(c: &mut Criterion, validator_count: usize) {
    let spec = ChainSpec::foundation();

    let mut builder =
        TestingBeaconStateBuilder::from_default_keypairs_file_if_exists(validator_count, &spec);

    // Set the state to be just before an epoch transition.
    let target_slot = (spec.genesis_epoch + 4).end_slot(spec.slots_per_epoch);
    builder.teleport_to_slot(target_slot, &spec);

    // Builds all caches; benches will not contain shuffling/committee building times.
    builder.build_caches(&spec).unwrap();

    // Inserts one attestation with full participation for each committee able to include an
    // attestation in this state.
    builder.insert_attestations(&spec);

    let (state, _keypairs) = builder.build();

    // Assert that the state has an attestations for each committee that is able to include an
    // attestation in the state.
    let committees_per_epoch = spec.get_epoch_committee_count(validator_count);
    let committees_per_slot = committees_per_epoch / spec.slots_per_epoch;
    let previous_epoch_attestations = committees_per_epoch;
    // Current-epoch committees within the inclusion-delay window cannot yet
    // have their attestations included.
    let current_epoch_attestations =
        committees_per_slot * (spec.slots_per_epoch - spec.min_attestation_inclusion_delay);
    assert_eq!(
        state.latest_attestations.len() as u64,
        previous_epoch_attestations + current_epoch_attestations,
        "The state should have an attestation for each committee."
    );

    // Assert that we will run the first arm of process_rewards_and_penalities
    let epochs_since_finality = state.next_epoch(&spec) - state.finalized_epoch;
    assert_eq!(
        epochs_since_finality, 4,
        "Epochs since finality should be 4"
    );

    bench_epoch_processing(c, &state, &spec, &format!("{}_validators", validator_count));
}
/// Run the detailed benchmarking suite on the given `BeaconState`.
///
/// `desc` will be added to the title of each bench.
fn bench_epoch_processing(c: &mut Criterion, state: &BeaconState, spec: &ChainSpec, desc: &str) {
let state_clone = state.clone();
let spec_clone = spec.clone();
c.bench(
&format!("{}/epoch_processing", desc),
Benchmark::new("process_eth1_data", move |b| {
b.iter_batched(
|| state_clone.clone(),
|mut state| {
process_eth1_data(&mut state, &spec_clone);
state
},
criterion::BatchSize::SmallInput,
)
})
.sample_size(BENCHING_SAMPLE_SIZE),
);
// NOTE(review): each section below benches one epoch-processing stage against
// clones of the same pre-built `state`. `iter_batched` hands every iteration a
// fresh clone from the setup closure, so mutations made by one iteration are
// never observed by the next and clone cost stays out of the measurement.

// Bench `initialize_validator_statuses`.
let state_clone = state.clone();
let spec_clone = spec.clone();
c.bench(
    &format!("{}/epoch_processing", desc),
    Benchmark::new("initialize_validator_statuses", move |b| {
        b.iter_batched(
            || state_clone.clone(),
            |mut state| {
                initialize_validator_statuses(&mut state, &spec_clone).unwrap();
                state
            },
            criterion::BatchSize::SmallInput,
        )
    })
    .sample_size(BENCHING_SAMPLE_SIZE),
);

// Bench `process_justification`, with attester statuses computed once outside
// the timed closure.
let state_clone = state.clone();
let spec_clone = spec.clone();
let attesters = initialize_validator_statuses(&state, &spec).unwrap();
c.bench(
    &format!("{}/epoch_processing", desc),
    Benchmark::new("process_justification", move |b| {
        b.iter_batched(
            || state_clone.clone(),
            |mut state| {
                process_justification(&mut state, &attesters.total_balances, &spec_clone);
                state
            },
            criterion::BatchSize::SmallInput,
        )
    })
    // NOTE(review): hard-coded 10 while sibling benches use the shared
    // BENCHING_SAMPLE_SIZE constants — confirm whether this is intentional.
    .sample_size(10),
);

// Bench `process_crosslinks`; `black_box` keeps the optimizer from discarding
// the returned value.
let state_clone = state.clone();
let spec_clone = spec.clone();
c.bench(
    &format!("{}/epoch_processing", desc),
    Benchmark::new("process_crosslinks", move |b| {
        b.iter_batched(
            || state_clone.clone(),
            |mut state| black_box(process_crosslinks(&mut state, &spec_clone).unwrap()),
            criterion::BatchSize::SmallInput,
        )
    })
    .sample_size(BENCHING_SAMPLE_SIZE),
);

// Bench `process_rewards_and_penalities`. Attester statuses and winning
// crosslink roots are computed once up front; each iteration clones both the
// state and the statuses because the benched call mutates them.
// NOTE(review): the bench label says "penalties" but the function name is
// spelled "penalities" — mismatch originates in the function's definition.
let mut state_clone = state.clone();
let spec_clone = spec.clone();
let attesters = initialize_validator_statuses(&state, &spec).unwrap();
let winning_root_for_shards = process_crosslinks(&mut state_clone, &spec).unwrap();
c.bench(
    &format!("{}/epoch_processing", desc),
    Benchmark::new("process_rewards_and_penalties", move |b| {
        b.iter_batched(
            || (state_clone.clone(), attesters.clone()),
            |(mut state, mut attesters)| {
                process_rewards_and_penalities(
                    &mut state,
                    &mut attesters,
                    &winning_root_for_shards,
                    &spec_clone,
                )
                .unwrap();
                state
            },
            criterion::BatchSize::SmallInput,
        )
    })
    .sample_size(SMALL_BENCHING_SAMPLE_SIZE),
);

// Bench `BeaconState::process_ejections`.
let state_clone = state.clone();
let spec_clone = spec.clone();
c.bench(
    &format!("{}/epoch_processing", desc),
    Benchmark::new("process_ejections", move |b| {
        b.iter_batched(
            || state_clone.clone(),
            |mut state| {
                state.process_ejections(&spec_clone);
                state
            },
            criterion::BatchSize::SmallInput,
        )
    })
    .sample_size(BENCHING_SAMPLE_SIZE),
);

// Bench `process_validator_registry`.
let state_clone = state.clone();
let spec_clone = spec.clone();
c.bench(
    &format!("{}/epoch_processing", desc),
    Benchmark::new("process_validator_registry", move |b| {
        b.iter_batched(
            || state_clone.clone(),
            |mut state| {
                process_validator_registry(&mut state, &spec_clone).unwrap();
                state
            },
            criterion::BatchSize::SmallInput,
        )
    })
    .sample_size(BENCHING_SAMPLE_SIZE),
);

// Bench `update_active_tree_index_roots`.
let state_clone = state.clone();
let spec_clone = spec.clone();
c.bench(
    &format!("{}/epoch_processing", desc),
    Benchmark::new("update_active_tree_index_roots", move |b| {
        b.iter_batched(
            || state_clone.clone(),
            |mut state| {
                update_active_tree_index_roots(&mut state, &spec_clone).unwrap();
                state
            },
            criterion::BatchSize::SmallInput,
        )
    })
    .sample_size(BENCHING_SAMPLE_SIZE),
);

// Bench `update_latest_slashed_balances`.
let state_clone = state.clone();
let spec_clone = spec.clone();
c.bench(
    &format!("{}/epoch_processing", desc),
    Benchmark::new("update_latest_slashed_balances", move |b| {
        b.iter_batched(
            || state_clone.clone(),
            |mut state| {
                update_latest_slashed_balances(&mut state, &spec_clone);
                state
            },
            criterion::BatchSize::SmallInput,
        )
    })
    .sample_size(BENCHING_SAMPLE_SIZE),
);

// Bench `clean_attestations`.
let state_clone = state.clone();
let spec_clone = spec.clone();
c.bench(
    &format!("{}/epoch_processing", desc),
    Benchmark::new("clean_attestations", move |b| {
        b.iter_batched(
            || state_clone.clone(),
            |mut state| {
                clean_attestations(&mut state, &spec_clone);
                state
            },
            criterion::BatchSize::SmallInput,
        )
    })
    .sample_size(BENCHING_SAMPLE_SIZE),
);

// Bench the entire `per_epoch_processing` pipeline in one call.
let state_clone = state.clone();
let spec_clone = spec.clone();
c.bench(
    &format!("{}/epoch_processing", desc),
    Benchmark::new("per_epoch_processing", move |b| {
        b.iter_batched(
            || state_clone.clone(),
            |mut state| black_box(per_epoch_processing(&mut state, &spec_clone).unwrap()),
            criterion::BatchSize::SmallInput,
        )
    })
    .sample_size(SMALL_BENCHING_SAMPLE_SIZE),
);

// Bench tree-hashing the full state via `hash_tree_root`; no per-iteration
// clone is taken here, unlike the benches above.
let state_clone = state.clone();
c.bench(
    &format!("{}/epoch_processing", desc),
    Benchmark::new("tree_hash_state", move |b| {
        b.iter(|| black_box(state_clone.hash_tree_root()))
    })
    .sample_size(SMALL_BENCHING_SAMPLE_SIZE),
);
}

View File

@ -1,65 +1,103 @@
use block_benching_builder::BlockBenchingBuilder;
use criterion::Criterion; use criterion::Criterion;
use criterion::{black_box, criterion_group, criterion_main, Benchmark}; use criterion::{criterion_group, criterion_main};
// use env_logger::{Builder, Env}; use env_logger::{Builder, Env};
use state_processing::SlotProcessable; use log::info;
use types::beacon_state::BeaconStateBuilder;
use types::*; use types::*;
fn epoch_processing(c: &mut Criterion) { mod bench_block_processing;
// Builder::from_env(Env::default().default_filter_or("debug")).init(); mod bench_epoch_processing;
mod block_benching_builder;
let mut builder = BeaconStateBuilder::new(16_384); pub const VALIDATOR_COUNT: usize = 16_384;
builder.build_fast().unwrap(); // `LOG_LEVEL == "info"` gives handy messages.
builder.teleport_to_end_of_epoch(builder.spec.genesis_epoch + 4); pub const LOG_LEVEL: &str = "info";
let mut state = builder.cloned_state(); /// Build a worst-case block and benchmark processing it.
pub fn block_processing_worst_case(c: &mut Criterion) {
// Build all the caches so the following state does _not_ include the cache-building time. if LOG_LEVEL != "" {
state Builder::from_env(Env::default().default_filter_or(LOG_LEVEL)).init();
.build_epoch_cache(RelativeEpoch::Previous, &builder.spec) }
.unwrap(); info!(
state "Building worst case block bench with {} validators",
.build_epoch_cache(RelativeEpoch::Current, &builder.spec) VALIDATOR_COUNT
.unwrap();
state
.build_epoch_cache(RelativeEpoch::Next, &builder.spec)
.unwrap();
let cached_state = state.clone();
// Drop all the caches so the following state includes the cache-building time.
state.drop_cache(RelativeEpoch::Previous);
state.drop_cache(RelativeEpoch::Current);
state.drop_cache(RelativeEpoch::Next);
let cacheless_state = state;
let spec_a = builder.spec.clone();
let spec_b = builder.spec.clone();
c.bench(
"epoch processing",
Benchmark::new("with pre-built caches", move |b| {
b.iter_with_setup(
|| cached_state.clone(),
|mut state| black_box(state.per_slot_processing(Hash256::zero(), &spec_a).unwrap()),
)
})
.sample_size(10),
); );
c.bench( // Use the specifications from the Eth2.0 spec.
"epoch processing", let spec = ChainSpec::foundation();
Benchmark::new("without pre-built caches", move |b| {
b.iter_with_setup( // Create a builder for configuring the block and state for benching.
|| cacheless_state.clone(), let mut bench_builder = BlockBenchingBuilder::new(VALIDATOR_COUNT, &spec);
|mut state| black_box(state.per_slot_processing(Hash256::zero(), &spec_b).unwrap()),
) // Set the number of included operations to be maximum (e.g., `MAX_ATTESTATIONS`, etc.)
}) bench_builder.maximize_block_operations(&spec);
.sample_size(10),
// Set the state and block to be in the last slot of the 4th epoch.
let last_slot_of_epoch = (spec.genesis_epoch + 4).end_slot(spec.slots_per_epoch);
bench_builder.set_slot(last_slot_of_epoch, &spec);
// Build all the state caches so the build times aren't included in the benches.
bench_builder.build_caches(&spec);
// Generate the block and state then run benches.
let (block, state) = bench_builder.build(&spec);
bench_block_processing::bench_block_processing(
c,
&block,
&state,
&spec,
&format!("{}_validators/worst_case", VALIDATOR_COUNT),
); );
} }
criterion_group!(benches, epoch_processing,); /// Build a reasonable-case block and benchmark processing it.
pub fn block_processing_reasonable_case(c: &mut Criterion) {
info!(
"Building reasonable case block bench with {} validators",
VALIDATOR_COUNT
);
// Use the specifications from the Eth2.0 spec.
let spec = ChainSpec::foundation();
// Create a builder for configuring the block and state for benching.
let mut bench_builder = BlockBenchingBuilder::new(VALIDATOR_COUNT, &spec);
// Set the number of included operations to what we might expect normally.
bench_builder.num_proposer_slashings = 0;
bench_builder.num_attester_slashings = 0;
bench_builder.num_attestations = (spec.shard_count / spec.slots_per_epoch) as usize;
bench_builder.num_deposits = 2;
bench_builder.num_exits = 2;
bench_builder.num_transfers = 2;
// Set the state and block to be in the last slot of the 4th epoch.
let last_slot_of_epoch = (spec.genesis_epoch + 4).end_slot(spec.slots_per_epoch);
bench_builder.set_slot(last_slot_of_epoch, &spec);
// Build all the state caches so the build times aren't included in the benches.
bench_builder.build_caches(&spec);
// Generate the block and state then run benches.
let (block, state) = bench_builder.build(&spec);
bench_block_processing::bench_block_processing(
c,
&block,
&state,
&spec,
&format!("{}_validators/reasonable_case", VALIDATOR_COUNT),
);
}
pub fn state_processing(c: &mut Criterion) {
bench_epoch_processing::bench_epoch_processing_n_validators(c, VALIDATOR_COUNT);
}
criterion_group!(
benches,
block_processing_reasonable_case,
block_processing_worst_case,
state_processing
);
criterion_main!(benches); criterion_main!(benches);

View File

@ -0,0 +1,175 @@
use log::info;
use types::test_utils::{TestingBeaconBlockBuilder, TestingBeaconStateBuilder};
use types::*;
/// Configures and assembles a `BeaconBlock` (with a matching `BeaconState`)
/// for use in block-processing benchmarks.
///
/// The `num_*` fields control how many of each operation type are inserted
/// into the generated block by `build`.
pub struct BlockBenchingBuilder {
    /// Builds the pre-state the generated block applies to.
    pub state_builder: TestingBeaconStateBuilder,
    /// Accumulates operations and produces the signed block.
    pub block_builder: TestingBeaconBlockBuilder,

    /// Number of validators in the generated state.
    pub num_validators: usize,
    /// Number of `ProposerSlashing` objects to insert.
    pub num_proposer_slashings: usize,
    /// Number of `AttesterSlashing` objects to insert.
    pub num_attester_slashings: usize,
    /// Validator indices consumed per slashable vote.
    pub num_indices_per_slashable_vote: usize,
    /// Number of `Attestation` objects to insert.
    pub num_attestations: usize,
    /// Number of `Deposit` objects to insert.
    pub num_deposits: usize,
    /// Number of voluntary-exit objects to insert.
    pub num_exits: usize,
    /// Number of `Transfer` objects to insert.
    pub num_transfers: usize,
}
impl BlockBenchingBuilder {
    /// Create a builder for a state with `num_validators` validators.
    ///
    /// All operation counts start at zero except
    /// `num_indices_per_slashable_vote`, which defaults to the spec maximum.
    pub fn new(num_validators: usize, spec: &ChainSpec) -> Self {
        let state_builder =
            TestingBeaconStateBuilder::from_default_keypairs_file_if_exists(num_validators, &spec);
        let block_builder = TestingBeaconBlockBuilder::new(spec);

        Self {
            state_builder,
            block_builder,
            // Bug fix: previously hard-coded to 0, silently discarding the
            // `num_validators` argument.
            num_validators,
            num_proposer_slashings: 0,
            num_attester_slashings: 0,
            num_indices_per_slashable_vote: spec.max_indices_per_slashable_vote as usize,
            num_attestations: 0,
            num_deposits: 0,
            num_exits: 0,
            num_transfers: 0,
        }
    }

    /// Set every operation count to the chain-spec maximum (worst-case block).
    pub fn maximize_block_operations(&mut self, spec: &ChainSpec) {
        self.num_proposer_slashings = spec.max_proposer_slashings as usize;
        self.num_attester_slashings = spec.max_attester_slashings as usize;
        self.num_indices_per_slashable_vote = spec.max_indices_per_slashable_vote as usize;
        self.num_attestations = spec.max_attestations as usize;
        self.num_deposits = spec.max_deposits as usize;
        self.num_exits = spec.max_voluntary_exits as usize;
        self.num_transfers = spec.max_transfers as usize;
    }

    /// Teleport the underlying state to the given slot.
    pub fn set_slot(&mut self, slot: Slot, spec: &ChainSpec) {
        self.state_builder.teleport_to_slot(slot, &spec);
    }

    /// Build all state caches up front so cache construction time is excluded
    /// from subsequent benchmarks.
    ///
    /// # Panics
    ///
    /// Panics if cache construction fails.
    pub fn build_caches(&mut self, spec: &ChainSpec) {
        // Builds all caches; benches will not contain shuffling/committee building times.
        self.state_builder.build_caches(&spec).unwrap();
    }

    /// Consume the builder, producing a signed block and its pre-state.
    ///
    /// Inserts the configured numbers of proposer/attester slashings,
    /// attestations, deposits, exits and transfers, signing each with the
    /// relevant validator keys, then signs the block with the proposer's key.
    ///
    /// # Panics
    ///
    /// Panics if there are insufficient validators to supply unique indices
    /// for the requested operations, or if attestation insertion or proposer
    /// lookup fails.
    pub fn build(mut self, spec: &ChainSpec) -> (BeaconBlock, BeaconState) {
        let (mut state, keypairs) = self.state_builder.build();
        let builder = &mut self.block_builder;

        builder.set_slot(state.slot);

        let proposer_index = state.get_beacon_proposer_index(state.slot, spec).unwrap();
        let keypair = &keypairs[proposer_index];

        builder.set_randao_reveal(&keypair.sk, &state.fork, spec);

        // Used as a stream of unique validator indices for slashings, exits,
        // etc. (a `Range` is already an `Iterator`; no `into_iter` needed).
        let mut validators_iter = 0..keypairs.len() as u64;

        // Insert `ProposerSlashing` objects.
        for _ in 0..self.num_proposer_slashings {
            let validator_index = validators_iter.next().expect("Insufficient validators.");

            builder.insert_proposer_slashing(
                validator_index,
                &keypairs[validator_index as usize].sk,
                &state.fork,
                spec,
            );
        }
        info!(
            "Inserted {} proposer slashings.",
            builder.block.body.proposer_slashings.len()
        );

        // Insert `AttesterSlashing` objects
        for _ in 0..self.num_attester_slashings {
            let mut attesters: Vec<u64> = vec![];
            let mut secret_keys: Vec<&SecretKey> = vec![];

            // Each slashing consumes a fresh batch of validator indices.
            for _ in 0..self.num_indices_per_slashable_vote {
                let validator_index = validators_iter.next().expect("Insufficient validators.");

                attesters.push(validator_index);
                secret_keys.push(&keypairs[validator_index as usize].sk);
            }

            builder.insert_attester_slashing(&attesters, &secret_keys, &state.fork, spec);
        }
        info!(
            "Inserted {} attester slashings.",
            builder.block.body.attester_slashings.len()
        );

        // Insert `Attestation` objects.
        let all_secret_keys: Vec<&SecretKey> = keypairs.iter().map(|keypair| &keypair.sk).collect();
        builder
            .insert_attestations(&state, &all_secret_keys, self.num_attestations, spec)
            .unwrap();
        info!(
            "Inserted {} attestations.",
            builder.block.body.attestations.len()
        );

        // Insert `Deposit` objects with sequential deposit indices.
        for i in 0..self.num_deposits {
            builder.insert_deposit(
                32_000_000_000,
                state.deposit_index + (i as u64),
                &state,
                spec,
            );
        }
        info!("Inserted {} deposits.", builder.block.body.deposits.len());

        // Insert the configured number of voluntary-exit objects.
        for _ in 0..self.num_exits {
            let validator_index = validators_iter.next().expect("Insufficient validators.");

            builder.insert_exit(
                &state,
                validator_index,
                &keypairs[validator_index as usize].sk,
                spec,
            );
        }
        info!(
            "Inserted {} exits.",
            builder.block.body.voluntary_exits.len()
        );

        // Insert the configured number of `Transfer` objects.
        for _ in 0..self.num_transfers {
            let validator_index = validators_iter.next().expect("Insufficient validators.");

            // Manually set the validator to be withdrawn.
            state.validator_registry[validator_index as usize].withdrawable_epoch =
                state.previous_epoch(spec);

            builder.insert_transfer(
                &state,
                validator_index,
                validator_index,
                1,
                keypairs[validator_index as usize].clone(),
                spec,
            );
        }
        info!("Inserted {} transfers.", builder.block.body.transfers.len());

        let mut block = self.block_builder.build(&keypair.sk, &state.fork, spec);

        // Set the eth1 data to be different from the state (fixed-size array
        // literal avoids the previous temporary `Vec` allocation).
        block.eth1_data.block_hash = Hash256::from_slice(&[42; 32]);

        (block, state)
    }
}

View File

@ -0,0 +1,347 @@
title: Sanity tests
summary: Basic sanity checks from the phase 0 spec pythonization. All tests are run
  with `verify_signatures` set to False.
test_suite: beacon_state
fork: tchaikovsky
version: v0.5.0
test_cases:
- name: test_empty_block_transition
config: {SHARD_COUNT: 8, TARGET_COMMITTEE_SIZE: 4, MAX_BALANCE_CHURN_QUOTIENT: 32,
MAX_INDICES_PER_SLASHABLE_VOTE: 4096, MAX_EXIT_DEQUEUES_PER_EPOCH: 4, SHUFFLE_ROUND_COUNT: 90,
DEPOSIT_CONTRACT_TREE_DEPTH: 32, MIN_DEPOSIT_AMOUNT: 1000000000, MAX_DEPOSIT_AMOUNT: 32000000000,
FORK_CHOICE_BALANCE_INCREMENT: 1000000000, EJECTION_BALANCE: 16000000000, GENESIS_FORK_VERSION: 0,
GENESIS_SLOT: 4294967296, GENESIS_EPOCH: 536870912, GENESIS_START_SHARD: 0, BLS_WITHDRAWAL_PREFIX_BYTE: 0,
SECONDS_PER_SLOT: 6, MIN_ATTESTATION_INCLUSION_DELAY: 2, SLOTS_PER_EPOCH: 8, MIN_SEED_LOOKAHEAD: 1,
ACTIVATION_EXIT_DELAY: 4, EPOCHS_PER_ETH1_VOTING_PERIOD: 16, SLOTS_PER_HISTORICAL_ROOT: 64,
MIN_VALIDATOR_WITHDRAWABILITY_DELAY: 256, PERSISTENT_COMMITTEE_PERIOD: 2048, LATEST_RANDAO_MIXES_LENGTH: 64,
LATEST_ACTIVE_INDEX_ROOTS_LENGTH: 64, LATEST_SLASHED_EXIT_LENGTH: 64, BASE_REWARD_QUOTIENT: 32,
WHISTLEBLOWER_REWARD_QUOTIENT: 512, ATTESTATION_INCLUSION_REWARD_QUOTIENT: 8,
INACTIVITY_PENALTY_QUOTIENT: 16777216, MIN_PENALTY_QUOTIENT: 32, MAX_PROPOSER_SLASHINGS: 16,
MAX_ATTESTER_SLASHINGS: 1, MAX_ATTESTATIONS: 128, MAX_DEPOSITS: 16, MAX_VOLUNTARY_EXITS: 16,
MAX_TRANSFERS: 16, DOMAIN_BEACON_BLOCK: 0, DOMAIN_RANDAO: 1, DOMAIN_ATTESTATION: 2,
DOMAIN_DEPOSIT: 3, DOMAIN_VOLUNTARY_EXIT: 4, DOMAIN_TRANSFER: 5}
verify_signatures: false
initial_state:
slot: 4294967296
genesis_time: 0
fork: {previous_version: 0, current_version: 0, epoch: 536870912}
validator_registry:
- {pubkey: '0x000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000',
withdrawal_credentials: '0x2222222222222222222222222222222222222222222222222222222222222222',
activation_epoch: 536870912, exit_epoch: 18446744073709551615, withdrawable_epoch: 18446744073709551615,
initiated_exit: false, slashed: false}
- {pubkey: '0x010000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000',
withdrawal_credentials: '0x2222222222222222222222222222222222222222222222222222222222222222',
activation_epoch: 536870912, exit_epoch: 18446744073709551615, withdrawable_epoch: 18446744073709551615,
initiated_exit: false, slashed: false}
- {pubkey: '0x020000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000',
withdrawal_credentials: '0x2222222222222222222222222222222222222222222222222222222222222222',
activation_epoch: 536870912, exit_epoch: 18446744073709551615, withdrawable_epoch: 18446744073709551615,
initiated_exit: false, slashed: false}
- {pubkey: '0x030000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000',
withdrawal_credentials: '0x2222222222222222222222222222222222222222222222222222222222222222',
activation_epoch: 536870912, exit_epoch: 18446744073709551615, withdrawable_epoch: 18446744073709551615,
initiated_exit: false, slashed: false}
- {pubkey: '0x040000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000',
withdrawal_credentials: '0x2222222222222222222222222222222222222222222222222222222222222222',
activation_epoch: 536870912, exit_epoch: 18446744073709551615, withdrawable_epoch: 18446744073709551615,
initiated_exit: false, slashed: false}
- {pubkey: '0x050000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000',
withdrawal_credentials: '0x2222222222222222222222222222222222222222222222222222222222222222',
activation_epoch: 536870912, exit_epoch: 18446744073709551615, withdrawable_epoch: 18446744073709551615,
initiated_exit: false, slashed: false}
- {pubkey: '0x060000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000',
withdrawal_credentials: '0x2222222222222222222222222222222222222222222222222222222222222222',
activation_epoch: 536870912, exit_epoch: 18446744073709551615, withdrawable_epoch: 18446744073709551615,
initiated_exit: false, slashed: false}
- {pubkey: '0x070000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000',
withdrawal_credentials: '0x2222222222222222222222222222222222222222222222222222222222222222',
activation_epoch: 536870912, exit_epoch: 18446744073709551615, withdrawable_epoch: 18446744073709551615,
initiated_exit: false, slashed: false}
- {pubkey: '0x080000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000',
withdrawal_credentials: '0x2222222222222222222222222222222222222222222222222222222222222222',
activation_epoch: 536870912, exit_epoch: 18446744073709551615, withdrawable_epoch: 18446744073709551615,
initiated_exit: false, slashed: false}
- {pubkey: '0x090000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000',
withdrawal_credentials: '0x2222222222222222222222222222222222222222222222222222222222222222',
activation_epoch: 536870912, exit_epoch: 18446744073709551615, withdrawable_epoch: 18446744073709551615,
initiated_exit: false, slashed: false}
- {pubkey: '0x0a0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000',
withdrawal_credentials: '0x2222222222222222222222222222222222222222222222222222222222222222',
activation_epoch: 536870912, exit_epoch: 18446744073709551615, withdrawable_epoch: 18446744073709551615,
initiated_exit: false, slashed: false}
- {pubkey: '0x0b0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000',
withdrawal_credentials: '0x2222222222222222222222222222222222222222222222222222222222222222',
activation_epoch: 536870912, exit_epoch: 18446744073709551615, withdrawable_epoch: 18446744073709551615,
initiated_exit: false, slashed: false}
- {pubkey: '0x0c0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000',
withdrawal_credentials: '0x2222222222222222222222222222222222222222222222222222222222222222',
activation_epoch: 536870912, exit_epoch: 18446744073709551615, withdrawable_epoch: 18446744073709551615,
initiated_exit: false, slashed: false}
- {pubkey: '0x0d0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000',
withdrawal_credentials: '0x2222222222222222222222222222222222222222222222222222222222222222',
activation_epoch: 536870912, exit_epoch: 18446744073709551615, withdrawable_epoch: 18446744073709551615,
initiated_exit: false, slashed: false}
- {pubkey: '0x0e0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000',
withdrawal_credentials: '0x2222222222222222222222222222222222222222222222222222222222222222',
activation_epoch: 536870912, exit_epoch: 18446744073709551615, withdrawable_epoch: 18446744073709551615,
initiated_exit: false, slashed: false}
- {pubkey: '0x0f0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000',
withdrawal_credentials: '0x2222222222222222222222222222222222222222222222222222222222222222',
activation_epoch: 536870912, exit_epoch: 18446744073709551615, withdrawable_epoch: 18446744073709551615,
initiated_exit: false, slashed: false}
- {pubkey: '0x100000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000',
withdrawal_credentials: '0x2222222222222222222222222222222222222222222222222222222222222222',
activation_epoch: 536870912, exit_epoch: 18446744073709551615, withdrawable_epoch: 18446744073709551615,
initiated_exit: false, slashed: false}
- {pubkey: '0x110000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000',
withdrawal_credentials: '0x2222222222222222222222222222222222222222222222222222222222222222',
activation_epoch: 536870912, exit_epoch: 18446744073709551615, withdrawable_epoch: 18446744073709551615,
initiated_exit: false, slashed: false}
- {pubkey: '0x120000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000',
withdrawal_credentials: '0x2222222222222222222222222222222222222222222222222222222222222222',
activation_epoch: 536870912, exit_epoch: 18446744073709551615, withdrawable_epoch: 18446744073709551615,
initiated_exit: false, slashed: false}
- {pubkey: '0x130000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000',
withdrawal_credentials: '0x2222222222222222222222222222222222222222222222222222222222222222',
activation_epoch: 536870912, exit_epoch: 18446744073709551615, withdrawable_epoch: 18446744073709551615,
initiated_exit: false, slashed: false}
- {pubkey: '0x140000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000',
withdrawal_credentials: '0x2222222222222222222222222222222222222222222222222222222222222222',
activation_epoch: 536870912, exit_epoch: 18446744073709551615, withdrawable_epoch: 18446744073709551615,
initiated_exit: false, slashed: false}
- {pubkey: '0x150000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000',
withdrawal_credentials: '0x2222222222222222222222222222222222222222222222222222222222222222',
activation_epoch: 536870912, exit_epoch: 18446744073709551615, withdrawable_epoch: 18446744073709551615,
initiated_exit: false, slashed: false}
- {pubkey: '0x160000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000',
withdrawal_credentials: '0x2222222222222222222222222222222222222222222222222222222222222222',
activation_epoch: 536870912, exit_epoch: 18446744073709551615, withdrawable_epoch: 18446744073709551615,
initiated_exit: false, slashed: false}
- {pubkey: '0x170000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000',
withdrawal_credentials: '0x2222222222222222222222222222222222222222222222222222222222222222',
activation_epoch: 536870912, exit_epoch: 18446744073709551615, withdrawable_epoch: 18446744073709551615,
initiated_exit: false, slashed: false}
- {pubkey: '0x180000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000',
withdrawal_credentials: '0x2222222222222222222222222222222222222222222222222222222222222222',
activation_epoch: 536870912, exit_epoch: 18446744073709551615, withdrawable_epoch: 18446744073709551615,
initiated_exit: false, slashed: false}
- {pubkey: '0x190000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000',
withdrawal_credentials: '0x2222222222222222222222222222222222222222222222222222222222222222',
activation_epoch: 536870912, exit_epoch: 18446744073709551615, withdrawable_epoch: 18446744073709551615,
initiated_exit: false, slashed: false}
- {pubkey: '0x1a0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000',
withdrawal_credentials: '0x2222222222222222222222222222222222222222222222222222222222222222',
activation_epoch: 536870912, exit_epoch: 18446744073709551615, withdrawable_epoch: 18446744073709551615,
initiated_exit: false, slashed: false}
- {pubkey: '0x1b0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000',
withdrawal_credentials: '0x2222222222222222222222222222222222222222222222222222222222222222',
activation_epoch: 536870912, exit_epoch: 18446744073709551615, withdrawable_epoch: 18446744073709551615,
initiated_exit: false, slashed: false}
- {pubkey: '0x1c0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000',
withdrawal_credentials: '0x2222222222222222222222222222222222222222222222222222222222222222',
activation_epoch: 536870912, exit_epoch: 18446744073709551615, withdrawable_epoch: 18446744073709551615,
initiated_exit: false, slashed: false}
- {pubkey: '0x1d0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000',
withdrawal_credentials: '0x2222222222222222222222222222222222222222222222222222222222222222',
activation_epoch: 536870912, exit_epoch: 18446744073709551615, withdrawable_epoch: 18446744073709551615,
initiated_exit: false, slashed: false}
- {pubkey: '0x1e0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000',
withdrawal_credentials: '0x2222222222222222222222222222222222222222222222222222222222222222',
activation_epoch: 536870912, exit_epoch: 18446744073709551615, withdrawable_epoch: 18446744073709551615,
initiated_exit: false, slashed: false}
- {pubkey: '0x1f0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000',
withdrawal_credentials: '0x2222222222222222222222222222222222222222222222222222222222222222',
activation_epoch: 536870912, exit_epoch: 18446744073709551615, withdrawable_epoch: 18446744073709551615,
initiated_exit: false, slashed: false}
validator_balances: [32000000000, 32000000000, 32000000000, 32000000000, 32000000000,
32000000000, 32000000000, 32000000000, 32000000000, 32000000000, 32000000000,
32000000000, 32000000000, 32000000000, 32000000000, 32000000000, 32000000000,
32000000000, 32000000000, 32000000000, 32000000000, 32000000000, 32000000000,
32000000000, 32000000000, 32000000000, 32000000000, 32000000000, 32000000000,
32000000000, 32000000000, 32000000000]
validator_registry_update_epoch: 536870912
latest_randao_mixes: ['0x0000000000000000000000000000000000000000000000000000000000000000',
'0x0000000000000000000000000000000000000000000000000000000000000000', '0x0000000000000000000000000000000000000000000000000000000000000000',
'0x0000000000000000000000000000000000000000000000000000000000000000', '0x0000000000000000000000000000000000000000000000000000000000000000',
'0x0000000000000000000000000000000000000000000000000000000000000000', '0x0000000000000000000000000000000000000000000000000000000000000000',
'0x0000000000000000000000000000000000000000000000000000000000000000', '0x0000000000000000000000000000000000000000000000000000000000000000',
'0x0000000000000000000000000000000000000000000000000000000000000000', '0x0000000000000000000000000000000000000000000000000000000000000000',
'0x0000000000000000000000000000000000000000000000000000000000000000', '0x0000000000000000000000000000000000000000000000000000000000000000',
'0x0000000000000000000000000000000000000000000000000000000000000000', '0x0000000000000000000000000000000000000000000000000000000000000000',
'0x0000000000000000000000000000000000000000000000000000000000000000', '0x0000000000000000000000000000000000000000000000000000000000000000',
'0x0000000000000000000000000000000000000000000000000000000000000000', '0x0000000000000000000000000000000000000000000000000000000000000000',
'0x0000000000000000000000000000000000000000000000000000000000000000', '0x0000000000000000000000000000000000000000000000000000000000000000',
'0x0000000000000000000000000000000000000000000000000000000000000000', '0x0000000000000000000000000000000000000000000000000000000000000000',
'0x0000000000000000000000000000000000000000000000000000000000000000', '0x0000000000000000000000000000000000000000000000000000000000000000',
'0x0000000000000000000000000000000000000000000000000000000000000000', '0x0000000000000000000000000000000000000000000000000000000000000000',
'0x0000000000000000000000000000000000000000000000000000000000000000', '0x0000000000000000000000000000000000000000000000000000000000000000',
'0x0000000000000000000000000000000000000000000000000000000000000000', '0x0000000000000000000000000000000000000000000000000000000000000000',
'0x0000000000000000000000000000000000000000000000000000000000000000', '0x0000000000000000000000000000000000000000000000000000000000000000',
'0x0000000000000000000000000000000000000000000000000000000000000000', '0x0000000000000000000000000000000000000000000000000000000000000000',
'0x0000000000000000000000000000000000000000000000000000000000000000', '0x0000000000000000000000000000000000000000000000000000000000000000',
'0x0000000000000000000000000000000000000000000000000000000000000000', '0x0000000000000000000000000000000000000000000000000000000000000000',
'0x0000000000000000000000000000000000000000000000000000000000000000', '0x0000000000000000000000000000000000000000000000000000000000000000',
'0x0000000000000000000000000000000000000000000000000000000000000000', '0x0000000000000000000000000000000000000000000000000000000000000000',
'0x0000000000000000000000000000000000000000000000000000000000000000', '0x0000000000000000000000000000000000000000000000000000000000000000',
'0x0000000000000000000000000000000000000000000000000000000000000000', '0x0000000000000000000000000000000000000000000000000000000000000000',
'0x0000000000000000000000000000000000000000000000000000000000000000', '0x0000000000000000000000000000000000000000000000000000000000000000',
'0x0000000000000000000000000000000000000000000000000000000000000000', '0x0000000000000000000000000000000000000000000000000000000000000000',
'0x0000000000000000000000000000000000000000000000000000000000000000', '0x0000000000000000000000000000000000000000000000000000000000000000',
'0x0000000000000000000000000000000000000000000000000000000000000000', '0x0000000000000000000000000000000000000000000000000000000000000000',
'0x0000000000000000000000000000000000000000000000000000000000000000', '0x0000000000000000000000000000000000000000000000000000000000000000',
'0x0000000000000000000000000000000000000000000000000000000000000000', '0x0000000000000000000000000000000000000000000000000000000000000000',
'0x0000000000000000000000000000000000000000000000000000000000000000', '0x0000000000000000000000000000000000000000000000000000000000000000',
'0x0000000000000000000000000000000000000000000000000000000000000000', '0x0000000000000000000000000000000000000000000000000000000000000000',
'0x0000000000000000000000000000000000000000000000000000000000000000']
previous_shuffling_start_shard: 0
current_shuffling_start_shard: 0
previous_shuffling_epoch: 536870912
current_shuffling_epoch: 536870912
previous_shuffling_seed: '0x0000000000000000000000000000000000000000000000000000000000000000'
current_shuffling_seed: '0x94ab448e948e6d501a2b48c1e9a0946f871100969f6fa70a990acf2348c9b185'
previous_epoch_attestations: []
current_epoch_attestations: []
previous_justified_epoch: 536870912
current_justified_epoch: 536870912
previous_justified_root: '0x0000000000000000000000000000000000000000000000000000000000000000'
current_justified_root: '0x0000000000000000000000000000000000000000000000000000000000000000'
justification_bitfield: 0
finalized_epoch: 536870912
finalized_root: '0x0000000000000000000000000000000000000000000000000000000000000000'
latest_crosslinks:
- {epoch: 536870912, crosslink_data_root: '0x0000000000000000000000000000000000000000000000000000000000000000'}
- {epoch: 536870912, crosslink_data_root: '0x0000000000000000000000000000000000000000000000000000000000000000'}
- {epoch: 536870912, crosslink_data_root: '0x0000000000000000000000000000000000000000000000000000000000000000'}
- {epoch: 536870912, crosslink_data_root: '0x0000000000000000000000000000000000000000000000000000000000000000'}
- {epoch: 536870912, crosslink_data_root: '0x0000000000000000000000000000000000000000000000000000000000000000'}
- {epoch: 536870912, crosslink_data_root: '0x0000000000000000000000000000000000000000000000000000000000000000'}
- {epoch: 536870912, crosslink_data_root: '0x0000000000000000000000000000000000000000000000000000000000000000'}
- {epoch: 536870912, crosslink_data_root: '0x0000000000000000000000000000000000000000000000000000000000000000'}
latest_block_roots: ['0x0000000000000000000000000000000000000000000000000000000000000000',
'0x0000000000000000000000000000000000000000000000000000000000000000', '0x0000000000000000000000000000000000000000000000000000000000000000',
'0x0000000000000000000000000000000000000000000000000000000000000000', '0x0000000000000000000000000000000000000000000000000000000000000000',
'0x0000000000000000000000000000000000000000000000000000000000000000', '0x0000000000000000000000000000000000000000000000000000000000000000',
'0x0000000000000000000000000000000000000000000000000000000000000000', '0x0000000000000000000000000000000000000000000000000000000000000000',
'0x0000000000000000000000000000000000000000000000000000000000000000', '0x0000000000000000000000000000000000000000000000000000000000000000',
'0x0000000000000000000000000000000000000000000000000000000000000000', '0x0000000000000000000000000000000000000000000000000000000000000000',
'0x0000000000000000000000000000000000000000000000000000000000000000', '0x0000000000000000000000000000000000000000000000000000000000000000',
'0x0000000000000000000000000000000000000000000000000000000000000000', '0x0000000000000000000000000000000000000000000000000000000000000000',
'0x0000000000000000000000000000000000000000000000000000000000000000', '0x0000000000000000000000000000000000000000000000000000000000000000',
'0x0000000000000000000000000000000000000000000000000000000000000000', '0x0000000000000000000000000000000000000000000000000000000000000000',
'0x0000000000000000000000000000000000000000000000000000000000000000', '0x0000000000000000000000000000000000000000000000000000000000000000',
'0x0000000000000000000000000000000000000000000000000000000000000000', '0x0000000000000000000000000000000000000000000000000000000000000000',
'0x0000000000000000000000000000000000000000000000000000000000000000', '0x0000000000000000000000000000000000000000000000000000000000000000',
'0x0000000000000000000000000000000000000000000000000000000000000000', '0x0000000000000000000000000000000000000000000000000000000000000000',
'0x0000000000000000000000000000000000000000000000000000000000000000', '0x0000000000000000000000000000000000000000000000000000000000000000',
'0x0000000000000000000000000000000000000000000000000000000000000000', '0x0000000000000000000000000000000000000000000000000000000000000000',
'0x0000000000000000000000000000000000000000000000000000000000000000', '0x0000000000000000000000000000000000000000000000000000000000000000',
'0x0000000000000000000000000000000000000000000000000000000000000000', '0x0000000000000000000000000000000000000000000000000000000000000000',
'0x0000000000000000000000000000000000000000000000000000000000000000', '0x0000000000000000000000000000000000000000000000000000000000000000',
'0x0000000000000000000000000000000000000000000000000000000000000000', '0x0000000000000000000000000000000000000000000000000000000000000000',
'0x0000000000000000000000000000000000000000000000000000000000000000', '0x0000000000000000000000000000000000000000000000000000000000000000',
'0x0000000000000000000000000000000000000000000000000000000000000000', '0x0000000000000000000000000000000000000000000000000000000000000000',
'0x0000000000000000000000000000000000000000000000000000000000000000', '0x0000000000000000000000000000000000000000000000000000000000000000',
'0x0000000000000000000000000000000000000000000000000000000000000000', '0x0000000000000000000000000000000000000000000000000000000000000000',
'0x0000000000000000000000000000000000000000000000000000000000000000', '0x0000000000000000000000000000000000000000000000000000000000000000',
'0x0000000000000000000000000000000000000000000000000000000000000000', '0x0000000000000000000000000000000000000000000000000000000000000000',
'0x0000000000000000000000000000000000000000000000000000000000000000', '0x0000000000000000000000000000000000000000000000000000000000000000',
'0x0000000000000000000000000000000000000000000000000000000000000000', '0x0000000000000000000000000000000000000000000000000000000000000000',
'0x0000000000000000000000000000000000000000000000000000000000000000', '0x0000000000000000000000000000000000000000000000000000000000000000',
'0x0000000000000000000000000000000000000000000000000000000000000000', '0x0000000000000000000000000000000000000000000000000000000000000000',
'0x0000000000000000000000000000000000000000000000000000000000000000', '0x0000000000000000000000000000000000000000000000000000000000000000',
'0x0000000000000000000000000000000000000000000000000000000000000000']
latest_state_roots: ['0x0000000000000000000000000000000000000000000000000000000000000000',
'0x0000000000000000000000000000000000000000000000000000000000000000', '0x0000000000000000000000000000000000000000000000000000000000000000',
'0x0000000000000000000000000000000000000000000000000000000000000000', '0x0000000000000000000000000000000000000000000000000000000000000000',
'0x0000000000000000000000000000000000000000000000000000000000000000', '0x0000000000000000000000000000000000000000000000000000000000000000',
'0x0000000000000000000000000000000000000000000000000000000000000000', '0x0000000000000000000000000000000000000000000000000000000000000000',
'0x0000000000000000000000000000000000000000000000000000000000000000', '0x0000000000000000000000000000000000000000000000000000000000000000',
'0x0000000000000000000000000000000000000000000000000000000000000000', '0x0000000000000000000000000000000000000000000000000000000000000000',
'0x0000000000000000000000000000000000000000000000000000000000000000', '0x0000000000000000000000000000000000000000000000000000000000000000',
'0x0000000000000000000000000000000000000000000000000000000000000000', '0x0000000000000000000000000000000000000000000000000000000000000000',
'0x0000000000000000000000000000000000000000000000000000000000000000', '0x0000000000000000000000000000000000000000000000000000000000000000',
'0x0000000000000000000000000000000000000000000000000000000000000000', '0x0000000000000000000000000000000000000000000000000000000000000000',
'0x0000000000000000000000000000000000000000000000000000000000000000', '0x0000000000000000000000000000000000000000000000000000000000000000',
'0x0000000000000000000000000000000000000000000000000000000000000000', '0x0000000000000000000000000000000000000000000000000000000000000000',
'0x0000000000000000000000000000000000000000000000000000000000000000', '0x0000000000000000000000000000000000000000000000000000000000000000',
'0x0000000000000000000000000000000000000000000000000000000000000000', '0x0000000000000000000000000000000000000000000000000000000000000000',
'0x0000000000000000000000000000000000000000000000000000000000000000', '0x0000000000000000000000000000000000000000000000000000000000000000',
'0x0000000000000000000000000000000000000000000000000000000000000000', '0x0000000000000000000000000000000000000000000000000000000000000000',
'0x0000000000000000000000000000000000000000000000000000000000000000', '0x0000000000000000000000000000000000000000000000000000000000000000',
'0x0000000000000000000000000000000000000000000000000000000000000000', '0x0000000000000000000000000000000000000000000000000000000000000000',
'0x0000000000000000000000000000000000000000000000000000000000000000', '0x0000000000000000000000000000000000000000000000000000000000000000',
'0x0000000000000000000000000000000000000000000000000000000000000000', '0x0000000000000000000000000000000000000000000000000000000000000000',
'0x0000000000000000000000000000000000000000000000000000000000000000', '0x0000000000000000000000000000000000000000000000000000000000000000',
'0x0000000000000000000000000000000000000000000000000000000000000000', '0x0000000000000000000000000000000000000000000000000000000000000000',
'0x0000000000000000000000000000000000000000000000000000000000000000', '0x0000000000000000000000000000000000000000000000000000000000000000',
'0x0000000000000000000000000000000000000000000000000000000000000000', '0x0000000000000000000000000000000000000000000000000000000000000000',
'0x0000000000000000000000000000000000000000000000000000000000000000', '0x0000000000000000000000000000000000000000000000000000000000000000',
'0x0000000000000000000000000000000000000000000000000000000000000000', '0x0000000000000000000000000000000000000000000000000000000000000000',
'0x0000000000000000000000000000000000000000000000000000000000000000', '0x0000000000000000000000000000000000000000000000000000000000000000',
'0x0000000000000000000000000000000000000000000000000000000000000000', '0x0000000000000000000000000000000000000000000000000000000000000000',
'0x0000000000000000000000000000000000000000000000000000000000000000', '0x0000000000000000000000000000000000000000000000000000000000000000',
'0x0000000000000000000000000000000000000000000000000000000000000000', '0x0000000000000000000000000000000000000000000000000000000000000000',
'0x0000000000000000000000000000000000000000000000000000000000000000', '0x0000000000000000000000000000000000000000000000000000000000000000',
'0x0000000000000000000000000000000000000000000000000000000000000000']
latest_active_index_roots: ['0xf6b8ca96e524598ba62d563347f5aea6ce2d81d644e2788687e5a92844df1b42',
'0xf6b8ca96e524598ba62d563347f5aea6ce2d81d644e2788687e5a92844df1b42', '0xf6b8ca96e524598ba62d563347f5aea6ce2d81d644e2788687e5a92844df1b42',
'0xf6b8ca96e524598ba62d563347f5aea6ce2d81d644e2788687e5a92844df1b42', '0xf6b8ca96e524598ba62d563347f5aea6ce2d81d644e2788687e5a92844df1b42',
'0xf6b8ca96e524598ba62d563347f5aea6ce2d81d644e2788687e5a92844df1b42', '0xf6b8ca96e524598ba62d563347f5aea6ce2d81d644e2788687e5a92844df1b42',
'0xf6b8ca96e524598ba62d563347f5aea6ce2d81d644e2788687e5a92844df1b42', '0xf6b8ca96e524598ba62d563347f5aea6ce2d81d644e2788687e5a92844df1b42',
'0xf6b8ca96e524598ba62d563347f5aea6ce2d81d644e2788687e5a92844df1b42', '0xf6b8ca96e524598ba62d563347f5aea6ce2d81d644e2788687e5a92844df1b42',
'0xf6b8ca96e524598ba62d563347f5aea6ce2d81d644e2788687e5a92844df1b42', '0xf6b8ca96e524598ba62d563347f5aea6ce2d81d644e2788687e5a92844df1b42',
'0xf6b8ca96e524598ba62d563347f5aea6ce2d81d644e2788687e5a92844df1b42', '0xf6b8ca96e524598ba62d563347f5aea6ce2d81d644e2788687e5a92844df1b42',
'0xf6b8ca96e524598ba62d563347f5aea6ce2d81d644e2788687e5a92844df1b42', '0xf6b8ca96e524598ba62d563347f5aea6ce2d81d644e2788687e5a92844df1b42',
'0xf6b8ca96e524598ba62d563347f5aea6ce2d81d644e2788687e5a92844df1b42', '0xf6b8ca96e524598ba62d563347f5aea6ce2d81d644e2788687e5a92844df1b42',
'0xf6b8ca96e524598ba62d563347f5aea6ce2d81d644e2788687e5a92844df1b42', '0xf6b8ca96e524598ba62d563347f5aea6ce2d81d644e2788687e5a92844df1b42',
'0xf6b8ca96e524598ba62d563347f5aea6ce2d81d644e2788687e5a92844df1b42', '0xf6b8ca96e524598ba62d563347f5aea6ce2d81d644e2788687e5a92844df1b42',
'0xf6b8ca96e524598ba62d563347f5aea6ce2d81d644e2788687e5a92844df1b42', '0xf6b8ca96e524598ba62d563347f5aea6ce2d81d644e2788687e5a92844df1b42',
'0xf6b8ca96e524598ba62d563347f5aea6ce2d81d644e2788687e5a92844df1b42', '0xf6b8ca96e524598ba62d563347f5aea6ce2d81d644e2788687e5a92844df1b42',
'0xf6b8ca96e524598ba62d563347f5aea6ce2d81d644e2788687e5a92844df1b42', '0xf6b8ca96e524598ba62d563347f5aea6ce2d81d644e2788687e5a92844df1b42',
'0xf6b8ca96e524598ba62d563347f5aea6ce2d81d644e2788687e5a92844df1b42', '0xf6b8ca96e524598ba62d563347f5aea6ce2d81d644e2788687e5a92844df1b42',
'0xf6b8ca96e524598ba62d563347f5aea6ce2d81d644e2788687e5a92844df1b42', '0xf6b8ca96e524598ba62d563347f5aea6ce2d81d644e2788687e5a92844df1b42',
'0xf6b8ca96e524598ba62d563347f5aea6ce2d81d644e2788687e5a92844df1b42', '0xf6b8ca96e524598ba62d563347f5aea6ce2d81d644e2788687e5a92844df1b42',
'0xf6b8ca96e524598ba62d563347f5aea6ce2d81d644e2788687e5a92844df1b42', '0xf6b8ca96e524598ba62d563347f5aea6ce2d81d644e2788687e5a92844df1b42',
'0xf6b8ca96e524598ba62d563347f5aea6ce2d81d644e2788687e5a92844df1b42', '0xf6b8ca96e524598ba62d563347f5aea6ce2d81d644e2788687e5a92844df1b42',
'0xf6b8ca96e524598ba62d563347f5aea6ce2d81d644e2788687e5a92844df1b42', '0xf6b8ca96e524598ba62d563347f5aea6ce2d81d644e2788687e5a92844df1b42',
'0xf6b8ca96e524598ba62d563347f5aea6ce2d81d644e2788687e5a92844df1b42', '0xf6b8ca96e524598ba62d563347f5aea6ce2d81d644e2788687e5a92844df1b42',
'0xf6b8ca96e524598ba62d563347f5aea6ce2d81d644e2788687e5a92844df1b42', '0xf6b8ca96e524598ba62d563347f5aea6ce2d81d644e2788687e5a92844df1b42',
'0xf6b8ca96e524598ba62d563347f5aea6ce2d81d644e2788687e5a92844df1b42', '0xf6b8ca96e524598ba62d563347f5aea6ce2d81d644e2788687e5a92844df1b42',
'0xf6b8ca96e524598ba62d563347f5aea6ce2d81d644e2788687e5a92844df1b42', '0xf6b8ca96e524598ba62d563347f5aea6ce2d81d644e2788687e5a92844df1b42',
'0xf6b8ca96e524598ba62d563347f5aea6ce2d81d644e2788687e5a92844df1b42', '0xf6b8ca96e524598ba62d563347f5aea6ce2d81d644e2788687e5a92844df1b42',
'0xf6b8ca96e524598ba62d563347f5aea6ce2d81d644e2788687e5a92844df1b42', '0xf6b8ca96e524598ba62d563347f5aea6ce2d81d644e2788687e5a92844df1b42',
'0xf6b8ca96e524598ba62d563347f5aea6ce2d81d644e2788687e5a92844df1b42', '0xf6b8ca96e524598ba62d563347f5aea6ce2d81d644e2788687e5a92844df1b42',
'0xf6b8ca96e524598ba62d563347f5aea6ce2d81d644e2788687e5a92844df1b42', '0xf6b8ca96e524598ba62d563347f5aea6ce2d81d644e2788687e5a92844df1b42',
'0xf6b8ca96e524598ba62d563347f5aea6ce2d81d644e2788687e5a92844df1b42', '0xf6b8ca96e524598ba62d563347f5aea6ce2d81d644e2788687e5a92844df1b42',
'0xf6b8ca96e524598ba62d563347f5aea6ce2d81d644e2788687e5a92844df1b42', '0xf6b8ca96e524598ba62d563347f5aea6ce2d81d644e2788687e5a92844df1b42',
'0xf6b8ca96e524598ba62d563347f5aea6ce2d81d644e2788687e5a92844df1b42', '0xf6b8ca96e524598ba62d563347f5aea6ce2d81d644e2788687e5a92844df1b42',
'0xf6b8ca96e524598ba62d563347f5aea6ce2d81d644e2788687e5a92844df1b42']
latest_slashed_balances: [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]
latest_block_header: {slot: 4294967296, previous_block_root: '0x0000000000000000000000000000000000000000000000000000000000000000',
state_root: '0x0000000000000000000000000000000000000000000000000000000000000000',
block_body_root: '0x5359b62990beb1d78e1cec479f5a4d80af84709886a8e16c535dff0556dc0e2d',
signature: '0x000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000'}
historical_roots: []
latest_eth1_data: {deposit_root: '0xb05de6a9059df0c9a2ab5f76708d256941dfe9eb89e6fda549b30713087d2a5e',
block_hash: '0x0000000000000000000000000000000000000000000000000000000000000000'}
eth1_data_votes: []
deposit_index: 32
blocks:
- slot: 4294967297
previous_block_root: '0x92ed652508d2b4c109a857107101716b18e257e7ce0d199d4b16232956e9e27e'
state_root: '0x0000000000000000000000000000000000000000000000000000000000000000'
body:
randao_reveal: '0x000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000'
eth1_data: {deposit_root: '0x0000000000000000000000000000000000000000000000000000000000000000',
block_hash: '0x0000000000000000000000000000000000000000000000000000000000000000'}
proposer_slashings: []
attester_slashings: []
attestations: []
deposits: []
voluntary_exits: []
transfers: []
signature: '0x000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000'
expected_state: {slot: 4294967297}

View File

@ -0,0 +1,59 @@
use super::per_block_processing::{errors::BlockProcessingError, process_deposits};
use ssz::TreeHash;
use types::*;
/// Errors that can occur while constructing the genesis `BeaconState`.
///
/// Each variant wraps one of the two failure classes reachable from
/// `get_genesis_state`; the `From` impls at the bottom of this file allow
/// `?`-style conversion into this type.
pub enum GenesisError {
    /// Failure while applying the genesis deposits (per-block processing).
    BlockProcessingError(BlockProcessingError),
    /// Failure while reading or mutating the `BeaconState` itself
    /// (e.g. cache building, seed generation).
    BeaconStateError(BeaconStateError),
}
/// Builds and fully initializes the genesis `BeaconState`.
///
/// Steps performed, in order:
/// 1. construct the bare genesis state from `genesis_time` and `genesis_eth1_data`;
/// 2. apply `genesis_validator_deposits` via standard deposit processing;
/// 3. activate every validator whose effective balance reaches `max_deposit_amount`;
/// 4. build the current-epoch cache;
/// 5. seed `latest_active_index_roots` with the genesis active-index root;
/// 6. generate the current shuffling seed.
///
/// NOTE(review): the constructed `state` is dropped at the end — the function
/// returns `Ok(())`, not the state, despite its name. Callers cannot obtain the
/// genesis state from this function as written; confirm whether the return type
/// should be `Result<BeaconState, _>`.
///
/// Errors are surfaced as `BlockProcessingError`; `BeaconStateError`s from the
/// `?` sites below are converted via a `From` impl assumed to live in the
/// errors module — TODO confirm.
///
/// Spec v0.5.0
pub fn get_genesis_state(
    genesis_validator_deposits: &[Deposit],
    genesis_time: u64,
    genesis_eth1_data: Eth1Data,
    spec: &ChainSpec,
) -> Result<(), BlockProcessingError> {
    // Get the genesis `BeaconState`
    let mut state = BeaconState::genesis(genesis_time, genesis_eth1_data, spec);
    // Process genesis deposits.
    process_deposits(&mut state, genesis_validator_deposits, spec)?;
    // Process genesis activations: any validator funded to the max deposit
    // amount is considered active from the genesis epoch.
    for i in 0..state.validator_registry.len() {
        if state.get_effective_balance(i, spec)? >= spec.max_deposit_amount {
            state.validator_registry[i].activation_epoch = spec.genesis_epoch;
        }
    }
    // Ensure the current epoch cache is built (required by `generate_seed` below —
    // TODO confirm).
    state.build_epoch_cache(RelativeEpoch::Current, spec)?;
    // Set all the active index roots to be the genesis active index root.
    // `.to_vec()` copies the indices out so `state` is no longer borrowed
    // when we mutate it below.
    let active_validator_indices = state
        .get_active_validator_indices(spec.genesis_epoch, spec)?
        .to_vec();
    let genesis_active_index_root = Hash256::from_slice(&active_validator_indices.hash_tree_root());
    // Every slot of the roots vector starts out as the same genesis root.
    state.latest_active_index_roots =
        vec![genesis_active_index_root; spec.latest_active_index_roots_length as usize];
    // Generate the current shuffling seed.
    state.current_shuffling_seed = state.generate_seed(spec.genesis_epoch, spec)?;
    Ok(())
}
impl From<BlockProcessingError> for GenesisError {
fn from(e: BlockProcessingError) -> GenesisError {
GenesisError::BlockProcessingError(e)
}
}
impl From<BeaconStateError> for GenesisError {
fn from(e: BeaconStateError) -> GenesisError {
GenesisError::BeaconStateError(e)
}
}

View File

@ -1,10 +1,12 @@
#[macro_use] #[macro_use]
mod macros; mod macros;
pub mod get_genesis_state;
pub mod per_block_processing; pub mod per_block_processing;
pub mod per_epoch_processing; pub mod per_epoch_processing;
pub mod per_slot_processing; pub mod per_slot_processing;
pub use get_genesis_state::get_genesis_state;
pub use per_block_processing::{ pub use per_block_processing::{
errors::{BlockInvalid, BlockProcessingError}, errors::{BlockInvalid, BlockProcessingError},
per_block_processing, per_block_processing_without_verifying_block_signature, per_block_processing, per_block_processing_without_verifying_block_signature,

View File

@ -1,14 +1,16 @@
use self::verify_proposer_slashing::verify_proposer_slashing; use self::verify_proposer_slashing::verify_proposer_slashing;
use errors::{BlockInvalid as Invalid, BlockProcessingError as Error, IntoWithIndex}; use errors::{BlockInvalid as Invalid, BlockProcessingError as Error, IntoWithIndex};
use hashing::hash; use rayon::prelude::*;
use log::debug; use ssz::{SignedRoot, TreeHash};
use ssz::{ssz_encode, SignedRoot, TreeHash};
use types::*; use types::*;
pub use self::verify_attester_slashing::verify_attester_slashing; pub use self::verify_attester_slashing::{
gather_attester_slashing_indices, verify_attester_slashing,
};
pub use validate_attestation::{validate_attestation, validate_attestation_without_signature}; pub use validate_attestation::{validate_attestation, validate_attestation_without_signature};
pub use verify_deposit::verify_deposit; pub use verify_deposit::{get_existing_validator_index, verify_deposit, verify_deposit_index};
pub use verify_exit::verify_exit; pub use verify_exit::verify_exit;
pub use verify_slashable_attestation::verify_slashable_attestation;
pub use verify_transfer::{execute_transfer, verify_transfer}; pub use verify_transfer::{execute_transfer, verify_transfer};
pub mod errors; pub mod errors;
@ -30,7 +32,7 @@ const VERIFY_DEPOSIT_MERKLE_PROOFS: bool = false;
/// Returns `Ok(())` if the block is valid and the state was successfully updated. Otherwise /// Returns `Ok(())` if the block is valid and the state was successfully updated. Otherwise
/// returns an error describing why the block was invalid or how the function failed to execute. /// returns an error describing why the block was invalid or how the function failed to execute.
/// ///
/// Spec v0.4.0 /// Spec v0.5.0
pub fn per_block_processing( pub fn per_block_processing(
state: &mut BeaconState, state: &mut BeaconState,
block: &BeaconBlock, block: &BeaconBlock,
@ -45,7 +47,7 @@ pub fn per_block_processing(
/// Returns `Ok(())` if the block is valid and the state was successfully updated. Otherwise /// Returns `Ok(())` if the block is valid and the state was successfully updated. Otherwise
/// returns an error describing why the block was invalid or how the function failed to execute. /// returns an error describing why the block was invalid or how the function failed to execute.
/// ///
/// Spec v0.4.0 /// Spec v0.5.0
pub fn per_block_processing_without_verifying_block_signature( pub fn per_block_processing_without_verifying_block_signature(
state: &mut BeaconState, state: &mut BeaconState,
block: &BeaconBlock, block: &BeaconBlock,
@ -60,63 +62,75 @@ pub fn per_block_processing_without_verifying_block_signature(
/// Returns `Ok(())` if the block is valid and the state was successfully updated. Otherwise /// Returns `Ok(())` if the block is valid and the state was successfully updated. Otherwise
/// returns an error describing why the block was invalid or how the function failed to execute. /// returns an error describing why the block was invalid or how the function failed to execute.
/// ///
/// Spec v0.4.0 /// Spec v0.5.0
fn per_block_processing_signature_optional( fn per_block_processing_signature_optional(
mut state: &mut BeaconState, mut state: &mut BeaconState,
block: &BeaconBlock, block: &BeaconBlock,
should_verify_block_signature: bool, should_verify_block_signature: bool,
spec: &ChainSpec, spec: &ChainSpec,
) -> Result<(), Error> { ) -> Result<(), Error> {
// Verify that `block.slot == state.slot`. process_block_header(state, block, spec)?;
verify!(block.slot == state.slot, Invalid::StateSlotMismatch);
// Ensure the current epoch cache is built. // Ensure the current and previous epoch cache is built.
state.build_epoch_cache(RelativeEpoch::Previous, spec)?;
state.build_epoch_cache(RelativeEpoch::Current, spec)?; state.build_epoch_cache(RelativeEpoch::Current, spec)?;
if should_verify_block_signature { if should_verify_block_signature {
verify_block_signature(&state, &block, &spec)?; verify_block_signature(&state, &block, &spec)?;
} }
process_randao(&mut state, &block, &spec)?; process_randao(&mut state, &block, &spec)?;
process_eth1_data(&mut state, &block.eth1_data)?; process_eth1_data(&mut state, &block.body.eth1_data)?;
process_proposer_slashings(&mut state, &block.body.proposer_slashings[..], spec)?; process_proposer_slashings(&mut state, &block.body.proposer_slashings, spec)?;
process_attester_slashings(&mut state, &block.body.attester_slashings[..], spec)?; process_attester_slashings(&mut state, &block.body.attester_slashings, spec)?;
process_attestations(&mut state, &block.body.attestations[..], spec)?; process_attestations(&mut state, &block.body.attestations, spec)?;
process_deposits(&mut state, &block.body.deposits[..], spec)?; process_deposits(&mut state, &block.body.deposits, spec)?;
process_exits(&mut state, &block.body.voluntary_exits[..], spec)?; process_exits(&mut state, &block.body.voluntary_exits, spec)?;
process_transfers(&mut state, &block.body.transfers[..], spec)?; process_transfers(&mut state, &block.body.transfers, spec)?;
debug!("per_block_processing complete."); Ok(())
}
/// Processes the block header.
///
/// Spec v0.5.0
pub fn process_block_header(
state: &mut BeaconState,
block: &BeaconBlock,
spec: &ChainSpec,
) -> Result<(), Error> {
verify!(block.slot == state.slot, Invalid::StateSlotMismatch);
verify!(
block.previous_block_root.as_bytes() == &state.latest_block_header.hash_tree_root()[..],
Invalid::ParentBlockRootMismatch
);
state.latest_block_header = block.into_temporary_header(spec);
Ok(()) Ok(())
} }
/// Verifies the signature of a block. /// Verifies the signature of a block.
/// ///
/// Spec v0.4.0 /// Spec v0.5.0
pub fn verify_block_signature( pub fn verify_block_signature(
state: &BeaconState, state: &BeaconState,
block: &BeaconBlock, block: &BeaconBlock,
spec: &ChainSpec, spec: &ChainSpec,
) -> Result<(), Error> { ) -> Result<(), Error> {
let block_proposer = let block_proposer = &state.validator_registry
&state.validator_registry[state.get_beacon_proposer_index(block.slot, spec)?]; [state.get_beacon_proposer_index(block.slot, RelativeEpoch::Current, spec)?];
let proposal = Proposal {
slot: block.slot,
shard: spec.beacon_chain_shard_number,
block_root: Hash256::from_slice(&block.signed_root()[..]),
signature: block.signature.clone(),
};
let domain = spec.get_domain( let domain = spec.get_domain(
block.slot.epoch(spec.slots_per_epoch), block.slot.epoch(spec.slots_per_epoch),
Domain::Proposal, Domain::BeaconBlock,
&state.fork, &state.fork,
); );
verify!( verify!(
proposal block
.signature .signature
.verify(&proposal.signed_root()[..], domain, &block_proposer.pubkey), .verify(&block.signed_root()[..], domain, &block_proposer.pubkey),
Invalid::BadSignature Invalid::BadSignature
); );
@ -126,21 +140,18 @@ pub fn verify_block_signature(
/// Verifies the `randao_reveal` against the block's proposer pubkey and updates /// Verifies the `randao_reveal` against the block's proposer pubkey and updates
/// `state.latest_randao_mixes`. /// `state.latest_randao_mixes`.
/// ///
/// Spec v0.4.0 /// Spec v0.5.0
pub fn process_randao( pub fn process_randao(
state: &mut BeaconState, state: &mut BeaconState,
block: &BeaconBlock, block: &BeaconBlock,
spec: &ChainSpec, spec: &ChainSpec,
) -> Result<(), Error> { ) -> Result<(), Error> {
// Let `proposer = state.validator_registry[get_beacon_proposer_index(state, state.slot)]`. let block_proposer = &state.validator_registry
let block_proposer = [state.get_beacon_proposer_index(block.slot, RelativeEpoch::Current, spec)?];
&state.validator_registry[state.get_beacon_proposer_index(block.slot, spec)?];
// Verify that `bls_verify(pubkey=proposer.pubkey, // Verify the RANDAO is a valid signature of the proposer.
// message_hash=hash_tree_root(get_current_epoch(state)), signature=block.randao_reveal,
// domain=get_domain(state.fork, get_current_epoch(state), DOMAIN_RANDAO))`.
verify!( verify!(
block.randao_reveal.verify( block.body.randao_reveal.verify(
&state.current_epoch(spec).hash_tree_root()[..], &state.current_epoch(spec).hash_tree_root()[..],
spec.get_domain( spec.get_domain(
block.slot.epoch(spec.slots_per_epoch), block.slot.epoch(spec.slots_per_epoch),
@ -152,21 +163,23 @@ pub fn process_randao(
Invalid::BadRandaoSignature Invalid::BadRandaoSignature
); );
// Update the state's RANDAO mix with the one revealed in the block. // Update the current epoch RANDAO mix.
update_randao(state, &block.randao_reveal, spec)?; state.update_randao_mix(state.current_epoch(spec), &block.body.randao_reveal, spec)?;
Ok(()) Ok(())
} }
/// Update the `state.eth1_data_votes` based upon the `eth1_data` provided. /// Update the `state.eth1_data_votes` based upon the `eth1_data` provided.
/// ///
/// Spec v0.4.0 /// Spec v0.5.0
pub fn process_eth1_data(state: &mut BeaconState, eth1_data: &Eth1Data) -> Result<(), Error> { pub fn process_eth1_data(state: &mut BeaconState, eth1_data: &Eth1Data) -> Result<(), Error> {
// Either increment the eth1_data vote count, or add a new eth1_data. // Attempt to find a `Eth1DataVote` with matching `Eth1Data`.
let matching_eth1_vote_index = state let matching_eth1_vote_index = state
.eth1_data_votes .eth1_data_votes
.iter() .iter()
.position(|vote| vote.eth1_data == *eth1_data); .position(|vote| vote.eth1_data == *eth1_data);
// If a vote exists, increment it's `vote_count`. Otherwise, create a new `Eth1DataVote`.
if let Some(index) = matching_eth1_vote_index { if let Some(index) = matching_eth1_vote_index {
state.eth1_data_votes[index].vote_count += 1; state.eth1_data_votes[index].vote_count += 1;
} else { } else {
@ -179,46 +192,12 @@ pub fn process_eth1_data(state: &mut BeaconState, eth1_data: &Eth1Data) -> Resul
Ok(()) Ok(())
} }
/// Updates the present randao mix.
///
/// Set `state.latest_randao_mixes[get_current_epoch(state) % LATEST_RANDAO_MIXES_LENGTH] =
/// xor(get_randao_mix(state, get_current_epoch(state)), hash(block.randao_reveal))`.
///
/// Spec v0.4.0
pub fn update_randao(
state: &mut BeaconState,
reveal: &Signature,
spec: &ChainSpec,
) -> Result<(), BeaconStateError> {
let hashed_reveal = {
let encoded_signature = ssz_encode(reveal);
Hash256::from_slice(&hash(&encoded_signature[..])[..])
};
let current_epoch = state.slot.epoch(spec.slots_per_epoch);
let current_mix = state
.get_randao_mix(current_epoch, spec)
.ok_or_else(|| BeaconStateError::InsufficientRandaoMixes)?;
let new_mix = *current_mix ^ hashed_reveal;
let index = current_epoch.as_usize() % spec.latest_randao_mixes_length;
if index < state.latest_randao_mixes.len() {
state.latest_randao_mixes[index] = new_mix;
Ok(())
} else {
Err(BeaconStateError::InsufficientRandaoMixes)
}
}
/// Validates each `ProposerSlashing` and updates the state, short-circuiting on an invalid object. /// Validates each `ProposerSlashing` and updates the state, short-circuiting on an invalid object.
/// ///
/// Returns `Ok(())` if the validation and state updates completed successfully, otherwise returns /// Returns `Ok(())` if the validation and state updates completed successfully, otherwise returns
/// an `Err` describing the invalid object or cause of failure. /// an `Err` describing the invalid object or cause of failure.
/// ///
/// Spec v0.4.0 /// Spec v0.5.0
pub fn process_proposer_slashings( pub fn process_proposer_slashings(
state: &mut BeaconState, state: &mut BeaconState,
proposer_slashings: &[ProposerSlashing], proposer_slashings: &[ProposerSlashing],
@ -228,9 +207,18 @@ pub fn process_proposer_slashings(
proposer_slashings.len() as u64 <= spec.max_proposer_slashings, proposer_slashings.len() as u64 <= spec.max_proposer_slashings,
Invalid::MaxProposerSlashingsExceeded Invalid::MaxProposerSlashingsExceeded
); );
for (i, proposer_slashing) in proposer_slashings.iter().enumerate() {
verify_proposer_slashing(proposer_slashing, &state, spec) // Verify proposer slashings in parallel.
.map_err(|e| e.into_with_index(i))?; proposer_slashings
.par_iter()
.enumerate()
.try_for_each(|(i, proposer_slashing)| {
verify_proposer_slashing(proposer_slashing, &state, spec)
.map_err(|e| e.into_with_index(i))
})?;
// Update the state.
for proposer_slashing in proposer_slashings {
state.slash_validator(proposer_slashing.proposer_index as usize, spec)?; state.slash_validator(proposer_slashing.proposer_index as usize, spec)?;
} }
@ -242,7 +230,7 @@ pub fn process_proposer_slashings(
/// Returns `Ok(())` if the validation and state updates completed successfully, otherwise returns /// Returns `Ok(())` if the validation and state updates completed successfully, otherwise returns
/// an `Err` describing the invalid object or cause of failure. /// an `Err` describing the invalid object or cause of failure.
/// ///
/// Spec v0.4.0 /// Spec v0.5.0
pub fn process_attester_slashings( pub fn process_attester_slashings(
state: &mut BeaconState, state: &mut BeaconState,
attester_slashings: &[AttesterSlashing], attester_slashings: &[AttesterSlashing],
@ -252,9 +240,41 @@ pub fn process_attester_slashings(
attester_slashings.len() as u64 <= spec.max_attester_slashings, attester_slashings.len() as u64 <= spec.max_attester_slashings,
Invalid::MaxAttesterSlashingsExceed Invalid::MaxAttesterSlashingsExceed
); );
// Verify the `SlashableAttestation`s in parallel (these are the resource-consuming objects, not
// the `AttesterSlashing`s themselves).
let mut slashable_attestations: Vec<&SlashableAttestation> =
Vec::with_capacity(attester_slashings.len() * 2);
for attester_slashing in attester_slashings {
slashable_attestations.push(&attester_slashing.slashable_attestation_1);
slashable_attestations.push(&attester_slashing.slashable_attestation_2);
}
// Verify slashable attestations in parallel.
slashable_attestations
.par_iter()
.enumerate()
.try_for_each(|(i, slashable_attestation)| {
verify_slashable_attestation(&state, slashable_attestation, spec)
.map_err(|e| e.into_with_index(i))
})?;
let all_slashable_attestations_have_been_checked = true;
// Gather the slashable indices and preform the final verification and update the state in series.
for (i, attester_slashing) in attester_slashings.iter().enumerate() { for (i, attester_slashing) in attester_slashings.iter().enumerate() {
let slashable_indices = verify_attester_slashing(&state, &attester_slashing, spec) let should_verify_slashable_attestations = !all_slashable_attestations_have_been_checked;
verify_attester_slashing(
&state,
&attester_slashing,
should_verify_slashable_attestations,
spec,
)
.map_err(|e| e.into_with_index(i))?;
let slashable_indices = gather_attester_slashing_indices(&state, &attester_slashing, spec)
.map_err(|e| e.into_with_index(i))?; .map_err(|e| e.into_with_index(i))?;
for i in slashable_indices { for i in slashable_indices {
state.slash_validator(i as usize, spec)?; state.slash_validator(i as usize, spec)?;
} }
@ -268,7 +288,7 @@ pub fn process_attester_slashings(
/// Returns `Ok(())` if the validation and state updates completed successfully, otherwise returns /// Returns `Ok(())` if the validation and state updates completed successfully, otherwise returns
/// an `Err` describing the invalid object or cause of failure. /// an `Err` describing the invalid object or cause of failure.
/// ///
/// Spec v0.4.0 /// Spec v0.5.0
pub fn process_attestations( pub fn process_attestations(
state: &mut BeaconState, state: &mut BeaconState,
attestations: &[Attestation], attestations: &[Attestation],
@ -278,21 +298,34 @@ pub fn process_attestations(
attestations.len() as u64 <= spec.max_attestations, attestations.len() as u64 <= spec.max_attestations,
Invalid::MaxAttestationsExceeded Invalid::MaxAttestationsExceeded
); );
for (i, attestation) in attestations.iter().enumerate() {
// Build the previous epoch cache only if required by an attestation.
if attestation.data.slot.epoch(spec.slots_per_epoch) == state.previous_epoch(spec) {
state.build_epoch_cache(RelativeEpoch::Previous, spec)?;
}
validate_attestation(state, attestation, spec).map_err(|e| e.into_with_index(i))?; // Ensure the previous epoch cache exists.
state.build_epoch_cache(RelativeEpoch::Previous, spec)?;
// Verify attestations in parallel.
attestations
.par_iter()
.enumerate()
.try_for_each(|(i, attestation)| {
validate_attestation(state, attestation, spec).map_err(|e| e.into_with_index(i))
})?;
// Update the state in series.
for attestation in attestations {
let pending_attestation = PendingAttestation { let pending_attestation = PendingAttestation {
data: attestation.data.clone(), data: attestation.data.clone(),
aggregation_bitfield: attestation.aggregation_bitfield.clone(), aggregation_bitfield: attestation.aggregation_bitfield.clone(),
custody_bitfield: attestation.custody_bitfield.clone(), custody_bitfield: attestation.custody_bitfield.clone(),
inclusion_slot: state.slot, inclusion_slot: state.slot,
}; };
state.latest_attestations.push(pending_attestation);
let attestation_epoch = attestation.data.slot.epoch(spec.slots_per_epoch);
if attestation_epoch == state.current_epoch(spec) {
state.current_epoch_attestations.push(pending_attestation)
} else if attestation_epoch == state.previous_epoch(spec) {
state.previous_epoch_attestations.push(pending_attestation)
}
} }
Ok(()) Ok(())
@ -303,7 +336,7 @@ pub fn process_attestations(
/// Returns `Ok(())` if the validation and state updates completed successfully, otherwise returns /// Returns `Ok(())` if the validation and state updates completed successfully, otherwise returns
/// an `Err` describing the invalid object or cause of failure. /// an `Err` describing the invalid object or cause of failure.
/// ///
/// Spec v0.4.0 /// Spec v0.5.0
pub fn process_deposits( pub fn process_deposits(
state: &mut BeaconState, state: &mut BeaconState,
deposits: &[Deposit], deposits: &[Deposit],
@ -313,24 +346,54 @@ pub fn process_deposits(
deposits.len() as u64 <= spec.max_deposits, deposits.len() as u64 <= spec.max_deposits,
Invalid::MaxDepositsExceeded Invalid::MaxDepositsExceeded
); );
for (i, deposit) in deposits.iter().enumerate() {
verify_deposit(state, deposit, VERIFY_DEPOSIT_MERKLE_PROOFS, spec)
.map_err(|e| e.into_with_index(i))?;
state // Verify deposits in parallel.
.process_deposit( deposits
deposit.deposit_data.deposit_input.pubkey.clone(), .par_iter()
deposit.deposit_data.amount, .enumerate()
deposit .try_for_each(|(i, deposit)| {
.deposit_data verify_deposit(state, deposit, VERIFY_DEPOSIT_MERKLE_PROOFS, spec)
.deposit_input .map_err(|e| e.into_with_index(i))
.proof_of_possession })?;
.clone(),
deposit.deposit_data.deposit_input.withdrawal_credentials, // Check `state.deposit_index` and update the state in series.
None, for (i, deposit) in deposits.iter().enumerate() {
spec, verify_deposit_index(state, deposit).map_err(|e| e.into_with_index(i))?;
)
.map_err(|_| Error::Invalid(Invalid::DepositProcessingFailed(i)))?; // Ensure the state's pubkey cache is fully up-to-date, it will be used to check to see if the
// depositing validator already exists in the registry.
state.update_pubkey_cache()?;
// Get an `Option<u64>` where `u64` is the validator index if this deposit public key
// already exists in the beacon_state.
//
// This function also verifies the withdrawal credentials.
let validator_index =
get_existing_validator_index(state, deposit).map_err(|e| e.into_with_index(i))?;
let deposit_data = &deposit.deposit_data;
let deposit_input = &deposit.deposit_data.deposit_input;
if let Some(index) = validator_index {
// Update the existing validator balance.
safe_add_assign!(
state.validator_balances[index as usize],
deposit_data.amount
);
} else {
// Create a new validator.
let validator = Validator {
pubkey: deposit_input.pubkey.clone(),
withdrawal_credentials: deposit_input.withdrawal_credentials.clone(),
activation_epoch: spec.far_future_epoch,
exit_epoch: spec.far_future_epoch,
withdrawable_epoch: spec.far_future_epoch,
initiated_exit: false,
slashed: false,
};
state.validator_registry.push(validator);
state.validator_balances.push(deposit_data.amount);
}
state.deposit_index += 1; state.deposit_index += 1;
} }
@ -343,7 +406,7 @@ pub fn process_deposits(
/// Returns `Ok(())` if the validation and state updates completed successfully, otherwise returns /// Returns `Ok(())` if the validation and state updates completed successfully, otherwise returns
/// an `Err` describing the invalid object or cause of failure. /// an `Err` describing the invalid object or cause of failure.
/// ///
/// Spec v0.4.0 /// Spec v0.5.0
pub fn process_exits( pub fn process_exits(
state: &mut BeaconState, state: &mut BeaconState,
voluntary_exits: &[VoluntaryExit], voluntary_exits: &[VoluntaryExit],
@ -353,9 +416,17 @@ pub fn process_exits(
voluntary_exits.len() as u64 <= spec.max_voluntary_exits, voluntary_exits.len() as u64 <= spec.max_voluntary_exits,
Invalid::MaxExitsExceeded Invalid::MaxExitsExceeded
); );
for (i, exit) in voluntary_exits.iter().enumerate() {
verify_exit(&state, exit, spec).map_err(|e| e.into_with_index(i))?;
// Verify exits in parallel.
voluntary_exits
.par_iter()
.enumerate()
.try_for_each(|(i, exit)| {
verify_exit(&state, exit, spec).map_err(|e| e.into_with_index(i))
})?;
// Update the state in series.
for exit in voluntary_exits {
state.initiate_validator_exit(exit.validator_index as usize); state.initiate_validator_exit(exit.validator_index as usize);
} }
@ -367,7 +438,7 @@ pub fn process_exits(
/// Returns `Ok(())` if the validation and state updates completed successfully, otherwise returns /// Returns `Ok(())` if the validation and state updates completed successfully, otherwise returns
/// an `Err` describing the invalid object or cause of failure. /// an `Err` describing the invalid object or cause of failure.
/// ///
/// Spec v0.4.0 /// Spec v0.5.0
pub fn process_transfers( pub fn process_transfers(
state: &mut BeaconState, state: &mut BeaconState,
transfers: &[Transfer], transfers: &[Transfer],
@ -377,8 +448,15 @@ pub fn process_transfers(
transfers.len() as u64 <= spec.max_transfers, transfers.len() as u64 <= spec.max_transfers,
Invalid::MaxTransfersExceed Invalid::MaxTransfersExceed
); );
transfers
.par_iter()
.enumerate()
.try_for_each(|(i, transfer)| {
verify_transfer(&state, transfer, spec).map_err(|e| e.into_with_index(i))
})?;
for (i, transfer) in transfers.iter().enumerate() { for (i, transfer) in transfers.iter().enumerate() {
verify_transfer(&state, transfer, spec).map_err(|e| e.into_with_index(i))?;
execute_transfer(state, transfer, spec).map_err(|e| e.into_with_index(i))?; execute_transfer(state, transfer, spec).map_err(|e| e.into_with_index(i))?;
} }

View File

@ -67,6 +67,7 @@ impl_from_beacon_state_error!(BlockProcessingError);
#[derive(Debug, PartialEq)] #[derive(Debug, PartialEq)]
pub enum BlockInvalid { pub enum BlockInvalid {
StateSlotMismatch, StateSlotMismatch,
ParentBlockRootMismatch,
BadSignature, BadSignature,
BadRandaoSignature, BadRandaoSignature,
MaxAttestationsExceeded, MaxAttestationsExceeded,
@ -76,6 +77,10 @@ pub enum BlockInvalid {
MaxExitsExceeded, MaxExitsExceeded,
MaxTransfersExceed, MaxTransfersExceed,
AttestationInvalid(usize, AttestationInvalid), AttestationInvalid(usize, AttestationInvalid),
/// A `SlashableAttestation` inside an `AttesterSlashing` was invalid.
///
/// To determine the offending `AttesterSlashing` index, divide the error message `usize` by two.
SlashableAttestationInvalid(usize, SlashableAttestationInvalid),
AttesterSlashingInvalid(usize, AttesterSlashingInvalid), AttesterSlashingInvalid(usize, AttesterSlashingInvalid),
ProposerSlashingInvalid(usize, ProposerSlashingInvalid), ProposerSlashingInvalid(usize, ProposerSlashingInvalid),
DepositInvalid(usize, DepositInvalid), DepositInvalid(usize, DepositInvalid),
@ -108,45 +113,55 @@ pub enum AttestationValidationError {
#[derive(Debug, PartialEq)] #[derive(Debug, PartialEq)]
pub enum AttestationInvalid { pub enum AttestationInvalid {
/// Attestation references a pre-genesis slot. /// Attestation references a pre-genesis slot.
/// PreGenesis { genesis: Slot, attestation: Slot },
/// (genesis_slot, attestation_slot)
PreGenesis(Slot, Slot),
/// Attestation included before the inclusion delay. /// Attestation included before the inclusion delay.
/// IncludedTooEarly {
/// (state_slot, inclusion_delay, attestation_slot) state: Slot,
IncludedTooEarly(Slot, u64, Slot), delay: u64,
attestation: Slot,
},
/// Attestation slot is too far in the past to be included in a block. /// Attestation slot is too far in the past to be included in a block.
/// IncludedTooLate { state: Slot, attestation: Slot },
/// (state_slot, attestation_slot)
IncludedTooLate(Slot, Slot),
/// Attestation justified epoch does not match the states current or previous justified epoch. /// Attestation justified epoch does not match the states current or previous justified epoch.
/// ///
/// (attestation_justified_epoch, state_epoch, used_previous_epoch) /// `is_current` is `true` if the attestation was compared to the
WrongJustifiedEpoch(Epoch, Epoch, bool), /// `state.current_justified_epoch`, `false` if compared to `state.previous_justified_epoch`.
WrongJustifiedEpoch {
state: Epoch,
attestation: Epoch,
is_current: bool,
},
/// Attestation justified epoch root does not match root known to the state. /// Attestation justified epoch root does not match root known to the state.
/// ///
/// (state_justified_root, attestation_justified_root) /// `is_current` is `true` if the attestation was compared to the
WrongJustifiedRoot(Hash256, Hash256), /// `state.current_justified_epoch`, `false` if compared to `state.previous_justified_epoch`.
WrongJustifiedRoot {
state: Hash256,
attestation: Hash256,
is_current: bool,
},
/// Attestation crosslink root does not match the state crosslink root for the attestations /// Attestation crosslink root does not match the state crosslink root for the attestations
/// slot. /// slot.
BadLatestCrosslinkRoot, BadPreviousCrosslink,
/// The custody bitfield has some bits set `true`. This is not allowed in phase 0. /// The custody bitfield has some bits set `true`. This is not allowed in phase 0.
CustodyBitfieldHasSetBits, CustodyBitfieldHasSetBits,
/// There are no set bits on the attestation -- an attestation must be signed by at least one /// There are no set bits on the attestation -- an attestation must be signed by at least one
/// validator. /// validator.
AggregationBitfieldIsEmpty, AggregationBitfieldIsEmpty,
/// The custody bitfield length is not the smallest possible size to represent the committee. /// The custody bitfield length is not the smallest possible size to represent the committee.
/// BadCustodyBitfieldLength {
/// (committee_len, bitfield_len) committee_len: usize,
BadCustodyBitfieldLength(usize, usize), bitfield_len: usize,
},
/// The aggregation bitfield length is not the smallest possible size to represent the committee. /// The aggregation bitfield length is not the smallest possible size to represent the committee.
/// BadAggregationBitfieldLength {
/// (committee_len, bitfield_len) committee_len: usize,
BadAggregationBitfieldLength(usize, usize), bitfield_len: usize,
/// There was no known committee for the given shard in the given slot. },
/// /// There was no known committee in this `epoch` for the given shard and slot.
/// (attestation_data_shard, attestation_data_slot) NoCommitteeForShard { shard: u64, slot: Slot },
NoCommitteeForShard(u64, Slot), /// The validator index was unknown.
UnknownValidator(u64),
/// The attestation signature verification failed. /// The attestation signature verification failed.
BadSignature, BadSignature,
/// The shard block root was not set to zero. This is a phase 0 requirement. /// The shard block root was not set to zero. This is a phase 0 requirement.
@ -182,6 +197,8 @@ pub enum AttesterSlashingInvalid {
SlashableAttestation2Invalid(SlashableAttestationInvalid), SlashableAttestation2Invalid(SlashableAttestationInvalid),
/// The validator index is unknown. One cannot slash one who does not exist. /// The validator index is unknown. One cannot slash one who does not exist.
UnknownValidator(u64), UnknownValidator(u64),
/// The specified validator has already been withdrawn.
ValidatorAlreadyWithdrawn(u64),
/// There were no indices able to be slashed. /// There were no indices able to be slashed.
NoSlashableIndices, NoSlashableIndices,
} }
@ -233,6 +250,11 @@ impl Into<SlashableAttestationInvalid> for SlashableAttestationValidationError {
} }
} }
impl_into_with_index_without_beacon_error!(
SlashableAttestationValidationError,
SlashableAttestationInvalid
);
/* /*
* `ProposerSlashing` Validation * `ProposerSlashing` Validation
*/ */
@ -253,16 +275,12 @@ pub enum ProposerSlashingInvalid {
/// ///
/// (proposal_1_slot, proposal_2_slot) /// (proposal_1_slot, proposal_2_slot)
ProposalSlotMismatch(Slot, Slot), ProposalSlotMismatch(Slot, Slot),
/// The two proposal have different shards. /// The proposals are identical and therefore not slashable.
/// ProposalsIdentical,
/// (proposal_1_shard, proposal_2_shard)
ProposalShardMismatch(u64, u64),
/// The two proposal have different block roots.
///
/// (proposal_1_root, proposal_2_root)
ProposalBlockRootMismatch(Hash256, Hash256),
/// The specified proposer has already been slashed. /// The specified proposer has already been slashed.
ProposerAlreadySlashed, ProposerAlreadySlashed,
/// The specified proposer has already been withdrawn.
ProposerAlreadyWithdrawn(u64),
/// The first proposal signature was invalid. /// The first proposal signature was invalid.
BadProposal1Signature, BadProposal1Signature,
/// The second proposal signature was invalid. /// The second proposal signature was invalid.
@ -283,21 +301,27 @@ impl_into_with_index_without_beacon_error!(
pub enum DepositValidationError { pub enum DepositValidationError {
/// Validation completed successfully and the object is invalid. /// Validation completed successfully and the object is invalid.
Invalid(DepositInvalid), Invalid(DepositInvalid),
/// Encountered a `BeaconStateError` whilst attempting to determine validity.
BeaconStateError(BeaconStateError),
} }
/// Describes why an object is invalid. /// Describes why an object is invalid.
#[derive(Debug, PartialEq)] #[derive(Debug, PartialEq)]
pub enum DepositInvalid { pub enum DepositInvalid {
/// The deposit index does not match the state index. /// The deposit index does not match the state index.
/// BadIndex { state: u64, deposit: u64 },
/// (state_index, deposit_index) /// The proof-of-possession does not match the given pubkey.
BadIndex(u64, u64), BadProofOfPossession,
/// The withdrawal credentials for the depositing validator did not match the withdrawal
/// credentials of an existing validator with the same public key.
BadWithdrawalCredentials,
/// The specified `branch` and `index` did not form a valid proof that the deposit is included /// The specified `branch` and `index` did not form a valid proof that the deposit is included
/// in the eth1 deposit root. /// in the eth1 deposit root.
BadMerkleProof, BadMerkleProof,
} }
impl_into_with_index_without_beacon_error!(DepositValidationError, DepositInvalid); impl_from_beacon_state_error!(DepositValidationError);
impl_into_with_index_with_beacon_error!(DepositValidationError, DepositInvalid);
/* /*
* `Exit` Validation * `Exit` Validation
@ -315,11 +339,14 @@ pub enum ExitValidationError {
pub enum ExitInvalid { pub enum ExitInvalid {
/// The specified validator is not in the state's validator registry. /// The specified validator is not in the state's validator registry.
ValidatorUnknown(u64), ValidatorUnknown(u64),
AlreadyExited, /// The specified validator has a non-maximum exit epoch.
AlreadyExited(u64),
/// The specified validator has already initiated exit.
AlreadyInitiatedExited(u64),
/// The exit is for a future epoch. /// The exit is for a future epoch.
/// FutureEpoch { state: Epoch, exit: Epoch },
/// (state_epoch, exit_epoch) /// The validator has not been active for long enough.
FutureEpoch(Epoch, Epoch), TooYoungToLeave { lifespan: Epoch, expected: u64 },
/// The exit signature was not signed by the validator. /// The exit signature was not signed by the validator.
BadSignature, BadSignature,
} }

View File

@ -8,7 +8,7 @@ use types::*;
/// ///
/// Returns `Ok(())` if the `Attestation` is valid, otherwise indicates the reason for invalidity. /// Returns `Ok(())` if the `Attestation` is valid, otherwise indicates the reason for invalidity.
/// ///
/// Spec v0.4.0 /// Spec v0.5.0
pub fn validate_attestation( pub fn validate_attestation(
state: &BeaconState, state: &BeaconState,
attestation: &Attestation, attestation: &Attestation,
@ -22,7 +22,7 @@ pub fn validate_attestation(
/// ///
/// Returns `Ok(())` if the `Attestation` is valid, otherwise indicates the reason for invalidity. /// Returns `Ok(())` if the `Attestation` is valid, otherwise indicates the reason for invalidity.
/// ///
/// Spec v0.4.0 /// Spec v0.5.0
pub fn validate_attestation_without_signature( pub fn validate_attestation_without_signature(
state: &BeaconState, state: &BeaconState,
attestation: &Attestation, attestation: &Attestation,
@ -35,74 +35,83 @@ pub fn validate_attestation_without_signature(
/// given state, optionally validating the aggregate signature. /// given state, optionally validating the aggregate signature.
/// ///
/// ///
/// Spec v0.4.0 /// Spec v0.5.0
fn validate_attestation_signature_optional( fn validate_attestation_signature_optional(
state: &BeaconState, state: &BeaconState,
attestation: &Attestation, attestation: &Attestation,
spec: &ChainSpec, spec: &ChainSpec,
verify_signature: bool, verify_signature: bool,
) -> Result<(), Error> { ) -> Result<(), Error> {
// Verify that `attestation.data.slot >= GENESIS_SLOT`. let state_epoch = state.slot.epoch(spec.slots_per_epoch);
let attestation_epoch = attestation.data.slot.epoch(spec.slots_per_epoch);
// Can't submit pre-historic attestations.
verify!( verify!(
attestation.data.slot >= spec.genesis_slot, attestation.data.slot >= spec.genesis_slot,
Invalid::PreGenesis(spec.genesis_slot, attestation.data.slot) Invalid::PreGenesis {
genesis: spec.genesis_slot,
attestation: attestation.data.slot
}
); );
// Verify that `attestation.data.slot + MIN_ATTESTATION_INCLUSION_DELAY <= state.slot`. // Can't submit attestations too far in history.
verify!(
state.slot <= attestation.data.slot + spec.slots_per_epoch,
Invalid::IncludedTooLate {
state: spec.genesis_slot,
attestation: attestation.data.slot
}
);
// Can't submit attestation too quickly.
verify!( verify!(
attestation.data.slot + spec.min_attestation_inclusion_delay <= state.slot, attestation.data.slot + spec.min_attestation_inclusion_delay <= state.slot,
Invalid::IncludedTooEarly( Invalid::IncludedTooEarly {
state.slot, state: state.slot,
spec.min_attestation_inclusion_delay, delay: spec.min_attestation_inclusion_delay,
attestation.data.slot attestation: attestation.data.slot
) }
); );
// Verify that `state.slot < attestation.data.slot + SLOTS_PER_EPOCH`. // Verify the justified epoch and root is correct.
verify!( if attestation_epoch >= state_epoch {
state.slot < attestation.data.slot + spec.slots_per_epoch,
Invalid::IncludedTooLate(state.slot, attestation.data.slot)
);
// Verify that `attestation.data.justified_epoch` is equal to `state.justified_epoch` if
// `slot_to_epoch(attestation.data.slot + 1) >= get_current_epoch(state) else
// state.previous_justified_epoch`.
if (attestation.data.slot + 1).epoch(spec.slots_per_epoch) >= state.current_epoch(spec) {
verify!( verify!(
attestation.data.justified_epoch == state.justified_epoch, attestation.data.source_epoch == state.current_justified_epoch,
Invalid::WrongJustifiedEpoch( Invalid::WrongJustifiedEpoch {
attestation.data.justified_epoch, state: state.current_justified_epoch,
state.justified_epoch, attestation: attestation.data.source_epoch,
false is_current: true,
) }
);
verify!(
attestation.data.source_root == state.current_justified_root,
Invalid::WrongJustifiedRoot {
state: state.current_justified_root,
attestation: attestation.data.source_root,
is_current: true,
}
); );
} else { } else {
verify!( verify!(
attestation.data.justified_epoch == state.previous_justified_epoch, attestation.data.source_epoch == state.previous_justified_epoch,
Invalid::WrongJustifiedEpoch( Invalid::WrongJustifiedEpoch {
attestation.data.justified_epoch, state: state.previous_justified_epoch,
state.previous_justified_epoch, attestation: attestation.data.source_epoch,
true is_current: false,
) }
);
verify!(
attestation.data.source_root == state.previous_justified_root,
Invalid::WrongJustifiedRoot {
state: state.previous_justified_root,
attestation: attestation.data.source_root,
is_current: true,
}
); );
} }
// Verify that `attestation.data.justified_block_root` is equal to `get_block_root(state, // Check that the crosslink data is valid.
// get_epoch_start_slot(attestation.data.justified_epoch))`. //
let justified_block_root = *state
.get_block_root(
attestation
.data
.justified_epoch
.start_slot(spec.slots_per_epoch),
&spec,
)
.ok_or(BeaconStateError::InsufficientBlockRoots)?;
verify!(
attestation.data.justified_block_root == justified_block_root,
Invalid::WrongJustifiedRoot(justified_block_root, attestation.data.justified_block_root)
);
// Verify that either: // Verify that either:
// //
// (i)`state.latest_crosslinks[attestation.data.shard] == attestation.data.latest_crosslink`, // (i)`state.latest_crosslinks[attestation.data.shard] == attestation.data.latest_crosslink`,
@ -115,65 +124,72 @@ fn validate_attestation_signature_optional(
epoch: attestation.data.slot.epoch(spec.slots_per_epoch), epoch: attestation.data.slot.epoch(spec.slots_per_epoch),
}; };
verify!( verify!(
(attestation.data.latest_crosslink (attestation.data.previous_crosslink
== state.latest_crosslinks[attestation.data.shard as usize]) == state.latest_crosslinks[attestation.data.shard as usize])
| (state.latest_crosslinks[attestation.data.shard as usize] == potential_crosslink), | (state.latest_crosslinks[attestation.data.shard as usize] == potential_crosslink),
Invalid::BadLatestCrosslinkRoot Invalid::BadPreviousCrosslink
); );
// Get the committee for this attestation // Attestation must be non-empty!
let (committee, _shard) = state
.get_crosslink_committees_at_slot(attestation.data.slot, spec)?
.iter()
.find(|(_committee, shard)| *shard == attestation.data.shard)
.ok_or_else(|| {
Error::Invalid(Invalid::NoCommitteeForShard(
attestation.data.shard,
attestation.data.slot,
))
})?;
// Custody bitfield is all zeros (phase 0 requirement).
verify!(
attestation.custody_bitfield.num_set_bits() == 0,
Invalid::CustodyBitfieldHasSetBits
);
// Custody bitfield length is correct.
verify!(
verify_bitfield_length(&attestation.custody_bitfield, committee.len()),
Invalid::BadCustodyBitfieldLength(committee.len(), attestation.custody_bitfield.len())
);
// Aggregation bitfield isn't empty.
verify!( verify!(
attestation.aggregation_bitfield.num_set_bits() != 0, attestation.aggregation_bitfield.num_set_bits() != 0,
Invalid::AggregationBitfieldIsEmpty Invalid::AggregationBitfieldIsEmpty
); );
// Custody bitfield must be empty (be be removed in phase 1)
verify!(
attestation.custody_bitfield.num_set_bits() == 0,
Invalid::CustodyBitfieldHasSetBits
);
// Get the committee for the specific shard that this attestation is for.
let crosslink_committee = state
.get_crosslink_committees_at_slot(attestation.data.slot, spec)?
.iter()
.find(|c| c.shard == attestation.data.shard)
.ok_or_else(|| {
Error::Invalid(Invalid::NoCommitteeForShard {
shard: attestation.data.shard,
slot: attestation.data.slot,
})
})?;
let committee = &crosslink_committee.committee;
// Custody bitfield length is correct.
//
// This is not directly in the spec, but it is inferred.
verify!(
verify_bitfield_length(&attestation.custody_bitfield, committee.len()),
Invalid::BadCustodyBitfieldLength {
committee_len: committee.len(),
bitfield_len: attestation.custody_bitfield.len()
}
);
// Aggregation bitfield length is correct. // Aggregation bitfield length is correct.
//
// This is not directly in the spec, but it is inferred.
verify!( verify!(
verify_bitfield_length(&attestation.aggregation_bitfield, committee.len()), verify_bitfield_length(&attestation.aggregation_bitfield, committee.len()),
Invalid::BadAggregationBitfieldLength( Invalid::BadAggregationBitfieldLength {
committee.len(), committee_len: committee.len(),
attestation.aggregation_bitfield.len() bitfield_len: attestation.custody_bitfield.len()
) }
); );
if verify_signature { if verify_signature {
let attestation_epoch = attestation.data.slot.epoch(spec.slots_per_epoch); let attestation_epoch = attestation.data.slot.epoch(spec.slots_per_epoch);
verify!( verify_attestation_signature(
verify_attestation_signature( state,
state, committee,
committee, attestation_epoch,
attestation_epoch, &attestation.aggregation_bitfield,
&attestation.custody_bitfield, &attestation.custody_bitfield,
&attestation.data, &attestation.data,
&attestation.aggregate_signature, &attestation.aggregate_signature,
spec spec,
), )?;
Invalid::BadSignature
);
} }
// [TO BE REMOVED IN PHASE 1] Verify that `attestation.data.crosslink_data_root == ZERO_HASH`. // Crosslink data root is zero (to be removed in phase 1).
verify!( verify!(
attestation.data.crosslink_data_root == spec.zero_hash, attestation.data.crosslink_data_root == spec.zero_hash,
Invalid::ShardBlockRootNotZero Invalid::ShardBlockRootNotZero
@ -190,35 +206,50 @@ fn validate_attestation_signature_optional(
/// - `custody_bitfield` does not have a bit for each index of `committee`. /// - `custody_bitfield` does not have a bit for each index of `committee`.
/// - A `validator_index` in `committee` is not in `state.validator_registry`. /// - A `validator_index` in `committee` is not in `state.validator_registry`.
/// ///
/// Spec v0.4.0 /// Spec v0.5.0
fn verify_attestation_signature( fn verify_attestation_signature(
state: &BeaconState, state: &BeaconState,
committee: &[usize], committee: &[usize],
attestation_epoch: Epoch, attestation_epoch: Epoch,
aggregation_bitfield: &Bitfield,
custody_bitfield: &Bitfield, custody_bitfield: &Bitfield,
attestation_data: &AttestationData, attestation_data: &AttestationData,
aggregate_signature: &AggregateSignature, aggregate_signature: &AggregateSignature,
spec: &ChainSpec, spec: &ChainSpec,
) -> bool { ) -> Result<(), Error> {
let mut aggregate_pubs = vec![AggregatePublicKey::new(); 2]; let mut aggregate_pubs = vec![AggregatePublicKey::new(); 2];
let mut message_exists = vec![false; 2]; let mut message_exists = vec![false; 2];
for (i, v) in committee.iter().enumerate() { for (i, v) in committee.iter().enumerate() {
let custody_bit = match custody_bitfield.get(i) { let validator_signed = aggregation_bitfield.get(i).map_err(|_| {
Ok(bit) => bit, Error::Invalid(Invalid::BadAggregationBitfieldLength {
// Invalidate signature if custody_bitfield.len() < committee committee_len: committee.len(),
Err(_) => return false, bitfield_len: aggregation_bitfield.len(),
}; })
})?;
message_exists[custody_bit as usize] = true; if validator_signed {
let custody_bit: bool = match custody_bitfield.get(i) {
Ok(bit) => bit,
// Invalidate signature if custody_bitfield.len() < committee
Err(_) => {
return Err(Error::Invalid(Invalid::BadCustodyBitfieldLength {
committee_len: committee.len(),
bitfield_len: aggregation_bitfield.len(),
}));
}
};
match state.validator_registry.get(*v as usize) { message_exists[custody_bit as usize] = true;
Some(validator) => {
aggregate_pubs[custody_bit as usize].add(&validator.pubkey); match state.validator_registry.get(*v as usize) {
} Some(validator) => {
// Invalidate signature if validator index is unknown. aggregate_pubs[custody_bit as usize].add(&validator.pubkey);
None => return false, }
}; // Return error if validator index is unknown.
None => return Err(Error::BeaconStateError(BeaconStateError::UnknownValidator)),
};
}
} }
// Message when custody bitfield is `false` // Message when custody bitfield is `false`
@ -251,5 +282,10 @@ fn verify_attestation_signature(
let domain = spec.get_domain(attestation_epoch, Domain::Attestation, &state.fork); let domain = spec.get_domain(attestation_epoch, Domain::Attestation, &state.fork);
aggregate_signature.verify_multiple(&messages[..], domain, &keys[..]) verify!(
aggregate_signature.verify_multiple(&messages[..], domain, &keys[..]),
Invalid::BadSignature
);
Ok(())
} }

View File

@ -7,12 +7,13 @@ use types::*;
/// ///
/// Returns `Ok(())` if the `AttesterSlashing` is valid, otherwise indicates the reason for invalidity. /// Returns `Ok(())` if the `AttesterSlashing` is valid, otherwise indicates the reason for invalidity.
/// ///
/// Spec v0.4.0 /// Spec v0.5.0
pub fn verify_attester_slashing( pub fn verify_attester_slashing(
state: &BeaconState, state: &BeaconState,
attester_slashing: &AttesterSlashing, attester_slashing: &AttesterSlashing,
should_verify_slashable_attestations: bool,
spec: &ChainSpec, spec: &ChainSpec,
) -> Result<Vec<u64>, Error> { ) -> Result<(), Error> {
let slashable_attestation_1 = &attester_slashing.slashable_attestation_1; let slashable_attestation_1 = &attester_slashing.slashable_attestation_1;
let slashable_attestation_2 = &attester_slashing.slashable_attestation_2; let slashable_attestation_2 = &attester_slashing.slashable_attestation_2;
@ -26,24 +27,51 @@ pub fn verify_attester_slashing(
Invalid::NotSlashable Invalid::NotSlashable
); );
verify_slashable_attestation(state, &slashable_attestation_1, spec) if should_verify_slashable_attestations {
.map_err(|e| Error::Invalid(Invalid::SlashableAttestation1Invalid(e.into())))?; verify_slashable_attestation(state, &slashable_attestation_1, spec)
verify_slashable_attestation(state, &slashable_attestation_2, spec) .map_err(|e| Error::Invalid(Invalid::SlashableAttestation1Invalid(e.into())))?;
.map_err(|e| Error::Invalid(Invalid::SlashableAttestation2Invalid(e.into())))?; verify_slashable_attestation(state, &slashable_attestation_2, spec)
.map_err(|e| Error::Invalid(Invalid::SlashableAttestation2Invalid(e.into())))?;
}
let mut slashable_indices = vec![]; Ok(())
}
/// For a given attester slashing, return the indices able to be slashed.
///
/// Returns Ok(indices) if `indices.len() > 0`.
///
/// Spec v0.5.0
pub fn gather_attester_slashing_indices(
state: &BeaconState,
attester_slashing: &AttesterSlashing,
spec: &ChainSpec,
) -> Result<Vec<u64>, Error> {
let slashable_attestation_1 = &attester_slashing.slashable_attestation_1;
let slashable_attestation_2 = &attester_slashing.slashable_attestation_2;
let mut slashable_indices = Vec::with_capacity(spec.max_indices_per_slashable_vote);
for i in &slashable_attestation_1.validator_indices { for i in &slashable_attestation_1.validator_indices {
let validator = state let validator = state
.validator_registry .validator_registry
.get(*i as usize) .get(*i as usize)
.ok_or_else(|| Error::Invalid(Invalid::UnknownValidator(*i)))?; .ok_or_else(|| Error::Invalid(Invalid::UnknownValidator(*i)))?;
if slashable_attestation_1.validator_indices.contains(&i) & !validator.slashed { if slashable_attestation_2.validator_indices.contains(&i) & !validator.slashed {
// TODO: verify that we should reject any slashable attestation which includes a
// withdrawn validator. PH has asked the question on gitter, awaiting response.
verify!(
validator.withdrawable_epoch > state.slot.epoch(spec.slots_per_epoch),
Invalid::ValidatorAlreadyWithdrawn(*i)
);
slashable_indices.push(*i); slashable_indices.push(*i);
} }
} }
verify!(!slashable_indices.is_empty(), Invalid::NoSlashableIndices); verify!(!slashable_indices.is_empty(), Invalid::NoSlashableIndices);
slashable_indices.shrink_to_fit();
Ok(slashable_indices) Ok(slashable_indices)
} }

View File

@ -10,9 +10,12 @@ use types::*;
/// ///
/// Returns `Ok(())` if the `Deposit` is valid, otherwise indicates the reason for invalidity. /// Returns `Ok(())` if the `Deposit` is valid, otherwise indicates the reason for invalidity.
/// ///
/// This function _does not_ check `state.deposit_index` so this function may be run in parallel.
/// See the `verify_deposit_index` function for this.
///
/// Note: this function is incomplete. /// Note: this function is incomplete.
/// ///
/// Spec v0.4.0 /// Spec v0.5.0
pub fn verify_deposit( pub fn verify_deposit(
state: &BeaconState, state: &BeaconState,
deposit: &Deposit, deposit: &Deposit,
@ -20,8 +23,15 @@ pub fn verify_deposit(
spec: &ChainSpec, spec: &ChainSpec,
) -> Result<(), Error> { ) -> Result<(), Error> {
verify!( verify!(
deposit.index == state.deposit_index, deposit
Invalid::BadIndex(state.deposit_index, deposit.index) .deposit_data
.deposit_input
.validate_proof_of_possession(
state.slot.epoch(spec.slots_per_epoch),
&state.fork,
spec
),
Invalid::BadProofOfPossession
); );
if verify_merkle_branch { if verify_merkle_branch {
@ -34,14 +44,58 @@ pub fn verify_deposit(
Ok(()) Ok(())
} }
/// Verify that the `Deposit` index is correct.
///
/// Spec v0.5.0
pub fn verify_deposit_index(state: &BeaconState, deposit: &Deposit) -> Result<(), Error> {
verify!(
deposit.index == state.deposit_index,
Invalid::BadIndex {
state: state.deposit_index,
deposit: deposit.index
}
);
Ok(())
}
/// Returns a `Some(validator index)` if a pubkey already exists in the `validator_registry`,
/// otherwise returns `None`.
///
/// ## Errors
///
/// Errors if the state's `pubkey_cache` is not current.
pub fn get_existing_validator_index(
state: &BeaconState,
deposit: &Deposit,
) -> Result<Option<u64>, Error> {
let deposit_input = &deposit.deposit_data.deposit_input;
let validator_index = state
.get_validator_index(&deposit_input.pubkey)?
.and_then(|i| Some(i));
match validator_index {
None => Ok(None),
Some(index) => {
verify!(
deposit_input.withdrawal_credentials
== state.validator_registry[index as usize].withdrawal_credentials,
Invalid::BadWithdrawalCredentials
);
Ok(Some(index as u64))
}
}
}
/// Verify that a deposit is included in the state's eth1 deposit root. /// Verify that a deposit is included in the state's eth1 deposit root.
/// ///
/// Spec v0.4.0 /// Spec v0.5.0
fn verify_deposit_merkle_proof(state: &BeaconState, deposit: &Deposit, spec: &ChainSpec) -> bool { fn verify_deposit_merkle_proof(state: &BeaconState, deposit: &Deposit, spec: &ChainSpec) -> bool {
let leaf = hash(&get_serialized_deposit_data(deposit)); let leaf = hash(&get_serialized_deposit_data(deposit));
verify_merkle_proof( verify_merkle_proof(
Hash256::from_slice(&leaf), Hash256::from_slice(&leaf),
&deposit.branch, &deposit.proof,
spec.deposit_contract_tree_depth as usize, spec.deposit_contract_tree_depth as usize,
deposit.index as usize, deposit.index as usize,
state.latest_eth1_data.deposit_root, state.latest_eth1_data.deposit_root,
@ -50,7 +104,7 @@ fn verify_deposit_merkle_proof(state: &BeaconState, deposit: &Deposit, spec: &Ch
/// Helper struct for easily getting the serialized data generated by the deposit contract. /// Helper struct for easily getting the serialized data generated by the deposit contract.
/// ///
/// Spec v0.4.0 /// Spec v0.5.0
#[derive(Encode)] #[derive(Encode)]
struct SerializedDepositData { struct SerializedDepositData {
amount: u64, amount: u64,
@ -61,7 +115,7 @@ struct SerializedDepositData {
/// Return the serialized data generated by the deposit contract that is used to generate the /// Return the serialized data generated by the deposit contract that is used to generate the
/// merkle proof. /// merkle proof.
/// ///
/// Spec v0.4.0 /// Spec v0.5.0
fn get_serialized_deposit_data(deposit: &Deposit) -> Vec<u8> { fn get_serialized_deposit_data(deposit: &Deposit) -> Vec<u8> {
let serialized_deposit_data = SerializedDepositData { let serialized_deposit_data = SerializedDepositData {
amount: deposit.deposit_data.amount, amount: deposit.deposit_data.amount,

View File

@ -7,7 +7,7 @@ use types::*;
/// ///
/// Returns `Ok(())` if the `Exit` is valid, otherwise indicates the reason for invalidity. /// Returns `Ok(())` if the `Exit` is valid, otherwise indicates the reason for invalidity.
/// ///
/// Spec v0.4.0 /// Spec v0.5.0
pub fn verify_exit( pub fn verify_exit(
state: &BeaconState, state: &BeaconState,
exit: &VoluntaryExit, exit: &VoluntaryExit,
@ -18,15 +18,35 @@ pub fn verify_exit(
.get(exit.validator_index as usize) .get(exit.validator_index as usize)
.ok_or_else(|| Error::Invalid(Invalid::ValidatorUnknown(exit.validator_index)))?; .ok_or_else(|| Error::Invalid(Invalid::ValidatorUnknown(exit.validator_index)))?;
// Verify that the validator has not yet exited.
verify!( verify!(
validator.exit_epoch validator.exit_epoch == spec.far_future_epoch,
> state.get_delayed_activation_exit_epoch(state.current_epoch(spec), spec), Invalid::AlreadyExited(exit.validator_index)
Invalid::AlreadyExited
); );
// Verify that the validator has not yet initiated.
verify!(
!validator.initiated_exit,
Invalid::AlreadyInitiatedExited(exit.validator_index)
);
// Exits must specify an epoch when they become valid; they are not valid before then.
verify!( verify!(
state.current_epoch(spec) >= exit.epoch, state.current_epoch(spec) >= exit.epoch,
Invalid::FutureEpoch(state.current_epoch(spec), exit.epoch) Invalid::FutureEpoch {
state: state.current_epoch(spec),
exit: exit.epoch
}
);
// Must have been in the validator set long enough.
let lifespan = state.slot.epoch(spec.slots_per_epoch) - validator.activation_epoch;
verify!(
lifespan >= spec.persistent_committee_period,
Invalid::TooYoungToLeave {
lifespan,
expected: spec.persistent_committee_period,
}
); );
let message = exit.signed_root(); let message = exit.signed_root();

View File

@ -7,7 +7,7 @@ use types::*;
/// ///
/// Returns `Ok(())` if the `ProposerSlashing` is valid, otherwise indicates the reason for invalidity. /// Returns `Ok(())` if the `ProposerSlashing` is valid, otherwise indicates the reason for invalidity.
/// ///
/// Spec v0.4.0 /// Spec v0.5.0
pub fn verify_proposer_slashing( pub fn verify_proposer_slashing(
proposer_slashing: &ProposerSlashing, proposer_slashing: &ProposerSlashing,
state: &BeaconState, state: &BeaconState,
@ -21,34 +21,28 @@ pub fn verify_proposer_slashing(
})?; })?;
verify!( verify!(
proposer_slashing.proposal_1.slot == proposer_slashing.proposal_2.slot, proposer_slashing.header_1.slot == proposer_slashing.header_2.slot,
Invalid::ProposalSlotMismatch( Invalid::ProposalSlotMismatch(
proposer_slashing.proposal_1.slot, proposer_slashing.header_1.slot,
proposer_slashing.proposal_2.slot proposer_slashing.header_2.slot
) )
); );
verify!( verify!(
proposer_slashing.proposal_1.shard == proposer_slashing.proposal_2.shard, proposer_slashing.header_1 != proposer_slashing.header_2,
Invalid::ProposalShardMismatch( Invalid::ProposalsIdentical
proposer_slashing.proposal_1.shard,
proposer_slashing.proposal_2.shard
)
);
verify!(
proposer_slashing.proposal_1.block_root != proposer_slashing.proposal_2.block_root,
Invalid::ProposalBlockRootMismatch(
proposer_slashing.proposal_1.block_root,
proposer_slashing.proposal_2.block_root
)
); );
verify!(!proposer.slashed, Invalid::ProposerAlreadySlashed); verify!(!proposer.slashed, Invalid::ProposerAlreadySlashed);
verify!( verify!(
verify_proposal_signature( proposer.withdrawable_epoch > state.slot.epoch(spec.slots_per_epoch),
&proposer_slashing.proposal_1, Invalid::ProposerAlreadyWithdrawn(proposer_slashing.proposer_index)
);
verify!(
verify_header_signature(
&proposer_slashing.header_1,
&proposer.pubkey, &proposer.pubkey,
&state.fork, &state.fork,
spec spec
@ -56,8 +50,8 @@ pub fn verify_proposer_slashing(
Invalid::BadProposal1Signature Invalid::BadProposal1Signature
); );
verify!( verify!(
verify_proposal_signature( verify_header_signature(
&proposer_slashing.proposal_2, &proposer_slashing.header_2,
&proposer.pubkey, &proposer.pubkey,
&state.fork, &state.fork,
spec spec
@ -71,17 +65,19 @@ pub fn verify_proposer_slashing(
/// Verifies the signature of a proposal. /// Verifies the signature of a proposal.
/// ///
/// Returns `true` if the signature is valid. /// Returns `true` if the signature is valid.
fn verify_proposal_signature( ///
proposal: &Proposal, /// Spec v0.5.0
fn verify_header_signature(
header: &BeaconBlockHeader,
pubkey: &PublicKey, pubkey: &PublicKey,
fork: &Fork, fork: &Fork,
spec: &ChainSpec, spec: &ChainSpec,
) -> bool { ) -> bool {
let message = proposal.signed_root(); let message = header.signed_root();
let domain = spec.get_domain( let domain = spec.get_domain(
proposal.slot.epoch(spec.slots_per_epoch), header.slot.epoch(spec.slots_per_epoch),
Domain::Proposal, Domain::BeaconBlock,
fork, fork,
); );
proposal.signature.verify(&message[..], domain, pubkey) header.signature.verify(&message[..], domain, pubkey)
} }

View File

@ -10,7 +10,7 @@ use types::*;
/// ///
/// Returns `Ok(())` if the `SlashableAttestation` is valid, otherwise indicates the reason for invalidity. /// Returns `Ok(())` if the `SlashableAttestation` is valid, otherwise indicates the reason for invalidity.
/// ///
/// Spec v0.4.0 /// Spec v0.5.0
pub fn verify_slashable_attestation( pub fn verify_slashable_attestation(
state: &BeaconState, state: &BeaconState,
slashable_attestation: &SlashableAttestation, slashable_attestation: &SlashableAttestation,

View File

@ -10,16 +10,16 @@ use types::*;
/// ///
/// Note: this function is incomplete. /// Note: this function is incomplete.
/// ///
/// Spec v0.4.0 /// Spec v0.5.0
pub fn verify_transfer( pub fn verify_transfer(
state: &BeaconState, state: &BeaconState,
transfer: &Transfer, transfer: &Transfer,
spec: &ChainSpec, spec: &ChainSpec,
) -> Result<(), Error> { ) -> Result<(), Error> {
let from_balance = *state let sender_balance = *state
.validator_balances .validator_balances
.get(transfer.from as usize) .get(transfer.sender as usize)
.ok_or_else(|| Error::Invalid(Invalid::FromValidatorUnknown(transfer.from)))?; .ok_or_else(|| Error::Invalid(Invalid::FromValidatorUnknown(transfer.sender)))?;
let total_amount = transfer let total_amount = transfer
.amount .amount
@ -27,19 +27,22 @@ pub fn verify_transfer(
.ok_or_else(|| Error::Invalid(Invalid::FeeOverflow(transfer.amount, transfer.fee)))?; .ok_or_else(|| Error::Invalid(Invalid::FeeOverflow(transfer.amount, transfer.fee)))?;
verify!( verify!(
from_balance >= transfer.amount, sender_balance >= transfer.amount,
Invalid::FromBalanceInsufficient(transfer.amount, from_balance) Invalid::FromBalanceInsufficient(transfer.amount, sender_balance)
); );
verify!( verify!(
from_balance >= transfer.fee, sender_balance >= transfer.fee,
Invalid::FromBalanceInsufficient(transfer.fee, from_balance) Invalid::FromBalanceInsufficient(transfer.fee, sender_balance)
); );
verify!( verify!(
(from_balance == total_amount) (sender_balance == total_amount)
|| (from_balance >= (total_amount + spec.min_deposit_amount)), || (sender_balance >= (total_amount + spec.min_deposit_amount)),
Invalid::InvalidResultingFromBalance(from_balance - total_amount, spec.min_deposit_amount) Invalid::InvalidResultingFromBalance(
sender_balance - total_amount,
spec.min_deposit_amount
)
); );
verify!( verify!(
@ -47,25 +50,25 @@ pub fn verify_transfer(
Invalid::StateSlotMismatch(state.slot, transfer.slot) Invalid::StateSlotMismatch(state.slot, transfer.slot)
); );
let from_validator = state let sender_validator = state
.validator_registry .validator_registry
.get(transfer.from as usize) .get(transfer.sender as usize)
.ok_or_else(|| Error::Invalid(Invalid::FromValidatorUnknown(transfer.from)))?; .ok_or_else(|| Error::Invalid(Invalid::FromValidatorUnknown(transfer.sender)))?;
let epoch = state.slot.epoch(spec.slots_per_epoch); let epoch = state.slot.epoch(spec.slots_per_epoch);
verify!( verify!(
from_validator.is_withdrawable_at(epoch) sender_validator.is_withdrawable_at(epoch)
|| from_validator.activation_epoch == spec.far_future_epoch, || sender_validator.activation_epoch == spec.far_future_epoch,
Invalid::FromValidatorIneligableForTransfer(transfer.from) Invalid::FromValidatorIneligableForTransfer(transfer.sender)
); );
let transfer_withdrawal_credentials = Hash256::from_slice( let transfer_withdrawal_credentials = Hash256::from_slice(
&get_withdrawal_credentials(&transfer.pubkey, spec.bls_withdrawal_prefix_byte)[..], &get_withdrawal_credentials(&transfer.pubkey, spec.bls_withdrawal_prefix_byte)[..],
); );
verify!( verify!(
from_validator.withdrawal_credentials == transfer_withdrawal_credentials, sender_validator.withdrawal_credentials == transfer_withdrawal_credentials,
Invalid::WithdrawalCredentialsMismatch( Invalid::WithdrawalCredentialsMismatch(
from_validator.withdrawal_credentials, sender_validator.withdrawal_credentials,
transfer_withdrawal_credentials transfer_withdrawal_credentials
) )
); );
@ -91,22 +94,23 @@ pub fn verify_transfer(
/// ///
/// Does not check that the transfer is valid, however checks for overflow in all actions. /// Does not check that the transfer is valid, however checks for overflow in all actions.
/// ///
/// Spec v0.4.0 /// Spec v0.5.0
pub fn execute_transfer( pub fn execute_transfer(
state: &mut BeaconState, state: &mut BeaconState,
transfer: &Transfer, transfer: &Transfer,
spec: &ChainSpec, spec: &ChainSpec,
) -> Result<(), Error> { ) -> Result<(), Error> {
let from_balance = *state let sender_balance = *state
.validator_balances .validator_balances
.get(transfer.from as usize) .get(transfer.sender as usize)
.ok_or_else(|| Error::Invalid(Invalid::FromValidatorUnknown(transfer.from)))?; .ok_or_else(|| Error::Invalid(Invalid::FromValidatorUnknown(transfer.sender)))?;
let to_balance = *state let recipient_balance = *state
.validator_balances .validator_balances
.get(transfer.to as usize) .get(transfer.recipient as usize)
.ok_or_else(|| Error::Invalid(Invalid::ToValidatorUnknown(transfer.to)))?; .ok_or_else(|| Error::Invalid(Invalid::ToValidatorUnknown(transfer.recipient)))?;
let proposer_index = state.get_beacon_proposer_index(state.slot, spec)?; let proposer_index =
state.get_beacon_proposer_index(state.slot, RelativeEpoch::Current, spec)?;
let proposer_balance = state.validator_balances[proposer_index]; let proposer_balance = state.validator_balances[proposer_index];
let total_amount = transfer let total_amount = transfer
@ -114,14 +118,22 @@ pub fn execute_transfer(
.checked_add(transfer.fee) .checked_add(transfer.fee)
.ok_or_else(|| Error::Invalid(Invalid::FeeOverflow(transfer.amount, transfer.fee)))?; .ok_or_else(|| Error::Invalid(Invalid::FeeOverflow(transfer.amount, transfer.fee)))?;
state.validator_balances[transfer.from as usize] = state.validator_balances[transfer.sender as usize] =
from_balance.checked_sub(total_amount).ok_or_else(|| { sender_balance.checked_sub(total_amount).ok_or_else(|| {
Error::Invalid(Invalid::FromBalanceInsufficient(total_amount, from_balance)) Error::Invalid(Invalid::FromBalanceInsufficient(
total_amount,
sender_balance,
))
})?; })?;
state.validator_balances[transfer.to as usize] = to_balance state.validator_balances[transfer.recipient as usize] = recipient_balance
.checked_add(transfer.amount) .checked_add(transfer.amount)
.ok_or_else(|| Error::Invalid(Invalid::ToBalanceOverflow(to_balance, transfer.amount)))?; .ok_or_else(|| {
Error::Invalid(Invalid::ToBalanceOverflow(
recipient_balance,
transfer.amount,
))
})?;
state.validator_balances[proposer_index] = state.validator_balances[proposer_index] =
proposer_balance.checked_add(transfer.fee).ok_or_else(|| { proposer_balance.checked_add(transfer.fee).ok_or_else(|| {

View File

@ -1,120 +1,105 @@
use attester_sets::AttesterSets;
use errors::EpochProcessingError as Error; use errors::EpochProcessingError as Error;
use inclusion_distance::{inclusion_distance, inclusion_slot};
use integer_sqrt::IntegerSquareRoot; use integer_sqrt::IntegerSquareRoot;
use log::debug; use process_validator_registry::process_validator_registry;
use rayon::prelude::*; use rayon::prelude::*;
use ssz::TreeHash; use ssz::TreeHash;
use std::collections::{HashMap, HashSet}; use std::collections::HashMap;
use std::iter::FromIterator;
use types::{validator_registry::get_active_validator_indices, *}; use types::{validator_registry::get_active_validator_indices, *};
use validator_statuses::{TotalBalances, ValidatorStatuses};
use winning_root::{winning_root, WinningRoot}; use winning_root::{winning_root, WinningRoot};
pub mod attester_sets;
pub mod errors; pub mod errors;
pub mod get_attestation_participants;
pub mod inclusion_distance; pub mod inclusion_distance;
pub mod process_validator_registry;
pub mod tests; pub mod tests;
pub mod validator_statuses;
pub mod winning_root; pub mod winning_root;
/// Maps a shard to a winning root.
///
/// It is generated during crosslink processing and later used to reward/penalize validators.
pub type WinningRootHashSet = HashMap<u64, WinningRoot>;
/// Performs per-epoch processing on some BeaconState.
///
/// Mutates the given `BeaconState`, returning early if an error is encountered. If an error is
/// returned, a state might be "half-processed" and therefore in an invalid state.
///
/// Spec v0.4.0
pub fn per_epoch_processing(state: &mut BeaconState, spec: &ChainSpec) -> Result<(), Error> { pub fn per_epoch_processing(state: &mut BeaconState, spec: &ChainSpec) -> Result<(), Error> {
let current_epoch = state.current_epoch(spec); // Ensure the previous and next epoch caches are built.
let previous_epoch = state.previous_epoch(spec);
let next_epoch = state.next_epoch(spec);
debug!(
"Starting per-epoch processing on epoch {}...",
state.current_epoch(spec)
);
// Ensure all of the caches are built.
state.build_epoch_cache(RelativeEpoch::Previous, spec)?; state.build_epoch_cache(RelativeEpoch::Previous, spec)?;
state.build_epoch_cache(RelativeEpoch::Current, spec)?; state.build_epoch_cache(RelativeEpoch::Current, spec)?;
state.build_epoch_cache(RelativeEpoch::Next, spec)?;
let attesters = AttesterSets::new(&state, spec)?; let mut statuses = initialize_validator_statuses(&state, spec)?;
let active_validator_indices = get_active_validator_indices(
&state.validator_registry,
state.slot.epoch(spec.slots_per_epoch),
);
let current_total_balance = state.get_total_balance(&active_validator_indices[..], spec);
let previous_total_balance = state.get_total_balance(
&get_active_validator_indices(&state.validator_registry, previous_epoch)[..],
spec,
);
process_eth1_data(state, spec); process_eth1_data(state, spec);
process_justification( process_justification(state, &statuses.total_balances, spec);
state,
current_total_balance,
previous_total_balance,
attesters.previous_epoch_boundary.balance,
attesters.current_epoch_boundary.balance,
spec,
);
// Crosslinks // Crosslinks
let winning_root_for_shards = process_crosslinks(state, spec)?; let winning_root_for_shards = process_crosslinks(state, spec)?;
// Rewards and Penalities // Rewards and Penalities
let active_validator_indices_hashset: HashSet<usize> = process_rewards_and_penalities(state, &mut statuses, &winning_root_for_shards, spec)?;
HashSet::from_iter(active_validator_indices.iter().cloned());
process_rewards_and_penalities(
state,
active_validator_indices_hashset,
&attesters,
previous_total_balance,
&winning_root_for_shards,
spec,
)?;
// Ejections // Ejections
state.process_ejections(spec); state.process_ejections(spec)?;
// Validator Registry // Validator Registry
process_validator_registry(state, spec)?; process_validator_registry(state, spec)?;
// Final updates // Final updates
let active_tree_root = get_active_validator_indices( update_active_tree_index_roots(state, spec)?;
&state.validator_registry, update_latest_slashed_balances(state, spec);
next_epoch + Epoch::from(spec.activation_exit_delay), clean_attestations(state);
)
.hash_tree_root();
state.latest_active_index_roots[(next_epoch.as_usize()
+ spec.activation_exit_delay as usize)
% spec.latest_active_index_roots_length] = Hash256::from_slice(&active_tree_root[..]);
state.latest_slashed_balances[next_epoch.as_usize() % spec.latest_slashed_exit_length] =
state.latest_slashed_balances[current_epoch.as_usize() % spec.latest_slashed_exit_length];
state.latest_randao_mixes[next_epoch.as_usize() % spec.latest_randao_mixes_length] = state
.get_randao_mix(current_epoch, spec)
.and_then(|x| Some(*x))
.ok_or_else(|| Error::NoRandaoSeed)?;
state.latest_attestations = state
.latest_attestations
.iter()
.filter(|a| a.data.slot.epoch(spec.slots_per_epoch) >= current_epoch)
.cloned()
.collect();
// Rotate the epoch caches to suit the epoch transition. // Rotate the epoch caches to suit the epoch transition.
state.advance_caches(); state.advance_caches();
debug!("Epoch transition complete.");
Ok(()) Ok(())
} }
/// Spec v0.4.0 /// Returns a list of active validator indices for the state's current epoch.
fn process_eth1_data(state: &mut BeaconState, spec: &ChainSpec) { ///
/// Spec v0.5.0
pub fn calculate_active_validator_indices(state: &BeaconState, spec: &ChainSpec) -> Vec<usize> {
get_active_validator_indices(
&state.validator_registry,
state.slot.epoch(spec.slots_per_epoch),
)
}
/// Calculates various sets of attesters, including:
///
/// - current epoch attesters
/// - current epoch boundary attesters
/// - previous epoch attesters
/// - etc.
///
/// Spec v0.5.0
pub fn initialize_validator_statuses(
state: &BeaconState,
spec: &ChainSpec,
) -> Result<ValidatorStatuses, BeaconStateError> {
let mut statuses = ValidatorStatuses::new(state, spec)?;
statuses.process_attestations(&state, spec)?;
Ok(statuses)
}
/// Maybe resets the eth1 period.
///
/// Spec v0.5.0
pub fn process_eth1_data(state: &mut BeaconState, spec: &ChainSpec) {
let next_epoch = state.next_epoch(spec); let next_epoch = state.next_epoch(spec);
let voting_period = spec.epochs_per_eth1_voting_period; let voting_period = spec.epochs_per_eth1_voting_period;
if next_epoch % voting_period == 0 { if next_epoch % voting_period == 0 {
for eth1_data_vote in &state.eth1_data_votes { for eth1_data_vote in &state.eth1_data_votes {
if eth1_data_vote.vote_count * 2 > voting_period { if eth1_data_vote.vote_count * 2 > voting_period * spec.slots_per_epoch {
state.latest_eth1_data = eth1_data_vote.eth1_data.clone(); state.latest_eth1_data = eth1_data_vote.eth1_data.clone();
} }
} }
@ -122,26 +107,31 @@ fn process_eth1_data(state: &mut BeaconState, spec: &ChainSpec) {
} }
} }
/// Update the following fields on the `BeaconState`:
///
/// - `justification_bitfield`.
/// - `finalized_epoch`
/// - `justified_epoch`
/// - `previous_justified_epoch`
///
/// Spec v0.4.0 /// Spec v0.4.0
fn process_justification( pub fn process_justification(
state: &mut BeaconState, state: &mut BeaconState,
current_total_balance: u64, total_balances: &TotalBalances,
previous_total_balance: u64,
previous_epoch_boundary_attesting_balance: u64,
current_epoch_boundary_attesting_balance: u64,
spec: &ChainSpec, spec: &ChainSpec,
) { ) {
let previous_epoch = state.previous_epoch(spec); let previous_epoch = state.previous_epoch(spec);
let current_epoch = state.current_epoch(spec); let current_epoch = state.current_epoch(spec);
let mut new_justified_epoch = state.justified_epoch; let mut new_justified_epoch = state.current_justified_epoch;
state.justification_bitfield <<= 1; state.justification_bitfield <<= 1;
// If > 2/3 of the total balance attested to the previous epoch boundary // If > 2/3 of the total balance attested to the previous epoch boundary
// //
// - Set the 2nd bit of the bitfield. // - Set the 2nd bit of the bitfield.
// - Set the previous epoch to be justified. // - Set the previous epoch to be justified.
if (3 * previous_epoch_boundary_attesting_balance) >= (2 * previous_total_balance) { if (3 * total_balances.previous_epoch_boundary_attesters) >= (2 * total_balances.previous_epoch)
{
state.justification_bitfield |= 2; state.justification_bitfield |= 2;
new_justified_epoch = previous_epoch; new_justified_epoch = previous_epoch;
} }
@ -149,7 +139,7 @@ fn process_justification(
// //
// - Set the 1st bit of the bitfield. // - Set the 1st bit of the bitfield.
// - Set the current epoch to be justified. // - Set the current epoch to be justified.
if (3 * current_epoch_boundary_attesting_balance) >= (2 * current_total_balance) { if (3 * total_balances.current_epoch_boundary_attesters) >= (2 * total_balances.current_epoch) {
state.justification_bitfield |= 1; state.justification_bitfield |= 1;
new_justified_epoch = current_epoch; new_justified_epoch = current_epoch;
} }
@ -182,8 +172,10 @@ fn process_justification(
// - The presently justified epoch was two epochs ago. // - The presently justified epoch was two epochs ago.
// //
// Then, set the finalized epoch to two epochs ago. // Then, set the finalized epoch to two epochs ago.
if (state.justification_bitfield % 8 == 0b111) & (state.justified_epoch == previous_epoch - 1) { if (state.justification_bitfield % 8 == 0b111)
state.finalized_epoch = state.justified_epoch; & (state.current_justified_epoch == previous_epoch - 1)
{
state.finalized_epoch = state.current_justified_epoch;
} }
// If: // If:
// //
@ -191,32 +183,27 @@ fn process_justification(
// - Set the previous epoch to be justified. // - Set the previous epoch to be justified.
// //
// Then, set the finalized epoch to be the previous epoch. // Then, set the finalized epoch to be the previous epoch.
if (state.justification_bitfield % 4 == 0b11) & (state.justified_epoch == previous_epoch) { if (state.justification_bitfield % 4 == 0b11)
state.finalized_epoch = state.justified_epoch; & (state.current_justified_epoch == previous_epoch)
{
state.finalized_epoch = state.current_justified_epoch;
} }
state.previous_justified_epoch = state.justified_epoch; state.previous_justified_epoch = state.current_justified_epoch;
state.justified_epoch = new_justified_epoch; state.current_justified_epoch = new_justified_epoch;
} }
pub type WinningRootHashSet = HashMap<u64, WinningRoot>; /// Updates the following fields on the `BeaconState`:
///
fn process_crosslinks( /// - `latest_crosslinks`
///
/// Also returns a `WinningRootHashSet` for later use during epoch processing.
///
/// Spec v0.5.0
pub fn process_crosslinks(
state: &mut BeaconState, state: &mut BeaconState,
spec: &ChainSpec, spec: &ChainSpec,
) -> Result<WinningRootHashSet, Error> { ) -> Result<WinningRootHashSet, Error> {
let current_epoch_attestations: Vec<&PendingAttestation> = state
.latest_attestations
.par_iter()
.filter(|a| a.data.slot.epoch(spec.slots_per_epoch) == state.current_epoch(spec))
.collect();
let previous_epoch_attestations: Vec<&PendingAttestation> = state
.latest_attestations
.par_iter()
.filter(|a| a.data.slot.epoch(spec.slots_per_epoch) == state.previous_epoch(spec))
.collect();
let mut winning_root_for_shards: WinningRootHashSet = HashMap::new(); let mut winning_root_for_shards: WinningRootHashSet = HashMap::new();
let previous_and_current_epoch_slots: Vec<Slot> = state let previous_and_current_epoch_slots: Vec<Slot> = state
@ -230,24 +217,18 @@ fn process_crosslinks(
let crosslink_committees_at_slot = let crosslink_committees_at_slot =
state.get_crosslink_committees_at_slot(slot, spec)?.clone(); state.get_crosslink_committees_at_slot(slot, spec)?.clone();
for (crosslink_committee, shard) in crosslink_committees_at_slot { for c in crosslink_committees_at_slot {
let shard = shard as u64; let shard = c.shard as u64;
let winning_root = winning_root( let winning_root = winning_root(state, shard, spec)?;
state,
shard,
&current_epoch_attestations[..],
&previous_epoch_attestations[..],
spec,
)?;
if let Some(winning_root) = winning_root { if let Some(winning_root) = winning_root {
let total_committee_balance = state.get_total_balance(&crosslink_committee, spec); let total_committee_balance = state.get_total_balance(&c.committee, spec)?;
// TODO: I think this has a bug. // TODO: I think this has a bug.
if (3 * winning_root.total_attesting_balance) >= (2 * total_committee_balance) { if (3 * winning_root.total_attesting_balance) >= (2 * total_committee_balance) {
state.latest_crosslinks[shard as usize] = Crosslink { state.latest_crosslinks[shard as usize] = Crosslink {
epoch: state.current_epoch(spec), epoch: slot.epoch(spec.slots_per_epoch),
crosslink_data_root: winning_root.crosslink_data_root, crosslink_data_root: winning_root.crosslink_data_root,
} }
} }
@ -259,183 +240,161 @@ fn process_crosslinks(
Ok(winning_root_for_shards) Ok(winning_root_for_shards)
} }
/// Updates the following fields on the BeaconState:
///
/// - `validator_balances`
///
/// Spec v0.4.0 /// Spec v0.4.0
fn process_rewards_and_penalities( pub fn process_rewards_and_penalities(
state: &mut BeaconState, state: &mut BeaconState,
active_validator_indices: HashSet<usize>, statuses: &mut ValidatorStatuses,
attesters: &AttesterSets,
previous_total_balance: u64,
winning_root_for_shards: &WinningRootHashSet, winning_root_for_shards: &WinningRootHashSet,
spec: &ChainSpec, spec: &ChainSpec,
) -> Result<(), Error> { ) -> Result<(), Error> {
let next_epoch = state.next_epoch(spec); let next_epoch = state.next_epoch(spec);
let previous_epoch_attestations: Vec<&PendingAttestation> = state statuses.process_winning_roots(state, winning_root_for_shards, spec)?;
.latest_attestations
.par_iter()
.filter(|a| a.data.slot.epoch(spec.slots_per_epoch) == state.previous_epoch(spec))
.collect();
let base_reward_quotient = previous_total_balance.integer_sqrt() / spec.base_reward_quotient; let total_balances = &statuses.total_balances;
let base_reward_quotient =
total_balances.previous_epoch.integer_sqrt() / spec.base_reward_quotient;
// Guard against a divide-by-zero during the validator balance update.
if base_reward_quotient == 0 { if base_reward_quotient == 0 {
return Err(Error::BaseRewardQuotientIsZero); return Err(Error::BaseRewardQuotientIsZero);
} }
// Guard against a divide-by-zero during the validator balance update.
if total_balances.previous_epoch == 0 {
return Err(Error::PreviousTotalBalanceIsZero);
}
// Guard against an out-of-bounds during the validator balance update.
if statuses.statuses.len() != state.validator_balances.len() {
return Err(Error::ValidatorStatusesInconsistent);
}
// Justification and finalization // Justification and finalization
let epochs_since_finality = next_epoch - state.finalized_epoch; let epochs_since_finality = next_epoch - state.finalized_epoch;
if epochs_since_finality <= 4 { state.validator_balances = state
for index in 0..state.validator_balances.len() { .validator_balances
let base_reward = state.base_reward(index, base_reward_quotient, spec); .par_iter()
.enumerate()
// Expected FFG source .map(|(index, &balance)| {
if attesters.previous_epoch.indices.contains(&index) { let mut balance = balance;
safe_add_assign!( let status = &statuses.statuses[index];
state.validator_balances[index], let base_reward = get_base_reward(state, index, total_balances.previous_epoch, spec)
base_reward * attesters.previous_epoch.balance / previous_total_balance .expect(
"Cannot fail to access a validator balance when iterating validator balances.",
); );
} else if active_validator_indices.contains(&index) {
safe_sub_assign!(state.validator_balances[index], base_reward);
}
// Expected FFG target if epochs_since_finality <= 4 {
if attesters.previous_epoch_boundary.indices.contains(&index) { // Expected FFG source
safe_add_assign!( if status.is_previous_epoch_attester {
state.validator_balances[index], safe_add_assign!(
base_reward * attesters.previous_epoch_boundary.balance balance,
/ previous_total_balance base_reward * total_balances.previous_epoch_attesters
); / total_balances.previous_epoch
} else if active_validator_indices.contains(&index) {
safe_sub_assign!(state.validator_balances[index], base_reward);
}
// Expected beacon chain head
if attesters.previous_epoch_head.indices.contains(&index) {
safe_add_assign!(
state.validator_balances[index],
base_reward * attesters.previous_epoch_head.balance / previous_total_balance
);
} else if active_validator_indices.contains(&index) {
safe_sub_assign!(state.validator_balances[index], base_reward);
}
}
// Inclusion distance
for &index in &attesters.previous_epoch.indices {
let base_reward = state.base_reward(index, base_reward_quotient, spec);
let inclusion_distance =
inclusion_distance(state, &previous_epoch_attestations, index, spec)?;
safe_add_assign!(
state.validator_balances[index],
base_reward * spec.min_attestation_inclusion_delay / inclusion_distance
)
}
} else {
for index in 0..state.validator_balances.len() {
let inactivity_penalty =
state.inactivity_penalty(index, epochs_since_finality, base_reward_quotient, spec);
if active_validator_indices.contains(&index) {
if !attesters.previous_epoch.indices.contains(&index) {
safe_sub_assign!(state.validator_balances[index], inactivity_penalty);
}
if !attesters.previous_epoch_boundary.indices.contains(&index) {
safe_sub_assign!(state.validator_balances[index], inactivity_penalty);
}
if !attesters.previous_epoch_head.indices.contains(&index) {
safe_sub_assign!(state.validator_balances[index], inactivity_penalty);
}
if state.validator_registry[index].slashed {
let base_reward = state.base_reward(index, base_reward_quotient, spec);
safe_sub_assign!(
state.validator_balances[index],
2 * inactivity_penalty + base_reward
); );
} else if status.is_active_in_previous_epoch {
safe_sub_assign!(balance, base_reward);
}
// Expected FFG target
if status.is_previous_epoch_boundary_attester {
safe_add_assign!(
balance,
base_reward * total_balances.previous_epoch_boundary_attesters
/ total_balances.previous_epoch
);
} else if status.is_active_in_previous_epoch {
safe_sub_assign!(balance, base_reward);
}
// Expected beacon chain head
if status.is_previous_epoch_head_attester {
safe_add_assign!(
balance,
base_reward * total_balances.previous_epoch_head_attesters
/ total_balances.previous_epoch
);
} else if status.is_active_in_previous_epoch {
safe_sub_assign!(balance, base_reward);
};
} else {
let inactivity_penalty = get_inactivity_penalty(
state,
index,
epochs_since_finality.as_u64(),
total_balances.previous_epoch,
spec,
)
.expect(
"Cannot fail to access a validator balance when iterating validator balances.",
);
if status.is_active_in_previous_epoch {
if !status.is_previous_epoch_attester {
safe_sub_assign!(balance, inactivity_penalty);
}
if !status.is_previous_epoch_boundary_attester {
safe_sub_assign!(balance, inactivity_penalty);
}
if !status.is_previous_epoch_head_attester {
safe_sub_assign!(balance, inactivity_penalty);
}
if state.validator_registry[index].slashed {
let base_reward =
get_base_reward(state, index, total_balances.previous_epoch, spec).expect(
"Cannot fail to access a validator balance when iterating validator balances.",
);
safe_sub_assign!(balance, 2 * inactivity_penalty + base_reward);
}
} }
} }
}
for &index in &attesters.previous_epoch.indices { // Crosslinks
let base_reward = state.base_reward(index, base_reward_quotient, spec);
let inclusion_distance =
inclusion_distance(state, &previous_epoch_attestations, index, spec)?;
safe_sub_assign!( if let Some(ref info) = status.winning_root_info {
state.validator_balances[index], safe_add_assign!(
base_reward balance,
- base_reward * spec.min_attestation_inclusion_delay / inclusion_distance base_reward * info.total_attesting_balance / info.total_committee_balance
); );
} } else {
} safe_sub_assign!(balance, base_reward);
}
balance
})
.collect();
// Attestation inclusion // Attestation inclusion
for &index in &attesters.previous_epoch.indices { // Guard against an out-of-bounds during the attester inclusion balance update.
let inclusion_slot = inclusion_slot(state, &previous_epoch_attestations[..], index, spec)?; if statuses.statuses.len() != state.validator_registry.len() {
return Err(Error::ValidatorStatusesInconsistent);
let proposer_index = state
.get_beacon_proposer_index(inclusion_slot, spec)
.map_err(|_| Error::UnableToDetermineProducer)?;
let base_reward = state.base_reward(proposer_index, base_reward_quotient, spec);
safe_add_assign!(
state.validator_balances[proposer_index],
base_reward / spec.attestation_inclusion_reward_quotient
);
} }
//Crosslinks for (index, _validator) in state.validator_registry.iter().enumerate() {
let status = &statuses.statuses[index];
for slot in state.previous_epoch(spec).slot_iter(spec.slots_per_epoch) { if status.is_previous_epoch_attester {
// Clone removes the borrow which becomes an issue when mutating `state.balances`. let proposer_index = status.inclusion_info.proposer_index;
let crosslink_committees_at_slot = let inclusion_distance = status.inclusion_info.distance;
state.get_crosslink_committees_at_slot(slot, spec)?.clone();
for (crosslink_committee, shard) in crosslink_committees_at_slot { let base_reward =
let shard = shard as u64; get_base_reward(state, proposer_index, total_balances.previous_epoch, spec).expect(
"Cannot fail to access a validator balance when iterating validator balances.",
);
// Note: I'm a little uncertain of the logic here -- I am waiting for spec v0.5.0 to if inclusion_distance > 0 && inclusion_distance < Slot::max_value() {
// clear it up. safe_add_assign!(
// state.validator_balances[proposer_index],
// What happens here is: base_reward * spec.min_attestation_inclusion_delay
// / inclusion_distance.as_u64()
// - If there was some crosslink root elected by the super-majority of this committee, )
// then we reward all who voted for that root and penalize all that did not.
// - However, if there _was not_ some super-majority-voted crosslink root, then penalize
// all the validators.
//
// I'm not quite sure that the second case (no super-majority crosslink) is correct.
if let Some(winning_root) = winning_root_for_shards.get(&shard) {
// Hash set de-dedups and (hopefully) offers a speed improvement from faster
// lookups.
let attesting_validator_indices: HashSet<usize> =
HashSet::from_iter(winning_root.attesting_validator_indices.iter().cloned());
for &index in &crosslink_committee {
let base_reward = state.base_reward(index, base_reward_quotient, spec);
let total_balance = state.get_total_balance(&crosslink_committee, spec);
if attesting_validator_indices.contains(&index) {
safe_add_assign!(
state.validator_balances[index],
base_reward * winning_root.total_attesting_balance / total_balance
);
} else {
safe_sub_assign!(state.validator_balances[index], base_reward);
}
}
} else {
for &index in &crosslink_committee {
let base_reward = state.base_reward(index, base_reward_quotient, spec);
safe_sub_assign!(state.validator_balances[index], base_reward);
}
} }
} }
} }
@ -443,49 +402,76 @@ fn process_rewards_and_penalities(
Ok(()) Ok(())
} }
// Spec v0.4.0 /// Returns the base reward for some validator.
fn process_validator_registry(state: &mut BeaconState, spec: &ChainSpec) -> Result<(), Error> { ///
let current_epoch = state.current_epoch(spec); /// Spec v0.5.0
pub fn get_base_reward(
state: &BeaconState,
index: usize,
previous_total_balance: u64,
spec: &ChainSpec,
) -> Result<u64, BeaconStateError> {
if previous_total_balance == 0 {
Ok(0)
} else {
let adjusted_quotient = previous_total_balance.integer_sqrt() / spec.base_reward_quotient;
Ok(state.get_effective_balance(index, spec)? / adjusted_quotient / 5)
}
}
/// Returns the inactivity penalty for some validator.
///
/// Spec v0.5.0
pub fn get_inactivity_penalty(
state: &BeaconState,
index: usize,
epochs_since_finality: u64,
previous_total_balance: u64,
spec: &ChainSpec,
) -> Result<u64, BeaconStateError> {
Ok(get_base_reward(state, index, previous_total_balance, spec)?
+ state.get_effective_balance(index, spec)? * epochs_since_finality
/ spec.inactivity_penalty_quotient
/ 2)
}
/// Updates the state's `latest_active_index_roots` field with a tree hash the active validator
/// indices for the next epoch.
///
/// Spec v0.4.0
pub fn update_active_tree_index_roots(
state: &mut BeaconState,
spec: &ChainSpec,
) -> Result<(), Error> {
let next_epoch = state.next_epoch(spec); let next_epoch = state.next_epoch(spec);
state.previous_shuffling_epoch = state.current_shuffling_epoch; let active_tree_root = get_active_validator_indices(
state.previous_shuffling_start_shard = state.current_shuffling_start_shard; &state.validator_registry,
next_epoch + Epoch::from(spec.activation_exit_delay),
)
.hash_tree_root();
state.previous_shuffling_seed = state.current_shuffling_seed; state.latest_active_index_roots[(next_epoch.as_usize()
+ spec.activation_exit_delay as usize)
let should_update_validator_registy = if state.finalized_epoch % spec.latest_active_index_roots_length] = Hash256::from_slice(&active_tree_root[..]);
> state.validator_registry_update_epoch
{
(0..state.get_current_epoch_committee_count(spec)).all(|i| {
let shard = (state.current_shuffling_start_shard + i as u64) % spec.shard_count;
state.latest_crosslinks[shard as usize].epoch > state.validator_registry_update_epoch
})
} else {
false
};
if should_update_validator_registy {
state.update_validator_registry(spec);
state.current_shuffling_epoch = next_epoch;
state.current_shuffling_start_shard = (state.current_shuffling_start_shard
+ state.get_current_epoch_committee_count(spec) as u64)
% spec.shard_count;
state.current_shuffling_seed = state.generate_seed(state.current_shuffling_epoch, spec)?
} else {
let epochs_since_last_registry_update =
current_epoch - state.validator_registry_update_epoch;
if (epochs_since_last_registry_update > 1)
& epochs_since_last_registry_update.is_power_of_two()
{
state.current_shuffling_epoch = next_epoch;
state.current_shuffling_seed =
state.generate_seed(state.current_shuffling_epoch, spec)?
}
}
state.process_slashings(spec);
state.process_exit_queue(spec);
Ok(()) Ok(())
} }
/// Advances the state's `latest_slashed_balances` field.
///
/// Spec v0.4.0
pub fn update_latest_slashed_balances(state: &mut BeaconState, spec: &ChainSpec) {
let current_epoch = state.current_epoch(spec);
let next_epoch = state.next_epoch(spec);
state.latest_slashed_balances[next_epoch.as_usize() % spec.latest_slashed_exit_length] =
state.latest_slashed_balances[current_epoch.as_usize() % spec.latest_slashed_exit_length];
}
/// Removes all pending attestations from the previous epoch.
///
/// Spec v0.4.0
pub fn clean_attestations(state: &mut BeaconState) {
state.previous_epoch_attestations = vec![];
}

View File

@ -1,98 +0,0 @@
use std::collections::HashSet;
use types::*;
/// A de-duplicated set of validator indices together with a combined balance for the set.
#[derive(Default)]
pub struct Attesters {
    // De-duplicated by virtue of being a `HashSet`.
    pub indices: HashSet<usize>,
    // The sum of the balances supplied alongside the indices (see `Attesters::add`).
    pub balance: u64,
}
impl Attesters {
    /// Adds the given validator indices to the set and accumulates their combined balance.
    ///
    /// Duplicate indices are ignored (the underlying `HashSet` de-duplicates); the balance is
    /// accumulated with saturating arithmetic to avoid overflow.
    fn add(&mut self, additional_indices: &[usize], additional_balance: u64) {
        self.indices.reserve(additional_indices.len());
        for i in additional_indices {
            self.indices.insert(*i);
        }
        // Bug fix: `saturating_add` returns the sum rather than mutating in place. The original
        // discarded the result, so `balance` was never updated.
        self.balance = self.balance.saturating_add(additional_balance);
    }
}
/// Groups of attesting validators, partitioned by the epoch their attestation targets and by
/// which state-known roots the attestation agrees with (see `AttesterSets::new`).
pub struct AttesterSets {
    // Attested during the current epoch.
    pub current_epoch: Attesters,
    // Attested during the current epoch and matched the state's epoch-boundary block root.
    pub current_epoch_boundary: Attesters,
    // Attested during the previous epoch.
    pub previous_epoch: Attesters,
    // Attested during the previous epoch and matched the state's epoch-boundary block root.
    pub previous_epoch_boundary: Attesters,
    // Attested during the previous epoch and matched the state's block root at the
    // attestation's own slot.
    pub previous_epoch_head: Attesters,
}
impl AttesterSets {
    /// Builds all attester sets in a single pass over `state.latest_attestations`.
    ///
    /// Each attestation's participants and their total balance are computed once, then added to
    /// every set whose membership criteria the attestation satisfies.
    ///
    /// Returns an error if attestation participants cannot be resolved from the state.
    pub fn new(state: &BeaconState, spec: &ChainSpec) -> Result<Self, BeaconStateError> {
        let mut current_epoch = Attesters::default();
        let mut current_epoch_boundary = Attesters::default();
        let mut previous_epoch = Attesters::default();
        let mut previous_epoch_boundary = Attesters::default();
        let mut previous_epoch_head = Attesters::default();

        for a in &state.latest_attestations {
            let attesting_indices =
                state.get_attestation_participants(&a.data, &a.aggregation_bitfield, spec)?;
            let attesting_balance = state.get_total_balance(&attesting_indices, spec);

            // An attestation belongs to exactly one epoch bucket; the boundary/head sets are
            // strict subsets of their epoch's set.
            if is_from_epoch(a, state.current_epoch(spec), spec) {
                current_epoch.add(&attesting_indices, attesting_balance);

                if has_common_epoch_boundary_root(a, state, state.current_epoch(spec), spec)? {
                    current_epoch_boundary.add(&attesting_indices, attesting_balance);
                }
            } else if is_from_epoch(a, state.previous_epoch(spec), spec) {
                previous_epoch.add(&attesting_indices, attesting_balance);

                if has_common_epoch_boundary_root(a, state, state.previous_epoch(spec), spec)? {
                    previous_epoch_boundary.add(&attesting_indices, attesting_balance);
                }

                // "Head" agreement: the attestation's beacon block root matches the state's
                // block root at the attestation's slot.
                if has_common_beacon_block_root(a, state, spec)? {
                    previous_epoch_head.add(&attesting_indices, attesting_balance);
                }
            }
        }

        Ok(Self {
            current_epoch,
            current_epoch_boundary,
            previous_epoch,
            previous_epoch_boundary,
            previous_epoch_head,
        })
    }
}
/// Tests whether the attestation's slot falls inside the given epoch.
fn is_from_epoch(a: &PendingAttestation, epoch: Epoch, spec: &ChainSpec) -> bool {
    let attestation_epoch = a.data.slot.epoch(spec.slots_per_epoch);
    attestation_epoch == epoch
}
/// Returns `true` if the attestation's `epoch_boundary_root` matches the block root the state
/// knows for the first slot of `epoch`.
///
/// # Errors
///
/// Returns `InsufficientBlockRoots` if the state no longer stores a block root for that slot.
fn has_common_epoch_boundary_root(
    a: &PendingAttestation,
    state: &BeaconState,
    epoch: Epoch,
    spec: &ChainSpec,
) -> Result<bool, BeaconStateError> {
    let slot = epoch.start_slot(spec.slots_per_epoch);
    // `ok_or` rather than `ok_or_else`: constructing the unit error variant is free, so the
    // lazily-evaluated closure form is unnecessary (clippy: unnecessary_lazy_evaluations).
    let state_boundary_root = *state
        .get_block_root(slot, spec)
        .ok_or(BeaconStateError::InsufficientBlockRoots)?;

    Ok(a.data.epoch_boundary_root == state_boundary_root)
}
/// Returns `true` if the attestation's `beacon_block_root` matches the block root the state
/// knows for the attestation's own slot (i.e., the attester agreed with the chain head).
///
/// # Errors
///
/// Returns `InsufficientBlockRoots` if the state no longer stores a block root for that slot.
fn has_common_beacon_block_root(
    a: &PendingAttestation,
    state: &BeaconState,
    spec: &ChainSpec,
) -> Result<bool, BeaconStateError> {
    // `ok_or` rather than `ok_or_else`: the unit error variant costs nothing to construct.
    let state_block_root = *state
        .get_block_root(a.data.slot, spec)
        .ok_or(BeaconStateError::InsufficientBlockRoots)?;

    Ok(a.data.beacon_block_root == state_block_root)
}

View File

@ -6,6 +6,14 @@ pub enum EpochProcessingError {
NoBlockRoots, NoBlockRoots,
BaseRewardQuotientIsZero, BaseRewardQuotientIsZero,
NoRandaoSeed, NoRandaoSeed,
PreviousTotalBalanceIsZero,
InclusionDistanceZero,
ValidatorStatusesInconsistent,
/// Unable to get the inclusion distance for a validator that should have an inclusion
/// distance. This indicates an internal inconsistency.
///
/// (validator_index)
InclusionSlotsInconsistent(usize),
BeaconStateError(BeaconStateError), BeaconStateError(BeaconStateError),
InclusionError(InclusionError), InclusionError(InclusionError),
} }

View File

@ -0,0 +1,37 @@
use types::{beacon_state::helpers::verify_bitfield_length, *};
/// Returns validator indices which participated in the attestation.
///
/// Resolves the crosslink committee for the attestation's shard/epoch, then selects the
/// committee members whose bit is set in `bitfield`.
///
/// # Errors
///
/// - `NoCommitteeForShard` if the committee found for the shard is not at the attestation's slot.
/// - `InvalidBitfield` if the bitfield length does not match the committee size.
///
/// Spec v0.5.0
pub fn get_attestation_participants(
    state: &BeaconState,
    attestation_data: &AttestationData,
    bitfield: &Bitfield,
    spec: &ChainSpec,
) -> Result<Vec<usize>, BeaconStateError> {
    let epoch = attestation_data.slot.epoch(spec.slots_per_epoch);

    let crosslink_committee =
        state.get_crosslink_committee_for_shard(epoch, attestation_data.shard, spec)?;

    if crosslink_committee.slot != attestation_data.slot {
        return Err(BeaconStateError::NoCommitteeForShard);
    }
    let committee = &crosslink_committee.committee;

    if !verify_bitfield_length(&bitfield, committee.len()) {
        return Err(BeaconStateError::InvalidBitfield);
    }

    let mut participants = Vec::with_capacity(committee.len());
    for (i, validator_index) in committee.iter().enumerate() {
        // Idiom fix: `Ok(bit) if bit == true` → `if let Ok(true)` (clippy: bool_comparison).
        // Out-of-range or unset bits are simply skipped, as before.
        if let Ok(true) = bitfield.get(i) {
            participants.push(*validator_index);
        }
    }
    participants.shrink_to_fit();

    Ok(participants)
}

View File

@ -1,12 +1,11 @@
use super::errors::InclusionError; use super::errors::InclusionError;
use super::get_attestation_participants::get_attestation_participants;
use types::*; use types::*;
/// Returns the distance between the first included attestation for some validator and this /// Returns the distance between the first included attestation for some validator and this
/// slot. /// slot.
/// ///
/// Note: In the spec this is defined "inline", not as a helper function. /// Spec v0.5.0
///
/// Spec v0.4.0
pub fn inclusion_distance( pub fn inclusion_distance(
state: &BeaconState, state: &BeaconState,
attestations: &[&PendingAttestation], attestations: &[&PendingAttestation],
@ -19,9 +18,7 @@ pub fn inclusion_distance(
/// Returns the slot of the earliest included attestation for some validator. /// Returns the slot of the earliest included attestation for some validator.
/// ///
/// Note: In the spec this is defined "inline", not as a helper function. /// Spec v0.5.0
///
/// Spec v0.4.0
pub fn inclusion_slot( pub fn inclusion_slot(
state: &BeaconState, state: &BeaconState,
attestations: &[&PendingAttestation], attestations: &[&PendingAttestation],
@ -34,9 +31,7 @@ pub fn inclusion_slot(
/// Finds the earliest included attestation for some validator. /// Finds the earliest included attestation for some validator.
/// ///
/// Note: In the spec this is defined "inline", not as a helper function. /// Spec v0.5.0
///
/// Spec v0.4.0
fn earliest_included_attestation( fn earliest_included_attestation(
state: &BeaconState, state: &BeaconState,
attestations: &[&PendingAttestation], attestations: &[&PendingAttestation],
@ -47,7 +42,7 @@ fn earliest_included_attestation(
for (i, a) in attestations.iter().enumerate() { for (i, a) in attestations.iter().enumerate() {
let participants = let participants =
state.get_attestation_participants(&a.data, &a.aggregation_bitfield, spec)?; get_attestation_participants(state, &a.data, &a.aggregation_bitfield, spec)?;
if participants.iter().any(|i| *i == validator_index) { if participants.iter().any(|i| *i == validator_index) {
included_attestations.push(i); included_attestations.push(i);
} }

View File

@ -0,0 +1,72 @@
use super::Error;
use types::*;
/// Performs a validator registry update, if required.
///
/// Rotates the current shuffling into the previous shuffling, then either performs a full
/// registry update (advancing the shuffling epoch, start shard and seed) or, when no update is
/// due, refreshes only the shuffling epoch/seed at power-of-two epoch intervals. Finally,
/// processes slashings and the exit queue.
///
/// Spec v0.4.0
pub fn process_validator_registry(state: &mut BeaconState, spec: &ChainSpec) -> Result<(), Error> {
    let current_epoch = state.current_epoch(spec);
    let next_epoch = state.next_epoch(spec);

    // The current shuffling becomes the previous shuffling.
    state.previous_shuffling_epoch = state.current_shuffling_epoch;
    state.previous_shuffling_start_shard = state.current_shuffling_start_shard;
    state.previous_shuffling_seed = state.current_shuffling_seed;

    if should_update_validator_registry(state, spec)? {
        state.update_validator_registry(spec)?;

        state.current_shuffling_epoch = next_epoch;
        // Advance the start shard by this epoch's committee count, wrapping at `shard_count`.
        state.current_shuffling_start_shard = (state.current_shuffling_start_shard
            + spec.get_epoch_committee_count(
                state
                    .get_active_validator_indices(current_epoch, spec)?
                    .len(),
            ) as u64)
            % spec.shard_count;
        state.current_shuffling_seed = state.generate_seed(state.current_shuffling_epoch, spec)?
    } else {
        let epochs_since_last_registry_update =
            current_epoch - state.validator_registry_update_epoch;
        // Idiom fix: use short-circuiting `&&` rather than bitwise `&` on booleans.
        if (epochs_since_last_registry_update > 1)
            && epochs_since_last_registry_update.is_power_of_two()
        {
            state.current_shuffling_epoch = next_epoch;
            state.current_shuffling_seed =
                state.generate_seed(state.current_shuffling_epoch, spec)?
        }
    }

    state.process_slashings(spec)?;
    state.process_exit_queue(spec);

    Ok(())
}
/// Returns `true` if the validator registry should be updated during an epoch processing.
///
/// The registry is updated only when (a) finality has advanced past the last registry update,
/// and (b) every shard served by a committee this epoch has a crosslink newer than the last
/// registry update.
///
/// Spec v0.5.0
pub fn should_update_validator_registry(
    state: &BeaconState,
    spec: &ChainSpec,
) -> Result<bool, BeaconStateError> {
    if state.finalized_epoch <= state.validator_registry_update_epoch {
        return Ok(false);
    }

    let num_active_validators = state
        .get_active_validator_indices(state.current_epoch(spec), spec)?
        .len();
    let current_epoch_committee_count = spec.get_epoch_committee_count(num_active_validators);

    // Idiom fix: `Range` is already an `Iterator`, so `.into_iter()` was redundant.
    for shard in (0..current_epoch_committee_count)
        .map(|i| (state.current_shuffling_start_shard + i as u64) % spec.shard_count)
    {
        if state.latest_crosslinks[shard as usize].epoch <= state.validator_registry_update_epoch {
            return Ok(false);
        }
    }

    Ok(true)
}

View File

@ -1,21 +1,21 @@
#![cfg(test)] #![cfg(test)]
use crate::per_epoch_processing; use crate::per_epoch_processing;
use env_logger::{Builder, Env}; use env_logger::{Builder, Env};
use types::beacon_state::BeaconStateBuilder; use types::test_utils::TestingBeaconStateBuilder;
use types::*; use types::*;
#[test] #[test]
fn runs_without_error() { fn runs_without_error() {
Builder::from_env(Env::default().default_filter_or("error")).init(); Builder::from_env(Env::default().default_filter_or("error")).init();
let mut builder = BeaconStateBuilder::new(8); let spec = ChainSpec::few_validators();
builder.spec = ChainSpec::few_validators();
builder.build().unwrap(); let mut builder = TestingBeaconStateBuilder::from_deterministic_keypairs(8, &spec);
builder.teleport_to_end_of_epoch(builder.spec.genesis_epoch + 4);
let mut state = builder.cloned_state(); let target_slot = (spec.genesis_epoch + 4).end_slot(spec.slots_per_epoch);
builder.teleport_to_slot(target_slot, &spec);
let spec = &builder.spec; let (mut state, _keypairs) = builder.build();
per_epoch_processing(&mut state, spec).unwrap();
per_epoch_processing(&mut state, &spec).unwrap();
} }

View File

@ -0,0 +1,323 @@
use super::get_attestation_participants::get_attestation_participants;
use super::WinningRootHashSet;
use types::*;
/// Sets the boolean `var` on `self` to be true if it is true on `other`. Otherwise leaves `self`
/// as is.
///
/// In other words, a `true` flag is "sticky": merging can set it but never clear it.
macro_rules! set_self_if_other_is_true {
    ($self_: ident, $other: ident, $var: ident) => {
        if $other.$var {
            $self_.$var = true;
        }
    };
}
/// The information required to reward some validator for their participation in a "winning"
/// crosslink root.
///
/// The two balances let reward processing scale a validator's crosslink reward by the fraction
/// of the committee that attested for the winning root.
#[derive(Default, Clone)]
pub struct WinningRootInfo {
    /// The total balance of the crosslink committee.
    pub total_committee_balance: u64,
    /// The total balance of the crosslink committee that attested for the "winning" root.
    pub total_attesting_balance: u64,
}
/// The information required to reward a block producer for including an attestation in a block.
///
/// Defaults (see the `Default` impl) use `Slot::max_value()` as a "no attestation seen yet"
/// sentinel, so any real inclusion compares as earlier in `InclusionInfo::update`.
#[derive(Clone)]
pub struct InclusionInfo {
    /// The earliest slot a validator had an attestation included in the previous epoch.
    pub slot: Slot,
    /// The distance between the attestation slot and the slot that attestation was included in a
    /// block.
    pub distance: Slot,
    /// The index of the proposer at the slot where the attestation was included.
    pub proposer_index: usize,
}
impl Default for InclusionInfo {
    /// Starts with `slot` and `distance` at their maxima — a "no attestation seen yet" sentinel
    /// that any real inclusion slot will compare below — and a zeroed `proposer_index`.
    fn default() -> Self {
        let never = Slot::max_value();
        Self {
            slot: never,
            distance: never,
            proposer_index: 0,
        }
    }
}
impl InclusionInfo {
    /// Keeps whichever of `self`/`other` describes the earlier-included attestation.
    ///
    /// If `other` has a strictly lower inclusion slot, all of its fields are adopted; otherwise
    /// `self` is left untouched.
    pub fn update(&mut self, other: &Self) {
        // Guard clause: nothing to do unless `other` was included strictly earlier.
        if other.slot >= self.slot {
            return;
        }
        self.slot = other.slot;
        self.distance = other.distance;
        self.proposer_index = other.proposer_index;
    }
}
/// Information required to reward some validator during the current and previous epoch.
///
/// One instance exists per registry validator (see `ValidatorStatuses`); flags are merged in
/// from each attestation the validator participated in.
#[derive(Default, Clone)]
pub struct AttesterStatus {
    /// True if the validator was active in the state's _current_ epoch.
    pub is_active_in_current_epoch: bool,
    /// True if the validator was active in the state's _previous_ epoch.
    pub is_active_in_previous_epoch: bool,
    /// True if the validator had an attestation included in the _current_ epoch.
    pub is_current_epoch_attester: bool,
    /// True if the validator's beacon block root attestation for the first slot of the _current_
    /// epoch matches the block root known to the state.
    pub is_current_epoch_boundary_attester: bool,
    /// True if the validator had an attestation included in the _previous_ epoch.
    pub is_previous_epoch_attester: bool,
    /// True if the validator's beacon block root attestation for the first slot of the _previous_
    /// epoch matches the block root known to the state.
    pub is_previous_epoch_boundary_attester: bool,
    /// True if the validator's beacon block root attestation in the _previous_ epoch at the
    /// attestation's slot (`attestation_data.slot`) matches the block root known to the state.
    pub is_previous_epoch_head_attester: bool,
    /// Information used to reward the block producer of this validators earliest-included
    /// attestation.
    pub inclusion_info: InclusionInfo,
    /// Information used to reward/penalize the validator if they voted in the super-majority for
    /// some shard block.
    pub winning_root_info: Option<WinningRootInfo>,
}
impl AttesterStatus {
/// Accepts some `other` `AttesterStatus` and updates `self` if required.
///
/// Will never set one of the `bool` fields to `false`, it will only set it to `true` if other
/// contains a `true` field.
///
/// Note: does not update the winning root info, this is done manually.
pub fn update(&mut self, other: &Self) {
// Update all the bool fields, only updating `self` if `other` is true (never setting
// `self` to false).
set_self_if_other_is_true!(self, other, is_active_in_current_epoch);
set_self_if_other_is_true!(self, other, is_active_in_previous_epoch);
set_self_if_other_is_true!(self, other, is_current_epoch_attester);
set_self_if_other_is_true!(self, other, is_current_epoch_boundary_attester);
set_self_if_other_is_true!(self, other, is_previous_epoch_attester);
set_self_if_other_is_true!(self, other, is_previous_epoch_boundary_attester);
set_self_if_other_is_true!(self, other, is_previous_epoch_head_attester);
self.inclusion_info.update(&other.inclusion_info);
}
}
/// The total effective balances for different sets of validators during the previous and current
/// epochs.
///
/// Accumulated by `ValidatorStatuses::new` (active-validator totals) and
/// `ValidatorStatuses::process_attestations` (attester totals).
#[derive(Default, Clone)]
pub struct TotalBalances {
    /// The total effective balance of all active validators during the _current_ epoch.
    pub current_epoch: u64,
    /// The total effective balance of all active validators during the _previous_ epoch.
    pub previous_epoch: u64,
    /// The total effective balance of all validators who attested during the _current_ epoch.
    pub current_epoch_attesters: u64,
    /// The total effective balance of all validators who attested during the _current_ epoch and
    /// agreed with the state about the beacon block at the first slot of the _current_ epoch.
    pub current_epoch_boundary_attesters: u64,
    /// The total effective balance of all validators who attested during the _previous_ epoch.
    pub previous_epoch_attesters: u64,
    /// The total effective balance of all validators who attested during the _previous_ epoch and
    /// agreed with the state about the beacon block at the first slot of the _previous_ epoch.
    pub previous_epoch_boundary_attesters: u64,
    /// The total effective balance of all validators who attested during the _previous_ epoch and
    /// agreed with the state about the beacon block at the time of attestation.
    pub previous_epoch_head_attesters: u64,
}
/// Summarised information about validator participation in the _previous_ and _current_ epochs
/// of some `BeaconState`.
#[derive(Clone)]
pub struct ValidatorStatuses {
    /// Information about each individual validator from the state's validator registry; indexed
    /// identically to `state.validator_registry`.
    pub statuses: Vec<AttesterStatus>,
    /// Summed balances for various sets of validators.
    pub total_balances: TotalBalances,
}
impl ValidatorStatuses {
    /// Initializes a new instance, determining:
    ///
    /// - Active validators
    /// - Total balances for the current and previous epochs.
    ///
    /// `statuses` is built with exactly one entry per validator in `state.validator_registry`,
    /// in registry order.
    ///
    /// Spec v0.5.0
    pub fn new(state: &BeaconState, spec: &ChainSpec) -> Result<Self, BeaconStateError> {
        let mut statuses = Vec::with_capacity(state.validator_registry.len());
        let mut total_balances = TotalBalances::default();

        for (i, validator) in state.validator_registry.iter().enumerate() {
            let mut status = AttesterStatus::default();

            // A validator active in both epochs contributes its effective balance to both
            // totals — the two checks are independent.
            if validator.is_active_at(state.current_epoch(spec)) {
                status.is_active_in_current_epoch = true;
                total_balances.current_epoch += state.get_effective_balance(i, spec)?;
            }

            if validator.is_active_at(state.previous_epoch(spec)) {
                status.is_active_in_previous_epoch = true;
                total_balances.previous_epoch += state.get_effective_balance(i, spec)?;
            }

            statuses.push(status);
        }

        Ok(Self {
            statuses,
            total_balances,
        })
    }

    /// Process some attestations from the given `state` updating the `statuses` and
    /// `total_balances` fields.
    ///
    /// Each attestation is profiled once into a template `AttesterStatus`, which is then merged
    /// into the status of every participating validator.
    ///
    /// Spec v0.5.0
    pub fn process_attestations(
        &mut self,
        state: &BeaconState,
        spec: &ChainSpec,
    ) -> Result<(), BeaconStateError> {
        for a in state
            .previous_epoch_attestations
            .iter()
            .chain(state.current_epoch_attestations.iter())
        {
            let attesting_indices =
                get_attestation_participants(state, &a.data, &a.aggregation_bitfield, spec)?;
            let attesting_balance = state.get_total_balance(&attesting_indices, spec)?;

            let mut status = AttesterStatus::default();

            // Profile this attestation, updating the total balances and generating an
            // `AttesterStatus` object that applies to all participants in the attestation.
            if is_from_epoch(a, state.current_epoch(spec), spec) {
                self.total_balances.current_epoch_attesters += attesting_balance;
                status.is_current_epoch_attester = true;

                if has_common_epoch_boundary_root(a, state, state.current_epoch(spec), spec)? {
                    self.total_balances.current_epoch_boundary_attesters += attesting_balance;
                    status.is_current_epoch_boundary_attester = true;
                }
            } else if is_from_epoch(a, state.previous_epoch(spec), spec) {
                self.total_balances.previous_epoch_attesters += attesting_balance;
                status.is_previous_epoch_attester = true;

                // The inclusion slot and distance are only required for previous epoch attesters.
                let relative_epoch = RelativeEpoch::from_slot(state.slot, a.data.slot, spec)?;
                status.inclusion_info = InclusionInfo {
                    slot: a.inclusion_slot,
                    distance: inclusion_distance(a),
                    proposer_index: state.get_beacon_proposer_index(
                        a.inclusion_slot,
                        relative_epoch,
                        spec,
                    )?,
                };

                if has_common_epoch_boundary_root(a, state, state.previous_epoch(spec), spec)? {
                    self.total_balances.previous_epoch_boundary_attesters += attesting_balance;
                    status.is_previous_epoch_boundary_attester = true;
                }

                if has_common_beacon_block_root(a, state, spec)? {
                    self.total_balances.previous_epoch_head_attesters += attesting_balance;
                    status.is_previous_epoch_head_attester = true;
                }
            }

            // Loop through the participating validator indices and update the status vec.
            // `statuses` has one entry per registry validator (see `new`), so participant
            // indices index it directly.
            for validator_index in attesting_indices {
                self.statuses[validator_index].update(&status);
            }
        }

        Ok(())
    }

    /// Update the `statuses` for each validator based upon whether or not they attested to the
    /// "winning" shard block root for the previous epoch.
    ///
    /// For each committee with a winning root, attesters to that root have their
    /// `winning_root_info` populated with the committee/attesting balance pair used later for
    /// crosslink rewards.
    ///
    /// Spec v0.5.0
    pub fn process_winning_roots(
        &mut self,
        state: &BeaconState,
        winning_roots: &WinningRootHashSet,
        spec: &ChainSpec,
    ) -> Result<(), BeaconStateError> {
        // Loop through each slot in the previous epoch.
        for slot in state.previous_epoch(spec).slot_iter(spec.slots_per_epoch) {
            let crosslink_committees_at_slot =
                state.get_crosslink_committees_at_slot(slot, spec)?;

            // Loop through each committee in the slot.
            for c in crosslink_committees_at_slot {
                // If there was some winning crosslink root for the committee's shard.
                if let Some(winning_root) = winning_roots.get(&c.shard) {
                    let total_committee_balance = state.get_total_balance(&c.committee, spec)?;
                    for &validator_index in &winning_root.attesting_validator_indices {
                        // Take note of the balance information for the winning root, it will be
                        // used later to calculate rewards for that validator.
                        self.statuses[validator_index].winning_root_info = Some(WinningRootInfo {
                            total_committee_balance,
                            total_attesting_balance: winning_root.total_attesting_balance,
                        })
                    }
                }
            }
        }

        Ok(())
    }
}
/// Returns the distance between when the attestation was created and when it was included in a
/// block.
///
/// Spec v0.5.0
fn inclusion_distance(a: &PendingAttestation) -> Slot {
    let attested_at = a.data.slot;
    let included_at = a.inclusion_slot;
    included_at - attested_at
}
/// Returns `true` if some `PendingAttestation` is from the supplied `epoch`.
///
/// Spec v0.5.0
fn is_from_epoch(a: &PendingAttestation, epoch: Epoch, spec: &ChainSpec) -> bool {
    // Map the attestation's slot onto its epoch and compare.
    epoch == a.data.slot.epoch(spec.slots_per_epoch)
}
/// Returns `true` if a `PendingAttestation` and `BeaconState` share the same beacon block hash for
/// the first slot of the given epoch.
///
/// Spec v0.5.0
fn has_common_epoch_boundary_root(
    a: &PendingAttestation,
    state: &BeaconState,
    epoch: Epoch,
    spec: &ChainSpec,
) -> Result<bool, BeaconStateError> {
    // The epoch-boundary block is the one at the epoch's first slot.
    let boundary_slot = epoch.start_slot(spec.slots_per_epoch);
    let state_boundary_root = state.get_block_root(boundary_slot, spec)?;

    Ok(*state_boundary_root == a.data.target_root)
}
/// Returns `true` if a `PendingAttestation` and `BeaconState` share the same beacon block hash for
/// the current slot of the `PendingAttestation`.
///
/// Spec v0.5.0
fn has_common_beacon_block_root(
    a: &PendingAttestation,
    state: &BeaconState,
    spec: &ChainSpec,
) -> Result<bool, BeaconStateError> {
    // Root the state has recorded for the attestation's slot.
    let expected_root = state.get_block_root(a.data.slot, spec)?;

    Ok(*expected_root == a.data.beacon_block_root)
}

View File

@ -1,3 +1,4 @@
use super::get_attestation_participants::get_attestation_participants;
use std::collections::HashSet; use std::collections::HashSet;
use std::iter::FromIterator; use std::iter::FromIterator;
use types::*; use types::*;
@ -13,14 +14,14 @@ impl WinningRoot {
/// Returns `true` if `self` is a "better" candidate than `other`. /// Returns `true` if `self` is a "better" candidate than `other`.
/// ///
/// A winning root is "better" than another if it has a higher `total_attesting_balance`. Ties /// A winning root is "better" than another if it has a higher `total_attesting_balance`. Ties
/// are broken by favouring the lower `crosslink_data_root` value. /// are broken by favouring the higher `crosslink_data_root` value.
/// ///
/// Spec v0.4.0 /// Spec v0.5.0
pub fn is_better_than(&self, other: &Self) -> bool { pub fn is_better_than(&self, other: &Self) -> bool {
if self.total_attesting_balance > other.total_attesting_balance { if self.total_attesting_balance > other.total_attesting_balance {
true true
} else if self.total_attesting_balance == other.total_attesting_balance { } else if self.total_attesting_balance == other.total_attesting_balance {
self.crosslink_data_root < other.crosslink_data_root self.crosslink_data_root > other.crosslink_data_root
} else { } else {
false false
} }
@ -33,22 +34,21 @@ impl WinningRoot {
/// The `WinningRoot` object also contains additional fields that are useful in later stages of /// The `WinningRoot` object also contains additional fields that are useful in later stages of
/// per-epoch processing. /// per-epoch processing.
/// ///
/// Spec v0.4.0 /// Spec v0.5.0
pub fn winning_root( pub fn winning_root(
state: &BeaconState, state: &BeaconState,
shard: u64, shard: u64,
current_epoch_attestations: &[&PendingAttestation],
previous_epoch_attestations: &[&PendingAttestation],
spec: &ChainSpec, spec: &ChainSpec,
) -> Result<Option<WinningRoot>, BeaconStateError> { ) -> Result<Option<WinningRoot>, BeaconStateError> {
let mut winning_root: Option<WinningRoot> = None; let mut winning_root: Option<WinningRoot> = None;
let crosslink_data_roots: HashSet<Hash256> = HashSet::from_iter( let crosslink_data_roots: HashSet<Hash256> = HashSet::from_iter(
previous_epoch_attestations state
.previous_epoch_attestations
.iter() .iter()
.chain(current_epoch_attestations.iter()) .chain(state.current_epoch_attestations.iter())
.filter_map(|a| { .filter_map(|a| {
if a.data.shard == shard { if is_eligible_for_winning_root(state, a, shard) {
Some(a.data.crosslink_data_root) Some(a.data.crosslink_data_root)
} else { } else {
None None
@ -57,18 +57,17 @@ pub fn winning_root(
); );
for crosslink_data_root in crosslink_data_roots { for crosslink_data_root in crosslink_data_roots {
let attesting_validator_indices = get_attesting_validator_indices( let attesting_validator_indices =
state, get_attesting_validator_indices(state, shard, &crosslink_data_root, spec)?;
shard,
current_epoch_attestations,
previous_epoch_attestations,
&crosslink_data_root,
spec,
)?;
let total_attesting_balance: u64 = attesting_validator_indices let total_attesting_balance: u64 =
.iter() attesting_validator_indices
.fold(0, |acc, i| acc + state.get_effective_balance(*i, spec)); .iter()
.try_fold(0_u64, |acc, i| {
state
.get_effective_balance(*i, spec)
.and_then(|bal| Ok(acc + bal))
})?;
let candidate = WinningRoot { let candidate = WinningRoot {
crosslink_data_root, crosslink_data_root,
@ -88,25 +87,36 @@ pub fn winning_root(
Ok(winning_root) Ok(winning_root)
} }
/// Returns all indices which voted for a given crosslink. May contain duplicates. /// Returns `true` if pending attestation `a` is eligible to become a winning root.
/// ///
/// Spec v0.4.0 /// Spec v0.5.0
/// Returns `true` if pending attestation `a` may be considered when finding a winning root for
/// `shard`: the shard must be known and the attestation must extend the latest crosslink.
fn is_eligible_for_winning_root(state: &BeaconState, a: &PendingAttestation, shard: Shard) -> bool {
    // Checked lookup: a shard beyond the known crosslink range is never eligible.
    match state.latest_crosslinks.get(shard as usize) {
        Some(latest_crosslink) => a.data.previous_crosslink == *latest_crosslink,
        None => false,
    }
}
/// Returns all indices which voted for a given crosslink. Does not contain duplicates.
///
/// Spec v0.5.0
fn get_attesting_validator_indices( fn get_attesting_validator_indices(
state: &BeaconState, state: &BeaconState,
shard: u64, shard: u64,
current_epoch_attestations: &[&PendingAttestation],
previous_epoch_attestations: &[&PendingAttestation],
crosslink_data_root: &Hash256, crosslink_data_root: &Hash256,
spec: &ChainSpec, spec: &ChainSpec,
) -> Result<Vec<usize>, BeaconStateError> { ) -> Result<Vec<usize>, BeaconStateError> {
let mut indices = vec![]; let mut indices = vec![];
for a in current_epoch_attestations for a in state
.current_epoch_attestations
.iter() .iter()
.chain(previous_epoch_attestations.iter()) .chain(state.previous_epoch_attestations.iter())
{ {
if (a.data.shard == shard) && (a.data.crosslink_data_root == *crosslink_data_root) { if (a.data.shard == shard) && (a.data.crosslink_data_root == *crosslink_data_root) {
indices.append(&mut state.get_attestation_participants( indices.append(&mut get_attestation_participants(
state,
&a.data, &a.data,
&a.aggregation_bitfield, &a.aggregation_bitfield,
spec, spec,
@ -114,5 +124,41 @@ fn get_attesting_validator_indices(
} }
} }
// Sort the list (required for dedup). "Unstable" means the sort may re-order equal elements,
// this causes no issue here.
//
// These sort + dedup ops are potentially good CPU time optimisation targets.
indices.sort_unstable();
// Remove all duplicate indices (requires a sorted list).
indices.dedup();
Ok(indices) Ok(indices)
} }
#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn is_better_than() {
        // Baseline candidate with an arbitrary balance and a low data root.
        let worse = WinningRoot {
            crosslink_data_root: Hash256::from_slice(&[1; 32]),
            attesting_validator_indices: vec![],
            total_attesting_balance: 42,
        };

        // Equal balance but a higher `crosslink_data_root`: ties are broken by the higher root.
        let better = WinningRoot {
            crosslink_data_root: Hash256::from_slice(&[2; 32]),
            ..worse.clone()
        };

        assert!(better.is_better_than(&worse));

        // Strictly higher attesting balance always wins, regardless of the root.
        let better = WinningRoot {
            total_attesting_balance: worse.total_attesting_balance + 1,
            ..worse.clone()
        };

        assert!(better.is_better_than(&worse));
    }
}

View File

@ -1,5 +1,6 @@
use crate::*; use crate::*;
use types::{BeaconState, BeaconStateError, ChainSpec, Hash256}; use ssz::TreeHash;
use types::*;
#[derive(Debug, PartialEq)] #[derive(Debug, PartialEq)]
pub enum Error { pub enum Error {
@ -9,12 +10,14 @@ pub enum Error {
/// Advances a state forward by one slot, performing per-epoch processing if required. /// Advances a state forward by one slot, performing per-epoch processing if required.
/// ///
/// Spec v0.4.0 /// Spec v0.5.0
pub fn per_slot_processing( pub fn per_slot_processing(
state: &mut BeaconState, state: &mut BeaconState,
previous_block_root: Hash256, latest_block_header: &BeaconBlockHeader,
spec: &ChainSpec, spec: &ChainSpec,
) -> Result<(), Error> { ) -> Result<(), Error> {
cache_state(state, latest_block_header, spec)?;
if (state.slot + 1) % spec.slots_per_epoch == 0 { if (state.slot + 1) % spec.slots_per_epoch == 0 {
per_epoch_processing(state, spec)?; per_epoch_processing(state, spec)?;
state.advance_caches(); state.advance_caches();
@ -22,27 +25,38 @@ pub fn per_slot_processing(
state.slot += 1; state.slot += 1;
update_block_roots(state, previous_block_root, spec); let latest_block_root = Hash256::from_slice(&state.latest_block_header.hash_tree_root()[..]);
state.set_block_root(state.slot - 1, latest_block_root, spec)?;
Ok(()) Ok(())
} }
/// Updates the state's block roots as per-slot processing is performed. fn cache_state(
/// state: &mut BeaconState,
/// Spec v0.4.0 latest_block_header: &BeaconBlockHeader,
pub fn update_block_roots(state: &mut BeaconState, previous_block_root: Hash256, spec: &ChainSpec) { spec: &ChainSpec,
state.latest_block_roots[(state.slot.as_usize() - 1) % spec.latest_block_roots_length] = ) -> Result<(), Error> {
previous_block_root; let previous_slot_state_root = Hash256::from_slice(&state.hash_tree_root()[..]);
if state.slot.as_usize() % spec.latest_block_roots_length == 0 { // Note: increment the state slot here to allow use of our `state_root` and `block_root`
let root = merkle_root(&state.latest_block_roots[..]); // getter/setter functions.
state.batched_block_roots.push(root); //
// This is a bit hacky, however it gets the job safely without lots of code.
let previous_slot = state.slot;
state.slot += 1;
// Store the previous slot's post-state transition root.
if state.latest_block_header.state_root == spec.zero_hash {
state.latest_block_header.state_root = previous_slot_state_root
} }
}
fn merkle_root(_input: &[Hash256]) -> Hash256 { let latest_block_root = Hash256::from_slice(&latest_block_header.hash_tree_root()[..]);
// TODO: implement correctly. state.set_block_root(previous_slot, latest_block_root, spec)?;
Hash256::zero()
// Set the state slot back to what it should be.
state.slot -= 1;
Ok(())
} }
impl From<BeaconStateError> for Error { impl From<BeaconStateError> for Error {

View File

@ -0,0 +1,42 @@
use serde_derive::Deserialize;
use types::*;
/// A single state-transition test case, deserialized from YAML.
#[derive(Debug, Deserialize)]
pub struct TestCase {
    /// Human-readable name of the test case.
    pub name: String,
    /// Chain spec (constants) the case is defined against.
    pub config: ChainSpec,
    /// Presumably controls signature verification in the test runner — TODO confirm.
    pub verify_signatures: bool,
    /// The state the case starts from.
    pub initial_state: BeaconState,
    /// Blocks associated with the case.
    pub blocks: Vec<BeaconBlock>,
}
/// Top-level YAML test document: metadata plus the list of test cases.
#[derive(Debug, Deserialize)]
pub struct TestDoc {
    /// Document title.
    pub title: String,
    /// Free-form description of the document.
    pub summary: String,
    /// Fork name the document targets.
    pub fork: String,
    /// Spec version string.
    pub version: String,
    /// The test cases contained in the document.
    pub test_cases: Vec<TestCase>,
}
/// Smoke test: the example YAML spec document must deserialize into `TestDoc`.
///
/// Ignored by default because it depends on `specs/example.yml` being present on disk.
#[test]
#[ignore]
fn yaml() {
    use std::{fs, path::PathBuf};

    // Resolve the example spec file relative to this crate's manifest directory.
    let mut file_path_buf = PathBuf::from(env!("CARGO_MANIFEST_DIR"));
    file_path_buf.push("specs/example.yml");

    // Read the whole document in one call (avoids the manual `File::open` +
    // `read_to_string` boilerplate), then lower-case it so YAML keys match the
    // lower-case `serde` field names regardless of the file's casing.
    let yaml_str = fs::read_to_string(&file_path_buf)
        .expect("unable to read specs/example.yml")
        .to_lowercase();

    // Parsing is the assertion: `unwrap` fails the test on invalid YAML.
    // (Was `from_str(&yaml_str.as_str())` — a redundant `&&str` double borrow.)
    let _doc: TestDoc = serde_yaml::from_str(&yaml_str).unwrap();
}

View File

@ -7,6 +7,7 @@ edition = "2018"
[dependencies] [dependencies]
bls = { path = "../utils/bls" } bls = { path = "../utils/bls" }
boolean-bitfield = { path = "../utils/boolean-bitfield" } boolean-bitfield = { path = "../utils/boolean-bitfield" }
dirs = "1.0"
ethereum-types = "0.5" ethereum-types = "0.5"
hashing = { path = "../utils/hashing" } hashing = { path = "../utils/hashing" }
honey-badger-split = { path = "../utils/honey-badger-split" } honey-badger-split = { path = "../utils/honey-badger-split" }
@ -17,6 +18,7 @@ rand = "0.5.5"
serde = "1.0" serde = "1.0"
serde_derive = "1.0" serde_derive = "1.0"
serde_json = "1.0" serde_json = "1.0"
serde_yaml = "0.8"
slog = "^2.2.3" slog = "^2.2.3"
ssz = { path = "../utils/ssz" } ssz = { path = "../utils/ssz" }
ssz_derive = { path = "../utils/ssz_derive" } ssz_derive = { path = "../utils/ssz_derive" }

View File

@ -1,15 +1,26 @@
use super::{AggregateSignature, AttestationData, Bitfield}; use super::{AggregateSignature, AttestationData, Bitfield};
use crate::test_utils::TestRandom; use crate::test_utils::TestRandom;
use rand::RngCore; use rand::RngCore;
use serde_derive::Serialize; use serde_derive::{Deserialize, Serialize};
use ssz::TreeHash; use ssz::TreeHash;
use ssz_derive::{Decode, Encode, SignedRoot, TreeHash}; use ssz_derive::{Decode, Encode, SignedRoot, TreeHash};
use test_random_derive::TestRandom; use test_random_derive::TestRandom;
/// Details an attestation that can be slashable. /// Details an attestation that can be slashable.
/// ///
/// Spec v0.4.0 /// Spec v0.5.0
#[derive(Debug, Clone, PartialEq, Serialize, Encode, Decode, TreeHash, TestRandom, SignedRoot)] #[derive(
Debug,
Clone,
PartialEq,
Serialize,
Deserialize,
Encode,
Decode,
TreeHash,
TestRandom,
SignedRoot,
)]
pub struct Attestation { pub struct Attestation {
pub aggregation_bitfield: Bitfield, pub aggregation_bitfield: Bitfield,
pub data: AttestationData, pub data: AttestationData,
@ -20,29 +31,6 @@ pub struct Attestation {
#[cfg(test)] #[cfg(test)]
mod tests { mod tests {
use super::*; use super::*;
use crate::test_utils::{SeedableRng, TestRandom, XorShiftRng};
use ssz::{ssz_encode, Decodable, TreeHash};
#[test] ssz_tests!(Attestation);
pub fn test_ssz_round_trip() {
let mut rng = XorShiftRng::from_seed([42; 16]);
let original = Attestation::random_for_test(&mut rng);
let bytes = ssz_encode(&original);
let (decoded, _) = <_>::ssz_decode(&bytes, 0).unwrap();
assert_eq!(original, decoded);
}
#[test]
pub fn test_hash_tree_root_internal() {
let mut rng = XorShiftRng::from_seed([42; 16]);
let original = Attestation::random_for_test(&mut rng);
let result = original.hash_tree_root_internal();
assert_eq!(result.len(), 32);
// TODO: Add further tests
// https://github.com/sigp/lighthouse/issues/170
}
} }

View File

@ -1,20 +1,21 @@
use crate::test_utils::TestRandom; use crate::test_utils::TestRandom;
use crate::{Crosslink, Epoch, Hash256, Slot}; use crate::{Crosslink, Epoch, Hash256, Slot};
use rand::RngCore; use rand::RngCore;
use serde_derive::Serialize; use serde_derive::{Deserialize, Serialize};
use ssz::TreeHash; use ssz::TreeHash;
use ssz_derive::{Decode, Encode, SignedRoot, TreeHash}; use ssz_derive::{Decode, Encode, SignedRoot, TreeHash};
use test_random_derive::TestRandom; use test_random_derive::TestRandom;
/// The data upon which an attestation is based. /// The data upon which an attestation is based.
/// ///
/// Spec v0.4.0 /// Spec v0.5.0
#[derive( #[derive(
Debug, Debug,
Clone, Clone,
PartialEq, PartialEq,
Default, Default,
Serialize, Serialize,
Deserialize,
Hash, Hash,
Encode, Encode,
Decode, Decode,
@ -23,14 +24,19 @@ use test_random_derive::TestRandom;
SignedRoot, SignedRoot,
)] )]
pub struct AttestationData { pub struct AttestationData {
// LMD GHOST vote
pub slot: Slot, pub slot: Slot,
pub shard: u64,
pub beacon_block_root: Hash256, pub beacon_block_root: Hash256,
pub epoch_boundary_root: Hash256,
// FFG Vote
pub source_epoch: Epoch,
pub source_root: Hash256,
pub target_root: Hash256,
// Crosslink Vote
pub shard: u64,
pub previous_crosslink: Crosslink,
pub crosslink_data_root: Hash256, pub crosslink_data_root: Hash256,
pub latest_crosslink: Crosslink,
pub justified_epoch: Epoch,
pub justified_block_root: Hash256,
} }
impl Eq for AttestationData {} impl Eq for AttestationData {}
@ -38,29 +44,6 @@ impl Eq for AttestationData {}
#[cfg(test)] #[cfg(test)]
mod tests { mod tests {
use super::*; use super::*;
use crate::test_utils::{SeedableRng, TestRandom, XorShiftRng};
use ssz::{ssz_encode, Decodable, TreeHash};
#[test] ssz_tests!(AttestationData);
pub fn test_ssz_round_trip() {
let mut rng = XorShiftRng::from_seed([42; 16]);
let original = AttestationData::random_for_test(&mut rng);
let bytes = ssz_encode(&original);
let (decoded, _) = <_>::ssz_decode(&bytes, 0).unwrap();
assert_eq!(original, decoded);
}
#[test]
pub fn test_hash_tree_root_internal() {
let mut rng = XorShiftRng::from_seed([42; 16]);
let original = AttestationData::random_for_test(&mut rng);
let result = original.hash_tree_root_internal();
assert_eq!(result.len(), 32);
// TODO: Add further tests
// https://github.com/sigp/lighthouse/issues/170
}
} }

View File

@ -6,7 +6,7 @@ use ssz_derive::{Decode, Encode, TreeHash};
/// Used for pairing an attestation with a proof-of-custody. /// Used for pairing an attestation with a proof-of-custody.
/// ///
/// Spec v0.4.0 /// Spec v0.5.0
#[derive(Debug, Clone, PartialEq, Default, Serialize, Encode, Decode, TreeHash)] #[derive(Debug, Clone, PartialEq, Default, Serialize, Encode, Decode, TreeHash)]
pub struct AttestationDataAndCustodyBit { pub struct AttestationDataAndCustodyBit {
pub data: AttestationData, pub data: AttestationData,
@ -25,31 +25,6 @@ impl<T: RngCore> TestRandom<T> for AttestationDataAndCustodyBit {
#[cfg(test)] #[cfg(test)]
mod test { mod test {
use super::*; use super::*;
use crate::test_utils::{SeedableRng, TestRandom, XorShiftRng};
use ssz::{ssz_encode, Decodable, TreeHash};
#[test] ssz_tests!(AttestationDataAndCustodyBit);
pub fn test_ssz_round_trip() {
let mut rng = XorShiftRng::from_seed([42; 16]);
let original = AttestationDataAndCustodyBit::random_for_test(&mut rng);
let bytes = ssz_encode(&original);
let (decoded, _) = <_>::ssz_decode(&bytes, 0).unwrap();
assert_eq!(original, decoded);
}
#[test]
pub fn test_hash_tree_root_internal() {
let mut rng = XorShiftRng::from_seed([42; 16]);
let original = AttestationDataAndCustodyBit::random_for_test(&mut rng);
let result = original.hash_tree_root_internal();
assert_eq!(result.len(), 32);
// TODO: Add further tests
// https://github.com/sigp/lighthouse/issues/170
}
} }

View File

@ -0,0 +1,9 @@
use crate::*;
use serde_derive::{Deserialize, Serialize};
/// A validator's attestation duty: the slot, shard and committee position it is assigned to.
#[derive(Debug, PartialEq, Clone, Default, Serialize, Deserialize)]
pub struct AttestationDuty {
    /// Slot of the duty.
    pub slot: Slot,
    /// Shard of the duty.
    pub shard: Shard,
    /// The validator's position within its committee.
    pub committee_index: usize,
}

View File

@ -1,17 +1,13 @@
use crate::{test_utils::TestRandom, SlashableAttestation}; use crate::{test_utils::TestRandom, SlashableAttestation};
use rand::RngCore; use rand::RngCore;
use serde_derive::Serialize; use serde_derive::{Deserialize, Serialize};
use ssz_derive::{Decode, Encode, TreeHash}; use ssz_derive::{Decode, Encode, TreeHash};
use test_random_derive::TestRandom; use test_random_derive::TestRandom;
mod builder;
pub use builder::AttesterSlashingBuilder;
/// Two conflicting attestations. /// Two conflicting attestations.
/// ///
/// Spec v0.4.0 /// Spec v0.5.0
#[derive(Debug, PartialEq, Clone, Serialize, Encode, Decode, TreeHash, TestRandom)] #[derive(Debug, PartialEq, Clone, Serialize, Deserialize, Encode, Decode, TreeHash, TestRandom)]
pub struct AttesterSlashing { pub struct AttesterSlashing {
pub slashable_attestation_1: SlashableAttestation, pub slashable_attestation_1: SlashableAttestation,
pub slashable_attestation_2: SlashableAttestation, pub slashable_attestation_2: SlashableAttestation,
@ -20,29 +16,6 @@ pub struct AttesterSlashing {
#[cfg(test)] #[cfg(test)]
mod tests { mod tests {
use super::*; use super::*;
use crate::test_utils::{SeedableRng, TestRandom, XorShiftRng};
use ssz::{ssz_encode, Decodable, TreeHash};
#[test] ssz_tests!(AttesterSlashing);
pub fn test_ssz_round_trip() {
let mut rng = XorShiftRng::from_seed([42; 16]);
let original = AttesterSlashing::random_for_test(&mut rng);
let bytes = ssz_encode(&original);
let (decoded, _) = <_>::ssz_decode(&bytes, 0).unwrap();
assert_eq!(original, decoded);
}
#[test]
pub fn test_hash_tree_root_internal() {
let mut rng = XorShiftRng::from_seed([42; 16]);
let original = AttesterSlashing::random_for_test(&mut rng);
let result = original.hash_tree_root_internal();
assert_eq!(result.len(), 32);
// TODO: Add further tests
// https://github.com/sigp/lighthouse/issues/170
}
} }

View File

@ -1,40 +1,50 @@
use crate::test_utils::TestRandom; use crate::test_utils::TestRandom;
use crate::{BeaconBlockBody, ChainSpec, Eth1Data, Hash256, Slot}; use crate::*;
use bls::Signature; use bls::Signature;
use rand::RngCore; use rand::RngCore;
use serde_derive::Serialize; use serde_derive::{Deserialize, Serialize};
use ssz::TreeHash; use ssz::TreeHash;
use ssz_derive::{Decode, Encode, SignedRoot, TreeHash}; use ssz_derive::{Decode, Encode, SignedRoot, TreeHash};
use test_random_derive::TestRandom; use test_random_derive::TestRandom;
/// A block of the `BeaconChain`. /// A block of the `BeaconChain`.
/// ///
/// Spec v0.4.0 /// Spec v0.5.0
#[derive(Debug, PartialEq, Clone, Serialize, Encode, Decode, TreeHash, TestRandom, SignedRoot)] #[derive(
Debug,
PartialEq,
Clone,
Serialize,
Deserialize,
Encode,
Decode,
TreeHash,
TestRandom,
SignedRoot,
)]
pub struct BeaconBlock { pub struct BeaconBlock {
pub slot: Slot, pub slot: Slot,
pub parent_root: Hash256, pub previous_block_root: Hash256,
pub state_root: Hash256, pub state_root: Hash256,
pub randao_reveal: Signature,
pub eth1_data: Eth1Data,
pub body: BeaconBlockBody, pub body: BeaconBlockBody,
pub signature: Signature, pub signature: Signature,
} }
impl BeaconBlock { impl BeaconBlock {
/// Produce the first block of the Beacon Chain. /// Returns an empty block to be used during genesis.
pub fn genesis(state_root: Hash256, spec: &ChainSpec) -> BeaconBlock { ///
/// Spec v0.5.0
pub fn empty(spec: &ChainSpec) -> BeaconBlock {
BeaconBlock { BeaconBlock {
slot: spec.genesis_slot, slot: spec.genesis_slot,
parent_root: spec.zero_hash, previous_block_root: spec.zero_hash,
state_root, state_root: spec.zero_hash,
randao_reveal: spec.empty_signature.clone(),
eth1_data: Eth1Data {
deposit_root: spec.zero_hash,
block_hash: spec.zero_hash,
},
signature: spec.empty_signature.clone(),
body: BeaconBlockBody { body: BeaconBlockBody {
randao_reveal: spec.empty_signature.clone(),
eth1_data: Eth1Data {
deposit_root: spec.zero_hash,
block_hash: spec.zero_hash,
},
proposer_slashings: vec![], proposer_slashings: vec![],
attester_slashings: vec![], attester_slashings: vec![],
attestations: vec![], attestations: vec![],
@ -42,41 +52,50 @@ impl BeaconBlock {
voluntary_exits: vec![], voluntary_exits: vec![],
transfers: vec![], transfers: vec![],
}, },
signature: spec.empty_signature.clone(),
} }
} }
/// Returns the `hash_tree_root` of the block. /// Returns the `hash_tree_root` of the block.
///
/// Spec v0.5.0
pub fn canonical_root(&self) -> Hash256 { pub fn canonical_root(&self) -> Hash256 {
Hash256::from_slice(&self.hash_tree_root()[..]) Hash256::from_slice(&self.hash_tree_root()[..])
} }
/// Returns a full `BeaconBlockHeader` of this block.
///
/// Note: This method is used instead of an `Into` impl to avoid a `Clone` of an entire block
/// when you want to have the block _and_ the header.
///
/// Note: performs a full tree-hash of `self.body`.
///
/// Spec v0.5.0
pub fn into_header(&self) -> BeaconBlockHeader {
BeaconBlockHeader {
slot: self.slot,
previous_block_root: self.previous_block_root,
state_root: self.state_root,
block_body_root: Hash256::from_slice(&self.body.hash_tree_root()[..]),
signature: self.signature.clone(),
}
}
/// Returns a "temporary" header, where the `state_root` is `spec.zero_hash`.
///
/// Spec v0.5.0
pub fn into_temporary_header(&self, spec: &ChainSpec) -> BeaconBlockHeader {
BeaconBlockHeader {
state_root: spec.zero_hash,
signature: spec.empty_signature.clone(),
..self.into_header()
}
}
} }
#[cfg(test)] #[cfg(test)]
mod tests { mod tests {
use super::*; use super::*;
use crate::test_utils::{SeedableRng, TestRandom, XorShiftRng};
use ssz::{ssz_encode, Decodable, TreeHash};
#[test] ssz_tests!(BeaconBlock);
pub fn test_ssz_round_trip() {
let mut rng = XorShiftRng::from_seed([42; 16]);
let original = BeaconBlock::random_for_test(&mut rng);
let bytes = ssz_encode(&original);
let (decoded, _) = <_>::ssz_decode(&bytes, 0).unwrap();
assert_eq!(original, decoded);
}
#[test]
pub fn test_hash_tree_root_internal() {
let mut rng = XorShiftRng::from_seed([42; 16]);
let original = BeaconBlock::random_for_test(&mut rng);
let result = original.hash_tree_root_internal();
assert_eq!(result.len(), 32);
// TODO: Add further tests
// https://github.com/sigp/lighthouse/issues/170
}
} }

View File

@ -1,15 +1,17 @@
use super::{Attestation, AttesterSlashing, Deposit, ProposerSlashing, Transfer, VoluntaryExit};
use crate::test_utils::TestRandom; use crate::test_utils::TestRandom;
use crate::*;
use rand::RngCore; use rand::RngCore;
use serde_derive::Serialize; use serde_derive::{Deserialize, Serialize};
use ssz_derive::{Decode, Encode, TreeHash}; use ssz_derive::{Decode, Encode, TreeHash};
use test_random_derive::TestRandom; use test_random_derive::TestRandom;
/// The body of a `BeaconChain` block, containing operations. /// The body of a `BeaconChain` block, containing operations.
/// ///
/// Spec v0.4.0 /// Spec v0.5.0
#[derive(Debug, PartialEq, Clone, Default, Serialize, Encode, Decode, TreeHash, TestRandom)] #[derive(Debug, PartialEq, Clone, Serialize, Deserialize, Encode, Decode, TreeHash, TestRandom)]
pub struct BeaconBlockBody { pub struct BeaconBlockBody {
pub randao_reveal: Signature,
pub eth1_data: Eth1Data,
pub proposer_slashings: Vec<ProposerSlashing>, pub proposer_slashings: Vec<ProposerSlashing>,
pub attester_slashings: Vec<AttesterSlashing>, pub attester_slashings: Vec<AttesterSlashing>,
pub attestations: Vec<Attestation>, pub attestations: Vec<Attestation>,
@ -21,29 +23,6 @@ pub struct BeaconBlockBody {
#[cfg(test)] #[cfg(test)]
mod tests { mod tests {
use super::*; use super::*;
use crate::test_utils::{SeedableRng, TestRandom, XorShiftRng};
use ssz::{ssz_encode, Decodable, TreeHash};
#[test] ssz_tests!(BeaconBlockBody);
pub fn test_ssz_round_trip() {
let mut rng = XorShiftRng::from_seed([42; 16]);
let original = BeaconBlockBody::random_for_test(&mut rng);
let bytes = ssz_encode(&original);
let (decoded, _) = <_>::ssz_decode(&bytes, 0).unwrap();
assert_eq!(original, decoded);
}
#[test]
pub fn test_hash_tree_root_internal() {
let mut rng = XorShiftRng::from_seed([42; 16]);
let original = BeaconBlockBody::random_for_test(&mut rng);
let result = original.hash_tree_root_internal();
assert_eq!(result.len(), 32);
// TODO: Add further tests
// https://github.com/sigp/lighthouse/issues/170
}
} }

View File

@ -0,0 +1,47 @@
use crate::test_utils::TestRandom;
use crate::*;
use bls::Signature;
use rand::RngCore;
use serde_derive::{Deserialize, Serialize};
use ssz::TreeHash;
use ssz_derive::{Decode, Encode, SignedRoot, TreeHash};
use test_random_derive::TestRandom;
/// A header of a `BeaconBlock`.
///
/// Spec v0.5.0
#[derive(
    Debug,
    PartialEq,
    Clone,
    Serialize,
    Deserialize,
    Encode,
    Decode,
    TreeHash,
    TestRandom,
    SignedRoot,
)]
pub struct BeaconBlockHeader {
    /// Slot of the block this header summarises.
    pub slot: Slot,
    /// Root of the parent block.
    pub previous_block_root: Hash256,
    /// State root for the block (zeroed in a "temporary" header).
    pub state_root: Hash256,
    /// `hash_tree_root` of the block's body (see `BeaconBlock::into_header`).
    pub block_body_root: Hash256,
    /// The block signature (copied from the block; empty in a "temporary" header).
    pub signature: Signature,
}
impl BeaconBlockHeader {
    /// Returns the `hash_tree_root` of the header.
    ///
    /// Spec v0.5.0
    pub fn canonical_root(&self) -> Hash256 {
        // Tree-hash the header, then re-interpret the 32 bytes as a `Hash256`.
        let root_bytes = self.hash_tree_root();
        Hash256::from_slice(&root_bytes)
    }
}
#[cfg(test)]
mod tests {
    use super::*;

    // SSZ encode/decode round-trip and tree-hash tests generated by the `ssz_tests!` macro.
    ssz_tests!(BeaconBlockHeader);
}

File diff suppressed because it is too large Load Diff

View File

@ -1,263 +0,0 @@
use crate::*;
use bls::create_proof_of_possession;
/// Builds a `BeaconState` for use in testing or benchmarking.
///
/// Building the `BeaconState` is a four step process:
///
/// 1. Create a new `BeaconStateBuilder`.
/// 2. Call `Self::build()` or `Self::build_fast()` to generate a `BeaconState`.
/// 3. (Optional) Use builder functions to modify the `BeaconState`.
/// 4. Call `Self::cloned_state()` to obtain a `BeaconState` cloned from this struct.
///
/// Step (2) happens prior to step (3) because some functionality requires an existing
/// `BeaconState`.
///
/// Step (4) produces a clone of the BeaconState and doesn't consume the `BeaconStateBuilder` to
/// allow access to `self.keypairs` and `self.spec`.
pub struct BeaconStateBuilder {
    /// Number of validators created at genesis.
    pub validator_count: usize,
    /// The built state; `None` until `build()`/`build_fast()` has run.
    pub state: Option<BeaconState>,
    /// Genesis time passed to `BeaconState::genesis` (fixed arbitrary value).
    pub genesis_time: u64,
    /// Eth1 data recorded at genesis (zeroed roots).
    pub latest_eth1_data: Eth1Data,
    /// Chain spec used for all operations (`ChainSpec::foundation()`).
    pub spec: ChainSpec,
    /// Keypairs for the validators; all identical after `build_fast()`.
    pub keypairs: Vec<Keypair>,
}
impl BeaconStateBuilder {
/// Create a new builder with the given number of validators.
pub fn new(validator_count: usize) -> Self {
let genesis_time = 10_000_000;
let latest_eth1_data = Eth1Data {
deposit_root: Hash256::zero(),
block_hash: Hash256::zero(),
};
let spec = ChainSpec::foundation();
Self {
validator_count,
state: None,
genesis_time,
latest_eth1_data,
spec,
keypairs: vec![],
}
}
/// Builds a `BeaconState` using the `BeaconState::genesis(..)` function.
///
/// Each validator is assigned a unique, randomly-generated keypair and all
/// proof-of-possessions are verified during genesis.
pub fn build(&mut self) -> Result<(), BeaconStateError> {
self.keypairs = (0..self.validator_count)
.collect::<Vec<usize>>()
.iter()
.map(|_| Keypair::random())
.collect();
let initial_validator_deposits = self
.keypairs
.iter()
.map(|keypair| Deposit {
branch: vec![], // branch verification is not specified.
index: 0, // index verification is not specified.
deposit_data: DepositData {
amount: 32_000_000_000, // 32 ETH (in Gwei)
timestamp: self.genesis_time - 1,
deposit_input: DepositInput {
pubkey: keypair.pk.clone(),
withdrawal_credentials: Hash256::zero(), // Withdrawal not possible.
proof_of_possession: create_proof_of_possession(&keypair),
},
},
})
.collect();
let state = BeaconState::genesis(
self.genesis_time,
initial_validator_deposits,
self.latest_eth1_data.clone(),
&self.spec,
)?;
self.state = Some(state);
Ok(())
}
/// Builds a `BeaconState` using the `BeaconState::genesis(..)` function, without supplying any
/// validators. Instead validators are added to the state post-genesis.
///
/// One keypair is randomly generated and all validators are assigned this same keypair.
/// Proof-of-possessions are not created (or validated).
///
/// This function runs orders of magnitude faster than `Self::build()`, however it will be
/// erroneous for functions which use a validators public key as an identifier (e.g.,
/// deposits).
pub fn build_fast(&mut self) -> Result<(), BeaconStateError> {
let common_keypair = Keypair::random();
let mut validator_registry = Vec::with_capacity(self.validator_count);
let mut validator_balances = Vec::with_capacity(self.validator_count);
self.keypairs = Vec::with_capacity(self.validator_count);
for _ in 0..self.validator_count {
self.keypairs.push(common_keypair.clone());
validator_balances.push(32_000_000_000);
validator_registry.push(Validator {
pubkey: common_keypair.pk.clone(),
withdrawal_credentials: Hash256::zero(),
activation_epoch: self.spec.genesis_epoch,
..Validator::default()
})
}
let state = BeaconState {
validator_registry,
validator_balances,
..BeaconState::genesis(
self.genesis_time,
vec![],
self.latest_eth1_data.clone(),
&self.spec,
)?
};
self.state = Some(state);
Ok(())
}
/// Sets the `BeaconState` to be in the last slot of the given epoch.
///
/// Sets all justification/finalization parameters to be be as "perfect" as possible (i.e.,
/// highest justified and finalized slots, full justification bitfield, etc).
pub fn teleport_to_end_of_epoch(&mut self, epoch: Epoch) {
let state = self.state.as_mut().expect("Genesis required");
let slot = epoch.end_slot(self.spec.slots_per_epoch);
state.slot = slot;
state.validator_registry_update_epoch = epoch - 1;
state.previous_shuffling_epoch = epoch - 1;
state.current_shuffling_epoch = epoch;
state.previous_shuffling_seed = Hash256::from_low_u64_le(0);
state.current_shuffling_seed = Hash256::from_low_u64_le(1);
state.previous_justified_epoch = epoch - 2;
state.justified_epoch = epoch - 1;
state.justification_bitfield = u64::max_value();
state.finalized_epoch = epoch - 1;
}
/// Creates a full set of attestations for the `BeaconState`. Each attestation has full
/// participation from its committee and references the expected beacon_block hashes.
///
/// These attestations should be fully conducive to justification and finalization.
pub fn insert_attestations(&mut self) {
let state = self.state.as_mut().expect("Genesis required");
// The committee lookups below require both the previous- and current-epoch caches.
state
.build_epoch_cache(RelativeEpoch::Previous, &self.spec)
.unwrap();
state
.build_epoch_cache(RelativeEpoch::Current, &self.spec)
.unwrap();
let current_epoch = state.current_epoch(&self.spec);
let previous_epoch = state.previous_epoch(&self.spec);
// NOTE(review): this measures the state slot against the *end* slot of the current
// epoch, so it is zero when `state.slot` sits on the epoch's final slot (e.g. after
// `teleport_to_end_of_epoch`). For any earlier slot the subtraction would go
// negative -- presumably `Slot` arithmetic saturates, or `start_slot` was intended;
// TODO confirm.
let current_epoch_depth =
(state.slot - current_epoch.end_slot(self.spec.slots_per_epoch)).as_usize();
// Attest for every slot of the previous epoch, plus the first `current_epoch_depth`
// slots of the current epoch.
let previous_epoch_slots = previous_epoch.slot_iter(self.spec.slots_per_epoch);
let current_epoch_slots = current_epoch
.slot_iter(self.spec.slots_per_epoch)
.take(current_epoch_depth);
for slot in previous_epoch_slots.chain(current_epoch_slots) {
// Clone so the immutable borrow of `state` is released before the mutation below.
let committees = state
.get_crosslink_committees_at_slot(slot, &self.spec)
.unwrap()
.clone();
// One fully-participating pending attestation per committee at this slot.
for (committee, shard) in committees {
state
.latest_attestations
.push(committee_to_pending_attestation(
state, &committee, shard, slot, &self.spec,
))
}
}
}
/// Returns a copy of the internal `BeaconState`.
///
/// Panics if genesis has not yet been performed.
pub fn cloned_state(&self) -> BeaconState {
    match &self.state {
        Some(state) => state.clone(),
        None => panic!("Genesis required"),
    }
}
}
/// Builds a valid PendingAttestation with full participation for some committee.
fn committee_to_pending_attestation(
state: &BeaconState,
committee: &[usize],
shard: u64,
slot: Slot,
spec: &ChainSpec,
) -> PendingAttestation {
    // Every committee member participates and asserts custody.
    let mut aggregation_bitfield = Bitfield::new();
    let mut custody_bitfield = Bitfield::new();
    for bit in 0..committee.len() {
        aggregation_bitfield.set(bit, true);
        custody_bitfield.set(bit, true);
    }

    // The attestation targets the previous epoch when its slot's epoch differs
    // from the state's epoch.
    let attestation_epoch = slot.epoch(spec.slots_per_epoch);
    let is_previous_epoch = state.slot.epoch(spec.slots_per_epoch) != attestation_epoch;

    // Pick the justified epoch and the epoch-boundary epoch in one step.
    let (justified_epoch, boundary_epoch) = if is_previous_epoch {
        (state.previous_justified_epoch, state.previous_epoch(spec))
    } else {
        (state.justified_epoch, state.current_epoch(spec))
    };

    let epoch_boundary_root = *state
        .get_block_root(boundary_epoch.start_slot(spec.slots_per_epoch), spec)
        .unwrap();
    let justified_block_root = *state
        .get_block_root(justified_epoch.start_slot(spec.slots_per_epoch), spec)
        .unwrap();

    PendingAttestation {
        aggregation_bitfield,
        data: AttestationData {
            slot,
            shard,
            beacon_block_root: *state.get_block_root(slot, spec).unwrap(),
            epoch_boundary_root,
            crosslink_data_root: Hash256::zero(),
            latest_crosslink: Crosslink {
                epoch: attestation_epoch,
                crosslink_data_root: Hash256::zero(),
            },
            justified_epoch,
            justified_block_root,
        },
        custody_bitfield,
        inclusion_slot: slot,
    }
}

View File

@ -1,84 +1,305 @@
use super::{AttestationDutyMap, BeaconState, CrosslinkCommittees, Error, ShardCommitteeIndexMap}; use super::BeaconState;
use crate::{ChainSpec, Epoch}; use crate::*;
use log::trace; use honey_badger_split::SplitExt;
use serde_derive::Serialize; use serde_derive::{Deserialize, Serialize};
use std::collections::HashMap; use swap_or_not_shuffle::shuffle_list;
#[derive(Debug, PartialEq, Clone, Serialize)] #[derive(Debug, PartialEq)]
pub enum Error {
UnableToShuffle,
UnableToGenerateSeed,
}
mod tests;
#[derive(Debug, Default, PartialEq, Clone, Serialize, Deserialize)]
pub struct EpochCache { pub struct EpochCache {
/// True if this cache has been initialized. /// `Some(epoch)` if the cache is initialized, where `epoch` is the cache it holds.
pub initialized: bool, pub initialized_epoch: Option<Epoch>,
/// The crosslink committees for an epoch. /// All crosslink committees for an epoch.
pub committees: Vec<CrosslinkCommittees>, pub epoch_crosslink_committees: EpochCrosslinkCommittees,
/// Maps validator index to a slot, shard and committee index for attestation. /// Maps validator index to a slot, shard and committee index for attestation.
pub attestation_duty_map: AttestationDutyMap, pub attestation_duties: Vec<Option<AttestationDuty>>,
/// Maps a shard to an index of `self.committees`. /// Maps a shard to an index of `self.committees`.
pub shard_committee_index_map: ShardCommitteeIndexMap, pub shard_committee_indices: Vec<Option<(Slot, usize)>>,
/// Indices of all active validators in the epoch
pub active_validator_indices: Vec<usize>,
} }
impl EpochCache { impl EpochCache {
pub fn empty() -> EpochCache { /// Return a new, fully initialized cache.
EpochCache {
initialized: false,
committees: vec![],
attestation_duty_map: AttestationDutyMap::new(),
shard_committee_index_map: ShardCommitteeIndexMap::new(),
}
}
pub fn initialized( pub fn initialized(
state: &BeaconState, state: &BeaconState,
epoch: Epoch, relative_epoch: RelativeEpoch,
spec: &ChainSpec, spec: &ChainSpec,
) -> Result<EpochCache, Error> { ) -> Result<EpochCache, Error> {
let mut epoch_committees: Vec<CrosslinkCommittees> = let epoch = relative_epoch.into_epoch(state.slot.epoch(spec.slots_per_epoch));
Vec::with_capacity(spec.slots_per_epoch as usize);
let mut attestation_duty_map: AttestationDutyMap = HashMap::new();
let mut shard_committee_index_map: ShardCommitteeIndexMap = HashMap::new();
let shuffling = let active_validator_indices =
state.get_shuffling_for_slot(epoch.start_slot(spec.slots_per_epoch), false, spec)?; get_active_validator_indices(&state.validator_registry, epoch);
for (epoch_committeess_index, slot) in epoch.slot_iter(spec.slots_per_epoch).enumerate() { let builder = match relative_epoch {
let slot_committees = state.calculate_crosslink_committees_at_slot( RelativeEpoch::Previous => EpochCrosslinkCommitteesBuilder::for_previous_epoch(
slot, state,
false, active_validator_indices.clone(),
shuffling.clone(),
spec, spec,
)?; ),
RelativeEpoch::Current => EpochCrosslinkCommitteesBuilder::for_current_epoch(
state,
active_validator_indices.clone(),
spec,
),
RelativeEpoch::NextWithRegistryChange => {
EpochCrosslinkCommitteesBuilder::for_next_epoch(
state,
active_validator_indices.clone(),
true,
spec,
)?
}
RelativeEpoch::NextWithoutRegistryChange => {
EpochCrosslinkCommitteesBuilder::for_next_epoch(
state,
active_validator_indices.clone(),
false,
spec,
)?
}
};
let epoch_crosslink_committees = builder.build(spec)?;
for (slot_committees_index, (committee, shard)) in slot_committees.iter().enumerate() { // Loop through all the validators in the committees and create the following maps:
// Empty committees are not permitted. //
if committee.is_empty() { // 1. `attestation_duties`: maps `ValidatorIndex` to `AttestationDuty`.
return Err(Error::InsufficientValidators); // 2. `shard_committee_indices`: maps `Shard` into a `CrosslinkCommittee` in
} // `EpochCrosslinkCommittees`.
let mut attestation_duties = vec![None; state.validator_registry.len()];
let mut shard_committee_indices = vec![None; spec.shard_count as usize];
for (i, slot_committees) in epoch_crosslink_committees
.crosslink_committees
.iter()
.enumerate()
{
let slot = epoch.start_slot(spec.slots_per_epoch) + i as u64;
trace!( for (j, crosslink_committee) in slot_committees.iter().enumerate() {
"shard: {}, epoch_i: {}, slot_i: {}", let shard = crosslink_committee.shard;
shard,
epoch_committeess_index,
slot_committees_index
);
shard_committee_index_map shard_committee_indices[shard as usize] = Some((slot, j));
.insert(*shard, (epoch_committeess_index, slot_committees_index));
for (committee_index, validator_index) in committee.iter().enumerate() { for (k, validator_index) in crosslink_committee.committee.iter().enumerate() {
attestation_duty_map.insert( let attestation_duty = AttestationDuty {
*validator_index as u64, slot,
(slot, *shard, committee_index as u64), shard,
); committee_index: k,
};
attestation_duties[*validator_index] = Some(attestation_duty)
} }
} }
epoch_committees.push(slot_committees)
} }
Ok(EpochCache { Ok(EpochCache {
initialized: true, initialized_epoch: Some(epoch),
committees: epoch_committees, epoch_crosslink_committees,
attestation_duty_map, attestation_duties,
shard_committee_index_map, shard_committee_indices,
active_validator_indices,
}) })
} }
/// Returns the crosslink committees active at `slot`, or `None` when `slot`
/// lies outside the cached epoch.
pub fn get_crosslink_committees_at_slot(
    &self,
    slot: Slot,
    spec: &ChainSpec,
) -> Option<&Vec<CrosslinkCommittee>> {
    let committees = &self.epoch_crosslink_committees;
    committees.get_crosslink_committees_at_slot(slot, spec)
}
/// Returns the crosslink committee assigned to `shard`, if any.
///
/// Returns `None` when `shard` is out of range, or when no committee was
/// assigned to that shard in the cached epoch.
pub fn get_crosslink_committee_for_shard(
    &self,
    shard: Shard,
    spec: &ChainSpec,
) -> Option<&CrosslinkCommittee> {
    // `>=` (was `>`): `shard == len` is already out of bounds; the original
    // comparison let it fall through to an out-of-bounds index panic.
    if shard >= self.shard_committee_indices.len() as u64 {
        None
    } else {
        let (slot, committee) = self.shard_committee_indices[shard as usize]?;
        let slot_committees = self.get_crosslink_committees_at_slot(slot, spec)?;
        slot_committees.get(committee)
    }
}
}
/// Returns the registry indices of all validators that are active at `epoch`.
pub fn get_active_validator_indices(validators: &[Validator], epoch: Epoch) -> Vec<usize> {
    validators
        .iter()
        .enumerate()
        .filter(|(_, validator)| validator.is_active_at(epoch))
        .map(|(index, _)| index)
        .collect()
}
/// The crosslink committees for every slot of a single epoch.
#[derive(Debug, Default, PartialEq, Clone, Serialize, Deserialize)]
pub struct EpochCrosslinkCommittees {
// The epoch these committees were computed for.
epoch: Epoch,
/// Committees indexed by slot offset within the epoch (one inner `Vec` per slot).
pub crosslink_committees: Vec<Vec<CrosslinkCommittee>>,
}
impl EpochCrosslinkCommittees {
    /// Returns an instance with an empty committee list for each slot of `epoch`.
    fn new(epoch: Epoch, spec: &ChainSpec) -> Self {
        let empty_slots = vec![Vec::new(); spec.slots_per_epoch as usize];
        Self {
            epoch,
            crosslink_committees: empty_slots,
        }
    }

    /// Returns the committees for `slot`, or `None` if `slot` lies outside this epoch.
    fn get_crosslink_committees_at_slot(
        &self,
        slot: Slot,
        spec: &ChainSpec,
    ) -> Option<&Vec<CrosslinkCommittee>> {
        let epoch_start_slot = self.epoch.start_slot(spec.slots_per_epoch);
        let epoch_end_slot = self.epoch.end_slot(spec.slots_per_epoch);

        // Reject any slot outside the inclusive range `[start, end]`.
        if slot < epoch_start_slot || slot > epoch_end_slot {
            return None;
        }

        let offset = slot - epoch_start_slot;
        self.crosslink_committees.get(offset.as_usize())
    }
}
/// Accumulates the inputs required to compute an epoch's crosslink committees.
pub struct EpochCrosslinkCommitteesBuilder {
// The epoch the committees are being built for.
epoch: Epoch,
// The first shard that committees are assigned to.
shuffling_start_shard: Shard,
// The seed used to shuffle the active validator indices.
shuffling_seed: Hash256,
// Indices of all validators active in `epoch`.
active_validator_indices: Vec<usize>,
// Total number of committees spread across the whole epoch.
committees_per_epoch: u64,
}
impl EpochCrosslinkCommitteesBuilder {
/// Configures the builder to produce committees for the state's previous epoch,
/// using the previous shuffling seed and start shard recorded in the state.
pub fn for_previous_epoch(
    state: &BeaconState,
    active_validator_indices: Vec<usize>,
    spec: &ChainSpec,
) -> Self {
    // Compute the committee count before `active_validator_indices` is moved.
    let committees_per_epoch = spec.get_epoch_committee_count(active_validator_indices.len());

    Self {
        epoch: state.previous_epoch(spec),
        shuffling_start_shard: state.previous_shuffling_start_shard,
        shuffling_seed: state.previous_shuffling_seed,
        active_validator_indices,
        committees_per_epoch,
    }
}
/// Configures the builder to produce committees for the state's current epoch,
/// using the current shuffling seed and start shard recorded in the state.
pub fn for_current_epoch(
    state: &BeaconState,
    active_validator_indices: Vec<usize>,
    spec: &ChainSpec,
) -> Self {
    // Compute the committee count before `active_validator_indices` is moved.
    let committees_per_epoch = spec.get_epoch_committee_count(active_validator_indices.len());

    Self {
        epoch: state.current_epoch(spec),
        shuffling_start_shard: state.current_shuffling_start_shard,
        shuffling_seed: state.current_shuffling_seed,
        active_validator_indices,
        committees_per_epoch,
    }
}
/// Configures the builder to produce committees for the epoch after the state's
/// current epoch, optionally assuming a validator registry change takes place.
///
/// Returns an error if the seed for the next epoch cannot be generated.
pub fn for_next_epoch(
    state: &BeaconState,
    active_validator_indices: Vec<usize>,
    registry_change: bool,
    spec: &ChainSpec,
) -> Result<Self, Error> {
    let current_epoch = state.current_epoch(spec);
    let next_epoch = state.next_epoch(spec);
    let committees_per_epoch = spec.get_epoch_committee_count(active_validator_indices.len());
    let epochs_since_last_registry_update =
        current_epoch - state.validator_registry_update_epoch;

    // A registry change re-seeds and advances the start shard; a "stale" registry
    // (a power-of-two number of epochs since the last update) re-seeds but keeps
    // the start shard; otherwise the current seed/shard carry over unchanged.
    let (seed, shuffling_start_shard) = if registry_change {
        let next_seed = state
            .generate_seed(next_epoch, spec)
            .map_err(|_| Error::UnableToGenerateSeed)?;
        (
            next_seed,
            (state.current_shuffling_start_shard + committees_per_epoch) % spec.shard_count,
        )
    } else if (epochs_since_last_registry_update > 1)
        // `&&` (was bitwise `&`): identical result on `bool`s, but idiomatic and
        // short-circuiting.
        && epochs_since_last_registry_update.is_power_of_two()
    {
        let next_seed = state
            .generate_seed(next_epoch, spec)
            .map_err(|_| Error::UnableToGenerateSeed)?;
        (next_seed, state.current_shuffling_start_shard)
    } else {
        (
            state.current_shuffling_seed,
            state.current_shuffling_start_shard,
        )
    };

    Ok(Self {
        // Reuse the `next_epoch` computed above rather than calling
        // `state.next_epoch(spec)` a second time.
        epoch: next_epoch,
        shuffling_start_shard,
        shuffling_seed: seed,
        active_validator_indices,
        committees_per_epoch,
    })
}
pub fn build(self, spec: &ChainSpec) -> Result<EpochCrosslinkCommittees, Error> {
// The shuffler fails on a empty list, so if there are no active validator indices, simply
// return an empty list.
let shuffled_active_validator_indices = if self.active_validator_indices.is_empty() {
vec![]
} else {
shuffle_list(
self.active_validator_indices,
spec.shuffle_round_count,
&self.shuffling_seed[..],
true,
)
.ok_or_else(|| Error::UnableToShuffle)?
};
let mut committees: Vec<Vec<usize>> = shuffled_active_validator_indices
.honey_badger_split(self.committees_per_epoch as usize)
.map(|slice: &[usize]| slice.to_vec())
.collect();
let mut epoch_crosslink_committees = EpochCrosslinkCommittees::new(self.epoch, spec);
let mut shard = self.shuffling_start_shard;
let committees_per_slot = (self.committees_per_epoch / spec.slots_per_epoch) as usize;
for (i, slot) in self.epoch.slot_iter(spec.slots_per_epoch).enumerate() {
for j in (0..committees.len())
.into_iter()
.skip(i * committees_per_slot)
.take(committees_per_slot)
{
let crosslink_committee = CrosslinkCommittee {
slot,
shard,
committee: committees[j].drain(..).collect(),
};
epoch_crosslink_committees.crosslink_committees[i].push(crosslink_committee);
shard += 1;
shard %= spec.shard_count;
}
}
Ok(epoch_crosslink_committees)
}
} }

View File

@ -0,0 +1,142 @@
#![cfg(test)]
use super::*;
use crate::test_utils::*;
use swap_or_not_shuffle::shuffle_list;
/// Asserts that the committee cache of `state` for `epoch` matches an
/// independently-recomputed shuffle of `validator_count` validators.
fn do_sane_cache_test(
    state: BeaconState,
    epoch: Epoch,
    validator_count: usize,
    expected_seed: Hash256,
    expected_shuffling_start: u64,
    spec: &ChainSpec,
) {
    let active_indices: Vec<usize> = (0..validator_count).collect();

    assert_eq!(
        &active_indices[..],
        state.get_active_validator_indices(epoch, &spec).unwrap(),
        "Validator indices mismatch"
    );

    // Recompute the shuffle independently of the cache under test.
    let shuffling = shuffle_list(
        active_indices,
        spec.shuffle_round_count,
        &expected_seed[..],
        true,
    )
    .unwrap();

    let committees_per_epoch = spec.get_epoch_committee_count(shuffling.len());
    let committees_per_slot = committees_per_epoch / spec.slots_per_epoch;

    let mut expected_indices = shuffling.iter();
    let mut expected_shard = expected_shuffling_start;

    for (slot_offset, slot) in epoch.slot_iter(spec.slots_per_epoch).enumerate() {
        let crosslink_committees_at_slot =
            state.get_crosslink_committees_at_slot(slot, &spec).unwrap();

        assert_eq!(
            crosslink_committees_at_slot.len(),
            committees_per_slot as usize,
            "Bad committees per slot ({})",
            slot_offset
        );

        for c in crosslink_committees_at_slot {
            // Shards must advance round-robin from the expected start shard.
            assert_eq!(c.shard, expected_shard, "Bad shard");
            expected_shard += 1;
            expected_shard %= spec.shard_count;

            // Committee members must appear in shuffle order, with no gaps.
            for &validator_index in &c.committee {
                assert_eq!(
                    validator_index,
                    *expected_indices.next().unwrap(),
                    "Non-sequential validators."
                );
            }
        }
    }
}
/// Builds a `BeaconState` four epochs past genesis with deterministic shuffling
/// seeds/start-shards and every epoch cache variant pre-built.
fn setup_sane_cache_test(validator_count: usize, spec: &ChainSpec) -> BeaconState {
    let mut builder =
        TestingBeaconStateBuilder::from_default_keypairs_file_if_exists(validator_count, spec);

    // Move the state to the first slot of `genesis + 4`.
    let epoch = spec.genesis_epoch + 4;
    builder.teleport_to_slot(epoch.start_slot(spec.slots_per_epoch), spec);

    let (mut state, _keypairs) = builder.build();

    // Fixed, distinct seeds and start shards so each epoch's cache is distinguishable.
    state.current_shuffling_start_shard = 0;
    state.current_shuffling_seed = Hash256::from_slice(&[1; 32]);
    state.previous_shuffling_start_shard = spec.shard_count - 1;
    state.previous_shuffling_seed = Hash256::from_slice(&[2; 32]);

    // Pre-build every cache variant.
    for &relative_epoch in &[
        RelativeEpoch::Previous,
        RelativeEpoch::Current,
        RelativeEpoch::NextWithRegistryChange,
        RelativeEpoch::NextWithoutRegistryChange,
    ] {
        state.build_epoch_cache(relative_epoch, spec).unwrap();
    }

    state
}
#[test]
fn builds_sane_current_epoch_cache() {
    let mut spec = ChainSpec::few_validators();
    spec.shard_count = 4;

    // One more validator than fits evenly into the committees.
    let validator_count = (spec.shard_count * spec.target_committee_size) as usize + 1;
    let state = setup_sane_cache_test(validator_count, &spec);

    do_sane_cache_test(
        state.clone(),
        state.current_epoch(&spec),
        validator_count,
        state.current_shuffling_seed,
        state.current_shuffling_start_shard,
        &spec,
    );
}
#[test]
fn builds_sane_previous_epoch_cache() {
    let mut spec = ChainSpec::few_validators();
    spec.shard_count = 2;

    // One more validator than fits evenly into the committees.
    let validator_count = (spec.shard_count * spec.target_committee_size) as usize + 1;
    let state = setup_sane_cache_test(validator_count, &spec);

    do_sane_cache_test(
        state.clone(),
        state.previous_epoch(&spec),
        validator_count,
        state.previous_shuffling_seed,
        state.previous_shuffling_start_shard,
        &spec,
    );
}
#[test]
fn builds_sane_next_without_update_epoch_cache() {
    let mut spec = ChainSpec::few_validators();
    spec.shard_count = 2;

    let validator_count = (spec.shard_count * spec.target_committee_size) as usize + 1;
    let mut state = setup_sane_cache_test(validator_count, &spec);

    // With the registry updated in the current epoch, the next-epoch cache reuses
    // the current seed and start shard.
    state.validator_registry_update_epoch = state.slot.epoch(spec.slots_per_epoch);

    do_sane_cache_test(
        state.clone(),
        state.next_epoch(&spec),
        validator_count,
        state.current_shuffling_seed,
        state.current_shuffling_start_shard,
        &spec,
    );
}

View File

@ -11,7 +11,7 @@ pub fn verify_bitfield_length(bitfield: &Bitfield, committee_size: usize) -> boo
} }
for i in committee_size..(bitfield.num_bytes() * 8) { for i in committee_size..(bitfield.num_bytes() * 8) {
if bitfield.get(i).expect("Impossible due to previous check.") { if bitfield.get(i).unwrap_or(false) {
return false; return false;
} }
} }

View File

@ -0,0 +1,38 @@
use crate::*;
use serde_derive::{Deserialize, Serialize};
use std::collections::HashMap;
type ValidatorIndex = usize;
/// A one-to-one mapping from validator public key to validator registry index.
#[derive(Debug, PartialEq, Clone, Default, Serialize, Deserialize)]
pub struct PubkeyCache {
// Invariant (enforced by `insert`): values are exactly `0..len()`, assigned in
// insertion order with no gaps.
map: HashMap<PublicKey, ValidatorIndex>,
}
impl PubkeyCache {
    /// Returns the number of validator indices already in the map.
    pub fn len(&self) -> ValidatorIndex {
        self.map.len()
    }

    /// Returns `true` if no validator indices have been added to the map.
    ///
    /// (Companion to `len`, added for the conventional `len`/`is_empty` pair.)
    pub fn is_empty(&self) -> bool {
        self.map.is_empty()
    }

    /// Inserts a validator index into the map.
    ///
    /// The added index must equal the number of validators already added to the map. This ensures
    /// that an index is never skipped.
    pub fn insert(&mut self, pubkey: PublicKey, index: ValidatorIndex) -> bool {
        if index == self.map.len() {
            self.map.insert(pubkey, index);
            true
        } else {
            false
        }
    }

    /// Looks up the validator index registered for `pubkey`, if any.
    ///
    /// (The previous doc comment was a copy-paste of `insert`'s; this method only reads.)
    pub fn get(&self, pubkey: &PublicKey) -> Option<ValidatorIndex> {
        // `copied` rather than `cloned`: `ValidatorIndex` is `usize`, which is `Copy`.
        self.map.get(pubkey).copied()
    }
}

View File

@ -1,84 +1,57 @@
#![cfg(test)] #![cfg(test)]
use super::*; use super::*;
use crate::test_utils::{SeedableRng, TestRandom, XorShiftRng}; use crate::test_utils::*;
use crate::{BeaconState, ChainSpec};
use ssz::{ssz_encode, Decodable};
#[test] ssz_tests!(BeaconState);
pub fn can_produce_genesis_block() {
let mut builder = BeaconStateBuilder::new(2);
builder.build().unwrap();
}
/// Tests that `get_attestation_participants` is consistent with the result of /// Test that
/// get_crosslink_committees_at_slot` with a full bitfield. ///
#[test] /// 1. Using the cache before it's built fails.
pub fn get_attestation_participants_consistency() { /// 2. Using the cache after it's build passes.
let mut rng = XorShiftRng::from_seed([42; 16]); /// 3. Using the cache after it's dropped fails.
fn test_cache_initialization<'a>(
state: &'a mut BeaconState,
relative_epoch: RelativeEpoch,
spec: &ChainSpec,
) {
let slot = relative_epoch
.into_epoch(state.slot.epoch(spec.slots_per_epoch))
.start_slot(spec.slots_per_epoch);
let mut builder = BeaconStateBuilder::new(8); // Assuming the cache isn't already built, assert that a call to a cache-using function fails.
builder.spec = ChainSpec::few_validators(); assert_eq!(
state.get_beacon_proposer_index(slot, relative_epoch, spec),
Err(BeaconStateError::EpochCacheUninitialized(relative_epoch))
);
builder.build().unwrap(); // Build the cache.
state.build_epoch_cache(relative_epoch, spec).unwrap();
let mut state = builder.cloned_state(); // Assert a call to a cache-using function passes.
let spec = builder.spec.clone(); let _ = state
.get_beacon_proposer_index(slot, relative_epoch, spec)
state
.build_epoch_cache(RelativeEpoch::Previous, &spec)
.unwrap(); .unwrap();
state
.build_epoch_cache(RelativeEpoch::Current, &spec)
.unwrap();
state.build_epoch_cache(RelativeEpoch::Next, &spec).unwrap();
for slot in state // Drop the cache.
.slot state.drop_cache(relative_epoch);
.epoch(spec.slots_per_epoch)
.slot_iter(spec.slots_per_epoch)
{
let committees = state.get_crosslink_committees_at_slot(slot, &spec).unwrap();
for (committee, shard) in committees { // Assert a call to a cache-using function fail.
let mut attestation_data = AttestationData::random_for_test(&mut rng); assert_eq!(
attestation_data.slot = slot; state.get_beacon_proposer_index(slot, relative_epoch, spec),
attestation_data.shard = *shard; Err(BeaconStateError::EpochCacheUninitialized(relative_epoch))
);
let mut bitfield = Bitfield::new();
for (i, _) in committee.iter().enumerate() {
bitfield.set(i, true);
}
assert_eq!(
state
.get_attestation_participants(&attestation_data, &bitfield, &spec)
.unwrap(),
*committee
);
}
}
} }
#[test] #[test]
pub fn test_ssz_round_trip() { fn cache_initialization() {
let mut rng = XorShiftRng::from_seed([42; 16]); let spec = ChainSpec::few_validators();
let original = BeaconState::random_for_test(&mut rng); let (mut state, _keypairs) =
TestingBeaconStateBuilder::from_default_keypairs_file_if_exists(16, &spec).build();
let bytes = ssz_encode(&original); state.slot = (spec.genesis_epoch + 1).start_slot(spec.slots_per_epoch);
let (decoded, _) = <_>::ssz_decode(&bytes, 0).unwrap();
assert_eq!(original, decoded); test_cache_initialization(&mut state, RelativeEpoch::Previous, &spec);
} test_cache_initialization(&mut state, RelativeEpoch::Current, &spec);
test_cache_initialization(&mut state, RelativeEpoch::NextWithRegistryChange, &spec);
#[test] test_cache_initialization(&mut state, RelativeEpoch::NextWithoutRegistryChange, &spec);
pub fn test_hash_tree_root_internal() {
let mut rng = XorShiftRng::from_seed([42; 16]);
let original = BeaconState::random_for_test(&mut rng);
let result = original.hash_tree_root_internal();
assert_eq!(result.len(), 32);
// TODO: Add further tests
// https://github.com/sigp/lighthouse/issues/170
} }

View File

@ -1,21 +1,27 @@
use crate::{Address, Epoch, Fork, Hash256, Multiaddr, Slot}; use crate::*;
use bls::Signature; use bls::Signature;
use int_to_bytes::int_to_bytes4;
use serde_derive::Deserialize;
const GWEI: u64 = 1_000_000_000; const GWEI: u64 = 1_000_000_000;
/// Each of the BLS signature domains.
///
/// Spec v0.5.0
pub enum Domain { pub enum Domain {
Deposit, BeaconBlock,
Attestation,
Proposal,
Exit,
Randao, Randao,
Attestation,
Deposit,
Exit,
Transfer, Transfer,
} }
/// Holds all the "constants" for a BeaconChain. /// Holds all the "constants" for a BeaconChain.
/// ///
/// Spec v0.4.0 /// Spec v0.5.0
#[derive(PartialEq, Debug, Clone)] #[derive(PartialEq, Debug, Clone, Deserialize)]
#[serde(default)]
pub struct ChainSpec { pub struct ChainSpec {
/* /*
* Misc * Misc
@ -23,8 +29,7 @@ pub struct ChainSpec {
pub shard_count: u64, pub shard_count: u64,
pub target_committee_size: u64, pub target_committee_size: u64,
pub max_balance_churn_quotient: u64, pub max_balance_churn_quotient: u64,
pub beacon_chain_shard_number: u64, pub max_indices_per_slashable_vote: usize,
pub max_indices_per_slashable_vote: u64,
pub max_exit_dequeues_per_epoch: u64, pub max_exit_dequeues_per_epoch: u64,
pub shuffle_round_count: u8, pub shuffle_round_count: u8,
@ -45,7 +50,7 @@ pub struct ChainSpec {
/* /*
* Initial Values * Initial Values
*/ */
pub genesis_fork_version: u64, pub genesis_fork_version: u32,
pub genesis_slot: Slot, pub genesis_slot: Slot,
pub genesis_epoch: Epoch, pub genesis_epoch: Epoch,
pub genesis_start_shard: u64, pub genesis_start_shard: u64,
@ -63,12 +68,13 @@ pub struct ChainSpec {
pub min_seed_lookahead: Epoch, pub min_seed_lookahead: Epoch,
pub activation_exit_delay: u64, pub activation_exit_delay: u64,
pub epochs_per_eth1_voting_period: u64, pub epochs_per_eth1_voting_period: u64,
pub slots_per_historical_root: usize,
pub min_validator_withdrawability_delay: Epoch, pub min_validator_withdrawability_delay: Epoch,
pub persistent_committee_period: u64,
/* /*
* State list lengths * State list lengths
*/ */
pub latest_block_roots_length: usize,
pub latest_randao_mixes_length: usize, pub latest_randao_mixes_length: usize,
pub latest_active_index_roots_length: usize, pub latest_active_index_roots_length: usize,
pub latest_slashed_exit_length: usize, pub latest_slashed_exit_length: usize,
@ -100,12 +106,12 @@ pub struct ChainSpec {
* *
* Use `ChainSpec::get_domain(..)` to access these values. * Use `ChainSpec::get_domain(..)` to access these values.
*/ */
domain_deposit: u64, domain_beacon_block: u32,
domain_attestation: u64, domain_randao: u32,
domain_proposal: u64, domain_attestation: u32,
domain_exit: u64, domain_deposit: u32,
domain_randao: u64, domain_exit: u32,
domain_transfer: u64, domain_transfer: u32,
/* /*
* Network specific parameters * Network specific parameters
@ -130,24 +136,29 @@ impl ChainSpec {
/// Get the domain number that represents the fork meta and signature domain. /// Get the domain number that represents the fork meta and signature domain.
/// ///
/// Spec v0.4.0 /// Spec v0.5.0
pub fn get_domain(&self, epoch: Epoch, domain: Domain, fork: &Fork) -> u64 { pub fn get_domain(&self, epoch: Epoch, domain: Domain, fork: &Fork) -> u64 {
let domain_constant = match domain { let domain_constant = match domain {
Domain::Deposit => self.domain_deposit, Domain::BeaconBlock => self.domain_beacon_block,
Domain::Attestation => self.domain_attestation,
Domain::Proposal => self.domain_proposal,
Domain::Exit => self.domain_exit,
Domain::Randao => self.domain_randao, Domain::Randao => self.domain_randao,
Domain::Attestation => self.domain_attestation,
Domain::Deposit => self.domain_deposit,
Domain::Exit => self.domain_exit,
Domain::Transfer => self.domain_transfer, Domain::Transfer => self.domain_transfer,
}; };
let fork_version = fork.get_fork_version(epoch); let mut bytes: Vec<u8> = fork.get_fork_version(epoch).to_vec();
fork_version * u64::pow(2, 32) + domain_constant bytes.append(&mut int_to_bytes4(domain_constant));
let mut fork_and_domain = [0; 8];
fork_and_domain.copy_from_slice(&bytes);
u64::from_le_bytes(fork_and_domain)
} }
/// Returns a `ChainSpec` compatible with the Ethereum Foundation specification. /// Returns a `ChainSpec` compatible with the Ethereum Foundation specification.
/// ///
/// Spec v0.4.0 /// Spec v0.5.0
pub fn foundation() -> Self { pub fn foundation() -> Self {
let genesis_slot = Slot::new(2_u64.pow(32)); let genesis_slot = Slot::new(2_u64.pow(32));
let slots_per_epoch = 64; let slots_per_epoch = 64;
@ -160,7 +171,6 @@ impl ChainSpec {
shard_count: 1_024, shard_count: 1_024,
target_committee_size: 128, target_committee_size: 128,
max_balance_churn_quotient: 32, max_balance_churn_quotient: 32,
beacon_chain_shard_number: u64::max_value(),
max_indices_per_slashable_vote: 4_096, max_indices_per_slashable_vote: 4_096,
max_exit_dequeues_per_epoch: 4, max_exit_dequeues_per_epoch: 4,
shuffle_round_count: 90, shuffle_round_count: 90,
@ -200,12 +210,13 @@ impl ChainSpec {
min_seed_lookahead: Epoch::new(1), min_seed_lookahead: Epoch::new(1),
activation_exit_delay: 4, activation_exit_delay: 4,
epochs_per_eth1_voting_period: 16, epochs_per_eth1_voting_period: 16,
slots_per_historical_root: 8_192,
min_validator_withdrawability_delay: Epoch::new(256), min_validator_withdrawability_delay: Epoch::new(256),
persistent_committee_period: 2_048,
/* /*
* State list lengths * State list lengths
*/ */
latest_block_roots_length: 8_192,
latest_randao_mixes_length: 8_192, latest_randao_mixes_length: 8_192,
latest_active_index_roots_length: 8_192, latest_active_index_roots_length: 8_192,
latest_slashed_exit_length: 8_192, latest_slashed_exit_length: 8_192,
@ -232,11 +243,11 @@ impl ChainSpec {
/* /*
* Signature domains * Signature domains
*/ */
domain_deposit: 0, domain_beacon_block: 0,
domain_attestation: 1, domain_randao: 1,
domain_proposal: 2, domain_attestation: 2,
domain_exit: 3, domain_deposit: 3,
domain_randao: 4, domain_exit: 4,
domain_transfer: 5, domain_transfer: 5,
/* /*
@ -264,8 +275,6 @@ impl ChainSpec {
} }
/// Returns a `ChainSpec` compatible with the specification suitable for 8 validators. /// Returns a `ChainSpec` compatible with the specification suitable for 8 validators.
///
/// Spec v0.4.0
pub fn few_validators() -> Self { pub fn few_validators() -> Self {
let genesis_slot = Slot::new(2_u64.pow(32)); let genesis_slot = Slot::new(2_u64.pow(32));
let slots_per_epoch = 8; let slots_per_epoch = 8;
@ -282,12 +291,43 @@ impl ChainSpec {
} }
} }
impl Default for ChainSpec {
fn default() -> Self {
Self::foundation()
}
}
#[cfg(test)] #[cfg(test)]
mod tests { mod tests {
use super::*; use super::*;
use int_to_bytes::int_to_bytes8;
#[test] #[test]
fn test_foundation_spec_can_be_constructed() { fn test_foundation_spec_can_be_constructed() {
let _ = ChainSpec::foundation(); let _ = ChainSpec::foundation();
} }
fn test_domain(domain_type: Domain, raw_domain: u32, spec: &ChainSpec) {
let fork = Fork::genesis(&spec);
let epoch = Epoch::new(0);
let domain = spec.get_domain(epoch, domain_type, &fork);
let mut expected = fork.get_fork_version(epoch).to_vec();
expected.append(&mut int_to_bytes4(raw_domain));
assert_eq!(int_to_bytes8(domain), expected);
}
#[test]
fn test_get_domain() {
let spec = ChainSpec::foundation();
test_domain(Domain::BeaconBlock, spec.domain_beacon_block, &spec);
test_domain(Domain::Randao, spec.domain_randao, &spec);
test_domain(Domain::Attestation, spec.domain_attestation, &spec);
test_domain(Domain::Deposit, spec.domain_deposit, &spec);
test_domain(Domain::Exit, spec.domain_exit, &spec);
test_domain(Domain::Transfer, spec.domain_transfer, &spec);
}
} }

View File

@ -1,15 +1,25 @@
use crate::test_utils::TestRandom; use crate::test_utils::TestRandom;
use crate::{Epoch, Hash256}; use crate::{Epoch, Hash256};
use rand::RngCore; use rand::RngCore;
use serde_derive::Serialize; use serde_derive::{Deserialize, Serialize};
use ssz_derive::{Decode, Encode, TreeHash}; use ssz_derive::{Decode, Encode, TreeHash};
use test_random_derive::TestRandom; use test_random_derive::TestRandom;
/// Specifies the block hash for a shard at an epoch. /// Specifies the block hash for a shard at an epoch.
/// ///
/// Spec v0.4.0 /// Spec v0.5.0
#[derive( #[derive(
Debug, Clone, PartialEq, Default, Serialize, Hash, Encode, Decode, TreeHash, TestRandom, Debug,
Clone,
PartialEq,
Default,
Serialize,
Deserialize,
Hash,
Encode,
Decode,
TreeHash,
TestRandom,
)] )]
pub struct Crosslink { pub struct Crosslink {
pub epoch: Epoch, pub epoch: Epoch,
@ -19,29 +29,6 @@ pub struct Crosslink {
#[cfg(test)] #[cfg(test)]
mod tests { mod tests {
use super::*; use super::*;
use crate::test_utils::{SeedableRng, TestRandom, XorShiftRng};
use ssz::{ssz_encode, Decodable, TreeHash};
#[test] ssz_tests!(Crosslink);
pub fn test_ssz_round_trip() {
let mut rng = XorShiftRng::from_seed([42; 16]);
let original = Crosslink::random_for_test(&mut rng);
let bytes = ssz_encode(&original);
let (decoded, _) = <_>::ssz_decode(&bytes, 0).unwrap();
assert_eq!(original, decoded);
}
#[test]
pub fn test_hash_tree_root_internal() {
let mut rng = XorShiftRng::from_seed([42; 16]);
let original = Crosslink::random_for_test(&mut rng);
let result = original.hash_tree_root_internal();
assert_eq!(result.len(), 32);
// TODO: Add further tests
// https://github.com/sigp/lighthouse/issues/170
}
} }

View File

@ -0,0 +1,10 @@
use crate::*;
use serde_derive::{Deserialize, Serialize};
use ssz_derive::{Decode, Encode, TreeHash};
#[derive(Default, Clone, Debug, PartialEq, Serialize, Deserialize, Decode, Encode, TreeHash)]
pub struct CrosslinkCommittee {
pub slot: Slot,
pub shard: Shard,
pub committee: Vec<usize>,
}

View File

@ -1,16 +1,16 @@
use super::{DepositData, Hash256}; use super::{DepositData, Hash256};
use crate::test_utils::TestRandom; use crate::test_utils::TestRandom;
use rand::RngCore; use rand::RngCore;
use serde_derive::Serialize; use serde_derive::{Deserialize, Serialize};
use ssz_derive::{Decode, Encode, TreeHash}; use ssz_derive::{Decode, Encode, TreeHash};
use test_random_derive::TestRandom; use test_random_derive::TestRandom;
/// A deposit to potentially become a beacon chain validator. /// A deposit to potentially become a beacon chain validator.
/// ///
/// Spec v0.4.0 /// Spec v0.5.0
#[derive(Debug, PartialEq, Clone, Serialize, Encode, Decode, TreeHash, TestRandom)] #[derive(Debug, PartialEq, Clone, Serialize, Deserialize, Encode, Decode, TreeHash, TestRandom)]
pub struct Deposit { pub struct Deposit {
pub branch: Vec<Hash256>, pub proof: Vec<Hash256>,
pub index: u64, pub index: u64,
pub deposit_data: DepositData, pub deposit_data: DepositData,
} }
@ -18,29 +18,6 @@ pub struct Deposit {
#[cfg(test)] #[cfg(test)]
mod tests { mod tests {
use super::*; use super::*;
use crate::test_utils::{SeedableRng, TestRandom, XorShiftRng};
use ssz::{ssz_encode, Decodable, TreeHash};
#[test] ssz_tests!(Deposit);
pub fn test_ssz_round_trip() {
let mut rng = XorShiftRng::from_seed([42; 16]);
let original = Deposit::random_for_test(&mut rng);
let bytes = ssz_encode(&original);
let (decoded, _) = <_>::ssz_decode(&bytes, 0).unwrap();
assert_eq!(original, decoded);
}
#[test]
pub fn test_hash_tree_root_internal() {
let mut rng = XorShiftRng::from_seed([42; 16]);
let original = Deposit::random_for_test(&mut rng);
let result = original.hash_tree_root_internal();
assert_eq!(result.len(), 32);
// TODO: Add further tests
// https://github.com/sigp/lighthouse/issues/170
}
} }

View File

@ -1,14 +1,14 @@
use super::DepositInput; use super::DepositInput;
use crate::test_utils::TestRandom; use crate::test_utils::TestRandom;
use rand::RngCore; use rand::RngCore;
use serde_derive::Serialize; use serde_derive::{Deserialize, Serialize};
use ssz_derive::{Decode, Encode, TreeHash}; use ssz_derive::{Decode, Encode, TreeHash};
use test_random_derive::TestRandom; use test_random_derive::TestRandom;
/// Data generated by the deposit contract. /// Data generated by the deposit contract.
/// ///
/// Spec v0.4.0 /// Spec v0.5.0
#[derive(Debug, PartialEq, Clone, Serialize, Encode, Decode, TreeHash, TestRandom)] #[derive(Debug, PartialEq, Clone, Serialize, Deserialize, Encode, Decode, TreeHash, TestRandom)]
pub struct DepositData { pub struct DepositData {
pub amount: u64, pub amount: u64,
pub timestamp: u64, pub timestamp: u64,
@ -18,29 +18,6 @@ pub struct DepositData {
#[cfg(test)] #[cfg(test)]
mod tests { mod tests {
use super::*; use super::*;
use crate::test_utils::{SeedableRng, TestRandom, XorShiftRng};
use ssz::{ssz_encode, Decodable, TreeHash};
#[test] ssz_tests!(DepositData);
pub fn test_ssz_round_trip() {
let mut rng = XorShiftRng::from_seed([42; 16]);
let original = DepositData::random_for_test(&mut rng);
let bytes = ssz_encode(&original);
let (decoded, _) = <_>::ssz_decode(&bytes, 0).unwrap();
assert_eq!(original, decoded);
}
#[test]
pub fn test_hash_tree_root_internal() {
let mut rng = XorShiftRng::from_seed([42; 16]);
let original = DepositData::random_for_test(&mut rng);
let result = original.hash_tree_root_internal();
assert_eq!(result.len(), 32);
// TODO: Add further tests
// https://github.com/sigp/lighthouse/issues/170
}
} }

View File

@ -1,47 +1,88 @@
use super::Hash256;
use crate::test_utils::TestRandom; use crate::test_utils::TestRandom;
use crate::*;
use bls::{PublicKey, Signature}; use bls::{PublicKey, Signature};
use rand::RngCore; use rand::RngCore;
use serde_derive::Serialize; use serde_derive::{Deserialize, Serialize};
use ssz_derive::{Decode, Encode, TreeHash}; use ssz::{SignedRoot, TreeHash};
use ssz_derive::{Decode, Encode, SignedRoot, TreeHash};
use test_random_derive::TestRandom; use test_random_derive::TestRandom;
/// The data supplied by the user to the deposit contract. /// The data supplied by the user to the deposit contract.
/// ///
/// Spec v0.4.0 /// Spec v0.5.0
#[derive(Debug, PartialEq, Clone, Serialize, Encode, Decode, TreeHash, TestRandom)] #[derive(
Debug,
PartialEq,
Clone,
Serialize,
Deserialize,
Encode,
Decode,
SignedRoot,
TreeHash,
TestRandom,
)]
pub struct DepositInput { pub struct DepositInput {
pub pubkey: PublicKey, pub pubkey: PublicKey,
pub withdrawal_credentials: Hash256, pub withdrawal_credentials: Hash256,
pub proof_of_possession: Signature, pub proof_of_possession: Signature,
} }
impl DepositInput {
/// Generate the 'proof_of_posession' signature for a given DepositInput details.
///
/// Spec v0.5.0
pub fn create_proof_of_possession(
&self,
secret_key: &SecretKey,
epoch: Epoch,
fork: &Fork,
spec: &ChainSpec,
) -> Signature {
let msg = self.signed_root();
let domain = spec.get_domain(epoch, Domain::Deposit, fork);
Signature::new(msg.as_slice(), domain, secret_key)
}
/// Verify that proof-of-possession is valid.
///
/// Spec v0.5.0
pub fn validate_proof_of_possession(
&self,
epoch: Epoch,
fork: &Fork,
spec: &ChainSpec,
) -> bool {
let msg = self.signed_root();
let domain = spec.get_domain(epoch, Domain::Deposit, fork);
self.proof_of_possession.verify(&msg, domain, &self.pubkey)
}
}
#[cfg(test)] #[cfg(test)]
mod tests { mod tests {
use super::*; use super::*;
use crate::test_utils::{SeedableRng, TestRandom, XorShiftRng};
use ssz::{ssz_encode, Decodable, TreeHash}; ssz_tests!(DepositInput);
#[test] #[test]
pub fn test_ssz_round_trip() { fn can_create_and_validate() {
let mut rng = XorShiftRng::from_seed([42; 16]); let spec = ChainSpec::foundation();
let original = DepositInput::random_for_test(&mut rng); let fork = Fork::genesis(&spec);
let keypair = Keypair::random();
let epoch = Epoch::new(0);
let bytes = ssz_encode(&original); let mut deposit_input = DepositInput {
let (decoded, _) = <_>::ssz_decode(&bytes, 0).unwrap(); pubkey: keypair.pk.clone(),
withdrawal_credentials: Hash256::zero(),
proof_of_possession: Signature::empty_signature(),
};
assert_eq!(original, decoded); deposit_input.proof_of_possession =
} deposit_input.create_proof_of_possession(&keypair.sk, epoch, &fork, &spec);
#[test] assert!(deposit_input.validate_proof_of_possession(epoch, &fork, &spec));
pub fn test_hash_tree_root_internal() {
let mut rng = XorShiftRng::from_seed([42; 16]);
let original = DepositInput::random_for_test(&mut rng);
let result = original.hash_tree_root_internal();
assert_eq!(result.len(), 32);
// TODO: Add further tests
// https://github.com/sigp/lighthouse/issues/170
} }
} }

View File

View File

@ -1,14 +1,16 @@
use super::Hash256; use super::Hash256;
use crate::test_utils::TestRandom; use crate::test_utils::TestRandom;
use rand::RngCore; use rand::RngCore;
use serde_derive::Serialize; use serde_derive::{Deserialize, Serialize};
use ssz_derive::{Decode, Encode, TreeHash}; use ssz_derive::{Decode, Encode, TreeHash};
use test_random_derive::TestRandom; use test_random_derive::TestRandom;
/// Contains data obtained from the Eth1 chain. /// Contains data obtained from the Eth1 chain.
/// ///
/// Spec v0.4.0 /// Spec v0.5.0
#[derive(Debug, PartialEq, Clone, Default, Serialize, Encode, Decode, TreeHash, TestRandom)] #[derive(
Debug, PartialEq, Clone, Default, Serialize, Deserialize, Encode, Decode, TreeHash, TestRandom,
)]
pub struct Eth1Data { pub struct Eth1Data {
pub deposit_root: Hash256, pub deposit_root: Hash256,
pub block_hash: Hash256, pub block_hash: Hash256,
@ -17,29 +19,6 @@ pub struct Eth1Data {
#[cfg(test)] #[cfg(test)]
mod tests { mod tests {
use super::*; use super::*;
use crate::test_utils::{SeedableRng, TestRandom, XorShiftRng};
use ssz::{ssz_encode, Decodable, TreeHash};
#[test] ssz_tests!(Eth1Data);
pub fn test_ssz_round_trip() {
let mut rng = XorShiftRng::from_seed([42; 16]);
let original = Eth1Data::random_for_test(&mut rng);
let bytes = ssz_encode(&original);
let (decoded, _) = <_>::ssz_decode(&bytes, 0).unwrap();
assert_eq!(original, decoded);
}
#[test]
pub fn test_hash_tree_root_internal() {
let mut rng = XorShiftRng::from_seed([42; 16]);
let original = Eth1Data::random_for_test(&mut rng);
let result = original.hash_tree_root_internal();
assert_eq!(result.len(), 32);
// TODO: Add further tests
// https://github.com/sigp/lighthouse/issues/170
}
} }

View File

@ -1,14 +1,16 @@
use super::Eth1Data; use super::Eth1Data;
use crate::test_utils::TestRandom; use crate::test_utils::TestRandom;
use rand::RngCore; use rand::RngCore;
use serde_derive::Serialize; use serde_derive::{Deserialize, Serialize};
use ssz_derive::{Decode, Encode, TreeHash}; use ssz_derive::{Decode, Encode, TreeHash};
use test_random_derive::TestRandom; use test_random_derive::TestRandom;
/// A summation of votes for some `Eth1Data`. /// A summation of votes for some `Eth1Data`.
/// ///
/// Spec v0.4.0 /// Spec v0.5.0
#[derive(Debug, PartialEq, Clone, Default, Serialize, Encode, Decode, TreeHash, TestRandom)] #[derive(
Debug, PartialEq, Clone, Default, Serialize, Deserialize, Encode, Decode, TreeHash, TestRandom,
)]
pub struct Eth1DataVote { pub struct Eth1DataVote {
pub eth1_data: Eth1Data, pub eth1_data: Eth1Data,
pub vote_count: u64, pub vote_count: u64,
@ -17,29 +19,6 @@ pub struct Eth1DataVote {
#[cfg(test)] #[cfg(test)]
mod tests { mod tests {
use super::*; use super::*;
use crate::test_utils::{SeedableRng, TestRandom, XorShiftRng};
use ssz::{ssz_encode, Decodable, TreeHash};
#[test] ssz_tests!(Eth1DataVote);
pub fn test_ssz_round_trip() {
let mut rng = XorShiftRng::from_seed([42; 16]);
let original = Eth1DataVote::random_for_test(&mut rng);
let bytes = ssz_encode(&original);
let (decoded, _) = <_>::ssz_decode(&bytes, 0).unwrap();
assert_eq!(original, decoded);
}
#[test]
pub fn test_hash_tree_root_internal() {
let mut rng = XorShiftRng::from_seed([42; 16]);
let original = Eth1DataVote::random_for_test(&mut rng);
let result = original.hash_tree_root_internal();
assert_eq!(result.len(), 32);
// TODO: Add further tests
// https://github.com/sigp/lighthouse/issues/170
}
} }

View File

@ -1,24 +1,41 @@
use crate::{test_utils::TestRandom, Epoch}; use crate::{test_utils::TestRandom, ChainSpec, Epoch};
use int_to_bytes::int_to_bytes4;
use rand::RngCore; use rand::RngCore;
use serde_derive::Serialize; use serde_derive::{Deserialize, Serialize};
use ssz_derive::{Decode, Encode, TreeHash}; use ssz_derive::{Decode, Encode, TreeHash};
use test_random_derive::TestRandom; use test_random_derive::TestRandom;
/// Specifies a fork of the `BeaconChain`, to prevent replay attacks. /// Specifies a fork of the `BeaconChain`, to prevent replay attacks.
/// ///
/// Spec v0.4.0 /// Spec v0.5.0
#[derive(Debug, Clone, PartialEq, Default, Serialize, Encode, Decode, TreeHash, TestRandom)] #[derive(
Debug, Clone, PartialEq, Default, Serialize, Deserialize, Encode, Decode, TreeHash, TestRandom,
)]
pub struct Fork { pub struct Fork {
pub previous_version: u64, pub previous_version: [u8; 4],
pub current_version: u64, pub current_version: [u8; 4],
pub epoch: Epoch, pub epoch: Epoch,
} }
impl Fork { impl Fork {
/// Initialize the `Fork` from the genesis parameters in the `spec`.
///
/// Spec v0.5.0
pub fn genesis(spec: &ChainSpec) -> Self {
let mut current_version: [u8; 4] = [0; 4];
current_version.copy_from_slice(&int_to_bytes4(spec.genesis_fork_version));
Self {
previous_version: current_version,
current_version,
epoch: spec.genesis_epoch,
}
}
/// Return the fork version of the given ``epoch``. /// Return the fork version of the given ``epoch``.
/// ///
/// Spec v0.4.0 /// Spec v0.5.0
pub fn get_fork_version(&self, epoch: Epoch) -> u64 { pub fn get_fork_version(&self, epoch: Epoch) -> [u8; 4] {
if epoch < self.epoch { if epoch < self.epoch {
return self.previous_version; return self.previous_version;
} }
@ -29,29 +46,51 @@ impl Fork {
#[cfg(test)] #[cfg(test)]
mod tests { mod tests {
use super::*; use super::*;
use crate::test_utils::{SeedableRng, TestRandom, XorShiftRng};
use ssz::{ssz_encode, Decodable, TreeHash};
#[test] ssz_tests!(Fork);
pub fn test_ssz_round_trip() {
let mut rng = XorShiftRng::from_seed([42; 16]);
let original = Fork::random_for_test(&mut rng);
let bytes = ssz_encode(&original); fn test_genesis(version: u32, epoch: Epoch) {
let (decoded, _) = <_>::ssz_decode(&bytes, 0).unwrap(); let mut spec = ChainSpec::foundation();
assert_eq!(original, decoded); spec.genesis_fork_version = version;
spec.genesis_epoch = epoch;
let fork = Fork::genesis(&spec);
assert_eq!(fork.epoch, spec.genesis_epoch, "epoch incorrect");
assert_eq!(
fork.previous_version, fork.current_version,
"previous and current are not identical"
);
assert_eq!(
fork.current_version,
version.to_le_bytes(),
"current version incorrect"
);
} }
#[test] #[test]
pub fn test_hash_tree_root_internal() { fn genesis() {
let mut rng = XorShiftRng::from_seed([42; 16]); test_genesis(0, Epoch::new(0));
let original = Fork::random_for_test(&mut rng); test_genesis(9, Epoch::new(11));
test_genesis(2_u32.pow(31), Epoch::new(2_u64.pow(63)));
test_genesis(u32::max_value(), Epoch::max_value());
}
let result = original.hash_tree_root_internal(); #[test]
fn get_fork_version() {
let previous_version = [1; 4];
let current_version = [2; 4];
let epoch = Epoch::new(10);
assert_eq!(result.len(), 32); let fork = Fork {
// TODO: Add further tests previous_version,
// https://github.com/sigp/lighthouse/issues/170 current_version,
epoch,
};
assert_eq!(fork.get_fork_version(epoch - 1), previous_version);
assert_eq!(fork.get_fork_version(epoch), current_version);
assert_eq!(fork.get_fork_version(epoch + 1), current_version);
} }
} }

View File

@ -0,0 +1,22 @@
use crate::test_utils::TestRandom;
use crate::Hash256;
use rand::RngCore;
use serde_derive::{Deserialize, Serialize};
use ssz_derive::{Decode, Encode, TreeHash};
use test_random_derive::TestRandom;
/// Historical block and state roots.
///
/// Spec v0.5.0
#[derive(Debug, Clone, PartialEq, Serialize, Deserialize, Encode, Decode, TreeHash, TestRandom)]
pub struct HistoricalBatch {
pub block_roots: Vec<Hash256>,
pub state_roots: Vec<Hash256>,
}
#[cfg(test)]
mod tests {
use super::*;
ssz_tests!(HistoricalBatch);
}

View File

@ -1,14 +1,20 @@
//! Ethereum 2.0 types
#[macro_use]
pub mod test_utils; pub mod test_utils;
pub mod attestation; pub mod attestation;
pub mod attestation_data; pub mod attestation_data;
pub mod attestation_data_and_custody_bit; pub mod attestation_data_and_custody_bit;
pub mod attestation_duty;
pub mod attester_slashing; pub mod attester_slashing;
pub mod beacon_block; pub mod beacon_block;
pub mod beacon_block_body; pub mod beacon_block_body;
pub mod beacon_block_header;
pub mod beacon_state; pub mod beacon_state;
pub mod chain_spec; pub mod chain_spec;
pub mod crosslink; pub mod crosslink;
pub mod crosslink_committee;
pub mod deposit; pub mod deposit;
pub mod deposit_data; pub mod deposit_data;
pub mod deposit_input; pub mod deposit_input;
@ -16,16 +22,15 @@ pub mod eth1_data;
pub mod eth1_data_vote; pub mod eth1_data_vote;
pub mod fork; pub mod fork;
pub mod free_attestation; pub mod free_attestation;
pub mod historical_batch;
pub mod pending_attestation; pub mod pending_attestation;
pub mod proposal;
pub mod proposer_slashing; pub mod proposer_slashing;
pub mod readers;
pub mod shard_reassignment_record;
pub mod slashable_attestation; pub mod slashable_attestation;
pub mod transfer; pub mod transfer;
pub mod voluntary_exit; pub mod voluntary_exit;
#[macro_use] #[macro_use]
pub mod slot_epoch_macros; pub mod slot_epoch_macros;
pub mod relative_epoch;
pub mod slot_epoch; pub mod slot_epoch;
pub mod slot_height; pub mod slot_height;
pub mod validator; pub mod validator;
@ -37,12 +42,15 @@ use std::collections::HashMap;
pub use crate::attestation::Attestation; pub use crate::attestation::Attestation;
pub use crate::attestation_data::AttestationData; pub use crate::attestation_data::AttestationData;
pub use crate::attestation_data_and_custody_bit::AttestationDataAndCustodyBit; pub use crate::attestation_data_and_custody_bit::AttestationDataAndCustodyBit;
pub use crate::attestation_duty::AttestationDuty;
pub use crate::attester_slashing::AttesterSlashing; pub use crate::attester_slashing::AttesterSlashing;
pub use crate::beacon_block::BeaconBlock; pub use crate::beacon_block::BeaconBlock;
pub use crate::beacon_block_body::BeaconBlockBody; pub use crate::beacon_block_body::BeaconBlockBody;
pub use crate::beacon_state::{BeaconState, Error as BeaconStateError, RelativeEpoch}; pub use crate::beacon_block_header::BeaconBlockHeader;
pub use crate::beacon_state::{BeaconState, Error as BeaconStateError};
pub use crate::chain_spec::{ChainSpec, Domain}; pub use crate::chain_spec::{ChainSpec, Domain};
pub use crate::crosslink::Crosslink; pub use crate::crosslink::Crosslink;
pub use crate::crosslink_committee::CrosslinkCommittee;
pub use crate::deposit::Deposit; pub use crate::deposit::Deposit;
pub use crate::deposit_data::DepositData; pub use crate::deposit_data::DepositData;
pub use crate::deposit_input::DepositInput; pub use crate::deposit_input::DepositInput;
@ -50,9 +58,10 @@ pub use crate::eth1_data::Eth1Data;
pub use crate::eth1_data_vote::Eth1DataVote; pub use crate::eth1_data_vote::Eth1DataVote;
pub use crate::fork::Fork; pub use crate::fork::Fork;
pub use crate::free_attestation::FreeAttestation; pub use crate::free_attestation::FreeAttestation;
pub use crate::historical_batch::HistoricalBatch;
pub use crate::pending_attestation::PendingAttestation; pub use crate::pending_attestation::PendingAttestation;
pub use crate::proposal::Proposal;
pub use crate::proposer_slashing::ProposerSlashing; pub use crate::proposer_slashing::ProposerSlashing;
pub use crate::relative_epoch::{Error as RelativeEpochError, RelativeEpoch};
pub use crate::slashable_attestation::SlashableAttestation; pub use crate::slashable_attestation::SlashableAttestation;
pub use crate::slot_epoch::{Epoch, Slot}; pub use crate::slot_epoch::{Epoch, Slot};
pub use crate::slot_height::SlotHeight; pub use crate::slot_height::SlotHeight;
@ -60,6 +69,10 @@ pub use crate::transfer::Transfer;
pub use crate::validator::Validator; pub use crate::validator::Validator;
pub use crate::voluntary_exit::VoluntaryExit; pub use crate::voluntary_exit::VoluntaryExit;
pub type Shard = u64;
pub type Committee = Vec<usize>;
pub type CrosslinkCommittees = Vec<(Committee, u64)>;
pub type Hash256 = H256; pub type Hash256 = H256;
pub type Address = H160; pub type Address = H160;
pub type EthBalance = U256; pub type EthBalance = U256;
@ -72,7 +85,7 @@ pub type AttesterMap = HashMap<(u64, u64), Vec<usize>>;
/// Maps a slot to a block proposer. /// Maps a slot to a block proposer.
pub type ProposerMap = HashMap<u64, usize>; pub type ProposerMap = HashMap<u64, usize>;
pub use bls::{AggregatePublicKey, AggregateSignature, Keypair, PublicKey, Signature}; pub use bls::{AggregatePublicKey, AggregateSignature, Keypair, PublicKey, SecretKey, Signature};
pub use libp2p::floodsub::{Topic, TopicBuilder}; pub use libp2p::floodsub::{Topic, TopicBuilder};
pub use libp2p::multiaddr; pub use libp2p::multiaddr;
pub use libp2p::Multiaddr; pub use libp2p::Multiaddr;

View File

@ -1,14 +1,14 @@
use crate::test_utils::TestRandom; use crate::test_utils::TestRandom;
use crate::{AttestationData, Bitfield, Slot}; use crate::{AttestationData, Bitfield, Slot};
use rand::RngCore; use rand::RngCore;
use serde_derive::Serialize; use serde_derive::{Deserialize, Serialize};
use ssz_derive::{Decode, Encode, TreeHash}; use ssz_derive::{Decode, Encode, TreeHash};
use test_random_derive::TestRandom; use test_random_derive::TestRandom;
/// An attestation that has been included in the state but not yet fully processed. /// An attestation that has been included in the state but not yet fully processed.
/// ///
/// Spec v0.4.0 /// Spec v0.5.0
#[derive(Debug, Clone, PartialEq, Serialize, Encode, Decode, TreeHash, TestRandom)] #[derive(Debug, Clone, PartialEq, Serialize, Deserialize, Encode, Decode, TreeHash, TestRandom)]
pub struct PendingAttestation { pub struct PendingAttestation {
pub aggregation_bitfield: Bitfield, pub aggregation_bitfield: Bitfield,
pub data: AttestationData, pub data: AttestationData,
@ -19,29 +19,6 @@ pub struct PendingAttestation {
#[cfg(test)] #[cfg(test)]
mod tests { mod tests {
use super::*; use super::*;
use crate::test_utils::{SeedableRng, TestRandom, XorShiftRng};
use ssz::{ssz_encode, Decodable, TreeHash};
#[test] ssz_tests!(PendingAttestation);
pub fn test_ssz_round_trip() {
let mut rng = XorShiftRng::from_seed([42; 16]);
let original = PendingAttestation::random_for_test(&mut rng);
let bytes = ssz_encode(&original);
let (decoded, _) = <_>::ssz_decode(&bytes, 0).unwrap();
assert_eq!(original, decoded);
}
#[test]
pub fn test_hash_tree_root_internal() {
let mut rng = XorShiftRng::from_seed([42; 16]);
let original = PendingAttestation::random_for_test(&mut rng);
let result = original.hash_tree_root_internal();
assert_eq!(result.len(), 32);
// TODO: Add further tests
// https://github.com/sigp/lighthouse/issues/170
}
} }

View File

@ -1,78 +0,0 @@
use crate::test_utils::TestRandom;
use crate::{Hash256, Slot};
use bls::Signature;
use rand::RngCore;
use serde_derive::Serialize;
use ssz::TreeHash;
use ssz_derive::{Decode, Encode, SignedRoot, TreeHash};
use test_random_derive::TestRandom;
/// A proposal for some shard or beacon block.
///
/// Spec v0.4.0
#[derive(Debug, PartialEq, Clone, Serialize, Encode, Decode, TreeHash, TestRandom, SignedRoot)]
pub struct Proposal {
pub slot: Slot,
/// Shard number (spec.beacon_chain_shard_number for beacon chain)
pub shard: u64,
pub block_root: Hash256,
pub signature: Signature,
}
#[cfg(test)]
mod tests {
use super::*;
use crate::test_utils::{SeedableRng, TestRandom, XorShiftRng};
use ssz::{ssz_encode, Decodable, SignedRoot, TreeHash};
#[test]
pub fn test_ssz_round_trip() {
let mut rng = XorShiftRng::from_seed([42; 16]);
let original = Proposal::random_for_test(&mut rng);
let bytes = ssz_encode(&original);
let (decoded, _) = <_>::ssz_decode(&bytes, 0).unwrap();
assert_eq!(original, decoded);
}
#[test]
pub fn test_hash_tree_root_internal() {
let mut rng = XorShiftRng::from_seed([42; 16]);
let original = Proposal::random_for_test(&mut rng);
let result = original.hash_tree_root_internal();
assert_eq!(result.len(), 32);
// TODO: Add further tests
// https://github.com/sigp/lighthouse/issues/170
}
#[derive(TreeHash)]
struct SignedProposal {
pub slot: Slot,
pub shard: u64,
pub block_root: Hash256,
}
impl Into<SignedProposal> for Proposal {
fn into(self) -> SignedProposal {
SignedProposal {
slot: self.slot,
shard: self.shard,
block_root: self.block_root,
}
}
}
#[test]
pub fn test_signed_root() {
let mut rng = XorShiftRng::from_seed([42; 16]);
let original = Proposal::random_for_test(&mut rng);
let other: SignedProposal = original.clone().into();
assert_eq!(original.signed_root(), other.hash_tree_root());
}
}

View File

@ -1,20 +0,0 @@
use crate::test_utils::TestRandom;
use crate::{Hash256, Slot};
use rand::RngCore;
use serde_derive::Serialize;
use ssz_derive::{Decode, Encode, TreeHash};
use test_random_derive::TestRandom;
#[derive(Debug, PartialEq, Clone, Default, Serialize, Encode, Decode, TreeHash, TestRandom)]
pub struct ProposalSignedData {
pub slot: Slot,
pub shard: u64,
pub block_root: Hash256,
}
#[cfg(test)]
mod tests {
use super::*;
ssz_tests!(ProposalSignedData);
}

View File

@ -1,50 +1,23 @@
use super::Proposal; use super::BeaconBlockHeader;
use crate::test_utils::TestRandom; use crate::test_utils::TestRandom;
use rand::RngCore; use rand::RngCore;
use serde_derive::Serialize; use serde_derive::{Deserialize, Serialize};
use ssz_derive::{Decode, Encode, TreeHash}; use ssz_derive::{Decode, Encode, TreeHash};
use test_random_derive::TestRandom; use test_random_derive::TestRandom;
mod builder;
pub use builder::ProposerSlashingBuilder;
/// Two conflicting proposals from the same proposer (validator). /// Two conflicting proposals from the same proposer (validator).
/// ///
/// Spec v0.4.0 /// Spec v0.5.0
#[derive(Debug, PartialEq, Clone, Serialize, Encode, Decode, TreeHash, TestRandom)] #[derive(Debug, PartialEq, Clone, Serialize, Deserialize, Encode, Decode, TreeHash, TestRandom)]
pub struct ProposerSlashing { pub struct ProposerSlashing {
pub proposer_index: u64, pub proposer_index: u64,
pub proposal_1: Proposal, pub header_1: BeaconBlockHeader,
pub proposal_2: Proposal, pub header_2: BeaconBlockHeader,
} }
#[cfg(test)] #[cfg(test)]
mod tests { mod tests {
use super::*; use super::*;
use crate::test_utils::{SeedableRng, TestRandom, XorShiftRng};
use ssz::{ssz_encode, Decodable, TreeHash};
#[test] ssz_tests!(ProposerSlashing);
pub fn test_ssz_round_trip() {
let mut rng = XorShiftRng::from_seed([42; 16]);
let original = ProposerSlashing::random_for_test(&mut rng);
let bytes = ssz_encode(&original);
let (decoded, _) = <_>::ssz_decode(&bytes, 0).unwrap();
assert_eq!(original, decoded);
}
#[test]
pub fn test_hash_tree_root_internal() {
let mut rng = XorShiftRng::from_seed([42; 16]);
let original = ProposerSlashing::random_for_test(&mut rng);
let result = original.hash_tree_root_internal();
assert_eq!(result.len(), 32);
// TODO: Add further tests
// https://github.com/sigp/lighthouse/issues/170
}
} }

View File

@ -1,35 +0,0 @@
use crate::{BeaconBlock, Hash256, Slot};
use std::fmt::Debug;
/// The `BeaconBlockReader` provides interfaces for reading a subset of fields of a `BeaconBlock`.
///
/// The purpose of this trait is to allow reading from either;
/// - a standard `BeaconBlock` struct, or
/// - a SSZ serialized byte array.
///
/// Note: presently, direct SSZ reading has not been implemented so this trait is being used for
/// "future proofing".
pub trait BeaconBlockReader: Debug + PartialEq {
fn slot(&self) -> Slot;
fn parent_root(&self) -> Hash256;
fn state_root(&self) -> Hash256;
fn into_beacon_block(self) -> Option<BeaconBlock>;
}
impl BeaconBlockReader for BeaconBlock {
fn slot(&self) -> Slot {
self.slot
}
fn parent_root(&self) -> Hash256 {
self.parent_root
}
fn state_root(&self) -> Hash256 {
self.state_root
}
fn into_beacon_block(self) -> Option<BeaconBlock> {
Some(self)
}
}

View File

@ -1,5 +0,0 @@
mod block_reader;
mod state_reader;
pub use self::block_reader::BeaconBlockReader;
pub use self::state_reader::BeaconStateReader;

View File

@ -1,25 +0,0 @@
use crate::{BeaconState, Slot};
use std::fmt::Debug;
/// The `BeaconStateReader` provides interfaces for reading a subset of fields of a `BeaconState`.
///
/// The purpose of this trait is to allow reading from either;
/// - a standard `BeaconState` struct, or
/// - a SSZ serialized byte array.
///
/// Note: presently, direct SSZ reading has not been implemented so this trait is being used for
/// "future proofing".
pub trait BeaconStateReader: Debug + PartialEq {
fn slot(&self) -> Slot;
fn into_beacon_state(self) -> Option<BeaconState>;
}
impl BeaconStateReader for BeaconState {
fn slot(&self) -> Slot {
self.slot
}
fn into_beacon_state(self) -> Option<BeaconState> {
Some(self)
}
}

View File

@ -0,0 +1,134 @@
use crate::*;
#[derive(Debug, PartialEq, Clone, Copy)]
pub enum Error {
EpochTooLow { base: Epoch, other: Epoch },
EpochTooHigh { base: Epoch, other: Epoch },
AmbiguiousNextEpoch,
}
/// Defines the epochs relative to some epoch. Most useful when referring to the committees prior
/// to and following some epoch.
///
/// Spec v0.5.0
#[derive(Debug, PartialEq, Clone, Copy)]
pub enum RelativeEpoch {
/// The prior epoch.
Previous,
/// The current epoch.
Current,
/// The next epoch if there _is_ a validator registry update.
///
/// If the validator registry is updated during an epoch transition, a new shuffling seed is
/// generated, this changes the attestation and proposal roles.
NextWithRegistryChange,
/// The next epoch if there _is not_ a validator registry update.
///
/// If the validator registry _is not_ updated during an epoch transition, the shuffling stays
/// the same.
NextWithoutRegistryChange,
}
impl RelativeEpoch {
/// Returns the `epoch` that `self` refers to, with respect to the `base` epoch.
///
/// Spec v0.5.0
pub fn into_epoch(&self, base: Epoch) -> Epoch {
match self {
RelativeEpoch::Previous => base - 1,
RelativeEpoch::Current => base,
RelativeEpoch::NextWithoutRegistryChange => base + 1,
RelativeEpoch::NextWithRegistryChange => base + 1,
}
}
/// Converts the `other` epoch into a `RelativeEpoch`, with respect to `base`
///
/// ## Errors
/// Returns an error when:
/// - `EpochTooLow` when `other` is more than 1 prior to `base`.
/// - `EpochTooHigh` when `other` is more than 1 after `base`.
/// - `AmbiguiousNextEpoch` whenever `other` is one after `base`, because it's unknowable if
/// there will be a registry change.
///
/// Spec v0.5.0
pub fn from_epoch(base: Epoch, other: Epoch) -> Result<Self, Error> {
if other == base - 1 {
Ok(RelativeEpoch::Previous)
} else if other == base {
Ok(RelativeEpoch::Current)
} else if other == base + 1 {
Err(Error::AmbiguiousNextEpoch)
} else if other < base {
Err(Error::EpochTooLow { base, other })
} else {
Err(Error::EpochTooHigh { base, other })
}
}
/// Convenience function for `Self::from_epoch` where both slots are converted into epochs.
pub fn from_slot(base: Slot, other: Slot, spec: &ChainSpec) -> Result<Self, Error> {
Self::from_epoch(
base.epoch(spec.slots_per_epoch),
other.epoch(spec.slots_per_epoch),
)
}
}
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn test_into_epoch() {
let base = Epoch::new(10);
assert_eq!(RelativeEpoch::Current.into_epoch(base), base);
assert_eq!(RelativeEpoch::Previous.into_epoch(base), base - 1);
assert_eq!(
RelativeEpoch::NextWithRegistryChange.into_epoch(base),
base + 1
);
assert_eq!(
RelativeEpoch::NextWithoutRegistryChange.into_epoch(base),
base + 1
);
}
#[test]
fn from_epoch() {
let base = Epoch::new(10);
assert_eq!(
RelativeEpoch::from_epoch(base, base - 1),
Ok(RelativeEpoch::Previous)
);
assert_eq!(
RelativeEpoch::from_epoch(base, base),
Ok(RelativeEpoch::Current)
);
assert_eq!(
RelativeEpoch::from_epoch(base, base + 1),
Err(RelativeEpochError::AmbiguiousNextEpoch)
);
}
#[test]
fn from_slot() {
let spec = ChainSpec::foundation();
let base = Epoch::new(10).start_slot(spec.slots_per_epoch);
assert_eq!(
RelativeEpoch::from_slot(base, base - 1, &spec),
Ok(RelativeEpoch::Previous)
);
assert_eq!(
RelativeEpoch::from_slot(base, base, &spec),
Ok(RelativeEpoch::Current)
);
assert_eq!(
RelativeEpoch::from_slot(base, base + spec.slots_per_epoch, &spec),
Err(RelativeEpochError::AmbiguiousNextEpoch)
);
}
}

View File

@ -1,42 +0,0 @@
use crate::{test_utils::TestRandom, Slot};
use rand::RngCore;
use serde_derive::Serialize;
use ssz_derive::{Decode, Encode, TreeHash};
use test_random_derive::TestRandom;
#[derive(Debug, PartialEq, Clone, Serialize, Encode, Decode, TreeHash, TestRandom)]
pub struct ShardReassignmentRecord {
pub validator_index: u64,
pub shard: u64,
pub slot: Slot,
}
#[cfg(test)]
mod tests {
use super::*;
use crate::test_utils::{SeedableRng, TestRandom, XorShiftRng};
use ssz::{ssz_encode, Decodable, TreeHash};
#[test]
pub fn test_ssz_round_trip() {
let mut rng = XorShiftRng::from_seed([42; 16]);
let original = ShardReassignmentRecord::random_for_test(&mut rng);
let bytes = ssz_encode(&original);
let (decoded, _) = <_>::ssz_decode(&bytes, 0).unwrap();
assert_eq!(original, decoded);
}
#[test]
pub fn test_hash_tree_root_internal() {
let mut rng = XorShiftRng::from_seed([42; 16]);
let original = ShardReassignmentRecord::random_for_test(&mut rng);
let result = original.hash_tree_root_internal();
assert_eq!(result.len(), 32);
// TODO: Add further tests
// https://github.com/sigp/lighthouse/issues/170
}
}

View File

@ -1,6 +1,6 @@
use crate::{test_utils::TestRandom, AggregateSignature, AttestationData, Bitfield, ChainSpec}; use crate::{test_utils::TestRandom, AggregateSignature, AttestationData, Bitfield, ChainSpec};
use rand::RngCore; use rand::RngCore;
use serde_derive::Serialize; use serde_derive::{Deserialize, Serialize};
use ssz::TreeHash; use ssz::TreeHash;
use ssz_derive::{Decode, Encode, SignedRoot, TreeHash}; use ssz_derive::{Decode, Encode, SignedRoot, TreeHash};
use test_random_derive::TestRandom; use test_random_derive::TestRandom;
@ -9,8 +9,19 @@ use test_random_derive::TestRandom;
/// ///
/// To be included in an `AttesterSlashing`. /// To be included in an `AttesterSlashing`.
/// ///
/// Spec v0.4.0 /// Spec v0.5.0
#[derive(Debug, PartialEq, Clone, Serialize, Encode, Decode, TreeHash, TestRandom, SignedRoot)] #[derive(
Debug,
PartialEq,
Clone,
Serialize,
Deserialize,
Encode,
Decode,
TreeHash,
TestRandom,
SignedRoot,
)]
pub struct SlashableAttestation { pub struct SlashableAttestation {
/// Lists validator registry indices, not committee indices. /// Lists validator registry indices, not committee indices.
pub validator_indices: Vec<u64>, pub validator_indices: Vec<u64>,
@ -22,17 +33,17 @@ pub struct SlashableAttestation {
impl SlashableAttestation { impl SlashableAttestation {
/// Check if ``attestation_data_1`` and ``attestation_data_2`` have the same target. /// Check if ``attestation_data_1`` and ``attestation_data_2`` have the same target.
/// ///
/// Spec v0.4.0 /// Spec v0.5.0
pub fn is_double_vote(&self, other: &SlashableAttestation, spec: &ChainSpec) -> bool { pub fn is_double_vote(&self, other: &SlashableAttestation, spec: &ChainSpec) -> bool {
self.data.slot.epoch(spec.slots_per_epoch) == other.data.slot.epoch(spec.slots_per_epoch) self.data.slot.epoch(spec.slots_per_epoch) == other.data.slot.epoch(spec.slots_per_epoch)
} }
/// Check if ``attestation_data_1`` surrounds ``attestation_data_2``. /// Check if ``attestation_data_1`` surrounds ``attestation_data_2``.
/// ///
/// Spec v0.4.0 /// Spec v0.5.0
pub fn is_surround_vote(&self, other: &SlashableAttestation, spec: &ChainSpec) -> bool { pub fn is_surround_vote(&self, other: &SlashableAttestation, spec: &ChainSpec) -> bool {
let source_epoch_1 = self.data.justified_epoch; let source_epoch_1 = self.data.source_epoch;
let source_epoch_2 = other.data.justified_epoch; let source_epoch_2 = other.data.source_epoch;
let target_epoch_1 = self.data.slot.epoch(spec.slots_per_epoch); let target_epoch_1 = self.data.slot.epoch(spec.slots_per_epoch);
let target_epoch_2 = other.data.slot.epoch(spec.slots_per_epoch); let target_epoch_2 = other.data.slot.epoch(spec.slots_per_epoch);
@ -46,7 +57,6 @@ mod tests {
use crate::chain_spec::ChainSpec; use crate::chain_spec::ChainSpec;
use crate::slot_epoch::{Epoch, Slot}; use crate::slot_epoch::{Epoch, Slot};
use crate::test_utils::{SeedableRng, TestRandom, XorShiftRng}; use crate::test_utils::{SeedableRng, TestRandom, XorShiftRng};
use ssz::{ssz_encode, Decodable, TreeHash};
#[test] #[test]
pub fn test_is_double_vote_true() { pub fn test_is_double_vote_true() {
@ -120,39 +130,18 @@ mod tests {
); );
} }
#[test] ssz_tests!(SlashableAttestation);
pub fn test_ssz_round_trip() {
let mut rng = XorShiftRng::from_seed([42; 16]);
let original = SlashableAttestation::random_for_test(&mut rng);
let bytes = ssz_encode(&original);
let (decoded, _) = <_>::ssz_decode(&bytes, 0).unwrap();
assert_eq!(original, decoded);
}
#[test]
pub fn test_hash_tree_root_internal() {
let mut rng = XorShiftRng::from_seed([42; 16]);
let original = SlashableAttestation::random_for_test(&mut rng);
let result = original.hash_tree_root_internal();
assert_eq!(result.len(), 32);
// TODO: Add further tests
// https://github.com/sigp/lighthouse/issues/170
}
fn create_slashable_attestation( fn create_slashable_attestation(
slot_factor: u64, slot_factor: u64,
justified_epoch: u64, source_epoch: u64,
spec: &ChainSpec, spec: &ChainSpec,
) -> SlashableAttestation { ) -> SlashableAttestation {
let mut rng = XorShiftRng::from_seed([42; 16]); let mut rng = XorShiftRng::from_seed([42; 16]);
let mut slashable_vote = SlashableAttestation::random_for_test(&mut rng); let mut slashable_vote = SlashableAttestation::random_for_test(&mut rng);
slashable_vote.data.slot = Slot::new(slot_factor * spec.slots_per_epoch); slashable_vote.data.slot = Slot::new(slot_factor * spec.slots_per_epoch);
slashable_vote.data.justified_epoch = Epoch::new(justified_epoch); slashable_vote.data.source_epoch = Epoch::new(source_epoch);
slashable_vote slashable_vote
} }
} }

View File

@ -1,132 +0,0 @@
use super::AttestationData;
use crate::chain_spec::ChainSpec;
use crate::test_utils::TestRandom;
use bls::AggregateSignature;
use rand::RngCore;
use serde_derive::Serialize;
use ssz_derive::{Decode, Encode, TreeHash};
use test_random_derive::TestRandom;
/// A set of validator votes on a single `AttestationData`, partitioned by custody bit;
/// used as evidence for slashing.
///
/// NOTE(review): this type appears to be superseded by `SlashableAttestation`
/// elsewhere in this commit — confirm before reusing it.
#[derive(Debug, PartialEq, Clone, Serialize, Encode, Decode, TreeHash, TestRandom)]
pub struct SlashableVoteData {
    // Indices of validators that voted with custody bit 0.
    pub custody_bit_0_indices: Vec<u32>,
    // Indices of validators that voted with custody bit 1.
    pub custody_bit_1_indices: Vec<u32>,
    // The attestation data the listed validators signed.
    pub data: AttestationData,
    // Aggregate signature over `data` from the listed validators.
    pub aggregate_signature: AggregateSignature,
}
impl SlashableVoteData {
    /// Check if ``attestation_data_1`` and ``attestation_data_2`` have the same target.
    ///
    /// Two votes are a "double vote" when their slots fall in the same epoch.
    ///
    /// Spec v0.3.0
    pub fn is_double_vote(&self, other: &SlashableVoteData, spec: &ChainSpec) -> bool {
        let target_a = self.data.slot.epoch(spec.epoch_length);
        let target_b = other.data.slot.epoch(spec.epoch_length);
        target_a == target_b
    }

    /// Check if ``attestation_data_1`` surrounds ``attestation_data_2``.
    ///
    /// `self` surrounds `other` when its source epoch is strictly earlier and its
    /// target epoch is strictly later than `other`'s.
    ///
    /// Spec v0.3.0
    pub fn is_surround_vote(&self, other: &SlashableVoteData, spec: &ChainSpec) -> bool {
        let (source_a, source_b) = (self.data.justified_epoch, other.data.justified_epoch);
        let target_a = self.data.slot.epoch(spec.epoch_length);
        let target_b = other.data.slot.epoch(spec.epoch_length);

        source_a < source_b && target_b < target_a
    }
}
#[cfg(test)]
mod tests {
    use super::*;
    use crate::chain_spec::ChainSpec;
    use crate::slot_epoch::{Epoch, Slot};
    use crate::test_utils::{SeedableRng, TestRandom, XorShiftRng};

    // Identical target epochs (slot factor 1 vs 1) => double vote.
    #[test]
    pub fn test_is_double_vote_true() {
        let spec = ChainSpec::foundation();
        let slashable_vote_first = create_slashable_vote_data(1, 1, &spec);
        let slashable_vote_second = create_slashable_vote_data(1, 1, &spec);
        assert_eq!(
            slashable_vote_first.is_double_vote(&slashable_vote_second, &spec),
            true
        )
    }

    // Different target epochs => not a double vote.
    #[test]
    pub fn test_is_double_vote_false() {
        let spec = ChainSpec::foundation();
        let slashable_vote_first = create_slashable_vote_data(1, 1, &spec);
        let slashable_vote_second = create_slashable_vote_data(2, 1, &spec);
        assert_eq!(
            slashable_vote_first.is_double_vote(&slashable_vote_second, &spec),
            false
        );
    }

    // source 1 < source 2 and target 1 < target 2 reversed: (target 2, source 1)
    // surrounds (target 1, source 2).
    #[test]
    pub fn test_is_surround_vote_true() {
        let spec = ChainSpec::foundation();
        let slashable_vote_first = create_slashable_vote_data(2, 1, &spec);
        let slashable_vote_second = create_slashable_vote_data(1, 2, &spec);
        assert_eq!(
            slashable_vote_first.is_surround_vote(&slashable_vote_second, &spec),
            true
        );
    }

    // A wider surround: (target 4, source 1) surrounds (target 3, source 2).
    #[test]
    pub fn test_is_surround_vote_true_realistic() {
        let spec = ChainSpec::foundation();
        let slashable_vote_first = create_slashable_vote_data(4, 1, &spec);
        let slashable_vote_second = create_slashable_vote_data(3, 2, &spec);
        assert_eq!(
            slashable_vote_first.is_surround_vote(&slashable_vote_second, &spec),
            true
        );
    }

    // First vote's source epoch (2) is not strictly before the second's (1),
    // so the strict `<` source comparison fails.
    #[test]
    pub fn test_is_surround_vote_false_source_epoch_fails() {
        let spec = ChainSpec::foundation();
        let slashable_vote_first = create_slashable_vote_data(2, 2, &spec);
        let slashable_vote_second = create_slashable_vote_data(1, 1, &spec);
        assert_eq!(
            slashable_vote_first.is_surround_vote(&slashable_vote_second, &spec),
            false
        );
    }

    // Second vote's target epoch (2) is not strictly before the first's (1),
    // so the strict `<` target comparison fails.
    #[test]
    pub fn test_is_surround_vote_false_target_epoch_fails() {
        let spec = ChainSpec::foundation();
        let slashable_vote_first = create_slashable_vote_data(1, 1, &spec);
        let slashable_vote_second = create_slashable_vote_data(2, 2, &spec);
        assert_eq!(
            slashable_vote_first.is_surround_vote(&slashable_vote_second, &spec),
            false
        );
    }

    // Standard SSZ round-trip and tree-hash tests.
    ssz_tests!(SlashableVoteData);

    // Build a `SlashableVoteData` whose target epoch equals `slot_factor` and whose
    // source (justified) epoch equals `justified_epoch`; remaining fields are
    // deterministic-random from a fixed seed.
    fn create_slashable_vote_data(
        slot_factor: u64,
        justified_epoch: u64,
        spec: &ChainSpec,
    ) -> SlashableVoteData {
        let mut rng = XorShiftRng::from_seed([42; 16]);
        let mut slashable_vote = SlashableVoteData::random_for_test(&mut rng);
        slashable_vote.data.slot = Slot::new(slot_factor * spec.epoch_length);
        slashable_vote.data.justified_epoch = Epoch::new(justified_epoch);
        slashable_vote
    }
}

View File

@ -12,7 +12,7 @@ use crate::slot_height::SlotHeight;
/// may lead to programming errors which are not detected by the compiler. /// may lead to programming errors which are not detected by the compiler.
use crate::test_utils::TestRandom; use crate::test_utils::TestRandom;
use rand::RngCore; use rand::RngCore;
use serde_derive::Serialize; use serde_derive::{Deserialize, Serialize};
use slog; use slog;
use ssz::{hash, ssz_encode, Decodable, DecodeError, Encodable, SszStream, TreeHash}; use ssz::{hash, ssz_encode, Decodable, DecodeError, Encodable, SszStream, TreeHash};
use std::cmp::{Ord, Ordering}; use std::cmp::{Ord, Ordering};
@ -21,10 +21,10 @@ use std::hash::{Hash, Hasher};
use std::iter::Iterator; use std::iter::Iterator;
use std::ops::{Add, AddAssign, Div, DivAssign, Mul, MulAssign, Rem, Sub, SubAssign}; use std::ops::{Add, AddAssign, Div, DivAssign, Mul, MulAssign, Rem, Sub, SubAssign};
#[derive(Eq, Debug, Clone, Copy, Default, Serialize)] #[derive(Eq, Debug, Clone, Copy, Default, Serialize, Deserialize)]
pub struct Slot(u64); pub struct Slot(u64);
#[derive(Eq, Debug, Clone, Copy, Default, Serialize)] #[derive(Eq, Debug, Clone, Copy, Default, Serialize, Deserialize)]
pub struct Epoch(u64); pub struct Epoch(u64);
impl_common!(Slot); impl_common!(Slot);
@ -103,8 +103,6 @@ impl<'a> Iterator for SlotIter<'a> {
#[cfg(test)] #[cfg(test)]
mod slot_tests { mod slot_tests {
use super::*; use super::*;
use crate::test_utils::{SeedableRng, TestRandom, XorShiftRng};
use ssz::ssz_encode;
all_tests!(Slot); all_tests!(Slot);
} }
@ -112,8 +110,6 @@ mod slot_tests {
#[cfg(test)] #[cfg(test)]
mod epoch_tests { mod epoch_tests {
use super::*; use super::*;
use crate::test_utils::{SeedableRng, TestRandom, XorShiftRng};
use ssz::ssz_encode;
all_tests!(Epoch); all_tests!(Epoch);

View File

@ -207,9 +207,9 @@ macro_rules! impl_ssz {
} }
impl TreeHash for $type { impl TreeHash for $type {
fn hash_tree_root_internal(&self) -> Vec<u8> { fn hash_tree_root(&self) -> Vec<u8> {
let mut result: Vec<u8> = vec![]; let mut result: Vec<u8> = vec![];
result.append(&mut self.0.hash_tree_root_internal()); result.append(&mut self.0.hash_tree_root());
hash(&result) hash(&result)
} }
} }
@ -248,7 +248,7 @@ macro_rules! impl_common {
} }
// test macros // test macros
#[allow(unused_macros)] #[cfg(test)]
macro_rules! new_tests { macro_rules! new_tests {
($type: ident) => { ($type: ident) => {
#[test] #[test]
@ -260,7 +260,7 @@ macro_rules! new_tests {
}; };
} }
#[allow(unused_macros)] #[cfg(test)]
macro_rules! from_into_tests { macro_rules! from_into_tests {
($type: ident, $other: ident) => { ($type: ident, $other: ident) => {
#[test] #[test]
@ -286,7 +286,7 @@ macro_rules! from_into_tests {
}; };
} }
#[allow(unused_macros)] #[cfg(test)]
macro_rules! math_between_tests { macro_rules! math_between_tests {
($type: ident, $other: ident) => { ($type: ident, $other: ident) => {
#[test] #[test]
@ -434,7 +434,7 @@ macro_rules! math_between_tests {
}; };
} }
#[allow(unused_macros)] #[cfg(test)]
macro_rules! math_tests { macro_rules! math_tests {
($type: ident) => { ($type: ident) => {
#[test] #[test]
@ -528,35 +528,7 @@ macro_rules! math_tests {
}; };
} }
#[allow(unused_macros)] #[cfg(test)]
macro_rules! ssz_tests {
($type: ident) => {
#[test]
pub fn test_ssz_round_trip() {
let mut rng = XorShiftRng::from_seed([42; 16]);
let original = $type::random_for_test(&mut rng);
let bytes = ssz_encode(&original);
let (decoded, _) = $type::ssz_decode(&bytes, 0).unwrap();
assert_eq!(original, decoded);
}
#[test]
pub fn test_hash_tree_root_internal() {
let mut rng = XorShiftRng::from_seed([42; 16]);
let original = $type::random_for_test(&mut rng);
let result = original.hash_tree_root_internal();
assert_eq!(result.len(), 32);
// TODO: Add further tests
// https://github.com/sigp/lighthouse/issues/170
}
};
}
#[allow(unused_macros)]
macro_rules! all_tests { macro_rules! all_tests {
($type: ident) => { ($type: ident) => {
new_tests!($type); new_tests!($type);

View File

@ -33,11 +33,8 @@ impl SlotHeight {
} }
#[cfg(test)] #[cfg(test)]
mod slot_height_tests { mod slot_height_tests {
use super::*; use super::*;
use crate::test_utils::{SeedableRng, TestRandom, XorShiftRng};
use ssz::ssz_encode;
all_tests!(SlotHeight); all_tests!(SlotHeight);
} }

View File

@ -0,0 +1,30 @@
use crate::*;
use int_to_bytes::int_to_bytes48;
use log::debug;
use rayon::prelude::*;
/// Generates `validator_count` keypairs where the secret key is the index of the
/// validator.
///
/// For example, the first validator has a secret key of `int_to_bytes48(1)`, the second has
/// `int_to_bytes48(2)` and so on. (We skip `0` as it generates a weird looking public key and is
/// probably invalid).
pub fn generate_deterministic_keypairs(validator_count: usize) -> Vec<Keypair> {
debug!(
"Generating {} deterministic validator keypairs...",
validator_count
);
let keypairs: Vec<Keypair> = (0..validator_count)
.collect::<Vec<usize>>()
.par_iter()
.map(|&i| {
let secret = int_to_bytes48(i as u64 + 1);
let sk = SecretKey::from_bytes(&secret).unwrap();
let pk = PublicKey::from_secret_key(&sk);
Keypair { sk, pk }
})
.collect();
keypairs
}

View File

@ -0,0 +1,128 @@
use crate::*;
use rayon::prelude::*;
use std::fs::File;
use std::io::{Error, ErrorKind, Read, Write};
use std::path::Path;
// Length in bytes of a public key in its uncompressed (raw) form.
pub const PUBLIC_KEY_BYTES_LEN: usize = 96;
// Length in bytes of a raw secret key.
pub const SECRET_KEY_BYTES_LEN: usize = 48;
// Number of keypairs read/written per I/O batch.
pub const BATCH_SIZE: usize = 1_000; // ~15MB
// Bytes per serialized keypair: secret key followed by public key.
pub const KEYPAIR_BYTES_LEN: usize = PUBLIC_KEY_BYTES_LEN + SECRET_KEY_BYTES_LEN;
// Bytes in one full batch of keypairs.
pub const BATCH_BYTE_LEN: usize = KEYPAIR_BYTES_LEN * BATCH_SIZE;
/// Defines a trait that allows reading/writing a vec of `Keypair` from/to a file.
pub trait KeypairsFile {
    /// Write to file, without guaranteeing interoperability with other clients.
    ///
    /// Returns an `Err` if the file cannot be created or written.
    fn to_raw_file(&self, path: &Path, keypairs: &[Keypair]) -> Result<(), Error>;
    /// Read from file, without guaranteeing interoperability with other clients.
    ///
    /// Returns an `Err` if the file cannot be opened or read.
    fn from_raw_file(path: &Path, count: usize) -> Result<Vec<Keypair>, Error>;
}
impl KeypairsFile for Vec<Keypair> {
    /// Write the keypairs to file, using the fastest possible method without guaranteeing
    /// interoperability with other clients.
    ///
    /// Each keypair is serialized as its raw secret-key bytes followed by its uncompressed
    /// public-key bytes, written in batches of `BATCH_SIZE` to bound memory usage.
    fn to_raw_file(&self, path: &Path, keypairs: &[Keypair]) -> Result<(), Error> {
        let mut keypairs_file = File::create(path)?;

        for keypair_batch in keypairs.chunks(BATCH_SIZE) {
            let mut buf = Vec::with_capacity(BATCH_BYTE_LEN);

            for keypair in keypair_batch {
                buf.append(&mut keypair.sk.as_raw().as_bytes());
                buf.append(&mut keypair.pk.clone().as_uncompressed_bytes());
            }

            keypairs_file.write_all(&buf)?;
        }

        Ok(())
    }

    /// Read `count` keypairs from file, using the fastest possible method without
    /// guaranteeing interoperability with other clients.
    ///
    /// Returns an `Err` if the file is too short or if any keypair's bytes fail to
    /// deserialize into valid keys.
    fn from_raw_file(path: &Path, count: usize) -> Result<Vec<Keypair>, Error> {
        let mut keypairs_file = File::open(path)?;

        let mut keypairs = Vec::with_capacity(count);

        let indices: Vec<usize> = (0..count).collect();

        for batch in indices.chunks(BATCH_SIZE) {
            let mut buf = vec![0; batch.len() * KEYPAIR_BYTES_LEN];
            keypairs_file.read_exact(&mut buf)?;

            // Deserialize the batch in parallel. Errors are propagated via `?` and the
            // fallible `collect`, instead of panicking inside the rayon worker threads
            // as the previous `map_err(..).unwrap()` did.
            let mut keypair_batch = batch
                .par_iter()
                .enumerate()
                .map(|(i, _)| {
                    let sk_start = i * KEYPAIR_BYTES_LEN;
                    let sk_end = sk_start + SECRET_KEY_BYTES_LEN;
                    let sk = SecretKey::from_bytes(&buf[sk_start..sk_end])
                        .map_err(|_| Error::new(ErrorKind::Other, "Invalid SecretKey bytes"))?;

                    let pk_start = sk_end;
                    let pk_end = pk_start + PUBLIC_KEY_BYTES_LEN;
                    let pk = PublicKey::from_uncompressed_bytes(&buf[pk_start..pk_end])
                        .map_err(|_| Error::new(ErrorKind::Other, "Invalid PublicKey bytes"))?;

                    Ok(Keypair { sk, pk })
                })
                .collect::<Result<Vec<_>, Error>>()?;

            keypairs.append(&mut keypair_batch);
        }

        Ok(keypairs)
    }
}
#[cfg(test)]
mod tests {
    use super::*;
    use rand::{distributions::Alphanumeric, thread_rng, Rng};
    use std::fs::remove_file;

    // Generate `n` random keypairs in parallel.
    fn random_keypairs(n: usize) -> Vec<Keypair> {
        (0..n).into_par_iter().map(|_| Keypair::random()).collect()
    }

    // Produce a random 7-character alphanumeric filename.
    fn random_tmp_file() -> String {
        let mut rng = thread_rng();

        rng.sample_iter(&Alphanumeric).take(7).collect()
    }

    // Ignored by default: writes to the filesystem (/tmp) and is slow.
    // Exercises the single-batch path (10 << BATCH_SIZE keypairs).
    #[test]
    #[ignore]
    fn read_write_consistency_small_batch() {
        let num_keypairs = 10;
        let keypairs = random_keypairs(num_keypairs);

        let keypairs_path = Path::new("/tmp").join(random_tmp_file());
        keypairs.to_raw_file(&keypairs_path, &keypairs).unwrap();

        let decoded = Vec::from_raw_file(&keypairs_path, num_keypairs).unwrap();
        remove_file(keypairs_path).unwrap();

        assert_eq!(keypairs, decoded);
    }

    // Ignored by default: writes to the filesystem (/tmp) and is slow.
    // Exercises the multi-batch path (BATCH_SIZE + 1 keypairs spans two batches).
    #[test]
    #[ignore]
    fn read_write_consistency_big_batch() {
        let num_keypairs = BATCH_SIZE + 1;
        let keypairs = random_keypairs(num_keypairs);

        let keypairs_path = Path::new("/tmp").join(random_tmp_file());
        keypairs.to_raw_file(&keypairs_path, &keypairs).unwrap();

        let decoded = Vec::from_raw_file(&keypairs_path, num_keypairs).unwrap();
        remove_file(keypairs_path).unwrap();

        assert_eq!(keypairs, decoded);
    }
}

View File

@ -17,14 +17,14 @@ macro_rules! ssz_tests {
} }
#[test] #[test]
pub fn test_hash_tree_root_internal() { pub fn test_hash_tree_root() {
use crate::test_utils::{SeedableRng, TestRandom, XorShiftRng}; use crate::test_utils::{SeedableRng, TestRandom, XorShiftRng};
use ssz::TreeHash; use ssz::TreeHash;
let mut rng = XorShiftRng::from_seed([42; 16]); let mut rng = XorShiftRng::from_seed([42; 16]);
let original = $type::random_for_test(&mut rng); let original = $type::random_for_test(&mut rng);
let result = original.hash_tree_root_internal(); let result = original.hash_tree_root();
assert_eq!(result.len(), 32); assert_eq!(result.len(), 32);
// TODO: Add further tests // TODO: Add further tests

View File

@ -1,55 +1,30 @@
use rand::RngCore; #[macro_use]
mod macros;
mod generate_deterministic_keypairs;
mod keypairs_file;
mod test_random;
mod testing_attestation_builder;
mod testing_attestation_data_builder;
mod testing_attester_slashing_builder;
mod testing_beacon_block_builder;
mod testing_beacon_state_builder;
mod testing_deposit_builder;
mod testing_pending_attestation_builder;
mod testing_proposer_slashing_builder;
mod testing_transfer_builder;
mod testing_voluntary_exit_builder;
pub use generate_deterministic_keypairs::generate_deterministic_keypairs;
pub use keypairs_file::KeypairsFile;
pub use rand::{prng::XorShiftRng, SeedableRng}; pub use rand::{prng::XorShiftRng, SeedableRng};
pub use test_random::TestRandom;
pub mod address; pub use testing_attestation_builder::TestingAttestationBuilder;
pub mod aggregate_signature; pub use testing_attestation_data_builder::TestingAttestationDataBuilder;
pub mod bitfield; pub use testing_attester_slashing_builder::TestingAttesterSlashingBuilder;
pub mod hash256; pub use testing_beacon_block_builder::TestingBeaconBlockBuilder;
pub mod public_key; pub use testing_beacon_state_builder::{keypairs_path, TestingBeaconStateBuilder};
pub mod secret_key; pub use testing_deposit_builder::TestingDepositBuilder;
pub mod signature; pub use testing_pending_attestation_builder::TestingPendingAttestationBuilder;
pub use testing_proposer_slashing_builder::TestingProposerSlashingBuilder;
pub trait TestRandom<T> pub use testing_transfer_builder::TestingTransferBuilder;
where pub use testing_voluntary_exit_builder::TestingVoluntaryExitBuilder;
T: RngCore,
{
fn random_for_test(rng: &mut T) -> Self;
}
impl<T: RngCore> TestRandom<T> for bool {
fn random_for_test(rng: &mut T) -> Self {
(rng.next_u32() % 2) == 1
}
}
impl<T: RngCore> TestRandom<T> for u64 {
fn random_for_test(rng: &mut T) -> Self {
rng.next_u64()
}
}
impl<T: RngCore> TestRandom<T> for u32 {
fn random_for_test(rng: &mut T) -> Self {
rng.next_u32()
}
}
impl<T: RngCore> TestRandom<T> for usize {
fn random_for_test(rng: &mut T) -> Self {
rng.next_u32() as usize
}
}
impl<T: RngCore, U> TestRandom<T> for Vec<U>
where
U: TestRandom<T>,
{
fn random_for_test(rng: &mut T) -> Self {
vec![
<U>::random_for_test(rng),
<U>::random_for_test(rng),
<U>::random_for_test(rng),
]
}
}

Some files were not shown because too many files have changed in this diff Show More