Merge pull request #85 from sigp/attestation-update

Update `AttestationRecord` as per spec
This commit is contained in:
Paul Hauner 2018-12-12 09:59:15 +11:00 committed by GitHub
commit 7259e1d7b6
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
37 changed files with 885 additions and 2657 deletions

View File

@ -32,6 +32,7 @@ name = "lighthouse"
[workspace] [workspace]
members = [ members = [
"beacon_chain/attestation_validation",
"beacon_chain/chain", "beacon_chain/chain",
"beacon_chain/naive_fork_choice", "beacon_chain/naive_fork_choice",
"beacon_chain/state-transition", "beacon_chain/state-transition",
@ -45,7 +46,6 @@ members = [
"beacon_chain/utils/ssz", "beacon_chain/utils/ssz",
"beacon_chain/utils/ssz_helpers", "beacon_chain/utils/ssz_helpers",
"beacon_chain/utils/vec_shuffle", "beacon_chain/utils/vec_shuffle",
"beacon_chain/validation",
"beacon_chain/validator_change", "beacon_chain/validator_change",
"beacon_chain/validator_induction", "beacon_chain/validator_induction",
"beacon_chain/validator_shuffling", "beacon_chain/validator_shuffling",

View File

@ -1,5 +1,5 @@
[package] [package]
name = "validation" name = "attestation_validation"
version = "0.1.0" version = "0.1.0"
authors = ["Paul Hauner <paul@paulhauner.com>"] authors = ["Paul Hauner <paul@paulhauner.com>"]
@ -7,7 +7,6 @@ authors = ["Paul Hauner <paul@paulhauner.com>"]
bls = { path = "../utils/bls" } bls = { path = "../utils/bls" }
db = { path = "../../lighthouse/db" } db = { path = "../../lighthouse/db" }
hashing = { path = "../utils/hashing" } hashing = { path = "../utils/hashing" }
rayon = "1.0.2"
ssz = { path = "../utils/ssz" } ssz = { path = "../utils/ssz" }
ssz_helpers = { path = "../utils/ssz_helpers" } ssz_helpers = { path = "../utils/ssz_helpers" }
types = { path = "../types" } types = { path = "../types" }

View File

@ -0,0 +1,246 @@
use super::{Error, Invalid, Outcome};
/// Check that an attestation is valid to be included in some block.
///
/// Returns `Ok(Outcome::Valid)` when `attestation_slot` falls inside the window of slots that
/// the block at `block_slot` may include, otherwise `Ok(Outcome::Invalid(..))`. This function
/// never produces an `Err` itself; the `Result` is part of the shared validation signature.
pub fn validate_attestation_for_block(
    attestation_slot: u64,
    block_slot: u64,
    parent_block_slot: u64,
    min_attestation_inclusion_delay: u64,
    epoch_length: u64,
) -> Result<Outcome, Error> {
    /*
     * There is a delay before an attestation may be included in a block, quantified by
     * `slots` and defined as `min_attestation_inclusion_delay`.
     *
     * So, an attestation must be at least `min_attestation_inclusion_delay` slots "older" than
     * the block it is contained in.
     *
     * Expressed as `attestation_slot + min_attestation_inclusion_delay <= block_slot` with a
     * checked add, so neither a zero delay nor a small `block_slot` can underflow.
     * https://github.com/sigp/lighthouse/issues/95
     */
    verify_or!(
        attestation_slot
            .checked_add(min_attestation_inclusion_delay)
            .map_or(false, |earliest_inclusion_slot| {
                earliest_inclusion_slot <= block_slot
            }),
        reject!(Invalid::AttestationTooRecent)
    );
    /*
     * A block may not include attestations referencing slots more than an epoch length + 1
     * prior to the parent block's slot.
     */
    verify_or!(
        attestation_slot >= parent_block_slot.saturating_sub(epoch_length + 1),
        reject!(Invalid::AttestationTooOld)
    );
    accept!()
}
#[cfg(test)]
mod tests {
    use super::*;

    /*
     * Invalid::AttestationTooOld tests.
     */

    #[test]
    fn test_inclusion_too_old_minimal() {
        let min_attestation_inclusion_delay = 10;
        let epoch_length = 20;
        let block_slot = 100;
        let parent_block_slot = block_slot - 1;
        // The youngest permissable attestation: exactly `delay` slots behind the block.
        let attestation_slot = block_slot - min_attestation_inclusion_delay;

        let outcome = validate_attestation_for_block(
            attestation_slot,
            block_slot,
            parent_block_slot,
            min_attestation_inclusion_delay,
            epoch_length,
        );
        assert_eq!(outcome, Ok(Outcome::Valid));
    }

    #[test]
    fn test_inclusion_too_old_maximal() {
        let min_attestation_inclusion_delay = 10;
        let epoch_length = 20;
        let block_slot = 100;
        let parent_block_slot = block_slot - 1;
        // An old (but still in-window) attestation.
        let attestation_slot = block_slot - epoch_length + 1;

        let outcome = validate_attestation_for_block(
            attestation_slot,
            block_slot,
            parent_block_slot,
            min_attestation_inclusion_delay,
            epoch_length,
        );
        assert_eq!(outcome, Ok(Outcome::Valid));
    }

    #[test]
    fn test_inclusion_too_old_saturating_non_zero_attestation_slot() {
        let min_attestation_inclusion_delay = 10;
        let epoch_length = 20;
        // Small enough that the "too old" lower bound saturates towards zero.
        let block_slot = epoch_length + 1;
        let parent_block_slot = block_slot - 1;
        let attestation_slot = block_slot - min_attestation_inclusion_delay;

        let outcome = validate_attestation_for_block(
            attestation_slot,
            block_slot,
            parent_block_slot,
            min_attestation_inclusion_delay,
            epoch_length,
        );
        assert_eq!(outcome, Ok(Outcome::Valid));
    }

    #[test]
    fn test_inclusion_too_old_saturating_zero_attestation_slot() {
        let min_attestation_inclusion_delay = 10;
        let epoch_length = 20;
        let block_slot = epoch_length + 1;
        let parent_block_slot = block_slot - 1;
        let attestation_slot = 0;

        let outcome = validate_attestation_for_block(
            attestation_slot,
            block_slot,
            parent_block_slot,
            min_attestation_inclusion_delay,
            epoch_length,
        );
        assert_eq!(outcome, Ok(Outcome::Valid));
    }

    #[test]
    fn test_inclusion_too_old() {
        let min_attestation_inclusion_delay = 10;
        let epoch_length = 20;
        let block_slot = epoch_length * 2;
        let parent_block_slot = block_slot - 1;
        // One slot beyond the permissable window relative to the parent block.
        let attestation_slot = parent_block_slot - (epoch_length + 2);

        let outcome = validate_attestation_for_block(
            attestation_slot,
            block_slot,
            parent_block_slot,
            min_attestation_inclusion_delay,
            epoch_length,
        );
        assert_eq!(outcome, Ok(Outcome::Invalid(Invalid::AttestationTooOld)));
    }

    /*
     * Invalid::AttestationTooRecent tests.
     */

    #[test]
    fn test_inclusion_too_recent_minimal() {
        let parent_block_slot = 99;
        let min_attestation_inclusion_delay = 10;
        let epoch_length = 20;
        let block_slot = 100;
        let attestation_slot = block_slot - min_attestation_inclusion_delay;

        let outcome = validate_attestation_for_block(
            attestation_slot,
            block_slot,
            parent_block_slot,
            min_attestation_inclusion_delay,
            epoch_length,
        );
        assert_eq!(outcome, Ok(Outcome::Valid));
    }

    #[test]
    fn test_inclusion_too_recent_maximal() {
        let parent_block_slot = 99;
        let min_attestation_inclusion_delay = 10;
        let epoch_length = 20;
        let block_slot = 100;
        let attestation_slot = block_slot - epoch_length;

        let outcome = validate_attestation_for_block(
            attestation_slot,
            block_slot,
            parent_block_slot,
            min_attestation_inclusion_delay,
            epoch_length,
        );
        assert_eq!(outcome, Ok(Outcome::Valid));
    }

    #[test]
    fn test_inclusion_too_recent_insufficient() {
        let parent_block_slot = 99;
        let min_attestation_inclusion_delay = 10;
        let epoch_length = 20;
        let block_slot = 100;
        // One slot short of the required inclusion delay.
        let attestation_slot = block_slot - (min_attestation_inclusion_delay - 1);

        let outcome = validate_attestation_for_block(
            attestation_slot,
            block_slot,
            parent_block_slot,
            min_attestation_inclusion_delay,
            epoch_length,
        );
        assert_eq!(outcome, Ok(Outcome::Invalid(Invalid::AttestationTooRecent)));
    }

    #[test]
    fn test_inclusion_too_recent_first_possible_slot() {
        let min_attestation_inclusion_delay = 10;
        let epoch_length = 20;
        // The first block slot that can legally include a slot-0 attestation.
        let block_slot = min_attestation_inclusion_delay;
        let attestation_slot = 0;
        let parent_block_slot = block_slot - 1;

        let outcome = validate_attestation_for_block(
            attestation_slot,
            block_slot,
            parent_block_slot,
            min_attestation_inclusion_delay,
            epoch_length,
        );
        assert_eq!(outcome, Ok(Outcome::Valid));
    }

    #[test]
    fn test_inclusion_too_recent_saturation_non_zero_slot() {
        let min_attestation_inclusion_delay = 10;
        let epoch_length = 20;
        let block_slot = min_attestation_inclusion_delay - 1;
        let parent_block_slot = block_slot - 1;
        // Use a non-zero attestation slot so this case is distinct from the `zero_slot` test
        // below; when `block_slot < min_attestation_inclusion_delay` every attestation is too
        // recent, regardless of its slot.
        let attestation_slot = block_slot - 1;

        let outcome = validate_attestation_for_block(
            attestation_slot,
            block_slot,
            parent_block_slot,
            min_attestation_inclusion_delay,
            epoch_length,
        );
        assert_eq!(outcome, Ok(Outcome::Invalid(Invalid::AttestationTooRecent)));
    }

    #[test]
    fn test_inclusion_too_recent_saturation_zero_slot() {
        let min_attestation_inclusion_delay = 10;
        let epoch_length = 20;
        let block_slot = min_attestation_inclusion_delay - 1;
        let parent_block_slot = block_slot - 1;
        let attestation_slot = 0;

        let outcome = validate_attestation_for_block(
            attestation_slot,
            block_slot,
            parent_block_slot,
            min_attestation_inclusion_delay,
            epoch_length,
        );
        assert_eq!(outcome, Ok(Outcome::Invalid(Invalid::AttestationTooRecent)));
    }
}

View File

@ -0,0 +1,37 @@
/// Reasons why an `AttestationRecord` can be invalid.
#[derive(PartialEq, Debug)]
pub enum Invalid {
    // Attestation slot is within `min_attestation_inclusion_delay` of the block slot.
    AttestationTooRecent,
    // Attestation slot is more than an epoch (+1) behind the parent block slot.
    AttestationTooOld,
    // `justified_slot` does not match the (current or previous) justified slot in the state.
    JustifiedSlotImpermissable,
    // No block exists locally at `justified_slot` in the chain being validated against.
    JustifiedBlockNotInChain,
    // The local block at `justified_slot` has a different hash than the attestation claims.
    JustifiedBlockHashMismatch,
    // `data.shard` does not index into the state's `latest_crosslinks`.
    UnknownShard,
    // `shard_block_hash` matches neither the known crosslink hash nor `latest_crosslink_hash`.
    ShardBlockHashMismatch,
    // The aggregate signature does not verify against the aggregated participant keys.
    SignatureInvalid,
}

/// The outcome of validating the `AttestationRecord`.
///
/// Distinct from the `Error` enum as an `Outcome` indicates that validation executed successfully
/// and determined the validity of the `AttestationRecord`.
#[derive(PartialEq, Debug)]
pub enum Outcome {
    Valid,
    Invalid(Invalid),
}

/// Errors that prevent this function from correctly validating the `AttestationRecord`.
///
/// Distinct from the `Outcome` enum as `Errors` indicate that validation encountered an unexpected
/// condition and was unable to perform its duty.
#[derive(PartialEq, Debug)]
pub enum Error {
    BlockHasNoParent,
    BadValidatorIndex,
    UnableToLookupBlockAtSlot,
    OutOfBoundsBitfieldIndex,
    // A stored public key failed to decode.
    PublicKeyCorrupt,
    NoPublicKeyForValidator,
    // Wraps a database-level error message.
    DBError(String),
}

View File

@ -0,0 +1,80 @@
use super::db::stores::{BeaconBlockAtSlotError, BeaconBlockStore};
use super::db::ClientDB;
use super::types::AttestationData;
use super::types::Hash256;
use super::{Error, Invalid, Outcome};
use std::sync::Arc;
/// Verify that an attestation's `data.justified_block_hash` matches the locally-stored hash of
/// the block at the attestation's `data.justified_slot`.
///
/// `chain_tip_block_hash` identifies the tip of the chain in which the justified block should
/// exist locally. As Lighthouse stores multiple chains locally, several blocks may exist at the
/// same slot; the chain tip restricts the lookup to a single chain, where each slot holds
/// exactly zero or one blocks.
pub fn validate_attestation_justified_block_hash<T>(
    data: &AttestationData,
    chain_tip_block_hash: &Hash256,
    block_store: &Arc<BeaconBlockStore<T>>,
) -> Result<Outcome, Error>
where
    T: ClientDB + Sized,
{
    /*
     * The attestation's `justified_block_hash` must exactly match the hash of the block at
     * `justified_slot` in the local chain. This also implies that the `justified_slot` must be
     * known locally.
     */
    match block_hash_at_slot(chain_tip_block_hash, data.justified_slot, block_store)? {
        Some(local_hash) if local_hash == data.justified_block_hash => accept!(),
        Some(_) => reject!(Invalid::JustifiedBlockHashMismatch),
        None => reject!(Invalid::JustifiedBlockNotInChain),
    }
}
/// Return the hash of the block at `slot` in the chain identified by `chain_tip_hash`, or
/// `None` if no such block is known.
///
/// Given that the database stores multiple chains, several blocks may exist at the given slot;
/// `chain_tip_hash` selects exactly which chain is searched.
fn block_hash_at_slot<T>(
    chain_tip_hash: &Hash256,
    slot: u64,
    block_store: &Arc<BeaconBlockStore<T>>,
) -> Result<Option<Hash256>, Error>
where
    T: ClientDB + Sized,
{
    let lookup = block_store.block_at_slot(&chain_tip_hash, slot)?;
    Ok(lookup.map(|(hash_bytes, _)| Hash256::from(&hash_bytes[..])))
}
impl From<BeaconBlockAtSlotError> for Error {
fn from(e: BeaconBlockAtSlotError) -> Self {
match e {
BeaconBlockAtSlotError::DBError(s) => Error::DBError(s),
_ => Error::UnableToLookupBlockAtSlot,
}
}
}
#[cfg(test)]
mod tests {
    /*
     * TODO: Implement tests.
     *
     * These tests will require the `BeaconBlock` and `BeaconBlockBody` updates, which are not
     * yet included in the code base. Adding tests now will result in duplicated work.
     *
     * https://github.com/sigp/lighthouse/issues/97
     */
}

View File

@ -0,0 +1,38 @@
use super::types::{AttestationData, BeaconState};
use super::{Error, Invalid, Outcome};
/// Verify that an attestation's `data.justified_slot` matches a justified slot known to the
/// `state`.
///
/// When an attestation references a slot _before_ the latest state transition, it is acceptable
/// for it to reference the previously-known `justified_slot`. Were this not the case, every
/// attestation created _prior_ to the last state recalculation would be rejected whenever a
/// block was justified in that recalculation. It is both ideal and likely that blocks will be
/// justified during a state recalculation.
pub fn validate_attestation_justified_slot(
    data: &AttestationData,
    state: &BeaconState,
) -> Result<Outcome, Error> {
    // Attestations from before the last recalculation may cite the previous justified slot;
    // all others must cite the current one.
    let expected_justified_slot = if data.slot < state.latest_state_recalculation_slot {
        state.previous_justified_slot
    } else {
        state.justified_slot
    };
    verify_or!(
        data.justified_slot == expected_justified_slot,
        reject!(Invalid::JustifiedSlotImpermissable)
    );
    accept!()
}
#[cfg(test)]
mod tests {
    /*
     * TODO: Implement tests.
     *
     * These tests will require the `BeaconBlock` and `BeaconBlockBody` updates, which are not
     * yet included in the code base. Adding tests now will result in duplicated work.
     *
     * https://github.com/sigp/lighthouse/issues/97
     */
}

View File

@ -0,0 +1,23 @@
extern crate bls;
extern crate db;
extern crate hashing;
extern crate ssz;
extern crate ssz_helpers;
extern crate types;

// The macros must be declared first so the sibling modules can use them.
#[macro_use]
mod macros;
mod block_inclusion;
mod enums;
mod justified_block;
mod justified_slot;
mod shard_block;
mod signature;

// Public API of the attestation-validation crate, re-exported alphabetically.
pub use block_inclusion::validate_attestation_for_block;
pub use enums::{Error, Invalid, Outcome};
pub use justified_block::validate_attestation_justified_block_hash;
pub use justified_slot::validate_attestation_justified_slot;
pub use shard_block::validate_attestation_data_shard_block_hash;
pub use signature::validate_attestation_signature;

View File

@ -0,0 +1,19 @@
/// Evaluate `$result` when `$condition` is false. `$result` is typically a `reject!`, which
/// early-returns from the enclosing function; otherwise execution simply falls through.
macro_rules! verify_or {
    ($condition: expr, $result: expr) => {
        if !$condition {
            $result
        }
    };
}

/// Early-return from the enclosing function with `Ok(Outcome::Invalid($result))`.
macro_rules! reject {
    ($result: expr) => {
        return Ok(Outcome::Invalid($result));
    };
}

/// Expands to `Ok(Outcome::Valid)`; used as the tail expression of a validation function.
macro_rules! accept {
    () => {
        Ok(Outcome::Valid)
    };
}

View File

@ -0,0 +1,46 @@
use super::db::ClientDB;
use super::types::{AttestationData, BeaconState};
use super::{Error, Invalid, Outcome};
/// Check that an attestation's `shard_block_hash` is permissable with reference to the `state`'s
/// latest crosslink for the attestation's shard.
///
/// NOTE(review): the type parameter `T: ClientDB` is never used by this function; it is retained
/// because removing it would break callers that specify it explicitly — TODO confirm.
pub fn validate_attestation_data_shard_block_hash<T>(
    data: &AttestationData,
    state: &BeaconState,
) -> Result<Outcome, Error>
where
    T: ClientDB + Sized,
{
    /*
     * The `shard_block_hash` in the state's `latest_crosslinks` must match either the
     * `latest_crosslink_hash` or the `shard_block_hash` on the attestation.
     *
     * TODO: figure out the reasoning behind this.
     */
    match state.latest_crosslinks.get(data.shard as usize) {
        None => reject!(Invalid::UnknownShard),
        Some(crosslink) => {
            let known_hash = crosslink.shard_block_hash;
            let hash_is_permissable = known_hash == data.latest_crosslink_hash
                || known_hash == data.shard_block_hash;
            if hash_is_permissable {
                accept!()
            } else {
                reject!(Invalid::ShardBlockHashMismatch)
            }
        }
    }
}
#[cfg(test)]
mod tests {
    /*
     * TODO: Implement tests.
     *
     * These tests will require the `BeaconBlock` and `BeaconBlockBody` updates, which are not
     * yet included in the code base. Adding tests now will result in duplicated work.
     *
     * https://github.com/sigp/lighthouse/issues/97
     */
}

View File

@ -0,0 +1,149 @@
use super::bls::{AggregatePublicKey, AggregateSignature};
use super::db::stores::{ValidatorStore, ValidatorStoreError};
use super::db::ClientDB;
use super::types::{AttestationData, Bitfield, BitfieldError};
use super::{Error, Invalid, Outcome};
/// Validate that some aggregate signature is correct for some attestation data and known
/// validator set.
///
/// `attestation_indices` maps bitfield positions to canonical validator indices: a set bit at
/// position `i` means the validator with canonical index `attestation_indices[i]` participated
/// in the aggregate signature.
pub fn validate_attestation_signature<T>(
    attestation_data: &AttestationData,
    participation_bitfield: &Bitfield,
    aggregate_signature: &AggregateSignature,
    attestation_indices: &[usize],
    validator_store: &ValidatorStore<T>,
) -> Result<Outcome, Error>
where
    T: ClientDB + Sized,
{
    /*
     * Aggregate the public key of every validator whose participation bit is set. Iterating the
     * slice directly (instead of indexing) makes the old `BadValidatorIndex` path unreachable.
     */
    let mut agg_pub_key = AggregatePublicKey::new();
    for (i, &validator) in attestation_indices.iter().enumerate() {
        if participation_bitfield.get(i)? {
            // Load the public key for this canonical validator index.
            let pub_key = validator_store
                .get_public_key_by_index(validator)?
                .ok_or(Error::NoPublicKeyForValidator)?;
            // Aggregate the public key.
            agg_pub_key.add(&pub_key);
        }
    }
    let signed_message = attestation_data_signing_message(attestation_data);
    verify_or!(
        // TODO: ensure "domain" for aggregate signatures is included.
        // https://github.com/sigp/lighthouse/issues/91
        aggregate_signature.verify(&signed_message, &agg_pub_key),
        reject!(Invalid::SignatureInvalid)
    );
    accept!()
}
/// Build the message over which the attestation signature is formed: the canonical root of the
/// attestation data followed by a single zero byte (presumably a placeholder for the signature
/// "domain" — see https://github.com/sigp/lighthouse/issues/91).
fn attestation_data_signing_message(attestation_data: &AttestationData) -> Vec<u8> {
    let mut message = attestation_data.canonical_root().to_vec();
    message.push(0);
    message
}
impl From<ValidatorStoreError> for Error {
fn from(error: ValidatorStoreError) -> Self {
match error {
ValidatorStoreError::DBError(s) => Error::DBError(s),
ValidatorStoreError::DecodeError => Error::PublicKeyCorrupt,
}
}
}
impl From<BitfieldError> for Error {
    fn from(_: BitfieldError) -> Self {
        // Every bitfield failure is treated as an out-of-bounds index.
        Error::OutOfBoundsBitfieldIndex
    }
}
#[cfg(test)]
mod tests {
    use super::super::bls::{Keypair, Signature};
    use super::super::db::MemoryDB;
    use super::*;
    use std::sync::Arc;

    /*
     * TODO: Test cases are not comprehensive.
     * https://github.com/sigp/lighthouse/issues/94
     */

    #[test]
    fn test_signature_verification() {
        let attestation_data = AttestationData::zero();
        let message = attestation_data_signing_message(&attestation_data);
        // Six validators that sign the message, six that do not.
        let signing_keypairs = vec![
            Keypair::random(),
            Keypair::random(),
            Keypair::random(),
            Keypair::random(),
            Keypair::random(),
            Keypair::random(),
        ];
        let non_signing_keypairs = vec![
            Keypair::random(),
            Keypair::random(),
            Keypair::random(),
            Keypair::random(),
            Keypair::random(),
            Keypair::random(),
        ];
        /*
         * Signing keypairs first, then non-signing
         */
        let mut all_keypairs = signing_keypairs.clone();
        all_keypairs.append(&mut non_signing_keypairs.clone());

        let attestation_indices: Vec<usize> = (0..all_keypairs.len()).collect();
        // Only the signing validators have their participation bits set.
        let mut bitfield = Bitfield::from_elem(all_keypairs.len(), false);
        for i in 0..signing_keypairs.len() {
            bitfield.set(i, true).unwrap();
        }

        // Store every validator's public key, indexed by its position in `all_keypairs`.
        let db = Arc::new(MemoryDB::open());
        let store = ValidatorStore::new(db);
        for (i, keypair) in all_keypairs.iter().enumerate() {
            store.put_public_key_by_index(i, &keypair.pk).unwrap();
        }

        // Aggregate the signatures of the signing validators only.
        let mut agg_sig = AggregateSignature::new();
        for keypair in &signing_keypairs {
            let sig = Signature::new(&message, &keypair.sk);
            agg_sig.add(&sig);
        }

        /*
         * Test using all valid parameters.
         */
        let outcome = validate_attestation_signature(
            &attestation_data,
            &bitfield,
            &agg_sig,
            &attestation_indices,
            &store,
        ).unwrap();
        assert_eq!(outcome, Outcome::Valid);

        /*
         * Add another validator to the bitfield, run validation with all other
         * parameters the same and assert that it fails.
         *
         * The validator at that index never signed, so the aggregated public key no longer
         * corresponds to the aggregate signature.
         */
        bitfield.set(signing_keypairs.len() + 1, true).unwrap();
        let outcome = validate_attestation_signature(
            &attestation_data,
            &bitfield,
            &agg_sig,
            &attestation_indices,
            &store,
        ).unwrap();
        assert_eq!(outcome, Outcome::Invalid(Invalid::SignatureInvalid));
    }
}

View File

@ -11,6 +11,5 @@ ssz = { path = "../utils/ssz" }
ssz_helpers = { path = "../utils/ssz_helpers" } ssz_helpers = { path = "../utils/ssz_helpers" }
state-transition = { path = "../state-transition" } state-transition = { path = "../state-transition" }
types = { path = "../types" } types = { path = "../types" }
validation = { path = "../validation" }
validator_induction = { path = "../validator_induction" } validator_induction = { path = "../validator_induction" }
validator_shuffling = { path = "../validator_shuffling" } validator_shuffling = { path = "../validator_shuffling" }

View File

@ -1,93 +0,0 @@
use super::BeaconChain;
use db::stores::BeaconBlockAtSlotError;
use db::ClientDB;
use ssz_helpers::ssz_beacon_block::SszBeaconBlock;
use std::sync::Arc;
use types::Hash256;
use validation::block_validation::BeaconBlockValidationContext;
/// Failure modes when assembling a `BeaconBlockValidationContext` from the chain's caches.
pub enum BlockValidationContextError {
    // The parent block's crystallized state root is not present in `crystallized_states`.
    UnknownCrystallizedState,
    // The parent block's active state root is not present in `active_states`.
    UnknownActiveState,
    // No attester/proposer maps are cached for the parent's crystallized state root.
    UnknownAttesterProposerMaps,
    // The block being validated does not declare a parent hash.
    NoParentHash,
    // No block exists at the last justified slot in the parent's chain.
    UnknownJustifiedBlock,
    BlockAlreadyKnown,
    // Wraps a database-level failure from the block-at-slot lookup.
    BlockSlotLookupError(BeaconBlockAtSlotError),
}

impl From<BeaconBlockAtSlotError> for BlockValidationContextError {
    fn from(e: BeaconBlockAtSlotError) -> BlockValidationContextError {
        BlockValidationContextError::BlockSlotLookupError(e)
    }
}
impl<T> BeaconChain<T>
where
    T: ClientDB + Sized,
{
    /// Assemble a `BeaconBlockValidationContext` for validating `block` against its
    /// `parent_block`, drawing on the chain's in-memory state caches and stores.
    ///
    /// Fails when the parent's crystallized or active state is not cached, when no
    /// attester/proposer maps exist for the parent's crystallized state, when `block` has no
    /// parent hash, or when the justified block cannot be found in the parent's chain.
    pub(crate) fn block_validation_context(
        &self,
        block: &SszBeaconBlock,
        parent_block: &SszBeaconBlock,
        present_slot: u64,
    ) -> Result<BeaconBlockValidationContext<T>, BlockValidationContextError> {
        /*
         * Load the crystallized state for this block from our caches.
         *
         * Fail if the crystallized state is unknown.
         */
        let cry_state_root = Hash256::from(parent_block.cry_state_root());
        let cry_state = self
            .crystallized_states
            .get(&cry_state_root)
            .ok_or(BlockValidationContextError::UnknownCrystallizedState)?;
        /*
         * Load the active state for this block from our caches.
         *
         * Fail if the active state is unknown.
         */
        let act_state_root = Hash256::from(parent_block.act_state_root());
        let act_state = self
            .active_states
            .get(&act_state_root)
            .ok_or(BlockValidationContextError::UnknownActiveState)?;
        /*
         * Learn the last justified slot from the crystallized state and load
         * the hash of this block from the database
         */
        let last_justified_slot = cry_state.last_justified_slot;
        let parent_block_hash = block
            .parent_hash()
            .ok_or(BlockValidationContextError::NoParentHash)?;
        // Look up the justified block within the parent's chain specifically; the lookup's
        // `BeaconBlockAtSlotError` converts into `BlockSlotLookupError` via `?`.
        let (last_justified_block_hash, _) = self
            .store
            .block
            .block_at_slot(&parent_block_hash, last_justified_slot)?
            .ok_or(BlockValidationContextError::UnknownJustifiedBlock)?;
        /*
         * Load the attester and proposer maps for the crystallized state.
         */
        let (attester_map, proposer_map) = self
            .attester_proposer_maps
            .get(&cry_state_root)
            .ok_or(BlockValidationContextError::UnknownAttesterProposerMaps)?;
        // The context owns clones/Arcs of the cached data so it can outlive this borrow of
        // `self`.
        Ok(BeaconBlockValidationContext {
            present_slot,
            cycle_length: self.config.cycle_length,
            last_justified_slot: cry_state.last_justified_slot,
            last_justified_block_hash: Hash256::from(&last_justified_block_hash[..]),
            last_finalized_slot: self.last_finalized_slot,
            recent_block_hashes: Arc::new(act_state.recent_block_hashes.clone()),
            proposer_map: proposer_map.clone(),
            attester_map: attester_map.clone(),
            block_store: self.store.block.clone(),
            validator_store: self.store.validator.clone(),
            pow_store: self.store.pow_chain.clone(),
        })
    }
}

View File

@ -1,11 +1,6 @@
use super::block_context::BlockValidationContextError;
use super::state_transition::StateTransitionError;
use super::BeaconChain; use super::BeaconChain;
use db::{ClientDB, DBError}; use db::ClientDB;
use naive_fork_choice::{naive_fork_choice, ForkChoiceError};
use ssz_helpers::ssz_beacon_block::{SszBeaconBlock, SszBeaconBlockError};
use types::Hash256; use types::Hash256;
use validation::block_validation::SszBeaconBlockValidationError;
pub enum BlockProcessingOutcome { pub enum BlockProcessingOutcome {
BlockAlreadyKnown, BlockAlreadyKnown,
@ -14,17 +9,8 @@ pub enum BlockProcessingOutcome {
NewForkBlock, NewForkBlock,
} }
pub enum BlockProcessingError { pub enum Error {
ParentBlockNotFound, NotImplemented,
ActiveStateRootInvalid,
CrystallizedStateRootInvalid,
NoHeadHashes,
ForkChoiceFailed(ForkChoiceError),
ContextGenerationFailed(BlockValidationContextError),
DeserializationFailed(SszBeaconBlockError),
ValidationFailed(SszBeaconBlockValidationError),
StateTransitionFailed(StateTransitionError),
DBError(String),
} }
impl<T> BeaconChain<T> impl<T> BeaconChain<T>
@ -33,216 +19,11 @@ where
{ {
pub fn process_block( pub fn process_block(
&mut self, &mut self,
ssz: &[u8], _ssz: &[u8],
present_slot: u64, _present_slot: u64,
) -> Result<(BlockProcessingOutcome, Hash256), BlockProcessingError> { ) -> Result<(BlockProcessingOutcome, Hash256), Error> {
/* // TODO: block processing has been removed.
* Generate a SszBlock to read directly from the serialized SSZ. // https://github.com/sigp/lighthouse/issues/98
*/ Err(Error::NotImplemented)
let ssz_block = SszBeaconBlock::from_slice(ssz)?;
let block_hash = Hash256::from(&ssz_block.block_hash()[..]);
/*
* If this block is already known, return immediately and indicate the the block is
* known. Don't attempt to deserialize the block.
*/
if self.store.block.block_exists(&block_hash)? {
return Ok((BlockProcessingOutcome::BlockAlreadyKnown, block_hash));
}
/*
* Determine the hash of the blocks parent
*/
let parent_hash = ssz_block
.parent_hash()
.ok_or(BlockProcessingError::ValidationFailed(
SszBeaconBlockValidationError::UnknownParentHash,
))?;
/*
* Load the parent block from the database and create an SszBeaconBlock for reading it.
*/
let parent_block_ssz_bytes = self
.store
.block
.get_serialized_block(&parent_hash[..])?
.ok_or(BlockProcessingError::ParentBlockNotFound)?;
let parent_ssz_block = SszBeaconBlock::from_slice(&parent_block_ssz_bytes)?;
/*
* Generate the context in which to validate this block.
*/
let validation_context =
self.block_validation_context(&ssz_block, &parent_ssz_block, present_slot)?;
/*
* Validate the block against the context, checking signatures, parent_hashes, etc.
*/
let block = validation_context.validate_ssz_block(&ssz_block)?;
let (new_act_state, new_cry_state_option) = {
/*
* Load the states from memory.
*
* Note: this is the second time we load these, the first was in
* `block_validation_context`. Theres an opportunity for some opimisation here.
* It was left out because it made the code more cumbersome.
*/
let act_state = self
.active_states
.get(&block.active_state_root)
.ok_or(BlockValidationContextError::UnknownActiveState)?;
let cry_state = self
.crystallized_states
.get(&block.crystallized_state_root)
.ok_or(BlockValidationContextError::UnknownCrystallizedState)?;
self.transition_states(act_state, cry_state, &block, &block_hash)?
};
/*
* Calculate the new active state root and ensure the block state root matches.
*/
let new_act_state_root = new_act_state.canonical_root();
if new_act_state_root != block.active_state_root {
return Err(BlockProcessingError::ActiveStateRootInvalid);
}
/*
* Determine the crystallized state root and ensure the block state root matches.
*
* If a new crystallized state was created, store it in memory.
*/
let (new_cry_state_root, cry_state_transitioned) = match new_cry_state_option {
None => {
/*
* A new crystallized state was not created, therefore the
* `crystallized_state_root` of this block must match its parent.
*/
if Hash256::from(parent_ssz_block.cry_state_root()) != block.crystallized_state_root
{
return Err(BlockProcessingError::ActiveStateRootInvalid);
}
// Return the old root
(block.crystallized_state_root, false)
}
Some(new_cry_state) => {
/*
* A new crystallized state was created. Check to ensure the crystallized
* state root in the block is the same as the calculated on this node.
*/
let cry_state_root = new_cry_state.canonical_root();
if cry_state_root != block.crystallized_state_root {
return Err(BlockProcessingError::ActiveStateRootInvalid);
}
/*
* Store the new crystallized state in memory.
*/
self.crystallized_states
.insert(cry_state_root, new_cry_state);
// Return the new root
(cry_state_root, true)
}
};
/*
* Store the new block as a leaf in the block tree.
*/
let mut new_head_block_hashes = self.head_block_hashes.clone();
let new_parent_head_hash_index = match new_head_block_hashes
.iter()
.position(|x| *x == Hash256::from(parent_hash))
{
Some(i) => {
new_head_block_hashes[i] = block_hash.clone();
i
}
None => {
new_head_block_hashes.push(block_hash.clone());
new_head_block_hashes.len() - 1
}
};
/*
* Store the new block in the database.
*/
self.store
.block
.put_serialized_block(&block_hash[..], ssz_block.block_ssz())?;
/*
* Store the active state in memory.
*/
self.active_states.insert(new_act_state_root, new_act_state);
let new_canonical_head_block_hash_index =
match naive_fork_choice(&self.head_block_hashes, self.store.block.clone())? {
None => {
/*
* Fork choice failed, therefore the block, active state and crystallized state
* can be removed from storage (i.e., forgotten).
*/
if cry_state_transitioned {
// A new crystallized state was generated, so it should be deleted.
self.crystallized_states.remove(&new_cry_state_root);
}
self.active_states.remove(&new_act_state_root);
self.store.block.delete_block(&block_hash[..])?;
return Err(BlockProcessingError::NoHeadHashes);
}
Some(i) => i,
};
if new_canonical_head_block_hash_index != self.canonical_head_block_hash {
/*
* The block caused a re-org (switch of chains).
*/
Ok((BlockProcessingOutcome::NewReorgBlock, block_hash))
} else {
/*
* The block did not cause a re-org.
*/
if new_parent_head_hash_index == self.canonical_head_block_hash {
Ok((BlockProcessingOutcome::NewCanonicalBlock, block_hash))
} else {
Ok((BlockProcessingOutcome::NewForkBlock, block_hash))
}
}
}
}
impl From<BlockValidationContextError> for BlockProcessingError {
fn from(e: BlockValidationContextError) -> Self {
BlockProcessingError::ContextGenerationFailed(e)
}
}
impl From<SszBeaconBlockError> for BlockProcessingError {
fn from(e: SszBeaconBlockError) -> Self {
BlockProcessingError::DeserializationFailed(e)
}
}
impl From<DBError> for BlockProcessingError {
fn from(e: DBError) -> Self {
BlockProcessingError::DBError(e.message)
}
}
impl From<ForkChoiceError> for BlockProcessingError {
fn from(e: ForkChoiceError) -> Self {
BlockProcessingError::ForkChoiceFailed(e)
}
}
impl From<SszBeaconBlockValidationError> for BlockProcessingError {
fn from(e: SszBeaconBlockValidationError) -> Self {
BlockProcessingError::ValidationFailed(e)
}
}
impl From<StateTransitionError> for BlockProcessingError {
fn from(e: StateTransitionError) -> Self {
BlockProcessingError::StateTransitionFailed(e)
} }
} }

View File

@ -1,22 +1,24 @@
use super::{ActiveState, BeaconChainError, ChainConfig, CrystallizedState}; use super::{ActiveState, ChainConfig, CrystallizedState};
use types::{CrosslinkRecord, Hash256, ValidatorStatus}; use types::ValidatorStatus;
use validator_induction::ValidatorInductor; use validator_induction::ValidatorInductor;
use validator_shuffling::{shard_and_committees_for_cycle, ValidatorAssignmentError}; use validator_shuffling::{shard_and_committees_for_cycle, ValidatorAssignmentError};
pub const INITIAL_FORK_VERSION: u32 = 0; #[derive(Debug, PartialEq)]
pub enum Error {
ValidationAssignmentError(ValidatorAssignmentError),
NotImplemented,
}
impl From<ValidatorAssignmentError> for BeaconChainError { impl From<ValidatorAssignmentError> for Error {
fn from(_: ValidatorAssignmentError) -> BeaconChainError { fn from(e: ValidatorAssignmentError) -> Error {
BeaconChainError::InvalidGenesis Error::ValidationAssignmentError(e)
} }
} }
/// Initialize a new ChainHead with genesis parameters. /// Initialize a new ChainHead with genesis parameters.
/// ///
/// Used when syncing a chain from scratch. /// Used when syncing a chain from scratch.
pub fn genesis_states( pub fn genesis_states(config: &ChainConfig) -> Result<(ActiveState, CrystallizedState), Error> {
config: &ChainConfig,
) -> Result<(ActiveState, CrystallizedState), ValidatorAssignmentError> {
/* /*
* Parse the ValidatorRegistrations into ValidatorRecords and induct them. * Parse the ValidatorRegistrations into ValidatorRecords and induct them.
* *
@ -35,63 +37,17 @@ pub fn genesis_states(
* *
* Crystallizedstate stores two cycles, so we simply repeat the same assignment twice. * Crystallizedstate stores two cycles, so we simply repeat the same assignment twice.
*/ */
let shard_and_committee_for_slots = { let _shard_and_committee_for_slots = {
let mut a = shard_and_committees_for_cycle(&vec![0; 32], &validators, 0, &config)?; let mut a = shard_and_committees_for_cycle(&vec![0; 32], &validators, 0, &config)?;
let mut b = a.clone(); let mut b = a.clone();
a.append(&mut b); a.append(&mut b);
a a
}; };
/* // TODO: implement genesis for `BeaconState`
* Set all the crosslink records to reference zero hashes. // https://github.com/sigp/lighthouse/issues/99
*/
let crosslinks = {
let mut c = vec![];
for _ in 0..config.shard_count {
c.push(CrosslinkRecord {
recently_changed: false,
slot: 0,
hash: Hash256::zero(),
});
}
c
};
/* Err(Error::NotImplemented)
* Initialize a genesis `Crystallizedstate`
*/
let crystallized_state = CrystallizedState {
validator_set_change_slot: 0,
validators: validators.to_vec(),
crosslinks,
last_state_recalculation_slot: 0,
last_finalized_slot: 0,
last_justified_slot: 0,
justified_streak: 0,
shard_and_committee_for_slots,
deposits_penalized_in_period: vec![],
validator_set_delta_hash_chain: Hash256::zero(),
pre_fork_version: INITIAL_FORK_VERSION,
post_fork_version: INITIAL_FORK_VERSION,
fork_slot_number: 0,
};
/*
* Set all recent block hashes to zero.
*/
let recent_block_hashes = vec![Hash256::zero(); config.cycle_length as usize];
/*
* Create an active state.
*/
let active_state = ActiveState {
pending_attestations: vec![],
pending_specials: vec![],
recent_block_hashes,
randao_mix: Hash256::zero(),
};
Ok((active_state, crystallized_state))
} }
#[cfg(test)] #[cfg(test)]
@ -99,6 +55,10 @@ mod tests {
extern crate bls; extern crate bls;
extern crate validator_induction; extern crate validator_induction;
// TODO: implement genesis for `BeaconState`
// https://github.com/sigp/lighthouse/issues/99
//
/*
use self::bls::{create_proof_of_possession, Keypair}; use self::bls::{create_proof_of_possession, Keypair};
use super::*; use super::*;
use types::{Address, Hash256, ValidatorRegistration}; use types::{Address, Hash256, ValidatorRegistration};
@ -190,4 +150,5 @@ mod tests {
); );
assert_eq!(cry.validators.len(), good_validator_count); assert_eq!(cry.validators.len(), good_validator_count);
} }
*/
} }

View File

@ -4,11 +4,9 @@ extern crate ssz;
extern crate ssz_helpers; extern crate ssz_helpers;
extern crate state_transition; extern crate state_transition;
extern crate types; extern crate types;
extern crate validation;
extern crate validator_induction; extern crate validator_induction;
extern crate validator_shuffling; extern crate validator_shuffling;
mod block_context;
mod block_processing; mod block_processing;
mod genesis; mod genesis;
mod maps; mod maps;
@ -16,7 +14,7 @@ mod stores;
mod transition; mod transition;
use db::ClientDB; use db::ClientDB;
use genesis::genesis_states; use genesis::{genesis_states, Error as GenesisError};
use maps::{generate_attester_and_proposer_maps, AttesterAndProposerMapError}; use maps::{generate_attester_and_proposer_maps, AttesterAndProposerMapError};
use std::collections::HashMap; use std::collections::HashMap;
use std::sync::Arc; use std::sync::Arc;
@ -28,6 +26,7 @@ pub enum BeaconChainError {
InvalidGenesis, InvalidGenesis,
InsufficientValidators, InsufficientValidators,
UnableToGenerateMaps(AttesterAndProposerMapError), UnableToGenerateMaps(AttesterAndProposerMapError),
GenesisError(GenesisError),
DBError(String), DBError(String),
} }
@ -103,42 +102,8 @@ impl From<AttesterAndProposerMapError> for BeaconChainError {
} }
} }
#[cfg(test)] impl From<GenesisError> for BeaconChainError {
mod tests { fn from(e: GenesisError) -> BeaconChainError {
use super::*; BeaconChainError::GenesisError(e)
use db::stores::*;
use db::MemoryDB;
use std::sync::Arc;
use types::ValidatorRegistration;
#[test]
fn test_new_chain() {
let mut config = ChainConfig::standard();
config.cycle_length = 4;
config.shard_count = 4;
let db = Arc::new(MemoryDB::open());
let store = BeaconChainStore {
block: Arc::new(BeaconBlockStore::new(db.clone())),
pow_chain: Arc::new(PoWChainStore::new(db.clone())),
validator: Arc::new(ValidatorStore::new(db.clone())),
};
for _ in 0..config.cycle_length * 2 {
config
.initial_validators
.push(ValidatorRegistration::random())
}
let chain = BeaconChain::new(store, config.clone()).unwrap();
let (act, cry) = genesis_states(&config).unwrap();
assert_eq!(chain.last_finalized_slot, 0);
assert_eq!(chain.canonical_block_hash(), Hash256::zero());
let stored_act = chain.active_states.get(&Hash256::zero()).unwrap();
assert_eq!(act, *stored_act);
let stored_cry = chain.crystallized_states.get(&Hash256::zero()).unwrap();
assert_eq!(cry, *stored_cry);
} }
} }

View File

@ -1,7 +0,0 @@
extern crate chain;
#[cfg(test)]
mod tests {
use chain::{BeaconChain, BeaconChainError};
}

View File

@ -1,6 +1,18 @@
use super::ssz::{Decodable, DecodeError, Encodable, SszStream};
use super::Hash256; use super::Hash256;
#[derive(Debug, Clone, PartialEq)] pub const SSZ_ATTESTION_DATA_LENGTH: usize = {
8 + // slot
8 + // shard
32 + // beacon_block_hash
32 + // epoch_boundary_hash
32 + // shard_block_hash
32 + // latest_crosslink_hash
8 + // justified_slot
32 // justified_block_hash
};
#[derive(Debug, Clone, PartialEq, Default)]
pub struct AttestationData { pub struct AttestationData {
pub slot: u64, pub slot: u64,
pub shard: u64, pub shard: u64,
@ -11,3 +23,88 @@ pub struct AttestationData {
pub justified_slot: u64, pub justified_slot: u64,
pub justified_block_hash: Hash256, pub justified_block_hash: Hash256,
} }
impl AttestationData {
pub fn zero() -> Self {
Self {
slot: 0,
shard: 0,
beacon_block_hash: Hash256::zero(),
epoch_boundary_hash: Hash256::zero(),
shard_block_hash: Hash256::zero(),
latest_crosslink_hash: Hash256::zero(),
justified_slot: 0,
justified_block_hash: Hash256::zero(),
}
}
// TODO: Implement this as a merkle root, once tree_ssz is implemented.
// https://github.com/sigp/lighthouse/issues/92
pub fn canonical_root(&self) -> Hash256 {
Hash256::zero()
}
}
impl Encodable for AttestationData {
fn ssz_append(&self, s: &mut SszStream) {
s.append(&self.slot);
s.append(&self.shard);
s.append(&self.beacon_block_hash);
s.append(&self.epoch_boundary_hash);
s.append(&self.shard_block_hash);
s.append(&self.latest_crosslink_hash);
s.append(&self.justified_slot);
s.append(&self.justified_block_hash);
}
}
impl Decodable for AttestationData {
fn ssz_decode(bytes: &[u8], i: usize) -> Result<(Self, usize), DecodeError> {
let (slot, i) = u64::ssz_decode(bytes, i)?;
let (shard, i) = u64::ssz_decode(bytes, i)?;
let (beacon_block_hash, i) = Hash256::ssz_decode(bytes, i)?;
let (epoch_boundary_hash, i) = Hash256::ssz_decode(bytes, i)?;
let (shard_block_hash, i) = Hash256::ssz_decode(bytes, i)?;
let (latest_crosslink_hash, i) = Hash256::ssz_decode(bytes, i)?;
let (justified_slot, i) = u64::ssz_decode(bytes, i)?;
let (justified_block_hash, i) = Hash256::ssz_decode(bytes, i)?;
let attestation_data = AttestationData {
slot,
shard,
beacon_block_hash,
epoch_boundary_hash,
shard_block_hash,
latest_crosslink_hash,
justified_slot,
justified_block_hash,
};
Ok((attestation_data, i))
}
}
#[cfg(test)]
mod tests {
use super::super::ssz::ssz_encode;
use super::*;
#[test]
pub fn test_attestation_record_ssz_round_trip() {
let original = AttestationData {
slot: 42,
shard: 16,
beacon_block_hash: Hash256::from("beacon".as_bytes()),
epoch_boundary_hash: Hash256::from("epoch".as_bytes()),
shard_block_hash: Hash256::from("shard".as_bytes()),
latest_crosslink_hash: Hash256::from("xlink".as_bytes()),
justified_slot: 8,
justified_block_hash: Hash256::from("justified".as_bytes()),
};
let ssz = ssz_encode(&original);
let (decoded, _) = AttestationData::ssz_decode(&ssz, 0).unwrap();
assert_eq!(original, decoded);
}
}

View File

@ -1,65 +1,45 @@
use super::attestation_data::SSZ_ATTESTION_DATA_LENGTH;
use super::bls::{AggregateSignature, BLS_AGG_SIG_BYTE_SIZE}; use super::bls::{AggregateSignature, BLS_AGG_SIG_BYTE_SIZE};
use super::ssz::{decode_ssz_list, Decodable, DecodeError, Encodable, SszStream}; use super::ssz::{decode_ssz_list, Decodable, DecodeError, Encodable, SszStream, LENGTH_BYTES};
use super::{Bitfield, Hash256}; use super::{AttestationData, Bitfield};
pub const MIN_SSZ_ATTESTION_RECORD_LENGTH: usize = { pub const MIN_SSZ_ATTESTION_RECORD_LENGTH: usize = {
8 + // slot SSZ_ATTESTION_DATA_LENGTH + // data
2 + // shard_id 5 + // participation_bitfield (assuming 1 byte of bitfield)
4 + // oblique_parent_hashes (empty list) 5 + // custody_bitfield (assuming 1 byte of bitfield)
32 + // shard_block_hash LENGTH_BYTES + BLS_AGG_SIG_BYTE_SIZE // aggregate sig
5 + // attester_bitfield (assuming 1 byte of bitfield)
8 + // justified_slot
32 + // justified_block_hash
4 + BLS_AGG_SIG_BYTE_SIZE // aggregate sig (two 256 bit points)
}; };
#[derive(Debug, Clone, PartialEq)] #[derive(Debug, Clone, PartialEq)]
pub struct AttestationRecord { pub struct AttestationRecord {
pub slot: u64, pub data: AttestationData,
pub shard_id: u16, pub participation_bitfield: Bitfield,
pub oblique_parent_hashes: Vec<Hash256>, pub custody_bitfield: Bitfield,
pub shard_block_hash: Hash256,
pub attester_bitfield: Bitfield,
pub justified_slot: u64,
pub justified_block_hash: Hash256,
pub aggregate_sig: AggregateSignature, pub aggregate_sig: AggregateSignature,
} }
impl Encodable for AttestationRecord { impl Encodable for AttestationRecord {
fn ssz_append(&self, s: &mut SszStream) { fn ssz_append(&self, s: &mut SszStream) {
s.append(&self.slot); s.append(&self.data);
s.append(&self.shard_id); s.append(&self.participation_bitfield);
s.append_vec(&self.oblique_parent_hashes); s.append(&self.custody_bitfield);
s.append(&self.shard_block_hash);
s.append(&self.attester_bitfield);
s.append(&self.justified_slot);
s.append(&self.justified_block_hash);
s.append_vec(&self.aggregate_sig.as_bytes()); s.append_vec(&self.aggregate_sig.as_bytes());
} }
} }
impl Decodable for AttestationRecord { impl Decodable for AttestationRecord {
fn ssz_decode(bytes: &[u8], i: usize) -> Result<(Self, usize), DecodeError> { fn ssz_decode(bytes: &[u8], i: usize) -> Result<(Self, usize), DecodeError> {
let (slot, i) = u64::ssz_decode(bytes, i)?; let (data, i) = AttestationData::ssz_decode(bytes, i)?;
let (shard_id, i) = u16::ssz_decode(bytes, i)?; let (participation_bitfield, i) = Bitfield::ssz_decode(bytes, i)?;
let (oblique_parent_hashes, i) = decode_ssz_list(bytes, i)?; let (custody_bitfield, i) = Bitfield::ssz_decode(bytes, i)?;
let (shard_block_hash, i) = Hash256::ssz_decode(bytes, i)?;
let (attester_bitfield, i) = Bitfield::ssz_decode(bytes, i)?;
let (justified_slot, i) = u64::ssz_decode(bytes, i)?;
let (justified_block_hash, i) = Hash256::ssz_decode(bytes, i)?;
// Do aggregate sig decoding properly.
let (agg_sig_bytes, i) = decode_ssz_list(bytes, i)?; let (agg_sig_bytes, i) = decode_ssz_list(bytes, i)?;
let aggregate_sig = let aggregate_sig =
AggregateSignature::from_bytes(&agg_sig_bytes).map_err(|_| DecodeError::TooShort)?; // also could be TooLong AggregateSignature::from_bytes(&agg_sig_bytes).map_err(|_| DecodeError::TooShort)?; // also could be TooLong
let attestation_record = Self { let attestation_record = Self {
slot, data,
shard_id, participation_bitfield,
oblique_parent_hashes, custody_bitfield,
shard_block_hash,
attester_bitfield,
justified_slot,
justified_block_hash,
aggregate_sig, aggregate_sig,
}; };
Ok((attestation_record, i)) Ok((attestation_record, i))
@ -69,13 +49,9 @@ impl Decodable for AttestationRecord {
impl AttestationRecord { impl AttestationRecord {
pub fn zero() -> Self { pub fn zero() -> Self {
Self { Self {
slot: 0, data: AttestationData::zero(),
shard_id: 0, participation_bitfield: Bitfield::new(),
oblique_parent_hashes: vec![], custody_bitfield: Bitfield::new(),
shard_block_hash: Hash256::zero(),
attester_bitfield: Bitfield::new(),
justified_slot: 0,
justified_block_hash: Hash256::zero(),
aggregate_sig: AggregateSignature::new(), aggregate_sig: AggregateSignature::new(),
} }
} }
@ -83,45 +59,29 @@ impl AttestationRecord {
#[cfg(test)] #[cfg(test)]
mod tests { mod tests {
use super::super::ssz::SszStream; use super::super::ssz::ssz_encode;
use super::*; use super::*;
#[test] #[test]
pub fn test_attestation_record_min_ssz_length() { pub fn test_attestation_record_min_ssz_length() {
let ar = AttestationRecord::zero(); let ar = AttestationRecord::zero();
let mut ssz_stream = SszStream::new(); let ssz = ssz_encode(&ar);
ssz_stream.append(&ar);
let ssz = ssz_stream.drain();
assert_eq!(ssz.len(), MIN_SSZ_ATTESTION_RECORD_LENGTH); assert_eq!(ssz.len(), MIN_SSZ_ATTESTION_RECORD_LENGTH);
} }
#[test] #[test]
pub fn test_attestation_record_min_ssz_encode_decode() { pub fn test_attestation_record_ssz_round_trip() {
let original = AttestationRecord { let original = AttestationRecord {
slot: 7, data: AttestationData::zero(),
shard_id: 9, participation_bitfield: Bitfield::from_bytes(&vec![17; 42][..]),
oblique_parent_hashes: vec![Hash256::from(&vec![14; 32][..])], custody_bitfield: Bitfield::from_bytes(&vec![18; 12][..]),
shard_block_hash: Hash256::from(&vec![15; 32][..]),
attester_bitfield: Bitfield::from_bytes(&vec![17; 42][..]),
justified_slot: 19,
justified_block_hash: Hash256::from(&vec![15; 32][..]),
aggregate_sig: AggregateSignature::new(), aggregate_sig: AggregateSignature::new(),
}; };
let mut ssz_stream = SszStream::new(); let ssz = ssz_encode(&original);
ssz_stream.append(&original); let (decoded, _) = AttestationRecord::ssz_decode(&ssz, 0).unwrap();
let (decoded, _) = AttestationRecord::ssz_decode(&ssz_stream.drain(), 0).unwrap(); assert_eq!(original, decoded);
assert_eq!(original.slot, decoded.slot);
assert_eq!(original.shard_id, decoded.shard_id);
assert_eq!(
original.oblique_parent_hashes,
decoded.oblique_parent_hashes
);
assert_eq!(original.shard_block_hash, decoded.shard_block_hash);
assert_eq!(original.attester_bitfield, decoded.attester_bitfield);
assert_eq!(original.justified_slot, decoded.justified_slot);
assert_eq!(original.justified_block_hash, decoded.justified_block_hash);
} }
} }

View File

@ -15,7 +15,7 @@ pub const MIN_SSZ_BLOCK_LENGTH: usize = {
}; };
pub const MAX_SSZ_BLOCK_LENGTH: usize = MIN_SSZ_BLOCK_LENGTH + (1 << 24); pub const MAX_SSZ_BLOCK_LENGTH: usize = MIN_SSZ_BLOCK_LENGTH + (1 << 24);
#[derive(Debug, PartialEq, Clone)] #[derive(Debug, PartialEq, Clone, Default)]
pub struct BeaconBlock { pub struct BeaconBlock {
pub slot: u64, pub slot: u64,
pub randao_reveal: Hash256, pub randao_reveal: Hash256,

View File

@ -2,6 +2,7 @@ use super::ValidatorRegistration;
#[derive(Debug, Clone, PartialEq)] #[derive(Debug, Clone, PartialEq)]
pub struct ChainConfig { pub struct ChainConfig {
// Old, potentially outdated constants
pub cycle_length: u8, pub cycle_length: u8,
pub deposit_size_gwei: u64, pub deposit_size_gwei: u64,
pub shard_count: u16, pub shard_count: u16,
@ -10,6 +11,10 @@ pub struct ChainConfig {
pub genesis_time: u64, pub genesis_time: u64,
pub slot_duration_millis: u64, pub slot_duration_millis: u64,
pub initial_validators: Vec<ValidatorRegistration>, pub initial_validators: Vec<ValidatorRegistration>,
// New constants
pub epoch_length: u64,
pub min_attestation_inclusion_delay: u64,
} }
/* /*
@ -28,6 +33,10 @@ impl ChainConfig {
genesis_time: TEST_GENESIS_TIME, genesis_time: TEST_GENESIS_TIME,
slot_duration_millis: 16 * 1000, slot_duration_millis: 16 * 1000,
initial_validators: vec![], initial_validators: vec![],
// New
epoch_length: 64,
min_attestation_inclusion_delay: 4,
} }
} }
@ -54,6 +63,10 @@ impl ChainConfig {
genesis_time: TEST_GENESIS_TIME, // arbitrary genesis_time: TEST_GENESIS_TIME, // arbitrary
slot_duration_millis: 16 * 1000, slot_duration_millis: 16 * 1000,
initial_validators: vec![], initial_validators: vec![],
// New constants
epoch_length: 64,
min_attestation_inclusion_delay: 4,
} }
} }
} }

View File

@ -2,31 +2,16 @@ use super::Hash256;
#[derive(Clone, Debug, PartialEq)] #[derive(Clone, Debug, PartialEq)]
pub struct CrosslinkRecord { pub struct CrosslinkRecord {
pub recently_changed: bool,
pub slot: u64, pub slot: u64,
pub hash: Hash256, pub shard_block_hash: Hash256,
} }
impl CrosslinkRecord { impl CrosslinkRecord {
/// Generates a new instance where `dynasty` and `hash` are both zero. /// Generates a new instance where `dynasty` and `hash` are both zero.
pub fn zero() -> Self { pub fn zero() -> Self {
Self { Self {
recently_changed: false,
slot: 0, slot: 0,
hash: Hash256::zero(), shard_block_hash: Hash256::zero(),
} }
} }
} }
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn test_crosslink_record_zero() {
let c = CrosslinkRecord::zero();
assert_eq!(c.recently_changed, false);
assert_eq!(c.slot, 0);
assert!(c.hash.is_zero());
}
}

View File

@ -1,6 +1,8 @@
use super::bls::BLS_AGG_SIG_BYTE_SIZE;
use super::ssz::decode::decode_length; use super::ssz::decode::decode_length;
use super::ssz::LENGTH_BYTES; use super::ssz::LENGTH_BYTES;
use super::types::attestation_record::MIN_SSZ_ATTESTION_RECORD_LENGTH as MIN_LENGTH; use super::types::attestation_data::SSZ_ATTESTION_DATA_LENGTH;
use super::types::attestation_record::MIN_SSZ_ATTESTION_RECORD_LENGTH;
#[derive(Debug, PartialEq)] #[derive(Debug, PartialEq)]
pub enum AttestationSplitError { pub enum AttestationSplitError {
@ -29,52 +31,89 @@ pub fn split_one_attestation(
full_ssz: &[u8], full_ssz: &[u8],
index: usize, index: usize,
) -> Result<(&[u8], usize), AttestationSplitError> { ) -> Result<(&[u8], usize), AttestationSplitError> {
if full_ssz.len() < MIN_LENGTH { let length = determine_ssz_attestation_len(full_ssz, index)?;
let end = index + length;
// The check to ensure that the slice exists _should_ be redundant as it is already checked in
// `determine_ssz_attestation_len`, however it is checked here again for additional safety
// against panics.
match full_ssz.get(index..end) {
None => Err(AttestationSplitError::TooShort),
Some(slice) => Ok((slice, end)),
}
}
/// Given some SSZ, assume that a serialized `AttestationRecord` begins at the `index` position and
/// attempt to find the length (in bytes) of that serialized `AttestationRecord`.
///
/// This function does not perform validation on the `AttestationRecord`. It is very likely that
/// given some sufficiently long non-`AttestationRecord` bytes it will not raise an error.
fn determine_ssz_attestation_len(
full_ssz: &[u8],
index: usize,
) -> Result<usize, AttestationSplitError> {
if full_ssz.len() < MIN_SSZ_ATTESTION_RECORD_LENGTH {
return Err(AttestationSplitError::TooShort); return Err(AttestationSplitError::TooShort);
} }
let hashes_len = decode_length(full_ssz, index + 10, LENGTH_BYTES) let data_struct_end = index + SSZ_ATTESTION_DATA_LENGTH;
// Determine the end of the first bitfield.
let participation_bitfield_len = decode_length(full_ssz, data_struct_end, LENGTH_BYTES)
.map_err(|_| AttestationSplitError::TooShort)?; .map_err(|_| AttestationSplitError::TooShort)?;
let participation_bitfield_end = data_struct_end + LENGTH_BYTES + participation_bitfield_len;
let bitfield_len = decode_length(full_ssz, index + hashes_len + 46, LENGTH_BYTES) // Determine the end of the second bitfield.
let custody_bitfield_len = decode_length(full_ssz, participation_bitfield_end, LENGTH_BYTES)
.map_err(|_| AttestationSplitError::TooShort)?; .map_err(|_| AttestationSplitError::TooShort)?;
let custody_bitfield_end = participation_bitfield_end + LENGTH_BYTES + custody_bitfield_len;
// Subtract one because the min length assumes 1 byte of bitfield // Determine the very end of the AttestationRecord.
let len = MIN_LENGTH - 1 + hashes_len + bitfield_len; let agg_sig_end = custody_bitfield_end + LENGTH_BYTES + BLS_AGG_SIG_BYTE_SIZE;
if full_ssz.len() < index + len { if agg_sig_end > full_ssz.len() {
return Err(AttestationSplitError::TooShort); Err(AttestationSplitError::TooShort)
} else {
Ok(agg_sig_end - index)
} }
Ok((&full_ssz[index..(index + len)], index + len))
} }
#[cfg(test)] #[cfg(test)]
mod tests { mod tests {
use super::super::bls::AggregateSignature; use super::super::bls::AggregateSignature;
use super::super::ssz::{Decodable, SszStream}; use super::super::ssz::{Decodable, SszStream};
use super::super::types::{AttestationRecord, Bitfield, Hash256}; use super::super::types::{AttestationData, AttestationRecord, Bitfield, Hash256};
use super::*; use super::*;
fn get_two_records() -> Vec<AttestationRecord> { fn get_two_records() -> Vec<AttestationRecord> {
let a = AttestationRecord { let a = AttestationRecord {
data: AttestationData {
slot: 7, slot: 7,
shard_id: 9, shard: 9,
oblique_parent_hashes: vec![Hash256::from(&vec![14; 32][..])], beacon_block_hash: Hash256::from("a_beacon".as_bytes()),
shard_block_hash: Hash256::from(&vec![15; 32][..]), epoch_boundary_hash: Hash256::from("a_epoch".as_bytes()),
attester_bitfield: Bitfield::from_bytes(&vec![17; 42][..]), shard_block_hash: Hash256::from("a_shard".as_bytes()),
latest_crosslink_hash: Hash256::from("a_xlink".as_bytes()),
justified_slot: 19, justified_slot: 19,
justified_block_hash: Hash256::from(&vec![15; 32][..]), justified_block_hash: Hash256::from("a_justified".as_bytes()),
},
participation_bitfield: Bitfield::from_bytes(&vec![17; 42][..]),
custody_bitfield: Bitfield::from_bytes(&vec![255; 12][..]),
aggregate_sig: AggregateSignature::new(), aggregate_sig: AggregateSignature::new(),
}; };
let b = AttestationRecord { let b = AttestationRecord {
data: AttestationData {
slot: 9, slot: 9,
shard_id: 7, shard: 7,
oblique_parent_hashes: vec![Hash256::from(&vec![15; 32][..])], beacon_block_hash: Hash256::from("b_beacon".as_bytes()),
shard_block_hash: Hash256::from(&vec![14; 32][..]), epoch_boundary_hash: Hash256::from("b_epoch".as_bytes()),
attester_bitfield: Bitfield::from_bytes(&vec![19; 42][..]), shard_block_hash: Hash256::from("b_shard".as_bytes()),
latest_crosslink_hash: Hash256::from("b_xlink".as_bytes()),
justified_slot: 15, justified_slot: 15,
justified_block_hash: Hash256::from(&vec![17; 32][..]), justified_block_hash: Hash256::from("b_justified".as_bytes()),
},
participation_bitfield: Bitfield::from_bytes(&vec![1; 42][..]),
custody_bitfield: Bitfield::from_bytes(&vec![11; 3][..]),
aggregate_sig: AggregateSignature::new(), aggregate_sig: AggregateSignature::new(),
}; };
vec![a, b] vec![a, b]

View File

@ -293,8 +293,8 @@ mod tests {
// will tell us if the hash changes, not that it matches some // will tell us if the hash changes, not that it matches some
// canonical reference. // canonical reference.
let expected_hash = [ let expected_hash = [
11, 181, 149, 114, 248, 15, 46, 0, 106, 135, 158, 31, 15, 194, 149, 176, 43, 110, 154, 254, 192, 124, 164, 240, 137, 162, 126, 50, 255, 118, 88, 189, 151, 221, 4, 40, 121,
26, 253, 67, 18, 139, 250, 84, 144, 219, 3, 208, 50, 145, 198, 33, 248, 221, 104, 255, 46, 234, 146, 161, 202, 140, 109, 175,
]; ];
assert_eq!(hash, expected_hash); assert_eq!(hash, expected_hash);

View File

@ -1,229 +0,0 @@
use super::types::Hash256;
#[derive(Debug)]
pub enum ParentHashesError {
BadCurrentHashes,
BadObliqueHashes,
SlotTooHigh,
SlotTooLow,
IntWrapping,
}
/// This function is used to select the hashes used in
/// the signing of an AttestationRecord.
///
/// It either returns Result with a vector of length `cycle_length,` or
/// returns an Error.
///
/// This function corresponds to the `get_signed_parent_hashes` function
/// in the Python reference implentation.
///
/// See this slide for more information:
/// https://tinyurl.com/ybzn2spw
pub fn attestation_parent_hashes(
cycle_length: u8,
block_slot: u64,
attestation_slot: u64,
current_hashes: &[Hash256],
oblique_hashes: &[Hash256],
) -> Result<Vec<Hash256>, ParentHashesError> {
// This cast places a limit on cycle_length. If you change it, check math
// for overflow.
let cycle_length: u64 = u64::from(cycle_length);
if current_hashes.len() as u64 != (cycle_length * 2) {
return Err(ParentHashesError::BadCurrentHashes);
}
if oblique_hashes.len() as u64 > cycle_length {
return Err(ParentHashesError::BadObliqueHashes);
}
if attestation_slot >= block_slot {
return Err(ParentHashesError::SlotTooHigh);
}
/*
* Cannot underflow as block_slot cannot be less
* than attestation_slot.
*/
let attestation_distance = block_slot - attestation_slot;
if attestation_distance > cycle_length {
return Err(ParentHashesError::SlotTooLow);
}
/*
* Cannot underflow because attestation_distance cannot
* be larger than cycle_length.
*/
let start = cycle_length - attestation_distance;
/*
* Overflow is potentially impossible, but proof is complicated
* enough to just use checked math.
*
* Arithmetic is:
* start + cycle_length - oblique_hashes.len()
*/
let end = start
.checked_add(cycle_length)
.and_then(|x| x.checked_sub(oblique_hashes.len() as u64))
.ok_or(ParentHashesError::IntWrapping)?;
let mut hashes = Vec::new();
hashes.extend_from_slice(&current_hashes[(start as usize)..(end as usize)]);
hashes.extend_from_slice(oblique_hashes);
Ok(hashes)
}
#[cfg(test)]
mod tests {
use super::*;
fn get_range_of_hashes(from: usize, to: usize) -> Vec<Hash256> {
(from..to).map(|i| get_hash(&vec![i as u8])).collect()
}
fn get_hash(value: &[u8]) -> Hash256 {
Hash256::from_slice(value)
}
#[test]
fn test_get_signed_hashes_oblique_scenario_1() {
/*
* Two oblique hashes.
*/
let cycle_length: u8 = 8;
let block_slot: u64 = 19;
let attestation_slot: u64 = 15;
let current_hashes = get_range_of_hashes(3, 19);
let oblique_hashes = get_range_of_hashes(100, 102);
let result = attestation_parent_hashes(
cycle_length,
block_slot,
attestation_slot,
&current_hashes,
&oblique_hashes,
);
assert!(result.is_ok());
let result = result.unwrap();
assert_eq!(result.len(), cycle_length as usize);
let mut expected_result = get_range_of_hashes(7, 13);
expected_result.append(&mut get_range_of_hashes(100, 102));
assert_eq!(result, expected_result);
}
#[test]
fn test_get_signed_hashes_oblique_scenario_2() {
/*
* All oblique hashes.
*/
let cycle_length: u8 = 8;
let block_slot: u64 = 19;
let attestation_slot: u64 = 15;
let current_hashes = get_range_of_hashes(3, 19);
let oblique_hashes = get_range_of_hashes(100, 108);
let result = attestation_parent_hashes(
cycle_length,
block_slot,
attestation_slot,
&current_hashes,
&oblique_hashes,
);
assert!(result.is_ok());
let result = result.unwrap();
assert_eq!(result.len(), cycle_length as usize);
let expected_result = get_range_of_hashes(100, 108);
assert_eq!(result, expected_result);
}
#[test]
fn test_get_signed_hashes_scenario_1() {
/*
* Google Slides example.
* https://tinyurl.com/ybzn2spw
*/
let cycle_length: u8 = 8;
let block_slot: u64 = 19;
let attestation_slot: u64 = 15;
let current_hashes = get_range_of_hashes(3, 19);
let oblique_hashes = vec![];
let result = attestation_parent_hashes(
cycle_length,
block_slot,
attestation_slot,
&current_hashes,
&oblique_hashes,
);
assert!(result.is_ok());
let result = result.unwrap();
assert_eq!(result.len(), cycle_length as usize);
let expected_result = get_range_of_hashes(7, 15);
assert_eq!(result, expected_result);
}
#[test]
fn test_get_signed_hashes_scenario_2() {
/*
* Block 1, attestation 0.
*/
let cycle_length: u8 = 8;
let block_slot: u64 = 1;
let attestation_slot: u64 = 0;
let current_hashes = get_range_of_hashes(0, 16);
let oblique_hashes = vec![];
let result = attestation_parent_hashes(
cycle_length,
block_slot,
attestation_slot,
&current_hashes,
&oblique_hashes,
);
let result = result.unwrap();
assert_eq!(result.len(), cycle_length as usize);
let expected_result = get_range_of_hashes(7, 15);
assert_eq!(result, expected_result);
}
#[test]
fn test_get_signed_hashes_scenario_3() {
/*
* attestation_slot too large
*/
let cycle_length: u8 = 8;
let block_slot: u64 = 100;
let attestation_slot: u64 = 100;
let current_hashes = get_range_of_hashes(0, 16);
let oblique_hashes = vec![];
let result = attestation_parent_hashes(
cycle_length,
block_slot,
attestation_slot,
&current_hashes,
&oblique_hashes,
);
assert!(result.is_err());
}
#[test]
fn test_get_signed_hashes_scenario_4() {
/*
* Current hashes too small
*/
let cycle_length: u8 = 8;
let block_slot: u64 = 100;
let attestation_slot: u64 = 99;
let current_hashes = get_range_of_hashes(0, 15);
let oblique_hashes = vec![];
let result = attestation_parent_hashes(
cycle_length,
block_slot,
attestation_slot,
&current_hashes,
&oblique_hashes,
);
assert!(result.is_err());
}
}

View File

@ -1,252 +0,0 @@
use super::attestation_parent_hashes::{attestation_parent_hashes, ParentHashesError};
use super::db::stores::{BeaconBlockAtSlotError, BeaconBlockStore, ValidatorStore};
use super::db::{ClientDB, DBError};
use super::message_generation::generate_signed_message;
use super::signature_verification::{
verify_aggregate_signature_for_indices, SignatureVerificationError,
};
use super::types::Hash256;
use super::types::{AttestationRecord, AttesterMap};
use std::collections::HashSet;
use std::sync::Arc;
#[derive(Debug, PartialEq)]
pub enum AttestationValidationError {
ParentSlotTooHigh,
ParentSlotTooLow,
BlockSlotTooHigh,
BlockSlotTooLow,
JustifiedSlotIncorrect,
InvalidJustifiedBlockHash,
TooManyObliqueHashes,
BadCurrentHashes,
BadObliqueHashes,
BadAttesterMap,
IntWrapping,
PublicKeyCorrupt,
NoPublicKeyForValidator,
BadBitfieldLength,
InvalidBitfield,
InvalidBitfieldEndBits,
NoSignatures,
NonZeroTrailingBits,
BadAggregateSignature,
DBError(String),
OutOfBoundsBitfieldIndex,
}
/// The context against which some attestation should be validated.
pub struct AttestationValidationContext<T>
where
T: ClientDB + Sized,
{
/// The slot as determined by the system time.
pub block_slot: u64,
/// The slot of the parent of the block that contained this attestation.
pub parent_block_slot: u64,
/// The cycle_length as determined by the chain configuration.
pub cycle_length: u8,
/// The last justified slot as per the client's view of the canonical chain.
pub last_justified_slot: u64,
/// A vec of the hashes of the blocks preceeding the present slot.
pub recent_block_hashes: Arc<Vec<Hash256>>,
/// The store containing block information.
pub block_store: Arc<BeaconBlockStore<T>>,
/// The store containing validator information.
pub validator_store: Arc<ValidatorStore<T>>,
/// A map of (slot, shard_id) to the attestation set of validation indices.
pub attester_map: Arc<AttesterMap>,
}
impl<T> AttestationValidationContext<T>
where
T: ClientDB,
{
/// Validate a (fully deserialized) AttestationRecord against this context.
///
/// The function will return a HashSet of validator indices (canonical validator indices not
/// attestation indices) if the validation passed successfully, or an error otherwise.
///
/// The attestation's aggregate signature will be verified, therefore the function must able to
/// access all required validation public keys via the `validator_store`.
pub fn validate_attestation(
&self,
a: &AttestationRecord,
) -> Result<HashSet<usize>, AttestationValidationError> {
/*
* The attesation slot must be less than or equal to the parent of the slot of the block
* that contained the attestation.
*/
if a.slot > self.parent_block_slot {
return Err(AttestationValidationError::ParentSlotTooHigh);
}
/*
* The slot of this attestation must not be more than cycle_length + 1 distance
* from the parent_slot of block that contained it.
*/
if a.slot
< self
.parent_block_slot
.saturating_sub(u64::from(self.cycle_length).saturating_add(1))
{
return Err(AttestationValidationError::ParentSlotTooLow);
}
/*
* The attestation justified slot must not be higher than the last_justified_slot of the
* context.
*/
if a.justified_slot > self.last_justified_slot {
return Err(AttestationValidationError::JustifiedSlotIncorrect);
}
/*
* There is no need to include more oblique parents hashes than there are blocks
* in a cycle.
*/
if a.oblique_parent_hashes.len() > usize::from(self.cycle_length) {
return Err(AttestationValidationError::TooManyObliqueHashes);
}
/*
* Retrieve the set of attestation indices for this slot and shard id.
*
* This is an array mapping the order that validators will appear in the bitfield to the
* canonincal index of a validator.
*/
let attestation_indices = self
.attester_map
.get(&(a.slot, a.shard_id))
.ok_or(AttestationValidationError::BadAttesterMap)?;
/*
* The bitfield must be no longer than the minimum required to represent each validator in the
* attestation indices for this slot and shard id.
*/
if a.attester_bitfield.num_bytes() != bytes_for_bits(attestation_indices.len()) {
return Err(AttestationValidationError::BadBitfieldLength);
}
/*
* If there are excess bits in the bitfield because the number of a validators in not a
* multiple of 8, reject this attestation record.
*
* Allow extra set bits would permit mutliple different byte layouts (and therefore hashes) to
* refer to the same AttesationRecord.
*/
if a.attester_bitfield.num_set_bits() > attestation_indices.len() {
return Err(AttestationValidationError::InvalidBitfieldEndBits);
}
/*
* Generate the parent hashes for this attestation
*/
let parent_hashes = attestation_parent_hashes(
self.cycle_length,
self.block_slot,
a.slot,
&self.recent_block_hashes,
&a.oblique_parent_hashes,
)?;
/*
* The specified justified block hash supplied in the attestation must be in the chain at
* the given slot number.
*
* First, we find the latest parent hash from the parent_hashes array. Then, using the
* block store (database) we iterate back through the blocks until we find (or fail to
* find) the justified block hash referenced in the attestation record.
*/
let latest_parent_hash = parent_hashes
.last()
.ok_or(AttestationValidationError::BadCurrentHashes)?;
match self
.block_store
.block_at_slot(&latest_parent_hash, a.justified_slot)?
{
Some((ref hash, _)) if *hash == a.justified_block_hash.to_vec() => (),
_ => return Err(AttestationValidationError::InvalidJustifiedBlockHash),
};
/*
* Generate the message that this attestation aggregate signature must sign across.
*/
let signed_message = {
generate_signed_message(
a.slot,
&parent_hashes,
a.shard_id,
&a.shard_block_hash,
a.justified_slot,
)
};
let voted_hashset = verify_aggregate_signature_for_indices(
&signed_message,
&a.aggregate_sig,
&attestation_indices,
&a.attester_bitfield,
&self.validator_store,
)?;
/*
* If the hashset of voters is None, the signature verification failed.
*/
match voted_hashset {
None => Err(AttestationValidationError::BadAggregateSignature),
Some(hashset) => Ok(hashset),
}
}
}
/// Returns the minimum number of whole bytes required to store `bits` bits.
///
/// Always at least 1 — an empty bitfield still occupies a single byte.
fn bytes_for_bits(bits: usize) -> usize {
    let highest_bit_index = bits.saturating_sub(1);
    highest_bit_index / 8 + 1
}
/// Translate a parent-hash derivation failure into the matching attestation-validation error.
impl From<ParentHashesError> for AttestationValidationError {
    fn from(err: ParentHashesError) -> Self {
        match err {
            ParentHashesError::BadCurrentHashes => Self::BadCurrentHashes,
            ParentHashesError::BadObliqueHashes => Self::BadObliqueHashes,
            ParentHashesError::SlotTooLow => Self::BlockSlotTooLow,
            ParentHashesError::SlotTooHigh => Self::BlockSlotTooHigh,
            ParentHashesError::IntWrapping => Self::IntWrapping,
        }
    }
}
impl From<BeaconBlockAtSlotError> for AttestationValidationError {
    fn from(err: BeaconBlockAtSlotError) -> Self {
        // Database failures are preserved verbatim; any other lookup failure means the
        // referenced justified block could not be found at the expected slot.
        if let BeaconBlockAtSlotError::DBError(message) = err {
            Self::DBError(message)
        } else {
            Self::InvalidJustifiedBlockHash
        }
    }
}
impl From<DBError> for AttestationValidationError {
fn from(e: DBError) -> Self {
AttestationValidationError::DBError(e.message)
}
}
/// Translate an aggregate-signature verification failure into the matching
/// attestation-validation error.
impl From<SignatureVerificationError> for AttestationValidationError {
    fn from(err: SignatureVerificationError) -> Self {
        match err {
            SignatureVerificationError::BadValidatorIndex => Self::BadAttesterMap,
            SignatureVerificationError::PublicKeyCorrupt => Self::PublicKeyCorrupt,
            SignatureVerificationError::NoPublicKeyForValidator => Self::NoPublicKeyForValidator,
            SignatureVerificationError::DBError(s) => Self::DBError(s),
            SignatureVerificationError::OutOfBoundsBitfieldIndex => Self::OutOfBoundsBitfieldIndex,
        }
    }
}

View File

@ -1,371 +0,0 @@
extern crate rayon;
use self::rayon::prelude::*;
use super::attestation_validation::{AttestationValidationContext, AttestationValidationError};
use super::db::stores::{BeaconBlockStore, PoWChainStore, ValidatorStore};
use super::db::{ClientDB, DBError};
use super::ssz::{Decodable, DecodeError};
use super::ssz_helpers::attestation_ssz_splitter::{
split_all_attestations, split_one_attestation, AttestationSplitError,
};
use super::ssz_helpers::ssz_beacon_block::{SszBeaconBlock, SszBeaconBlockError};
use super::types::Hash256;
use super::types::{AttestationRecord, AttesterMap, BeaconBlock, ProposerMap};
use std::sync::{Arc, RwLock};
/// Reasons an SSZ-encoded beacon block can fail validation.
#[derive(Debug, PartialEq)]
pub enum SszBeaconBlockValidationError {
    /// The block's slot is ahead of the present slot.
    FutureSlot,
    /// The block's slot is at or before the last finalized slot.
    SlotAlreadyFinalized,
    /// The referenced PoW chain hash is not known to our PoW store.
    UnknownPoWChainRef,
    /// The block's parent hash is not in our block store.
    UnknownParentHash,
    /// An attestation could not be split or decoded from the block's SSZ bytes.
    BadAttestationSsz,
    /// The ancestor-hashes list could not be read/decoded from SSZ.
    BadAncestorHashesSsz,
    /// The specials list could not be decoded from SSZ.
    BadSpecialsSsz,
    /// The parent block's slot is not strictly less than this block's slot.
    ParentSlotHigherThanBlockSlot,
    /// An attestation failed validation; wraps the specific error.
    AttestationValidationError(AttestationValidationError),
    /// Aggregate signature check failed (not constructed in the code shown here).
    AttestationSignatureFailed,
    /// The first (proposer) attestation contains oblique parent hashes, which is forbidden.
    ProposerAttestationHasObliqueHashes,
    /// The parent-slot proposer did not sign the first attestation.
    NoProposerSignature,
    /// The proposer map has no entry for the parent slot.
    BadProposerMap,
    /// A lock used during parallel attestation validation was poisoned.
    RwLockPoisoned,
    /// An underlying database error; wraps its message.
    DBError(String),
}
/// The context against which a block should be validated.
pub struct BeaconBlockValidationContext<T>
where
    T: ClientDB + Sized,
{
    /// The slot as determined by the system time.
    pub present_slot: u64,
    /// The cycle_length as determined by the chain configuration.
    pub cycle_length: u8,
    /// The last justified slot as per the client's view of the canonical chain.
    pub last_justified_slot: u64,
    /// The last justified block hash as per the client's view of the canonical chain.
    pub last_justified_block_hash: Hash256,
    /// The last finalized slot as per the client's view of the canonical chain.
    pub last_finalized_slot: u64,
    /// A vec of the hashes of the blocks preceding the present slot.
    pub recent_block_hashes: Arc<Vec<Hash256>>,
    /// A map of slots to a block proposer validation index.
    pub proposer_map: Arc<ProposerMap>,
    /// A map of (slot, shard_id) to the attestation set of validation indices.
    pub attester_map: Arc<AttesterMap>,
    /// The store containing block information.
    pub block_store: Arc<BeaconBlockStore<T>>,
    /// The store containing validator information.
    pub validator_store: Arc<ValidatorStore<T>>,
    /// The store containing information about the proof-of-work chain.
    pub pow_store: Arc<PoWChainStore<T>>,
}
impl<T> BeaconBlockValidationContext<T>
where
    T: ClientDB,
{
    /// Validate some SszBeaconBlock against a block validation context. An SszBeaconBlock varies from a BeaconBlock in
    /// that it is a read-only structure that reads directly from encoded SSZ.
    ///
    /// The reason to validate an SszBeaconBlock is to avoid decoding it in its entirety if there is
    /// a suspicion that the block might be invalid. Such a suspicion should be applied to
    /// all blocks coming from the network.
    ///
    /// This function will determine if the block is new, already known or invalid (either
    /// intrinsically or due to some application error.)
    ///
    /// On success, returns the fully-decoded `BeaconBlock` with all attestations validated.
    ///
    /// Note: this function does not implement randao_reveal checking as it is not in the
    /// specification.
    #[allow(dead_code)]
    pub fn validate_ssz_block(
        &self,
        b: &SszBeaconBlock,
    ) -> Result<BeaconBlock, SszBeaconBlockValidationError>
    where
        T: ClientDB + Sized,
    {
        /*
         * If the block slot corresponds to a slot in the future, return immediately with an error.
         *
         * It is up to the calling fn to determine what should be done with "future" blocks (e.g.,
         * cache or discard).
         */
        let block_slot = b.slot();
        if block_slot > self.present_slot {
            return Err(SszBeaconBlockValidationError::FutureSlot);
        }
        /*
         * If the block is unknown (assumed unknown because we checked the db earlier in this
         * function) and it comes from a slot that is already finalized, drop the block.
         *
         * If a slot is finalized, there's no point in considering any other blocks for that slot.
         *
         * TODO: We can more strongly throw away blocks based on the `last_finalized_block` related
         * to this `last_finalized_slot`. Namely, any block in a future slot must include the
         * `last_finalized_block` in its chain.
         */
        if block_slot <= self.last_finalized_slot {
            return Err(SszBeaconBlockValidationError::SlotAlreadyFinalized);
        }
        /*
         * If the PoW chain hash is not known to us, drop it.
         *
         * We only accept blocks that reference a known PoW hash.
         *
         * Note: it is not clear what a "known" PoW chain ref is. Likely it means the block hash is
         * "sufficiently deep in the canonical PoW chain". This should be clarified as the spec
         * crystallizes.
         */
        let pow_chain_reference = b.pow_chain_reference();
        if !self.pow_store.block_hash_exists(b.pow_chain_reference())? {
            return Err(SszBeaconBlockValidationError::UnknownPoWChainRef);
        }
        /*
         * Store a slice of the serialized attestations from the block SSZ.
         */
        let attestations_ssz = &b.attestations_without_length();
        /*
         * Get a slice of the first serialized attestation (the 0'th) and decode it into
         * a full AttestationRecord object.
         *
         * The first attestation must be validated separately as it must contain a signature of the
         * proposer of the previous block (this is checked later in this function).
         */
        let (first_attestation_ssz, next_index) = split_one_attestation(&attestations_ssz, 0)?;
        let (first_attestation, _) = AttestationRecord::ssz_decode(&first_attestation_ssz, 0)?;
        /*
         * The first attestation may not have oblique hashes.
         *
         * The presence of oblique hashes in the first attestation would indicate that the proposer
         * of the previous block is attesting to some other block than the one they produced.
         */
        if !first_attestation.oblique_parent_hashes.is_empty() {
            return Err(SszBeaconBlockValidationError::ProposerAttestationHasObliqueHashes);
        }
        /*
         * Read the parent hash from the block we are validating then attempt to load
         * that parent block ssz from the database.
         *
         * If that parent doesn't exist in the database or is invalid, reject the block.
         *
         * Also, read the slot from the parent block for later use.
         */
        let parent_hash = b
            .parent_hash()
            .ok_or(SszBeaconBlockValidationError::BadAncestorHashesSsz)?;
        let parent_block_slot = match self.block_store.get_serialized_block(&parent_hash)? {
            None => return Err(SszBeaconBlockValidationError::UnknownParentHash),
            Some(ssz) => {
                let parent_block = SszBeaconBlock::from_slice(&ssz[..])?;
                parent_block.slot()
            }
        };
        /*
         * The parent block slot must be less than the block slot.
         *
         * In other words, the parent must come before the child.
         */
        if parent_block_slot >= block_slot {
            return Err(SszBeaconBlockValidationError::ParentSlotHigherThanBlockSlot);
        }
        /*
         * Generate the context in which attestations will be validated.
         */
        let attestation_validation_context = Arc::new(AttestationValidationContext {
            block_slot,
            parent_block_slot,
            cycle_length: self.cycle_length,
            last_justified_slot: self.last_justified_slot,
            recent_block_hashes: self.recent_block_hashes.clone(),
            block_store: self.block_store.clone(),
            validator_store: self.validator_store.clone(),
            attester_map: self.attester_map.clone(),
        });
        /*
         * Validate this first attestation.
         */
        let attestation_voters =
            attestation_validation_context.validate_attestation(&first_attestation)?;
        /*
         * Attempt to load the parent block proposer from the proposer map. Return with an
         * error if it fails.
         *
         * If the signature of proposer for the parent slot was not present in the first (0'th)
         * attestation of this block, reject the block.
         */
        let parent_block_proposer = self
            .proposer_map
            .get(&parent_block_slot)
            .ok_or(SszBeaconBlockValidationError::BadProposerMap)?;
        if !attestation_voters.contains(&parent_block_proposer) {
            return Err(SszBeaconBlockValidationError::NoProposerSignature);
        }
        /*
         * Split the remaining attestations into a vector of slices, each containing
         * a single serialized attestation record.
         */
        let other_attestations = split_all_attestations(attestations_ssz, next_index)?;
        /*
         * Verify each other AttestationRecord.
         *
         * This uses the `rayon` library to do "sometimes" parallelization. Put simply,
         * if there are some spare threads, the verification of attestation records will happen
         * concurrently.
         *
         * There is a thread-safe `failure` variable which is set whenever an attestation fails
         * validation. This is so all attestation validation is halted if a single bad attestation
         * is found.
         */
        let failure: RwLock<Option<SszBeaconBlockValidationError>> = RwLock::new(None);
        let mut deserialized_attestations: Vec<AttestationRecord> = other_attestations
            .par_iter()
            .filter_map(|attestation_ssz| {
                /*
                 * If some thread has set the `failure` variable to `Some(error)` then abandon
                 * attestation deserialization and validation. Also, fail early if the lock has
                 * been poisoned.
                 */
                match failure.read() {
                    Ok(ref option) if option.is_none() => (),
                    _ => return None,
                }
                /*
                 * If there has not been a failure yet, attempt to deserialize and validate the
                 * attestation.
                 */
                match AttestationRecord::ssz_decode(&attestation_ssz, 0) {
                    /*
                     * Deserialization failed, therefore the block is invalid.
                     */
                    Err(e) => {
                        /*
                         * If the failure lock isn't poisoned, set it to some error.
                         */
                        if let Ok(mut f) = failure.write() {
                            *f = Some(SszBeaconBlockValidationError::from(e));
                        }
                        None
                    }
                    /*
                     * Deserialization succeeded and the attestation should be validated.
                     */
                    Ok((attestation, _)) => {
                        match attestation_validation_context.validate_attestation(&attestation) {
                            /*
                             * Attestation validation failed with some error.
                             */
                            Err(e) => {
                                /*
                                 * If the failure lock isn't poisoned, set it to some error.
                                 */
                                if let Ok(mut f) = failure.write() {
                                    *f = Some(SszBeaconBlockValidationError::from(e));
                                }
                                None
                            }
                            /*
                             * Attestation validation succeeded.
                             */
                            Ok(_) => Some(attestation),
                        }
                    }
                }
            }).collect();
        match failure.into_inner() {
            Err(_) => return Err(SszBeaconBlockValidationError::RwLockPoisoned),
            Ok(failure) => match failure {
                Some(error) => return Err(error),
                _ => (),
            },
        }
        /*
         * Add the first attestation to the vec of deserialized attestations at
         * index 0.
         */
        deserialized_attestations.insert(0, first_attestation);
        let (ancestor_hashes, _) = Decodable::ssz_decode(&b.ancestor_hashes(), 0)
            .map_err(|_| SszBeaconBlockValidationError::BadAncestorHashesSsz)?;
        let (specials, _) = Decodable::ssz_decode(&b.specials(), 0)
            .map_err(|_| SszBeaconBlockValidationError::BadSpecialsSsz)?;
        /*
         * If we have reached this point, the block is a new valid block that is worthy of
         * processing.
         */
        let block = BeaconBlock {
            slot: block_slot,
            randao_reveal: Hash256::from(b.randao_reveal()),
            pow_chain_reference: Hash256::from(pow_chain_reference),
            ancestor_hashes,
            active_state_root: Hash256::from(b.act_state_root()),
            crystallized_state_root: Hash256::from(b.cry_state_root()),
            attestations: deserialized_attestations,
            specials,
        };
        Ok(block)
    }
}
impl From<DBError> for SszBeaconBlockValidationError {
fn from(e: DBError) -> Self {
SszBeaconBlockValidationError::DBError(e.message)
}
}
impl From<AttestationSplitError> for SszBeaconBlockValidationError {
    fn from(err: AttestationSplitError) -> Self {
        // A truncated attestation slice means the block carried malformed attestation SSZ.
        match err {
            AttestationSplitError::TooShort => Self::BadAttestationSsz,
        }
    }
}
impl From<SszBeaconBlockError> for SszBeaconBlockValidationError {
    fn from(err: SszBeaconBlockError) -> Self {
        // Either length error indicates the parent block stored in the database is corrupt.
        match err {
            SszBeaconBlockError::TooShort | SszBeaconBlockError::TooLong => {
                Self::DBError("Bad parent block in db.".to_string())
            }
        }
    }
}
impl From<DecodeError> for SszBeaconBlockValidationError {
    fn from(err: DecodeError) -> Self {
        // Any SSZ decode failure here stems from malformed attestation bytes.
        match err {
            DecodeError::TooShort | DecodeError::TooLong => Self::BadAttestationSsz,
        }
    }
}
impl From<AttestationValidationError> for SszBeaconBlockValidationError {
fn from(e: AttestationValidationError) -> Self {
SszBeaconBlockValidationError::AttestationValidationError(e)
}
}
/*
* Tests for block validation are contained in the root directory "tests" directory (AKA
* "integration tests directory").
*/

View File

@ -1,12 +0,0 @@
extern crate bls;
extern crate db;
extern crate hashing;
extern crate ssz;
extern crate ssz_helpers;
extern crate types;
mod attestation_parent_hashes;
pub mod attestation_validation;
pub mod block_validation;
mod message_generation;
mod signature_verification;

View File

@ -1,67 +0,0 @@
use super::hashing::canonical_hash;
use super::ssz::SszStream;
use super::types::Hash256;
/// Generates the message used to validate the signature provided with an AttestationRecord.
///
/// Ensures that the signer of the message has a view of the chain that is compatible with ours.
/// The message is the canonical hash of the SSZ serialization of
/// (slot, parent_hashes, shard_id, shard_block_hash, justified_slot), in that order.
pub fn generate_signed_message(
    slot: u64,
    parent_hashes: &[Hash256],
    shard_id: u16,
    shard_block_hash: &Hash256,
    justified_slot: u64,
) -> Vec<u8> {
    /*
     * Note: using SSZ here is a little risky, because this encoding is not required to be
     * SSZ (i.e. SSZ could change while this format must not).
     *
     * Switching the spec to SSZ has been suggested here:
     * https://github.com/ethereum/eth2.0-specs/issues/5
     *
     * If this doesn't happen, it would be safer to not use SSZ at all.
     */
    let mut stream = SszStream::new();
    stream.append(&slot);
    stream.append_vec(&parent_hashes.to_vec());
    stream.append(&shard_id);
    stream.append(shard_block_hash);
    stream.append(&justified_slot);
    canonical_hash(&stream.drain())
}
#[cfg(test)]
mod tests {
    use super::*;

    /// Pins the exact output bytes of `generate_signed_message` for fixed inputs.
    #[test]
    fn test_generate_signed_message() {
        let slot = 93;
        let parent_hashes: Vec<Hash256> = (0..12).map(|i| Hash256::from(i as u64)).collect();
        let shard_id = 15;
        let shard_block_hash = Hash256::from("shard_block_hash".as_bytes());
        let justified_slot = 18;

        let actual = generate_signed_message(
            slot,
            &parent_hashes,
            shard_id,
            &shard_block_hash,
            justified_slot,
        );
        /*
         * Note: this is not a well-known test vector; it is simply the result of running
         * this function and printing the output.
         *
         * Once well-known test vectors are established, they should be placed here.
         */
        let expected = vec![
            149, 99, 94, 229, 72, 144, 233, 14, 164, 16, 143, 53, 94, 48, 118, 179, 33, 181, 172,
            215, 2, 191, 176, 18, 188, 172, 137, 178, 236, 66, 74, 120,
        ];
        assert_eq!(actual, expected);
    }
}

View File

@ -1,180 +0,0 @@
use super::bls::{AggregatePublicKey, AggregateSignature};
use super::db::stores::{ValidatorStore, ValidatorStoreError};
use super::db::ClientDB;
use super::types::{Bitfield, BitfieldError};
use std::collections::HashSet;
/// Reasons aggregate-signature verification can fail before the signature is even checked.
#[derive(Debug, PartialEq)]
pub enum SignatureVerificationError {
    /// An attestation index had no entry in `attestation_indices`.
    BadValidatorIndex,
    /// A stored public key could not be decoded.
    PublicKeyCorrupt,
    /// The store holds no public key for a referenced validator.
    NoPublicKeyForValidator,
    /// An underlying database error; wraps its message.
    DBError(String),
    /// A bitfield access was out of bounds.
    OutOfBoundsBitfieldIndex,
}
impl From<BitfieldError> for SignatureVerificationError {
fn from(_error: BitfieldError) -> Self {
SignatureVerificationError::OutOfBoundsBitfieldIndex
}
}
/// Verify an aggregate signature across the supplied message.
///
/// Each set bit in `bitfield` is mapped, via `attestation_indices`, to a canonical
/// ValidatorRecord index. That validator's public key is loaded from the store on-demand
/// and folded into an aggregate public key.
///
/// Returns `Ok(Some(voters))` (the set of canonical validator indices) when the signature
/// verifies, `Ok(None)` when it does not, and `Err` if any lookup fails.
pub fn verify_aggregate_signature_for_indices<T>(
    message: &[u8],
    agg_sig: &AggregateSignature,
    attestation_indices: &[usize],
    bitfield: &Bitfield,
    validator_store: &ValidatorStore<T>,
) -> Result<Option<HashSet<usize>>, SignatureVerificationError>
where
    T: ClientDB + Sized,
{
    let mut voters = HashSet::new();
    let mut agg_pub_key = AggregatePublicKey::new();
    for i in 0..attestation_indices.len() {
        // Skip validators whose bit is not set; they did not vote.
        if !bitfield.get(i)? {
            continue;
        }
        // De-reference the attestation index into a canonical ValidatorRecord index.
        let validator = *attestation_indices
            .get(i)
            .ok_or(SignatureVerificationError::BadValidatorIndex)?;
        // Load the validator's public key from our store and fold it into the
        // aggregate public key.
        let pub_key = validator_store
            .get_public_key_by_index(validator)?
            .ok_or(SignatureVerificationError::NoPublicKeyForValidator)?;
        agg_pub_key.add(&pub_key);
        // Record the validator in the set of voters for this attestation record.
        voters.insert(validator);
    }
    /*
     * Verify the aggregate public key against the aggregate signature.
     *
     * This only succeeds if exactly the set of public keys aggregated above is the set
     * that signed the aggregate signature.
     */
    if agg_sig.verify(&message, &agg_pub_key) {
        Ok(Some(voters))
    } else {
        Ok(None)
    }
}
impl From<ValidatorStoreError> for SignatureVerificationError {
    fn from(err: ValidatorStoreError) -> Self {
        // DB failures bubble up verbatim; an undecodable record means the stored
        // public key bytes are corrupt.
        match err {
            ValidatorStoreError::DBError(message) => Self::DBError(message),
            ValidatorStoreError::DecodeError => Self::PublicKeyCorrupt,
        }
    }
}
#[cfg(test)]
mod tests {
    use super::super::bls::{Keypair, Signature};
    use super::super::db::MemoryDB;
    use super::*;
    use std::sync::Arc;
    /*
     * Cases that still need testing:
     *
     * - No signatures.
     * - Database failure.
     * - Unknown validator index.
     * - Extra validator on signature.
     */
    /// Signs a message with six of twelve validators and checks that exactly those six are
    /// reported as voters, then corrupts the bitfield and checks verification fails.
    #[test]
    fn test_signature_verification() {
        let message = "cats".as_bytes();
        let signing_keypairs = vec![
            Keypair::random(),
            Keypair::random(),
            Keypair::random(),
            Keypair::random(),
            Keypair::random(),
            Keypair::random(),
        ];
        let non_signing_keypairs = vec![
            Keypair::random(),
            Keypair::random(),
            Keypair::random(),
            Keypair::random(),
            Keypair::random(),
            Keypair::random(),
        ];
        /*
         * Signing keypairs first, then non-signing
         */
        let mut all_keypairs = signing_keypairs.clone();
        all_keypairs.append(&mut non_signing_keypairs.clone());
        let attestation_indices: Vec<usize> = (0..all_keypairs.len()).collect();
        let mut bitfield = Bitfield::from_elem(all_keypairs.len(), false);
        for i in 0..signing_keypairs.len() {
            bitfield.set(i, true).unwrap();
        }
        let db = Arc::new(MemoryDB::open());
        let store = ValidatorStore::new(db);
        for (i, keypair) in all_keypairs.iter().enumerate() {
            store.put_public_key_by_index(i, &keypair.pk).unwrap();
        }
        let mut agg_sig = AggregateSignature::new();
        for keypair in &signing_keypairs {
            let sig = Signature::new(&message, &keypair.sk);
            agg_sig.add(&sig);
        }
        /*
         * Test using all valid parameters.
         */
        let voters = verify_aggregate_signature_for_indices(
            &message,
            &agg_sig,
            &attestation_indices,
            &bitfield,
            &store,
        ).unwrap();
        let voters = voters.unwrap();
        assert_eq!(voters.len(), signing_keypairs.len());
        (0..signing_keypairs.len()).for_each(|i| assert!(voters.contains(&i)));
        /*
         * Fix: the original iterated `signing_keypairs.len()..non_signing_keypairs.len()`,
         * i.e. `6..6` — an empty range — so the non-signer assertions never ran. Iterate
         * across the full non-signing tail of `all_keypairs` instead.
         */
        (signing_keypairs.len()..all_keypairs.len()).for_each(|i| assert!(!voters.contains(&i)));
        /*
         * Add another validator to the bitfield, run validation with all other
         * parameters the same and assert that it fails.
         */
        bitfield.set(signing_keypairs.len() + 1, true).unwrap();
        let voters = verify_aggregate_signature_for_indices(
            &message,
            &agg_sig,
            &attestation_indices,
            &bitfield,
            &store,
        ).unwrap();
        assert_eq!(voters, None);
    }
}

View File

@ -1,219 +0,0 @@
use std::sync::Arc;
use super::bls::{AggregateSignature, Keypair, SecretKey, Signature};
use super::db::stores::{BeaconBlockStore, ValidatorStore};
use super::db::MemoryDB;
use super::hashing::canonical_hash;
use super::ssz::SszStream;
use super::types::{AttestationRecord, AttesterMap, BeaconBlock, Bitfield, Hash256};
use super::validation::attestation_validation::AttestationValidationContext;
/// Bundles an in-memory database with the validator and block stores built on top of it,
/// for use in attestation-validation tests.
pub struct TestStore {
    // Shared handle to the backing in-memory database.
    pub db: Arc<MemoryDB>,
    // Validator public-key store backed by `db`.
    pub validator: Arc<ValidatorStore<MemoryDB>>,
    // Beacon block store backed by `db`.
    pub block: Arc<BeaconBlockStore<MemoryDB>>,
}
impl TestStore {
    /// Open a fresh `MemoryDB` and wrap it in validator and block stores.
    pub fn new() -> Self {
        let db = Arc::new(MemoryDB::open());
        Self {
            validator: Arc::new(ValidatorStore::new(db.clone())),
            block: Arc::new(BeaconBlockStore::new(db.clone())),
            db,
        }
    }
}
/// Everything a single attestation-validation test needs.
pub struct TestRig {
    // The signed attestation under test.
    pub attestation: AttestationRecord,
    // The validation context the attestation is checked against.
    pub context: AttestationValidationContext<MemoryDB>,
    // Backing stores (db, validator keys, blocks).
    pub stores: TestStore,
    // Number of validators in the attester map / bitfield.
    pub attester_count: usize,
}
/// Hash the SSZ serialization of the attestation fields, producing the message attesters sign.
///
/// Mirrors the production message-generation routine so that test signatures validate.
fn generate_message_hash(
    slot: u64,
    parent_hashes: &[Hash256],
    shard_id: u16,
    shard_block_hash: &Hash256,
    justified_slot: u64,
) -> Vec<u8> {
    let mut ssz_stream = SszStream::new();
    ssz_stream.append(&slot);
    ssz_stream.append_vec(&parent_hashes.to_vec());
    ssz_stream.append(&shard_id);
    ssz_stream.append(shard_block_hash);
    ssz_stream.append(&justified_slot);
    canonical_hash(&ssz_stream.drain())
}
/// Build a signed `AttestationRecord` suitable for validation tests.
///
/// Each `Some(sk)` entry in `signing_keys` marks attester `i` as a voter: their bitfield
/// bit is set and their signature is folded into the aggregate signature. A justified
/// block is also stored in `block_store` so validation can find it.
pub fn generate_attestation(
    shard_id: u16,
    shard_block_hash: &Hash256,
    block_slot: u64,
    attestation_slot: u64,
    justified_slot: u64,
    justified_block_hash: &Hash256,
    cycle_length: u8,
    parent_hashes: &[Hash256],
    signing_keys: &[Option<SecretKey>],
    block_store: &BeaconBlockStore<MemoryDB>,
) -> AttestationRecord {
    let mut attester_bitfield = Bitfield::from_elem(signing_keys.len(), false);
    let mut aggregate_sig = AggregateSignature::new();
    /*
     * Select the cycle_length-long window of parent hashes the attestation commits to.
     * The window ends `block_slot - attestation_slot` entries back from the end of
     * `parent_hashes`.
     */
    let parent_hashes_slice = {
        let distance: usize = (block_slot - attestation_slot) as usize;
        let last: usize = parent_hashes.len() - distance;
        let first: usize = last - usize::from(cycle_length);
        &parent_hashes[first..last]
    };
    /*
     * Create a justified block at the correct slot and store it in the db.
     */
    create_block_at_slot(&block_store, &justified_block_hash, justified_slot);
    /*
     * Generate the message that will be signed across for this attr record.
     */
    let attestation_message = generate_message_hash(
        attestation_slot,
        parent_hashes_slice,
        shard_id,
        shard_block_hash,
        justified_slot,
    );
    for (i, secret_key) in signing_keys.iter().enumerate() {
        /*
         * If the signing key is Some, set the bitfield bit to true
         * and sign the aggregate sig.
         */
        if let Some(sk) = secret_key {
            attester_bitfield.set(i, true).unwrap();
            let sig = Signature::new(&attestation_message, sk);
            aggregate_sig.add(&sig);
        }
    }
    AttestationRecord {
        slot: attestation_slot,
        shard_id,
        oblique_parent_hashes: vec![],
        shard_block_hash: shard_block_hash.clone(),
        attester_bitfield,
        justified_slot,
        justified_block_hash: justified_block_hash.clone(),
        aggregate_sig,
    }
}
/// Create a minimum viable block at some slot and store it under `hash`.
///
/// Allows the validation function to read the block back and verify its slot.
pub fn create_block_at_slot(block_store: &BeaconBlockStore<MemoryDB>, hash: &Hash256, slot: u64) {
    let mut justified_block = BeaconBlock::zero();
    justified_block.slot = slot;
    justified_block.attestations.push(AttestationRecord::zero());
    let mut stream = SszStream::new();
    stream.append(&justified_block);
    block_store
        .put_serialized_block(&hash.to_vec(), &stream.drain())
        .unwrap();
}
/// Inserts a justified_block_hash in a position that will be referenced by an attestation record.
pub fn insert_justified_block_hash(
    parent_hashes: &mut Vec<Hash256>,
    justified_block_hash: &Hash256,
    block_slot: u64,
    attestation_slot: u64,
) {
    // The attestation references the hash `block_slot - attestation_slot` slots back
    // from the end of the parent-hashes list.
    let slots_back = block_slot as usize - attestation_slot as usize;
    let index = parent_hashes.len() - 1 - slots_back;
    parent_hashes[index] = justified_block_hash.clone();
}
/// Build a complete attestation-validation fixture for `attester_count` validators on
/// `shard_id`: stores, random keypairs, attester map, a validation context, and a valid
/// signed attestation.
pub fn setup_attestation_validation_test(shard_id: u16, attester_count: usize) -> TestRig {
    let stores = TestStore::new();
    let block_slot = 10000;
    let cycle_length: u8 = 64;
    // Two cycles' worth of distinct parent hashes preceding the block slot.
    let mut parent_hashes: Vec<Hash256> = (0..(cycle_length * 2))
        .map(|i| Hash256::from(i as u64))
        .collect();
    let attestation_slot = block_slot - 1;
    let parent_block_slot = attestation_slot;
    let last_justified_slot = attestation_slot - 1;
    let justified_block_hash = Hash256::from("justified_block".as_bytes());
    let shard_block_hash = Hash256::from("shard_block".as_bytes());
    /*
     * Insert the required justified_block_hash into parent_hashes
     */
    insert_justified_block_hash(
        &mut parent_hashes,
        &justified_block_hash,
        block_slot,
        attestation_slot,
    );
    let parent_hashes = Arc::new(parent_hashes);
    let mut keypairs = vec![];
    let mut signing_keys = vec![];
    let mut attester_map = AttesterMap::new();
    let mut attesters = vec![];
    /*
     * Generate a random keypair for each validator and clone it into the
     * list of keypairs. Store it in the database.
     */
    for i in 0..attester_count {
        let keypair = Keypair::random();
        keypairs.push(keypair.clone());
        stores
            .validator
            .put_public_key_by_index(i, &keypair.pk)
            .unwrap();
        signing_keys.push(Some(keypair.sk.clone()));
        attesters.push(i);
    }
    attester_map.insert((attestation_slot, shard_id), attesters);
    let context: AttestationValidationContext<MemoryDB> = AttestationValidationContext {
        block_slot,
        parent_block_slot,
        cycle_length,
        last_justified_slot,
        recent_block_hashes: parent_hashes.clone(),
        block_store: stores.block.clone(),
        validator_store: stores.validator.clone(),
        attester_map: Arc::new(attester_map),
    };
    // Every validator signs, so the attestation is valid against the context above.
    let attestation = generate_attestation(
        shard_id,
        &shard_block_hash,
        block_slot,
        attestation_slot,
        last_justified_slot,
        &justified_block_hash,
        cycle_length,
        &parent_hashes.clone(),
        &signing_keys,
        &stores.block,
    );
    TestRig {
        attestation,
        context,
        stores,
        attester_count,
    }
}

View File

@ -1,9 +0,0 @@
pub mod helpers;
mod tests;
use super::bls;
use super::db;
use super::hashing;
use super::ssz;
use super::types;
use super::validation;

View File

@ -1,241 +0,0 @@
use std::sync::Arc;
use super::bls::AggregateSignature;
use super::helpers::{create_block_at_slot, setup_attestation_validation_test, TestRig};
use super::types::AttesterMap;
use super::types::Hash256;
use super::validation::attestation_validation::AttestationValidationError;
/// Standard fixture: two validators attesting to shard 10.
fn generic_rig() -> TestRig {
    let shard_id = 10;
    let attesters = 2;
    setup_attestation_validation_test(shard_id, attesters)
}
#[test]
fn test_attestation_validation_valid() {
    // An untouched rig must validate, with both validators counted as voters.
    let rig = generic_rig();
    let voter_map = rig.context.validate_attestation(&rig.attestation).unwrap();
    assert_eq!(voter_map.len(), 2);
}
#[test]
fn test_attestation_validation_invalid_parent_slot_too_high() {
    // An attestation may not be newer than the parent of its containing block.
    let mut rig = generic_rig();
    rig.context.parent_block_slot = rig.attestation.slot - 1;
    assert_eq!(
        rig.context.validate_attestation(&rig.attestation),
        Err(AttestationValidationError::ParentSlotTooHigh)
    );
}
#[test]
fn test_attestation_validation_invalid_parent_slot_too_low() {
    // Push the attestation more than cycle_length + 1 slots behind the parent.
    let mut rig = generic_rig();
    rig.attestation.slot = rig.context.parent_block_slot - u64::from(rig.context.cycle_length) - 2;
    assert_eq!(
        rig.context.validate_attestation(&rig.attestation),
        Err(AttestationValidationError::ParentSlotTooLow)
    );
}
#[test]
fn test_attestation_validation_invalid_block_slot_too_high() {
    // The containing block may not be older than the attestation itself.
    let mut rig = generic_rig();
    rig.context.block_slot = rig.attestation.slot - 1;
    assert_eq!(
        rig.context.validate_attestation(&rig.attestation),
        Err(AttestationValidationError::BlockSlotTooHigh)
    );
}
#[test]
fn test_attestation_validation_invalid_block_slot_too_low() {
    // Move the block a whole cycle forward so the attestation falls too far behind.
    let mut rig = generic_rig();
    rig.context.block_slot += u64::from(rig.context.cycle_length);
    assert_eq!(
        rig.context.validate_attestation(&rig.attestation),
        Err(AttestationValidationError::BlockSlotTooLow)
    );
}
#[test]
fn test_attestation_validation_invalid_justified_slot_incorrect() {
    let mut rig = generic_rig();
    let original = rig.attestation.justified_slot;

    // Case 1: justified slot one lower than the value the signatures commit to.
    rig.attestation.justified_slot = original - 1;
    // Store a block at the new slot so we don't get a bad justified block error instead.
    create_block_at_slot(
        &rig.stores.block,
        &rig.attestation.justified_block_hash,
        rig.attestation.justified_slot,
    );
    assert_eq!(
        rig.context.validate_attestation(&rig.attestation),
        Err(AttestationValidationError::BadAggregateSignature)
    );

    // Case 2: justified slot one higher than the value the signatures commit to.
    rig.attestation.justified_slot = original + 1;
    // Store a block at the new slot so we don't get a bad justified block error instead.
    create_block_at_slot(
        &rig.stores.block,
        &rig.attestation.justified_block_hash,
        rig.attestation.justified_slot,
    );
    // Raise the context's last justified slot so that check doesn't fire instead.
    rig.context.last_justified_slot = rig.attestation.justified_slot;
    assert_eq!(
        rig.context.validate_attestation(&rig.attestation),
        Err(AttestationValidationError::BadAggregateSignature)
    );
}
#[test]
fn test_attestation_validation_invalid_too_many_oblique() {
    // One more oblique hash than cycle_length must be rejected.
    let mut rig = generic_rig();
    let obliques: Vec<Hash256> = (0..(rig.context.cycle_length + 1))
        .map(|i| Hash256::from((i * 2) as u64))
        .collect();
    rig.attestation.oblique_parent_hashes = obliques;
    assert_eq!(
        rig.context.validate_attestation(&rig.attestation),
        Err(AttestationValidationError::TooManyObliqueHashes)
    );
}
#[test]
fn test_attestation_validation_invalid_bad_attester_map() {
    // An empty attester map has no entry for the attestation's (slot, shard).
    let mut rig = generic_rig();
    rig.context.attester_map = Arc::new(AttesterMap::new());
    assert_eq!(
        rig.context.validate_attestation(&rig.attestation),
        Err(AttestationValidationError::BadAttesterMap)
    );
}
#[test]
fn test_attestation_validation_invalid_bad_bitfield_length() {
    let mut rig = generic_rig();
    /*
     * Extend the bitfield by one byte.
     *
     * Setting a bit outside the current bounds grows the bitvector, so the bitfield
     * becomes longer than the minimum needed for the attester count.
     */
    let one_byte_higher = rig.attester_count + 8;
    let _ = rig
        .attestation
        .attester_bitfield
        .set(one_byte_higher, false);
    assert_eq!(
        rig.context.validate_attestation(&rig.attestation),
        Err(AttestationValidationError::BadBitfieldLength)
    );
}
#[test]
fn test_attestation_validation_invalid_invalid_bitfield_end_bit() {
    // Set a bit just beyond the attester count: same byte length, one excess set bit.
    let mut rig = generic_rig();
    let one_bit_high = rig.attester_count + 1;
    let _ = rig.attestation.attester_bitfield.set(one_bit_high, true);
    assert_eq!(
        rig.context.validate_attestation(&rig.attestation),
        Err(AttestationValidationError::InvalidBitfieldEndBits)
    );
}
#[test]
fn test_attestation_validation_invalid_invalid_bitfield_end_bit_with_irreguar_bitfield_len() {
    let mut rig = generic_rig();
    /*
     * This test ensures that if the number of attesters is "irregular" (with respect to the
     * bitfield), and an invalid bit is set, validation still fails.
     *
     * "Irregular" here means that number of validators + 1 is not a clean multiple of eight.
     *
     * It exists to ensure the application can distinguish between the highest set bit in a
     * bitfield and the byte length of that bitfield.
     */
    let one_bit_high = rig.attester_count + 1;
    assert!(
        one_bit_high % 8 != 0,
        "the test is ineffective in this case."
    );
    let _ = rig.attestation.attester_bitfield.set(one_bit_high, true);
    assert_eq!(
        rig.context.validate_attestation(&rig.attestation),
        Err(AttestationValidationError::InvalidBitfieldEndBits)
    );
}
#[test]
fn test_attestation_validation_invalid_unknown_justified_block_hash() {
    // A justified block hash that is absent from the block store is invalid.
    let mut rig = generic_rig();
    let bogus_hash = Hash256::from("unknown block hash".as_bytes());
    rig.attestation.justified_block_hash = bogus_hash;
    assert_eq!(
        rig.context.validate_attestation(&rig.attestation),
        Err(AttestationValidationError::InvalidJustifiedBlockHash)
    );
}
#[test]
fn test_attestation_validation_invalid_unknown_justified_block_hash_wrong_slot() {
    let rig = generic_rig();
    /*
     * The stored justified block must live at exactly `justified_slot`;
     * store it one slot too high, then one slot too low, and expect
     * rejection both times.
     */
    let bad_slots = [
        rig.attestation.justified_slot + 1,
        rig.attestation.justified_slot - 1,
    ];
    for stored_slot in bad_slots.iter() {
        create_block_at_slot(
            &rig.stores.block,
            &rig.attestation.justified_block_hash,
            *stored_slot,
        );
        assert_eq!(
            rig.context.validate_attestation(&rig.attestation),
            Err(AttestationValidationError::InvalidJustifiedBlockHash)
        );
    }
}
#[test]
fn test_attestation_validation_invalid_empty_signature() {
    // A freshly-initialized (empty) aggregate signature cannot verify.
    let mut rig = generic_rig();
    rig.attestation.aggregate_sig = AggregateSignature::new();
    assert_eq!(
        rig.context.validate_attestation(&rig.attestation),
        Err(AttestationValidationError::BadAggregateSignature)
    );
}

View File

@ -1,233 +0,0 @@
use std::sync::Arc;
use super::attestation_validation::helpers::{generate_attestation, insert_justified_block_hash};
use super::bls::Keypair;
use super::db::stores::{BeaconBlockStore, PoWChainStore, ValidatorStore};
use super::db::MemoryDB;
use super::ssz::SszStream;
use super::ssz_helpers::ssz_beacon_block::SszBeaconBlock;
use super::types::{AttestationRecord, AttesterMap, BeaconBlock, Hash256, ProposerMap};
use super::validation::block_validation::{
BeaconBlockValidationContext, SszBeaconBlockValidationError,
};
/// Parameters controlling both the construction of a test block and the
/// validation context it is checked against.
#[derive(Debug)]
pub struct BeaconBlockTestParams {
    pub total_validators: usize,
    pub cycle_length: u8,
    pub shard_count: u16,
    // Number of shards attested to in a single slot.
    pub shards_per_slot: u16,
    pub validators_per_shard: usize,
    // Slot of the block under test; its parent is stored at `block_slot - 1`.
    pub block_slot: u64,
    // Justified slot written into each generated attestation.
    pub attestations_justified_slot: u64,
    pub parent_proposer_index: usize,
    // The `validation_context_*` fields configure the
    // `BeaconBlockValidationContext` built by `run_block_validation_scenario`.
    pub validation_context_slot: u64,
    pub validation_context_justified_slot: u64,
    pub validation_context_justified_block_hash: Hash256,
    pub validation_context_finalized_slot: u64,
}
/// Collection of in-memory stores used by validation scenarios; all stores
/// share a single `MemoryDB` instance.
pub struct TestStore {
    pub db: Arc<MemoryDB>,
    pub block: Arc<BeaconBlockStore<MemoryDB>>,
    pub pow_chain: Arc<PoWChainStore<MemoryDB>>,
    pub validator: Arc<ValidatorStore<MemoryDB>>,
}
impl TestStore {
    /// Build a fresh set of stores, all backed by one shared `MemoryDB`.
    pub fn new() -> Self {
        let db = Arc::new(MemoryDB::open());
        Self {
            block: Arc::new(BeaconBlockStore::new(db.clone())),
            pow_chain: Arc::new(PoWChainStore::new(db.clone())),
            validator: Arc::new(ValidatorStore::new(db.clone())),
            db,
        }
    }
}
type ParentHashes = Vec<Hash256>;
/// Setup for a block validation function, without actually executing the
/// block validation function.
///
/// Stores a parent block and per-shard attestations (signed by freshly
/// generated validators) in a new `TestStore`, then assembles the block
/// under test. Returns the block plus everything a validation context needs.
pub fn setup_block_validation_scenario(
    params: &BeaconBlockTestParams,
) -> (
    BeaconBlock,
    ParentHashes,
    AttesterMap,
    ProposerMap,
    TestStore,
) {
    let stores = TestStore::new();
    let cycle_length = params.cycle_length;
    let shards_per_slot = params.shards_per_slot;
    let validators_per_shard = params.validators_per_shard;
    let block_slot = params.block_slot;
    let attestations_justified_slot = params.attestations_justified_slot;
    // Two cycles' worth of placeholder parent hashes (h(0), h(1), ...).
    let mut parent_hashes: Vec<Hash256> = (0..(cycle_length * 2))
        .map(|i| Hash256::from(i as u64))
        .collect();
    let parent_hash = Hash256::from("parent_hash".as_bytes());
    let ancestor_hashes = vec![parent_hash.clone(); 32];
    let randao_reveal = Hash256::from("randao_reveal".as_bytes());
    let justified_block_hash = Hash256::from("justified_hash".as_bytes());
    let pow_chain_ref = Hash256::from("pow_chain".as_bytes());
    let active_state_root = Hash256::from("active_state".as_bytes());
    let crystallized_state_root = Hash256::from("cry_state".as_bytes());
    let shard_block_hash = Hash256::from("shard_block_hash".as_bytes());
    /*
     * Store a valid PoW chain ref
     */
    stores
        .pow_chain
        .put_block_hash(pow_chain_ref.as_ref())
        .unwrap();
    /*
     * Generate a minimum viable parent block and store it in the database.
     */
    let mut parent_block = BeaconBlock::zero();
    let parent_attestation = AttestationRecord::zero();
    parent_block.slot = block_slot - 1;
    parent_block.attestations.push(parent_attestation);
    let parent_block_ssz = serialize_block(&parent_block);
    stores
        .block
        .put_serialized_block(parent_hash.as_ref(), &parent_block_ssz)
        .unwrap();
    // Register the configured proposer for the parent block's slot.
    let proposer_map = {
        let mut proposer_map = ProposerMap::new();
        proposer_map.insert(parent_block.slot, params.parent_proposer_index);
        proposer_map
    };
    let (attester_map, attestations, _keypairs) = {
        // `i` is a globally-unique validator index, incremented across shards.
        let mut i = 0;
        let attestation_slot = block_slot - 1;
        let mut attester_map = AttesterMap::new();
        let mut attestations = vec![];
        let mut keypairs = vec![];
        /*
         * Insert the required justified_block_hash into parent_hashes
         */
        insert_justified_block_hash(
            &mut parent_hashes,
            &justified_block_hash,
            block_slot,
            attestation_slot,
        );
        /*
         * For each shard in this slot, generate an attestation.
         */
        for shard in 0..shards_per_slot {
            let mut signing_keys = vec![];
            let mut attesters = vec![];
            /*
             * Generate a random keypair for each validator and clone it into the
             * list of keypairs. Store it in the database.
             */
            for _ in 0..validators_per_shard {
                let keypair = Keypair::random();
                keypairs.push(keypair.clone());
                stores
                    .validator
                    .put_public_key_by_index(i, &keypair.pk)
                    .unwrap();
                signing_keys.push(Some(keypair.sk.clone()));
                attesters.push(i);
                i += 1;
            }
            attester_map.insert((attestation_slot, shard), attesters);
            let attestation = generate_attestation(
                shard,
                &shard_block_hash,
                block_slot,
                attestation_slot,
                attestations_justified_slot,
                &justified_block_hash,
                cycle_length,
                &parent_hashes,
                &signing_keys[..],
                &stores.block,
            );
            attestations.push(attestation);
        }
        (attester_map, attestations, keypairs)
    };
    // Assemble the block under test from the pieces above.
    let block = BeaconBlock {
        slot: block_slot,
        randao_reveal,
        pow_chain_reference: pow_chain_ref,
        ancestor_hashes,
        active_state_root,
        crystallized_state_root,
        attestations,
        specials: vec![],
    };
    (block, parent_hashes, attester_map, proposer_map, stores)
}
/// SSZ-serialize a `BeaconBlock` into a fresh byte vector.
pub fn serialize_block(b: &BeaconBlock) -> Vec<u8> {
    let mut ssz_stream = SszStream::new();
    ssz_stream.append(b);
    ssz_stream.drain()
}
/// Setup and run a block validation scenario, given some parameters.
///
/// Builds the scenario via `setup_block_validation_scenario`, lets
/// `mutator_func` alter it, then validates the (re-serialized) block.
///
/// Returns the Result returned from the block validation function.
pub fn run_block_validation_scenario<F>(
    params: &BeaconBlockTestParams,
    mutator_func: F,
) -> Result<BeaconBlock, SszBeaconBlockValidationError>
where
    F: FnOnce(BeaconBlock, AttesterMap, ProposerMap, TestStore)
        -> (BeaconBlock, AttesterMap, ProposerMap, TestStore),
{
    let (block, parent_hashes, attester_map, proposer_map, stores) =
        setup_block_validation_scenario(&params);
    // Give the caller a chance to mutate the scenario before validation.
    let (block, attester_map, proposer_map, stores) =
        mutator_func(block, attester_map, proposer_map, stores);
    let ssz_bytes = serialize_block(&block);
    let ssz_block = SszBeaconBlock::from_slice(&ssz_bytes[..]).unwrap();
    let context = BeaconBlockValidationContext {
        present_slot: params.validation_context_slot,
        cycle_length: params.cycle_length,
        last_justified_slot: params.validation_context_justified_slot,
        last_justified_block_hash: params.validation_context_justified_block_hash,
        last_finalized_slot: params.validation_context_finalized_slot,
        recent_block_hashes: Arc::new(parent_hashes),
        proposer_map: Arc::new(proposer_map),
        attester_map: Arc::new(attester_map),
        block_store: stores.block.clone(),
        validator_store: stores.validator.clone(),
        pow_store: stores.pow_chain.clone(),
    };
    let validation_result = context.validate_ssz_block(&ssz_block);
    /*
     * If validation returned a block, make sure it's the same block we supplied to it.
     *
     * I.e., there were no errors during the serialization -> deserialization process.
     */
    if let Ok(returned_block) = &validation_result {
        assert_eq!(*returned_block, block);
    };
    validation_result
}

View File

@ -1,12 +0,0 @@
mod helpers;
mod tests;
use super::bls;
use super::db;
use super::hashing;
use super::ssz;
use super::ssz_helpers;
use super::types;
use super::validation;
use super::attestation_validation;

View File

@ -1,264 +0,0 @@
use super::bls::AggregateSignature;
use super::hashing::canonical_hash;
use super::helpers::{
run_block_validation_scenario, serialize_block, BeaconBlockTestParams, TestStore,
};
use super::ssz_helpers::ssz_beacon_block::SszBeaconBlock;
use super::types::{BeaconBlock, Hash256, ProposerMap};
use super::validation::attestation_validation::AttestationValidationError;
use super::validation::block_validation::SszBeaconBlockValidationError;
/// Produce a small, fast parameter set for block validation scenarios.
fn get_simple_params() -> BeaconBlockTestParams {
    let cycle_length: u8 = 2;
    let shard_count: u16 = 4;
    let shards_per_slot: u16 = shard_count / u16::from(cycle_length);
    let validators_per_shard: usize = 5;
    let block_slot = u64::from(cycle_length) * 10000;
    let attestations_justified_slot = block_slot - u64::from(cycle_length);
    BeaconBlockTestParams {
        total_validators: validators_per_shard * shard_count as usize,
        cycle_length,
        shard_count,
        shards_per_slot,
        validators_per_shard,
        parent_proposer_index: 0,
        block_slot,
        attestations_justified_slot,
        validation_context_slot: block_slot,
        validation_context_justified_slot: attestations_justified_slot,
        validation_context_justified_block_hash: Hash256::from("justified_hash".as_bytes()),
        validation_context_finalized_slot: 0,
    }
}
// TODO: test bad ssz serialization
#[test]
fn test_block_validation_valid() {
    // A scenario with no mutations applied must pass validation.
    let params = get_simple_params();
    let identity = |block: BeaconBlock, attester_map, proposer_map, stores| {
        (block, attester_map, proposer_map, stores)
    };
    let status = run_block_validation_scenario(&params, identity);
    assert!(status.is_ok())
}
#[test]
fn test_block_validation_valid_known_block() {
    let params = get_simple_params();
    // Pre-store the block in the database before validating it.
    let mutator = |block: BeaconBlock, attester_map, proposer_map, stores: TestStore| {
        let ssz = serialize_block(&block);
        let hash = canonical_hash(&ssz);
        stores.block.put_serialized_block(&hash, &ssz).unwrap();
        (block, attester_map, proposer_map, stores)
    };
    let status = run_block_validation_scenario(&params, mutator);
    /*
     * This function does _not_ check if a block is already known, so a
     * pre-stored block still appears valid.
     */
    assert!(status.is_ok())
}
#[test]
fn test_block_validation_parent_slot_too_high() {
    // Lower the block's slot so the stored parent block's slot exceeds it.
    let params = get_simple_params();
    let mutator = |mut block: BeaconBlock, attester_map, proposer_map, stores| {
        block.slot = params.validation_context_justified_slot + 1;
        (block, attester_map, proposer_map, stores)
    };
    assert_eq!(
        run_block_validation_scenario(&params, mutator),
        Err(SszBeaconBlockValidationError::ParentSlotHigherThanBlockSlot)
    );
}
#[test]
fn test_block_validation_invalid_future_slot() {
    // A block whose slot is one ahead of the context's present slot must be
    // rejected as a future block.
    let params = get_simple_params();
    let mutator = |mut block: BeaconBlock, attester_map, proposer_map, stores| {
        block.slot += 1;
        (block, attester_map, proposer_map, stores)
    };
    let status = run_block_validation_scenario(&params, mutator);
    assert_eq!(status, Err(SszBeaconBlockValidationError::FutureSlot));
}
#[test]
fn test_block_validation_invalid_slot_already_finalized() {
    // Raise the finalized slot up to the block slot so the block falls into
    // already-finalized territory; keep the justified slot consistent.
    let mut params = get_simple_params();
    params.validation_context_finalized_slot = params.block_slot;
    params.validation_context_justified_slot =
        params.validation_context_finalized_slot + u64::from(params.cycle_length);
    let identity = |block, attester_map, proposer_map, stores| {
        (block, attester_map, proposer_map, stores)
    };
    assert_eq!(
        run_block_validation_scenario(&params, identity),
        Err(SszBeaconBlockValidationError::SlotAlreadyFinalized)
    );
}
#[test]
fn test_block_validation_invalid_unknown_pow_hash() {
    // Point the PoW chain reference at a hash the PoW store has never seen.
    let params = get_simple_params();
    let mutator = |mut block: BeaconBlock, attester_map, proposer_map, stores| {
        block.pow_chain_reference = Hash256::from("unknown pow hash".as_bytes());
        (block, attester_map, proposer_map, stores)
    };
    assert_eq!(
        run_block_validation_scenario(&params, mutator),
        Err(SszBeaconBlockValidationError::UnknownPoWChainRef)
    );
}
#[test]
fn test_block_validation_invalid_unknown_parent_hash() {
    // Point the first ancestor hash at a block that is not in the store.
    let params = get_simple_params();
    let mutator = |mut block: BeaconBlock, attester_map, proposer_map, stores| {
        block.ancestor_hashes[0] = Hash256::from("unknown parent block".as_bytes());
        (block, attester_map, proposer_map, stores)
    };
    assert_eq!(
        run_block_validation_scenario(&params, mutator),
        Err(SszBeaconBlockValidationError::UnknownParentHash)
    );
}
#[test]
fn test_block_validation_invalid_1st_attestation_signature() {
    let params = get_simple_params();
    let mutator = |mut block: BeaconBlock, attester_map, proposer_map, stores| {
        /*
         * Set the first attestation record to have an invalid (empty) signature.
         */
        block.attestations[0].aggregate_sig = AggregateSignature::new();
        (block, attester_map, proposer_map, stores)
    };
    let status = run_block_validation_scenario(&params, mutator);
    assert_eq!(
        status,
        Err(SszBeaconBlockValidationError::AttestationValidationError(
            AttestationValidationError::BadAggregateSignature
        ))
    );
}
#[test]
fn test_block_validation_invalid_no_parent_proposer_signature() {
    let params = get_simple_params();
    let mutator =
        |block: BeaconBlock, attester_map, mut proposer_map: ProposerMap, stores: TestStore| {
            /*
             * Set the proposer for this slot to be a validator that does not exist.
             */
            // Load the serialized parent block so we can read its slot.
            let ssz = {
                let parent_hash = block.parent_hash().unwrap().as_ref();
                stores
                    .block
                    .get_serialized_block(parent_hash)
                    .unwrap()
                    .unwrap()
            };
            let parent_block_slot = SszBeaconBlock::from_slice(&ssz[..]).unwrap().slot();
            // Map the parent slot to an index beyond the validator set.
            proposer_map.insert(parent_block_slot, params.total_validators + 1);
            (block, attester_map, proposer_map, stores)
        };
    let status = run_block_validation_scenario(&params, mutator);
    assert_eq!(
        status,
        Err(SszBeaconBlockValidationError::NoProposerSignature)
    );
}
#[test]
fn test_block_validation_invalid_bad_proposer_map() {
    // Swap in a brand-new, empty proposer map.
    let params = get_simple_params();
    let mutator =
        |block, attester_map, _, stores| (block, attester_map, ProposerMap::new(), stores);
    let status = run_block_validation_scenario(&params, mutator);
    assert_eq!(status, Err(SszBeaconBlockValidationError::BadProposerMap));
}
#[test]
fn test_block_validation_invalid_2nd_attestation_signature() {
    let params = get_simple_params();
    let mutator = |mut block: BeaconBlock, attester_map, proposer_map, stores| {
        /*
         * Set the second attestation record to have an invalid (empty) signature.
         */
        block.attestations[1].aggregate_sig = AggregateSignature::new();
        (block, attester_map, proposer_map, stores)
    };
    let status = run_block_validation_scenario(&params, mutator);
    assert_eq!(
        status,
        Err(SszBeaconBlockValidationError::AttestationValidationError(
            AttestationValidationError::BadAggregateSignature
        ))
    );
}

View File

@ -1,20 +0,0 @@
extern crate validation;
extern crate bls;
extern crate db;
extern crate hashing;
extern crate ssz;
extern crate ssz_helpers;
extern crate types;
#[cfg(test)]
mod attestation_validation;
#[cfg(test)]
mod block_validation;
/*
use lighthouse::bls;
use lighthouse::db;
use lighthouse::state;
use lighthouse::utils;
*/