Merge pull request #69 from sigp/rustfmt

Run rustfmt globally.
Age Manning 2018-11-14 18:12:21 +02:00 committed by GitHub
commit 2e2a1faff4
43 changed files with 754 additions and 975 deletions

View File

@ -1,8 +1,8 @@
extern crate db; extern crate db;
extern crate naive_fork_choice; extern crate naive_fork_choice;
extern crate state_transition;
extern crate ssz; extern crate ssz;
extern crate ssz_helpers; extern crate ssz_helpers;
extern crate state_transition;
extern crate types; extern crate types;
extern crate validation; extern crate validation;
extern crate validator_induction; extern crate validator_induction;
@ -12,8 +12,8 @@ mod block_context;
mod block_processing; mod block_processing;
mod genesis; mod genesis;
mod maps; mod maps;
mod transition;
mod stores; mod stores;
mod transition;
use db::ClientDB; use db::ClientDB;
use genesis::genesis_states; use genesis::genesis_states;

View File

@@ -1,15 +1,6 @@
-use super::{ Hash256, Bitfield };
-use super::bls::{
-    AggregateSignature,
-    BLS_AGG_SIG_BYTE_SIZE,
-};
-use super::ssz::{
-    Encodable,
-    Decodable,
-    DecodeError,
-    decode_ssz_list,
-    SszStream,
-};
+use super::bls::{AggregateSignature, BLS_AGG_SIG_BYTE_SIZE};
+use super::ssz::{decode_ssz_list, Decodable, DecodeError, Encodable, SszStream};
+use super::{Bitfield, Hash256};

 pub const MIN_SSZ_ATTESTION_RECORD_LENGTH: usize = {
     8 + // slot
@ -48,9 +39,7 @@ impl Encodable for AttestationRecord {
} }
impl Decodable for AttestationRecord { impl Decodable for AttestationRecord {
fn ssz_decode(bytes: &[u8], i: usize) fn ssz_decode(bytes: &[u8], i: usize) -> Result<(Self, usize), DecodeError> {
-> Result<(Self, usize), DecodeError>
{
let (slot, i) = u64::ssz_decode(bytes, i)?; let (slot, i) = u64::ssz_decode(bytes, i)?;
let (shard_id, i) = u16::ssz_decode(bytes, i)?; let (shard_id, i) = u16::ssz_decode(bytes, i)?;
let (oblique_parent_hashes, i) = decode_ssz_list(bytes, i)?; let (oblique_parent_hashes, i) = decode_ssz_list(bytes, i)?;
@ -60,8 +49,8 @@ impl Decodable for AttestationRecord {
let (justified_block_hash, i) = Hash256::ssz_decode(bytes, i)?; let (justified_block_hash, i) = Hash256::ssz_decode(bytes, i)?;
// Do aggregate sig decoding properly. // Do aggregate sig decoding properly.
let (agg_sig_bytes, i) = decode_ssz_list(bytes, i)?; let (agg_sig_bytes, i) = decode_ssz_list(bytes, i)?;
let aggregate_sig = AggregateSignature::from_bytes(&agg_sig_bytes) let aggregate_sig =
.map_err(|_| DecodeError::TooShort)?; // also could be TooLong AggregateSignature::from_bytes(&agg_sig_bytes).map_err(|_| DecodeError::TooShort)?; // also could be TooLong
let attestation_record = Self { let attestation_record = Self {
slot, slot,
@ -92,11 +81,10 @@ impl AttestationRecord {
} }
} }
#[cfg(test)] #[cfg(test)]
mod tests { mod tests {
use super::*;
use super::super::ssz::SszStream; use super::super::ssz::SszStream;
use super::*;
#[test] #[test]
pub fn test_attestation_record_min_ssz_length() { pub fn test_attestation_record_min_ssz_length() {
@ -124,11 +112,13 @@ mod tests {
let mut ssz_stream = SszStream::new(); let mut ssz_stream = SszStream::new();
ssz_stream.append(&original); ssz_stream.append(&original);
let (decoded, _) = AttestationRecord:: let (decoded, _) = AttestationRecord::ssz_decode(&ssz_stream.drain(), 0).unwrap();
ssz_decode(&ssz_stream.drain(), 0).unwrap();
assert_eq!(original.slot, decoded.slot); assert_eq!(original.slot, decoded.slot);
assert_eq!(original.shard_id, decoded.shard_id); assert_eq!(original.shard_id, decoded.shard_id);
assert_eq!(original.oblique_parent_hashes, decoded.oblique_parent_hashes); assert_eq!(
original.oblique_parent_hashes,
decoded.oblique_parent_hashes
);
assert_eq!(original.shard_block_hash, decoded.shard_block_hash); assert_eq!(original.shard_block_hash, decoded.shard_block_hash);
assert_eq!(original.attester_bitfield, decoded.attester_bitfield); assert_eq!(original.attester_bitfield, decoded.attester_bitfield);
assert_eq!(original.justified_slot, decoded.justified_slot); assert_eq!(original.justified_slot, decoded.justified_slot);

View File

@@ -1,12 +1,7 @@
-use super::Hash256;
 use super::attestation_record::AttestationRecord;
 use super::special_record::SpecialRecord;
-use super::ssz::{
-    Encodable,
-    Decodable,
-    DecodeError,
-    SszStream,
-};
+use super::ssz::{Decodable, DecodeError, Encodable, SszStream};
+use super::Hash256;

 pub const MIN_SSZ_BLOCK_LENGTH: usize = {
     8 + // slot
@ -68,9 +63,7 @@ impl Encodable for BeaconBlock {
} }
impl Decodable for BeaconBlock { impl Decodable for BeaconBlock {
fn ssz_decode(bytes: &[u8], i: usize) fn ssz_decode(bytes: &[u8], i: usize) -> Result<(Self, usize), DecodeError> {
-> Result<(Self, usize), DecodeError>
{
let (slot, i) = u64::ssz_decode(bytes, i)?; let (slot, i) = u64::ssz_decode(bytes, i)?;
let (randao_reveal, i) = Hash256::ssz_decode(bytes, i)?; let (randao_reveal, i) = Hash256::ssz_decode(bytes, i)?;
let (pow_chain_reference, i) = Hash256::ssz_decode(bytes, i)?; let (pow_chain_reference, i) = Hash256::ssz_decode(bytes, i)?;
@ -87,13 +80,12 @@ impl Decodable for BeaconBlock {
active_state_root, active_state_root,
crystallized_state_root, crystallized_state_root,
attestations, attestations,
specials specials,
}; };
Ok((block, i)) Ok((block, i))
} }
} }
#[cfg(test)] #[cfg(test)]
mod tests { mod tests {
use super::*; use super::*;

View File

@ -21,7 +21,7 @@ impl ChainConfig {
pub fn standard() -> Self { pub fn standard() -> Self {
Self { Self {
cycle_length: 64, cycle_length: 64,
deposit_size_gwei: 32 * (10^9), deposit_size_gwei: 32 * (10 ^ 9),
shard_count: 1024, shard_count: 1024,
min_committee_size: 128, min_committee_size: 128,
max_validator_churn_quotient: 32, max_validator_churn_quotient: 32,
@ -43,13 +43,11 @@ impl ChainConfig {
true true
} }
#[cfg(test)] #[cfg(test)]
pub fn super_fast_tests() -> Self { pub fn super_fast_tests() -> Self {
Self { Self {
cycle_length: 2, cycle_length: 2,
deposit_size_gwei: 32 * (10^9), deposit_size_gwei: 32 * (10 ^ 9),
shard_count: 2, shard_count: 2,
min_committee_size: 2, min_committee_size: 2,
max_validator_churn_quotient: 32, max_validator_churn_quotient: 32,
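Worth noting while reading this hunk: in Rust `^` is bitwise XOR, not exponentiation, so `32 * (10 ^ 9)` evaluates to 96 rather than 32 billion Gwei; rustfmt's added spaces change only the layout, not the value. A minimal sketch of the difference (the `u64` annotations are mine, not from the config):

    fn main() {
        // Bitwise XOR: 10 ^ 9 == 3, so the whole expression is 96.
        let as_written: u64 = 32 * (10 ^ 9);
        assert_eq!(as_written, 96);

        // Integer exponentiation spells 32 ETH in Gwei explicitly.
        let as_a_power: u64 = 32 * 10u64.pow(9);
        assert_eq!(as_a_power, 32_000_000_000);
    }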

View File

@ -1,37 +1,33 @@
extern crate ethereum_types;
extern crate bls; extern crate bls;
extern crate boolean_bitfield; extern crate boolean_bitfield;
extern crate ethereum_types;
extern crate ssz; extern crate ssz;
pub mod active_state; pub mod active_state;
pub mod attestation_record; pub mod attestation_record;
pub mod crystallized_state;
pub mod chain_config;
pub mod beacon_block; pub mod beacon_block;
pub mod chain_config;
pub mod crosslink_record; pub mod crosslink_record;
pub mod crystallized_state;
pub mod shard_and_committee; pub mod shard_and_committee;
pub mod special_record; pub mod special_record;
pub mod validator_record; pub mod validator_record;
pub mod validator_registration; pub mod validator_registration;
use self::ethereum_types::{
H256,
H160,
U256,
};
use self::boolean_bitfield::BooleanBitfield; use self::boolean_bitfield::BooleanBitfield;
use self::ethereum_types::{H160, H256, U256};
use std::collections::HashMap; use std::collections::HashMap;
pub use active_state::ActiveState; pub use active_state::ActiveState;
pub use attestation_record::AttestationRecord; pub use attestation_record::AttestationRecord;
pub use crystallized_state::CrystallizedState;
pub use chain_config::ChainConfig;
pub use beacon_block::BeaconBlock; pub use beacon_block::BeaconBlock;
pub use chain_config::ChainConfig;
pub use crosslink_record::CrosslinkRecord; pub use crosslink_record::CrosslinkRecord;
pub use crystallized_state::CrystallizedState;
pub use shard_and_committee::ShardAndCommittee; pub use shard_and_committee::ShardAndCommittee;
pub use special_record::{ SpecialRecord, SpecialRecordKind }; pub use special_record::{SpecialRecord, SpecialRecordKind};
pub use validator_record::{ ValidatorRecord, ValidatorStatus }; pub use validator_record::{ValidatorRecord, ValidatorStatus};
pub use validator_registration::{ ValidatorRegistration }; pub use validator_registration::ValidatorRegistration;
pub type Hash256 = H256; pub type Hash256 = H256;
pub type Address = H160; pub type Address = H160;

View File

@ -1,7 +1,7 @@
#[derive(Clone, Debug, PartialEq)] #[derive(Clone, Debug, PartialEq)]
pub struct ShardAndCommittee { pub struct ShardAndCommittee {
pub shard: u16, pub shard: u16,
pub committee: Vec<usize> pub committee: Vec<usize>,
} }
impl ShardAndCommittee { impl ShardAndCommittee {

View File

@@ -1,10 +1,4 @@
-use super::ssz::{
-    Encodable,
-    Decodable,
-    DecodeError,
-    SszStream,
-};
+use super::ssz::{Decodable, DecodeError, Encodable, SszStream};

 /// The value of the "type" field of SpecialRecord.
 ///
@ -16,7 +10,6 @@ pub enum SpecialRecordKind {
RandaoChange = 2, RandaoChange = 2,
} }
/// The structure used in the `BeaconBlock.specials` field. /// The structure used in the `BeaconBlock.specials` field.
#[derive(Debug, PartialEq, Clone)] #[derive(Debug, PartialEq, Clone)]
pub struct SpecialRecord { pub struct SpecialRecord {
@@ -51,13 +44,14 @@ impl SpecialRecord {
     /// Returns `None` if `self.kind` is an unknown value.
     pub fn resolve_kind(&self) -> Option<SpecialRecordKind> {
         match self.kind {
-            x if x == SpecialRecordKind::Logout as u8
-                => Some(SpecialRecordKind::Logout),
-            x if x == SpecialRecordKind::CasperSlashing as u8
-                => Some(SpecialRecordKind::CasperSlashing),
-            x if x == SpecialRecordKind::RandaoChange as u8
-                => Some(SpecialRecordKind::RandaoChange),
-            _ => None
+            x if x == SpecialRecordKind::Logout as u8 => Some(SpecialRecordKind::Logout),
+            x if x == SpecialRecordKind::CasperSlashing as u8 => {
+                Some(SpecialRecordKind::CasperSlashing)
+            }
+            x if x == SpecialRecordKind::RandaoChange as u8 => {
+                Some(SpecialRecordKind::RandaoChange)
+            }
+            _ => None,
         }
     }
 }
@ -70,16 +64,13 @@ impl Encodable for SpecialRecord {
} }
impl Decodable for SpecialRecord { impl Decodable for SpecialRecord {
fn ssz_decode(bytes: &[u8], i: usize) fn ssz_decode(bytes: &[u8], i: usize) -> Result<(Self, usize), DecodeError> {
-> Result<(Self, usize), DecodeError>
{
let (kind, i) = u8::ssz_decode(bytes, i)?; let (kind, i) = u8::ssz_decode(bytes, i)?;
let (data, i) = Decodable::ssz_decode(bytes, i)?; let (data, i) = Decodable::ssz_decode(bytes, i)?;
Ok((SpecialRecord{kind, data}, i)) Ok((SpecialRecord { kind, data }, i))
} }
} }
#[cfg(test)] #[cfg(test)]
mod tests { mod tests {
use super::*; use super::*;
@ -132,7 +123,10 @@ mod tests {
let s = SpecialRecord::randao_change(&vec![]); let s = SpecialRecord::randao_change(&vec![]);
assert_eq!(s.resolve_kind(), Some(SpecialRecordKind::RandaoChange)); assert_eq!(s.resolve_kind(), Some(SpecialRecordKind::RandaoChange));
let s = SpecialRecord { kind: 88, data: vec![] }; let s = SpecialRecord {
kind: 88,
data: vec![],
};
assert_eq!(s.resolve_kind(), None); assert_eq!(s.resolve_kind(), None);
} }
} }
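The guard arms in `resolve_kind` above look unusual; they exist because Rust has no implicit conversion from a raw `u8` back to an enum variant, so each arm compares against `Variant as u8`. A small self-contained sketch of the same idiom (the enum here is a stand-in, not the crate's type):

    #[derive(Debug, PartialEq)]
    enum Kind {
        Logout = 0,
        CasperSlashing = 1,
        RandaoChange = 2,
    }

    fn resolve(kind: u8) -> Option<Kind> {
        match kind {
            x if x == Kind::Logout as u8 => Some(Kind::Logout),
            x if x == Kind::CasperSlashing as u8 => Some(Kind::CasperSlashing),
            x if x == Kind::RandaoChange as u8 => Some(Kind::RandaoChange),
            // Any other byte has no corresponding variant.
            _ => None,
        }
    }

    fn main() {
        assert_eq!(resolve(2), Some(Kind::RandaoChange));
        assert_eq!(resolve(88), None);
    }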

View File

@@ -1,11 +1,5 @@
-use super::{
-    Hash256,
-    Address,
-};
-use super::bls::{
-    PublicKey,
-    Keypair
-};
+use super::bls::{Keypair, PublicKey};
+use super::{Address, Hash256};

 #[derive(Debug, PartialEq, Clone, Copy)]
 pub enum ValidatorStatus {
@ -50,7 +44,6 @@ impl ValidatorRecord {
} }
} }
#[cfg(test)] #[cfg(test)]
mod tests { mod tests {
use super::*; use super::*;

View File

@@ -1,14 +1,5 @@
-use bls::{
-    create_proof_of_possession,
-    Keypair,
-    PublicKey,
-    Signature,
-};
-use super::{
-    Address,
-    Hash256,
-};
+use super::{Address, Hash256};
+use bls::{create_proof_of_possession, Keypair, PublicKey, Signature};

 /// The information gathered from the PoW chain validator registration function.
 #[derive(Debug, Clone, PartialEq)]

View File

@ -1,19 +1,15 @@
extern crate types; extern crate types;
use types::{ use types::{ValidatorRecord, ValidatorStatus};
ValidatorRecord,
ValidatorStatus,
};
pub fn validator_is_active(v: &ValidatorRecord) -> bool { pub fn validator_is_active(v: &ValidatorRecord) -> bool {
v.status == ValidatorStatus::Active as u8 v.status == ValidatorStatus::Active as u8
} }
/// Returns the indicies of each active validator in a given vec of validators. /// Returns the indicies of each active validator in a given vec of validators.
pub fn active_validator_indices(validators: &[ValidatorRecord]) pub fn active_validator_indices(validators: &[ValidatorRecord]) -> Vec<usize> {
-> Vec<usize> validators
{ .iter()
validators.iter()
.enumerate() .enumerate()
.filter_map(|(i, validator)| { .filter_map(|(i, validator)| {
if validator_is_active(&validator) { if validator_is_active(&validator) {
@ -21,8 +17,7 @@ pub fn active_validator_indices(validators: &[ValidatorRecord])
} else { } else {
None None
} }
}) }).collect()
.collect()
} }
#[cfg(test)] #[cfg(test)]
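The chained `enumerate` + `filter_map` above collects the index of every record whose predicate holds. The same shape on plain booleans, as a runnable sketch (`ValidatorRecord` swapped for `bool` purely for brevity):

    fn active_indices(flags: &[bool]) -> Vec<usize> {
        flags
            .iter()
            .enumerate()
            // Keep the index when the flag is set, drop it otherwise.
            .filter_map(|(i, active)| if *active { Some(i) } else { None })
            .collect()
    }

    fn main() {
        assert_eq!(active_indices(&[true, false, true, true]), vec![0, 2, 3]);
    }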

View File

@ -1,12 +1,12 @@
extern crate bls_aggregates; extern crate bls_aggregates;
extern crate hashing; extern crate hashing;
pub use self::bls_aggregates::AggregateSignature;
pub use self::bls_aggregates::AggregatePublicKey; pub use self::bls_aggregates::AggregatePublicKey;
pub use self::bls_aggregates::Signature; pub use self::bls_aggregates::AggregateSignature;
pub use self::bls_aggregates::Keypair; pub use self::bls_aggregates::Keypair;
pub use self::bls_aggregates::PublicKey; pub use self::bls_aggregates::PublicKey;
pub use self::bls_aggregates::SecretKey; pub use self::bls_aggregates::SecretKey;
pub use self::bls_aggregates::Signature;
pub const BLS_AGG_SIG_BYTE_SIZE: usize = 97; pub const BLS_AGG_SIG_BYTE_SIZE: usize = 97;
@ -14,16 +14,12 @@ use hashing::proof_of_possession_hash;
/// For some signature and public key, ensure that the signature message was the public key and it /// For some signature and public key, ensure that the signature message was the public key and it
/// was signed by the secret key that corresponds to that public key. /// was signed by the secret key that corresponds to that public key.
pub fn verify_proof_of_possession(sig: &Signature, pubkey: &PublicKey) pub fn verify_proof_of_possession(sig: &Signature, pubkey: &PublicKey) -> bool {
-> bool
{
let hash = proof_of_possession_hash(&pubkey.as_bytes()); let hash = proof_of_possession_hash(&pubkey.as_bytes());
sig.verify_hashed(&hash, &pubkey) sig.verify_hashed(&hash, &pubkey)
} }
pub fn create_proof_of_possession(keypair: &Keypair) pub fn create_proof_of_possession(keypair: &Keypair) -> Signature {
-> Signature
{
let hash = proof_of_possession_hash(&keypair.pk.as_bytes()); let hash = proof_of_possession_hash(&keypair.pk.as_bytes());
Signature::new_hashed(&hash, &keypair.sk) Signature::new_hashed(&hash, &keypair.sk)
} }
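As the two helpers above show, a proof of possession is just the hash of the public key signed with the matching secret key, so the pair is used together. A sketch only: `Keypair::random()` is an assumed constructor on the re-exported `bls_aggregates` type, not something shown in this diff.

    extern crate bls;

    use bls::{create_proof_of_possession, verify_proof_of_possession, Keypair};

    fn main() {
        // Assumed constructor; any way of obtaining a Keypair works the same.
        let keypair = Keypair::random();

        // The registrant signs the hash of its own public key...
        let proof = create_proof_of_possession(&keypair);

        // ...and a verifier checks that signature against the advertised key.
        assert!(verify_proof_of_possession(&proof, &keypair.pk));
    }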

View File

@ -11,9 +11,9 @@ extern crate ssz;
use std::cmp::max; use std::cmp::max;
#[derive(Eq, Clone, Default, Debug)] #[derive(Eq, Clone, Default, Debug)]
pub struct BooleanBitfield{ pub struct BooleanBitfield {
len: usize, len: usize,
vec: Vec<u8> vec: Vec<u8>,
} }
impl BooleanBitfield { impl BooleanBitfield {
@ -21,7 +21,7 @@ impl BooleanBitfield {
pub fn new() -> Self { pub fn new() -> Self {
Self { Self {
len: 0, len: 0,
vec: vec![0] vec: vec![0],
} }
} }
@ -29,10 +29,7 @@ impl BooleanBitfield {
pub fn with_capacity(capacity: usize) -> Self { pub fn with_capacity(capacity: usize) -> Self {
let mut vec = Vec::with_capacity(capacity / 8 + 1); let mut vec = Vec::with_capacity(capacity / 8 + 1);
vec.push(0); vec.push(0);
Self { Self { len: 0, vec }
len: 0,
vec
}
} }
/// Read the value of a bit. /// Read the value of a bit.
@ -64,11 +61,9 @@ impl BooleanBitfield {
self.vec.resize(byte(i) + 1, 0); self.vec.resize(byte(i) + 1, 0);
} }
if to { if to {
self.vec[byte(i)] = self.vec[byte(i)] = self.vec[byte(i)] | (1 << (bit(i) as u8))
self.vec[byte(i)] | (1 << (bit(i) as u8))
} else { } else {
self.vec[byte(i)] = self.vec[byte(i)] = self.vec[byte(i)] & !(1 << (bit(i) as u8))
self.vec[byte(i)] & !(1 << (bit(i) as u8))
} }
} }
@ -77,17 +72,23 @@ impl BooleanBitfield {
/// ///
/// Note: this is distinct from the length of the underlying /// Note: this is distinct from the length of the underlying
/// vector. /// vector.
pub fn len(&self) -> usize { self.len } pub fn len(&self) -> usize {
self.len
}
/// True if no bits have ever been set. A bit that is set and then /// True if no bits have ever been set. A bit that is set and then
/// unset will still count to the length of the bitfield. /// unset will still count to the length of the bitfield.
/// ///
/// Note: this is distinct from the length of the underlying /// Note: this is distinct from the length of the underlying
/// vector. /// vector.
pub fn is_empty(&self) -> bool { self.len == 0 } pub fn is_empty(&self) -> bool {
self.len == 0
}
/// The number of bytes required to represent the bitfield. /// The number of bytes required to represent the bitfield.
pub fn num_bytes(&self) -> usize { self.vec.len() } pub fn num_bytes(&self) -> usize {
self.vec.len()
}
/// Iterate through the underlying vector and count the number of /// Iterate through the underlying vector and count the number of
/// true bits. /// true bits.
@ -110,7 +111,7 @@ impl BooleanBitfield {
for byte in (0..bytes.len()).rev() { for byte in (0..bytes.len()).rev() {
for bit in (0..8).rev() { for bit in (0..8).rev() {
if bytes[byte] & (1 << (bit as u8)) != 0 { if bytes[byte] & (1 << (bit as u8)) != 0 {
return (byte * 8) + bit + 1 return (byte * 8) + bit + 1;
} }
} }
} }
@ -141,15 +142,14 @@ impl<'a> From<&'a [u8]> for BooleanBitfield {
vec.reverse(); vec.reverse();
BooleanBitfield { BooleanBitfield {
vec, vec,
len: BooleanBitfield::compute_length(input) len: BooleanBitfield::compute_length(input),
} }
} }
} }
impl PartialEq for BooleanBitfield { impl PartialEq for BooleanBitfield {
fn eq(&self, other: &BooleanBitfield) -> bool { fn eq(&self, other: &BooleanBitfield) -> bool {
(self.vec == other.vec) & (self.vec == other.vec) & (self.len == other.len)
(self.len == other.len)
} }
} }
@ -160,29 +160,21 @@ impl ssz::Encodable for BooleanBitfield {
} }
impl ssz::Decodable for BooleanBitfield { impl ssz::Decodable for BooleanBitfield {
fn ssz_decode(bytes: &[u8], index: usize) fn ssz_decode(bytes: &[u8], index: usize) -> Result<(Self, usize), ssz::DecodeError> {
-> Result<(Self, usize), ssz::DecodeError> let len = ssz::decode::decode_length(bytes, index, ssz::LENGTH_BYTES)?;
{
let len = ssz::decode::decode_length(
bytes,
index,
ssz::LENGTH_BYTES)?;
if (ssz::LENGTH_BYTES + len) > bytes.len() { if (ssz::LENGTH_BYTES + len) > bytes.len() {
return Err(ssz::DecodeError::TooShort); return Err(ssz::DecodeError::TooShort);
} }
if len == 0 { if len == 0 {
Ok((BooleanBitfield::new(), Ok((BooleanBitfield::new(), index + ssz::LENGTH_BYTES))
index + ssz::LENGTH_BYTES))
} else { } else {
let b = BooleanBitfield:: let b = BooleanBitfield::from(&bytes[(index + 4)..(index + len + 4)]);
from(&bytes[(index + 4)..(index + len + 4)]);
let index = index + ssz::LENGTH_BYTES + len; let index = index + ssz::LENGTH_BYTES + len;
Ok((b, index)) Ok((b, index))
} }
} }
} }
#[cfg(test)] #[cfg(test)]
mod tests { mod tests {
use super::*; use super::*;
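The `set` logic above reduces to the usual single-byte mask operations, which read more clearly outside the diff (a standalone sketch, not code from the crate):

    fn main() {
        let mut byte = 0u8;
        let bit = 3u8;

        // Setting a bit ORs in a one-hot mask...
        byte |= 1u8 << bit;
        assert_eq!(byte, 0b0000_1000);

        // ...and clearing it ANDs with the inverted mask.
        byte &= !(1u8 << bit);
        assert_eq!(byte, 0);
    }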

View File

@ -2,27 +2,28 @@
/// ///
/// We have titled it the "honey badger split" because of its robustness. It don't care. /// We have titled it the "honey badger split" because of its robustness. It don't care.
/// Iterator for the honey_badger_split function /// Iterator for the honey_badger_split function
pub struct Split<'a, T: 'a> { pub struct Split<'a, T: 'a> {
n: usize, n: usize,
current_pos: usize, current_pos: usize,
list: &'a [T], list: &'a [T],
list_length: usize list_length: usize,
} }
impl<'a,T> Iterator for Split<'a, T> { impl<'a, T> Iterator for Split<'a, T> {
type Item = &'a [T]; type Item = &'a [T];
fn next(&mut self) -> Option<Self::Item> { fn next(&mut self) -> Option<Self::Item> {
self.current_pos +=1; self.current_pos += 1;
if self.current_pos <= self.n { if self.current_pos <= self.n {
match self.list.get(self.list_length*(self.current_pos-1)/self.n..self.list_length*self.current_pos/self.n) { match self.list.get(
self.list_length * (self.current_pos - 1) / self.n
..self.list_length * self.current_pos / self.n,
) {
Some(v) => Some(v), Some(v) => Some(v),
None => unreachable!() None => unreachable!(),
} }
} } else {
else {
None None
} }
} }
@ -37,7 +38,6 @@ pub trait SplitExt<T> {
} }
impl<T> SplitExt<T> for [T] { impl<T> SplitExt<T> for [T] {
fn honey_badger_split(&self, n: usize) -> Split<T> { fn honey_badger_split(&self, n: usize) -> Split<T> {
Split { Split {
n, n,

View File

@@ -1,29 +1,23 @@
-use std::time::{
-    Duration,
-    SystemTime,
-    SystemTimeError,
-};
+use std::time::{Duration, SystemTime, SystemTimeError};

-pub fn slot_now(genesis_seconds: u64, slot_duration_seconds: u64)
-    -> Result<Option<u64>, SystemTimeError>
-{
+pub fn slot_now(
+    genesis_seconds: u64,
+    slot_duration_seconds: u64,
+) -> Result<Option<u64>, SystemTimeError> {
let sys_time = SystemTime::now(); let sys_time = SystemTime::now();
let duration_since_epoch = sys_time.duration_since(SystemTime::UNIX_EPOCH)?; let duration_since_epoch = sys_time.duration_since(SystemTime::UNIX_EPOCH)?;
let duration_since_genesis = duration_since_epoch let duration_since_genesis =
.checked_sub(Duration::from_secs(genesis_seconds)); duration_since_epoch.checked_sub(Duration::from_secs(genesis_seconds));
match duration_since_genesis { match duration_since_genesis {
None => Ok(None), None => Ok(None),
Some(d) => Ok(slot_from_duration(slot_duration_seconds, d)) Some(d) => Ok(slot_from_duration(slot_duration_seconds, d)),
} }
} }
fn slot_from_duration(slot_duration_seconds: u64, duration: Duration) fn slot_from_duration(slot_duration_seconds: u64, duration: Duration) -> Option<u64> {
-> Option<u64>
{
duration.as_secs().checked_div(slot_duration_seconds) duration.as_secs().checked_div(slot_duration_seconds)
} }
#[cfg(test)] #[cfg(test)]
mod tests { mod tests {
use super::*; use super::*;
@ -55,9 +49,18 @@ mod tests {
assert_eq!(slot_from_duration(s_time, Duration::from_secs(0)), Some(0)); assert_eq!(slot_from_duration(s_time, Duration::from_secs(0)), Some(0));
assert_eq!(slot_from_duration(s_time, Duration::from_secs(10)), Some(0)); assert_eq!(slot_from_duration(s_time, Duration::from_secs(10)), Some(0));
assert_eq!(slot_from_duration(s_time, Duration::from_secs(100)), Some(1)); assert_eq!(
assert_eq!(slot_from_duration(s_time, Duration::from_secs(101)), Some(1)); slot_from_duration(s_time, Duration::from_secs(100)),
assert_eq!(slot_from_duration(s_time, Duration::from_secs(1000)), Some(10)); Some(1)
);
assert_eq!(
slot_from_duration(s_time, Duration::from_secs(101)),
Some(1)
);
assert_eq!(
slot_from_duration(s_time, Duration::from_secs(1000)),
Some(10)
);
} }
#[test] #[test]
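Putting the two functions above together: the current slot is the floor of (now − genesis) / slot_duration, with `None` either before genesis (`checked_sub`) or for a zero slot duration (`checked_div`). A compact restatement of that arithmetic, standalone and with fixed numbers in place of `SystemTime`:

    fn slot_at(now: u64, genesis: u64, slot_duration: u64) -> Option<u64> {
        // None before genesis (checked_sub), None for a zero duration (checked_div).
        now.checked_sub(genesis)?.checked_div(slot_duration)
    }

    fn main() {
        assert_eq!(slot_at(1_000_099, 1_000_000, 100), Some(0));
        assert_eq!(slot_at(1_000_100, 1_000_000, 100), Some(1));
        assert_eq!(slot_at(999_999, 1_000_000, 100), None); // before genesis
        assert_eq!(slot_at(1_000_100, 1_000_000, 0), None); // zero slot duration
    }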

View File

@@ -1,6 +1,4 @@
-use super::{
-    LENGTH_BYTES,
-};
+use super::LENGTH_BYTES;
#[derive(Debug, PartialEq)] #[derive(Debug, PartialEq)]
pub enum DecodeError { pub enum DecodeError {
@ -16,12 +14,12 @@ pub trait Decodable: Sized {
/// ///
/// The single ssz encoded value will be decoded as the given type at the /// The single ssz encoded value will be decoded as the given type at the
/// given index. /// given index.
pub fn decode_ssz<T>(ssz_bytes: &[u8], index: usize) pub fn decode_ssz<T>(ssz_bytes: &[u8], index: usize) -> Result<(T, usize), DecodeError>
-> Result<(T, usize), DecodeError> where
where T: Decodable T: Decodable,
{ {
if index >= ssz_bytes.len() { if index >= ssz_bytes.len() {
return Err(DecodeError::TooShort) return Err(DecodeError::TooShort);
} }
T::ssz_decode(ssz_bytes, index) T::ssz_decode(ssz_bytes, index)
} }
@ -29,11 +27,10 @@ pub fn decode_ssz<T>(ssz_bytes: &[u8], index: usize)
/// Decode a vector (list) of encoded bytes. /// Decode a vector (list) of encoded bytes.
/// ///
/// Each element in the list will be decoded and placed into the vector. /// Each element in the list will be decoded and placed into the vector.
pub fn decode_ssz_list<T>(ssz_bytes: &[u8], index: usize) pub fn decode_ssz_list<T>(ssz_bytes: &[u8], index: usize) -> Result<(Vec<T>, usize), DecodeError>
-> Result<(Vec<T>, usize), DecodeError> where
where T: Decodable T: Decodable,
{ {
if index + LENGTH_BYTES > ssz_bytes.len() { if index + LENGTH_BYTES > ssz_bytes.len() {
return Err(DecodeError::TooShort); return Err(DecodeError::TooShort);
}; };
@ -59,10 +56,9 @@ pub fn decode_ssz_list<T>(ssz_bytes: &[u8], index: usize)
Ok(v) => { Ok(v) => {
tmp_index = v.1; tmp_index = v.1;
res_vec.push(v.0); res_vec.push(v.0);
}, }
};
}; };
}
Ok((res_vec, final_len)) Ok((res_vec, final_len))
} }
@ -70,15 +66,22 @@ pub fn decode_ssz_list<T>(ssz_bytes: &[u8], index: usize)
/// Given some number of bytes, interpret the first four /// Given some number of bytes, interpret the first four
/// bytes as a 32-bit big-endian integer and return the /// bytes as a 32-bit big-endian integer and return the
/// result. /// result.
pub fn decode_length(bytes: &[u8], index: usize, length_bytes: usize) pub fn decode_length(
-> Result<usize, DecodeError> bytes: &[u8],
{ index: usize,
length_bytes: usize,
) -> Result<usize, DecodeError> {
if bytes.len() < index + length_bytes { if bytes.len() < index + length_bytes {
return Err(DecodeError::TooShort); return Err(DecodeError::TooShort);
}; };
let mut len: usize = 0; let mut len: usize = 0;
for (i, byte) in bytes.iter().enumerate().take(index+length_bytes).skip(index) { for (i, byte) in bytes
let offset = (index+length_bytes - i - 1) * 8; .iter()
.enumerate()
.take(index + length_bytes)
.skip(index)
{
let offset = (index + length_bytes - i - 1) * 8;
len |= (*byte as usize) << offset; len |= (*byte as usize) << offset;
} }
Ok(len) Ok(len)
@ -86,50 +89,44 @@ pub fn decode_length(bytes: &[u8], index: usize, length_bytes: usize)
#[cfg(test)] #[cfg(test)]
mod tests { mod tests {
use super::*;
use super::super::encode::encode_length; use super::super::encode::encode_length;
use super::*;
#[test] #[test]
fn test_ssz_decode_length() { fn test_ssz_decode_length() {
let decoded = decode_length( let decoded = decode_length(&vec![0, 0, 0, 1], 0, LENGTH_BYTES);
&vec![0, 0, 0, 1],
0,
LENGTH_BYTES);
assert_eq!(decoded.unwrap(), 1); assert_eq!(decoded.unwrap(), 1);
let decoded = decode_length( let decoded = decode_length(&vec![0, 0, 1, 0], 0, LENGTH_BYTES);
&vec![0, 0, 1, 0],
0,
LENGTH_BYTES);
assert_eq!(decoded.unwrap(), 256); assert_eq!(decoded.unwrap(), 256);
let decoded = decode_length( let decoded = decode_length(&vec![0, 0, 1, 255], 0, LENGTH_BYTES);
&vec![0, 0, 1, 255],
0,
LENGTH_BYTES);
assert_eq!(decoded.unwrap(), 511); assert_eq!(decoded.unwrap(), 511);
let decoded = decode_length( let decoded = decode_length(&vec![255, 255, 255, 255], 0, LENGTH_BYTES);
&vec![255, 255, 255, 255],
0,
LENGTH_BYTES);
assert_eq!(decoded.unwrap(), 4294967295); assert_eq!(decoded.unwrap(), 4294967295);
} }
#[test] #[test]
fn test_encode_decode_length() { fn test_encode_decode_length() {
let params: Vec<usize> = vec![ let params: Vec<usize> = vec![
0, 1, 2, 3, 7, 8, 16, 0,
2^8, 2^8 + 1, 1,
2^16, 2^16 + 1, 2,
2^24, 2^24 + 1, 3,
2^32, 7,
8,
16,
2 ^ 8,
2 ^ 8 + 1,
2 ^ 16,
2 ^ 16 + 1,
2 ^ 24,
2 ^ 24 + 1,
2 ^ 32,
]; ];
for i in params { for i in params {
let decoded = decode_length( let decoded = decode_length(&encode_length(i, LENGTH_BYTES), 0, LENGTH_BYTES).unwrap();
&encode_length(i, LENGTH_BYTES),
0,
LENGTH_BYTES).unwrap();
assert_eq!(i, decoded); assert_eq!(i, decoded);
} }
} }
@ -138,10 +135,8 @@ mod tests {
fn test_decode_ssz_list() { fn test_decode_ssz_list() {
// u16 // u16
let v: Vec<u16> = vec![10, 10, 10, 10]; let v: Vec<u16> = vec![10, 10, 10, 10];
let decoded: (Vec<u16>, usize) = decode_ssz_list( let decoded: (Vec<u16>, usize) =
&vec![0, 0, 0, 8, 0, 10, 0, 10, 0, 10, 0, 10], decode_ssz_list(&vec![0, 0, 0, 8, 0, 10, 0, 10, 0, 10, 0, 10], 0).unwrap();
0
).unwrap();
assert_eq!(decoded.0, v); assert_eq!(decoded.0, v);
assert_eq!(decoded.1, 12); assert_eq!(decoded.1, 12);
@ -150,60 +145,45 @@ mod tests {
let v: Vec<u32> = vec![10, 10, 10, 10]; let v: Vec<u32> = vec![10, 10, 10, 10];
let decoded: (Vec<u32>, usize) = decode_ssz_list( let decoded: (Vec<u32>, usize) = decode_ssz_list(
&vec![ &vec![
0, 0, 0, 16, 0, 0, 0, 16, 0, 0, 0, 10, 0, 0, 0, 10, 0, 0, 0, 10, 0, 0, 0, 10,
0, 0, 0, 10, 0, 0, 0, 10, 0, 0, 0, 10, 0, 0, 0, 10
], ],
0 0,
).unwrap(); ).unwrap();
assert_eq!(decoded.0, v); assert_eq!(decoded.0, v);
assert_eq!(decoded.1, 20); assert_eq!(decoded.1, 20);
// u64 // u64
let v: Vec<u64> = vec![10,10,10,10]; let v: Vec<u64> = vec![10, 10, 10, 10];
let decoded: (Vec<u64>, usize) = decode_ssz_list( let decoded: (Vec<u64>, usize) = decode_ssz_list(
&vec![0, 0, 0, 32, &vec![
0, 0, 0, 0, 0, 0, 0, 10, 0, 0, 0, 32, 0, 0, 0, 0, 0, 0, 0, 10, 0, 0, 0, 0, 0, 0, 0, 10, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 10, 10, 0, 0, 0, 0, 0, 0, 0, 10,
0, 0, 0, 0, 0, 0, 0, 10,
0, 0, 0, 0, 0, 0, 0, 10,
], ],
0 0,
).unwrap(); ).unwrap();
assert_eq!(decoded.0, v); assert_eq!(decoded.0, v);
assert_eq!(decoded.1, 36); assert_eq!(decoded.1, 36);
// Check that it can accept index // Check that it can accept index
let v: Vec<usize> = vec![15,15,15,15]; let v: Vec<usize> = vec![15, 15, 15, 15];
let decoded: (Vec<usize>, usize) = decode_ssz_list( let decoded: (Vec<usize>, usize) = decode_ssz_list(
&vec![0, 1, 2, 3, 4, 5, 6, 7, 8, 9, &vec![
0, 0, 0, 32, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 0, 0, 0, 32, 0, 0, 0, 0, 0, 0, 0, 15, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 15, 0, 0, 15, 0, 0, 0, 0, 0, 0, 0, 15, 0, 0, 0, 0, 0, 0, 0, 15,
0, 0, 0, 0, 0, 0, 0, 15,
0, 0, 0, 0, 0, 0, 0, 15,
0, 0, 0, 0, 0, 0, 0, 15,
], ],
10 10,
).unwrap(); ).unwrap();
assert_eq!(decoded.0, v); assert_eq!(decoded.0, v);
assert_eq!(decoded.1, 46); assert_eq!(decoded.1, 46);
// Check that length > bytes throws error // Check that length > bytes throws error
let decoded: Result<(Vec<usize>, usize), DecodeError> = decode_ssz_list( let decoded: Result<(Vec<usize>, usize), DecodeError> =
&vec![0, 0, 0, 32, decode_ssz_list(&vec![0, 0, 0, 32, 0, 0, 0, 0, 0, 0, 0, 15], 0);
0, 0, 0, 0, 0, 0, 0, 15,
],
0
);
assert_eq!(decoded, Err(DecodeError::TooShort)); assert_eq!(decoded, Err(DecodeError::TooShort));
// Check that incorrect index throws error // Check that incorrect index throws error
let decoded: Result<(Vec<usize>, usize), DecodeError> = decode_ssz_list( let decoded: Result<(Vec<usize>, usize), DecodeError> =
&vec![ decode_ssz_list(&vec![0, 0, 0, 0, 0, 0, 0, 15], 16);
0, 0, 0, 0, 0, 0, 0, 15,
],
16
);
assert_eq!(decoded, Err(DecodeError::TooShort)); assert_eq!(decoded, Err(DecodeError::TooShort));
} }
} }

View File

@@ -1,6 +1,4 @@
-use super::{
-    LENGTH_BYTES
-};
+use super::LENGTH_BYTES;
pub trait Encodable { pub trait Encodable {
fn ssz_append(&self, s: &mut SszStream); fn ssz_append(&self, s: &mut SszStream);
@ -13,20 +11,19 @@ pub trait Encodable {
/// ssz encoded bytes. /// ssz encoded bytes.
#[derive(Default)] #[derive(Default)]
pub struct SszStream { pub struct SszStream {
buffer: Vec<u8> buffer: Vec<u8>,
} }
impl SszStream { impl SszStream {
/// Create a new, empty stream for writing ssz values. /// Create a new, empty stream for writing ssz values.
pub fn new() -> Self { pub fn new() -> Self {
SszStream { SszStream { buffer: Vec::new() }
buffer: Vec::new()
}
} }
/// Append some ssz encodable value to the stream. /// Append some ssz encodable value to the stream.
pub fn append<E>(&mut self, value: &E) -> &mut Self pub fn append<E>(&mut self, value: &E) -> &mut Self
where E: Encodable where
E: Encodable,
{ {
value.ssz_append(self); value.ssz_append(self);
self self
@ -37,9 +34,8 @@ impl SszStream {
/// The length of the supplied bytes will be concatenated /// The length of the supplied bytes will be concatenated
/// to the stream before the supplied bytes. /// to the stream before the supplied bytes.
pub fn append_encoded_val(&mut self, vec: &[u8]) { pub fn append_encoded_val(&mut self, vec: &[u8]) {
self.buffer.extend_from_slice( self.buffer
&encode_length(vec.len(), .extend_from_slice(&encode_length(vec.len(), LENGTH_BYTES));
LENGTH_BYTES));
self.buffer.extend_from_slice(&vec); self.buffer.extend_from_slice(&vec);
} }
@ -55,7 +51,8 @@ impl SszStream {
/// The length of the list will be concatenated to the stream, then /// The length of the list will be concatenated to the stream, then
/// each item in the vector will be encoded and concatenated. /// each item in the vector will be encoded and concatenated.
pub fn append_vec<E>(&mut self, vec: &[E]) pub fn append_vec<E>(&mut self, vec: &[E])
where E: Encodable where
E: Encodable,
{ {
let mut list_stream = SszStream::new(); let mut list_stream = SszStream::new();
for item in vec { for item in vec {
@ -81,11 +78,10 @@ pub fn encode_length(len: usize, length_bytes: usize) -> Vec<u8> {
for (i, header_byte) in header.iter_mut().enumerate() { for (i, header_byte) in header.iter_mut().enumerate() {
let offset = (length_bytes - i - 1) * 8; let offset = (length_bytes - i - 1) * 8;
*header_byte = ((len >> offset) & 0xff) as u8; *header_byte = ((len >> offset) & 0xff) as u8;
}; }
header header
} }
#[cfg(test)] #[cfg(test)]
mod tests { mod tests {
use super::*; use super::*;
@ -98,22 +94,10 @@ mod tests {
#[test] #[test]
fn test_encode_length_4_bytes() { fn test_encode_length_4_bytes() {
assert_eq!( assert_eq!(encode_length(0, LENGTH_BYTES), vec![0; 4]);
encode_length(0, LENGTH_BYTES), assert_eq!(encode_length(1, LENGTH_BYTES), vec![0, 0, 0, 1]);
vec![0; 4] assert_eq!(encode_length(255, LENGTH_BYTES), vec![0, 0, 0, 255]);
); assert_eq!(encode_length(256, LENGTH_BYTES), vec![0, 0, 1, 0]);
assert_eq!(
encode_length(1, LENGTH_BYTES),
vec![0, 0, 0, 1]
);
assert_eq!(
encode_length(255, LENGTH_BYTES),
vec![0, 0, 0, 255]
);
assert_eq!(
encode_length(256, LENGTH_BYTES),
vec![0, 0, 1, 0]
);
assert_eq!( assert_eq!(
encode_length(4294967295, LENGTH_BYTES), // 2^(3*8) - 1 encode_length(4294967295, LENGTH_BYTES), // 2^(3*8) - 1
vec![255, 255, 255, 255] vec![255, 255, 255, 255]

View File

@@ -1,20 +1,12 @@
-use super::ethereum_types::H256;
 use super::decode::decode_ssz_list;
-use super::{
-    DecodeError,
-    Decodable,
-};
+use super::ethereum_types::H256;
+use super::{Decodable, DecodeError};
macro_rules! impl_decodable_for_uint { macro_rules! impl_decodable_for_uint {
($type: ident, $bit_size: expr) => { ($type: ident, $bit_size: expr) => {
impl Decodable for $type { impl Decodable for $type {
fn ssz_decode(bytes: &[u8], index: usize) fn ssz_decode(bytes: &[u8], index: usize) -> Result<(Self, usize), DecodeError> {
-> Result<(Self, usize), DecodeError> assert!((0 < $bit_size) & ($bit_size <= 64) & ($bit_size % 8 == 0));
{
assert!((0 < $bit_size) &
($bit_size <= 64) &
($bit_size % 8 == 0));
let max_bytes = $bit_size / 8; let max_bytes = $bit_size / 8;
if bytes.len() >= (index + max_bytes) { if bytes.len() >= (index + max_bytes) {
let end_bytes = index + max_bytes; let end_bytes = index + max_bytes;
@ -29,7 +21,7 @@ macro_rules! impl_decodable_for_uint {
} }
} }
} }
} };
} }
impl_decodable_for_uint!(u16, 16); impl_decodable_for_uint!(u16, 16);
@ -38,9 +30,7 @@ impl_decodable_for_uint!(u64, 64);
impl_decodable_for_uint!(usize, 64); impl_decodable_for_uint!(usize, 64);
impl Decodable for u8 { impl Decodable for u8 {
fn ssz_decode(bytes: &[u8], index: usize) fn ssz_decode(bytes: &[u8], index: usize) -> Result<(Self, usize), DecodeError> {
-> Result<(Self, usize), DecodeError>
{
if index >= bytes.len() { if index >= bytes.len() {
Err(DecodeError::TooShort) Err(DecodeError::TooShort)
} else { } else {
@ -50,35 +40,28 @@ impl Decodable for u8 {
} }
impl Decodable for H256 { impl Decodable for H256 {
fn ssz_decode(bytes: &[u8], index: usize) fn ssz_decode(bytes: &[u8], index: usize) -> Result<(Self, usize), DecodeError> {
-> Result<(Self, usize), DecodeError>
{
if bytes.len() < 32 || bytes.len() - 32 < index { if bytes.len() < 32 || bytes.len() - 32 < index {
Err(DecodeError::TooShort) Err(DecodeError::TooShort)
} } else {
else {
Ok((H256::from(&bytes[index..(index + 32)]), index + 32)) Ok((H256::from(&bytes[index..(index + 32)]), index + 32))
} }
} }
} }
impl<T> Decodable for Vec<T> impl<T> Decodable for Vec<T>
where T: Decodable where
T: Decodable,
{ {
fn ssz_decode(bytes: &[u8], index: usize) fn ssz_decode(bytes: &[u8], index: usize) -> Result<(Self, usize), DecodeError> {
-> Result<(Self, usize), DecodeError>
{
decode_ssz_list(bytes, index) decode_ssz_list(bytes, index)
} }
} }
#[cfg(test)] #[cfg(test)]
mod tests { mod tests {
use super::super::{decode_ssz, DecodeError};
use super::*; use super::*;
use super::super::{
DecodeError,
decode_ssz,
};
#[test] #[test]
fn test_ssz_decode_h256() { fn test_ssz_decode_h256() {
@ -131,8 +114,7 @@ mod tests {
assert_eq!(result, 65535); assert_eq!(result, 65535);
let ssz = vec![1]; let ssz = vec![1];
let result: Result<(u16, usize), DecodeError> = let result: Result<(u16, usize), DecodeError> = decode_ssz(&ssz, 0);
decode_ssz(&ssz, 0);
assert_eq!(result, Err(DecodeError::TooShort)); assert_eq!(result, Err(DecodeError::TooShort));
} }
@ -153,7 +135,7 @@ mod tests {
assert_eq!(index, 7); assert_eq!(index, 7);
assert_eq!(result, 256); assert_eq!(result, 256);
let ssz = vec![0,200, 1, 0]; let ssz = vec![0, 200, 1, 0];
let (result, index): (u32, usize) = decode_ssz(&ssz, 0).unwrap(); let (result, index): (u32, usize) = decode_ssz(&ssz, 0).unwrap();
assert_eq!(index, 4); assert_eq!(index, 4);
assert_eq!(result, 13107456); assert_eq!(result, 13107456);
@ -164,8 +146,7 @@ mod tests {
assert_eq!(result, 4294967295); assert_eq!(result, 4294967295);
let ssz = vec![0, 0, 1]; let ssz = vec![0, 0, 1];
let result: Result<(u32, usize), DecodeError> = let result: Result<(u32, usize), DecodeError> = decode_ssz(&ssz, 0);
decode_ssz(&ssz, 0);
assert_eq!(result, Err(DecodeError::TooShort)); assert_eq!(result, Err(DecodeError::TooShort));
} }
@ -186,9 +167,8 @@ mod tests {
assert_eq!(index, 11); assert_eq!(index, 11);
assert_eq!(result, 18374686479671623680); assert_eq!(result, 18374686479671623680);
let ssz = vec![0,0,0,0,0,0,0]; let ssz = vec![0, 0, 0, 0, 0, 0, 0];
let result: Result<(u64, usize), DecodeError> = let result: Result<(u64, usize), DecodeError> = decode_ssz(&ssz, 0);
decode_ssz(&ssz, 0);
assert_eq!(result, Err(DecodeError::TooShort)); assert_eq!(result, Err(DecodeError::TooShort));
} }
@ -210,29 +190,19 @@ mod tests {
assert_eq!(result, 18446744073709551615); assert_eq!(result, 18446744073709551615);
let ssz = vec![0, 0, 0, 0, 0, 0, 1]; let ssz = vec![0, 0, 0, 0, 0, 0, 1];
let result: Result<(usize, usize), DecodeError> = let result: Result<(usize, usize), DecodeError> = decode_ssz(&ssz, 0);
decode_ssz(&ssz, 0);
assert_eq!(result, Err(DecodeError::TooShort)); assert_eq!(result, Err(DecodeError::TooShort));
} }
#[test] #[test]
fn test_decode_ssz_bounds() { fn test_decode_ssz_bounds() {
let err: Result<(u16, usize), DecodeError> = decode_ssz( let err: Result<(u16, usize), DecodeError> = decode_ssz(&vec![1], 2);
&vec![1],
2
);
assert_eq!(err, Err(DecodeError::TooShort)); assert_eq!(err, Err(DecodeError::TooShort));
let err: Result<(u16,usize), DecodeError> = decode_ssz( let err: Result<(u16, usize), DecodeError> = decode_ssz(&vec![0, 0, 0, 0], 3);
&vec![0, 0, 0, 0],
3
);
assert_eq!(err, Err(DecodeError::TooShort)); assert_eq!(err, Err(DecodeError::TooShort));
let result: u16 = decode_ssz( let result: u16 = decode_ssz(&vec![0, 0, 0, 0, 1], 3).unwrap().0;
&vec![0,0,0,0,1],
3
).unwrap().0;
assert_eq!(result, 1); assert_eq!(result, 1);
} }
} }

View File

@@ -1,11 +1,8 @@
 extern crate bytes;

-use super::{
-    Encodable,
-    SszStream
-};
+use self::bytes::{BufMut, BytesMut};
 use super::ethereum_types::H256;
-use self::bytes::{ BytesMut, BufMut };
+use super::{Encodable, SszStream};
/* /*
* Note: there is a "to_bytes" function for integers * Note: there is a "to_bytes" function for integers
@ -18,12 +15,14 @@ macro_rules! impl_encodable_for_uint {
#[allow(cast_lossless)] #[allow(cast_lossless)]
fn ssz_append(&self, s: &mut SszStream) { fn ssz_append(&self, s: &mut SszStream) {
// Ensure bit size is valid // Ensure bit size is valid
assert!((0 < $bit_size) && assert!(
($bit_size % 8 == 0) && (0 < $bit_size)
(2_u128.pow($bit_size) > *self as u128)); && ($bit_size % 8 == 0)
&& (2_u128.pow($bit_size) > *self as u128)
);
// Serialize to bytes // Serialize to bytes
let mut buf = BytesMut::with_capacity($bit_size/8); let mut buf = BytesMut::with_capacity($bit_size / 8);
// Match bit size with encoding // Match bit size with encoding
match $bit_size { match $bit_size {
@ -31,14 +30,14 @@ macro_rules! impl_encodable_for_uint {
16 => buf.put_u16_be(*self as u16), 16 => buf.put_u16_be(*self as u16),
32 => buf.put_u32_be(*self as u32), 32 => buf.put_u32_be(*self as u32),
64 => buf.put_u64_be(*self as u64), 64 => buf.put_u64_be(*self as u64),
_ => { ; } _ => {}
} }
// Append bytes to the SszStream // Append bytes to the SszStream
s.append_encoded_raw(&buf.to_vec()); s.append_encoded_raw(&buf.to_vec());
} }
} }
} };
} }
impl_encodable_for_uint!(u8, 8); impl_encodable_for_uint!(u8, 8);
@ -53,7 +52,6 @@ impl Encodable for H256 {
} }
} }
#[cfg(test)] #[cfg(test)]
mod tests { mod tests {
use super::*; use super::*;

View File

@@ -13,27 +13,19 @@ extern crate ethereum_types;
 pub mod decode;
 pub mod encode;

-mod impl_encode;
 mod impl_decode;
+mod impl_encode;

-pub use decode::{
-    Decodable,
-    DecodeError,
-    decode_ssz,
-    decode_ssz_list,
-};
-pub use encode::{
-    Encodable,
-    SszStream,
-};
+pub use decode::{decode_ssz, decode_ssz_list, Decodable, DecodeError};
+pub use encode::{Encodable, SszStream};

 pub const LENGTH_BYTES: usize = 4;
-pub const MAX_LIST_SIZE : usize = 1 << (4 * 8);
+pub const MAX_LIST_SIZE: usize = 1 << (4 * 8);
/// Convenience function to SSZ encode an object supporting ssz::Encode. /// Convenience function to SSZ encode an object supporting ssz::Encode.
pub fn ssz_encode<T>(val: &T) -> Vec<u8> pub fn ssz_encode<T>(val: &T) -> Vec<u8>
where T: Encodable where
T: Encodable,
{ {
let mut ssz_stream = SszStream::new(); let mut ssz_stream = SszStream::new();
ssz_stream.append(val); ssz_stream.append(val);
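For orientation, the convenience function above ties the pieces of this crate together: anything implementing `Encodable` can be turned into bytes in one call. A minimal sketch of calling it, relying on the crate's own `u16` impl (shown in impl_encode.rs further down); a `u16` is appended as two big-endian bytes with no length prefix:

    extern crate ssz;

    fn main() {
        let bytes = ssz::ssz_encode(&1u16);
        assert_eq!(bytes, vec![0, 1]);
    }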

View File

@ -1,6 +1,6 @@
use super::types::attestation_record::MIN_SSZ_ATTESTION_RECORD_LENGTH as MIN_LENGTH;
use super::ssz::LENGTH_BYTES;
use super::ssz::decode::decode_length; use super::ssz::decode::decode_length;
use super::ssz::LENGTH_BYTES;
use super::types::attestation_record::MIN_SSZ_ATTESTION_RECORD_LENGTH as MIN_LENGTH;
#[derive(Debug, PartialEq)] #[derive(Debug, PartialEq)]
pub enum AttestationSplitError { pub enum AttestationSplitError {
@ -9,9 +9,10 @@ pub enum AttestationSplitError {
/// Given some ssz slice, find the bounds of each serialized AttestationRecord and return a vec of /// Given some ssz slice, find the bounds of each serialized AttestationRecord and return a vec of
/// slices point to each. /// slices point to each.
pub fn split_all_attestations<'a>(full_ssz: &'a [u8], index: usize) pub fn split_all_attestations<'a>(
-> Result<Vec<&'a [u8]>, AttestationSplitError> full_ssz: &'a [u8],
{ index: usize,
) -> Result<Vec<&'a [u8]>, AttestationSplitError> {
let mut v = vec![]; let mut v = vec![];
let mut index = index; let mut index = index;
while index < full_ssz.len() - 1 { while index < full_ssz.len() - 1 {
@ -24,9 +25,10 @@ pub fn split_all_attestations<'a>(full_ssz: &'a [u8], index: usize)
/// Given some ssz slice, find the bounds of one serialized AttestationRecord /// Given some ssz slice, find the bounds of one serialized AttestationRecord
/// and return a slice pointing to that. /// and return a slice pointing to that.
pub fn split_one_attestation(full_ssz: &[u8], index: usize) pub fn split_one_attestation(
-> Result<(&[u8], usize), AttestationSplitError> full_ssz: &[u8],
{ index: usize,
) -> Result<(&[u8], usize), AttestationSplitError> {
if full_ssz.len() < MIN_LENGTH { if full_ssz.len() < MIN_LENGTH {
return Err(AttestationSplitError::TooShort); return Err(AttestationSplitError::TooShort);
} }
@ -34,15 +36,11 @@ pub fn split_one_attestation(full_ssz: &[u8], index: usize)
let hashes_len = decode_length(full_ssz, index + 10, LENGTH_BYTES) let hashes_len = decode_length(full_ssz, index + 10, LENGTH_BYTES)
.map_err(|_| AttestationSplitError::TooShort)?; .map_err(|_| AttestationSplitError::TooShort)?;
let bitfield_len = decode_length( let bitfield_len = decode_length(full_ssz, index + hashes_len + 46, LENGTH_BYTES)
full_ssz, index + hashes_len + 46,
LENGTH_BYTES)
.map_err(|_| AttestationSplitError::TooShort)?; .map_err(|_| AttestationSplitError::TooShort)?;
// Subtract one because the min length assumes 1 byte of bitfield // Subtract one because the min length assumes 1 byte of bitfield
let len = MIN_LENGTH - 1 let len = MIN_LENGTH - 1 + hashes_len + bitfield_len;
+ hashes_len
+ bitfield_len;
if full_ssz.len() < index + len { if full_ssz.len() < index + len {
return Err(AttestationSplitError::TooShort); return Err(AttestationSplitError::TooShort);
@ -53,17 +51,10 @@ pub fn split_one_attestation(full_ssz: &[u8], index: usize)
#[cfg(test)] #[cfg(test)]
mod tests { mod tests {
use super::*;
use super::super::types::{
AttestationRecord,
Hash256,
Bitfield,
};
use super::super::bls::AggregateSignature; use super::super::bls::AggregateSignature;
use super::super::ssz::{ use super::super::ssz::{Decodable, SszStream};
SszStream, use super::super::types::{AttestationRecord, Bitfield, Hash256};
Decodable, use super::*;
};
fn get_two_records() -> Vec<AttestationRecord> { fn get_two_records() -> Vec<AttestationRecord> {
let a = AttestationRecord { let a = AttestationRecord {
@ -95,7 +86,6 @@ mod tests {
let a = ars[0].clone(); let a = ars[0].clone();
let b = ars[1].clone(); let b = ars[1].clone();
/* /*
* Test split one * Test split one
*/ */
@ -104,8 +94,7 @@ mod tests {
let ssz = ssz_stream.drain(); let ssz = ssz_stream.drain();
let (a_ssz, i) = split_one_attestation(&ssz, 0).unwrap(); let (a_ssz, i) = split_one_attestation(&ssz, 0).unwrap();
assert_eq!(i, ssz.len()); assert_eq!(i, ssz.len());
let (decoded_a, _) = AttestationRecord::ssz_decode(a_ssz, 0) let (decoded_a, _) = AttestationRecord::ssz_decode(a_ssz, 0).unwrap();
.unwrap();
assert_eq!(a, decoded_a); assert_eq!(a, decoded_a);
/* /*
@ -116,12 +105,8 @@ mod tests {
ssz_stream.append(&b); ssz_stream.append(&b);
let ssz = ssz_stream.drain(); let ssz = ssz_stream.drain();
let ssz_vec = split_all_attestations(&ssz, 0).unwrap(); let ssz_vec = split_all_attestations(&ssz, 0).unwrap();
let (decoded_a, _) = let (decoded_a, _) = AttestationRecord::ssz_decode(ssz_vec[0], 0).unwrap();
AttestationRecord::ssz_decode(ssz_vec[0], 0) let (decoded_b, _) = AttestationRecord::ssz_decode(ssz_vec[1], 0).unwrap();
.unwrap();
let (decoded_b, _) =
AttestationRecord::ssz_decode(ssz_vec[1], 0)
.unwrap();
assert_eq!(a, decoded_a); assert_eq!(a, decoded_a);
assert_eq!(b, decoded_b); assert_eq!(b, decoded_b);
@ -136,4 +121,3 @@ mod tests {
assert!(split_all_attestations(&ssz, 0).is_err()); assert!(split_all_attestations(&ssz, 0).is_err());
} }
} }

View File

@ -1,7 +1,7 @@
extern crate bls; extern crate bls;
extern crate hashing; extern crate hashing;
extern crate types;
extern crate ssz; extern crate ssz;
extern crate types;
pub mod attestation_ssz_splitter; pub mod attestation_ssz_splitter;
pub mod ssz_beacon_block; pub mod ssz_beacon_block;

View File

@@ -1,12 +1,6 @@
-use super::ssz::decode::{
-    decode_length,
-    Decodable,
-};
 use super::hashing::canonical_hash;
-use super::types::beacon_block::{
-    MIN_SSZ_BLOCK_LENGTH,
-    MAX_SSZ_BLOCK_LENGTH,
-};
+use super::ssz::decode::{decode_length, Decodable};
+use super::types::beacon_block::{MAX_SSZ_BLOCK_LENGTH, MIN_SSZ_BLOCK_LENGTH};
#[derive(Debug, PartialEq)] #[derive(Debug, PartialEq)]
pub enum SszBeaconBlockError { pub enum SszBeaconBlockError {
@ -61,9 +55,7 @@ impl<'a> SszBeaconBlock<'a> {
/// The returned `SszBeaconBlock` instance will contain a `len` field which can be used to determine /// The returned `SszBeaconBlock` instance will contain a `len` field which can be used to determine
/// how many bytes were read from the slice. In the case of multiple, sequentually serialized /// how many bytes were read from the slice. In the case of multiple, sequentually serialized
/// blocks `len` can be used to assume the location of the next serialized block. /// blocks `len` can be used to assume the location of the next serialized block.
pub fn from_slice(vec: &'a [u8]) pub fn from_slice(vec: &'a [u8]) -> Result<Self, SszBeaconBlockError> {
-> Result<Self, SszBeaconBlockError>
{
let untrimmed_ssz = &vec[..]; let untrimmed_ssz = &vec[..];
/* /*
@ -83,21 +75,18 @@ impl<'a> SszBeaconBlock<'a> {
/* /*
* Determine how many bytes are used to store ancestor hashes. * Determine how many bytes are used to store ancestor hashes.
*/ */
let ancestors_position = let ancestors_position = SLOT_BYTES + RANDAO_REVEAL_BYTES + POW_CHAIN_REF_BYTES;
SLOT_BYTES +
RANDAO_REVEAL_BYTES +
POW_CHAIN_REF_BYTES;
let ancestors_len = decode_length(untrimmed_ssz, ancestors_position, LENGTH_PREFIX_BYTES) let ancestors_len = decode_length(untrimmed_ssz, ancestors_position, LENGTH_PREFIX_BYTES)
.map_err(|_| SszBeaconBlockError::TooShort)?; .map_err(|_| SszBeaconBlockError::TooShort)?;
/* /*
* Determine how many bytes are used to store attestation records. * Determine how many bytes are used to store attestation records.
*/ */
let attestations_position = let attestations_position = ancestors_position + LENGTH_PREFIX_BYTES + ancestors_len + // end of ancestor bytes
ancestors_position + LENGTH_PREFIX_BYTES + ancestors_len + // end of ancestor bytes
ACTIVE_STATE_BYTES + ACTIVE_STATE_BYTES +
CRYSTALLIZED_STATE_BYTES; CRYSTALLIZED_STATE_BYTES;
let attestations_len = decode_length(untrimmed_ssz, attestations_position, LENGTH_PREFIX_BYTES) let attestations_len =
decode_length(untrimmed_ssz, attestations_position, LENGTH_PREFIX_BYTES)
.map_err(|_| SszBeaconBlockError::TooShort)?; .map_err(|_| SszBeaconBlockError::TooShort)?;
/* /*
@ -116,7 +105,7 @@ impl<'a> SszBeaconBlock<'a> {
return Err(SszBeaconBlockError::TooShort); return Err(SszBeaconBlockError::TooShort);
} }
Ok(Self{ Ok(Self {
ssz: &untrimmed_ssz[0..block_ssz_len], ssz: &untrimmed_ssz[0..block_ssz_len],
block_ssz_len, block_ssz_len,
ancestors_position, ancestors_position,
@ -128,8 +117,12 @@ impl<'a> SszBeaconBlock<'a> {
}) })
} }
pub fn len(&self) -> usize { self.ssz.len() } pub fn len(&self) -> usize {
pub fn is_empty(&self) -> bool { self.ssz.is_empty() } self.ssz.len()
}
pub fn is_empty(&self) -> bool {
self.ssz.is_empty()
}
/// Returns this block as ssz. /// Returns this block as ssz.
/// ///
@ -177,9 +170,7 @@ impl<'a> SszBeaconBlock<'a> {
/// Return the `pow_chain_reference` field. /// Return the `pow_chain_reference` field.
pub fn pow_chain_reference(&self) -> &[u8] { pub fn pow_chain_reference(&self) -> &[u8] {
let start = let start = SLOT_BYTES + RANDAO_REVEAL_BYTES;
SLOT_BYTES +
RANDAO_REVEAL_BYTES;
&self.ssz[start..start + POW_CHAIN_REF_BYTES] &self.ssz[start..start + POW_CHAIN_REF_BYTES]
} }
@ -198,8 +189,7 @@ impl<'a> SszBeaconBlock<'a> {
/// Return the `active_state_root` field. /// Return the `active_state_root` field.
pub fn cry_state_root(&self) -> &[u8] { pub fn cry_state_root(&self) -> &[u8] {
let start = let start =
self.ancestors_position + LENGTH_PREFIX_BYTES + self.ancestors_len + self.ancestors_position + LENGTH_PREFIX_BYTES + self.ancestors_len + ACTIVE_STATE_BYTES;
ACTIVE_STATE_BYTES;
&self.ssz[start..(start + 32)] &self.ssz[start..(start + 32)]
} }
@ -222,18 +212,13 @@ impl<'a> SszBeaconBlock<'a> {
} }
} }
 #[cfg(test)]
 mod tests {
-    use super::*;
-    use super::super::types::{
-        AttestationRecord,
-        BeaconBlock,
-        SpecialRecord,
-    };
+    use super::super::ssz::encode::encode_length;
     use super::super::ssz::SszStream;
     use super::super::types::Hash256;
-    use super::super::ssz::encode::encode_length;
+    use super::super::types::{AttestationRecord, BeaconBlock, SpecialRecord};
+    use super::*;
fn get_block_ssz(b: &BeaconBlock) -> Vec<u8> { fn get_block_ssz(b: &BeaconBlock) -> Vec<u8> {
let mut ssz_stream = SszStream::new(); let mut ssz_stream = SszStream::new();
@ -259,7 +244,6 @@ mod tests {
b.attestations = vec![]; b.attestations = vec![];
let ssz = get_block_ssz(&b); let ssz = get_block_ssz(&b);
assert!(SszBeaconBlock::from_slice(&ssz[..]).is_ok()); assert!(SszBeaconBlock::from_slice(&ssz[..]).is_ok());
} }
@ -309,8 +293,8 @@ mod tests {
// will tell us if the hash changes, not that it matches some // will tell us if the hash changes, not that it matches some
// canonical reference. // canonical reference.
let expected_hash = [ let expected_hash = [
11, 181, 149, 114, 248, 15, 46, 0, 106, 135, 158, 31, 15, 194, 149, 176, 11, 181, 149, 114, 248, 15, 46, 0, 106, 135, 158, 31, 15, 194, 149, 176, 43, 110, 154,
43, 110, 154, 26, 253, 67, 18, 139, 250, 84, 144, 219, 3, 208, 50, 145 26, 253, 67, 18, 139, 250, 84, 144, 219, 3, 208, 50, 145,
]; ];
assert_eq!(hash, expected_hash); assert_eq!(hash, expected_hash);
@ -376,7 +360,10 @@ mod tests {
let serialized = get_block_ssz(&block); let serialized = get_block_ssz(&block);
let ssz_block = SszBeaconBlock::from_slice(&serialized).unwrap(); let ssz_block = SszBeaconBlock::from_slice(&serialized).unwrap();
assert_eq!(ssz_block.parent_hash().unwrap(), &Hash256::from("cats".as_bytes()).to_vec()[..]); assert_eq!(
ssz_block.parent_hash().unwrap(),
&Hash256::from("cats".as_bytes()).to_vec()[..]
);
} }
#[test] #[test]
@ -459,7 +446,10 @@ mod tests {
let serialized = get_block_ssz(&block); let serialized = get_block_ssz(&block);
let ssz_block = SszBeaconBlock::from_slice(&serialized).unwrap(); let ssz_block = SszBeaconBlock::from_slice(&serialized).unwrap();
assert_eq!(ssz_block.pow_chain_reference(), &reference_hash.to_vec()[..]); assert_eq!(
ssz_block.pow_chain_reference(),
&reference_hash.to_vec()[..]
);
} }
#[test] #[test]

View File

@ -1,7 +1,6 @@
/// A library for performing deterministic, pseudo-random shuffling on a vector. /// A library for performing deterministic, pseudo-random shuffling on a vector.
/// ///
/// This library is designed to confirm to the Ethereum 2.0 specification. /// This library is designed to confirm to the Ethereum 2.0 specification.
extern crate hashing; extern crate hashing;
mod rng; mod rng;
@ -19,11 +18,7 @@ pub enum ShuffleErr {
/// of the supplied `seed`. /// of the supplied `seed`.
/// ///
/// This is a Fisher-Yates-Durtstenfeld shuffle. /// This is a Fisher-Yates-Durtstenfeld shuffle.
pub fn shuffle<T>( pub fn shuffle<T>(seed: &[u8], mut list: Vec<T>) -> Result<Vec<T>, ShuffleErr> {
seed: &[u8],
mut list: Vec<T>)
-> Result<Vec<T>, ShuffleErr>
{
let mut rng = ShuffleRng::new(seed); let mut rng = ShuffleRng::new(seed);
if list.len() > rng.rand_max as usize { if list.len() > rng.rand_max as usize {
@ -42,16 +37,15 @@ pub fn shuffle<T>(
Ok(list) Ok(list)
} }
#[cfg(test)] #[cfg(test)]
mod tests { mod tests {
extern crate yaml_rust; extern crate yaml_rust;
use super::*; use self::yaml_rust::yaml;
use super::hashing::canonical_hash; use super::hashing::canonical_hash;
use super::*;
use std::fs::File; use std::fs::File;
use std::io::prelude::*; use std::io::prelude::*;
use self::yaml_rust::yaml;
#[test] #[test]
fn test_shuffling() { fn test_shuffling() {

View File

@ -35,10 +35,7 @@ impl ShuffleRng {
self.rehash_seed(); self.rehash_seed();
self.rand() self.rand()
} else { } else {
int_from_byte_slice( int_from_byte_slice(&self.seed, self.idx - RAND_BYTES)
&self.seed,
self.idx - RAND_BYTES,
)
} }
} }
@ -61,48 +58,33 @@ impl ShuffleRng {
/// interprets those bytes as a 24 bit big-endian integer. /// interprets those bytes as a 24 bit big-endian integer.
/// Returns that integer. /// Returns that integer.
fn int_from_byte_slice(source: &[u8], offset: usize) -> u32 { fn int_from_byte_slice(source: &[u8], offset: usize) -> u32 {
( (u32::from(source[offset + 2]))
u32::from(source[offset + 2])) | | (u32::from(source[offset + 1]) << 8)
(u32::from(source[offset + 1]) << 8) | | (u32::from(source[offset]) << 16)
(u32::from(source[offset ]) << 16
)
} }
#[cfg(test)] #[cfg(test)]
mod tests { mod tests {
use super::*; use super::*;
#[test] #[test]
fn test_shuffling_int_from_slice() { fn test_shuffling_int_from_slice() {
let mut x = int_from_byte_slice( let mut x = int_from_byte_slice(&[0, 0, 1], 0);
&[0, 0, 1],
0);
assert_eq!((x as u32), 1); assert_eq!((x as u32), 1);
x = int_from_byte_slice( x = int_from_byte_slice(&[0, 1, 1], 0);
&[0, 1, 1],
0);
assert_eq!(x, 257); assert_eq!(x, 257);
x = int_from_byte_slice( x = int_from_byte_slice(&[1, 1, 1], 0);
&[1, 1, 1],
0);
assert_eq!(x, 65793); assert_eq!(x, 65793);
x = int_from_byte_slice( x = int_from_byte_slice(&[255, 1, 1], 0);
&[255, 1, 1],
0);
assert_eq!(x, 16711937); assert_eq!(x, 16711937);
x = int_from_byte_slice( x = int_from_byte_slice(&[255, 255, 255], 0);
&[255, 255, 255],
0);
assert_eq!(x, 16777215); assert_eq!(x, 16777215);
x = int_from_byte_slice( x = int_from_byte_slice(&[0x8f, 0xbb, 0xc7], 0);
&[0x8f, 0xbb, 0xc7],
0);
assert_eq!(x, 9419719); assert_eq!(x, 9419719);
} }
@ -110,8 +92,8 @@ mod tests {
fn test_shuffling_hash_fn() { fn test_shuffling_hash_fn() {
let digest = canonical_hash(&canonical_hash(&"4kn4driuctg8".as_bytes())); // double-hash is intentional let digest = canonical_hash(&canonical_hash(&"4kn4driuctg8".as_bytes())); // double-hash is intentional
let expected = [ let expected = [
103, 21, 99, 143, 60, 75, 116, 81, 248, 175, 190, 114, 54, 65, 23, 8, 3, 116, 103, 21, 99, 143, 60, 75, 116, 81, 248, 175, 190, 114, 54, 65, 23, 8, 3, 116, 160, 178,
160, 178, 7, 75, 63, 47, 180, 239, 191, 247, 57, 194, 144, 88 7, 75, 63, 47, 180, 239, 191, 247, 57, 194, 144, 88,
]; ];
assert_eq!(digest.len(), expected.len()); assert_eq!(digest.len(), expected.len());
assert_eq!(digest, expected) assert_eq!(digest, expected)

View File

@ -25,9 +25,8 @@ pub fn attestation_parent_hashes(
block_slot: u64, block_slot: u64,
attestation_slot: u64, attestation_slot: u64,
current_hashes: &[Hash256], current_hashes: &[Hash256],
oblique_hashes: &[Hash256]) oblique_hashes: &[Hash256],
-> Result<Vec<Hash256>, ParentHashesError> ) -> Result<Vec<Hash256>, ParentHashesError> {
{
// This cast places a limit on cycle_length. If you change it, check math // This cast places a limit on cycle_length. If you change it, check math
// for overflow. // for overflow.
let cycle_length: u64 = u64::from(cycle_length); let cycle_length: u64 = u64::from(cycle_length);
@ -65,20 +64,18 @@ pub fn attestation_parent_hashes(
* Arithmetic is: * Arithmetic is:
* start + cycle_length - oblique_hashes.len() * start + cycle_length - oblique_hashes.len()
*/ */
let end = start.checked_add(cycle_length) let end = start
.checked_add(cycle_length)
.and_then(|x| x.checked_sub(oblique_hashes.len() as u64)) .and_then(|x| x.checked_sub(oblique_hashes.len() as u64))
.ok_or(ParentHashesError::IntWrapping)?; .ok_or(ParentHashesError::IntWrapping)?;
let mut hashes = Vec::new(); let mut hashes = Vec::new();
hashes.extend_from_slice( hashes.extend_from_slice(&current_hashes[(start as usize)..(end as usize)]);
&current_hashes[(start as usize)..(end as usize)]);
hashes.extend_from_slice(oblique_hashes); hashes.extend_from_slice(oblique_hashes);
Ok(hashes) Ok(hashes)
} }
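The `checked_add`/`checked_sub` chain above is how this commit surfaces integer wrap-around as a typed error instead of silently wrapping. A self-contained sketch of the same pattern, using a made-up error type rather than `ParentHashesError`:

#[derive(Debug, PartialEq)]
enum WindowError {
    IntWrapping,
}

// Compute `start + cycle_length - obliques` without silently wrapping,
// mirroring the checked arithmetic in `attestation_parent_hashes`.
fn window_end(start: u64, cycle_length: u64, obliques: u64) -> Result<u64, WindowError> {
    start
        .checked_add(cycle_length)
        .and_then(|x| x.checked_sub(obliques))
        .ok_or(WindowError::IntWrapping)
}

fn main() {
    assert_eq!(window_end(8, 8, 2), Ok(14));
    // Overflow and underflow both surface as errors.
    assert_eq!(window_end(u64::max_value(), 1, 0), Err(WindowError::IntWrapping));
    assert_eq!(window_end(0, 1, 5), Err(WindowError::IntWrapping));
}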
#[cfg(test)] #[cfg(test)]
mod tests { mod tests {
use super::*; use super::*;
@ -106,7 +103,8 @@ mod tests {
block_slot, block_slot,
attestation_slot, attestation_slot,
&current_hashes, &current_hashes,
&oblique_hashes); &oblique_hashes,
);
assert!(result.is_ok()); assert!(result.is_ok());
let result = result.unwrap(); let result = result.unwrap();
assert_eq!(result.len(), cycle_length as usize); assert_eq!(result.len(), cycle_length as usize);
@ -131,7 +129,8 @@ mod tests {
block_slot, block_slot,
attestation_slot, attestation_slot,
&current_hashes, &current_hashes,
&oblique_hashes); &oblique_hashes,
);
assert!(result.is_ok()); assert!(result.is_ok());
let result = result.unwrap(); let result = result.unwrap();
assert_eq!(result.len(), cycle_length as usize); assert_eq!(result.len(), cycle_length as usize);
@ -156,7 +155,8 @@ mod tests {
block_slot, block_slot,
attestation_slot, attestation_slot,
&current_hashes, &current_hashes,
&oblique_hashes); &oblique_hashes,
);
assert!(result.is_ok()); assert!(result.is_ok());
let result = result.unwrap(); let result = result.unwrap();
assert_eq!(result.len(), cycle_length as usize); assert_eq!(result.len(), cycle_length as usize);
@ -179,7 +179,8 @@ mod tests {
block_slot, block_slot,
attestation_slot, attestation_slot,
&current_hashes, &current_hashes,
&oblique_hashes); &oblique_hashes,
);
let result = result.unwrap(); let result = result.unwrap();
assert_eq!(result.len(), cycle_length as usize); assert_eq!(result.len(), cycle_length as usize);
let expected_result = get_range_of_hashes(7, 15); let expected_result = get_range_of_hashes(7, 15);
@ -201,7 +202,8 @@ mod tests {
block_slot, block_slot,
attestation_slot, attestation_slot,
&current_hashes, &current_hashes,
&oblique_hashes); &oblique_hashes,
);
assert!(result.is_err()); assert!(result.is_err());
} }
@ -220,7 +222,8 @@ mod tests {
block_slot, block_slot,
attestation_slot, attestation_slot,
&current_hashes, &current_hashes,
&oblique_hashes); &oblique_hashes,
);
assert!(result.is_err()); assert!(result.is_err());
} }
} }

View File

@ -1,32 +1,16 @@
use std::collections::HashSet; use super::attestation_parent_hashes::{attestation_parent_hashes, ParentHashesError};
use std::sync::Arc; use super::db::stores::{BeaconBlockAtSlotError, BeaconBlockStore, ValidatorStore};
use super::types::{ use super::db::{ClientDB, DBError};
AttestationRecord,
AttesterMap,
};
use super::attestation_parent_hashes::{
attestation_parent_hashes,
ParentHashesError,
};
use super::db::{
ClientDB,
DBError
};
use super::db::stores::{
BeaconBlockStore,
BeaconBlockAtSlotError,
ValidatorStore,
};
use super::types::{
Hash256,
};
use super::message_generation::generate_signed_message; use super::message_generation::generate_signed_message;
use super::signature_verification::{ use super::signature_verification::{
verify_aggregate_signature_for_indices, verify_aggregate_signature_for_indices, SignatureVerificationError,
SignatureVerificationError,
}; };
use super::types::Hash256;
use super::types::{AttestationRecord, AttesterMap};
use std::collections::HashSet;
use std::sync::Arc;
#[derive(Debug,PartialEq)] #[derive(Debug, PartialEq)]
pub enum AttestationValidationError { pub enum AttestationValidationError {
ParentSlotTooHigh, ParentSlotTooHigh,
ParentSlotTooLow, ParentSlotTooLow,
@ -52,7 +36,8 @@ pub enum AttestationValidationError {
/// The context against which some attestation should be validated. /// The context against which some attestation should be validated.
pub struct AttestationValidationContext<T> pub struct AttestationValidationContext<T>
where T: ClientDB + Sized where
T: ClientDB + Sized,
{ {
/// The slot as determined by the system time. /// The slot as determined by the system time.
pub block_slot: u64, pub block_slot: u64,
@ -73,7 +58,8 @@ pub struct AttestationValidationContext<T>
} }
impl<T> AttestationValidationContext<T> impl<T> AttestationValidationContext<T>
where T: ClientDB where
T: ClientDB,
{ {
/// Validate a (fully deserialized) AttestationRecord against this context. /// Validate a (fully deserialized) AttestationRecord against this context.
/// ///
@ -82,9 +68,10 @@ impl<T> AttestationValidationContext<T>
/// ///
/// The attestation's aggregate signature will be verified, therefore the function must be able to /// The attestation's aggregate signature will be verified, therefore the function must be able to
/// access all required validation public keys via the `validator_store`. /// access all required validation public keys via the `validator_store`.
pub fn validate_attestation(&self, a: &AttestationRecord) pub fn validate_attestation(
-> Result<HashSet<usize>, AttestationValidationError> &self,
{ a: &AttestationRecord,
) -> Result<HashSet<usize>, AttestationValidationError> {
/* /*
* The attestation slot must be less than or equal to the parent of the slot of the block * The attestation slot must be less than or equal to the parent of the slot of the block
* that contained the attestation. * that contained the attestation.
@ -97,8 +84,10 @@ impl<T> AttestationValidationContext<T>
* The slot of this attestation must not be more than cycle_length + 1 distance * The slot of this attestation must not be more than cycle_length + 1 distance
* from the parent_slot of block that contained it. * from the parent_slot of block that contained it.
*/ */
if a.slot < self.parent_block_slot if a.slot < self
.saturating_sub(u64::from(self.cycle_length).saturating_add(1)) { .parent_block_slot
.saturating_sub(u64::from(self.cycle_length).saturating_add(1))
{
return Err(AttestationValidationError::ParentSlotTooLow); return Err(AttestationValidationError::ParentSlotTooLow);
} }
@ -124,16 +113,16 @@ impl<T> AttestationValidationContext<T>
* This is an array mapping the order that validators will appear in the bitfield to the * This is an array mapping the order that validators will appear in the bitfield to the
* canonical index of a validator. * canonical index of a validator.
*/ */
let attestation_indices = self.attester_map.get(&(a.slot, a.shard_id)) let attestation_indices = self
.attester_map
.get(&(a.slot, a.shard_id))
.ok_or(AttestationValidationError::BadAttesterMap)?; .ok_or(AttestationValidationError::BadAttesterMap)?;
/* /*
* The bitfield must be no longer than the minimum required to represent each validator in the * The bitfield must be no longer than the minimum required to represent each validator in the
* attestation indices for this slot and shard id. * attestation indices for this slot and shard id.
*/ */
if a.attester_bitfield.num_bytes() != if a.attester_bitfield.num_bytes() != bytes_for_bits(attestation_indices.len()) {
bytes_for_bits(attestation_indices.len())
{
return Err(AttestationValidationError::BadBitfieldLength); return Err(AttestationValidationError::BadBitfieldLength);
} }
@ -145,7 +134,7 @@ impl<T> AttestationValidationContext<T>
* refer to the same AttestationRecord. * refer to the same AttestationRecord.
*/ */
if a.attester_bitfield.len() > attestation_indices.len() { if a.attester_bitfield.len() > attestation_indices.len() {
return Err(AttestationValidationError::InvalidBitfieldEndBits) return Err(AttestationValidationError::InvalidBitfieldEndBits);
} }
/* /*
@ -156,7 +145,8 @@ impl<T> AttestationValidationContext<T>
self.block_slot, self.block_slot,
a.slot, a.slot,
&self.recent_block_hashes, &self.recent_block_hashes,
&a.oblique_parent_hashes)?; &a.oblique_parent_hashes,
)?;
/* /*
* The specified justified block hash supplied in the attestation must be in the chain at * The specified justified block hash supplied in the attestation must be in the chain at
@ -166,11 +156,15 @@ impl<T> AttestationValidationContext<T>
* block store (database) we iterate back through the blocks until we find (or fail to * block store (database) we iterate back through the blocks until we find (or fail to
* find) the justified block hash referenced in the attestation record. * find) the justified block hash referenced in the attestation record.
*/ */
let latest_parent_hash = parent_hashes.last() let latest_parent_hash = parent_hashes
.last()
.ok_or(AttestationValidationError::BadCurrentHashes)?; .ok_or(AttestationValidationError::BadCurrentHashes)?;
match self.block_store.block_at_slot(&latest_parent_hash, a.justified_slot)? { match self
.block_store
.block_at_slot(&latest_parent_hash, a.justified_slot)?
{
Some((ref hash, _)) if *hash == a.justified_block_hash.to_vec() => (), Some((ref hash, _)) if *hash == a.justified_block_hash.to_vec() => (),
_ => return Err(AttestationValidationError::InvalidJustifiedBlockHash) _ => return Err(AttestationValidationError::InvalidJustifiedBlockHash),
}; };
/* /*
@ -182,16 +176,17 @@ impl<T> AttestationValidationContext<T>
&parent_hashes, &parent_hashes,
a.shard_id, a.shard_id,
&a.shard_block_hash, &a.shard_block_hash,
a.justified_slot) a.justified_slot,
)
}; };
let voted_hashset = let voted_hashset = verify_aggregate_signature_for_indices(
verify_aggregate_signature_for_indices(
&signed_message, &signed_message,
&a.aggregate_sig, &a.aggregate_sig,
&attestation_indices, &attestation_indices,
&a.attester_bitfield, &a.attester_bitfield,
&self.validator_store)?; &self.validator_store,
)?;
/* /*
* If the hashset of voters is None, the signature verification failed. * If the hashset of voters is None, the signature verification failed.
@ -210,16 +205,11 @@ fn bytes_for_bits(bits: usize) -> usize {
impl From<ParentHashesError> for AttestationValidationError { impl From<ParentHashesError> for AttestationValidationError {
fn from(e: ParentHashesError) -> Self { fn from(e: ParentHashesError) -> Self {
match e { match e {
ParentHashesError::BadCurrentHashes ParentHashesError::BadCurrentHashes => AttestationValidationError::BadCurrentHashes,
=> AttestationValidationError::BadCurrentHashes, ParentHashesError::BadObliqueHashes => AttestationValidationError::BadObliqueHashes,
ParentHashesError::BadObliqueHashes ParentHashesError::SlotTooLow => AttestationValidationError::BlockSlotTooLow,
=> AttestationValidationError::BadObliqueHashes, ParentHashesError::SlotTooHigh => AttestationValidationError::BlockSlotTooHigh,
ParentHashesError::SlotTooLow ParentHashesError::IntWrapping => AttestationValidationError::IntWrapping,
=> AttestationValidationError::BlockSlotTooLow,
ParentHashesError::SlotTooHigh
=> AttestationValidationError::BlockSlotTooHigh,
ParentHashesError::IntWrapping
=> AttestationValidationError::IntWrapping
} }
} }
} }
@ -228,8 +218,7 @@ impl From<BeaconBlockAtSlotError> for AttestationValidationError {
fn from(e: BeaconBlockAtSlotError) -> Self { fn from(e: BeaconBlockAtSlotError) -> Self {
match e { match e {
BeaconBlockAtSlotError::DBError(s) => AttestationValidationError::DBError(s), BeaconBlockAtSlotError::DBError(s) => AttestationValidationError::DBError(s),
_ => AttestationValidationError::InvalidJustifiedBlockHash _ => AttestationValidationError::InvalidJustifiedBlockHash,
} }
} }
} }
@ -243,14 +232,16 @@ impl From<DBError> for AttestationValidationError {
impl From<SignatureVerificationError> for AttestationValidationError { impl From<SignatureVerificationError> for AttestationValidationError {
fn from(e: SignatureVerificationError) -> Self { fn from(e: SignatureVerificationError) -> Self {
match e { match e {
SignatureVerificationError::BadValidatorIndex SignatureVerificationError::BadValidatorIndex => {
=> AttestationValidationError::BadAttesterMap, AttestationValidationError::BadAttesterMap
SignatureVerificationError::PublicKeyCorrupt }
=> AttestationValidationError::PublicKeyCorrupt, SignatureVerificationError::PublicKeyCorrupt => {
SignatureVerificationError::NoPublicKeyForValidator AttestationValidationError::PublicKeyCorrupt
=> AttestationValidationError::NoPublicKeyForValidator, }
SignatureVerificationError::DBError(s) SignatureVerificationError::NoPublicKeyForValidator => {
=> AttestationValidationError::DBError(s), AttestationValidationError::NoPublicKeyForValidator
}
SignatureVerificationError::DBError(s) => AttestationValidationError::DBError(s),
} }
} }
} }
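Putting the validation flow above together: a caller holds a long-lived context and validates each incoming record against it. The sketch below is schematic only; it assumes the context and attestation have already been built (the test helpers later in this diff show one way to do that) and simply demonstrates how the result is typically consumed.

extern crate db;
extern crate types;
extern crate validation;

use db::MemoryDB;
use types::AttestationRecord;
use validation::attestation_validation::{
    AttestationValidationContext, AttestationValidationError,
};

// Validate one record against an existing context and report the outcome.
// `Ok(voters)` carries the set of attestation indices whose signatures were
// part of the aggregate.
fn check_attestation(
    context: &AttestationValidationContext<MemoryDB>,
    attestation: &AttestationRecord,
) -> bool {
    match context.validate_attestation(attestation) {
        Ok(voters) => {
            println!("attestation valid; {} validators voted", voters.len());
            true
        }
        Err(AttestationValidationError::BadAggregateSignature) => {
            println!("aggregate signature did not verify against the claimed voters");
            false
        }
        Err(e) => {
            println!("attestation rejected: {:?}", e);
            false
        }
    }
}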

View File

@ -1,12 +1,12 @@
extern crate db;
extern crate bls; extern crate bls;
extern crate db;
extern crate hashing; extern crate hashing;
extern crate ssz; extern crate ssz;
extern crate ssz_helpers; extern crate ssz_helpers;
extern crate types; extern crate types;
pub mod attestation_validation;
mod attestation_parent_hashes; mod attestation_parent_hashes;
pub mod attestation_validation;
pub mod block_validation; pub mod block_validation;
mod message_generation; mod message_generation;
mod signature_verification; mod signature_verification;

View File

@ -1,5 +1,5 @@
use super::ssz::SszStream;
use super::hashing::canonical_hash; use super::hashing::canonical_hash;
use super::ssz::SszStream;
use super::types::Hash256; use super::types::Hash256;
/// Generates the message used to validate the signature provided with an AttestationRecord. /// Generates the message used to validate the signature provided with an AttestationRecord.
@ -10,9 +10,8 @@ pub fn generate_signed_message(
parent_hashes: &[Hash256], parent_hashes: &[Hash256],
shard_id: u16, shard_id: u16,
shard_block_hash: &Hash256, shard_block_hash: &Hash256,
justified_slot: u64) justified_slot: u64,
-> Vec<u8> ) -> Vec<u8> {
{
/* /*
* Note: it's a little risky here to use SSZ, because the encoding is not necessarily SSZ * Note: it's a little risky here to use SSZ, because the encoding is not necessarily SSZ
* (for example, SSZ might change whilst this doesn't). * (for example, SSZ might change whilst this doesn't).
@ -39,9 +38,7 @@ mod tests {
#[test] #[test]
fn test_generate_signed_message() { fn test_generate_signed_message() {
let slot = 93; let slot = 93;
let parent_hashes: Vec<Hash256> = (0..12) let parent_hashes: Vec<Hash256> = (0..12).map(|i| Hash256::from(i as u64)).collect();
.map(|i| Hash256::from(i as u64))
.collect();
let shard_id = 15; let shard_id = 15;
let shard_block_hash = Hash256::from("shard_block_hash".as_bytes()); let shard_block_hash = Hash256::from("shard_block_hash".as_bytes());
let justified_slot = 18; let justified_slot = 18;
@ -51,7 +48,8 @@ mod tests {
&parent_hashes, &parent_hashes,
shard_id, shard_id,
&shard_block_hash, &shard_block_hash,
justified_slot); justified_slot,
);
/* /*
* Note: this is not some well-known test vector, it's simply the result of running * Note: this is not some well-known test vector, it's simply the result of running
@ -60,9 +58,8 @@ mod tests {
* Once well-known test vectors are established, they should be placed here. * Once well-known test vectors are established, they should be placed here.
*/ */
let expected = vec![ let expected = vec![
149, 99, 94, 229, 72, 144, 233, 14, 164, 16, 143, 53, 94, 48, 149, 99, 94, 229, 72, 144, 233, 14, 164, 16, 143, 53, 94, 48, 118, 179, 33, 181, 172,
118, 179, 33, 181, 172, 215, 2, 191, 176, 18, 188, 172, 137, 215, 2, 191, 176, 18, 188, 172, 137, 178, 236, 66, 74, 120,
178, 236, 66, 74, 120
]; ];
assert_eq!(output, expected); assert_eq!(output, expected);
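For orientation, a brief crate-internal sketch of calling this helper. The leading `slot` parameter sits outside the hunk above, so its position is inferred from the test; treat the argument order as an assumption rather than a definitive reference.

use super::types::Hash256;

// Build the byte message each attester signs over; the output is the
// canonical hash of the SSZ-encoded fields, ready to hand to a BLS signer.
fn example_message() -> Vec<u8> {
    let parent_hashes: Vec<Hash256> = (0..12).map(|i| Hash256::from(i as u64)).collect();
    generate_signed_message(
        93, // slot
        &parent_hashes,
        15, // shard_id
        &Hash256::from("shard_block_hash".as_bytes()),
        18, // justified_slot
    )
}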

View File

@ -1,14 +1,8 @@
use std::collections::HashSet; use super::bls::{AggregatePublicKey, AggregateSignature};
use super::bls::{ use super::db::stores::{ValidatorStore, ValidatorStoreError};
AggregateSignature,
AggregatePublicKey,
};
use super::db::ClientDB; use super::db::ClientDB;
use super::db::stores::{
ValidatorStore,
ValidatorStoreError,
};
use super::types::Bitfield; use super::types::Bitfield;
use std::collections::HashSet;
#[derive(Debug, PartialEq)] #[derive(Debug, PartialEq)]
pub enum SignatureVerificationError { pub enum SignatureVerificationError {
@ -30,9 +24,10 @@ pub fn verify_aggregate_signature_for_indices<T>(
agg_sig: &AggregateSignature, agg_sig: &AggregateSignature,
attestation_indices: &[usize], attestation_indices: &[usize],
bitfield: &Bitfield, bitfield: &Bitfield,
validator_store: &ValidatorStore<T>) validator_store: &ValidatorStore<T>,
-> Result<Option<HashSet<usize>>, SignatureVerificationError> ) -> Result<Option<HashSet<usize>>, SignatureVerificationError>
where T: ClientDB + Sized where
T: ClientDB + Sized,
{ {
let mut voters = HashSet::new(); let mut voters = HashSet::new();
let mut agg_pub_key = AggregatePublicKey::new(); let mut agg_pub_key = AggregatePublicKey::new();
@ -43,7 +38,8 @@ pub fn verify_aggregate_signature_for_indices<T>(
/* /*
* De-reference the attestation index into a canonical ValidatorRecord index. * De-reference the attestation index into a canonical ValidatorRecord index.
*/ */
let validator = *attestation_indices.get(i) let validator = *attestation_indices
.get(i)
.ok_or(SignatureVerificationError::BadValidatorIndex)?; .ok_or(SignatureVerificationError::BadValidatorIndex)?;
/* /*
* Load the validator's public key from our store. * Load the validator's public key from our store.
@ -77,23 +73,17 @@ pub fn verify_aggregate_signature_for_indices<T>(
impl From<ValidatorStoreError> for SignatureVerificationError { impl From<ValidatorStoreError> for SignatureVerificationError {
fn from(error: ValidatorStoreError) -> Self { fn from(error: ValidatorStoreError) -> Self {
match error { match error {
ValidatorStoreError::DBError(s) => ValidatorStoreError::DBError(s) => SignatureVerificationError::DBError(s),
SignatureVerificationError::DBError(s), ValidatorStoreError::DecodeError => SignatureVerificationError::PublicKeyCorrupt,
ValidatorStoreError::DecodeError =>
SignatureVerificationError::PublicKeyCorrupt,
} }
} }
} }
#[cfg(test)] #[cfg(test)]
mod tests { mod tests {
use super::*; use super::super::bls::{Keypair, Signature};
use super::super::bls::{
Keypair,
Signature,
};
use super::super::db::MemoryDB; use super::super::db::MemoryDB;
use super::*;
use std::sync::Arc; use std::sync::Arc;
/* /*
@ -130,8 +120,7 @@ mod tests {
let mut all_keypairs = signing_keypairs.clone(); let mut all_keypairs = signing_keypairs.clone();
all_keypairs.append(&mut non_signing_keypairs.clone()); all_keypairs.append(&mut non_signing_keypairs.clone());
let attestation_indices: Vec<usize> = (0..all_keypairs.len()) let attestation_indices: Vec<usize> = (0..all_keypairs.len()).collect();
.collect();
let mut bitfield = Bitfield::new(); let mut bitfield = Bitfield::new();
for i in 0..signing_keypairs.len() { for i in 0..signing_keypairs.len() {
bitfield.set_bit(i, true); bitfield.set_bit(i, true);
@ -158,11 +147,11 @@ mod tests {
&agg_sig, &agg_sig,
&attestation_indices, &attestation_indices,
&bitfield, &bitfield,
&store).unwrap(); &store,
).unwrap();
let voters = voters.unwrap(); let voters = voters.unwrap();
(0..signing_keypairs.len()) (0..signing_keypairs.len()).for_each(|i| assert!(voters.contains(&i)));
.for_each(|i| assert!(voters.contains(&i)));
(signing_keypairs.len()..non_signing_keypairs.len()) (signing_keypairs.len()..non_signing_keypairs.len())
.for_each(|i| assert!(!voters.contains(&i))); .for_each(|i| assert!(!voters.contains(&i)));
@ -176,7 +165,8 @@ mod tests {
&agg_sig, &agg_sig,
&attestation_indices, &attestation_indices,
&bitfield, &bitfield,
&store).unwrap(); &store,
).unwrap();
assert_eq!(voters, None); assert_eq!(voters, None);
} }

View File

@ -1,33 +1,12 @@
use std::sync::Arc; use std::sync::Arc;
use super::db::{ use super::bls::{AggregateSignature, Keypair, SecretKey, Signature};
MemoryDB, use super::db::stores::{BeaconBlockStore, ValidatorStore};
}; use super::db::MemoryDB;
use super::db::stores::{ use super::hashing::canonical_hash;
ValidatorStore,
BeaconBlockStore,
};
use super::types::{
AttestationRecord,
AttesterMap,
Bitfield,
BeaconBlock,
Hash256,
};
use super::validation::attestation_validation::{
AttestationValidationContext,
};
use super::bls::{
AggregateSignature,
Keypair,
SecretKey,
Signature,
};
use super::ssz::SszStream; use super::ssz::SszStream;
use super::hashing::{ use super::types::{AttestationRecord, AttesterMap, BeaconBlock, Bitfield, Hash256};
canonical_hash, use super::validation::attestation_validation::AttestationValidationContext;
};
pub struct TestStore { pub struct TestStore {
pub db: Arc<MemoryDB>, pub db: Arc<MemoryDB>,
@ -55,13 +34,13 @@ pub struct TestRig {
pub attester_count: usize, pub attester_count: usize,
} }
fn generate_message_hash(slot: u64, fn generate_message_hash(
slot: u64,
parent_hashes: &[Hash256], parent_hashes: &[Hash256],
shard_id: u16, shard_id: u16,
shard_block_hash: &Hash256, shard_block_hash: &Hash256,
justified_slot: u64) justified_slot: u64,
-> Vec<u8> ) -> Vec<u8> {
{
let mut stream = SszStream::new(); let mut stream = SszStream::new();
stream.append(&slot); stream.append(&slot);
stream.append_vec(&parent_hashes.to_vec()); stream.append_vec(&parent_hashes.to_vec());
@ -72,7 +51,8 @@ fn generate_message_hash(slot: u64,
canonical_hash(&bytes) canonical_hash(&bytes)
} }
pub fn generate_attestation(shard_id: u16, pub fn generate_attestation(
shard_id: u16,
shard_block_hash: &Hash256, shard_block_hash: &Hash256,
block_slot: u64, block_slot: u64,
attestation_slot: u64, attestation_slot: u64,
@ -81,9 +61,8 @@ pub fn generate_attestation(shard_id: u16,
cycle_length: u8, cycle_length: u8,
parent_hashes: &[Hash256], parent_hashes: &[Hash256],
signing_keys: &[Option<SecretKey>], signing_keys: &[Option<SecretKey>],
block_store: &BeaconBlockStore<MemoryDB>) block_store: &BeaconBlockStore<MemoryDB>,
-> AttestationRecord ) -> AttestationRecord {
{
let mut attester_bitfield = Bitfield::new(); let mut attester_bitfield = Bitfield::new();
let mut aggregate_sig = AggregateSignature::new(); let mut aggregate_sig = AggregateSignature::new();
@ -107,7 +86,8 @@ pub fn generate_attestation(shard_id: u16,
parent_hashes_slice, parent_hashes_slice,
shard_id, shard_id,
shard_block_hash, shard_block_hash,
justified_slot); justified_slot,
);
for (i, secret_key) in signing_keys.iter().enumerate() { for (i, secret_key) in signing_keys.iter().enumerate() {
/* /*
@ -143,7 +123,9 @@ pub fn create_block_at_slot(block_store: &BeaconBlockStore<MemoryDB>, hash: &Has
let mut s = SszStream::new(); let mut s = SszStream::new();
s.append(&justified_block); s.append(&justified_block);
let justified_block_ssz = s.drain(); let justified_block_ssz = s.drain();
block_store.put_serialized_block(&hash.to_vec(), &justified_block_ssz).unwrap(); block_store
.put_serialized_block(&hash.to_vec(), &justified_block_ssz)
.unwrap();
} }
/// Inserts a justified_block_hash in a position that will be referenced by an attestation record. /// Inserts a justified_block_hash in a position that will be referenced by an attestation record.
@ -151,16 +133,14 @@ pub fn insert_justified_block_hash(
parent_hashes: &mut Vec<Hash256>, parent_hashes: &mut Vec<Hash256>,
justified_block_hash: &Hash256, justified_block_hash: &Hash256,
block_slot: u64, block_slot: u64,
attestation_slot: u64) attestation_slot: u64,
{ ) {
let attestation_parent_hash_index = parent_hashes.len() - 1 - let attestation_parent_hash_index =
(block_slot as usize - attestation_slot as usize); parent_hashes.len() - 1 - (block_slot as usize - attestation_slot as usize);
parent_hashes[attestation_parent_hash_index] = justified_block_hash.clone(); parent_hashes[attestation_parent_hash_index] = justified_block_hash.clone();
} }
pub fn setup_attestation_validation_test(shard_id: u16, attester_count: usize) pub fn setup_attestation_validation_test(shard_id: u16, attester_count: usize) -> TestRig {
-> TestRig
{
let stores = TestStore::new(); let stores = TestStore::new();
let block_slot = 10000; let block_slot = 10000;
@ -181,7 +161,8 @@ pub fn setup_attestation_validation_test(shard_id: u16, attester_count: usize)
&mut parent_hashes, &mut parent_hashes,
&justified_block_hash, &justified_block_hash,
block_slot, block_slot,
attestation_slot); attestation_slot,
);
let parent_hashes = Arc::new(parent_hashes); let parent_hashes = Arc::new(parent_hashes);
@ -197,7 +178,10 @@ pub fn setup_attestation_validation_test(shard_id: u16, attester_count: usize)
for i in 0..attester_count { for i in 0..attester_count {
let keypair = Keypair::random(); let keypair = Keypair::random();
keypairs.push(keypair.clone()); keypairs.push(keypair.clone());
stores.validator.put_public_key_by_index(i, &keypair.pk).unwrap(); stores
.validator
.put_public_key_by_index(i, &keypair.pk)
.unwrap();
signing_keys.push(Some(keypair.sk.clone())); signing_keys.push(Some(keypair.sk.clone()));
attesters.push(i); attesters.push(i);
} }
@ -223,7 +207,8 @@ pub fn setup_attestation_validation_test(shard_id: u16, attester_count: usize)
cycle_length, cycle_length,
&parent_hashes.clone(), &parent_hashes.clone(),
&signing_keys, &signing_keys,
&stores.block); &stores.block,
);
TestRig { TestRig {
attestation, attestation,

View File

@ -3,7 +3,7 @@ mod tests;
use super::bls; use super::bls;
use super::db; use super::db;
use super::hashing;
use super::ssz; use super::ssz;
use super::types; use super::types;
use super::hashing;
use super::validation; use super::validation;

View File

@ -1,20 +1,10 @@
use std::sync::Arc; use std::sync::Arc;
use super::helpers::{ use super::bls::AggregateSignature;
TestRig, use super::helpers::{create_block_at_slot, setup_attestation_validation_test, TestRig};
setup_attestation_validation_test,
create_block_at_slot,
};
use super::validation::attestation_validation::{
AttestationValidationError,
};
use super::types::AttesterMap; use super::types::AttesterMap;
use super::bls::{ use super::types::Hash256;
AggregateSignature, use super::validation::attestation_validation::AttestationValidationError;
};
use super::types::{
Hash256,
};
fn generic_rig() -> TestRig { fn generic_rig() -> TestRig {
let shard_id = 10; let shard_id = 10;
@ -80,21 +70,29 @@ fn test_attestation_validation_invalid_justified_slot_incorrect() {
create_block_at_slot( create_block_at_slot(
&rig.stores.block, &rig.stores.block,
&rig.attestation.justified_block_hash, &rig.attestation.justified_block_hash,
rig.attestation.justified_slot); rig.attestation.justified_slot,
);
let result = rig.context.validate_attestation(&rig.attestation); let result = rig.context.validate_attestation(&rig.attestation);
assert_eq!(result, Err(AttestationValidationError::BadAggregateSignature)); assert_eq!(
result,
Err(AttestationValidationError::BadAggregateSignature)
);
rig.attestation.justified_slot = original + 1; rig.attestation.justified_slot = original + 1;
// Ensures we don't get a bad justified block error instead. // Ensures we don't get a bad justified block error instead.
create_block_at_slot( create_block_at_slot(
&rig.stores.block, &rig.stores.block,
&rig.attestation.justified_block_hash, &rig.attestation.justified_block_hash,
rig.attestation.justified_slot); rig.attestation.justified_slot,
);
// Ensures we don't get an error that the last justified slot is ahead of the context justified // Ensures we don't get an error that the last justified slot is ahead of the context justified
// slot. // slot.
rig.context.last_justified_slot = rig.attestation.justified_slot; rig.context.last_justified_slot = rig.attestation.justified_slot;
let result = rig.context.validate_attestation(&rig.attestation); let result = rig.context.validate_attestation(&rig.attestation);
assert_eq!(result, Err(AttestationValidationError::BadAggregateSignature)); assert_eq!(
result,
Err(AttestationValidationError::BadAggregateSignature)
);
} }
#[test] #[test]
@ -108,7 +106,10 @@ fn test_attestation_validation_invalid_too_many_oblique() {
rig.attestation.oblique_parent_hashes = obliques; rig.attestation.oblique_parent_hashes = obliques;
let result = rig.context.validate_attestation(&rig.attestation); let result = rig.context.validate_attestation(&rig.attestation);
assert_eq!(result, Err(AttestationValidationError::TooManyObliqueHashes)); assert_eq!(
result,
Err(AttestationValidationError::TooManyObliqueHashes)
);
} }
#[test] #[test]
@ -132,8 +133,12 @@ fn test_attestation_validation_invalid_bad_bitfield_length() {
* of the bitfield. * of the bitfield.
*/ */
let one_byte_higher = rig.attester_count + 8; let one_byte_higher = rig.attester_count + 8;
rig.attestation.attester_bitfield.set_bit(one_byte_higher, true); rig.attestation
rig.attestation.attester_bitfield.set_bit(one_byte_higher, false); .attester_bitfield
.set_bit(one_byte_higher, true);
rig.attestation
.attester_bitfield
.set_bit(one_byte_higher, false);
let result = rig.context.validate_attestation(&rig.attestation); let result = rig.context.validate_attestation(&rig.attestation);
assert_eq!(result, Err(AttestationValidationError::BadBitfieldLength)); assert_eq!(result, Err(AttestationValidationError::BadBitfieldLength));
@ -144,10 +149,15 @@ fn test_attestation_validation_invalid_invalid_bitfield_end_bit() {
let mut rig = generic_rig(); let mut rig = generic_rig();
let one_bit_high = rig.attester_count + 1; let one_bit_high = rig.attester_count + 1;
rig.attestation.attester_bitfield.set_bit(one_bit_high, true); rig.attestation
.attester_bitfield
.set_bit(one_bit_high, true);
let result = rig.context.validate_attestation(&rig.attestation); let result = rig.context.validate_attestation(&rig.attestation);
assert_eq!(result, Err(AttestationValidationError::InvalidBitfieldEndBits)); assert_eq!(
result,
Err(AttestationValidationError::InvalidBitfieldEndBits)
);
} }
#[test] #[test]
@ -164,11 +174,19 @@ fn test_attestation_validation_invalid_invalid_bitfield_end_bit_with_irreguar_bi
* bit in a bitfield and the byte length of that bitfield * bit in a bitfield and the byte length of that bitfield
*/ */
let one_bit_high = rig.attester_count + 1; let one_bit_high = rig.attester_count + 1;
assert!(one_bit_high % 8 != 0, "the test is ineffective in this case."); assert!(
rig.attestation.attester_bitfield.set_bit(one_bit_high, true); one_bit_high % 8 != 0,
"the test is ineffective in this case."
);
rig.attestation
.attester_bitfield
.set_bit(one_bit_high, true);
let result = rig.context.validate_attestation(&rig.attestation); let result = rig.context.validate_attestation(&rig.attestation);
assert_eq!(result, Err(AttestationValidationError::InvalidBitfieldEndBits)); assert_eq!(
result,
Err(AttestationValidationError::InvalidBitfieldEndBits)
);
} }
#[test] #[test]
@ -178,7 +196,10 @@ fn test_attestation_validation_invalid_unknown_justified_block_hash() {
rig.attestation.justified_block_hash = Hash256::from("unknown block hash".as_bytes()); rig.attestation.justified_block_hash = Hash256::from("unknown block hash".as_bytes());
let result = rig.context.validate_attestation(&rig.attestation); let result = rig.context.validate_attestation(&rig.attestation);
assert_eq!(result, Err(AttestationValidationError::InvalidJustifiedBlockHash)); assert_eq!(
result,
Err(AttestationValidationError::InvalidJustifiedBlockHash)
);
} }
#[test] #[test]
@ -191,9 +212,13 @@ fn test_attestation_validation_invalid_unknown_justified_block_hash_wrong_slot()
create_block_at_slot( create_block_at_slot(
&rig.stores.block, &rig.stores.block,
&rig.attestation.justified_block_hash, &rig.attestation.justified_block_hash,
rig.attestation.justified_slot + 1); rig.attestation.justified_slot + 1,
);
let result = rig.context.validate_attestation(&rig.attestation); let result = rig.context.validate_attestation(&rig.attestation);
assert_eq!(result, Err(AttestationValidationError::InvalidJustifiedBlockHash)); assert_eq!(
result,
Err(AttestationValidationError::InvalidJustifiedBlockHash)
);
/* /*
* justified_block_hash points to a block with a slot that is too low. * justified_block_hash points to a block with a slot that is too low.
@ -201,9 +226,13 @@ fn test_attestation_validation_invalid_unknown_justified_block_hash_wrong_slot()
create_block_at_slot( create_block_at_slot(
&rig.stores.block, &rig.stores.block,
&rig.attestation.justified_block_hash, &rig.attestation.justified_block_hash,
rig.attestation.justified_slot - 1); rig.attestation.justified_slot - 1,
);
let result = rig.context.validate_attestation(&rig.attestation); let result = rig.context.validate_attestation(&rig.attestation);
assert_eq!(result, Err(AttestationValidationError::InvalidJustifiedBlockHash)); assert_eq!(
result,
Err(AttestationValidationError::InvalidJustifiedBlockHash)
);
} }
#[test] #[test]
@ -213,5 +242,8 @@ fn test_attestation_validation_invalid_empty_signature() {
rig.attestation.aggregate_sig = AggregateSignature::new(); rig.attestation.aggregate_sig = AggregateSignature::new();
let result = rig.context.validate_attestation(&rig.attestation); let result = rig.context.validate_attestation(&rig.attestation);
assert_eq!(result, Err(AttestationValidationError::BadAggregateSignature)); assert_eq!(
result,
Err(AttestationValidationError::BadAggregateSignature)
);
} }

View File

@ -4,17 +4,10 @@ extern crate hashing;
extern crate types; extern crate types;
use active_validators::validator_is_active; use active_validators::validator_is_active;
use bytes::{ use bytes::{BufMut, BytesMut};
BytesMut,
BufMut,
};
use hashing::canonical_hash; use hashing::canonical_hash;
use std::cmp::max; use std::cmp::max;
use types::{ use types::{Hash256, ValidatorRecord, ValidatorStatus};
Hash256,
ValidatorRecord,
ValidatorStatus,
};
pub enum UpdateValidatorSetError { pub enum UpdateValidatorSetError {
ArithmeticOverflow, ArithmeticOverflow,
@ -28,9 +21,8 @@ pub fn update_validator_set(
hash_chain: Hash256, hash_chain: Hash256,
present_slot: u64, present_slot: u64,
deposit_size_gwei: u64, deposit_size_gwei: u64,
max_validator_churn_quotient: u64) max_validator_churn_quotient: u64,
-> Result<(), UpdateValidatorSetError> ) -> Result<(), UpdateValidatorSetError> {
{
/* /*
* Total balance of all active validators. * Total balance of all active validators.
* *
@ -40,7 +32,8 @@ pub fn update_validator_set(
let mut bal: u64 = 0; let mut bal: u64 = 0;
for v in validators.iter() { for v in validators.iter() {
if validator_is_active(&v) { if validator_is_active(&v) {
bal = bal.checked_add(v.balance) bal = bal
.checked_add(v.balance)
.ok_or(UpdateValidatorSetError::ArithmeticOverflow)?; .ok_or(UpdateValidatorSetError::ArithmeticOverflow)?;
} }
} }
@ -51,9 +44,13 @@ pub fn update_validator_set(
* Note: this is not the maximum allowable change, it can actually be higher. * Note: this is not the maximum allowable change, it can actually be higher.
*/ */
let max_allowable_change = { let max_allowable_change = {
let double_deposit_size = deposit_size_gwei.checked_mul(2) let double_deposit_size = deposit_size_gwei
.checked_mul(2)
.ok_or(UpdateValidatorSetError::ArithmeticOverflow)?; .ok_or(UpdateValidatorSetError::ArithmeticOverflow)?;
max(double_deposit_size, total_balance / max_validator_churn_quotient) max(
double_deposit_size,
total_balance / max_validator_churn_quotient,
)
}; };
let mut hasher = ValidatorChangeHashChain { let mut hasher = ValidatorChangeHashChain {
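To make the churn bound just computed concrete, the sketch below restates it as a standalone function and checks two illustrative cases against the 32_000_000_000 Gwei deposit size defined elsewhere in this commit. The churn quotient of 32 is an arbitrary example value, not a chain-config default taken from this repository.

use std::cmp::max;

// Restates the bound above: the larger of two full deposits and the
// quotient share of the total active balance. Overflow yields `None`.
fn max_allowable_change(
    deposit_size_gwei: u64,
    total_balance: u64,
    max_validator_churn_quotient: u64,
) -> Option<u64> {
    let double_deposit_size = deposit_size_gwei.checked_mul(2)?;
    Some(max(
        double_deposit_size,
        total_balance / max_validator_churn_quotient,
    ))
}

fn main() {
    let deposit = 32_000_000_000u64; // 32 ETH in Gwei
    // 32 active validators: the two-deposit floor dominates.
    assert_eq!(
        max_allowable_change(deposit, 32 * deposit, 32),
        Some(2 * deposit)
    );
    // 10_000 active validators: the balance / quotient term dominates.
    assert_eq!(
        max_allowable_change(deposit, 10_000 * deposit, 32),
        Some(10_000 * deposit / 32)
    );
}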
@ -66,7 +63,8 @@ pub fn update_validator_set(
* Validator is pending activation. * Validator is pending activation.
*/ */
x if x == ValidatorStatus::PendingActivation as u8 => { x if x == ValidatorStatus::PendingActivation as u8 => {
let new_total_changed = total_changed.checked_add(deposit_size_gwei) let new_total_changed = total_changed
.checked_add(deposit_size_gwei)
.ok_or(UpdateValidatorSetError::ArithmeticOverflow)?; .ok_or(UpdateValidatorSetError::ArithmeticOverflow)?;
/* /*
* If entering this validator would not exceed the max balance delta, * If entering this validator would not exceed the max balance delta,
@ -85,7 +83,8 @@ pub fn update_validator_set(
* Validator is pending exit. * Validator is pending exit.
*/ */
x if x == ValidatorStatus::PendingExit as u8 => { x if x == ValidatorStatus::PendingExit as u8 => {
let new_total_changed = total_changed.checked_add(v.balance) let new_total_changed = total_changed
.checked_add(v.balance)
.ok_or(UpdateValidatorSetError::ArithmeticOverflow)?; .ok_or(UpdateValidatorSetError::ArithmeticOverflow)?;
/* /*
* If exiting this validator would not exceed the max balance delta, * If exiting this validator would not exceed the max balance delta,
@ -101,7 +100,7 @@ pub fn update_validator_set(
break; break;
} }
} }
_ => () _ => (),
}; };
if total_changed >= max_allowable_change { if total_changed >= max_allowable_change {
break; break;
@ -115,17 +114,14 @@ pub struct ValidatorChangeHashChain {
} }
impl ValidatorChangeHashChain { impl ValidatorChangeHashChain {
pub fn extend(&mut self, index: usize, pubkey: &Vec<u8>, flag: u8) pub fn extend(&mut self, index: usize, pubkey: &Vec<u8>, flag: u8) {
{
let mut message = self.bytes.clone(); let mut message = self.bytes.clone();
message.append(&mut serialize_validator_change_record(index, pubkey, flag)); message.append(&mut serialize_validator_change_record(index, pubkey, flag));
self.bytes = canonical_hash(&message); self.bytes = canonical_hash(&message);
} }
} }
fn serialize_validator_change_record(index: usize, pubkey: &Vec<u8>, flag: u8) fn serialize_validator_change_record(index: usize, pubkey: &Vec<u8>, flag: u8) -> Vec<u8> {
-> Vec<u8>
{
let mut buf = BytesMut::with_capacity(68); let mut buf = BytesMut::with_capacity(68);
buf.put_u8(flag); buf.put_u8(flag);
let index_bytes = { let index_bytes = {
@ -138,7 +134,6 @@ fn serialize_validator_change_record(index: usize, pubkey: &Vec<u8>, flag: u8)
buf.take().to_vec() buf.take().to_vec()
} }
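A small sketch of how the chain is extended, assuming an already constructed `ValidatorChangeHashChain` (its field layout is outside this hunk). The index, flag, and pubkey values are placeholders; only the chaining pattern matters.

// Fold two illustrative change records into an existing hash chain. Each call
// hashes (previous digest || serialized record) into the new digest, so the
// final state commits to the ordered sequence of records.
fn extend_chain(hasher: &mut ValidatorChangeHashChain, pubkey: &Vec<u8>) {
    hasher.extend(0, pubkey, ValidatorStatus::PendingActivation as u8);
    hasher.extend(1, pubkey, ValidatorStatus::PendingExit as u8);
}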
#[cfg(test)] #[cfg(test)]
mod tests { mod tests {
#[test] #[test]

View File

@ -1,11 +1,5 @@
use bls::{ use bls::verify_proof_of_possession;
verify_proof_of_possession, use types::{ValidatorRecord, ValidatorRegistration, ValidatorStatus};
};
use types::{
ValidatorRecord,
ValidatorStatus,
ValidatorRegistration,
};
/// The size of a validator's deposit in GWei. /// The size of a validator's deposit in GWei.
pub const DEPOSIT_GWEI: u64 = 32_000_000_000; pub const DEPOSIT_GWEI: u64 = 32_000_000_000;
@ -25,9 +19,7 @@ pub enum ValidatorInductionError {
} }
impl ValidatorInductor { impl ValidatorInductor {
pub fn new(current_slot: u64, shard_count: u16, validators: Vec<ValidatorRecord>) pub fn new(current_slot: u64, shard_count: u16, validators: Vec<ValidatorRecord>) -> Self {
-> Self
{
Self { Self {
current_slot, current_slot,
shard_count, shard_count,
@ -40,29 +32,33 @@ impl ValidatorInductor {
/// ///
/// Returns an error if the registration is invalid, otherwise returns the index of the /// Returns an error if the registration is invalid, otherwise returns the index of the
/// validator in `CrystallizedState.validators`. /// validator in `CrystallizedState.validators`.
pub fn induct(&mut self, rego: &ValidatorRegistration, status: ValidatorStatus) pub fn induct(
-> Result<usize, ValidatorInductionError> &mut self,
{ rego: &ValidatorRegistration,
status: ValidatorStatus,
) -> Result<usize, ValidatorInductionError> {
let v = self.process_registration(rego, status)?; let v = self.process_registration(rego, status)?;
Ok(self.add_validator(v)) Ok(self.add_validator(v))
} }
/// Verify a `ValidatorRegistration` and return a `ValidatorRecord` if valid. /// Verify a `ValidatorRegistration` and return a `ValidatorRecord` if valid.
fn process_registration(&self, r: &ValidatorRegistration, status: ValidatorStatus) fn process_registration(
-> Result<ValidatorRecord, ValidatorInductionError> &self,
{ r: &ValidatorRegistration,
status: ValidatorStatus,
) -> Result<ValidatorRecord, ValidatorInductionError> {
/* /*
* Ensure withdrawal shard is not too high. * Ensure withdrawal shard is not too high.
*/ */
if r.withdrawal_shard > self.shard_count { if r.withdrawal_shard > self.shard_count {
return Err(ValidatorInductionError::InvalidShard) return Err(ValidatorInductionError::InvalidShard);
} }
/* /*
* Prove validator has knowledge of their secret key. * Prove validator has knowledge of their secret key.
*/ */
if !verify_proof_of_possession(&r.proof_of_possession, &r.pubkey) { if !verify_proof_of_possession(&r.proof_of_possession, &r.pubkey) {
return Err(ValidatorInductionError::InvaidProofOfPossession) return Err(ValidatorInductionError::InvaidProofOfPossession);
} }
Ok(ValidatorRecord { Ok(ValidatorRecord {
@ -79,13 +75,11 @@ impl ValidatorInductor {
/// Returns the index of the first `ValidatorRecord` in the `CrystallizedState` where /// Returns the index of the first `ValidatorRecord` in the `CrystallizedState` where
/// `validator.status == Withdrawn`. If no such record exists, `None` is returned. /// `validator.status == Withdrawn`. If no such record exists, `None` is returned.
fn first_withdrawn_validator(&mut self) fn first_withdrawn_validator(&mut self) -> Option<usize> {
-> Option<usize>
{
for i in self.empty_validator_start..self.validators.len() { for i in self.empty_validator_start..self.validators.len() {
if self.validators[i].status == ValidatorStatus::Withdrawn as u8 { if self.validators[i].status == ValidatorStatus::Withdrawn as u8 {
self.empty_validator_start = i + 1; self.empty_validator_start = i + 1;
return Some(i) return Some(i);
} }
} }
None None
@ -94,9 +88,7 @@ impl ValidatorInductor {
/// Adds a `ValidatorRecord` to the `CrystallizedState` by replacing the first validator where /// Adds a `ValidatorRecord` to the `CrystallizedState` by replacing the first validator where
/// `validator.status == Withdrawn`. If no such withdrawn validator exists, adds the new /// `validator.status == Withdrawn`. If no such withdrawn validator exists, adds the new
/// validator to the end of the list. /// validator to the end of the list.
fn add_validator(&mut self, v: ValidatorRecord) fn add_validator(&mut self, v: ValidatorRecord) -> usize {
-> usize
{
match self.first_withdrawn_validator() { match self.first_withdrawn_validator() {
Some(i) => { Some(i) => {
self.validators[i] = v; self.validators[i] = v;
@ -109,36 +101,25 @@ impl ValidatorInductor {
} }
} }
pub fn to_vec(self) pub fn to_vec(self) -> Vec<ValidatorRecord> {
-> Vec<ValidatorRecord>
{
self.validators self.validators
} }
} }
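A hedged end-to-end sketch of the induction flow follows. It assumes a `ValidatorRegistration` named `registration` has already been received and carries a valid proof of possession; the slot and shard count are illustrative, not taken from a chain config.

extern crate types;
extern crate validator_induction;

use types::{ValidatorRecord, ValidatorRegistration, ValidatorStatus};
use validator_induction::{ValidatorInductionError, ValidatorInductor};

fn induct_one(
    registration: &ValidatorRegistration,
) -> Result<Vec<ValidatorRecord>, ValidatorInductionError> {
    // Start from an empty validator set at slot 0 with 1024 shards.
    let mut inductor = ValidatorInductor::new(0, 1024, vec![]);

    // On success this returns the index the new validator occupies; withdrawn
    // slots are reused before the list is extended.
    let index = inductor.induct(registration, ValidatorStatus::PendingActivation)?;
    println!("validator inducted at index {}", index);

    Ok(inductor.to_vec())
}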
#[cfg(test)] #[cfg(test)]
mod tests { mod tests {
use super::*; use super::*;
use bls::{ use bls::{Keypair, Signature};
Keypair,
Signature,
};
use types::{
Address,
Hash256,
};
use hashing::proof_of_possession_hash; use hashing::proof_of_possession_hash;
use types::{Address, Hash256};
fn registration_equals_record(reg: &ValidatorRegistration, rec: &ValidatorRecord) fn registration_equals_record(reg: &ValidatorRegistration, rec: &ValidatorRecord) -> bool {
-> bool (reg.pubkey == rec.pubkey)
{ & (reg.withdrawal_shard == rec.withdrawal_shard)
(reg.pubkey == rec.pubkey) & & (reg.withdrawal_address == rec.withdrawal_address)
(reg.withdrawal_shard == rec.withdrawal_shard) & & (reg.randao_commitment == rec.randao_commitment)
(reg.withdrawal_address == rec.withdrawal_address) & & (verify_proof_of_possession(&reg.proof_of_possession, &rec.pubkey))
(reg.randao_commitment == rec.randao_commitment) &
(verify_proof_of_possession(&reg.proof_of_possession, &rec.pubkey))
} }
/// Generate a proof of possession for some keypair. /// Generate a proof of possession for some keypair.
@ -291,7 +272,10 @@ mod tests {
let result = inductor.induct(&r, ValidatorStatus::PendingActivation); let result = inductor.induct(&r, ValidatorStatus::PendingActivation);
let validators = inductor.to_vec(); let validators = inductor.to_vec();
assert_eq!(result, Err(ValidatorInductionError::InvaidProofOfPossession)); assert_eq!(
result,
Err(ValidatorInductionError::InvaidProofOfPossession)
);
assert_eq!(validators.len(), 0); assert_eq!(validators.len(), 0);
} }
} }

View File

@ -4,7 +4,4 @@ extern crate types;
mod inductor; mod inductor;
pub use inductor::{ pub use inductor::{ValidatorInductionError, ValidatorInductor};
ValidatorInductor,
ValidatorInductionError,
};

View File

@ -1,11 +1,8 @@
extern crate active_validators; extern crate active_validators;
extern crate honey_badger_split; extern crate honey_badger_split;
extern crate vec_shuffle;
extern crate types; extern crate types;
extern crate vec_shuffle;
mod shuffle; mod shuffle;
pub use shuffle::{ pub use shuffle::{shard_and_committees_for_cycle, ValidatorAssignmentError};
shard_and_committees_for_cycle,
ValidatorAssignmentError,
};

View File

@ -2,16 +2,8 @@ use std::cmp::min;
use active_validators::active_validator_indices; use active_validators::active_validator_indices;
use honey_badger_split::SplitExt; use honey_badger_split::SplitExt;
use vec_shuffle::{ use types::{ChainConfig, ShardAndCommittee, ValidatorRecord};
shuffle, use vec_shuffle::{shuffle, ShuffleErr};
ShuffleErr,
};
use types::{
ShardAndCommittee,
ValidatorRecord,
ChainConfig,
};
type DelegatedCycle = Vec<Vec<ShardAndCommittee>>; type DelegatedCycle = Vec<Vec<ShardAndCommittee>>;
@ -29,9 +21,8 @@ pub fn shard_and_committees_for_cycle(
seed: &[u8], seed: &[u8],
validators: &[ValidatorRecord], validators: &[ValidatorRecord],
crosslinking_shard_start: u16, crosslinking_shard_start: u16,
config: &ChainConfig) config: &ChainConfig,
-> Result<DelegatedCycle, ValidatorAssignmentError> ) -> Result<DelegatedCycle, ValidatorAssignmentError> {
{
let shuffled_validator_indices = { let shuffled_validator_indices = {
let mut validator_indices = active_validator_indices(validators); let mut validator_indices = active_validator_indices(validators);
shuffle(seed, validator_indices)? shuffle(seed, validator_indices)?
@ -45,7 +36,8 @@ pub fn shard_and_committees_for_cycle(
&shard_indices, &shard_indices,
crosslinking_shard_start, crosslinking_shard_start,
cycle_length, cycle_length,
min_committee_size) min_committee_size,
)
} }
/// Given the validator list, delegates the validators into slots and committees for a given cycle. /// Given the validator list, delegates the validators into slots and committees for a given cycle.
@ -54,50 +46,49 @@ fn generate_cycle(
shard_indices: &[usize], shard_indices: &[usize],
crosslinking_shard_start: usize, crosslinking_shard_start: usize,
cycle_length: usize, cycle_length: usize,
min_committee_size: usize) min_committee_size: usize,
-> Result<DelegatedCycle, ValidatorAssignmentError> ) -> Result<DelegatedCycle, ValidatorAssignmentError> {
{
let validator_count = validator_indices.len(); let validator_count = validator_indices.len();
let shard_count = shard_indices.len(); let shard_count = shard_indices.len();
if shard_count / cycle_length == 0 { if shard_count / cycle_length == 0 {
return Err(ValidatorAssignmentError::TooFewShards) return Err(ValidatorAssignmentError::TooFewShards);
} }
let (committees_per_slot, slots_per_committee) = { let (committees_per_slot, slots_per_committee) = {
if validator_count >= cycle_length * min_committee_size { if validator_count >= cycle_length * min_committee_size {
let committees_per_slot = min(validator_count / cycle_length / let committees_per_slot = min(
(min_committee_size * 2) + 1, shard_count / validator_count / cycle_length / (min_committee_size * 2) + 1,
cycle_length); shard_count / cycle_length,
);
let slots_per_committee = 1; let slots_per_committee = 1;
(committees_per_slot, slots_per_committee) (committees_per_slot, slots_per_committee)
} else { } else {
let committees_per_slot = 1; let committees_per_slot = 1;
let mut slots_per_committee = 1; let mut slots_per_committee = 1;
while (validator_count * slots_per_committee < cycle_length * min_committee_size) & while (validator_count * slots_per_committee < cycle_length * min_committee_size)
(slots_per_committee < cycle_length) { & (slots_per_committee < cycle_length)
{
slots_per_committee *= 2; slots_per_committee *= 2;
} }
(committees_per_slot, slots_per_committee) (committees_per_slot, slots_per_committee)
} }
}; };
let cycle = validator_indices.honey_badger_split(cycle_length) let cycle = validator_indices
.honey_badger_split(cycle_length)
.enumerate() .enumerate()
.map(|(i, slot_indices)| { .map(|(i, slot_indices)| {
let shard_start = crosslinking_shard_start + i * committees_per_slot / slots_per_committee; let shard_start =
slot_indices.honey_badger_split(committees_per_slot) crosslinking_shard_start + i * committees_per_slot / slots_per_committee;
slot_indices
.honey_badger_split(committees_per_slot)
.enumerate() .enumerate()
.map(|(j, shard_indices)| { .map(|(j, shard_indices)| ShardAndCommittee {
ShardAndCommittee{
shard: ((shard_start + j) % shard_count) as u16, shard: ((shard_start + j) % shard_count) as u16,
committee: shard_indices.to_vec(), committee: shard_indices.to_vec(),
} }).collect()
}) }).collect();
.collect()
})
.collect();
Ok(cycle) Ok(cycle)
} }
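A worked example of the sizing branch above may help. The helper below restates it as a standalone function so the two regimes can be checked numerically; the parameter values are illustrative only.

use std::cmp::min;

// Returns (committees_per_slot, slots_per_committee) for a given validator set.
fn committee_sizing(
    validator_count: usize,
    shard_count: usize,
    cycle_length: usize,
    min_committee_size: usize,
) -> (usize, usize) {
    if validator_count >= cycle_length * min_committee_size {
        // Plenty of validators: several committees can sit in each slot.
        let committees_per_slot = min(
            validator_count / cycle_length / (min_committee_size * 2) + 1,
            shard_count / cycle_length,
        );
        (committees_per_slot, 1)
    } else {
        // Too few validators: one committee per slot, reused across slots
        // until the minimum committee size (or the cycle length) is reached.
        let mut slots_per_committee = 1;
        while (validator_count * slots_per_committee < cycle_length * min_committee_size)
            & (slots_per_committee < cycle_length)
        {
            slots_per_committee *= 2;
        }
        (1, slots_per_committee)
    }
}

fn main() {
    // 100 validators, 10 shards, cycle_length 10, min committee size 10.
    assert_eq!(committee_sizing(100, 10, 10, 10), (1, 1));
    // Only 20 validators for the same parameters: each committee covers 8 slots.
    assert_eq!(committee_sizing(20, 10, 10, 10), (1, 8));
}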
@ -118,9 +109,12 @@ mod tests {
shard_count: &usize, shard_count: &usize,
crosslinking_shard_start: usize, crosslinking_shard_start: usize,
cycle_length: usize, cycle_length: usize,
min_committee_size: usize) min_committee_size: usize,
-> (Vec<usize>, Vec<usize>, Result<DelegatedCycle, ValidatorAssignmentError>) ) -> (
{ Vec<usize>,
Vec<usize>,
Result<DelegatedCycle, ValidatorAssignmentError>,
) {
let validator_indices: Vec<usize> = (0_usize..*validator_count).into_iter().collect(); let validator_indices: Vec<usize> = (0_usize..*validator_count).into_iter().collect();
let shard_indices: Vec<usize> = (0_usize..*shard_count).into_iter().collect(); let shard_indices: Vec<usize> = (0_usize..*shard_count).into_iter().collect();
let result = generate_cycle( let result = generate_cycle(
@ -128,28 +122,27 @@ mod tests {
&shard_indices, &shard_indices,
crosslinking_shard_start, crosslinking_shard_start,
cycle_length, cycle_length,
min_committee_size); min_committee_size,
);
(validator_indices, shard_indices, result) (validator_indices, shard_indices, result)
} }
#[allow(dead_code)] #[allow(dead_code)]
fn print_cycle(cycle: &DelegatedCycle) { fn print_cycle(cycle: &DelegatedCycle) {
cycle.iter() cycle.iter().enumerate().for_each(|(i, slot)| {
.enumerate()
.for_each(|(i, slot)| {
println!("slot {:?}", &i); println!("slot {:?}", &i);
slot.iter() slot.iter().enumerate().for_each(|(i, sac)| {
.enumerate() println!(
.for_each(|(i, sac)| { "#{:?}\tshard={}\tcommittee.len()={}",
println!("#{:?}\tshard={}\tcommittee.len()={}", &i,
&i, &sac.shard, &sac.committee.len()) &sac.shard,
&sac.committee.len()
)
}) })
}); });
} }
fn flatten_validators(cycle: &DelegatedCycle) fn flatten_validators(cycle: &DelegatedCycle) -> Vec<usize> {
-> Vec<usize>
{
let mut flattened = vec![]; let mut flattened = vec![];
for slot in cycle.iter() { for slot in cycle.iter() {
for sac in slot.iter() { for sac in slot.iter() {
@ -161,9 +154,7 @@ mod tests {
flattened flattened
} }
fn flatten_and_dedup_shards(cycle: &DelegatedCycle) fn flatten_and_dedup_shards(cycle: &DelegatedCycle) -> Vec<usize> {
-> Vec<usize>
{
let mut flattened = vec![]; let mut flattened = vec![];
for slot in cycle.iter() { for slot in cycle.iter() {
for sac in slot.iter() { for sac in slot.iter() {
@ -174,9 +165,7 @@ mod tests {
flattened flattened
} }
fn flatten_shards_in_slots(cycle: &DelegatedCycle) fn flatten_shards_in_slots(cycle: &DelegatedCycle) -> Vec<Vec<usize>> {
-> Vec<Vec<usize>>
{
let mut shards_in_slots: Vec<Vec<usize>> = vec![]; let mut shards_in_slots: Vec<Vec<usize>> = vec![];
for slot in cycle.iter() { for slot in cycle.iter() {
let mut shards: Vec<usize> = vec![]; let mut shards: Vec<usize> = vec![];
@ -201,30 +190,50 @@ mod tests {
&shard_count, &shard_count,
crosslinking_shard_start, crosslinking_shard_start,
cycle_length, cycle_length,
min_committee_size); min_committee_size,
);
let cycle = result.unwrap(); let cycle = result.unwrap();
let assigned_validators = flatten_validators(&cycle); let assigned_validators = flatten_validators(&cycle);
let assigned_shards = flatten_and_dedup_shards(&cycle); let assigned_shards = flatten_and_dedup_shards(&cycle);
let shards_in_slots = flatten_shards_in_slots(&cycle); let shards_in_slots = flatten_shards_in_slots(&cycle);
let expected_shards = shards.get(0..10).unwrap(); let expected_shards = shards.get(0..10).unwrap();
assert_eq!(assigned_validators, validators, "Validator assignment incorrect"); assert_eq!(
assert_eq!(assigned_shards, expected_shards, "Shard assignment incorrect"); assigned_validators, validators,
"Validator assignment incorrect"
);
assert_eq!(
assigned_shards, expected_shards,
"Shard assignment incorrect"
);
let expected_shards_in_slots: Vec<Vec<usize>> = vec![ let expected_shards_in_slots: Vec<Vec<usize>> = vec![
vec![0], vec![0], // Each line is 2 slots.. vec![0],
vec![1], vec![1], vec![0], // Each line is 2 slots..
vec![2], vec![2], vec![1],
vec![3], vec![3], vec![1],
vec![4], vec![4], vec![2],
vec![5], vec![5], vec![2],
vec![6], vec![6], vec![3],
vec![7], vec![7], vec![3],
vec![8], vec![8], vec![4],
vec![9], vec![9], vec![4],
vec![5],
vec![5],
vec![6],
vec![6],
vec![7],
vec![7],
vec![8],
vec![8],
vec![9],
vec![9],
]; ];
// assert!(compare_shards_in_slots(&cycle, &expected_shards_in_slots)); // assert!(compare_shards_in_slots(&cycle, &expected_shards_in_slots));
assert_eq!(expected_shards_in_slots, shards_in_slots, "Shard assignment incorrect.") assert_eq!(
expected_shards_in_slots, shards_in_slots,
"Shard assignment incorrect."
)
} }
#[test] #[test]
@ -240,17 +249,28 @@ mod tests {
&shard_count, &shard_count,
crosslinking_shard_start, crosslinking_shard_start,
cycle_length, cycle_length,
min_committee_size); min_committee_size,
);
let cycle = result.unwrap(); let cycle = result.unwrap();
let assigned_validators = flatten_validators(&cycle); let assigned_validators = flatten_validators(&cycle);
let assigned_shards = flatten_and_dedup_shards(&cycle); let assigned_shards = flatten_and_dedup_shards(&cycle);
let shards_in_slots = flatten_shards_in_slots(&cycle); let shards_in_slots = flatten_shards_in_slots(&cycle);
let expected_shards = shards.get(0..22).unwrap(); let expected_shards = shards.get(0..22).unwrap();
let expected_shards_in_slots: Vec<Vec<usize>> = let expected_shards_in_slots: Vec<Vec<usize>> = (0_usize..11_usize)
(0_usize..11_usize) .map(|x| vec![2*x,2*x+1]).collect(); .map(|x| vec![2 * x, 2 * x + 1])
assert_eq!(assigned_validators, validators, "Validator assignment incorrect"); .collect();
assert_eq!(assigned_shards, expected_shards, "Shard assignment incorrect"); assert_eq!(
assigned_validators, validators,
"Validator assignment incorrect"
);
assert_eq!(
assigned_shards, expected_shards,
"Shard assignment incorrect"
);
// assert!(compare_shards_in_slots(&cycle, &expected_shards_in_slots)); // assert!(compare_shards_in_slots(&cycle, &expected_shards_in_slots));
assert_eq!(expected_shards_in_slots, shards_in_slots, "Shard assignment incorrect.") assert_eq!(
expected_shards_in_slots, shards_in_slots,
"Shard assignment incorrect."
)
} }
} }

View File

@ -16,10 +16,9 @@ const DEFAULT_LIGHTHOUSE_DIR: &str = ".lighthouse";
impl LighthouseConfig { impl LighthouseConfig {
/// Build a new lighthouse configuration from defaults. /// Build a new lighthouse configuration from defaults.
pub fn default() -> Self{ pub fn default() -> Self {
let data_dir = { let data_dir = {
let home = dirs::home_dir() let home = dirs::home_dir().expect("Unable to determine home dir.");
.expect("Unable to determine home dir.");
home.join(DEFAULT_LIGHTHOUSE_DIR) home.join(DEFAULT_LIGHTHOUSE_DIR)
}; };
fs::create_dir_all(&data_dir) fs::create_dir_all(&data_dir)
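Editor's note: the hunk above ends mid-call, but the default-directory logic is visible: resolve the home directory, append the Lighthouse directory name, and create it. A minimal sketch under the assumption that the dirs crate is declared in Cargo.toml, with ".lighthouse" standing in for DEFAULT_LIGHTHOUSE_DIR:

// A minimal sketch of the pattern above; `dirs` is assumed to be available, and
// the expect() messages mirror the ones in the hunk.
use std::fs;
use std::path::PathBuf;

fn default_data_dir() -> PathBuf {
    let home = dirs::home_dir().expect("Unable to determine home dir.");
    let data_dir = home.join(".lighthouse");
    fs::create_dir_all(&data_dir).expect("Unable to create data dir.");
    data_dir
}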
View File
@ -4,15 +4,11 @@ extern crate rocksdb;
mod disk_db; mod disk_db;
mod memory_db; mod memory_db;
mod traits;
pub mod stores; pub mod stores;
mod traits;
use self::stores::COLUMNS; use self::stores::COLUMNS;
pub use self::disk_db::DiskDB; pub use self::disk_db::DiskDB;
pub use self::memory_db::MemoryDB; pub use self::memory_db::MemoryDB;
pub use self::traits::{ pub use self::traits::{ClientDB, DBError, DBValue};
DBError,
DBValue,
ClientDB,
};
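Editor's note: the condensed re-export above lets callers stay generic over the storage backend. A minimal sketch, relying only on the ClientDB signatures that appear elsewhere in this diff (column-keyed put/exists), of code that works against either DiskDB or MemoryDB:

// A minimal sketch, generic over any backend implementing the re-exported
// ClientDB trait; only the put/exists calls visible in the PoWChainStore hunk
// below are assumed.
use db::{ClientDB, DBError};
use std::sync::Arc;

fn mark_seen<T: ClientDB>(db: &Arc<T>, column: &str, key: &[u8]) -> Result<bool, DBError> {
    db.put(column, key, &[0])?;
    db.exists(column, key)
}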
View File
@ -1,21 +1,12 @@
use super::{ use super::{ClientDB, DBError};
ClientDB,
DBError,
};
mod beacon_block_store; mod beacon_block_store;
mod pow_chain_store; mod pow_chain_store;
mod validator_store; mod validator_store;
pub use self::beacon_block_store::{ pub use self::beacon_block_store::{BeaconBlockAtSlotError, BeaconBlockStore};
BeaconBlockStore,
BeaconBlockAtSlotError,
};
pub use self::pow_chain_store::PoWChainStore; pub use self::pow_chain_store::PoWChainStore;
pub use self::validator_store::{ pub use self::validator_store::{ValidatorStore, ValidatorStoreError};
ValidatorStore,
ValidatorStoreError,
};
use super::bls; use super::bls;
@ -23,8 +14,4 @@ pub const BLOCKS_DB_COLUMN: &str = "blocks";
pub const POW_CHAIN_DB_COLUMN: &str = "powchain"; pub const POW_CHAIN_DB_COLUMN: &str = "powchain";
pub const VALIDATOR_DB_COLUMN: &str = "validator"; pub const VALIDATOR_DB_COLUMN: &str = "validator";
pub const COLUMNS: [&str; 3] = [ pub const COLUMNS: [&str; 3] = [BLOCKS_DB_COLUMN, POW_CHAIN_DB_COLUMN, VALIDATOR_DB_COLUMN];
BLOCKS_DB_COLUMN,
POW_CHAIN_DB_COLUMN,
VALIDATOR_DB_COLUMN,
];
View File
@ -1,32 +1,24 @@
use std::sync::Arc;
use super::{
ClientDB,
DBError,
};
use super::POW_CHAIN_DB_COLUMN as DB_COLUMN; use super::POW_CHAIN_DB_COLUMN as DB_COLUMN;
use super::{ClientDB, DBError};
use std::sync::Arc;
pub struct PoWChainStore<T> pub struct PoWChainStore<T>
where T: ClientDB where
T: ClientDB,
{ {
db: Arc<T>, db: Arc<T>,
} }
impl<T: ClientDB> PoWChainStore<T> { impl<T: ClientDB> PoWChainStore<T> {
pub fn new(db: Arc<T>) -> Self { pub fn new(db: Arc<T>) -> Self {
Self { Self { db }
db,
}
} }
pub fn put_block_hash(&self, hash: &[u8]) pub fn put_block_hash(&self, hash: &[u8]) -> Result<(), DBError> {
-> Result<(), DBError>
{
self.db.put(DB_COLUMN, hash, &[0]) self.db.put(DB_COLUMN, hash, &[0])
} }
pub fn block_hash_exists(&self, hash: &[u8]) pub fn block_hash_exists(&self, hash: &[u8]) -> Result<bool, DBError> {
-> Result<bool, DBError>
{
self.db.exists(DB_COLUMN, hash) self.db.exists(DB_COLUMN, hash)
} }
} }
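Editor's note: a minimal usage sketch of the store above, exercising only the two methods shown in this hunk and assuming the db crate's re-exports are in scope:

// A minimal sketch of the PoWChainStore round trip shown above: record a PoW
// block hash, then ask whether it is known. Generic over any ClientDB backend.
use db::stores::PoWChainStore;
use db::{ClientDB, DBError};
use std::sync::Arc;

fn record_pow_block<T: ClientDB>(db: Arc<T>, hash: &[u8]) -> Result<bool, DBError> {
    let store = PoWChainStore::new(db);
    store.put_block_hash(hash)?;
    store.block_hash_exists(hash)
}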
View File
@ -1,16 +1,10 @@
extern crate bytes; extern crate bytes;
use self::bytes::{ use self::bytes::{BufMut, BytesMut};
BufMut,
BytesMut,
};
use std::sync::Arc;
use super::{
ClientDB,
DBError,
};
use super::VALIDATOR_DB_COLUMN as DB_COLUMN;
use super::bls::PublicKey; use super::bls::PublicKey;
use super::VALIDATOR_DB_COLUMN as DB_COLUMN;
use super::{ClientDB, DBError};
use std::sync::Arc;
#[derive(Debug, PartialEq)] #[derive(Debug, PartialEq)]
pub enum ValidatorStoreError { pub enum ValidatorStoreError {
@ -30,66 +24,63 @@ enum KeyPrefixes {
} }
pub struct ValidatorStore<T> pub struct ValidatorStore<T>
where T: ClientDB where
T: ClientDB,
{ {
db: Arc<T>, db: Arc<T>,
} }
impl<T: ClientDB> ValidatorStore<T> { impl<T: ClientDB> ValidatorStore<T> {
pub fn new(db: Arc<T>) -> Self { pub fn new(db: Arc<T>) -> Self {
Self { Self { db }
db,
}
} }
fn prefix_bytes(&self, key_prefix: &KeyPrefixes) fn prefix_bytes(&self, key_prefix: &KeyPrefixes) -> Vec<u8> {
-> Vec<u8>
{
match key_prefix { match key_prefix {
KeyPrefixes::PublicKey => b"pubkey".to_vec(), KeyPrefixes::PublicKey => b"pubkey".to_vec(),
} }
} }
fn get_db_key_for_index(&self, key_prefix: &KeyPrefixes, index: usize) fn get_db_key_for_index(&self, key_prefix: &KeyPrefixes, index: usize) -> Vec<u8> {
-> Vec<u8>
{
let mut buf = BytesMut::with_capacity(6 + 8); let mut buf = BytesMut::with_capacity(6 + 8);
buf.put(self.prefix_bytes(key_prefix)); buf.put(self.prefix_bytes(key_prefix));
buf.put_u64_be(index as u64); buf.put_u64_be(index as u64);
buf.take().to_vec() buf.take().to_vec()
} }
pub fn put_public_key_by_index(&self, index: usize, public_key: &PublicKey) pub fn put_public_key_by_index(
-> Result<(), ValidatorStoreError> &self,
{ index: usize,
public_key: &PublicKey,
) -> Result<(), ValidatorStoreError> {
let key = self.get_db_key_for_index(&KeyPrefixes::PublicKey, index); let key = self.get_db_key_for_index(&KeyPrefixes::PublicKey, index);
let val = public_key.as_bytes(); let val = public_key.as_bytes();
self.db.put(DB_COLUMN, &key[..], &val[..]) self.db
.put(DB_COLUMN, &key[..], &val[..])
.map_err(ValidatorStoreError::from) .map_err(ValidatorStoreError::from)
} }
pub fn get_public_key_by_index(&self, index: usize) pub fn get_public_key_by_index(
-> Result<Option<PublicKey>, ValidatorStoreError> &self,
{ index: usize,
) -> Result<Option<PublicKey>, ValidatorStoreError> {
let key = self.get_db_key_for_index(&KeyPrefixes::PublicKey, index); let key = self.get_db_key_for_index(&KeyPrefixes::PublicKey, index);
let val = self.db.get(DB_COLUMN, &key[..])?; let val = self.db.get(DB_COLUMN, &key[..])?;
match val { match val {
None => Ok(None), None => Ok(None),
Some(val) => { Some(val) => match PublicKey::from_bytes(&val) {
match PublicKey::from_bytes(&val) {
Ok(key) => Ok(Some(key)), Ok(key) => Ok(Some(key)),
Err(_) => Err(ValidatorStoreError::DecodeError), Err(_) => Err(ValidatorStoreError::DecodeError),
} },
}
} }
} }
} }
#[cfg(test)] #[cfg(test)]
mod tests { mod tests {
use super::*;
use super::super::super::MemoryDB; use super::super::super::MemoryDB;
use super::super::bls::Keypair; use super::super::bls::Keypair;
use super::*;
#[test] #[test]
fn test_validator_store_put_get() { fn test_validator_store_put_get() {
@ -112,16 +103,19 @@ mod tests {
* Check all keys are retrieved correctly. * Check all keys are retrieved correctly.
*/ */
for i in 0..keys.len() { for i in 0..keys.len() {
let retrieved = store.get_public_key_by_index(i) let retrieved = store.get_public_key_by_index(i).unwrap().unwrap();
.unwrap().unwrap();
assert_eq!(retrieved, keys[i].pk); assert_eq!(retrieved, keys[i].pk);
} }
/* /*
* Check that an index that wasn't stored returns None. * Check that an index that wasn't stored returns None.
*/ */
assert!(store.get_public_key_by_index(keys.len() + 1) assert!(
.unwrap().is_none()); store
.get_public_key_by_index(keys.len() + 1)
.unwrap()
.is_none()
);
} }
#[test] #[test]
@ -132,7 +126,9 @@ mod tests {
let key = store.get_db_key_for_index(&KeyPrefixes::PublicKey, 42); let key = store.get_db_key_for_index(&KeyPrefixes::PublicKey, 42);
db.put(DB_COLUMN, &key[..], "cats".as_bytes()).unwrap(); db.put(DB_COLUMN, &key[..], "cats".as_bytes()).unwrap();
assert_eq!(store.get_public_key_by_index(42), assert_eq!(
Err(ValidatorStoreError::DecodeError)); store.get_public_key_by_index(42),
Err(ValidatorStoreError::DecodeError)
);
} }
} }
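Editor's note: a minimal sketch of the ValidatorStore API shown above — persist a validator's public key at an index and read it back. It is generic over any ClientDB backend; the PublicKey import path is an assumption (the store itself pulls it in via super::bls).

// A minimal sketch, not the crate's code: both store calls return
// ValidatorStoreError on failure, so `?` propagates directly.
use bls::PublicKey; // assumed path to the BLS wrapper the store uses
use db::stores::{ValidatorStore, ValidatorStoreError};
use db::ClientDB;
use std::sync::Arc;

fn round_trip_key<T: ClientDB>(
    db: Arc<T>,
    index: usize,
    public_key: &PublicKey,
) -> Result<Option<PublicKey>, ValidatorStoreError> {
    let store = ValidatorStore::new(db);
    store.put_public_key_by_index(index, public_key)?;
    store.get_public_key_by_index(index)
}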
View File
@ -1,7 +1,7 @@
#[macro_use] #[macro_use]
extern crate slog; extern crate slog;
extern crate slog_term;
extern crate slog_async; extern crate slog_async;
extern crate slog_term;
// extern crate ssz; // extern crate ssz;
extern crate clap; extern crate clap;
extern crate futures; extern crate futures;
@ -12,9 +12,9 @@ mod config;
use std::path::PathBuf; use std::path::PathBuf;
use slog::Drain; use clap::{App, Arg};
use clap::{ Arg, App };
use config::LighthouseConfig; use config::LighthouseConfig;
use slog::Drain;
fn main() { fn main() {
let decorator = slog_term::TermDecorator::new().build(); let decorator = slog_term::TermDecorator::new().build();
@ -26,17 +26,19 @@ fn main() {
.version("0.0.1") .version("0.0.1")
.author("Sigma Prime <paul@sigmaprime.io>") .author("Sigma Prime <paul@sigmaprime.io>")
.about("Eth 2.0 Client") .about("Eth 2.0 Client")
.arg(Arg::with_name("datadir") .arg(
Arg::with_name("datadir")
.long("datadir") .long("datadir")
.value_name("DIR") .value_name("DIR")
.help("Data directory for keys and databases.") .help("Data directory for keys and databases.")
.takes_value(true)) .takes_value(true),
.arg(Arg::with_name("port") ).arg(
Arg::with_name("port")
.long("port") .long("port")
.value_name("PORT") .value_name("PORT")
.help("Network listen port for p2p connections.") .help("Network listen port for p2p connections.")
.takes_value(true)) .takes_value(true),
.get_matches(); ).get_matches();
let mut config = LighthouseConfig::default(); let mut config = LighthouseConfig::default();
@ -60,8 +62,10 @@ fn main() {
"data_dir" => &config.data_dir.to_str(), "data_dir" => &config.data_dir.to_str(),
"port" => &config.p2p_listen_port); "port" => &config.p2p_listen_port);
error!(log, error!(
"Lighthouse under development and does not provide a user demo."); log,
"Lighthouse under development and does not provide a user demo."
);
info!(log, "Exiting."); info!(log, "Exiting.");
} }
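Editor's note: the hunk above only logs the parsed flags. A hedged sketch of how the two clap 2.x arguments might be folded back into LighthouseConfig; the field names mirror the log keys ("data_dir", "port"), but their exact types here are assumptions for illustration:

// A hedged sketch, not the crate's code: a PathBuf data_dir and u16 port are
// assumed. `value_of` returning Option<&str> is standard clap 2.x behaviour.
use clap::ArgMatches;
use std::path::PathBuf;

fn apply_cli_overrides(mut config: LighthouseConfig, matches: &ArgMatches) -> LighthouseConfig {
    if let Some(dir) = matches.value_of("datadir") {
        config.data_dir = PathBuf::from(dir);
    }
    if let Some(port) = matches.value_of("port") {
        if let Ok(port) = port.parse::<u16>() {
            config.p2p_listen_port = port;
        }
    }
    config
}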