Merge pull request #69 from sigp/rustfmt

Run rustfmt globally.
Age Manning, 2018-11-14 18:12:21 +02:00, committed by GitHub
commit 2e2a1faff4
43 changed files with 754 additions and 975 deletions
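
Note: the commit message does not record the exact invocation. For a multi-crate Cargo workspace such as this one, a global formatting pass is typically applied from the workspace root with "cargo fmt --all" (rewriting every member crate in place) and verified with "cargo fmt --all -- --check", which reports unformatted files without modifying them; the precise flags accepted depend on the rustfmt and cargo versions in use.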

View File

@ -1,8 +1,8 @@
extern crate db;
extern crate naive_fork_choice;
extern crate state_transition;
extern crate ssz;
extern crate ssz_helpers;
extern crate state_transition;
extern crate types;
extern crate validation;
extern crate validator_induction;
@ -12,8 +12,8 @@ mod block_context;
mod block_processing;
mod genesis;
mod maps;
mod transition;
mod stores;
mod transition;
use db::ClientDB;
use genesis::genesis_states;

View File

@ -1,15 +1,6 @@
use super::{ Hash256, Bitfield };
use super::bls::{
AggregateSignature,
BLS_AGG_SIG_BYTE_SIZE,
};
use super::ssz::{
Encodable,
Decodable,
DecodeError,
decode_ssz_list,
SszStream,
};
use super::bls::{AggregateSignature, BLS_AGG_SIG_BYTE_SIZE};
use super::ssz::{decode_ssz_list, Decodable, DecodeError, Encodable, SszStream};
use super::{Bitfield, Hash256};
pub const MIN_SSZ_ATTESTION_RECORD_LENGTH: usize = {
8 + // slot
@ -19,7 +10,7 @@ pub const MIN_SSZ_ATTESTION_RECORD_LENGTH: usize = {
5 + // attester_bitfield (assuming 1 byte of bitfield)
8 + // justified_slot
32 + // justified_block_hash
4 + BLS_AGG_SIG_BYTE_SIZE // aggregate sig (two 256 bit points)
4 + BLS_AGG_SIG_BYTE_SIZE // aggregate sig (two 256 bit points)
};
#[derive(Debug, Clone, PartialEq)]
@ -48,9 +39,7 @@ impl Encodable for AttestationRecord {
}
impl Decodable for AttestationRecord {
fn ssz_decode(bytes: &[u8], i: usize)
-> Result<(Self, usize), DecodeError>
{
fn ssz_decode(bytes: &[u8], i: usize) -> Result<(Self, usize), DecodeError> {
let (slot, i) = u64::ssz_decode(bytes, i)?;
let (shard_id, i) = u16::ssz_decode(bytes, i)?;
let (oblique_parent_hashes, i) = decode_ssz_list(bytes, i)?;
@ -60,8 +49,8 @@ impl Decodable for AttestationRecord {
let (justified_block_hash, i) = Hash256::ssz_decode(bytes, i)?;
// Do aggregate sig decoding properly.
let (agg_sig_bytes, i) = decode_ssz_list(bytes, i)?;
let aggregate_sig = AggregateSignature::from_bytes(&agg_sig_bytes)
.map_err(|_| DecodeError::TooShort)?; // also could be TooLong
let aggregate_sig =
AggregateSignature::from_bytes(&agg_sig_bytes).map_err(|_| DecodeError::TooShort)?; // also could be TooLong
let attestation_record = Self {
slot,
@ -92,11 +81,10 @@ impl AttestationRecord {
}
}
#[cfg(test)]
mod tests {
use super::*;
use super::super::ssz::SszStream;
use super::*;
#[test]
pub fn test_attestation_record_min_ssz_length() {
@ -124,11 +112,13 @@ mod tests {
let mut ssz_stream = SszStream::new();
ssz_stream.append(&original);
let (decoded, _) = AttestationRecord::
ssz_decode(&ssz_stream.drain(), 0).unwrap();
let (decoded, _) = AttestationRecord::ssz_decode(&ssz_stream.drain(), 0).unwrap();
assert_eq!(original.slot, decoded.slot);
assert_eq!(original.shard_id, decoded.shard_id);
assert_eq!(original.oblique_parent_hashes, decoded.oblique_parent_hashes);
assert_eq!(
original.oblique_parent_hashes,
decoded.oblique_parent_hashes
);
assert_eq!(original.shard_block_hash, decoded.shard_block_hash);
assert_eq!(original.attester_bitfield, decoded.attester_bitfield);
assert_eq!(original.justified_slot, decoded.justified_slot);

View File

@ -1,12 +1,7 @@
use super::Hash256;
use super::attestation_record::AttestationRecord;
use super::special_record::SpecialRecord;
use super::ssz::{
Encodable,
Decodable,
DecodeError,
SszStream,
};
use super::ssz::{Decodable, DecodeError, Encodable, SszStream};
use super::Hash256;
pub const MIN_SSZ_BLOCK_LENGTH: usize = {
8 + // slot
@ -16,7 +11,7 @@ pub const MIN_SSZ_BLOCK_LENGTH: usize = {
32 + // active_state_root
32 + // crystallized_state_root
4 + // attestations (assuming empty)
4 // specials (assuming empty)
4 // specials (assuming empty)
};
pub const MAX_SSZ_BLOCK_LENGTH: usize = MIN_SSZ_BLOCK_LENGTH + (1 << 24);
@ -68,9 +63,7 @@ impl Encodable for BeaconBlock {
}
impl Decodable for BeaconBlock {
fn ssz_decode(bytes: &[u8], i: usize)
-> Result<(Self, usize), DecodeError>
{
fn ssz_decode(bytes: &[u8], i: usize) -> Result<(Self, usize), DecodeError> {
let (slot, i) = u64::ssz_decode(bytes, i)?;
let (randao_reveal, i) = Hash256::ssz_decode(bytes, i)?;
let (pow_chain_reference, i) = Hash256::ssz_decode(bytes, i)?;
@ -87,13 +80,12 @@ impl Decodable for BeaconBlock {
active_state_root,
crystallized_state_root,
attestations,
specials
specials,
};
Ok((block, i))
}
}
#[cfg(test)]
mod tests {
use super::*;

View File

@ -21,7 +21,7 @@ impl ChainConfig {
pub fn standard() -> Self {
Self {
cycle_length: 64,
deposit_size_gwei: 32 * (10^9),
deposit_size_gwei: 32 * (10 ^ 9),
shard_count: 1024,
min_committee_size: 128,
max_validator_churn_quotient: 32,
@ -32,28 +32,26 @@ impl ChainConfig {
}
pub fn validate(&self) -> bool {
// criteria that ensure the config is valid
// criteria that ensure the config is valid
// shard_count / cycle_length > 0 otherwise validator delegation
// will fail.
if self.shard_count / u16::from(self.cycle_length) == 0 {
return false;
}
// shard_count / cycle_length > 0 otherwise validator delegation
// will fail.
if self.shard_count / u16::from(self.cycle_length) == 0 {
return false;
}
true
true
}
#[cfg(test)]
pub fn super_fast_tests() -> Self {
Self {
cycle_length: 2,
deposit_size_gwei: 32 * (10^9),
deposit_size_gwei: 32 * (10 ^ 9),
shard_count: 2,
min_committee_size: 2,
max_validator_churn_quotient: 32,
genesis_time: TEST_GENESIS_TIME, // arbitrary
genesis_time: TEST_GENESIS_TIME, // arbitrary
slot_duration_millis: 16 * 1000,
initial_validators: vec![],
}
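
A note on the `deposit_size_gwei: 32 * (10 ^ 9)` lines in the hunk above: the spacing added by rustfmt makes it easier to see that `^` is Rust's bitwise XOR operator, not exponentiation, so `10 ^ 9` evaluates to 3 and the whole expression to 96 rather than 32 billion. The following is a hypothetical correction for comparison only, not part of this commit; the field type is assumed to be u64 here.

    // `^` is bitwise XOR in Rust: 10 ^ 9 == 3, so 32 * (10 ^ 9) == 96.
    // If the intent is 32 ETH expressed in gwei, the value would instead be:
    let deposit_size_gwei: u64 = 32 * 10u64.pow(9);
    assert_eq!(deposit_size_gwei, 32_000_000_000);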

View File

@ -1,37 +1,33 @@
extern crate ethereum_types;
extern crate bls;
extern crate boolean_bitfield;
extern crate ethereum_types;
extern crate ssz;
pub mod active_state;
pub mod attestation_record;
pub mod crystallized_state;
pub mod chain_config;
pub mod beacon_block;
pub mod chain_config;
pub mod crosslink_record;
pub mod crystallized_state;
pub mod shard_and_committee;
pub mod special_record;
pub mod validator_record;
pub mod validator_registration;
use self::ethereum_types::{
H256,
H160,
U256,
};
use self::boolean_bitfield::BooleanBitfield;
use self::ethereum_types::{H160, H256, U256};
use std::collections::HashMap;
pub use active_state::ActiveState;
pub use attestation_record::AttestationRecord;
pub use crystallized_state::CrystallizedState;
pub use chain_config::ChainConfig;
pub use beacon_block::BeaconBlock;
pub use chain_config::ChainConfig;
pub use crosslink_record::CrosslinkRecord;
pub use crystallized_state::CrystallizedState;
pub use shard_and_committee::ShardAndCommittee;
pub use special_record::{ SpecialRecord, SpecialRecordKind };
pub use validator_record::{ ValidatorRecord, ValidatorStatus };
pub use validator_registration::{ ValidatorRegistration };
pub use special_record::{SpecialRecord, SpecialRecordKind};
pub use validator_record::{ValidatorRecord, ValidatorStatus};
pub use validator_registration::ValidatorRegistration;
pub type Hash256 = H256;
pub type Address = H160;

View File

@ -1,7 +1,7 @@
#[derive(Clone, Debug, PartialEq)]
pub struct ShardAndCommittee {
pub shard: u16,
pub committee: Vec<usize>
pub committee: Vec<usize>,
}
impl ShardAndCommittee {

View File

@ -1,10 +1,4 @@
use super::ssz::{
Encodable,
Decodable,
DecodeError,
SszStream,
};
use super::ssz::{Decodable, DecodeError, Encodable, SszStream};
/// The value of the "type" field of SpecialRecord.
///
@ -16,7 +10,6 @@ pub enum SpecialRecordKind {
RandaoChange = 2,
}
/// The structure used in the `BeaconBlock.specials` field.
#[derive(Debug, PartialEq, Clone)]
pub struct SpecialRecord {
@ -51,13 +44,14 @@ impl SpecialRecord {
/// Returns `None` if `self.kind` is an unknown value.
pub fn resolve_kind(&self) -> Option<SpecialRecordKind> {
match self.kind {
x if x == SpecialRecordKind::Logout as u8
=> Some(SpecialRecordKind::Logout),
x if x == SpecialRecordKind::CasperSlashing as u8
=> Some(SpecialRecordKind::CasperSlashing),
x if x == SpecialRecordKind::RandaoChange as u8
=> Some(SpecialRecordKind::RandaoChange),
_ => None
x if x == SpecialRecordKind::Logout as u8 => Some(SpecialRecordKind::Logout),
x if x == SpecialRecordKind::CasperSlashing as u8 => {
Some(SpecialRecordKind::CasperSlashing)
}
x if x == SpecialRecordKind::RandaoChange as u8 => {
Some(SpecialRecordKind::RandaoChange)
}
_ => None,
}
}
}
@ -70,16 +64,13 @@ impl Encodable for SpecialRecord {
}
impl Decodable for SpecialRecord {
fn ssz_decode(bytes: &[u8], i: usize)
-> Result<(Self, usize), DecodeError>
{
fn ssz_decode(bytes: &[u8], i: usize) -> Result<(Self, usize), DecodeError> {
let (kind, i) = u8::ssz_decode(bytes, i)?;
let (data, i) = Decodable::ssz_decode(bytes, i)?;
Ok((SpecialRecord{kind, data}, i))
Ok((SpecialRecord { kind, data }, i))
}
}
#[cfg(test)]
mod tests {
use super::*;
@ -132,7 +123,10 @@ mod tests {
let s = SpecialRecord::randao_change(&vec![]);
assert_eq!(s.resolve_kind(), Some(SpecialRecordKind::RandaoChange));
let s = SpecialRecord { kind: 88, data: vec![] };
let s = SpecialRecord {
kind: 88,
data: vec![],
};
assert_eq!(s.resolve_kind(), None);
}
}

View File

@ -1,20 +1,14 @@
use super::{
Hash256,
Address,
};
use super::bls::{
PublicKey,
Keypair
};
use super::bls::{Keypair, PublicKey};
use super::{Address, Hash256};
#[derive(Debug, PartialEq, Clone, Copy)]
pub enum ValidatorStatus {
PendingActivation = 0,
Active = 1,
PendingExit = 2,
PendingWithdraw = 3,
Withdrawn = 5,
Penalized = 127,
PendingActivation = 0,
Active = 1,
PendingExit = 2,
PendingWithdraw = 3,
Withdrawn = 5,
Penalized = 127,
}
#[derive(Debug, Clone, PartialEq)]
@ -50,7 +44,6 @@ impl ValidatorRecord {
}
}
#[cfg(test)]
mod tests {
use super::*;

View File

@ -1,14 +1,5 @@
use bls::{
create_proof_of_possession,
Keypair,
PublicKey,
Signature,
};
use super::{
Address,
Hash256,
};
use super::{Address, Hash256};
use bls::{create_proof_of_possession, Keypair, PublicKey, Signature};
/// The information gathered from the PoW chain validator registration function.
#[derive(Debug, Clone, PartialEq)]

View File

@ -1,19 +1,15 @@
extern crate types;
use types::{
ValidatorRecord,
ValidatorStatus,
};
use types::{ValidatorRecord, ValidatorStatus};
pub fn validator_is_active(v: &ValidatorRecord) -> bool {
v.status == ValidatorStatus::Active as u8
}
/// Returns the indicies of each active validator in a given vec of validators.
pub fn active_validator_indices(validators: &[ValidatorRecord])
-> Vec<usize>
{
validators.iter()
pub fn active_validator_indices(validators: &[ValidatorRecord]) -> Vec<usize> {
validators
.iter()
.enumerate()
.filter_map(|(i, validator)| {
if validator_is_active(&validator) {
@ -21,8 +17,7 @@ pub fn active_validator_indices(validators: &[ValidatorRecord])
} else {
None
}
})
.collect()
}).collect()
}
#[cfg(test)]

View File

@ -1,12 +1,12 @@
extern crate bls_aggregates;
extern crate hashing;
pub use self::bls_aggregates::AggregateSignature;
pub use self::bls_aggregates::AggregatePublicKey;
pub use self::bls_aggregates::Signature;
pub use self::bls_aggregates::AggregateSignature;
pub use self::bls_aggregates::Keypair;
pub use self::bls_aggregates::PublicKey;
pub use self::bls_aggregates::SecretKey;
pub use self::bls_aggregates::Signature;
pub const BLS_AGG_SIG_BYTE_SIZE: usize = 97;
@ -14,16 +14,12 @@ use hashing::proof_of_possession_hash;
/// For some signature and public key, ensure that the signature message was the public key and it
/// was signed by the secret key that corresponds to that public key.
pub fn verify_proof_of_possession(sig: &Signature, pubkey: &PublicKey)
-> bool
{
pub fn verify_proof_of_possession(sig: &Signature, pubkey: &PublicKey) -> bool {
let hash = proof_of_possession_hash(&pubkey.as_bytes());
sig.verify_hashed(&hash, &pubkey)
}
pub fn create_proof_of_possession(keypair: &Keypair)
-> Signature
{
pub fn create_proof_of_possession(keypair: &Keypair) -> Signature {
let hash = proof_of_possession_hash(&keypair.pk.as_bytes());
Signature::new_hashed(&hash, &keypair.sk)
}

View File

@ -11,9 +11,9 @@ extern crate ssz;
use std::cmp::max;
#[derive(Eq, Clone, Default, Debug)]
pub struct BooleanBitfield{
pub struct BooleanBitfield {
len: usize,
vec: Vec<u8>
vec: Vec<u8>,
}
impl BooleanBitfield {
@ -21,7 +21,7 @@ impl BooleanBitfield {
pub fn new() -> Self {
Self {
len: 0,
vec: vec![0]
vec: vec![0],
}
}
@ -29,10 +29,7 @@ impl BooleanBitfield {
pub fn with_capacity(capacity: usize) -> Self {
let mut vec = Vec::with_capacity(capacity / 8 + 1);
vec.push(0);
Self {
len: 0,
vec
}
Self { len: 0, vec }
}
/// Read the value of a bit.
@ -46,7 +43,7 @@ impl BooleanBitfield {
if byte(i) >= self.vec.len() {
false
} else {
self.vec[byte(i)] & (1 << (bit(i) as u8)) != 0
self.vec[byte(i)] & (1 << (bit(i) as u8)) != 0
}
}
@ -64,11 +61,9 @@ impl BooleanBitfield {
self.vec.resize(byte(i) + 1, 0);
}
if to {
self.vec[byte(i)] =
self.vec[byte(i)] | (1 << (bit(i) as u8))
self.vec[byte(i)] = self.vec[byte(i)] | (1 << (bit(i) as u8))
} else {
self.vec[byte(i)] =
self.vec[byte(i)] & !(1 << (bit(i) as u8))
self.vec[byte(i)] = self.vec[byte(i)] & !(1 << (bit(i) as u8))
}
}
@ -77,17 +72,23 @@ impl BooleanBitfield {
///
/// Note: this is distinct from the length of the underlying
/// vector.
pub fn len(&self) -> usize { self.len }
pub fn len(&self) -> usize {
self.len
}
/// True if no bits have ever been set. A bit that is set and then
/// unset will still count to the length of the bitfield.
///
/// Note: this is distinct from the length of the underlying
/// vector.
pub fn is_empty(&self) -> bool { self.len == 0 }
pub fn is_empty(&self) -> bool {
self.len == 0
}
/// The number of bytes required to represent the bitfield.
pub fn num_bytes(&self) -> usize { self.vec.len() }
pub fn num_bytes(&self) -> usize {
self.vec.len()
}
/// Iterate through the underlying vector and count the number of
/// true bits.
@ -110,7 +111,7 @@ impl BooleanBitfield {
for byte in (0..bytes.len()).rev() {
for bit in (0..8).rev() {
if bytes[byte] & (1 << (bit as u8)) != 0 {
return (byte * 8) + bit + 1
return (byte * 8) + bit + 1;
}
}
}
@ -141,15 +142,14 @@ impl<'a> From<&'a [u8]> for BooleanBitfield {
vec.reverse();
BooleanBitfield {
vec,
len: BooleanBitfield::compute_length(input)
len: BooleanBitfield::compute_length(input),
}
}
}
impl PartialEq for BooleanBitfield {
fn eq(&self, other: &BooleanBitfield) -> bool {
(self.vec == other.vec) &
(self.len == other.len)
(self.vec == other.vec) & (self.len == other.len)
}
}
@ -160,29 +160,21 @@ impl ssz::Encodable for BooleanBitfield {
}
impl ssz::Decodable for BooleanBitfield {
fn ssz_decode(bytes: &[u8], index: usize)
-> Result<(Self, usize), ssz::DecodeError>
{
let len = ssz::decode::decode_length(
bytes,
index,
ssz::LENGTH_BYTES)?;
fn ssz_decode(bytes: &[u8], index: usize) -> Result<(Self, usize), ssz::DecodeError> {
let len = ssz::decode::decode_length(bytes, index, ssz::LENGTH_BYTES)?;
if (ssz::LENGTH_BYTES + len) > bytes.len() {
return Err(ssz::DecodeError::TooShort);
}
if len == 0 {
Ok((BooleanBitfield::new(),
index + ssz::LENGTH_BYTES))
Ok((BooleanBitfield::new(), index + ssz::LENGTH_BYTES))
} else {
let b = BooleanBitfield::
from(&bytes[(index + 4)..(index + len + 4)]);
let b = BooleanBitfield::from(&bytes[(index + 4)..(index + len + 4)]);
let index = index + ssz::LENGTH_BYTES + len;
Ok((b, index))
}
}
}
#[cfg(test)]
mod tests {
use super::*;

View File

@ -2,27 +2,28 @@
///
/// We have titled it the "honey badger split" because of its robustness. It don't care.
/// Iterator for the honey_badger_split function
pub struct Split<'a, T: 'a> {
n: usize,
current_pos: usize,
list: &'a [T],
list_length: usize
list_length: usize,
}
impl<'a,T> Iterator for Split<'a, T> {
impl<'a, T> Iterator for Split<'a, T> {
type Item = &'a [T];
fn next(&mut self) -> Option<Self::Item> {
self.current_pos +=1;
self.current_pos += 1;
if self.current_pos <= self.n {
match self.list.get(self.list_length*(self.current_pos-1)/self.n..self.list_length*self.current_pos/self.n) {
match self.list.get(
self.list_length * (self.current_pos - 1) / self.n
..self.list_length * self.current_pos / self.n,
) {
Some(v) => Some(v),
None => unreachable!()
None => unreachable!(),
}
}
else {
} else {
None
}
}
@ -37,7 +38,6 @@ pub trait SplitExt<T> {
}
impl<T> SplitExt<T> for [T] {
fn honey_badger_split(&self, n: usize) -> Split<T> {
Split {
n,

View File

@ -1,29 +1,23 @@
use std::time::{
Duration,
SystemTime,
SystemTimeError,
};
use std::time::{Duration, SystemTime, SystemTimeError};
pub fn slot_now(genesis_seconds: u64, slot_duration_seconds: u64)
-> Result<Option<u64>, SystemTimeError>
{
pub fn slot_now(
genesis_seconds: u64,
slot_duration_seconds: u64,
) -> Result<Option<u64>, SystemTimeError> {
let sys_time = SystemTime::now();
let duration_since_epoch = sys_time.duration_since(SystemTime::UNIX_EPOCH)?;
let duration_since_genesis = duration_since_epoch
.checked_sub(Duration::from_secs(genesis_seconds));
let duration_since_genesis =
duration_since_epoch.checked_sub(Duration::from_secs(genesis_seconds));
match duration_since_genesis {
None => Ok(None),
Some(d) => Ok(slot_from_duration(slot_duration_seconds, d))
Some(d) => Ok(slot_from_duration(slot_duration_seconds, d)),
}
}
fn slot_from_duration(slot_duration_seconds: u64, duration: Duration)
-> Option<u64>
{
fn slot_from_duration(slot_duration_seconds: u64, duration: Duration) -> Option<u64> {
duration.as_secs().checked_div(slot_duration_seconds)
}
#[cfg(test)]
mod tests {
use super::*;
@ -55,9 +49,18 @@ mod tests {
assert_eq!(slot_from_duration(s_time, Duration::from_secs(0)), Some(0));
assert_eq!(slot_from_duration(s_time, Duration::from_secs(10)), Some(0));
assert_eq!(slot_from_duration(s_time, Duration::from_secs(100)), Some(1));
assert_eq!(slot_from_duration(s_time, Duration::from_secs(101)), Some(1));
assert_eq!(slot_from_duration(s_time, Duration::from_secs(1000)), Some(10));
assert_eq!(
slot_from_duration(s_time, Duration::from_secs(100)),
Some(1)
);
assert_eq!(
slot_from_duration(s_time, Duration::from_secs(101)),
Some(1)
);
assert_eq!(
slot_from_duration(s_time, Duration::from_secs(1000)),
Some(10)
);
}
#[test]

View File

@ -1,6 +1,4 @@
use super::{
LENGTH_BYTES,
};
use super::LENGTH_BYTES;
#[derive(Debug, PartialEq)]
pub enum DecodeError {
@ -16,12 +14,12 @@ pub trait Decodable: Sized {
///
/// The single ssz encoded value will be decoded as the given type at the
/// given index.
pub fn decode_ssz<T>(ssz_bytes: &[u8], index: usize)
-> Result<(T, usize), DecodeError>
where T: Decodable
pub fn decode_ssz<T>(ssz_bytes: &[u8], index: usize) -> Result<(T, usize), DecodeError>
where
T: Decodable,
{
if index >= ssz_bytes.len() {
return Err(DecodeError::TooShort)
return Err(DecodeError::TooShort);
}
T::ssz_decode(ssz_bytes, index)
}
@ -29,11 +27,10 @@ pub fn decode_ssz<T>(ssz_bytes: &[u8], index: usize)
/// Decode a vector (list) of encoded bytes.
///
/// Each element in the list will be decoded and placed into the vector.
pub fn decode_ssz_list<T>(ssz_bytes: &[u8], index: usize)
-> Result<(Vec<T>, usize), DecodeError>
where T: Decodable
pub fn decode_ssz_list<T>(ssz_bytes: &[u8], index: usize) -> Result<(Vec<T>, usize), DecodeError>
where
T: Decodable,
{
if index + LENGTH_BYTES > ssz_bytes.len() {
return Err(DecodeError::TooShort);
};
@ -59,10 +56,9 @@ pub fn decode_ssz_list<T>(ssz_bytes: &[u8], index: usize)
Ok(v) => {
tmp_index = v.1;
res_vec.push(v.0);
},
}
};
};
}
Ok((res_vec, final_len))
}
@ -70,15 +66,22 @@ pub fn decode_ssz_list<T>(ssz_bytes: &[u8], index: usize)
/// Given some number of bytes, interpret the first four
/// bytes as a 32-bit big-endian integer and return the
/// result.
pub fn decode_length(bytes: &[u8], index: usize, length_bytes: usize)
-> Result<usize, DecodeError>
{
pub fn decode_length(
bytes: &[u8],
index: usize,
length_bytes: usize,
) -> Result<usize, DecodeError> {
if bytes.len() < index + length_bytes {
return Err(DecodeError::TooShort);
};
let mut len: usize = 0;
for (i, byte) in bytes.iter().enumerate().take(index+length_bytes).skip(index) {
let offset = (index+length_bytes - i - 1) * 8;
for (i, byte) in bytes
.iter()
.enumerate()
.take(index + length_bytes)
.skip(index)
{
let offset = (index + length_bytes - i - 1) * 8;
len |= (*byte as usize) << offset;
}
Ok(len)
@ -86,50 +89,44 @@ pub fn decode_length(bytes: &[u8], index: usize, length_bytes: usize)
#[cfg(test)]
mod tests {
use super::*;
use super::super::encode::encode_length;
use super::*;
#[test]
fn test_ssz_decode_length() {
let decoded = decode_length(
&vec![0, 0, 0, 1],
0,
LENGTH_BYTES);
let decoded = decode_length(&vec![0, 0, 0, 1], 0, LENGTH_BYTES);
assert_eq!(decoded.unwrap(), 1);
let decoded = decode_length(
&vec![0, 0, 1, 0],
0,
LENGTH_BYTES);
let decoded = decode_length(&vec![0, 0, 1, 0], 0, LENGTH_BYTES);
assert_eq!(decoded.unwrap(), 256);
let decoded = decode_length(
&vec![0, 0, 1, 255],
0,
LENGTH_BYTES);
let decoded = decode_length(&vec![0, 0, 1, 255], 0, LENGTH_BYTES);
assert_eq!(decoded.unwrap(), 511);
let decoded = decode_length(
&vec![255, 255, 255, 255],
0,
LENGTH_BYTES);
let decoded = decode_length(&vec![255, 255, 255, 255], 0, LENGTH_BYTES);
assert_eq!(decoded.unwrap(), 4294967295);
}
#[test]
fn test_encode_decode_length() {
let params: Vec<usize> = vec![
0, 1, 2, 3, 7, 8, 16,
2^8, 2^8 + 1,
2^16, 2^16 + 1,
2^24, 2^24 + 1,
2^32,
0,
1,
2,
3,
7,
8,
16,
2 ^ 8,
2 ^ 8 + 1,
2 ^ 16,
2 ^ 16 + 1,
2 ^ 24,
2 ^ 24 + 1,
2 ^ 32,
];
for i in params {
let decoded = decode_length(
&encode_length(i, LENGTH_BYTES),
0,
LENGTH_BYTES).unwrap();
let decoded = decode_length(&encode_length(i, LENGTH_BYTES), 0, LENGTH_BYTES).unwrap();
assert_eq!(i, decoded);
}
}
@ -138,10 +135,8 @@ mod tests {
fn test_decode_ssz_list() {
// u16
let v: Vec<u16> = vec![10, 10, 10, 10];
let decoded: (Vec<u16>, usize) = decode_ssz_list(
&vec![0, 0, 0, 8, 0, 10, 0, 10, 0, 10, 0, 10],
0
).unwrap();
let decoded: (Vec<u16>, usize) =
decode_ssz_list(&vec![0, 0, 0, 8, 0, 10, 0, 10, 0, 10, 0, 10], 0).unwrap();
assert_eq!(decoded.0, v);
assert_eq!(decoded.1, 12);
@ -150,60 +145,45 @@ mod tests {
let v: Vec<u32> = vec![10, 10, 10, 10];
let decoded: (Vec<u32>, usize) = decode_ssz_list(
&vec![
0, 0, 0, 16,
0, 0, 0, 10, 0, 0, 0, 10, 0, 0, 0, 10, 0, 0, 0, 10
0, 0, 0, 16, 0, 0, 0, 10, 0, 0, 0, 10, 0, 0, 0, 10, 0, 0, 0, 10,
],
0
0,
).unwrap();
assert_eq!(decoded.0, v);
assert_eq!(decoded.1, 20);
// u64
let v: Vec<u64> = vec![10,10,10,10];
let v: Vec<u64> = vec![10, 10, 10, 10];
let decoded: (Vec<u64>, usize) = decode_ssz_list(
&vec![0, 0, 0, 32,
0, 0, 0, 0, 0, 0, 0, 10,
0, 0, 0, 0, 0, 0, 0, 10,
0, 0, 0, 0, 0, 0, 0, 10,
0, 0, 0, 0, 0, 0, 0, 10,
&vec![
0, 0, 0, 32, 0, 0, 0, 0, 0, 0, 0, 10, 0, 0, 0, 0, 0, 0, 0, 10, 0, 0, 0, 0, 0, 0, 0,
10, 0, 0, 0, 0, 0, 0, 0, 10,
],
0
0,
).unwrap();
assert_eq!(decoded.0, v);
assert_eq!(decoded.1, 36);
// Check that it can accept index
let v: Vec<usize> = vec![15,15,15,15];
let v: Vec<usize> = vec![15, 15, 15, 15];
let decoded: (Vec<usize>, usize) = decode_ssz_list(
&vec![0, 1, 2, 3, 4, 5, 6, 7, 8, 9,
0, 0, 0, 32,
0, 0, 0, 0, 0, 0, 0, 15,
0, 0, 0, 0, 0, 0, 0, 15,
0, 0, 0, 0, 0, 0, 0, 15,
0, 0, 0, 0, 0, 0, 0, 15,
&vec![
0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 0, 0, 0, 32, 0, 0, 0, 0, 0, 0, 0, 15, 0, 0, 0, 0, 0,
0, 0, 15, 0, 0, 0, 0, 0, 0, 0, 15, 0, 0, 0, 0, 0, 0, 0, 15,
],
10
10,
).unwrap();
assert_eq!(decoded.0, v);
assert_eq!(decoded.1, 46);
// Check that length > bytes throws error
let decoded: Result<(Vec<usize>, usize), DecodeError> = decode_ssz_list(
&vec![0, 0, 0, 32,
0, 0, 0, 0, 0, 0, 0, 15,
],
0
);
let decoded: Result<(Vec<usize>, usize), DecodeError> =
decode_ssz_list(&vec![0, 0, 0, 32, 0, 0, 0, 0, 0, 0, 0, 15], 0);
assert_eq!(decoded, Err(DecodeError::TooShort));
// Check that incorrect index throws error
let decoded: Result<(Vec<usize>, usize), DecodeError> = decode_ssz_list(
&vec![
0, 0, 0, 0, 0, 0, 0, 15,
],
16
);
let decoded: Result<(Vec<usize>, usize), DecodeError> =
decode_ssz_list(&vec![0, 0, 0, 0, 0, 0, 0, 15], 16);
assert_eq!(decoded, Err(DecodeError::TooShort));
}
}

View File

@ -1,6 +1,4 @@
use super::{
LENGTH_BYTES
};
use super::LENGTH_BYTES;
pub trait Encodable {
fn ssz_append(&self, s: &mut SszStream);
@ -13,20 +11,19 @@ pub trait Encodable {
/// ssz encoded bytes.
#[derive(Default)]
pub struct SszStream {
buffer: Vec<u8>
buffer: Vec<u8>,
}
impl SszStream {
/// Create a new, empty stream for writing ssz values.
pub fn new() -> Self {
SszStream {
buffer: Vec::new()
}
SszStream { buffer: Vec::new() }
}
/// Append some ssz encodable value to the stream.
pub fn append<E>(&mut self, value: &E) -> &mut Self
where E: Encodable
where
E: Encodable,
{
value.ssz_append(self);
self
@ -37,9 +34,8 @@ impl SszStream {
/// The length of the supplied bytes will be concatenated
/// to the stream before the supplied bytes.
pub fn append_encoded_val(&mut self, vec: &[u8]) {
self.buffer.extend_from_slice(
&encode_length(vec.len(),
LENGTH_BYTES));
self.buffer
.extend_from_slice(&encode_length(vec.len(), LENGTH_BYTES));
self.buffer.extend_from_slice(&vec);
}
@ -55,7 +51,8 @@ impl SszStream {
/// The length of the list will be concatenated to the stream, then
/// each item in the vector will be encoded and concatenated.
pub fn append_vec<E>(&mut self, vec: &[E])
where E: Encodable
where
E: Encodable,
{
let mut list_stream = SszStream::new();
for item in vec {
@ -75,17 +72,16 @@ impl SszStream {
/// The ssz size prefix is 4 bytes, which is treated as a continuious
/// 32bit big-endian integer.
pub fn encode_length(len: usize, length_bytes: usize) -> Vec<u8> {
assert!(length_bytes > 0); // For sanity
assert!(length_bytes > 0); // For sanity
assert!((len as usize) < 2usize.pow(length_bytes as u32 * 8));
let mut header: Vec<u8> = vec![0; length_bytes];
for (i, header_byte) in header.iter_mut().enumerate() {
let offset = (length_bytes - i - 1) * 8;
*header_byte = ((len >> offset) & 0xff) as u8;
};
}
header
}
#[cfg(test)]
mod tests {
use super::*;
@ -98,24 +94,12 @@ mod tests {
#[test]
fn test_encode_length_4_bytes() {
assert_eq!(encode_length(0, LENGTH_BYTES), vec![0; 4]);
assert_eq!(encode_length(1, LENGTH_BYTES), vec![0, 0, 0, 1]);
assert_eq!(encode_length(255, LENGTH_BYTES), vec![0, 0, 0, 255]);
assert_eq!(encode_length(256, LENGTH_BYTES), vec![0, 0, 1, 0]);
assert_eq!(
encode_length(0, LENGTH_BYTES),
vec![0; 4]
);
assert_eq!(
encode_length(1, LENGTH_BYTES),
vec![0, 0, 0, 1]
);
assert_eq!(
encode_length(255, LENGTH_BYTES),
vec![0, 0, 0, 255]
);
assert_eq!(
encode_length(256, LENGTH_BYTES),
vec![0, 0, 1, 0]
);
assert_eq!(
encode_length(4294967295, LENGTH_BYTES), // 2^(3*8) - 1
encode_length(4294967295, LENGTH_BYTES), // 2^(3*8) - 1
vec![255, 255, 255, 255]
);
}
@ -123,7 +107,7 @@ mod tests {
#[test]
#[should_panic]
fn test_encode_length_4_bytes_panic() {
encode_length(4294967296, LENGTH_BYTES); // 2^(3*8)
encode_length(4294967296, LENGTH_BYTES); // 2^(3*8)
}
#[test]

View File

@ -1,20 +1,12 @@
use super::ethereum_types::H256;
use super::decode::decode_ssz_list;
use super::{
DecodeError,
Decodable,
};
use super::ethereum_types::H256;
use super::{Decodable, DecodeError};
macro_rules! impl_decodable_for_uint {
($type: ident, $bit_size: expr) => {
impl Decodable for $type {
fn ssz_decode(bytes: &[u8], index: usize)
-> Result<(Self, usize), DecodeError>
{
assert!((0 < $bit_size) &
($bit_size <= 64) &
($bit_size % 8 == 0));
fn ssz_decode(bytes: &[u8], index: usize) -> Result<(Self, usize), DecodeError> {
assert!((0 < $bit_size) & ($bit_size <= 64) & ($bit_size % 8 == 0));
let max_bytes = $bit_size / 8;
if bytes.len() >= (index + max_bytes) {
let end_bytes = index + max_bytes;
@ -29,7 +21,7 @@ macro_rules! impl_decodable_for_uint {
}
}
}
}
};
}
impl_decodable_for_uint!(u16, 16);
@ -38,9 +30,7 @@ impl_decodable_for_uint!(u64, 64);
impl_decodable_for_uint!(usize, 64);
impl Decodable for u8 {
fn ssz_decode(bytes: &[u8], index: usize)
-> Result<(Self, usize), DecodeError>
{
fn ssz_decode(bytes: &[u8], index: usize) -> Result<(Self, usize), DecodeError> {
if index >= bytes.len() {
Err(DecodeError::TooShort)
} else {
@ -50,35 +40,28 @@ impl Decodable for u8 {
}
impl Decodable for H256 {
fn ssz_decode(bytes: &[u8], index: usize)
-> Result<(Self, usize), DecodeError>
{
fn ssz_decode(bytes: &[u8], index: usize) -> Result<(Self, usize), DecodeError> {
if bytes.len() < 32 || bytes.len() - 32 < index {
Err(DecodeError::TooShort)
}
else {
} else {
Ok((H256::from(&bytes[index..(index + 32)]), index + 32))
}
}
}
impl<T> Decodable for Vec<T>
where T: Decodable
where
T: Decodable,
{
fn ssz_decode(bytes: &[u8], index: usize)
-> Result<(Self, usize), DecodeError>
{
fn ssz_decode(bytes: &[u8], index: usize) -> Result<(Self, usize), DecodeError> {
decode_ssz_list(bytes, index)
}
}
#[cfg(test)]
mod tests {
use super::super::{decode_ssz, DecodeError};
use super::*;
use super::super::{
DecodeError,
decode_ssz,
};
#[test]
fn test_ssz_decode_h256() {
@ -131,8 +114,7 @@ mod tests {
assert_eq!(result, 65535);
let ssz = vec![1];
let result: Result<(u16, usize), DecodeError> =
decode_ssz(&ssz, 0);
let result: Result<(u16, usize), DecodeError> = decode_ssz(&ssz, 0);
assert_eq!(result, Err(DecodeError::TooShort));
}
@ -153,7 +135,7 @@ mod tests {
assert_eq!(index, 7);
assert_eq!(result, 256);
let ssz = vec![0,200, 1, 0];
let ssz = vec![0, 200, 1, 0];
let (result, index): (u32, usize) = decode_ssz(&ssz, 0).unwrap();
assert_eq!(index, 4);
assert_eq!(result, 13107456);
@ -164,8 +146,7 @@ mod tests {
assert_eq!(result, 4294967295);
let ssz = vec![0, 0, 1];
let result: Result<(u32, usize), DecodeError> =
decode_ssz(&ssz, 0);
let result: Result<(u32, usize), DecodeError> = decode_ssz(&ssz, 0);
assert_eq!(result, Err(DecodeError::TooShort));
}
@ -186,9 +167,8 @@ mod tests {
assert_eq!(index, 11);
assert_eq!(result, 18374686479671623680);
let ssz = vec![0,0,0,0,0,0,0];
let result: Result<(u64, usize), DecodeError> =
decode_ssz(&ssz, 0);
let ssz = vec![0, 0, 0, 0, 0, 0, 0];
let result: Result<(u64, usize), DecodeError> = decode_ssz(&ssz, 0);
assert_eq!(result, Err(DecodeError::TooShort));
}
@ -210,29 +190,19 @@ mod tests {
assert_eq!(result, 18446744073709551615);
let ssz = vec![0, 0, 0, 0, 0, 0, 1];
let result: Result<(usize, usize), DecodeError> =
decode_ssz(&ssz, 0);
let result: Result<(usize, usize), DecodeError> = decode_ssz(&ssz, 0);
assert_eq!(result, Err(DecodeError::TooShort));
}
#[test]
fn test_decode_ssz_bounds() {
let err: Result<(u16, usize), DecodeError> = decode_ssz(
&vec![1],
2
);
let err: Result<(u16, usize), DecodeError> = decode_ssz(&vec![1], 2);
assert_eq!(err, Err(DecodeError::TooShort));
let err: Result<(u16,usize), DecodeError> = decode_ssz(
&vec![0, 0, 0, 0],
3
);
let err: Result<(u16, usize), DecodeError> = decode_ssz(&vec![0, 0, 0, 0], 3);
assert_eq!(err, Err(DecodeError::TooShort));
let result: u16 = decode_ssz(
&vec![0,0,0,0,1],
3
).unwrap().0;
let result: u16 = decode_ssz(&vec![0, 0, 0, 0, 1], 3).unwrap().0;
assert_eq!(result, 1);
}
}

View File

@ -1,11 +1,8 @@
extern crate bytes;
use super::{
Encodable,
SszStream
};
use self::bytes::{BufMut, BytesMut};
use super::ethereum_types::H256;
use self::bytes::{ BytesMut, BufMut };
use super::{Encodable, SszStream};
/*
* Note: there is a "to_bytes" function for integers
@ -18,12 +15,14 @@ macro_rules! impl_encodable_for_uint {
#[allow(cast_lossless)]
fn ssz_append(&self, s: &mut SszStream) {
// Ensure bit size is valid
assert!((0 < $bit_size) &&
($bit_size % 8 == 0) &&
(2_u128.pow($bit_size) > *self as u128));
assert!(
(0 < $bit_size)
&& ($bit_size % 8 == 0)
&& (2_u128.pow($bit_size) > *self as u128)
);
// Serialize to bytes
let mut buf = BytesMut::with_capacity($bit_size/8);
let mut buf = BytesMut::with_capacity($bit_size / 8);
// Match bit size with encoding
match $bit_size {
@ -31,14 +30,14 @@ macro_rules! impl_encodable_for_uint {
16 => buf.put_u16_be(*self as u16),
32 => buf.put_u32_be(*self as u32),
64 => buf.put_u64_be(*self as u64),
_ => { ; }
_ => {}
}
// Append bytes to the SszStream
s.append_encoded_raw(&buf.to_vec());
}
}
}
};
}
impl_encodable_for_uint!(u8, 8);
@ -53,7 +52,6 @@ impl Encodable for H256 {
}
}
#[cfg(test)]
mod tests {
use super::*;

View File

@ -13,27 +13,19 @@ extern crate ethereum_types;
pub mod decode;
pub mod encode;
mod impl_encode;
mod impl_decode;
mod impl_encode;
pub use decode::{
Decodable,
DecodeError,
decode_ssz,
decode_ssz_list,
};
pub use encode::{
Encodable,
SszStream,
};
pub use decode::{decode_ssz, decode_ssz_list, Decodable, DecodeError};
pub use encode::{Encodable, SszStream};
pub const LENGTH_BYTES: usize = 4;
pub const MAX_LIST_SIZE : usize = 1 << (4 * 8);
pub const MAX_LIST_SIZE: usize = 1 << (4 * 8);
/// Convenience function to SSZ encode an object supporting ssz::Encode.
pub fn ssz_encode<T>(val: &T) -> Vec<u8>
where T: Encodable
where
T: Encodable,
{
let mut ssz_stream = SszStream::new();
ssz_stream.append(val);

View File

@ -1,6 +1,6 @@
use super::types::attestation_record::MIN_SSZ_ATTESTION_RECORD_LENGTH as MIN_LENGTH;
use super::ssz::LENGTH_BYTES;
use super::ssz::decode::decode_length;
use super::ssz::LENGTH_BYTES;
use super::types::attestation_record::MIN_SSZ_ATTESTION_RECORD_LENGTH as MIN_LENGTH;
#[derive(Debug, PartialEq)]
pub enum AttestationSplitError {
@ -9,9 +9,10 @@ pub enum AttestationSplitError {
/// Given some ssz slice, find the bounds of each serialized AttestationRecord and return a vec of
/// slices point to each.
pub fn split_all_attestations<'a>(full_ssz: &'a [u8], index: usize)
-> Result<Vec<&'a [u8]>, AttestationSplitError>
{
pub fn split_all_attestations<'a>(
full_ssz: &'a [u8],
index: usize,
) -> Result<Vec<&'a [u8]>, AttestationSplitError> {
let mut v = vec![];
let mut index = index;
while index < full_ssz.len() - 1 {
@ -24,9 +25,10 @@ pub fn split_all_attestations<'a>(full_ssz: &'a [u8], index: usize)
/// Given some ssz slice, find the bounds of one serialized AttestationRecord
/// and return a slice pointing to that.
pub fn split_one_attestation(full_ssz: &[u8], index: usize)
-> Result<(&[u8], usize), AttestationSplitError>
{
pub fn split_one_attestation(
full_ssz: &[u8],
index: usize,
) -> Result<(&[u8], usize), AttestationSplitError> {
if full_ssz.len() < MIN_LENGTH {
return Err(AttestationSplitError::TooShort);
}
@ -34,15 +36,11 @@ pub fn split_one_attestation(full_ssz: &[u8], index: usize)
let hashes_len = decode_length(full_ssz, index + 10, LENGTH_BYTES)
.map_err(|_| AttestationSplitError::TooShort)?;
let bitfield_len = decode_length(
full_ssz, index + hashes_len + 46,
LENGTH_BYTES)
let bitfield_len = decode_length(full_ssz, index + hashes_len + 46, LENGTH_BYTES)
.map_err(|_| AttestationSplitError::TooShort)?;
// Subtract one because the min length assumes 1 byte of bitfield
let len = MIN_LENGTH - 1
+ hashes_len
+ bitfield_len;
let len = MIN_LENGTH - 1 + hashes_len + bitfield_len;
if full_ssz.len() < index + len {
return Err(AttestationSplitError::TooShort);
@ -53,17 +51,10 @@ pub fn split_one_attestation(full_ssz: &[u8], index: usize)
#[cfg(test)]
mod tests {
use super::*;
use super::super::types::{
AttestationRecord,
Hash256,
Bitfield,
};
use super::super::bls::AggregateSignature;
use super::super::ssz::{
SszStream,
Decodable,
};
use super::super::ssz::{Decodable, SszStream};
use super::super::types::{AttestationRecord, Bitfield, Hash256};
use super::*;
fn get_two_records() -> Vec<AttestationRecord> {
let a = AttestationRecord {
@ -95,7 +86,6 @@ mod tests {
let a = ars[0].clone();
let b = ars[1].clone();
/*
* Test split one
*/
@ -104,8 +94,7 @@ mod tests {
let ssz = ssz_stream.drain();
let (a_ssz, i) = split_one_attestation(&ssz, 0).unwrap();
assert_eq!(i, ssz.len());
let (decoded_a, _) = AttestationRecord::ssz_decode(a_ssz, 0)
.unwrap();
let (decoded_a, _) = AttestationRecord::ssz_decode(a_ssz, 0).unwrap();
assert_eq!(a, decoded_a);
/*
@ -116,12 +105,8 @@ mod tests {
ssz_stream.append(&b);
let ssz = ssz_stream.drain();
let ssz_vec = split_all_attestations(&ssz, 0).unwrap();
let (decoded_a, _) =
AttestationRecord::ssz_decode(ssz_vec[0], 0)
.unwrap();
let (decoded_b, _) =
AttestationRecord::ssz_decode(ssz_vec[1], 0)
.unwrap();
let (decoded_a, _) = AttestationRecord::ssz_decode(ssz_vec[0], 0).unwrap();
let (decoded_b, _) = AttestationRecord::ssz_decode(ssz_vec[1], 0).unwrap();
assert_eq!(a, decoded_a);
assert_eq!(b, decoded_b);
@ -136,4 +121,3 @@ mod tests {
assert!(split_all_attestations(&ssz, 0).is_err());
}
}

View File

@ -1,7 +1,7 @@
extern crate bls;
extern crate hashing;
extern crate types;
extern crate ssz;
extern crate types;
pub mod attestation_ssz_splitter;
pub mod ssz_beacon_block;

View File

@ -1,12 +1,6 @@
use super::ssz::decode::{
decode_length,
Decodable,
};
use super::hashing::canonical_hash;
use super::types::beacon_block::{
MIN_SSZ_BLOCK_LENGTH,
MAX_SSZ_BLOCK_LENGTH,
};
use super::ssz::decode::{decode_length, Decodable};
use super::types::beacon_block::{MAX_SSZ_BLOCK_LENGTH, MIN_SSZ_BLOCK_LENGTH};
#[derive(Debug, PartialEq)]
pub enum SszBeaconBlockError {
@ -61,9 +55,7 @@ impl<'a> SszBeaconBlock<'a> {
/// The returned `SszBeaconBlock` instance will contain a `len` field which can be used to determine
/// how many bytes were read from the slice. In the case of multiple, sequentually serialized
/// blocks `len` can be used to assume the location of the next serialized block.
pub fn from_slice(vec: &'a [u8])
-> Result<Self, SszBeaconBlockError>
{
pub fn from_slice(vec: &'a [u8]) -> Result<Self, SszBeaconBlockError> {
let untrimmed_ssz = &vec[..];
/*
@ -83,22 +75,19 @@ impl<'a> SszBeaconBlock<'a> {
/*
* Determine how many bytes are used to store ancestor hashes.
*/
let ancestors_position =
SLOT_BYTES +
RANDAO_REVEAL_BYTES +
POW_CHAIN_REF_BYTES;
let ancestors_position = SLOT_BYTES + RANDAO_REVEAL_BYTES + POW_CHAIN_REF_BYTES;
let ancestors_len = decode_length(untrimmed_ssz, ancestors_position, LENGTH_PREFIX_BYTES)
.map_err(|_| SszBeaconBlockError::TooShort)?;
/*
* Determine how many bytes are used to store attestation records.
*/
let attestations_position =
ancestors_position + LENGTH_PREFIX_BYTES + ancestors_len + // end of ancestor bytes
let attestations_position = ancestors_position + LENGTH_PREFIX_BYTES + ancestors_len + // end of ancestor bytes
ACTIVE_STATE_BYTES +
CRYSTALLIZED_STATE_BYTES;
let attestations_len = decode_length(untrimmed_ssz, attestations_position, LENGTH_PREFIX_BYTES)
.map_err(|_| SszBeaconBlockError::TooShort)?;
let attestations_len =
decode_length(untrimmed_ssz, attestations_position, LENGTH_PREFIX_BYTES)
.map_err(|_| SszBeaconBlockError::TooShort)?;
/*
* Determine how many bytes are used to store specials.
@ -116,7 +105,7 @@ impl<'a> SszBeaconBlock<'a> {
return Err(SszBeaconBlockError::TooShort);
}
Ok(Self{
Ok(Self {
ssz: &untrimmed_ssz[0..block_ssz_len],
block_ssz_len,
ancestors_position,
@ -128,8 +117,12 @@ impl<'a> SszBeaconBlock<'a> {
})
}
pub fn len(&self) -> usize { self.ssz.len() }
pub fn is_empty(&self) -> bool { self.ssz.is_empty() }
pub fn len(&self) -> usize {
self.ssz.len()
}
pub fn is_empty(&self) -> bool {
self.ssz.is_empty()
}
/// Returns this block as ssz.
///
@ -177,9 +170,7 @@ impl<'a> SszBeaconBlock<'a> {
/// Return the `pow_chain_reference` field.
pub fn pow_chain_reference(&self) -> &[u8] {
let start =
SLOT_BYTES +
RANDAO_REVEAL_BYTES;
let start = SLOT_BYTES + RANDAO_REVEAL_BYTES;
&self.ssz[start..start + POW_CHAIN_REF_BYTES]
}
@ -198,8 +189,7 @@ impl<'a> SszBeaconBlock<'a> {
/// Return the `active_state_root` field.
pub fn cry_state_root(&self) -> &[u8] {
let start =
self.ancestors_position + LENGTH_PREFIX_BYTES + self.ancestors_len +
ACTIVE_STATE_BYTES;
self.ancestors_position + LENGTH_PREFIX_BYTES + self.ancestors_len + ACTIVE_STATE_BYTES;
&self.ssz[start..(start + 32)]
}
@ -222,18 +212,13 @@ impl<'a> SszBeaconBlock<'a> {
}
}
#[cfg(test)]
mod tests {
use super::*;
use super::super::types::{
AttestationRecord,
BeaconBlock,
SpecialRecord,
};
use super::super::ssz::encode::encode_length;
use super::super::ssz::SszStream;
use super::super::types::Hash256;
use super::super::ssz::encode::encode_length;
use super::super::types::{AttestationRecord, BeaconBlock, SpecialRecord};
use super::*;
fn get_block_ssz(b: &BeaconBlock) -> Vec<u8> {
let mut ssz_stream = SszStream::new();
@ -259,7 +244,6 @@ mod tests {
b.attestations = vec![];
let ssz = get_block_ssz(&b);
assert!(SszBeaconBlock::from_slice(&ssz[..]).is_ok());
}
@ -309,8 +293,8 @@ mod tests {
// will tell us if the hash changes, not that it matches some
// canonical reference.
let expected_hash = [
11, 181, 149, 114, 248, 15, 46, 0, 106, 135, 158, 31, 15, 194, 149, 176,
43, 110, 154, 26, 253, 67, 18, 139, 250, 84, 144, 219, 3, 208, 50, 145
11, 181, 149, 114, 248, 15, 46, 0, 106, 135, 158, 31, 15, 194, 149, 176, 43, 110, 154,
26, 253, 67, 18, 139, 250, 84, 144, 219, 3, 208, 50, 145,
];
assert_eq!(hash, expected_hash);
@ -376,7 +360,10 @@ mod tests {
let serialized = get_block_ssz(&block);
let ssz_block = SszBeaconBlock::from_slice(&serialized).unwrap();
assert_eq!(ssz_block.parent_hash().unwrap(), &Hash256::from("cats".as_bytes()).to_vec()[..]);
assert_eq!(
ssz_block.parent_hash().unwrap(),
&Hash256::from("cats".as_bytes()).to_vec()[..]
);
}
#[test]
@ -459,7 +446,10 @@ mod tests {
let serialized = get_block_ssz(&block);
let ssz_block = SszBeaconBlock::from_slice(&serialized).unwrap();
assert_eq!(ssz_block.pow_chain_reference(), &reference_hash.to_vec()[..]);
assert_eq!(
ssz_block.pow_chain_reference(),
&reference_hash.to_vec()[..]
);
}
#[test]

View File

@ -1,7 +1,6 @@
/// A library for performing deterministic, pseudo-random shuffling on a vector.
///
/// This library is designed to confirm to the Ethereum 2.0 specification.
extern crate hashing;
mod rng;
@ -19,11 +18,7 @@ pub enum ShuffleErr {
/// of the supplied `seed`.
///
/// This is a Fisher-Yates-Durtstenfeld shuffle.
pub fn shuffle<T>(
seed: &[u8],
mut list: Vec<T>)
-> Result<Vec<T>, ShuffleErr>
{
pub fn shuffle<T>(seed: &[u8], mut list: Vec<T>) -> Result<Vec<T>, ShuffleErr> {
let mut rng = ShuffleRng::new(seed);
if list.len() > rng.rand_max as usize {
@ -42,16 +37,15 @@ pub fn shuffle<T>(
Ok(list)
}
#[cfg(test)]
mod tests {
extern crate yaml_rust;
use super::*;
use self::yaml_rust::yaml;
use super::hashing::canonical_hash;
use super::*;
use std::fs::File;
use std::io::prelude::*;
use self::yaml_rust::yaml;
#[test]
fn test_shuffling() {

View File

@ -1,8 +1,8 @@
use super::hashing::canonical_hash;
const SEED_SIZE_BYTES: usize = 32;
const RAND_BYTES: usize = 3; // 24 / 8
const RAND_MAX: u32 = 16_777_215; // 2 ** (rand_bytes * 8) - 1
const RAND_BYTES: usize = 3; // 24 / 8
const RAND_MAX: u32 = 16_777_215; // 2 ** (rand_bytes * 8) - 1
/// A pseudo-random number generator which given a seed
/// uses successive blake2s hashing to generate "entropy".
@ -24,7 +24,7 @@ impl ShuffleRng {
/// "Regenerates" the seed by hashing it.
fn rehash_seed(&mut self) {
self.seed = canonical_hash(&self.seed);
self.seed = canonical_hash(&self.seed);
self.idx = 0;
}
@ -35,10 +35,7 @@ impl ShuffleRng {
self.rehash_seed();
self.rand()
} else {
int_from_byte_slice(
&self.seed,
self.idx - RAND_BYTES,
)
int_from_byte_slice(&self.seed, self.idx - RAND_BYTES)
}
}
@ -61,57 +58,42 @@ impl ShuffleRng {
/// interprets those bytes as a 24 bit big-endian integer.
/// Returns that integer.
fn int_from_byte_slice(source: &[u8], offset: usize) -> u32 {
(
u32::from(source[offset + 2])) |
(u32::from(source[offset + 1]) << 8) |
(u32::from(source[offset ]) << 16
)
(u32::from(source[offset + 2]))
| (u32::from(source[offset + 1]) << 8)
| (u32::from(source[offset]) << 16)
}
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn test_shuffling_int_from_slice() {
let mut x = int_from_byte_slice(
&[0, 0, 1],
0);
let mut x = int_from_byte_slice(&[0, 0, 1], 0);
assert_eq!((x as u32), 1);
x = int_from_byte_slice(
&[0, 1, 1],
0);
x = int_from_byte_slice(&[0, 1, 1], 0);
assert_eq!(x, 257);
x = int_from_byte_slice(
&[1, 1, 1],
0);
x = int_from_byte_slice(&[1, 1, 1], 0);
assert_eq!(x, 65793);
x = int_from_byte_slice(
&[255, 1, 1],
0);
x = int_from_byte_slice(&[255, 1, 1], 0);
assert_eq!(x, 16711937);
x = int_from_byte_slice(
&[255, 255, 255],
0);
x = int_from_byte_slice(&[255, 255, 255], 0);
assert_eq!(x, 16777215);
x = int_from_byte_slice(
&[0x8f, 0xbb, 0xc7],
0);
x = int_from_byte_slice(&[0x8f, 0xbb, 0xc7], 0);
assert_eq!(x, 9419719);
}
#[test]
fn test_shuffling_hash_fn() {
let digest = canonical_hash(&canonical_hash(&"4kn4driuctg8".as_bytes())); // double-hash is intentional
let digest = canonical_hash(&canonical_hash(&"4kn4driuctg8".as_bytes())); // double-hash is intentional
let expected = [
103, 21, 99, 143, 60, 75, 116, 81, 248, 175, 190, 114, 54, 65, 23, 8, 3, 116,
160, 178, 7, 75, 63, 47, 180, 239, 191, 247, 57, 194, 144, 88
103, 21, 99, 143, 60, 75, 116, 81, 248, 175, 190, 114, 54, 65, 23, 8, 3, 116, 160, 178,
7, 75, 63, 47, 180, 239, 191, 247, 57, 194, 144, 88,
];
assert_eq!(digest.len(), expected.len());
assert_eq!(digest, expected)

View File

@ -25,9 +25,8 @@ pub fn attestation_parent_hashes(
block_slot: u64,
attestation_slot: u64,
current_hashes: &[Hash256],
oblique_hashes: &[Hash256])
-> Result<Vec<Hash256>, ParentHashesError>
{
oblique_hashes: &[Hash256],
) -> Result<Vec<Hash256>, ParentHashesError> {
// This cast places a limit on cycle_length. If you change it, check math
// for overflow.
let cycle_length: u64 = u64::from(cycle_length);
@ -65,20 +64,18 @@ pub fn attestation_parent_hashes(
* Arithmetic is:
* start + cycle_length - oblique_hashes.len()
*/
let end = start.checked_add(cycle_length)
let end = start
.checked_add(cycle_length)
.and_then(|x| x.checked_sub(oblique_hashes.len() as u64))
.ok_or(ParentHashesError::IntWrapping)?;
let mut hashes = Vec::new();
hashes.extend_from_slice(
&current_hashes[(start as usize)..(end as usize)]);
hashes.extend_from_slice(&current_hashes[(start as usize)..(end as usize)]);
hashes.extend_from_slice(oblique_hashes);
Ok(hashes)
}
#[cfg(test)]
mod tests {
use super::*;
@ -106,7 +103,8 @@ mod tests {
block_slot,
attestation_slot,
&current_hashes,
&oblique_hashes);
&oblique_hashes,
);
assert!(result.is_ok());
let result = result.unwrap();
assert_eq!(result.len(), cycle_length as usize);
@ -131,7 +129,8 @@ mod tests {
block_slot,
attestation_slot,
&current_hashes,
&oblique_hashes);
&oblique_hashes,
);
assert!(result.is_ok());
let result = result.unwrap();
assert_eq!(result.len(), cycle_length as usize);
@ -156,7 +155,8 @@ mod tests {
block_slot,
attestation_slot,
&current_hashes,
&oblique_hashes);
&oblique_hashes,
);
assert!(result.is_ok());
let result = result.unwrap();
assert_eq!(result.len(), cycle_length as usize);
@ -179,7 +179,8 @@ mod tests {
block_slot,
attestation_slot,
&current_hashes,
&oblique_hashes);
&oblique_hashes,
);
let result = result.unwrap();
assert_eq!(result.len(), cycle_length as usize);
let expected_result = get_range_of_hashes(7, 15);
@ -201,7 +202,8 @@ mod tests {
block_slot,
attestation_slot,
&current_hashes,
&oblique_hashes);
&oblique_hashes,
);
assert!(result.is_err());
}
@ -220,7 +222,8 @@ mod tests {
block_slot,
attestation_slot,
&current_hashes,
&oblique_hashes);
&oblique_hashes,
);
assert!(result.is_err());
}
}

View File

@ -1,32 +1,16 @@
use std::collections::HashSet;
use std::sync::Arc;
use super::types::{
AttestationRecord,
AttesterMap,
};
use super::attestation_parent_hashes::{
attestation_parent_hashes,
ParentHashesError,
};
use super::db::{
ClientDB,
DBError
};
use super::db::stores::{
BeaconBlockStore,
BeaconBlockAtSlotError,
ValidatorStore,
};
use super::types::{
Hash256,
};
use super::attestation_parent_hashes::{attestation_parent_hashes, ParentHashesError};
use super::db::stores::{BeaconBlockAtSlotError, BeaconBlockStore, ValidatorStore};
use super::db::{ClientDB, DBError};
use super::message_generation::generate_signed_message;
use super::signature_verification::{
verify_aggregate_signature_for_indices,
SignatureVerificationError,
verify_aggregate_signature_for_indices, SignatureVerificationError,
};
use super::types::Hash256;
use super::types::{AttestationRecord, AttesterMap};
use std::collections::HashSet;
use std::sync::Arc;
#[derive(Debug,PartialEq)]
#[derive(Debug, PartialEq)]
pub enum AttestationValidationError {
ParentSlotTooHigh,
ParentSlotTooLow,
@ -52,7 +36,8 @@ pub enum AttestationValidationError {
/// The context against which some attestation should be validated.
pub struct AttestationValidationContext<T>
where T: ClientDB + Sized
where
T: ClientDB + Sized,
{
/// The slot as determined by the system time.
pub block_slot: u64,
@ -73,7 +58,8 @@ pub struct AttestationValidationContext<T>
}
impl<T> AttestationValidationContext<T>
where T: ClientDB
where
T: ClientDB,
{
/// Validate a (fully deserialized) AttestationRecord against this context.
///
@ -82,9 +68,10 @@ impl<T> AttestationValidationContext<T>
///
/// The attestation's aggregate signature will be verified, therefore the function must able to
/// access all required validation public keys via the `validator_store`.
pub fn validate_attestation(&self, a: &AttestationRecord)
-> Result<HashSet<usize>, AttestationValidationError>
{
pub fn validate_attestation(
&self,
a: &AttestationRecord,
) -> Result<HashSet<usize>, AttestationValidationError> {
/*
* The attesation slot must be less than or equal to the parent of the slot of the block
* that contained the attestation.
@ -97,8 +84,10 @@ impl<T> AttestationValidationContext<T>
* The slot of this attestation must not be more than cycle_length + 1 distance
* from the parent_slot of block that contained it.
*/
if a.slot < self.parent_block_slot
.saturating_sub(u64::from(self.cycle_length).saturating_add(1)) {
if a.slot < self
.parent_block_slot
.saturating_sub(u64::from(self.cycle_length).saturating_add(1))
{
return Err(AttestationValidationError::ParentSlotTooLow);
}
@ -124,18 +113,18 @@ impl<T> AttestationValidationContext<T>
* This is an array mapping the order that validators will appear in the bitfield to the
* canonincal index of a validator.
*/
let attestation_indices = self.attester_map.get(&(a.slot, a.shard_id))
let attestation_indices = self
.attester_map
.get(&(a.slot, a.shard_id))
.ok_or(AttestationValidationError::BadAttesterMap)?;
/*
* The bitfield must be no longer than the minimum required to represent each validator in the
* attestation indices for this slot and shard id.
*/
if a.attester_bitfield.num_bytes() !=
bytes_for_bits(attestation_indices.len())
{
if a.attester_bitfield.num_bytes() != bytes_for_bits(attestation_indices.len()) {
return Err(AttestationValidationError::BadBitfieldLength);
}
}
/*
* If there are excess bits in the bitfield because the number of a validators in not a
@ -145,7 +134,7 @@ impl<T> AttestationValidationContext<T>
* refer to the same AttesationRecord.
*/
if a.attester_bitfield.len() > attestation_indices.len() {
return Err(AttestationValidationError::InvalidBitfieldEndBits)
return Err(AttestationValidationError::InvalidBitfieldEndBits);
}
/*
@ -156,7 +145,8 @@ impl<T> AttestationValidationContext<T>
self.block_slot,
a.slot,
&self.recent_block_hashes,
&a.oblique_parent_hashes)?;
&a.oblique_parent_hashes,
)?;
/*
* The specified justified block hash supplied in the attestation must be in the chain at
@ -166,11 +156,15 @@ impl<T> AttestationValidationContext<T>
* block store (database) we iterate back through the blocks until we find (or fail to
* find) the justified block hash referenced in the attestation record.
*/
let latest_parent_hash = parent_hashes.last()
let latest_parent_hash = parent_hashes
.last()
.ok_or(AttestationValidationError::BadCurrentHashes)?;
match self.block_store.block_at_slot(&latest_parent_hash, a.justified_slot)? {
match self
.block_store
.block_at_slot(&latest_parent_hash, a.justified_slot)?
{
Some((ref hash, _)) if *hash == a.justified_block_hash.to_vec() => (),
_ => return Err(AttestationValidationError::InvalidJustifiedBlockHash)
_ => return Err(AttestationValidationError::InvalidJustifiedBlockHash),
};
/*
@ -182,16 +176,17 @@ impl<T> AttestationValidationContext<T>
&parent_hashes,
a.shard_id,
&a.shard_block_hash,
a.justified_slot)
a.justified_slot,
)
};
let voted_hashset =
verify_aggregate_signature_for_indices(
&signed_message,
&a.aggregate_sig,
&attestation_indices,
&a.attester_bitfield,
&self.validator_store)?;
let voted_hashset = verify_aggregate_signature_for_indices(
&signed_message,
&a.aggregate_sig,
&attestation_indices,
&a.attester_bitfield,
&self.validator_store,
)?;
/*
* If the hashset of voters is None, the signature verification failed.
@ -210,16 +205,11 @@ fn bytes_for_bits(bits: usize) -> usize {
impl From<ParentHashesError> for AttestationValidationError {
fn from(e: ParentHashesError) -> Self {
match e {
ParentHashesError::BadCurrentHashes
=> AttestationValidationError::BadCurrentHashes,
ParentHashesError::BadObliqueHashes
=> AttestationValidationError::BadObliqueHashes,
ParentHashesError::SlotTooLow
=> AttestationValidationError::BlockSlotTooLow,
ParentHashesError::SlotTooHigh
=> AttestationValidationError::BlockSlotTooHigh,
ParentHashesError::IntWrapping
=> AttestationValidationError::IntWrapping
ParentHashesError::BadCurrentHashes => AttestationValidationError::BadCurrentHashes,
ParentHashesError::BadObliqueHashes => AttestationValidationError::BadObliqueHashes,
ParentHashesError::SlotTooLow => AttestationValidationError::BlockSlotTooLow,
ParentHashesError::SlotTooHigh => AttestationValidationError::BlockSlotTooHigh,
ParentHashesError::IntWrapping => AttestationValidationError::IntWrapping,
}
}
}
@ -228,8 +218,7 @@ impl From<BeaconBlockAtSlotError> for AttestationValidationError {
fn from(e: BeaconBlockAtSlotError) -> Self {
match e {
BeaconBlockAtSlotError::DBError(s) => AttestationValidationError::DBError(s),
_ => AttestationValidationError::InvalidJustifiedBlockHash
_ => AttestationValidationError::InvalidJustifiedBlockHash,
}
}
}
@ -243,14 +232,16 @@ impl From<DBError> for AttestationValidationError {
impl From<SignatureVerificationError> for AttestationValidationError {
fn from(e: SignatureVerificationError) -> Self {
match e {
SignatureVerificationError::BadValidatorIndex
=> AttestationValidationError::BadAttesterMap,
SignatureVerificationError::PublicKeyCorrupt
=> AttestationValidationError::PublicKeyCorrupt,
SignatureVerificationError::NoPublicKeyForValidator
=> AttestationValidationError::NoPublicKeyForValidator,
SignatureVerificationError::DBError(s)
=> AttestationValidationError::DBError(s),
SignatureVerificationError::BadValidatorIndex => {
AttestationValidationError::BadAttesterMap
}
SignatureVerificationError::PublicKeyCorrupt => {
AttestationValidationError::PublicKeyCorrupt
}
SignatureVerificationError::NoPublicKeyForValidator => {
AttestationValidationError::NoPublicKeyForValidator
}
SignatureVerificationError::DBError(s) => AttestationValidationError::DBError(s),
}
}
}

View File

@ -1,12 +1,12 @@
extern crate db;
extern crate bls;
extern crate db;
extern crate hashing;
extern crate ssz;
extern crate ssz_helpers;
extern crate types;
pub mod attestation_validation;
mod attestation_parent_hashes;
pub mod attestation_validation;
pub mod block_validation;
mod message_generation;
mod signature_verification;

View File

@ -1,5 +1,5 @@
use super::ssz::SszStream;
use super::hashing::canonical_hash;
use super::ssz::SszStream;
use super::types::Hash256;
/// Generates the message used to validate the signature provided with an AttestationRecord.
@ -10,9 +10,8 @@ pub fn generate_signed_message(
parent_hashes: &[Hash256],
shard_id: u16,
shard_block_hash: &Hash256,
justified_slot: u64)
-> Vec<u8>
{
justified_slot: u64,
) -> Vec<u8> {
/*
* Note: it's a little risky here to use SSZ, because the encoding is not necessarily SSZ
* (for example, SSZ might change whilst this doesn't).
@ -39,9 +38,7 @@ mod tests {
#[test]
fn test_generate_signed_message() {
let slot = 93;
let parent_hashes: Vec<Hash256> = (0..12)
.map(|i| Hash256::from(i as u64))
.collect();
let parent_hashes: Vec<Hash256> = (0..12).map(|i| Hash256::from(i as u64)).collect();
let shard_id = 15;
let shard_block_hash = Hash256::from("shard_block_hash".as_bytes());
let justified_slot = 18;
@ -51,7 +48,8 @@ mod tests {
&parent_hashes,
shard_id,
&shard_block_hash,
justified_slot);
justified_slot,
);
/*
* Note: this is not some well-known test vector, it's simply the result of running
@ -60,9 +58,8 @@ mod tests {
* Once well-known test vectors are established, they should be placed here.
*/
let expected = vec![
149, 99, 94, 229, 72, 144, 233, 14, 164, 16, 143, 53, 94, 48,
118, 179, 33, 181, 172, 215, 2, 191, 176, 18, 188, 172, 137,
178, 236, 66, 74, 120
149, 99, 94, 229, 72, 144, 233, 14, 164, 16, 143, 53, 94, 48, 118, 179, 33, 181, 172,
215, 2, 191, 176, 18, 188, 172, 137, 178, 236, 66, 74, 120,
];
assert_eq!(output, expected);

View File

@ -1,14 +1,8 @@
use std::collections::HashSet;
use super::bls::{
AggregateSignature,
AggregatePublicKey,
};
use super::bls::{AggregatePublicKey, AggregateSignature};
use super::db::stores::{ValidatorStore, ValidatorStoreError};
use super::db::ClientDB;
use super::db::stores::{
ValidatorStore,
ValidatorStoreError,
};
use super::types::Bitfield;
use std::collections::HashSet;
#[derive(Debug, PartialEq)]
pub enum SignatureVerificationError {
@ -30,9 +24,10 @@ pub fn verify_aggregate_signature_for_indices<T>(
agg_sig: &AggregateSignature,
attestation_indices: &[usize],
bitfield: &Bitfield,
validator_store: &ValidatorStore<T>)
-> Result<Option<HashSet<usize>>, SignatureVerificationError>
where T: ClientDB + Sized
validator_store: &ValidatorStore<T>,
) -> Result<Option<HashSet<usize>>, SignatureVerificationError>
where
T: ClientDB + Sized,
{
let mut voters = HashSet::new();
let mut agg_pub_key = AggregatePublicKey::new();
@ -43,7 +38,8 @@ pub fn verify_aggregate_signature_for_indices<T>(
/*
* De-reference the attestation index into a canonical ValidatorRecord index.
*/
let validator = *attestation_indices.get(i)
let validator = *attestation_indices
.get(i)
.ok_or(SignatureVerificationError::BadValidatorIndex)?;
/*
* Load the validators public key from our store.
@ -77,23 +73,17 @@ pub fn verify_aggregate_signature_for_indices<T>(
impl From<ValidatorStoreError> for SignatureVerificationError {
fn from(error: ValidatorStoreError) -> Self {
match error {
ValidatorStoreError::DBError(s) =>
SignatureVerificationError::DBError(s),
ValidatorStoreError::DecodeError =>
SignatureVerificationError::PublicKeyCorrupt,
ValidatorStoreError::DBError(s) => SignatureVerificationError::DBError(s),
ValidatorStoreError::DecodeError => SignatureVerificationError::PublicKeyCorrupt,
}
}
}
#[cfg(test)]
mod tests {
use super::*;
use super::super::bls::{
Keypair,
Signature,
};
use super::super::bls::{Keypair, Signature};
use super::super::db::MemoryDB;
use super::*;
use std::sync::Arc;
/*
@ -130,8 +120,7 @@ mod tests {
let mut all_keypairs = signing_keypairs.clone();
all_keypairs.append(&mut non_signing_keypairs.clone());
let attestation_indices: Vec<usize> = (0..all_keypairs.len())
.collect();
let attestation_indices: Vec<usize> = (0..all_keypairs.len()).collect();
let mut bitfield = Bitfield::new();
for i in 0..signing_keypairs.len() {
bitfield.set_bit(i, true);
@ -158,11 +147,11 @@ mod tests {
&agg_sig,
&attestation_indices,
&bitfield,
&store).unwrap();
&store,
).unwrap();
let voters = voters.unwrap();
(0..signing_keypairs.len())
.for_each(|i| assert!(voters.contains(&i)));
(0..signing_keypairs.len()).for_each(|i| assert!(voters.contains(&i)));
(signing_keypairs.len()..non_signing_keypairs.len())
.for_each(|i| assert!(!voters.contains(&i)));
@ -176,7 +165,8 @@ mod tests {
&agg_sig,
&attestation_indices,
&bitfield,
&store).unwrap();
&store,
).unwrap();
assert_eq!(voters, None);
}

View File

@ -1,33 +1,12 @@
use std::sync::Arc;
use super::db::{
MemoryDB,
};
use super::db::stores::{
ValidatorStore,
BeaconBlockStore,
};
use super::types::{
AttestationRecord,
AttesterMap,
Bitfield,
BeaconBlock,
Hash256,
};
use super::validation::attestation_validation::{
AttestationValidationContext,
};
use super::bls::{
AggregateSignature,
Keypair,
SecretKey,
Signature,
};
use super::bls::{AggregateSignature, Keypair, SecretKey, Signature};
use super::db::stores::{BeaconBlockStore, ValidatorStore};
use super::db::MemoryDB;
use super::hashing::canonical_hash;
use super::ssz::SszStream;
use super::hashing::{
canonical_hash,
};
use super::types::{AttestationRecord, AttesterMap, BeaconBlock, Bitfield, Hash256};
use super::validation::attestation_validation::AttestationValidationContext;
pub struct TestStore {
pub db: Arc<MemoryDB>,
@ -55,13 +34,13 @@ pub struct TestRig {
pub attester_count: usize,
}
fn generate_message_hash(slot: u64,
parent_hashes: &[Hash256],
shard_id: u16,
shard_block_hash: &Hash256,
justified_slot: u64)
-> Vec<u8>
{
fn generate_message_hash(
slot: u64,
parent_hashes: &[Hash256],
shard_id: u16,
shard_block_hash: &Hash256,
justified_slot: u64,
) -> Vec<u8> {
let mut stream = SszStream::new();
stream.append(&slot);
stream.append_vec(&parent_hashes.to_vec());
@ -72,18 +51,18 @@ fn generate_message_hash(slot: u64,
canonical_hash(&bytes)
}
pub fn generate_attestation(shard_id: u16,
shard_block_hash: &Hash256,
block_slot: u64,
attestation_slot: u64,
justified_slot: u64,
justified_block_hash: &Hash256,
cycle_length: u8,
parent_hashes: &[Hash256],
signing_keys: &[Option<SecretKey>],
block_store: &BeaconBlockStore<MemoryDB>)
-> AttestationRecord
{
pub fn generate_attestation(
shard_id: u16,
shard_block_hash: &Hash256,
block_slot: u64,
attestation_slot: u64,
justified_slot: u64,
justified_block_hash: &Hash256,
cycle_length: u8,
parent_hashes: &[Hash256],
signing_keys: &[Option<SecretKey>],
block_store: &BeaconBlockStore<MemoryDB>,
) -> AttestationRecord {
let mut attester_bitfield = Bitfield::new();
let mut aggregate_sig = AggregateSignature::new();
@ -107,7 +86,8 @@ pub fn generate_attestation(shard_id: u16,
parent_hashes_slice,
shard_id,
shard_block_hash,
justified_slot);
justified_slot,
);
for (i, secret_key) in signing_keys.iter().enumerate() {
/*
@ -143,7 +123,9 @@ pub fn create_block_at_slot(block_store: &BeaconBlockStore<MemoryDB>, hash: &Has
let mut s = SszStream::new();
s.append(&justified_block);
let justified_block_ssz = s.drain();
block_store.put_serialized_block(&hash.to_vec(), &justified_block_ssz).unwrap();
block_store
.put_serialized_block(&hash.to_vec(), &justified_block_ssz)
.unwrap();
}
/// Inserts a justified_block_hash in a position that will be referenced by an attestation record.
@ -151,16 +133,14 @@ pub fn insert_justified_block_hash(
parent_hashes: &mut Vec<Hash256>,
justified_block_hash: &Hash256,
block_slot: u64,
attestation_slot: u64)
{
let attestation_parent_hash_index = parent_hashes.len() - 1 -
(block_slot as usize - attestation_slot as usize);
attestation_slot: u64,
) {
let attestation_parent_hash_index =
parent_hashes.len() - 1 - (block_slot as usize - attestation_slot as usize);
parent_hashes[attestation_parent_hash_index] = justified_block_hash.clone();
}
pub fn setup_attestation_validation_test(shard_id: u16, attester_count: usize)
-> TestRig
{
pub fn setup_attestation_validation_test(shard_id: u16, attester_count: usize) -> TestRig {
let stores = TestStore::new();
let block_slot = 10000;
@ -181,7 +161,8 @@ pub fn setup_attestation_validation_test(shard_id: u16, attester_count: usize)
&mut parent_hashes,
&justified_block_hash,
block_slot,
attestation_slot);
attestation_slot,
);
let parent_hashes = Arc::new(parent_hashes);
@ -195,11 +176,14 @@ pub fn setup_attestation_validation_test(shard_id: u16, attester_count: usize)
* list of keypairs. Store it in the database.
*/
for i in 0..attester_count {
let keypair = Keypair::random();
keypairs.push(keypair.clone());
stores.validator.put_public_key_by_index(i, &keypair.pk).unwrap();
signing_keys.push(Some(keypair.sk.clone()));
attesters.push(i);
let keypair = Keypair::random();
keypairs.push(keypair.clone());
stores
.validator
.put_public_key_by_index(i, &keypair.pk)
.unwrap();
signing_keys.push(Some(keypair.sk.clone()));
attesters.push(i);
}
attester_map.insert((attestation_slot, shard_id), attesters);
@ -223,7 +207,8 @@ pub fn setup_attestation_validation_test(shard_id: u16, attester_count: usize)
cycle_length,
&parent_hashes.clone(),
&signing_keys,
&stores.block);
&stores.block,
);
TestRig {
attestation,

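The index arithmetic in `insert_justified_block_hash` above is easiest to see with concrete numbers; the values below are assumed for illustration, not taken from the tests.

// With 16 parent hashes, block_slot = 10000 and attestation_slot = 9997, the
// justified hash lands at index 16 - 1 - (10000 - 9997) = 12.
let mut parent_hashes: Vec<Hash256> = (0..16).map(|i| Hash256::from(i as u64)).collect();
let justified_block_hash = Hash256::from("justified_hash".as_bytes());
insert_justified_block_hash(&mut parent_hashes, &justified_block_hash, 10000, 9997);
assert_eq!(parent_hashes[12], justified_block_hash);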
View File

@ -3,7 +3,7 @@ mod tests;
use super::bls;
use super::db;
use super::hashing;
use super::ssz;
use super::types;
use super::hashing;
use super::validation;

View File

@ -1,20 +1,10 @@
use std::sync::Arc;
use super::helpers::{
TestRig,
setup_attestation_validation_test,
create_block_at_slot,
};
use super::validation::attestation_validation::{
AttestationValidationError,
};
use super::bls::AggregateSignature;
use super::helpers::{create_block_at_slot, setup_attestation_validation_test, TestRig};
use super::types::AttesterMap;
use super::bls::{
AggregateSignature,
};
use super::types::{
Hash256,
};
use super::types::Hash256;
use super::validation::attestation_validation::AttestationValidationError;
fn generic_rig() -> TestRig {
let shard_id = 10;
@ -80,21 +70,29 @@ fn test_attestation_validation_invalid_justified_slot_incorrect() {
create_block_at_slot(
&rig.stores.block,
&rig.attestation.justified_block_hash,
rig.attestation.justified_slot);
rig.attestation.justified_slot,
);
let result = rig.context.validate_attestation(&rig.attestation);
assert_eq!(result, Err(AttestationValidationError::BadAggregateSignature));
assert_eq!(
result,
Err(AttestationValidationError::BadAggregateSignature)
);
rig.attestation.justified_slot = original + 1;
// Ensures we don't get a bad justified block error instead.
create_block_at_slot(
&rig.stores.block,
&rig.attestation.justified_block_hash,
rig.attestation.justified_slot);
rig.attestation.justified_slot,
);
// Ensures we don't get an error that the last justified slot is ahead of the context justified
// slot.
rig.context.last_justified_slot = rig.attestation.justified_slot;
let result = rig.context.validate_attestation(&rig.attestation);
assert_eq!(result, Err(AttestationValidationError::BadAggregateSignature));
assert_eq!(
result,
Err(AttestationValidationError::BadAggregateSignature)
);
}
#[test]
@ -108,7 +106,10 @@ fn test_attestation_validation_invalid_too_many_oblique() {
rig.attestation.oblique_parent_hashes = obliques;
let result = rig.context.validate_attestation(&rig.attestation);
assert_eq!(result, Err(AttestationValidationError::TooManyObliqueHashes));
assert_eq!(
result,
Err(AttestationValidationError::TooManyObliqueHashes)
);
}
#[test]
@ -132,8 +133,12 @@ fn test_attestation_validation_invalid_bad_bitfield_length() {
* of the bitfield.
*/
let one_byte_higher = rig.attester_count + 8;
rig.attestation.attester_bitfield.set_bit(one_byte_higher, true);
rig.attestation.attester_bitfield.set_bit(one_byte_higher, false);
rig.attestation
.attester_bitfield
.set_bit(one_byte_higher, true);
rig.attestation
.attester_bitfield
.set_bit(one_byte_higher, false);
let result = rig.context.validate_attestation(&rig.attestation);
assert_eq!(result, Err(AttestationValidationError::BadBitfieldLength));
@ -144,10 +149,15 @@ fn test_attestation_validation_invalid_invalid_bitfield_end_bit() {
let mut rig = generic_rig();
let one_bit_high = rig.attester_count + 1;
rig.attestation.attester_bitfield.set_bit(one_bit_high, true);
rig.attestation
.attester_bitfield
.set_bit(one_bit_high, true);
let result = rig.context.validate_attestation(&rig.attestation);
assert_eq!(result, Err(AttestationValidationError::InvalidBitfieldEndBits));
assert_eq!(
result,
Err(AttestationValidationError::InvalidBitfieldEndBits)
);
}
#[test]
@ -164,11 +174,19 @@ fn test_attestation_validation_invalid_invalid_bitfield_end_bit_with_irreguar_bi
* bit in a bitfield and the byte length of that bitfield
*/
let one_bit_high = rig.attester_count + 1;
assert!(one_bit_high % 8 != 0, "the test is ineffective in this case.");
rig.attestation.attester_bitfield.set_bit(one_bit_high, true);
assert!(
one_bit_high % 8 != 0,
"the test is ineffective in this case."
);
rig.attestation
.attester_bitfield
.set_bit(one_bit_high, true);
let result = rig.context.validate_attestation(&rig.attestation);
assert_eq!(result, Err(AttestationValidationError::InvalidBitfieldEndBits));
assert_eq!(
result,
Err(AttestationValidationError::InvalidBitfieldEndBits)
);
}
#[test]
@ -178,7 +196,10 @@ fn test_attestation_validation_invalid_unknown_justified_block_hash() {
rig.attestation.justified_block_hash = Hash256::from("unknown block hash".as_bytes());
let result = rig.context.validate_attestation(&rig.attestation);
assert_eq!(result, Err(AttestationValidationError::InvalidJustifiedBlockHash));
assert_eq!(
result,
Err(AttestationValidationError::InvalidJustifiedBlockHash)
);
}
#[test]
@ -191,9 +212,13 @@ fn test_attestation_validation_invalid_unknown_justified_block_hash_wrong_slot()
create_block_at_slot(
&rig.stores.block,
&rig.attestation.justified_block_hash,
rig.attestation.justified_slot + 1);
rig.attestation.justified_slot + 1,
);
let result = rig.context.validate_attestation(&rig.attestation);
assert_eq!(result, Err(AttestationValidationError::InvalidJustifiedBlockHash));
assert_eq!(
result,
Err(AttestationValidationError::InvalidJustifiedBlockHash)
);
/*
* justified_block_hash points to a block with a slot that is too low.
@ -201,9 +226,13 @@ fn test_attestation_validation_invalid_unknown_justified_block_hash_wrong_slot()
create_block_at_slot(
&rig.stores.block,
&rig.attestation.justified_block_hash,
rig.attestation.justified_slot - 1);
rig.attestation.justified_slot - 1,
);
let result = rig.context.validate_attestation(&rig.attestation);
assert_eq!(result, Err(AttestationValidationError::InvalidJustifiedBlockHash));
assert_eq!(
result,
Err(AttestationValidationError::InvalidJustifiedBlockHash)
);
}
#[test]
@ -213,5 +242,8 @@ fn test_attestation_validation_invalid_empty_signature() {
rig.attestation.aggregate_sig = AggregateSignature::new();
let result = rig.context.validate_attestation(&rig.attestation);
assert_eq!(result, Err(AttestationValidationError::BadAggregateSignature));
assert_eq!(
result,
Err(AttestationValidationError::BadAggregateSignature)
);
}

View File

@ -4,33 +4,25 @@ extern crate hashing;
extern crate types;
use active_validators::validator_is_active;
use bytes::{
BytesMut,
BufMut,
};
use bytes::{BufMut, BytesMut};
use hashing::canonical_hash;
use std::cmp::max;
use types::{
Hash256,
ValidatorRecord,
ValidatorStatus,
};
use types::{Hash256, ValidatorRecord, ValidatorStatus};
pub enum UpdateValidatorSetError {
ArithmeticOverflow,
}
const VALIDATOR_FLAG_ENTRY: u8 = 0;
const VALIDATOR_FLAG_EXIT: u8 = 1;
const VALIDATOR_FLAG_EXIT: u8 = 1;
pub fn update_validator_set(
validators: &mut Vec<ValidatorRecord>,
hash_chain: Hash256,
present_slot: u64,
deposit_size_gwei: u64,
max_validator_churn_quotient: u64)
-> Result<(), UpdateValidatorSetError>
{
max_validator_churn_quotient: u64,
) -> Result<(), UpdateValidatorSetError> {
/*
* Total balance of all active validators.
*
@ -40,7 +32,8 @@ pub fn update_validator_set(
let mut bal: u64 = 0;
for v in validators.iter() {
if validator_is_active(&v) {
bal = bal.checked_add(v.balance)
bal = bal
.checked_add(v.balance)
.ok_or(UpdateValidatorSetError::ArithmeticOverflow)?;
}
}
@ -51,9 +44,13 @@ pub fn update_validator_set(
* Note: this is not the maximum allowable change; it can actually be higher.
*/
let max_allowable_change = {
let double_deposit_size = deposit_size_gwei.checked_mul(2)
let double_deposit_size = deposit_size_gwei
.checked_mul(2)
.ok_or(UpdateValidatorSetError::ArithmeticOverflow)?;
max(double_deposit_size, total_balance / max_validator_churn_quotient)
max(
double_deposit_size,
total_balance / max_validator_churn_quotient,
)
};
let mut hasher = ValidatorChangeHashChain {
@ -66,7 +63,8 @@ pub fn update_validator_set(
* Validator is pending activation.
*/
x if x == ValidatorStatus::PendingActivation as u8 => {
let new_total_changed = total_changed.checked_add(deposit_size_gwei)
let new_total_changed = total_changed
.checked_add(deposit_size_gwei)
.ok_or(UpdateValidatorSetError::ArithmeticOverflow)?;
/*
* If entering this validator would not exceed the max balance delta,
@ -85,7 +83,8 @@ pub fn update_validator_set(
* Validator is pending exit.
*/
x if x == ValidatorStatus::PendingExit as u8 => {
let new_total_changed = total_changed.checked_add(v.balance)
let new_total_changed = total_changed
.checked_add(v.balance)
.ok_or(UpdateValidatorSetError::ArithmeticOverflow)?;
/*
* If exiting this validator would not exceed the max balance delta,
@ -101,7 +100,7 @@ pub fn update_validator_set(
break;
}
}
_ => ()
_ => (),
};
if total_changed >= max_allowable_change {
break;
@ -115,17 +114,14 @@ pub struct ValidatorChangeHashChain {
}
impl ValidatorChangeHashChain {
pub fn extend(&mut self, index: usize, pubkey: &Vec<u8>, flag: u8)
{
pub fn extend(&mut self, index: usize, pubkey: &Vec<u8>, flag: u8) {
let mut message = self.bytes.clone();
message.append(&mut serialize_validator_change_record(index, pubkey, flag));
self.bytes = canonical_hash(&message);
}
}
fn serialize_validator_change_record(index: usize, pubkey: &Vec<u8>, flag: u8)
-> Vec<u8>
{
fn serialize_validator_change_record(index: usize, pubkey: &Vec<u8>, flag: u8) -> Vec<u8> {
let mut buf = BytesMut::with_capacity(68);
buf.put_u8(flag);
let index_bytes = {
@ -138,7 +134,6 @@ fn serialize_validator_change_record(index: usize, pubkey: &Vec<u8>, flag: u8)
buf.take().to_vec()
}
#[cfg(test)]
mod tests {
#[test]

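The `max_allowable_change` computed above is `max(2 * deposit_size_gwei, total_balance / max_validator_churn_quotient)`. A worked example with assumed numbers (1000 active validators, 32_000_000_000 Gwei deposits, churn quotient of 32):

// All numbers assumed for illustration only.
let deposit_size_gwei: u64 = 32_000_000_000;
let max_validator_churn_quotient: u64 = 32;
let total_balance: u64 = 1_000 * deposit_size_gwei; // 32_000_000_000_000

let double_deposit_size = deposit_size_gwei * 2; // 64_000_000_000
let quotient_limit = total_balance / max_validator_churn_quotient; // 1_000_000_000_000

// At this validator count the quotient term dominates, so roughly
// 1_000_000_000_000 Gwei of balance may enter or exit the set in one pass.
let max_allowable_change = std::cmp::max(double_deposit_size, quotient_limit);
assert_eq!(max_allowable_change, 1_000_000_000_000);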
View File

@ -1,11 +1,5 @@
use bls::{
verify_proof_of_possession,
};
use types::{
ValidatorRecord,
ValidatorStatus,
ValidatorRegistration,
};
use bls::verify_proof_of_possession;
use types::{ValidatorRecord, ValidatorRegistration, ValidatorStatus};
/// The size of a validator's deposit in GWei.
pub const DEPOSIT_GWEI: u64 = 32_000_000_000;
@ -25,9 +19,7 @@ pub enum ValidatorInductionError {
}
impl ValidatorInductor {
pub fn new(current_slot: u64, shard_count: u16, validators: Vec<ValidatorRecord>)
-> Self
{
pub fn new(current_slot: u64, shard_count: u16, validators: Vec<ValidatorRecord>) -> Self {
Self {
current_slot,
shard_count,
@ -40,29 +32,33 @@ impl ValidatorInductor {
///
/// Returns an error if the registration is invalid; otherwise returns the index of the
/// validator in `CrystallizedState.validators`.
pub fn induct(&mut self, rego: &ValidatorRegistration, status: ValidatorStatus)
-> Result<usize, ValidatorInductionError>
{
pub fn induct(
&mut self,
rego: &ValidatorRegistration,
status: ValidatorStatus,
) -> Result<usize, ValidatorInductionError> {
let v = self.process_registration(rego, status)?;
Ok(self.add_validator(v))
}
/// Verify a `ValidatorRegistration` and return a `ValidatorRecord` if valid.
fn process_registration(&self, r: &ValidatorRegistration, status: ValidatorStatus)
-> Result<ValidatorRecord, ValidatorInductionError>
{
fn process_registration(
&self,
r: &ValidatorRegistration,
status: ValidatorStatus,
) -> Result<ValidatorRecord, ValidatorInductionError> {
/*
* Ensure withdrawal shard is not too high.
*/
if r.withdrawal_shard > self.shard_count {
return Err(ValidatorInductionError::InvalidShard)
return Err(ValidatorInductionError::InvalidShard);
}
/*
* Prove validator has knowledge of their secret key.
*/
if !verify_proof_of_possession(&r.proof_of_possession, &r.pubkey) {
return Err(ValidatorInductionError::InvaidProofOfPossession)
return Err(ValidatorInductionError::InvaidProofOfPossession);
}
Ok(ValidatorRecord {
@ -79,13 +75,11 @@ impl ValidatorInductor {
/// Returns the index of the first `ValidatorRecord` in the `CrystallizedState` where
/// `validator.status == Withdrawn`. If no such record exists, `None` is returned.
fn first_withdrawn_validator(&mut self)
-> Option<usize>
{
fn first_withdrawn_validator(&mut self) -> Option<usize> {
for i in self.empty_validator_start..self.validators.len() {
if self.validators[i].status == ValidatorStatus::Withdrawn as u8 {
self.empty_validator_start = i + 1;
return Some(i)
return Some(i);
}
}
None
@ -94,9 +88,7 @@ impl ValidatorInductor {
/// Adds a `ValidatorRecord` to the `CrystallizedState` by replacing the first validator where
/// `validator.status == Withdrawn`. If no such withdrawn validator exists, adds the new
/// validator to the end of the list.
fn add_validator(&mut self, v: ValidatorRecord)
-> usize
{
fn add_validator(&mut self, v: ValidatorRecord) -> usize {
match self.first_withdrawn_validator() {
Some(i) => {
self.validators[i] = v;
@ -109,36 +101,25 @@ impl ValidatorInductor {
}
}
pub fn to_vec(self)
-> Vec<ValidatorRecord>
{
pub fn to_vec(self) -> Vec<ValidatorRecord> {
self.validators
}
}
#[cfg(test)]
mod tests {
use super::*;
use bls::{
Keypair,
Signature,
};
use types::{
Address,
Hash256,
};
use bls::{Keypair, Signature};
use hashing::proof_of_possession_hash;
use types::{Address, Hash256};
fn registration_equals_record(reg: &ValidatorRegistration, rec: &ValidatorRecord)
-> bool
{
(reg.pubkey == rec.pubkey) &
(reg.withdrawal_shard == rec.withdrawal_shard) &
(reg.withdrawal_address == rec.withdrawal_address) &
(reg.randao_commitment == rec.randao_commitment) &
(verify_proof_of_possession(&reg.proof_of_possession, &rec.pubkey))
fn registration_equals_record(reg: &ValidatorRegistration, rec: &ValidatorRecord) -> bool {
(reg.pubkey == rec.pubkey)
& (reg.withdrawal_shard == rec.withdrawal_shard)
& (reg.withdrawal_address == rec.withdrawal_address)
& (reg.randao_commitment == rec.randao_commitment)
& (verify_proof_of_possession(&reg.proof_of_possession, &rec.pubkey))
}
/// Generate a proof of possession for some keypair.
@ -291,7 +272,10 @@ mod tests {
let result = inductor.induct(&r, ValidatorStatus::PendingActivation);
let validators = inductor.to_vec();
assert_eq!(result, Err(ValidatorInductionError::InvaidProofOfPossession));
assert_eq!(
result,
Err(ValidatorInductionError::InvaidProofOfPossession)
);
assert_eq!(validators.len(), 0);
}
}
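A usage sketch of the inductor above, assuming the caller already has a `ValidatorRegistration` (its construction is not shown) and an assumed shard count of 16:

// Sketch only: starts from an empty validator set at slot 0.
fn induct_one(
    rego: &ValidatorRegistration,
) -> Result<(usize, Vec<ValidatorRecord>), ValidatorInductionError> {
    let mut inductor = ValidatorInductor::new(0, 16, vec![]);
    // A valid registration lands either in the first Withdrawn slot or at the end.
    let index = inductor.induct(rego, ValidatorStatus::PendingActivation)?;
    Ok((index, inductor.to_vec()))
}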

View File

@ -4,7 +4,4 @@ extern crate types;
mod inductor;
pub use inductor::{
ValidatorInductor,
ValidatorInductionError,
};
pub use inductor::{ValidatorInductionError, ValidatorInductor};

View File

@ -1,11 +1,8 @@
extern crate active_validators;
extern crate honey_badger_split;
extern crate vec_shuffle;
extern crate types;
extern crate vec_shuffle;
mod shuffle;
pub use shuffle::{
shard_and_committees_for_cycle,
ValidatorAssignmentError,
};
pub use shuffle::{shard_and_committees_for_cycle, ValidatorAssignmentError};

View File

@ -2,16 +2,8 @@ use std::cmp::min;
use active_validators::active_validator_indices;
use honey_badger_split::SplitExt;
use vec_shuffle::{
shuffle,
ShuffleErr,
};
use types::{
ShardAndCommittee,
ValidatorRecord,
ChainConfig,
};
use types::{ChainConfig, ShardAndCommittee, ValidatorRecord};
use vec_shuffle::{shuffle, ShuffleErr};
type DelegatedCycle = Vec<Vec<ShardAndCommittee>>;
@ -29,9 +21,8 @@ pub fn shard_and_committees_for_cycle(
seed: &[u8],
validators: &[ValidatorRecord],
crosslinking_shard_start: u16,
config: &ChainConfig)
-> Result<DelegatedCycle, ValidatorAssignmentError>
{
config: &ChainConfig,
) -> Result<DelegatedCycle, ValidatorAssignmentError> {
let shuffled_validator_indices = {
let mut validator_indices = active_validator_indices(validators);
shuffle(seed, validator_indices)?
@ -45,7 +36,8 @@ pub fn shard_and_committees_for_cycle(
&shard_indices,
crosslinking_shard_start,
cycle_length,
min_committee_size)
min_committee_size,
)
}
/// Given the validator list, delegates the validators into slots and committees for a given cycle.
@ -54,50 +46,49 @@ fn generate_cycle(
shard_indices: &[usize],
crosslinking_shard_start: usize,
cycle_length: usize,
min_committee_size: usize)
-> Result<DelegatedCycle, ValidatorAssignmentError>
{
min_committee_size: usize,
) -> Result<DelegatedCycle, ValidatorAssignmentError> {
let validator_count = validator_indices.len();
let shard_count = shard_indices.len();
if shard_count / cycle_length == 0 {
return Err(ValidatorAssignmentError::TooFewShards)
return Err(ValidatorAssignmentError::TooFewShards);
}
let (committees_per_slot, slots_per_committee) = {
if validator_count >= cycle_length * min_committee_size {
let committees_per_slot = min(validator_count / cycle_length /
(min_committee_size * 2) + 1, shard_count /
cycle_length);
let committees_per_slot = min(
validator_count / cycle_length / (min_committee_size * 2) + 1,
shard_count / cycle_length,
);
let slots_per_committee = 1;
(committees_per_slot, slots_per_committee)
} else {
let committees_per_slot = 1;
let mut slots_per_committee = 1;
while (validator_count * slots_per_committee < cycle_length * min_committee_size) &
(slots_per_committee < cycle_length) {
while (validator_count * slots_per_committee < cycle_length * min_committee_size)
& (slots_per_committee < cycle_length)
{
slots_per_committee *= 2;
}
(committees_per_slot, slots_per_committee)
}
};
let cycle = validator_indices.honey_badger_split(cycle_length)
let cycle = validator_indices
.honey_badger_split(cycle_length)
.enumerate()
.map(|(i, slot_indices)| {
let shard_start = crosslinking_shard_start + i * committees_per_slot / slots_per_committee;
slot_indices.honey_badger_split(committees_per_slot)
let shard_start =
crosslinking_shard_start + i * committees_per_slot / slots_per_committee;
slot_indices
.honey_badger_split(committees_per_slot)
.enumerate()
.map(|(j, shard_indices)| {
ShardAndCommittee{
shard: ((shard_start + j) % shard_count) as u16,
committee: shard_indices.to_vec(),
}
})
.collect()
})
.collect();
.map(|(j, shard_indices)| ShardAndCommittee {
shard: ((shard_start + j) % shard_count) as u16,
committee: shard_indices.to_vec(),
}).collect()
}).collect();
Ok(cycle)
}
@ -118,9 +109,12 @@ mod tests {
shard_count: &usize,
crosslinking_shard_start: usize,
cycle_length: usize,
min_committee_size: usize)
-> (Vec<usize>, Vec<usize>, Result<DelegatedCycle, ValidatorAssignmentError>)
{
min_committee_size: usize,
) -> (
Vec<usize>,
Vec<usize>,
Result<DelegatedCycle, ValidatorAssignmentError>,
) {
let validator_indices: Vec<usize> = (0_usize..*validator_count).into_iter().collect();
let shard_indices: Vec<usize> = (0_usize..*shard_count).into_iter().collect();
let result = generate_cycle(
@ -128,28 +122,27 @@ mod tests {
&shard_indices,
crosslinking_shard_start,
cycle_length,
min_committee_size);
min_committee_size,
);
(validator_indices, shard_indices, result)
}
#[allow(dead_code)]
fn print_cycle(cycle: &DelegatedCycle) {
cycle.iter()
.enumerate()
.for_each(|(i, slot)| {
println!("slot {:?}", &i);
slot.iter()
.enumerate()
.for_each(|(i, sac)| {
println!("#{:?}\tshard={}\tcommittee.len()={}",
&i, &sac.shard, &sac.committee.len())
})
});
cycle.iter().enumerate().for_each(|(i, slot)| {
println!("slot {:?}", &i);
slot.iter().enumerate().for_each(|(i, sac)| {
println!(
"#{:?}\tshard={}\tcommittee.len()={}",
&i,
&sac.shard,
&sac.committee.len()
)
})
});
}
fn flatten_validators(cycle: &DelegatedCycle)
-> Vec<usize>
{
fn flatten_validators(cycle: &DelegatedCycle) -> Vec<usize> {
let mut flattened = vec![];
for slot in cycle.iter() {
for sac in slot.iter() {
@ -161,9 +154,7 @@ mod tests {
flattened
}
fn flatten_and_dedup_shards(cycle: &DelegatedCycle)
-> Vec<usize>
{
fn flatten_and_dedup_shards(cycle: &DelegatedCycle) -> Vec<usize> {
let mut flattened = vec![];
for slot in cycle.iter() {
for sac in slot.iter() {
@ -174,9 +165,7 @@ mod tests {
flattened
}
fn flatten_shards_in_slots(cycle: &DelegatedCycle)
-> Vec<Vec<usize>>
{
fn flatten_shards_in_slots(cycle: &DelegatedCycle) -> Vec<Vec<usize>> {
let mut shards_in_slots: Vec<Vec<usize>> = vec![];
for slot in cycle.iter() {
let mut shards: Vec<usize> = vec![];
@ -201,30 +190,50 @@ mod tests {
&shard_count,
crosslinking_shard_start,
cycle_length,
min_committee_size);
min_committee_size,
);
let cycle = result.unwrap();
let assigned_validators = flatten_validators(&cycle);
let assigned_shards = flatten_and_dedup_shards(&cycle);
let shards_in_slots = flatten_shards_in_slots(&cycle);
let expected_shards = shards.get(0..10).unwrap();
assert_eq!(assigned_validators, validators, "Validator assignment incorrect");
assert_eq!(assigned_shards, expected_shards, "Shard assignment incorrect");
assert_eq!(
assigned_validators, validators,
"Validator assignment incorrect"
);
assert_eq!(
assigned_shards, expected_shards,
"Shard assignment incorrect"
);
let expected_shards_in_slots: Vec<Vec<usize>> = vec![
vec![0], vec![0], // Each line is 2 slots..
vec![1], vec![1],
vec![2], vec![2],
vec![3], vec![3],
vec![4], vec![4],
vec![5], vec![5],
vec![6], vec![6],
vec![7], vec![7],
vec![8], vec![8],
vec![9], vec![9],
vec![0],
vec![0], // Each line is 2 slots..
vec![1],
vec![1],
vec![2],
vec![2],
vec![3],
vec![3],
vec![4],
vec![4],
vec![5],
vec![5],
vec![6],
vec![6],
vec![7],
vec![7],
vec![8],
vec![8],
vec![9],
vec![9],
];
// assert!(compare_shards_in_slots(&cycle, &expected_shards_in_slots));
assert_eq!(expected_shards_in_slots, shards_in_slots, "Shard assignment incorrect.")
assert_eq!(
expected_shards_in_slots, shards_in_slots,
"Shard assignment incorrect."
)
}
#[test]
@ -240,17 +249,28 @@ mod tests {
&shard_count,
crosslinking_shard_start,
cycle_length,
min_committee_size);
min_committee_size,
);
let cycle = result.unwrap();
let assigned_validators = flatten_validators(&cycle);
let assigned_shards = flatten_and_dedup_shards(&cycle);
let shards_in_slots = flatten_shards_in_slots(&cycle);
let expected_shards = shards.get(0..22).unwrap();
let expected_shards_in_slots: Vec<Vec<usize>> =
(0_usize..11_usize) .map(|x| vec![2*x,2*x+1]).collect();
assert_eq!(assigned_validators, validators, "Validator assignment incorrect");
assert_eq!(assigned_shards, expected_shards, "Shard assignment incorrect");
let expected_shards_in_slots: Vec<Vec<usize>> = (0_usize..11_usize)
.map(|x| vec![2 * x, 2 * x + 1])
.collect();
assert_eq!(
assigned_validators, validators,
"Validator assignment incorrect"
);
assert_eq!(
assigned_shards, expected_shards,
"Shard assignment incorrect"
);
// assert!(compare_shards_in_slots(&cycle, &expected_shards_in_slots));
assert_eq!(expected_shards_in_slots, shards_in_slots, "Shard assignment incorrect.")
assert_eq!(
expected_shards_in_slots, shards_in_slots,
"Shard assignment incorrect."
)
}
}
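The committee sizing in `generate_cycle` above is easier to follow with concrete numbers; the following worked example uses assumed parameters (1000 validators, 200 shards, cycle_length of 20, min_committee_size of 10):

// All parameters assumed for illustration only.
let validator_count = 1000usize;
let shard_count = 200usize;
let cycle_length = 20usize;
let min_committee_size = 10usize;

// The "enough validators" branch applies because 1000 >= 20 * 10.
assert!(validator_count >= cycle_length * min_committee_size);
let committees_per_slot = std::cmp::min(
    validator_count / cycle_length / (min_committee_size * 2) + 1, // 1000 / 20 / 20 + 1 = 3
    shard_count / cycle_length,                                    // 200 / 20 = 10
);
// Three committees per slot, each committee serving a single slot.
assert_eq!(committees_per_slot, 3);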

View File

@ -16,10 +16,9 @@ const DEFAULT_LIGHTHOUSE_DIR: &str = ".lighthouse";
impl LighthouseConfig {
/// Build a new lighthouse configuration from defaults.
pub fn default() -> Self{
pub fn default() -> Self {
let data_dir = {
let home = dirs::home_dir()
.expect("Unable to determine home dir.");
let home = dirs::home_dir().expect("Unable to determine home dir.");
home.join(DEFAULT_LIGHTHOUSE_DIR)
};
fs::create_dir_all(&data_dir)

View File

@ -4,15 +4,11 @@ extern crate rocksdb;
mod disk_db;
mod memory_db;
mod traits;
pub mod stores;
mod traits;
use self::stores::COLUMNS;
pub use self::disk_db::DiskDB;
pub use self::memory_db::MemoryDB;
pub use self::traits::{
DBError,
DBValue,
ClientDB,
};
pub use self::traits::{ClientDB, DBError, DBValue};

View File

@ -1,21 +1,12 @@
use super::{
ClientDB,
DBError,
};
use super::{ClientDB, DBError};
mod beacon_block_store;
mod pow_chain_store;
mod validator_store;
pub use self::beacon_block_store::{
BeaconBlockStore,
BeaconBlockAtSlotError,
};
pub use self::beacon_block_store::{BeaconBlockAtSlotError, BeaconBlockStore};
pub use self::pow_chain_store::PoWChainStore;
pub use self::validator_store::{
ValidatorStore,
ValidatorStoreError,
};
pub use self::validator_store::{ValidatorStore, ValidatorStoreError};
use super::bls;
@ -23,8 +14,4 @@ pub const BLOCKS_DB_COLUMN: &str = "blocks";
pub const POW_CHAIN_DB_COLUMN: &str = "powchain";
pub const VALIDATOR_DB_COLUMN: &str = "validator";
pub const COLUMNS: [&str; 3] = [
BLOCKS_DB_COLUMN,
POW_CHAIN_DB_COLUMN,
VALIDATOR_DB_COLUMN,
];
pub const COLUMNS: [&str; 3] = [BLOCKS_DB_COLUMN, POW_CHAIN_DB_COLUMN, VALIDATOR_DB_COLUMN];

View File

@ -1,32 +1,24 @@
use std::sync::Arc;
use super::{
ClientDB,
DBError,
};
use super::POW_CHAIN_DB_COLUMN as DB_COLUMN;
use super::{ClientDB, DBError};
use std::sync::Arc;
pub struct PoWChainStore<T>
where T: ClientDB
where
T: ClientDB,
{
db: Arc<T>,
}
impl<T: ClientDB> PoWChainStore<T> {
pub fn new(db: Arc<T>) -> Self {
Self {
db,
}
Self { db }
}
pub fn put_block_hash(&self, hash: &[u8])
-> Result<(), DBError>
{
pub fn put_block_hash(&self, hash: &[u8]) -> Result<(), DBError> {
self.db.put(DB_COLUMN, hash, &[0])
}
pub fn block_hash_exists(&self, hash: &[u8])
-> Result<bool, DBError>
{
pub fn block_hash_exists(&self, hash: &[u8]) -> Result<bool, DBError> {
self.db.exists(DB_COLUMN, hash)
}
}

View File

@ -1,16 +1,10 @@
extern crate bytes;
use self::bytes::{
BufMut,
BytesMut,
};
use std::sync::Arc;
use super::{
ClientDB,
DBError,
};
use super::VALIDATOR_DB_COLUMN as DB_COLUMN;
use self::bytes::{BufMut, BytesMut};
use super::bls::PublicKey;
use super::VALIDATOR_DB_COLUMN as DB_COLUMN;
use super::{ClientDB, DBError};
use std::sync::Arc;
#[derive(Debug, PartialEq)]
pub enum ValidatorStoreError {
@ -30,66 +24,63 @@ enum KeyPrefixes {
}
pub struct ValidatorStore<T>
where T: ClientDB
where
T: ClientDB,
{
db: Arc<T>,
}
impl<T: ClientDB> ValidatorStore<T> {
pub fn new(db: Arc<T>) -> Self {
Self {
db,
}
Self { db }
}
fn prefix_bytes(&self, key_prefix: &KeyPrefixes)
-> Vec<u8>
{
fn prefix_bytes(&self, key_prefix: &KeyPrefixes) -> Vec<u8> {
match key_prefix {
KeyPrefixes::PublicKey => b"pubkey".to_vec(),
}
}
fn get_db_key_for_index(&self, key_prefix: &KeyPrefixes, index: usize)
-> Vec<u8>
{
fn get_db_key_for_index(&self, key_prefix: &KeyPrefixes, index: usize) -> Vec<u8> {
let mut buf = BytesMut::with_capacity(6 + 8);
buf.put(self.prefix_bytes(key_prefix));
buf.put_u64_be(index as u64);
buf.take().to_vec()
}
pub fn put_public_key_by_index(&self, index: usize, public_key: &PublicKey)
-> Result<(), ValidatorStoreError>
{
pub fn put_public_key_by_index(
&self,
index: usize,
public_key: &PublicKey,
) -> Result<(), ValidatorStoreError> {
let key = self.get_db_key_for_index(&KeyPrefixes::PublicKey, index);
let val = public_key.as_bytes();
self.db.put(DB_COLUMN, &key[..], &val[..])
.map_err(ValidatorStoreError::from)
self.db
.put(DB_COLUMN, &key[..], &val[..])
.map_err(ValidatorStoreError::from)
}
pub fn get_public_key_by_index(&self, index: usize)
-> Result<Option<PublicKey>, ValidatorStoreError>
{
pub fn get_public_key_by_index(
&self,
index: usize,
) -> Result<Option<PublicKey>, ValidatorStoreError> {
let key = self.get_db_key_for_index(&KeyPrefixes::PublicKey, index);
let val = self.db.get(DB_COLUMN, &key[..])?;
match val {
None => Ok(None),
Some(val) => {
match PublicKey::from_bytes(&val) {
Ok(key) => Ok(Some(key)),
Err(_) => Err(ValidatorStoreError::DecodeError),
}
}
Some(val) => match PublicKey::from_bytes(&val) {
Ok(key) => Ok(Some(key)),
Err(_) => Err(ValidatorStoreError::DecodeError),
},
}
}
}
#[cfg(test)]
mod tests {
use super::*;
use super::super::super::MemoryDB;
use super::super::bls::Keypair;
use super::*;
#[test]
fn test_validator_store_put_get() {
@ -112,16 +103,19 @@ mod tests {
* Check all keys are retrieved correctly.
*/
for i in 0..keys.len() {
let retrieved = store.get_public_key_by_index(i)
.unwrap().unwrap();
let retrieved = store.get_public_key_by_index(i).unwrap().unwrap();
assert_eq!(retrieved, keys[i].pk);
}
/*
* Check that an index that wasn't stored returns None.
*/
assert!(store.get_public_key_by_index(keys.len() + 1)
.unwrap().is_none());
assert!(
store
.get_public_key_by_index(keys.len() + 1)
.unwrap()
.is_none()
);
}
#[test]
@ -132,7 +126,9 @@ mod tests {
let key = store.get_db_key_for_index(&KeyPrefixes::PublicKey, 42);
db.put(DB_COLUMN, &key[..], "cats".as_bytes()).unwrap();
assert_eq!(store.get_public_key_by_index(42),
Err(ValidatorStoreError::DecodeError));
assert_eq!(
store.get_public_key_by_index(42),
Err(ValidatorStoreError::DecodeError)
);
}
}
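For reference, the database key built by `get_db_key_for_index` above is simply the prefix b"pubkey" followed by the index as a big-endian `u64`. A standalone sketch of the same layout, assuming the bytes 0.4-era API already used in this file:

use bytes::{BufMut, BytesMut};

// Sketch only: mirrors the key layout, not part of the store's public API.
fn pubkey_db_key(index: u64) -> Vec<u8> {
    let mut buf = BytesMut::with_capacity(6 + 8);
    buf.put(b"pubkey".to_vec());
    buf.put_u64_be(index);
    buf.take().to_vec()
}

// pubkey_db_key(42) == [b'p', b'u', b'b', b'k', b'e', b'y', 0, 0, 0, 0, 0, 0, 0, 42]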

View File

@ -1,7 +1,7 @@
#[macro_use]
extern crate slog;
extern crate slog_term;
extern crate slog_async;
extern crate slog_term;
// extern crate ssz;
extern crate clap;
extern crate futures;
@ -12,9 +12,9 @@ mod config;
use std::path::PathBuf;
use slog::Drain;
use clap::{ Arg, App };
use clap::{App, Arg};
use config::LighthouseConfig;
use slog::Drain;
fn main() {
let decorator = slog_term::TermDecorator::new().build();
@ -26,17 +26,19 @@ fn main() {
.version("0.0.1")
.author("Sigma Prime <paul@sigmaprime.io>")
.about("Eth 2.0 Client")
.arg(Arg::with_name("datadir")
.long("datadir")
.value_name("DIR")
.help("Data directory for keys and databases.")
.takes_value(true))
.arg(Arg::with_name("port")
.long("port")
.value_name("PORT")
.help("Network listen port for p2p connections.")
.takes_value(true))
.get_matches();
.arg(
Arg::with_name("datadir")
.long("datadir")
.value_name("DIR")
.help("Data directory for keys and databases.")
.takes_value(true),
).arg(
Arg::with_name("port")
.long("port")
.value_name("PORT")
.help("Network listen port for p2p connections.")
.takes_value(true),
).get_matches();
let mut config = LighthouseConfig::default();
@ -60,8 +62,10 @@ fn main() {
"data_dir" => &config.data_dir.to_str(),
"port" => &config.p2p_listen_port);
error!(log,
"Lighthouse under development and does not provide a user demo.");
error!(
log,
"Lighthouse under development and does not provide a user demo."
);
info!(log, "Exiting.");
}