Merge database works, directory restructuring

This commit is contained in:
Paul Hauner 2018-09-26 11:58:46 +10:00
commit c8ff539686
No known key found for this signature in database
GPG Key ID: 303E4494BB28068C
40 changed files with 1251 additions and 163 deletions

View File

@ -7,7 +7,7 @@ authors = ["Paul Hauner <paul@paulhauner.com>"]
# TODO: remove "blake2" in favor of "blake2-rfc"
blake2 = "^0.7.1"
blake2-rfc = "0.2.18"
bls = { git = "https://github.com/sigp/bls" }
bls-aggregates = { git = "https://github.com/sigp/signature-schemes" }
boolean-bitfield = { path = "boolean-bitfield" }
bytes = ""
crypto-mac = "^0.6.2"

View File

@ -86,6 +86,9 @@ impl BooleanBitfield {
/// vector.
pub fn is_empty(&self) -> bool { self.len == 0 }
/// The number of bytes required to represent the bitfield.
pub fn num_bytes(&self) -> usize { self.vec.len() }
/// Iterate through the underlying vector and count the number of
/// true bits.
pub fn num_true_bits(&self) -> u64 {
@ -114,7 +117,17 @@ impl BooleanBitfield {
0
}
/// Get the byte at a position, assuming big-endian encoding.
pub fn get_byte(&self, n: usize) -> Option<&u8> {
self.vec.get(n)
}
/// Clone and return the underlying byte array (`Vec<u8>`).
pub fn to_vec(&self) -> Vec<u8> {
self.vec.clone()
}
/// Clone and return the underlying byte array (`Vec<u8>`) in big-endian format.
pub fn to_be_vec(&self) -> Vec<u8> {
let mut o = self.vec.clone();
o.reverse();
@ -142,7 +155,7 @@ impl PartialEq for BooleanBitfield {
impl ssz::Encodable for BooleanBitfield {
fn ssz_append(&self, s: &mut ssz::SszStream) {
s.append_vec(&self.to_be_vec());
s.append_vec(&self.to_vec());
}
}
@ -161,7 +174,8 @@ impl ssz::Decodable for BooleanBitfield {
Ok((BooleanBitfield::new(),
index + ssz::LENGTH_BYTES))
} else {
let b = BooleanBitfield::from(&bytes[(index + 4)..(len + 4)]);
let b = BooleanBitfield::
from(&bytes[(index + 4)..(index + len + 4)]);
let index = index + ssz::LENGTH_BYTES + len;
Ok((b, index))
}
@ -182,7 +196,7 @@ mod tests {
let mut stream = ssz::SszStream::new();
stream.append(&b);
assert_eq!(stream.drain(), vec![0, 0, 0, 2, 1, 0]);
assert_eq!(stream.drain(), vec![0, 0, 0, 2, 0, 1]);
}
#[test]
@ -190,7 +204,7 @@ mod tests {
/*
* Correct input
*/
let input = vec![0, 0, 0, 2, 1, 0];
let input = vec![0, 0, 0, 2, 0, 1];
let (b, i) = BooleanBitfield::ssz_decode(&input, 0).unwrap();
assert_eq!(i, 6);
assert_eq!(b.num_true_bits(), 1);
@ -199,7 +213,7 @@ mod tests {
/*
* Input too long
*/
let mut input = vec![0, 0, 0, 2, 1, 0];
let mut input = vec![0, 0, 0, 2, 0, 1];
input.push(42);
let (b, i) = BooleanBitfield::ssz_decode(&input, 0).unwrap();
assert_eq!(i, 6);

9
lighthouse/bls/mod.rs Normal file
View File

@ -0,0 +1,9 @@
extern crate bls_aggregates;
pub use self::bls_aggregates::AggregateSignature;
pub use self::bls_aggregates::AggregatePublicKey;
pub use self::bls_aggregates::Signature;
pub use self::bls_aggregates::Keypair;
pub use self::bls_aggregates::PublicKey;
pub const BLS_AGG_SIG_BYTE_SIZE: usize = 97;

View File

@ -36,6 +36,9 @@ impl DiskDB {
let mut options = Options::default();
options.create_if_missing(true);
// TODO: ensure that columns are created (and remove
// the dead_code allow)
/*
* Initialise the path
*/
@ -58,6 +61,7 @@ impl DiskDB {
/// Create a RocksDB column family. Corresponds to the
/// `create_cf()` function on the RocksDB API.
#[allow(dead_code)]
fn create_col(&mut self, col: &str)
-> Result<(), DBError>
{
@ -108,6 +112,21 @@ impl ClientDB for DiskDB {
Some(handle) => self.db.put_cf(handle, key, val).map_err(|e| e.into())
}
}
/// Return true if some key exists in some column.
///
/// Returns `Err` if the column family `col` is unknown to RocksDB;
/// otherwise `Ok(true)` iff a value is stored under `key`.
fn exists(&self, col: &str, key: &[u8])
    -> Result<bool, DBError>
{
    /*
     * I'm not sure if this is the correct way to read if some
     * block exists. Naively I would expect this to unnecessarily
     * copy some data, but I could be wrong.
     */
    // NOTE(review): `get_cf` fetches the full value just to test presence;
    // RocksDB's `key_may_exist_cf` might avoid the copy — confirm.
    match self.db.cf_handle(col) {
        None => Err(DBError{ message: "Unknown column".to_string() }),
        Some(handle) => Ok(self.db.get_cf(handle, key)?.is_some())
    }
}
}

View File

@ -1,6 +1,7 @@
use std::collections::{ HashSet, HashMap };
use std::sync::RwLock;
use super::blake2::blake2b::blake2b;
use super::COLUMNS;
use super::{
ClientDB,
DBValue,
@ -24,13 +25,11 @@ impl MemoryDB {
///
/// All columns must be supplied initially, you will get an error if you try to access a column
/// that was not declared here. This condition is enforced artificially to simulate RocksDB.
pub fn open(columns: Option<&[&str]>) -> Self {
pub fn open() -> Self {
let db: DBHashMap = HashMap::new();
let mut known_columns: ColumnHashSet = HashSet::new();
if let Some(columns) = columns {
for col in columns {
known_columns.insert(col.to_string());
}
for col in &COLUMNS {
known_columns.insert(col.to_string());
}
Self {
db: RwLock::new(db),
@ -77,6 +76,23 @@ impl ClientDB for MemoryDB {
Err(DBError{ message: "Unknown column".to_string() })
}
}
/// Return true if some key exists in some column.
///
/// Returns `Err` if `col` was not declared at `open()` time; otherwise
/// `Ok(true)` iff the (column, key) pair is present in the map.
fn exists(&self, col: &str, key: &[u8])
    -> Result<bool, DBError>
{
    // Panic if the DB locks are poisoned.
    let db = self.db.read().unwrap();
    let known_columns = self.known_columns.read().unwrap();

    if known_columns.contains(&col.to_string()) {
        // Keys are namespaced per-column before lookup, so identical keys
        // in different columns do not collide.
        let column_key = MemoryDB::get_key_for_col(col, key);
        Ok(db.contains_key(&column_key))
    } else {
        Err(DBError{ message: "Unknown column".to_string() })
    }
}
}
@ -86,18 +102,17 @@ mod tests {
use super::super::ClientDB;
use std::thread;
use std::sync::Arc;
use super::super::stores::{
BLOCKS_DB_COLUMN,
VALIDATOR_DB_COLUMN,
};
#[test]
fn test_memorydb_column_access() {
let col_a: &str = "ColumnA";
let col_b: &str = "ColumnB";
let col_a: &str = BLOCKS_DB_COLUMN;
let col_b: &str = VALIDATOR_DB_COLUMN;
let column_families = vec![
col_a,
col_b,
];
let db = MemoryDB::open(Some(&column_families));
let db = MemoryDB::open();
/*
* Testing that if we write to the same key in different columns that
@ -114,15 +129,10 @@ mod tests {
#[test]
fn test_memorydb_unknown_column_access() {
let col_a: &str = "ColumnA";
let col_a: &str = BLOCKS_DB_COLUMN;
let col_x: &str = "ColumnX";
let column_families = vec![
col_a,
// col_x is excluded on purpose
];
let db = MemoryDB::open(Some(&column_families));
let db = MemoryDB::open();
/*
* Test that we get errors when using undeclared columns
@ -135,11 +145,30 @@ mod tests {
}
#[test]
fn test_memorydb_threading() {
let col_name: &str = "TestColumn";
let column_families = vec![col_name];
fn test_memorydb_exists() {
let col_a: &str = BLOCKS_DB_COLUMN;
let col_b: &str = VALIDATOR_DB_COLUMN;
let db = Arc::new(MemoryDB::open(Some(&column_families)));
let db = MemoryDB::open();
/*
* Testing that if we write to the same key in different columns that
* there is not an overlap.
*/
db.put(col_a, "cats".as_bytes(), "lol".as_bytes()).unwrap();
assert_eq!(true, db.exists(col_a, "cats".as_bytes()).unwrap());
assert_eq!(false, db.exists(col_b, "cats".as_bytes()).unwrap());
assert_eq!(false, db.exists(col_a, "dogs".as_bytes()).unwrap());
assert_eq!(false, db.exists(col_b, "dogs".as_bytes()).unwrap());
}
#[test]
fn test_memorydb_threading() {
let col_name: &str = BLOCKS_DB_COLUMN;
let db = Arc::new(MemoryDB::open());
let thread_count = 10;
let write_count = 10;

View File

@ -4,6 +4,10 @@ extern crate blake2_rfc as blake2;
mod disk_db;
mod memory_db;
mod traits;
pub mod stores;
use super::bls;
use self::stores::COLUMNS;
pub use self::disk_db::DiskDB;
pub use self::memory_db::MemoryDB;

View File

@ -0,0 +1,85 @@
use std::sync::Arc;
use super::{
ClientDB,
DBError,
};
use super::BLOCKS_DB_COLUMN as DB_COLUMN;
/// Persistence layer for serialized blocks, keyed by block hash.
///
/// Every operation delegates to the wrapped `ClientDB`, reading and
/// writing within the blocks column.
pub struct BlockStore<T: ClientDB> {
    db: Arc<T>,
}

impl<T: ClientDB> BlockStore<T> {
    /// Wrap the given database handle.
    pub fn new(db: Arc<T>) -> Self {
        Self { db }
    }

    /// Write the ssz bytes for a block under its hash.
    pub fn put_block(&self, hash: &[u8], ssz: &[u8]) -> Result<(), DBError> {
        self.db.put(DB_COLUMN, hash, ssz)
    }

    /// Read the ssz bytes for a block by hash, if stored.
    pub fn get_block(&self, hash: &[u8]) -> Result<Option<Vec<u8>>, DBError> {
        self.db.get(DB_COLUMN, hash)
    }

    /// Check whether a block is stored under the given hash.
    pub fn block_exists(&self, hash: &[u8]) -> Result<bool, DBError> {
        self.db.exists(DB_COLUMN, hash)
    }
}
#[cfg(test)]
mod tests {
    use super::*;
    use super::super::super::MemoryDB;
    use std::thread;
    use std::sync::Arc;

    // NOTE(review): the name says "disk_db" but the test runs against
    // `MemoryDB`; consider renaming to match the backend actually used.
    #[test]
    fn test_block_store_on_disk_db() {
        let db = Arc::new(MemoryDB::open());
        let bs = Arc::new(BlockStore::new(db.clone()));

        let thread_count = 10;
        let write_count = 10;

        // We're expecting the product of these numbers to fit in one byte.
        assert!(thread_count * write_count <= 255);

        let mut handles = vec![];
        for t in 0..thread_count {
            let wc = write_count;
            let bs = bs.clone();
            let handle = thread::spawn(move || {
                for w in 0..wc {
                    // NOTE(review): `t * w` collides across threads (e.g. it
                    // is 0 whenever t or w is 0). Harmless here because every
                    // write stores the same value (42), but the keys are not
                    // unique per (thread, write) pair.
                    let key = (t * w) as u8;
                    let val = 42;
                    bs.put_block(&vec![key], &vec![val]).unwrap();
                }
            });
            handles.push(handle);
        }

        for handle in handles {
            handle.join().unwrap();
        }

        // Every key written above must exist and round-trip its value.
        for t in 0..thread_count {
            for w in 0..write_count {
                let key = (t * w) as u8;
                assert!(bs.block_exists(&vec![key]).unwrap());
                let val = bs.get_block(&vec![key]).unwrap().unwrap();
                assert_eq!(vec![42], val);
            }
        }
    }
}

View File

@ -0,0 +1,27 @@
use super::{
    ClientDB,
    DBError,
};

mod block_store;
mod pow_chain_store;
mod validator_store;

pub use self::block_store::BlockStore;
pub use self::pow_chain_store::PoWChainStore;
pub use self::validator_store::{
    ValidatorStore,
    ValidatorStoreError,
};

use super::bls;

// Name of the database column (column family) each store writes into.
pub const BLOCKS_DB_COLUMN: &str = "blocks";
pub const POW_CHAIN_DB_COLUMN: &str = "powchain";
pub const VALIDATOR_DB_COLUMN: &str = "validator";

// The complete set of columns the database must provide;
// `MemoryDB::open()` registers exactly this set as its known columns.
pub const COLUMNS: [&str; 3] = [
    BLOCKS_DB_COLUMN,
    POW_CHAIN_DB_COLUMN,
    VALIDATOR_DB_COLUMN,
];

View File

@ -0,0 +1,34 @@
use std::sync::Arc;
use super::{
ClientDB,
DBError,
};
use super::POW_CHAIN_DB_COLUMN as DB_COLUMN;
/// Tracks which PoW-chain block hashes have been recorded.
///
/// Only hash membership matters; the stored value is a single
/// placeholder byte.
pub struct PoWChainStore<T: ClientDB> {
    db: Arc<T>,
}

impl<T: ClientDB> PoWChainStore<T> {
    /// Wrap the given database handle.
    pub fn new(db: Arc<T>) -> Self {
        Self { db }
    }

    /// Record the given block hash. The value written is a placeholder.
    pub fn put_block_hash(&self, hash: &[u8]) -> Result<(), DBError> {
        self.db.put(DB_COLUMN, hash, &[0])
    }

    /// Check whether the given block hash has been recorded.
    pub fn block_hash_exists(&self, hash: &[u8]) -> Result<bool, DBError> {
        self.db.exists(DB_COLUMN, hash)
    }
}
// TODO: add tests once a memory-db is implemented

View File

@ -0,0 +1,138 @@
extern crate bytes;
use self::bytes::{
BufMut,
BytesMut,
};
use std::sync::Arc;
use super::{
ClientDB,
DBError,
};
use super::VALIDATOR_DB_COLUMN as DB_COLUMN;
use super::bls::PublicKey;
/// Errors that can arise when reading or writing validator records.
#[derive(Debug, PartialEq)]
pub enum ValidatorStoreError {
    /// Wraps the message from an underlying database error.
    DBError(String),
    /// Stored bytes could not be parsed back into the expected type.
    DecodeError,
}

impl From<DBError> for ValidatorStoreError {
    fn from(error: DBError) -> Self {
        ValidatorStoreError::DBError(error.message)
    }
}

/// Key namespaces used to partition the validator column.
#[derive(Debug, PartialEq)]
enum KeyPrefixes {
    PublicKey,
}
/// Stores per-validator data (currently public keys), keyed by
/// validator index within the validator column.
pub struct ValidatorStore<T>
    where T: ClientDB
{
    db: Arc<T>,
}

impl<T: ClientDB> ValidatorStore<T> {
    /// Wrap the given database handle.
    pub fn new(db: Arc<T>) -> Self {
        Self {
            db,
        }
    }

    /// Return the byte prefix that namespaces keys of the given kind.
    fn prefix_bytes(&self, key_prefix: &KeyPrefixes)
        -> Vec<u8>
    {
        match key_prefix {
            KeyPrefixes::PublicKey => b"pubkey".to_vec(),
        }
    }

    /// Build the full database key: `prefix || index`, with the index
    /// encoded as a big-endian u64.
    fn get_db_key_for_index(&self, key_prefix: &KeyPrefixes, index: usize)
        -> Vec<u8>
    {
        let prefix = self.prefix_bytes(key_prefix);
        // Size the buffer from the actual prefix length rather than the
        // hard-coded `6` (the length of "pubkey"), so future `KeyPrefixes`
        // variants of any length stay correct without touching this code.
        let mut buf = BytesMut::with_capacity(prefix.len() + 8);
        buf.put(prefix);
        buf.put_u64_be(index as u64);
        buf.take().to_vec()
    }

    /// Store a validator's public key under its index.
    pub fn put_public_key_by_index(&self, index: usize, public_key: &PublicKey)
        -> Result<(), ValidatorStoreError>
    {
        let key = self.get_db_key_for_index(&KeyPrefixes::PublicKey, index);
        let val = public_key.as_bytes();
        self.db.put(DB_COLUMN, &key[..], &val[..])
            .map_err(ValidatorStoreError::from)
    }

    /// Fetch a validator's public key by index.
    ///
    /// Returns `Ok(None)` when nothing is stored for the index and
    /// `Err(DecodeError)` when the stored bytes fail to parse.
    pub fn get_public_key_by_index(&self, index: usize)
        -> Result<Option<PublicKey>, ValidatorStoreError>
    {
        let key = self.get_db_key_for_index(&KeyPrefixes::PublicKey, index);
        let val = self.db.get(DB_COLUMN, &key[..])?;
        match val {
            None => Ok(None),
            Some(val) => {
                match PublicKey::from_bytes(&val) {
                    Ok(key) => Ok(Some(key)),
                    Err(_) => Err(ValidatorStoreError::DecodeError),
                }
            }
        }
    }
}
#[cfg(test)]
mod tests {
    use super::*;
    use super::super::super::MemoryDB;
    use super::super::bls::Keypair;

    #[test]
    fn test_validator_store_put_get() {
        let db = Arc::new(MemoryDB::open());
        let store = ValidatorStore::new(db);

        // Five random keypairs, indexed 0..5.
        let keys: Vec<Keypair> = (0..5).map(|_| Keypair::random()).collect();

        for (i, keypair) in keys.iter().enumerate() {
            store.put_public_key_by_index(i, &keypair.pk).unwrap();
        }

        // Every stored public key must round-trip unchanged.
        for (i, keypair) in keys.iter().enumerate() {
            let retrieved = store.get_public_key_by_index(i)
                .unwrap().unwrap();
            assert_eq!(retrieved, keypair.pk);
        }

        // An index that was never written yields `None`.
        assert!(store.get_public_key_by_index(keys.len() + 1)
            .unwrap().is_none());
    }

    #[test]
    fn test_validator_store_bad_key() {
        let db = Arc::new(MemoryDB::open());
        let store = ValidatorStore::new(db.clone());

        // Write garbage directly at the slot where index 42's pubkey lives;
        // the typed getter must report a decode failure.
        let key = store.get_db_key_for_index(&KeyPrefixes::PublicKey, 42);
        db.put(DB_COLUMN, &key[..], "cats".as_bytes()).unwrap();

        assert_eq!(store.get_public_key_by_index(42),
                   Err(ValidatorStoreError::DecodeError));
    }
}

View File

@ -23,5 +23,8 @@ pub trait ClientDB: Sync + Send {
fn put(&self, col: &str, key: &[u8], val: &[u8])
-> Result<(), DBError>;
fn exists(&self, col: &str, key: &[u8])
-> Result<bool, DBError>;
}

View File

@ -2,16 +2,24 @@
extern crate slog;
extern crate slog_term;
extern crate slog_async;
extern crate ssz;
extern crate clap;
extern crate network_libp2p;
extern crate futures;
pub mod db;
pub mod client;
pub mod state;
pub mod sync;
pub mod utils;
pub mod config;
#[macro_use]
#[allow(dead_code)]
mod utils;
#[allow(dead_code)]
mod bls;
#[allow(dead_code)]
mod db;
mod client;
#[allow(dead_code)]
mod state;
#[allow(dead_code)]
mod sync;
mod config;
use std::path::PathBuf;

View File

@ -1,37 +0,0 @@
use super::utils::types::{ Hash256, Bitfield };
use super::utils::bls::{ AggregateSignature };
use super::ssz::{ Encodable, SszStream };
pub struct AttestationRecord {
pub slot: u64,
pub shard_id: u16,
pub oblique_parent_hashes: Vec<Hash256>,
pub shard_block_hash: Hash256,
pub attester_bitfield: Bitfield,
pub aggregate_sig: Option<AggregateSignature>,
}
impl Encodable for AttestationRecord {
fn ssz_append(&self, s: &mut SszStream) {
s.append(&self.slot);
s.append(&self.shard_id);
s.append_vec(&self.oblique_parent_hashes);
s.append(&self.shard_block_hash);
s.append_vec(&self.attester_bitfield.to_be_vec());
// TODO: add aggregate signature
}
}
impl AttestationRecord {
pub fn zero() -> Self {
Self {
slot: 0,
shard_id: 0,
oblique_parent_hashes: vec![],
shard_block_hash: Hash256::zero(),
attester_bitfield: Bitfield::new(),
aggregate_sig: None,
}
}
}

View File

@ -0,0 +1,17 @@
use super::bls;
use super::ssz;
use super::utils;
mod structs;
mod ssz_splitter;
pub use self::structs::{
AttestationRecord,
MIN_SSZ_ATTESTION_RECORD_LENGTH,
};
pub use self::ssz_splitter::{
split_all_attestations,
split_one_attestation,
AttestationSplitError,
};

View File

@ -0,0 +1,139 @@
use super::MIN_SSZ_ATTESTION_RECORD_LENGTH as MIN_LENGTH;
use super::ssz::LENGTH_BYTES;
use super::ssz::decode::decode_length;
/// Errors encountered while locating AttestationRecord boundaries
/// within an ssz slice.
#[derive(Debug, PartialEq)]
pub enum AttestationSplitError {
    /// The slice ended before a complete AttestationRecord could be bounded.
    TooShort,
}
/// Given some ssz slice, find the bounds of each serialized AttestationRecord and return a vec of
/// slices pointing to each.
pub fn split_all_attestations<'a>(full_ssz: &'a [u8], index: usize)
    -> Result<Vec<&'a [u8]>, AttestationSplitError>
{
    let mut v = vec![];
    let mut index = index;
    // Bound check in addition form: the previous `index < full_ssz.len() - 1`
    // underflows `usize` when `full_ssz` is empty (panicking in debug
    // builds). An empty slice now simply yields an empty vec.
    while index + 1 < full_ssz.len() {
        let (slice, i) = split_one_attestation(full_ssz, index)?;
        v.push(slice);
        index = i;
    }
    Ok(v)
}
/// Given some ssz slice, find the bounds of one serialized AttestationRecord
/// and return a slice pointing to that.
///
/// Offsets are derived from the record layout: slot (8) + shard_id (2)
/// place the `oblique_parent_hashes` length prefix at `index + 10`; adding
/// that prefix (4) and `shard_block_hash` (32) places the bitfield length
/// prefix at `index + hashes_len + 46`.
pub fn split_one_attestation(full_ssz: &[u8], index: usize)
    -> Result<(&[u8], usize), AttestationSplitError>
{
    if full_ssz.len() < MIN_LENGTH {
        return Err(AttestationSplitError::TooShort);
    }
    let hashes_len = decode_length(full_ssz, index + 10, LENGTH_BYTES)
        .map_err(|_| AttestationSplitError::TooShort)?;
    let bitfield_len = decode_length(
        full_ssz, index + hashes_len + 46,
        LENGTH_BYTES)
        .map_err(|_| AttestationSplitError::TooShort)?;
    // Subtract one because the min length assumes 1 byte of bitfield
    let len = MIN_LENGTH - 1
        + hashes_len
        + bitfield_len;
    if full_ssz.len() < index + len {
        return Err(AttestationSplitError::TooShort);
    }
    Ok((&full_ssz[index..(index + len)], index + len))
}
#[cfg(test)]
mod tests {
    use super::*;
    use super::super::AttestationRecord;
    use super::super::utils::types::{
        Hash256,
        Bitfield,
    };
    use super::super::bls::AggregateSignature;
    use super::super::ssz::{
        SszStream,
        Decodable,
    };

    /// Build two distinct, fully-populated records for round-trip testing.
    fn get_two_records() -> Vec<AttestationRecord> {
        let a = AttestationRecord {
            slot: 7,
            shard_id: 9,
            oblique_parent_hashes: vec![Hash256::from(&vec![14; 32][..])],
            shard_block_hash: Hash256::from(&vec![15; 32][..]),
            attester_bitfield: Bitfield::from(&vec![17; 42][..]),
            justified_slot: 19,
            justified_block_hash: Hash256::from(&vec![15; 32][..]),
            aggregate_sig: AggregateSignature::new(),
        };
        let b = AttestationRecord {
            slot: 9,
            shard_id: 7,
            oblique_parent_hashes: vec![Hash256::from(&vec![15; 32][..])],
            shard_block_hash: Hash256::from(&vec![14; 32][..]),
            attester_bitfield: Bitfield::from(&vec![19; 42][..]),
            justified_slot: 15,
            justified_block_hash: Hash256::from(&vec![17; 32][..]),
            aggregate_sig: AggregateSignature::new(),
        };
        vec![a, b]
    }

    #[test]
    fn test_attestation_ssz_split() {
        let ars = get_two_records();
        let a = ars[0].clone();
        let b = ars[1].clone();

        /*
         * Test split one: the returned slice must decode back to `a` and
         * the returned index must consume the whole encoding.
         */
        let mut ssz_stream = SszStream::new();
        ssz_stream.append(&a);
        let ssz = ssz_stream.drain();
        let (a_ssz, i) = split_one_attestation(&ssz, 0).unwrap();
        assert_eq!(i, ssz.len());
        let (decoded_a, _) = AttestationRecord::ssz_decode(a_ssz, 0)
            .unwrap();
        assert_eq!(a, decoded_a);

        /*
         * Test split two: both concatenated records must be recovered.
         */
        let mut ssz_stream = SszStream::new();
        ssz_stream.append(&a);
        ssz_stream.append(&b);
        let ssz = ssz_stream.drain();
        let ssz_vec = split_all_attestations(&ssz, 0).unwrap();
        let (decoded_a, _) =
            AttestationRecord::ssz_decode(ssz_vec[0], 0)
            .unwrap();
        let (decoded_b, _) =
            AttestationRecord::ssz_decode(ssz_vec[1], 0)
            .unwrap();
        assert_eq!(a, decoded_a);
        assert_eq!(b, decoded_b);

        /*
         * Test split two with shortened ssz: truncating the final byte
         * must produce an error rather than a partial record.
         */
        let mut ssz_stream = SszStream::new();
        ssz_stream.append(&a);
        ssz_stream.append(&b);
        let ssz = ssz_stream.drain();
        let ssz = &ssz[0..ssz.len() - 1];
        assert!(split_all_attestations(&ssz, 0).is_err());
    }
}

View File

@ -0,0 +1,137 @@
use super::utils::types::{ Hash256, Bitfield };
use super::bls::{
AggregateSignature,
BLS_AGG_SIG_BYTE_SIZE,
};
use super::ssz::{
Encodable,
Decodable,
DecodeError,
decode_ssz_list,
SszStream,
};
/// Minimum serialized size of an `AttestationRecord`: an empty
/// `oblique_parent_hashes` list and a single byte of bitfield.
///
/// NOTE(review): "ATTESTION" is a typo for "ATTESTATION", but this name is
/// public and referenced from other modules, so renaming must be coordinated.
pub const MIN_SSZ_ATTESTION_RECORD_LENGTH: usize = {
    8 + // slot
    2 + // shard_id
    4 + // oblique_parent_hashes (empty list)
    32 + // shard_block_hash
    5 + // attester_bitfield (assuming 1 byte of bitfield)
    8 + // justified_slot
    32 + // justified_block_hash
    4 + BLS_AGG_SIG_BYTE_SIZE // aggregate sig (two 256 bit points)
};
/// A single attestation record.
///
/// Field order here is significant: the `Encodable`/`Decodable` impls below
/// serialize fields in exactly this declaration order.
#[derive(Debug, Clone, PartialEq)]
pub struct AttestationRecord {
    pub slot: u64,
    pub shard_id: u16,
    pub oblique_parent_hashes: Vec<Hash256>,
    pub shard_block_hash: Hash256,
    pub attester_bitfield: Bitfield,
    pub justified_slot: u64,
    pub justified_block_hash: Hash256,
    pub aggregate_sig: AggregateSignature,
}
impl Encodable for AttestationRecord {
    /// Append fields in struct-declaration order; this must stay in sync
    /// with the decode order in `ssz_decode`.
    fn ssz_append(&self, s: &mut SszStream) {
        s.append(&self.slot);
        s.append(&self.shard_id);
        s.append_vec(&self.oblique_parent_hashes);
        s.append(&self.shard_block_hash);
        // The bitfield is serialized via its big-endian byte form.
        s.append_vec(&self.attester_bitfield.to_be_vec());
        s.append(&self.justified_slot);
        s.append(&self.justified_block_hash);
        s.append_vec(&self.aggregate_sig.as_bytes());
    }
}
impl Decodable for AttestationRecord {
    /// Decode a record starting at byte `i`, returning the record and the
    /// index of the first byte after it. Field order mirrors `ssz_append`.
    fn ssz_decode(bytes: &[u8], i: usize)
        -> Result<(Self, usize), DecodeError>
    {
        let (slot, i) = u64::ssz_decode(bytes, i)?;
        let (shard_id, i) = u16::ssz_decode(bytes, i)?;
        let (oblique_parent_hashes, i) = decode_ssz_list(bytes, i)?;
        let (shard_block_hash, i) = Hash256::ssz_decode(bytes, i)?;
        let (attester_bitfield, i) = Bitfield::ssz_decode(bytes, i)?;
        let (justified_slot, i) = u64::ssz_decode(bytes, i)?;
        let (justified_block_hash, i) = Hash256::ssz_decode(bytes, i)?;
        // TODO: do aggregate sig decoding properly (the sig is currently
        // round-tripped through a raw byte list).
        let (agg_sig_bytes, i) = decode_ssz_list(bytes, i)?;
        let aggregate_sig = AggregateSignature::from_bytes(&agg_sig_bytes)
            .map_err(|_| DecodeError::TooShort)?; // also could be TooLong
        let attestation_record = Self {
            slot,
            shard_id,
            oblique_parent_hashes,
            shard_block_hash,
            attester_bitfield,
            justified_slot,
            justified_block_hash,
            aggregate_sig,
        };
        Ok((attestation_record, i))
    }
}
impl AttestationRecord {
    /// Return a record with every field set to its zero/empty value.
    ///
    /// Used as a baseline in tests, e.g. to measure the minimum ssz length.
    pub fn zero() -> Self {
        Self {
            slot: 0,
            shard_id: 0,
            oblique_parent_hashes: vec![],
            shard_block_hash: Hash256::zero(),
            attester_bitfield: Bitfield::new(),
            justified_slot: 0,
            justified_block_hash: Hash256::zero(),
            aggregate_sig: AggregateSignature::new(),
        }
    }
}
#[cfg(test)]
mod tests {
    use super::*;
    use super::super::ssz::SszStream;

    // A zeroed record must serialize to exactly the advertised minimum.
    #[test]
    pub fn test_attestation_record_min_ssz_length() {
        let ar = AttestationRecord::zero();
        let mut ssz_stream = SszStream::new();
        ssz_stream.append(&ar);
        let ssz = ssz_stream.drain();

        assert_eq!(ssz.len(), MIN_SSZ_ATTESTION_RECORD_LENGTH);
    }

    #[test]
    pub fn test_attestation_record_min_ssz_encode_decode() {
        let original = AttestationRecord {
            slot: 7,
            shard_id: 9,
            oblique_parent_hashes: vec![Hash256::from(&vec![14; 32][..])],
            shard_block_hash: Hash256::from(&vec![15; 32][..]),
            attester_bitfield: Bitfield::from(&vec![17; 42][..]),
            justified_slot: 19,
            justified_block_hash: Hash256::from(&vec![15; 32][..]),
            aggregate_sig: AggregateSignature::new(),
        };

        let mut ssz_stream = SszStream::new();
        ssz_stream.append(&original);
        let (decoded, _) = AttestationRecord::
            ssz_decode(&ssz_stream.drain(), 0).unwrap();

        // NOTE(review): `aggregate_sig` is not compared field-by-field here,
        // presumably because the empty sig round-trips trivially — confirm.
        assert_eq!(original.slot, decoded.slot);
        assert_eq!(original.shard_id, decoded.shard_id);
        assert_eq!(original.oblique_parent_hashes, decoded.oblique_parent_hashes);
        assert_eq!(original.shard_block_hash, decoded.shard_block_hash);
        assert_eq!(original.attester_bitfield, decoded.attester_bitfield);
        assert_eq!(original.justified_slot, decoded.justified_slot);
        assert_eq!(original.justified_block_hash, decoded.justified_block_hash);
    }
}

View File

@ -0,0 +1,11 @@
extern crate blake2_rfc;
use super::ssz;
use super::utils;
use super::attestation_record;
mod structs;
mod ssz_block;
pub use self::structs::Block;
pub use self::ssz_block::SszBlock;

View File

@ -0,0 +1,356 @@
use super::ssz::decode::{
decode_length,
Decodable,
};
use super::utils::hash::canonical_hash;
use super::structs::{
MIN_SSZ_BLOCK_LENGTH,
MAX_SSZ_BLOCK_LENGTH,
};
use super::attestation_record::MIN_SSZ_ATTESTION_RECORD_LENGTH;
/// Errors raised while validating the framing (length bounds) of a
/// serialized block.
#[derive(Debug, PartialEq)]
pub enum BlockValidatorError {
    TooShort,
    TooLong,
}
const LENGTH_BYTES: usize = 4;
/// Allows for reading of block values directly from serialized ssz bytes.
///
/// The purpose of this struct is to provide the functionality to read block fields directly from
/// some serialized SSZ slice allowing us to read the block without fully
/// de-serializing it.
///
/// This struct should be as "zero-copy" as possible. The `ssz` field is a reference to some slice
/// and each function reads from that slice.
///
/// Use this to perform initial checks before we fully de-serialize a block. It should only really
/// be used to verify blocks that come in from the network, for internal operations we should use a
/// full `Block`.
#[derive(Debug, PartialEq)]
pub struct SszBlock<'a> {
    ssz: &'a [u8],          // serialized block bytes, trimmed to `len`
    attestation_len: usize, // byte length of the `attestations` list
    pub len: usize,         // total serialized length of this block
}
impl<'a> SszBlock<'a> {
    /// Create a new instance from a slice reference.
    ///
    /// This function will validate the length of the ssz string, however it will not validate the
    /// contents.
    ///
    /// The returned `SszBlock` instance will contain a `len` field which can be used to determine
    /// how many bytes were read from the slice. In the case of multiple, sequentially serialized
    /// blocks `len` can be used to assume the location of the next serialized block.
    pub fn from_slice(vec: &'a [u8])
        -> Result<Self, BlockValidatorError>
    {
        let untrimmed_ssz = &vec[..];
        /*
         * Ensure the SSZ is long enough to be a block with
         * one attestation record (not necessarily a valid
         * attestation record).
         */
        if vec.len() < MIN_SSZ_BLOCK_LENGTH + MIN_SSZ_ATTESTION_RECORD_LENGTH {
            return Err(BlockValidatorError::TooShort);
        }
        /*
         * Ensure the SSZ slice isn't longer than is possible for a block.
         */
        if vec.len() > MAX_SSZ_BLOCK_LENGTH {
            return Err(BlockValidatorError::TooLong);
        }
        /*
         * Determine how many bytes are used to store attestation records.
         * Offset 72 = parent_hash (32) + slot_number (8) + randao_reveal (32).
         */
        let attestation_len = decode_length(untrimmed_ssz, 72, LENGTH_BYTES)
            .map_err(|_| BlockValidatorError::TooShort)?;
        /*
         * The block only has one variable field, `attestations`, therefore
         * the size of the block must be the minimum size, plus the length
         * of the attestations.
         */
        let block_ssz_len = {
            MIN_SSZ_BLOCK_LENGTH + attestation_len
        };
        if vec.len() < block_ssz_len {
            return Err(BlockValidatorError::TooShort);
        }
        // Trim to the exact block length so trailing bytes (e.g. the next
        // block) are excluded from hashing and field reads.
        Ok(Self{
            ssz: &untrimmed_ssz[0..block_ssz_len],
            attestation_len,
            len: block_ssz_len,
        })
    }

    /// Return the canonical hash for this block.
    pub fn block_hash(&self) -> Vec<u8> {
        canonical_hash(self.ssz)
    }

    /// Return the `parent_hash` field.
    pub fn parent_hash(&self) -> &[u8] {
        &self.ssz[0..32]
    }

    /// Return the `slot_number` field.
    pub fn slot_number(&self) -> u64 {
        /*
         * An error should be unreachable from this decode
         * because we checked the length of the array at
         * the initialization of this struct.
         *
         * If you can make this function panic, please report
         * it to paul@sigmaprime.io
         */
        if let Ok((n, _)) = u64::ssz_decode(&self.ssz, 32) {
            n
        } else {
            unreachable!();
        }
    }

    /// Return the `randao_reveal` field.
    pub fn randao_reveal(&self) -> &[u8] {
        &self.ssz[40..72]
    }

    /// Return the `attestations` field (the raw concatenated records,
    /// excluding their 4-byte list length prefix).
    pub fn attestations(&self) -> &[u8] {
        let start = 72 + LENGTH_BYTES;
        &self.ssz[start..(start + self.attestation_len)]
    }

    /// Return the `pow_chain_ref` field.
    /// The three trailing 32-byte fields are addressed from the end.
    pub fn pow_chain_ref(&self) -> &[u8] {
        let start = self.len - (32 * 3);
        &self.ssz[start..(start + 32)]
    }

    /// Return the `active_state_root` field.
    pub fn act_state_root(&self) -> &[u8] {
        let start = self.len - (32 * 2);
        &self.ssz[start..(start + 32)]
    }

    /// Return the `crystallized_state_root` field.
    pub fn cry_state_root(&self) -> &[u8] {
        let start = self.len - 32;
        &self.ssz[start..(start + 32)]
    }
}
#[cfg(test)]
mod tests {
    use super::*;
    use super::super::structs::Block;
    use super::super::attestation_record::AttestationRecord;
    use super::super::ssz::SszStream;
    use super::super::utils::types::Hash256;

    /// Serialize a `Block` to ssz bytes.
    fn get_block_ssz(b: &Block) -> Vec<u8> {
        let mut ssz_stream = SszStream::new();
        ssz_stream.append(b);
        ssz_stream.drain()
    }

    /// Serialize an `AttestationRecord` to ssz bytes.
    fn get_attestation_record_ssz(ar: &AttestationRecord) -> Vec<u8> {
        let mut ssz_stream = SszStream::new();
        ssz_stream.append(ar);
        ssz_stream.drain()
    }

    // A block with zero attestations is below the minimum accepted length.
    #[test]
    fn test_ssz_block_zero_attestation_records() {
        let mut b = Block::zero();
        b.attestations = vec![];
        let ssz = get_block_ssz(&b);

        assert_eq!(
            SszBlock::from_slice(&ssz[..]),
            Err(BlockValidatorError::TooShort)
        );
    }

    #[test]
    fn test_ssz_block_single_attestation_record_one_byte_short() {
        let mut b = Block::zero();
        b.attestations = vec![AttestationRecord::zero()];
        let ssz = get_block_ssz(&b);

        assert_eq!(
            SszBlock::from_slice(&ssz[0..(ssz.len() - 1)]),
            Err(BlockValidatorError::TooShort)
        );
    }

    // Trailing bytes beyond the block must be ignored, with `len` trimmed
    // to the actual block length.
    #[test]
    fn test_ssz_block_single_attestation_record_one_byte_long() {
        let mut b = Block::zero();
        b.attestations = vec![AttestationRecord::zero()];
        let mut ssz = get_block_ssz(&b);
        let original_len = ssz.len();
        ssz.push(42);

        let ssz_block = SszBlock::from_slice(&ssz[..]).unwrap();

        assert_eq!(ssz_block.len, original_len);
    }

    #[test]
    fn test_ssz_block_single_attestation_record() {
        let mut b = Block::zero();
        b.attestations = vec![AttestationRecord::zero()];
        let ssz = get_block_ssz(&b);

        assert!(SszBlock::from_slice(&ssz[..]).is_ok());
    }

    #[test]
    fn test_ssz_block_attestation_length() {
        let mut block = Block::zero();
        block.attestations.push(AttestationRecord::zero());
        let serialized = get_block_ssz(&block);
        let ssz_block = SszBlock::from_slice(&serialized).unwrap();

        assert_eq!(ssz_block.attestation_len, MIN_SSZ_ATTESTION_RECORD_LENGTH);
    }

    #[test]
    fn test_ssz_block_block_hash() {
        let mut block = Block::zero();
        block.attestations.push(AttestationRecord::zero());
        let serialized = get_block_ssz(&block);
        let ssz_block = SszBlock::from_slice(&serialized).unwrap();
        let hash = ssz_block.block_hash();
        // Note: this hash was not generated by some external program,
        // it was simply printed then copied into the code. This test
        // will tell us if the hash changes, not that it matches some
        // canonical reference.
        let expected_hash = [
            64, 176, 117, 210, 228, 229, 237, 100, 66, 66, 98,
            252, 31, 111, 218, 27, 160, 57, 164, 12, 15, 164,
            66, 102, 142, 36, 2, 196, 121, 54, 242, 3
        ];
        assert_eq!(hash, expected_hash);

        /*
         * Test if you give the SszBlock too many ssz bytes: the hash must
         * not change because the slice is trimmed to the block length.
         */
        let mut too_long = serialized.clone();
        too_long.push(42);
        let ssz_block = SszBlock::from_slice(&too_long).unwrap();
        let hash = ssz_block.block_hash();
        assert_eq!(hash, expected_hash);
    }

    #[test]
    fn test_ssz_block_parent_hash() {
        let mut block = Block::zero();
        block.attestations.push(AttestationRecord::zero());
        let reference_hash = Hash256::from([42_u8; 32]);
        block.parent_hash = reference_hash.clone();
        let serialized = get_block_ssz(&block);
        let ssz_block = SszBlock::from_slice(&serialized).unwrap();

        assert_eq!(ssz_block.parent_hash(), &reference_hash.to_vec()[..]);
    }

    #[test]
    fn test_ssz_block_slot_number() {
        let mut block = Block::zero();
        block.attestations.push(AttestationRecord::zero());
        block.slot_number = 42;
        let serialized = get_block_ssz(&block);
        let ssz_block = SszBlock::from_slice(&serialized).unwrap();

        assert_eq!(ssz_block.slot_number(), 42);
    }

    #[test]
    fn test_ssz_block_randao_reveal() {
        let mut block = Block::zero();
        block.attestations.push(AttestationRecord::zero());
        let reference_hash = Hash256::from([42_u8; 32]);
        block.randao_reveal = reference_hash.clone();
        let serialized = get_block_ssz(&block);
        let ssz_block = SszBlock::from_slice(&serialized).unwrap();

        assert_eq!(ssz_block.randao_reveal(), &reference_hash.to_vec()[..]);
    }

    #[test]
    fn test_ssz_block_attestations() {
        /*
         * Single AttestationRecord
         */
        let mut block = Block::zero();
        block.attestations.push(AttestationRecord::zero());
        let serialized = get_block_ssz(&block);
        let ssz_block = SszBlock::from_slice(&serialized).unwrap();
        let ssz_ar = get_attestation_record_ssz(&AttestationRecord::zero());

        assert_eq!(ssz_block.attestations(), &ssz_ar[..]);

        /*
         * Multiple AttestationRecords
         */
        let mut block = Block::zero();
        block.attestations.push(AttestationRecord::zero());
        block.attestations.push(AttestationRecord::zero());
        let serialized = get_block_ssz(&block);
        let ssz_block = SszBlock::from_slice(&serialized).unwrap();
        let mut ssz_ar = get_attestation_record_ssz(&AttestationRecord::zero());
        ssz_ar.append(&mut get_attestation_record_ssz(&AttestationRecord::zero()));

        assert_eq!(ssz_block.attestations(), &ssz_ar[..]);
    }

    #[test]
    fn test_ssz_block_pow_chain_ref() {
        let mut block = Block::zero();
        block.attestations.push(AttestationRecord::zero());
        let reference_hash = Hash256::from([42_u8; 32]);
        block.pow_chain_ref = reference_hash.clone();
        let serialized = get_block_ssz(&block);
        let ssz_block = SszBlock::from_slice(&serialized).unwrap();

        assert_eq!(ssz_block.pow_chain_ref(), &reference_hash.to_vec()[..]);
    }

    #[test]
    fn test_ssz_block_act_state_root() {
        let mut block = Block::zero();
        block.attestations.push(AttestationRecord::zero());
        let reference_hash = Hash256::from([42_u8; 32]);
        block.active_state_root = reference_hash.clone();
        let serialized = get_block_ssz(&block);
        let ssz_block = SszBlock::from_slice(&serialized).unwrap();

        assert_eq!(ssz_block.act_state_root(), &reference_hash.to_vec()[..]);
    }

    #[test]
    fn test_ssz_block_cry_state_root() {
        let mut block = Block::zero();
        block.attestations.push(AttestationRecord::zero());
        let reference_hash = Hash256::from([42_u8; 32]);
        block.crystallized_state_root = reference_hash.clone();
        let serialized = get_block_ssz(&block);
        let ssz_block = SszBlock::from_slice(&serialized).unwrap();

        assert_eq!(ssz_block.cry_state_root(), &reference_hash.to_vec()[..]);
    }
}

View File

@ -2,7 +2,16 @@ use super::utils::types::Hash256;
use super::attestation_record::AttestationRecord;
use super::ssz::{ Encodable, SszStream };
const SSZ_BLOCK_LENGTH: usize = 192;
pub const MIN_SSZ_BLOCK_LENGTH: usize = {
32 + // parent_hash
8 + // slot_number
32 + // randao_reveal
4 + // attestations (assuming zero)
32 + // pow_chain_ref
32 + // active_state_root
32 // crystallized_state_root
};
pub const MAX_SSZ_BLOCK_LENGTH: usize = MIN_SSZ_BLOCK_LENGTH + (1 << 24);
pub struct Block {
pub parent_hash: Hash256,
@ -26,23 +35,6 @@ impl Block {
crystallized_state_root: Hash256::zero(),
}
}
/// Return the bytes that should be signed in order to
/// attest for this block.
pub fn encode_for_signing(&self)
-> [u8; SSZ_BLOCK_LENGTH]
{
let mut s = SszStream::new();
s.append(&self.parent_hash);
s.append(&self.slot_number);
s.append(&self.randao_reveal);
s.append(&self.pow_chain_ref);
s.append(&self.active_state_root);
s.append(&self.crystallized_state_root);
let vec = s.drain();
let mut encoded = [0; SSZ_BLOCK_LENGTH];
encoded.copy_from_slice(&vec); encoded
}
}
impl Encodable for Block {
@ -73,4 +65,15 @@ mod tests {
assert!(b.active_state_root.is_zero());
assert!(b.crystallized_state_root.is_zero());
}
#[test]
pub fn test_block_min_ssz_length() {
let b = Block::zero();
let mut ssz_stream = SszStream::new();
ssz_stream.append(&b);
let ssz = ssz_stream.drain();
assert_eq!(ssz.len(), MIN_SSZ_BLOCK_LENGTH);
}
}

View File

@ -2,14 +2,31 @@ pub struct ChainConfig {
pub cycle_length: u8,
pub shard_count: u16,
pub min_committee_size: u64,
pub genesis_time: u64,
}
/*
* Presently this is just some arbitrary time in Sept 2018.
*/
const GENESIS_TIME: u64 = 1_537_488_655;
impl ChainConfig {
pub fn standard() -> Self {
Self {
cycle_length: 8,
cycle_length: 64,
shard_count: 1024,
min_committee_size: 128,
genesis_time: GENESIS_TIME, // arbitrary
}
}
#[cfg(test)]
pub fn super_fast_tests() -> Self {
Self {
cycle_length: 2,
shard_count: 2,
min_committee_size: 2,
genesis_time: GENESIS_TIME, // arbitrary
}
}
}

View File

@ -1,5 +1,13 @@
use super::Hash256;
use super::TransitionError;
#[derive(Debug)]
pub enum ParentHashesError {
BadCurrentHashes,
BadObliqueHashes,
SlotTooHigh,
SlotTooLow,
IntWrapping,
}
/// This function is used to select the hashes used in
/// the signing of an AttestationRecord.
@ -18,23 +26,20 @@ pub fn attestation_parent_hashes(
attestation_slot: u64,
current_hashes: &[Hash256],
oblique_hashes: &[Hash256])
-> Result<Vec<Hash256>, TransitionError>
-> Result<Vec<Hash256>, ParentHashesError>
{
// This cast places a limit on cycle_length. If you change it, check math
// for overflow.
let cycle_length: u64 = u64::from(cycle_length);
if current_hashes.len() as u64 != (cycle_length * 2) {
return Err(TransitionError::InvalidInput(String::from(
"current_hashes.len() must equal cycle_length * 2")));
}
if attestation_slot >= block_slot {
return Err(TransitionError::InvalidInput(String::from(
"attestation_slot must be less than block_slot")));
return Err(ParentHashesError::BadCurrentHashes);
}
if oblique_hashes.len() as u64 > cycle_length {
return Err(TransitionError::InvalidInput(String::from(
"oblique_hashes.len() must be <= cycle_length * 2")));
return Err(ParentHashesError::BadObliqueHashes);
}
if attestation_slot >= block_slot {
return Err(ParentHashesError::SlotTooHigh);
}
/*
@ -44,8 +49,7 @@ pub fn attestation_parent_hashes(
let attestation_distance = block_slot - attestation_slot;
if attestation_distance > cycle_length {
return Err(TransitionError::InvalidInput(String::from(
"attestation_slot must be withing one cycle of block_slot")));
return Err(ParentHashesError::SlotTooLow);
}
/*
@ -63,7 +67,7 @@ pub fn attestation_parent_hashes(
*/
let end = start.checked_add(cycle_length)
.and_then(|x| x.checked_sub(oblique_hashes.len() as u64))
.ok_or(TransitionError::IntWrapping)?;
.ok_or(ParentHashesError::IntWrapping)?;
let mut hashes = Vec::new();
@ -176,7 +180,6 @@ mod tests {
attestation_slot,
&current_hashes,
&oblique_hashes);
assert!(result.is_ok());
let result = result.unwrap();
assert_eq!(result.len(), cycle_length as usize);
let expected_result = get_range_of_hashes(7, 15);

View File

@ -0,0 +1,62 @@
use super::utils::errors::ParameterError;
use super::utils::types::Hash256;
/*
* Work-in-progress function: not ready for review.
*/
/// Look up the hash of the block at `slot` in the active state's window of
/// recent block hashes.
///
/// `active_state_recent_block_hashes` must hold exactly `2 * cycle_length`
/// entries; `slot` must lie inside the window `[current_block_slot -
/// 2 * cycle_length, current_block_slot)`. Violations return
/// `ParameterError::InvalidInput` (via the `assert_error!` early-return
/// macro).
pub fn get_block_hash(
    active_state_recent_block_hashes: &[Hash256],
    current_block_slot: u64,
    slot: u64,
    cycle_length: u64, // convert from standard u8
) -> Result<Hash256, ParameterError> {
    // active_state must have exactly 2*cycle_length hashes.
    assert_error!(
        active_state_recent_block_hashes.len() as u64 == cycle_length * 2,
        ParameterError::InvalidInput(String::from(
            "active state has incorrect number of block hashes"
        ))
    );
    // Oldest slot still covered by the window; saturates to 0 for blocks
    // early in the chain (checked_sub avoids u64 underflow).
    let state_start_slot = (current_block_slot)
        .checked_sub(cycle_length * 2)
        .unwrap_or(0);
    // Requested slot must fall inside [state_start_slot, current_block_slot).
    assert_error!(
        (state_start_slot <= slot) && (slot < current_block_slot),
        ParameterError::InvalidInput(String::from("incorrect slot number"))
    );
    let index = 2 * cycle_length + slot - current_block_slot; // should always be positive
    Ok(active_state_recent_block_hashes[index as usize])
}
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn test_get_block_hash() {
    let current_block_slot: u64 = 10;
    let target_slot: u64 = 3;
    let cycle_length: u64 = 8;

    // Populate 2 * cycle_length random hashes, as the active state would.
    let hashes: Vec<Hash256> = (0..2 * cycle_length)
        .map(|_| Hash256::random())
        .collect();

    let hash = get_block_hash(
        &hashes,
        current_block_slot,
        target_slot,
        cycle_length)
        .unwrap();

    // The function should index from the tail of the window.
    let expected_index =
        (2 * cycle_length + target_slot - current_block_slot) as usize;
    assert_eq!(hash, hashes[expected_index]);
}
}

View File

@ -0,0 +1,3 @@
mod block_hash;
use super::utils;

View File

@ -0,0 +1,7 @@
use std::collections::HashMap;
/// Maps a (slot, shard_id) to attestation_indices.
pub type AttesterMap = HashMap<(u64, u16), Vec<usize>>;
/// Maps a slot to a block proposer.
pub type ProposerMap = HashMap<u64, usize>;

View File

@ -0,0 +1,9 @@
mod delegation;
mod shuffling;
pub mod maps;
pub mod attestation_parent_hashes;
use super::utils;
use super::utils::types::Hash256;
pub use self::shuffling::shuffle;

View File

@ -1,10 +1,10 @@
extern crate rlp;
extern crate ethereum_types;
extern crate blake2;
extern crate blake2_rfc as blake2;
extern crate bytes;
extern crate ssz;
use super::utils;
mod common;
pub mod active_state;
pub mod attestation_record;
@ -13,5 +13,7 @@ pub mod chain_config;
pub mod block;
pub mod crosslink_record;
pub mod shard_and_committee;
pub mod transition;
pub mod validator_record;
use super::bls;
use super::utils;

View File

@ -1,17 +0,0 @@
use super::super::utils::types::Hash256;
mod attestation_parent_hashes;
mod shuffling;
pub use self::attestation_parent_hashes::attestation_parent_hashes;
pub use self::shuffling::shuffle;
#[derive(Debug)]
pub enum TransitionError {
IntWrapping,
OutOfBounds,
InvalidInput(String),
}

View File

@ -1,9 +1,7 @@
extern crate rand;
use super::utils::types::{ Hash256, Address, U256 };
use super::utils::bls::{ PublicKey, Keypair };
use self::rand::thread_rng;
use super::bls::{ PublicKey, Keypair };
pub struct ValidatorRecord {
pub pubkey: PublicKey,
@ -21,10 +19,9 @@ impl ValidatorRecord {
///
/// Returns the new instance and new keypair.
pub fn zero_with_thread_rand_keypair() -> (Self, Keypair) {
let mut rng = thread_rng();
let keypair = Keypair::generate(&mut rng);
let keypair = Keypair::random();
let s = Self {
pubkey: keypair.public.clone(),
pubkey: keypair.pk.clone(),
withdrawal_shard: 0,
withdrawal_address: Address::zero(),
randao_commitment: Hash256::zero(),

View File

@ -1,13 +0,0 @@
extern crate bls;
extern crate pairing;
use self::bls::AggregateSignature as GenericAggregateSignature;
use self::bls::Signature as GenericSignature;
use self::bls::Keypair as GenericKeypair;
use self::bls::PublicKey as GenericPublicKey;
use self::pairing::bls12_381::Bls12;
pub type AggregateSignature = GenericAggregateSignature<Bls12>;
pub type Signature = GenericSignature<Bls12>;
pub type Keypair = GenericKeypair<Bls12>;
pub type PublicKey = GenericPublicKey<Bls12>;

View File

@ -0,0 +1,8 @@
// Collection of custom errors

/// Errors returned when a function is given parameters it cannot operate on.
#[derive(Debug,PartialEq)]
pub enum ParameterError {
    /// An integer computation wrapped (overflow/underflow).
    IntWrapping,
    /// A value fell outside the permitted bounds.
    OutOfBounds,
    /// Catch-all for invalid input; the message describes the problem.
    InvalidInput(String),
}

6
lighthouse/utils/hash.rs Normal file
View File

@ -0,0 +1,6 @@
use super::blake2::blake2b::blake2b;
/// Hash `input` with unkeyed BLAKE2b (64-byte digest) and return the
/// first 32 bytes as an owned vector.
pub fn canonical_hash(input: &[u8]) -> Vec<u8> {
    let digest = blake2b(64, &[], input);
    let digest_bytes = digest.as_bytes();
    digest_bytes[..32].to_vec()
}

View File

@ -0,0 +1,8 @@
/// Early-return `Err($err)` from the enclosing function when `$exp` is
/// false — a `Result`-returning analogue of `assert!`.
///
/// Because it expands to a `return`, it may only be used inside functions
/// whose return type accepts `Err($err)`.
#[macro_export]
macro_rules! assert_error {
    ($exp: expr, $err: expr) => {
        match $exp {
            true => (),
            false => return Err($err),
        }
    };
}

View File

@ -1,9 +1,11 @@
extern crate ethereum_types;
extern crate blake2;
extern crate blake2_rfc as blake2;
extern crate crypto_mac;
extern crate boolean_bitfield;
#[macro_use]
pub mod macros;
pub mod hash;
pub mod types;
pub mod bls;
pub mod test_helpers;
pub mod logging;
pub mod errors;

View File

@ -1,12 +0,0 @@
extern crate rand;
use super::bls::Keypair;
use self::rand::thread_rng;
// Returns a keypair for use in testing purposes.
// It is dangerous because we provide no guarantees
// that the private key is unique or in-fact private.
pub fn get_dangerous_test_keypair() -> Keypair {
let mut rng = thread_rng();
Keypair::generate(&mut rng)
}

View File

@ -3,7 +3,6 @@ extern crate boolean_bitfield;
use super::ethereum_types::{ H256, H160 };
use self::boolean_bitfield::BooleanBitfield;
pub use super::blake2::Blake2s;
pub use super::ethereum_types::U256;
pub type Hash256 = H256;

View File

@ -4,7 +4,6 @@ use super::{
#[derive(Debug, PartialEq)]
pub enum DecodeError {
OutOfBounds,
TooShort,
TooLong,
}
@ -22,7 +21,7 @@ pub fn decode_ssz<T>(ssz_bytes: &[u8], index: usize)
where T: Decodable
{
if index >= ssz_bytes.len() {
return Err(DecodeError::OutOfBounds)
return Err(DecodeError::TooShort)
}
T::ssz_decode(ssz_bytes, index)
}

View File

@ -36,6 +36,18 @@ impl_decodable_for_uint!(u32, 32);
impl_decodable_for_uint!(u64, 64);
impl_decodable_for_uint!(usize, 64);
impl Decodable for u8 {
    /// A `u8` is its own SSZ encoding: read one byte and advance the index.
    fn ssz_decode(bytes: &[u8], index: usize)
        -> Result<(Self, usize), DecodeError>
    {
        // `get` returns `None` exactly when `index >= bytes.len()`.
        match bytes.get(index) {
            Some(&byte) => Ok((byte, index + 1)),
            None => Err(DecodeError::TooShort),
        }
    }
}
impl Decodable for H256 {
fn ssz_decode(bytes: &[u8], index: usize)
-> Result<(Self, usize), DecodeError>