From e28e97d3c71c47762905340f5352759ba9afa282 Mon Sep 17 00:00:00 2001 From: Paul Hauner Date: Wed, 27 Mar 2019 15:59:27 +1100 Subject: [PATCH 01/89] Add initial work on tree hash caching --- eth2/utils/ssz/src/cached_tree_hash.rs | 331 +++++++++++++++++++++++++ eth2/utils/ssz/src/lib.rs | 1 + 2 files changed, 332 insertions(+) create mode 100644 eth2/utils/ssz/src/cached_tree_hash.rs diff --git a/eth2/utils/ssz/src/cached_tree_hash.rs b/eth2/utils/ssz/src/cached_tree_hash.rs new file mode 100644 index 000000000..7a5b1c527 --- /dev/null +++ b/eth2/utils/ssz/src/cached_tree_hash.rs @@ -0,0 +1,331 @@ +use crate::ssz_encode; +use hashing::hash; + +const BYTES_PER_CHUNK: usize = 32; +const HASHSIZE: usize = 32; +const MERKLE_HASH_CHUNCK: usize = 2 * BYTES_PER_CHUNK; + +pub trait CachedTreeHash { + fn cached_hash_tree_root( + &self, + other: &Self, + cache: &mut [u8], + i: usize, + changes: Vec, + ) -> Option<(usize, Vec)>; +} + +impl CachedTreeHash for u64 { + fn cached_hash_tree_root( + &self, + other: &Self, + cache: &mut [u8], + i: usize, + mut changes: Vec, + ) -> Option<(usize, Vec)> { + if self != other { + cache + .get_mut(i..i + HASHSIZE)? 
+ .copy_from_slice(&mut hash(&ssz_encode(self))); + changes.push(true); + } else { + changes.push(false); + }; + + Some((i + HASHSIZE, changes)) + } +} + +pub struct Inner { + pub a: u64, + pub b: u64, + pub c: u64, + pub d: u64, +} + +impl CachedTreeHash for Inner { + fn cached_hash_tree_root( + &self, + other: &Self, + cache: &mut [u8], + i: usize, + mut changes: Vec, + ) -> Option<(usize, Vec)> { + let original_start = i; + + let leaves = 4; + let nodes = num_nodes(leaves); + let internal = nodes - leaves; + let leaves_start = i + internal * HASHSIZE; + + let mut leaf_changes = { + let leaf_changes = Vec::with_capacity(leaves); + let leaf_start = leaves_start; + + let (leaf_start, leaf_changes) = + self.a + .cached_hash_tree_root(&other.a, cache, leaf_start, leaf_changes)?; + let (leaf_start, leaf_changes) = + self.b + .cached_hash_tree_root(&other.b, cache, leaf_start, leaf_changes)?; + let (leaf_start, leaf_changes) = + self.c + .cached_hash_tree_root(&other.c, cache, leaf_start, leaf_changes)?; + let (_leaf_start, leaf_changes) = + self.d + .cached_hash_tree_root(&other.d, cache, leaf_start, leaf_changes)?; + + leaf_changes + }; + + let any_changes = leaf_changes.iter().any(|&c| c); + + changes.resize(changes.len() + internal, false); + changes.append(&mut leaf_changes); + + if any_changes { + let mut i = internal; + + while i > 0 { + let children = children(i); + + if changes[children.0] | changes[children.1] { + changes[parent(i)] = true; + + let children_start = children.0 * HASHSIZE; + let children_end = children_start + 2 * HASHSIZE; + let hash = hash(&cache.get(children_start..children_end)?); + + cache + .get_mut(i * HASHSIZE..(i + 1) * HASHSIZE)? + .copy_from_slice(&hash); + } + i += 1 + } + } + + Some((42, vec![any_changes])) + } +} + +/// Get merkle root of some hashed values - the input leaf nodes is expected to already be hashed +/// Outputs a `Vec` byte array of the merkle root given a set of leaf node values. 
+pub fn cache_builder(values: &[u8]) -> Option> { + let leaves = values.len() / HASHSIZE; + + if leaves == 0 || !leaves.is_power_of_two() { + return None; + } + + let mut o: Vec = vec![0; (num_nodes(leaves) - leaves) * HASHSIZE]; + o.append(&mut values.to_vec()); + + let mut i = o.len(); + let mut j = o.len() - values.len(); + + while i >= MERKLE_HASH_CHUNCK { + i -= MERKLE_HASH_CHUNCK; + let hash = hash(&o[i..i + MERKLE_HASH_CHUNCK]); + + j -= HASHSIZE; + o.get_mut(j..j + HASHSIZE)?.copy_from_slice(&hash); + } + + return Some(o); +} + +fn parent(child: usize) -> usize { + (child - 1) / 2 +} + +fn children(parent: usize) -> (usize, usize) { + ((2 * parent + 1), (2 * parent + 2)) +} + +fn num_nodes(num_leaves: usize) -> usize { + 2 * num_leaves - 1 +} + +pub struct Outer { + pub a: u64, + pub b: u64, + pub inner: Inner, +} + +#[cfg(test)] +mod tests { + use super::*; + + fn join(many: Vec<&[u8]>) -> Vec { + let mut all = vec![]; + for one in many { + all.extend_from_slice(&mut one.clone()) + } + all + } + + /* + #[test] + fn container() { + let data1 = hash(&vec![1; 32]); + let data2 = hash(&vec![2; 32]); + let data3 = hash(&vec![3; 32]); + let data4 = hash(&vec![4; 32]); + + let data = join(vec![&data1, &data2, &data3, &data4]); + + let cache = cache_builder(&data).unwrap(); + } + */ + + #[test] + fn can_build_cache() { + let data1 = hash(&vec![1; 32]); + let data2 = hash(&vec![2; 32]); + let data3 = hash(&vec![3; 32]); + let data4 = hash(&vec![4; 32]); + + let data = join(vec![&data1, &data2, &data3, &data4]); + + let cache = cache_builder(&data).unwrap(); + + let hash_12 = { + let mut joined = vec![]; + joined.append(&mut data1.clone()); + joined.append(&mut data2.clone()); + hash(&joined) + }; + let hash_34 = { + let mut joined = vec![]; + joined.append(&mut data3.clone()); + joined.append(&mut data4.clone()); + hash(&joined) + }; + let hash_hash12_hash_34 = { + let mut joined = vec![]; + joined.append(&mut hash_12.clone()); + joined.append(&mut 
hash_34.clone()); + hash(&joined) + }; + + for (i, chunk) in cache.chunks(HASHSIZE).enumerate().rev() { + let expected = match i { + 0 => hash_hash12_hash_34.clone(), + 1 => hash_12.clone(), + 2 => hash_34.clone(), + 3 => data1.clone(), + 4 => data2.clone(), + 5 => data3.clone(), + 6 => data4.clone(), + _ => vec![], + }; + + assert_eq!(chunk, &expected[..], "failed at {}", i); + } + } +} + +/* +pub trait TreeHash { + fn hash_tree_root(&self) -> Vec; +} + +/// Returns a 32 byte hash of 'list' - a vector of byte vectors. +/// Note that this will consume 'list'. +pub fn merkle_hash(list: &mut Vec>) -> Vec { + // flatten list + let mut chunkz = list_to_blob(list); + + // get data_len as bytes. It will hashed will the merkle root + let mut datalen = list.len().to_le_bytes().to_vec(); + zpad(&mut datalen, 32); + + // merklelize + while chunkz.len() > HASHSIZE { + let mut new_chunkz: Vec = Vec::new(); + + for two_chunks in chunkz.chunks(BYTES_PER_CHUNK * 2) { + // Hash two chuncks together + new_chunkz.append(&mut hash(two_chunks)); + } + + chunkz = new_chunkz; + } + + chunkz.append(&mut datalen); + hash(&chunkz) +} + +fn list_to_blob(list: &mut Vec>) -> Vec { + // pack - fit as many many items per chunk as we can and then + // right pad to BYTES_PER_CHUNCK + let (items_per_chunk, chunk_count) = if list.is_empty() { + (1, 1) + } else { + let items_per_chunk = BYTES_PER_CHUNK / list[0].len(); + let chunk_count = list.len() / items_per_chunk; + (items_per_chunk, chunk_count) + }; + + let mut chunkz = Vec::new(); + if list.is_empty() { + // handle and empty list + chunkz.append(&mut vec![0; BYTES_PER_CHUNK * 2]); + } else if list[0].len() <= BYTES_PER_CHUNK { + // just create a blob here; we'll divide into + // chunked slices when we merklize + let mut chunk = Vec::with_capacity(BYTES_PER_CHUNK); + let mut item_count_in_chunk = 0; + chunkz.reserve(chunk_count * BYTES_PER_CHUNK); + for item in list.iter_mut() { + item_count_in_chunk += 1; + chunk.append(item); + + // 
completed chunk? + if item_count_in_chunk == items_per_chunk { + zpad(&mut chunk, BYTES_PER_CHUNK); + chunkz.append(&mut chunk); + item_count_in_chunk = 0; + } + } + + // left-over uncompleted chunk? + if item_count_in_chunk != 0 { + zpad(&mut chunk, BYTES_PER_CHUNK); + chunkz.append(&mut chunk); + } + } + + // extend the number of chunks to a power of two if necessary + if !chunk_count.is_power_of_two() { + let zero_chunks_count = chunk_count.next_power_of_two() - chunk_count; + chunkz.append(&mut vec![0; zero_chunks_count * BYTES_PER_CHUNK]); + } + + chunkz +} + +/// right pads with zeros making 'bytes' 'size' in length +fn zpad(bytes: &mut Vec, size: usize) { + if bytes.len() < size { + bytes.resize(size, 0); + } +} + +#[cfg(test)] +mod tests { + use super::*; + + #[test] + fn test_merkle_hash() { + let data1 = vec![1; 32]; + let data2 = vec![2; 32]; + let data3 = vec![3; 32]; + let mut list = vec![data1, data2, data3]; + let result = merkle_hash(&mut list); + + //note: should test againt a known test hash value + assert_eq!(HASHSIZE, result.len()); + } +} +*/ diff --git a/eth2/utils/ssz/src/lib.rs b/eth2/utils/ssz/src/lib.rs index 7c29667af..9bf441249 100644 --- a/eth2/utils/ssz/src/lib.rs +++ b/eth2/utils/ssz/src/lib.rs @@ -10,6 +10,7 @@ extern crate bytes; extern crate ethereum_types; +mod cached_tree_hash; pub mod decode; pub mod encode; mod signed_root; From 35ceb92f2e94f70a48cfa7e0155d31ce55da531c Mon Sep 17 00:00:00 2001 From: Paul Hauner Date: Wed, 27 Mar 2019 17:45:27 +1100 Subject: [PATCH 02/89] Refactor with TreeHashCache struct --- eth2/utils/ssz/Cargo.toml | 1 + eth2/utils/ssz/src/cached_tree_hash.rs | 358 +++++++++++++++++-------- 2 files changed, 243 insertions(+), 116 deletions(-) diff --git a/eth2/utils/ssz/Cargo.toml b/eth2/utils/ssz/Cargo.toml index f13db5def..21b726e9f 100644 --- a/eth2/utils/ssz/Cargo.toml +++ b/eth2/utils/ssz/Cargo.toml @@ -8,3 +8,4 @@ edition = "2018" bytes = "0.4.9" ethereum-types = "0.5" hashing = { path = "../hashing" } 
+int_to_bytes = { path = "../int_to_bytes" } diff --git a/eth2/utils/ssz/src/cached_tree_hash.rs b/eth2/utils/ssz/src/cached_tree_hash.rs index 7a5b1c527..99fd49221 100644 --- a/eth2/utils/ssz/src/cached_tree_hash.rs +++ b/eth2/utils/ssz/src/cached_tree_hash.rs @@ -1,38 +1,97 @@ use crate::ssz_encode; use hashing::hash; +use int_to_bytes::int_to_bytes32; const BYTES_PER_CHUNK: usize = 32; const HASHSIZE: usize = 32; const MERKLE_HASH_CHUNCK: usize = 2 * BYTES_PER_CHUNK; +pub struct TreeHashCache<'a> { + chunk_offset: usize, + cache: &'a mut [u8], + chunk_modified: &'a mut [bool], +} + +impl<'a> TreeHashCache<'a> { + pub fn increment(&mut self) { + self.chunk_offset += 1 + } + + pub fn modify_current_chunk(&mut self, to: &[u8]) -> Option<()> { + self.modify_chunk(0, to) + } + + pub fn modify_chunk(&mut self, chunk: usize, to: &[u8]) -> Option<()> { + let start = chunk * BYTES_PER_CHUNK; + let end = start + BYTES_PER_CHUNK; + self.cache.get_mut(start..end)?.copy_from_slice(to); + + self.chunk_modified[chunk] = true; + + Some(()) + } + + pub fn changed(&self, chunk: usize) -> Option { + self.chunk_modified.get(chunk).cloned() + } + + pub fn children_modified(&self, parent_chunk: usize) -> Option { + let children = children(parent_chunk); + + Some(self.changed(children.0)? | self.changed(children.1)?) 
+ } + + pub fn hash_children(&self, parent_chunk: usize) -> Option> { + let children = children(parent_chunk); + + let start = children.0 * BYTES_PER_CHUNK; + let end = start + BYTES_PER_CHUNK * 2; + + Some(hash(&self.cache.get(start..end)?)) + } + + pub fn just_the_leaves(&mut self, leaves: usize) -> Option { + let nodes = num_nodes(leaves); + let internal = nodes - leaves; + let leaves_start = (self.chunk_offset + internal) * HASHSIZE; + + Some(TreeHashCache { + chunk_offset: self.chunk_offset + internal, + cache: self.cache.get_mut(leaves_start..leaves * HASHSIZE)?, + chunk_modified: self + .chunk_modified + .get_mut(self.chunk_offset..self.chunk_offset + leaves)?, + }) + } +} + +fn children(parent: usize) -> (usize, usize) { + ((2 * parent + 1), (2 * parent + 2)) +} + +fn num_nodes(num_leaves: usize) -> usize { + 2 * num_leaves - 1 +} + pub trait CachedTreeHash { - fn cached_hash_tree_root( - &self, - other: &Self, - cache: &mut [u8], - i: usize, - changes: Vec, - ) -> Option<(usize, Vec)>; + fn build_cache_bytes(&self) -> Vec; + + fn cached_hash_tree_root(&self, other: &Self, cache: &mut TreeHashCache) -> Option<()>; } impl CachedTreeHash for u64 { - fn cached_hash_tree_root( - &self, - other: &Self, - cache: &mut [u8], - i: usize, - mut changes: Vec, - ) -> Option<(usize, Vec)> { - if self != other { - cache - .get_mut(i..i + HASHSIZE)? 
- .copy_from_slice(&mut hash(&ssz_encode(self))); - changes.push(true); - } else { - changes.push(false); - }; + fn build_cache_bytes(&self) -> Vec { + merkleize(&ssz_encode(self)) + } - Some((i + HASHSIZE, changes)) + fn cached_hash_tree_root(&self, other: &Self, cache: &mut TreeHashCache) -> Option<()> { + if self != other { + cache.modify_current_chunk(&hash(&ssz_encode(self))); + } + + cache.increment(); + + Some(()) } } @@ -44,70 +103,137 @@ pub struct Inner { } impl CachedTreeHash for Inner { - fn cached_hash_tree_root( - &self, - other: &Self, - cache: &mut [u8], - i: usize, - mut changes: Vec, - ) -> Option<(usize, Vec)> { - let original_start = i; + fn build_cache_bytes(&self) -> Vec { + let mut leaves = vec![]; - let leaves = 4; - let nodes = num_nodes(leaves); - let internal = nodes - leaves; - let leaves_start = i + internal * HASHSIZE; + leaves.append(&mut self.a.build_cache_bytes()); + leaves.append(&mut self.b.build_cache_bytes()); + leaves.append(&mut self.c.build_cache_bytes()); + leaves.append(&mut self.d.build_cache_bytes()); - let mut leaf_changes = { - let leaf_changes = Vec::with_capacity(leaves); - let leaf_start = leaves_start; + merkleize(&leaves) + } - let (leaf_start, leaf_changes) = - self.a - .cached_hash_tree_root(&other.a, cache, leaf_start, leaf_changes)?; - let (leaf_start, leaf_changes) = - self.b - .cached_hash_tree_root(&other.b, cache, leaf_start, leaf_changes)?; - let (leaf_start, leaf_changes) = - self.c - .cached_hash_tree_root(&other.c, cache, leaf_start, leaf_changes)?; - let (_leaf_start, leaf_changes) = - self.d - .cached_hash_tree_root(&other.d, cache, leaf_start, leaf_changes)?; + fn cached_hash_tree_root(&self, other: &Self, cache: &mut TreeHashCache) -> Option<()> { + let num_leaves = 4; - leaf_changes - }; + let mut leaf_cache = cache.just_the_leaves(num_leaves)?; + self.a.cached_hash_tree_root(&other.a, &mut leaf_cache)?; + self.b.cached_hash_tree_root(&other.b, &mut leaf_cache)?; + 
self.c.cached_hash_tree_root(&other.c, &mut leaf_cache)?; + self.d.cached_hash_tree_root(&other.d, &mut leaf_cache)?; - let any_changes = leaf_changes.iter().any(|&c| c); + let nodes = num_nodes(num_leaves); + let internal_chunks = nodes - num_leaves; - changes.resize(changes.len() + internal, false); - changes.append(&mut leaf_changes); - - if any_changes { - let mut i = internal; - - while i > 0 { - let children = children(i); - - if changes[children.0] | changes[children.1] { - changes[parent(i)] = true; - - let children_start = children.0 * HASHSIZE; - let children_end = children_start + 2 * HASHSIZE; - let hash = hash(&cache.get(children_start..children_end)?); - - cache - .get_mut(i * HASHSIZE..(i + 1) * HASHSIZE)? - .copy_from_slice(&hash); - } - i += 1 + for chunk in 0..internal_chunks { + if cache.children_modified(chunk)? { + cache.modify_chunk(chunk, &cache.hash_children(chunk)?)?; } } - Some((42, vec![any_changes])) + Some(()) } } +/// A reference function to test against. +pub fn merkleize(values: &[u8]) -> Vec { + let leaves = values.len() / HASHSIZE; + + if leaves == 0 || !leaves.is_power_of_two() { + panic!("Handle bad leaf count"); + } + + let mut o: Vec = vec![0; (num_nodes(leaves) - leaves) * HASHSIZE]; + o.append(&mut values.to_vec()); + + let mut i = o.len(); + let mut j = o.len() - values.len(); + + while i >= MERKLE_HASH_CHUNCK { + i -= MERKLE_HASH_CHUNCK; + let hash = hash(&o[i..i + MERKLE_HASH_CHUNCK]); + + j -= HASHSIZE; + o[j..j + HASHSIZE].copy_from_slice(&hash); + } + + o +} + +#[cfg(test)] +mod tests { + use super::*; + + fn join(many: Vec<&[u8]>) -> Vec { + let mut all = vec![]; + for one in many { + all.extend_from_slice(&mut one.clone()) + } + all + } + + /* + #[test] + fn container() { + let data1 = hash(&vec![1; 32]); + let data2 = hash(&vec![2; 32]); + let data3 = hash(&vec![3; 32]); + let data4 = hash(&vec![4; 32]); + + let data = join(vec![&data1, &data2, &data3, &data4]); + + let cache = cache_builder(&data).unwrap(); + } + */ 
+ #[test] + fn merkleize_4_leaves() { + let data1 = hash(&int_to_bytes32(1)); + let data2 = hash(&int_to_bytes32(2)); + let data3 = hash(&int_to_bytes32(3)); + let data4 = hash(&int_to_bytes32(4)); + + let data = join(vec![&data1, &data2, &data3, &data4]); + + let cache = merkleize(&data); + + let hash_12 = { + let mut joined = vec![]; + joined.append(&mut data1.clone()); + joined.append(&mut data2.clone()); + hash(&joined) + }; + let hash_34 = { + let mut joined = vec![]; + joined.append(&mut data3.clone()); + joined.append(&mut data4.clone()); + hash(&joined) + }; + let hash_hash12_hash_34 = { + let mut joined = vec![]; + joined.append(&mut hash_12.clone()); + joined.append(&mut hash_34.clone()); + hash(&joined) + }; + + for (i, chunk) in cache.chunks(HASHSIZE).enumerate().rev() { + let expected = match i { + 0 => hash_hash12_hash_34.clone(), + 1 => hash_12.clone(), + 2 => hash_34.clone(), + 3 => data1.clone(), + 4 => data2.clone(), + 5 => data3.clone(), + 6 => data4.clone(), + _ => vec![], + }; + + assert_eq!(chunk, &expected[..], "failed at {}", i); + } + } +} +/* + /// Get merkle root of some hashed values - the input leaf nodes is expected to already be hashed /// Outputs a `Vec` byte array of the merkle root given a set of leaf node values. 
pub fn cache_builder(values: &[u8]) -> Option> { @@ -177,52 +303,51 @@ mod tests { let cache = cache_builder(&data).unwrap(); } */ +#[test] +fn can_build_cache() { +let data1 = hash(&vec![1; 32]); +let data2 = hash(&vec![2; 32]); +let data3 = hash(&vec![3; 32]); +let data4 = hash(&vec![4; 32]); - #[test] - fn can_build_cache() { - let data1 = hash(&vec![1; 32]); - let data2 = hash(&vec![2; 32]); - let data3 = hash(&vec![3; 32]); - let data4 = hash(&vec![4; 32]); +let data = join(vec![&data1, &data2, &data3, &data4]); - let data = join(vec![&data1, &data2, &data3, &data4]); +let cache = cache_builder(&data).unwrap(); - let cache = cache_builder(&data).unwrap(); +let hash_12 = { +let mut joined = vec![]; +joined.append(&mut data1.clone()); +joined.append(&mut data2.clone()); +hash(&joined) +}; +let hash_34 = { +let mut joined = vec![]; +joined.append(&mut data3.clone()); +joined.append(&mut data4.clone()); +hash(&joined) +}; +let hash_hash12_hash_34 = { +let mut joined = vec![]; +joined.append(&mut hash_12.clone()); +joined.append(&mut hash_34.clone()); +hash(&joined) +}; - let hash_12 = { - let mut joined = vec![]; - joined.append(&mut data1.clone()); - joined.append(&mut data2.clone()); - hash(&joined) - }; - let hash_34 = { - let mut joined = vec![]; - joined.append(&mut data3.clone()); - joined.append(&mut data4.clone()); - hash(&joined) - }; - let hash_hash12_hash_34 = { - let mut joined = vec![]; - joined.append(&mut hash_12.clone()); - joined.append(&mut hash_34.clone()); - hash(&joined) - }; +for (i, chunk) in cache.chunks(HASHSIZE).enumerate().rev() { +let expected = match i { +0 => hash_hash12_hash_34.clone(), +1 => hash_12.clone(), +2 => hash_34.clone(), +3 => data1.clone(), +4 => data2.clone(), +5 => data3.clone(), +6 => data4.clone(), +_ => vec![], +}; - for (i, chunk) in cache.chunks(HASHSIZE).enumerate().rev() { - let expected = match i { - 0 => hash_hash12_hash_34.clone(), - 1 => hash_12.clone(), - 2 => hash_34.clone(), - 3 => data1.clone(), - 4 => 
data2.clone(), - 5 => data3.clone(), - 6 => data4.clone(), - _ => vec![], - }; - - assert_eq!(chunk, &expected[..], "failed at {}", i); - } - } +assert_eq!(chunk, &expected[..], "failed at {}", i); +} +} } /* @@ -329,3 +454,4 @@ mod tests { } } */ +*/ From ad4000cbdf2fa0c23560ae005c10360229115cf5 Mon Sep 17 00:00:00 2001 From: Paul Hauner Date: Wed, 27 Mar 2019 17:46:12 +1100 Subject: [PATCH 03/89] Remove unused code --- eth2/utils/ssz/src/cached_tree_hash.rs | 236 ------------------------- 1 file changed, 236 deletions(-) diff --git a/eth2/utils/ssz/src/cached_tree_hash.rs b/eth2/utils/ssz/src/cached_tree_hash.rs index 99fd49221..756f97232 100644 --- a/eth2/utils/ssz/src/cached_tree_hash.rs +++ b/eth2/utils/ssz/src/cached_tree_hash.rs @@ -173,19 +173,6 @@ mod tests { all } - /* - #[test] - fn container() { - let data1 = hash(&vec![1; 32]); - let data2 = hash(&vec![2; 32]); - let data3 = hash(&vec![3; 32]); - let data4 = hash(&vec![4; 32]); - - let data = join(vec![&data1, &data2, &data3, &data4]); - - let cache = cache_builder(&data).unwrap(); - } - */ #[test] fn merkleize_4_leaves() { let data1 = hash(&int_to_bytes32(1)); @@ -232,226 +219,3 @@ mod tests { } } } -/* - -/// Get merkle root of some hashed values - the input leaf nodes is expected to already be hashed -/// Outputs a `Vec` byte array of the merkle root given a set of leaf node values. 
-pub fn cache_builder(values: &[u8]) -> Option> { - let leaves = values.len() / HASHSIZE; - - if leaves == 0 || !leaves.is_power_of_two() { - return None; - } - - let mut o: Vec = vec![0; (num_nodes(leaves) - leaves) * HASHSIZE]; - o.append(&mut values.to_vec()); - - let mut i = o.len(); - let mut j = o.len() - values.len(); - - while i >= MERKLE_HASH_CHUNCK { - i -= MERKLE_HASH_CHUNCK; - let hash = hash(&o[i..i + MERKLE_HASH_CHUNCK]); - - j -= HASHSIZE; - o.get_mut(j..j + HASHSIZE)?.copy_from_slice(&hash); - } - - return Some(o); -} - -fn parent(child: usize) -> usize { - (child - 1) / 2 -} - -fn children(parent: usize) -> (usize, usize) { - ((2 * parent + 1), (2 * parent + 2)) -} - -fn num_nodes(num_leaves: usize) -> usize { - 2 * num_leaves - 1 -} - -pub struct Outer { - pub a: u64, - pub b: u64, - pub inner: Inner, -} - -#[cfg(test)] -mod tests { - use super::*; - - fn join(many: Vec<&[u8]>) -> Vec { - let mut all = vec![]; - for one in many { - all.extend_from_slice(&mut one.clone()) - } - all - } - - /* - #[test] - fn container() { - let data1 = hash(&vec![1; 32]); - let data2 = hash(&vec![2; 32]); - let data3 = hash(&vec![3; 32]); - let data4 = hash(&vec![4; 32]); - - let data = join(vec![&data1, &data2, &data3, &data4]); - - let cache = cache_builder(&data).unwrap(); - } - */ -#[test] -fn can_build_cache() { -let data1 = hash(&vec![1; 32]); -let data2 = hash(&vec![2; 32]); -let data3 = hash(&vec![3; 32]); -let data4 = hash(&vec![4; 32]); - -let data = join(vec![&data1, &data2, &data3, &data4]); - -let cache = cache_builder(&data).unwrap(); - -let hash_12 = { -let mut joined = vec![]; -joined.append(&mut data1.clone()); -joined.append(&mut data2.clone()); -hash(&joined) -}; -let hash_34 = { -let mut joined = vec![]; -joined.append(&mut data3.clone()); -joined.append(&mut data4.clone()); -hash(&joined) -}; -let hash_hash12_hash_34 = { -let mut joined = vec![]; -joined.append(&mut hash_12.clone()); -joined.append(&mut hash_34.clone()); -hash(&joined) -}; - 
-for (i, chunk) in cache.chunks(HASHSIZE).enumerate().rev() { -let expected = match i { -0 => hash_hash12_hash_34.clone(), -1 => hash_12.clone(), -2 => hash_34.clone(), -3 => data1.clone(), -4 => data2.clone(), -5 => data3.clone(), -6 => data4.clone(), -_ => vec![], -}; - -assert_eq!(chunk, &expected[..], "failed at {}", i); -} -} -} - -/* -pub trait TreeHash { - fn hash_tree_root(&self) -> Vec; -} - -/// Returns a 32 byte hash of 'list' - a vector of byte vectors. -/// Note that this will consume 'list'. -pub fn merkle_hash(list: &mut Vec>) -> Vec { - // flatten list - let mut chunkz = list_to_blob(list); - - // get data_len as bytes. It will hashed will the merkle root - let mut datalen = list.len().to_le_bytes().to_vec(); - zpad(&mut datalen, 32); - - // merklelize - while chunkz.len() > HASHSIZE { - let mut new_chunkz: Vec = Vec::new(); - - for two_chunks in chunkz.chunks(BYTES_PER_CHUNK * 2) { - // Hash two chuncks together - new_chunkz.append(&mut hash(two_chunks)); - } - - chunkz = new_chunkz; - } - - chunkz.append(&mut datalen); - hash(&chunkz) -} - -fn list_to_blob(list: &mut Vec>) -> Vec { - // pack - fit as many many items per chunk as we can and then - // right pad to BYTES_PER_CHUNCK - let (items_per_chunk, chunk_count) = if list.is_empty() { - (1, 1) - } else { - let items_per_chunk = BYTES_PER_CHUNK / list[0].len(); - let chunk_count = list.len() / items_per_chunk; - (items_per_chunk, chunk_count) - }; - - let mut chunkz = Vec::new(); - if list.is_empty() { - // handle and empty list - chunkz.append(&mut vec![0; BYTES_PER_CHUNK * 2]); - } else if list[0].len() <= BYTES_PER_CHUNK { - // just create a blob here; we'll divide into - // chunked slices when we merklize - let mut chunk = Vec::with_capacity(BYTES_PER_CHUNK); - let mut item_count_in_chunk = 0; - chunkz.reserve(chunk_count * BYTES_PER_CHUNK); - for item in list.iter_mut() { - item_count_in_chunk += 1; - chunk.append(item); - - // completed chunk? 
- if item_count_in_chunk == items_per_chunk { - zpad(&mut chunk, BYTES_PER_CHUNK); - chunkz.append(&mut chunk); - item_count_in_chunk = 0; - } - } - - // left-over uncompleted chunk? - if item_count_in_chunk != 0 { - zpad(&mut chunk, BYTES_PER_CHUNK); - chunkz.append(&mut chunk); - } - } - - // extend the number of chunks to a power of two if necessary - if !chunk_count.is_power_of_two() { - let zero_chunks_count = chunk_count.next_power_of_two() - chunk_count; - chunkz.append(&mut vec![0; zero_chunks_count * BYTES_PER_CHUNK]); - } - - chunkz -} - -/// right pads with zeros making 'bytes' 'size' in length -fn zpad(bytes: &mut Vec, size: usize) { - if bytes.len() < size { - bytes.resize(size, 0); - } -} - -#[cfg(test)] -mod tests { - use super::*; - - #[test] - fn test_merkle_hash() { - let data1 = vec![1; 32]; - let data2 = vec![2; 32]; - let data3 = vec![3; 32]; - let mut list = vec![data1, data2, data3]; - let result = merkle_hash(&mut list); - - //note: should test againt a known test hash value - assert_eq!(HASHSIZE, result.len()); - } -} -*/ -*/ From 839ff3ea3b4885126074e723e8fed1c448c25fe1 Mon Sep 17 00:00:00 2001 From: Paul Hauner Date: Wed, 27 Mar 2019 18:34:10 +1100 Subject: [PATCH 04/89] Implement (failing) cached tree hash tests --- eth2/utils/ssz/src/cached_tree_hash.rs | 91 +++++++++++++++++++++++++- 1 file changed, 88 insertions(+), 3 deletions(-) diff --git a/eth2/utils/ssz/src/cached_tree_hash.rs b/eth2/utils/ssz/src/cached_tree_hash.rs index 756f97232..328fdb394 100644 --- a/eth2/utils/ssz/src/cached_tree_hash.rs +++ b/eth2/utils/ssz/src/cached_tree_hash.rs @@ -13,6 +13,24 @@ pub struct TreeHashCache<'a> { } impl<'a> TreeHashCache<'a> { + pub fn build_changes_vec(bytes: &[u8]) -> Vec { + vec![false; bytes.len() / BYTES_PER_CHUNK] + } + + pub fn from_mut_slice(bytes: &'a mut [u8], changes: &'a mut [bool]) -> Option { + if bytes.len() % BYTES_PER_CHUNK > 0 { + return None; + } + + let chunk_modified = vec![false; bytes.len() / BYTES_PER_CHUNK]; + + 
Some(Self { + chunk_offset: 0, + cache: bytes, + chunk_modified: changes, + }) + } + pub fn increment(&mut self) { self.chunk_offset += 1 } @@ -63,6 +81,10 @@ impl<'a> TreeHashCache<'a> { .get_mut(self.chunk_offset..self.chunk_offset + leaves)?, }) } + + pub fn into_slice(self) -> &'a [u8] { + self.cache + } } fn children(parent: usize) -> (usize, usize) { @@ -81,7 +103,7 @@ pub trait CachedTreeHash { impl CachedTreeHash for u64 { fn build_cache_bytes(&self) -> Vec { - merkleize(&ssz_encode(self)) + merkleize(&int_to_bytes32(*self)) } fn cached_hash_tree_root(&self, other: &Self, cache: &mut TreeHashCache) -> Option<()> { @@ -95,6 +117,7 @@ impl CachedTreeHash for u64 { } } +#[derive(Clone)] pub struct Inner { pub a: u64, pub b: u64, @@ -123,6 +146,8 @@ impl CachedTreeHash for Inner { self.c.cached_hash_tree_root(&other.c, &mut leaf_cache)?; self.d.cached_hash_tree_root(&other.d, &mut leaf_cache)?; + dbg!(leaf_cache.into_slice()); + let nodes = num_nodes(num_leaves); let internal_chunks = nodes - num_leaves; @@ -140,8 +165,12 @@ impl CachedTreeHash for Inner { pub fn merkleize(values: &[u8]) -> Vec { let leaves = values.len() / HASHSIZE; - if leaves == 0 || !leaves.is_power_of_two() { - panic!("Handle bad leaf count"); + if leaves == 0 { + panic!("No full leaves"); + } + + if !leaves.is_power_of_two() { + panic!("leaves is not power of two"); } let mut o: Vec = vec![0; (num_nodes(leaves) - leaves) * HASHSIZE]; @@ -173,6 +202,62 @@ mod tests { all } + #[test] + fn cached_hash_on_inner() { + let inner = Inner { + a: 1, + b: 2, + c: 3, + d: 4, + }; + + let mut cache = inner.build_cache_bytes(); + + let changed_inner = Inner { + a: 42, + ..inner.clone() + }; + + let mut changes = TreeHashCache::build_changes_vec(&cache); + let mut cache_struct = TreeHashCache::from_mut_slice(&mut cache, &mut changes).unwrap(); + + changed_inner.cached_hash_tree_root(&inner, &mut cache_struct); + + let new_cache = cache_struct.into_slice(); + + let data1 = &int_to_bytes32(42); + let 
data2 = &int_to_bytes32(2); + let data3 = &int_to_bytes32(3); + let data4 = &int_to_bytes32(4); + + let data = join(vec![&data1, &data2, &data3, &data4]); + let expected = merkleize(&data); + + assert_eq!(expected, new_cache); + } + + #[test] + fn build_cache_matches_merkelize() { + let data1 = &int_to_bytes32(1); + let data2 = &int_to_bytes32(2); + let data3 = &int_to_bytes32(3); + let data4 = &int_to_bytes32(4); + + let data = join(vec![&data1, &data2, &data3, &data4]); + let expected = merkleize(&data); + + let inner = Inner { + a: 1, + b: 2, + c: 3, + d: 4, + }; + + let cache = inner.build_cache_bytes(); + + assert_eq!(expected, cache); + } + #[test] fn merkleize_4_leaves() { let data1 = hash(&int_to_bytes32(1)); From e33d1d0ebb69a362d6a3e3fd47efc1b538dd3d92 Mon Sep 17 00:00:00 2001 From: Paul Hauner Date: Wed, 27 Mar 2019 18:55:39 +1100 Subject: [PATCH 05/89] First passing tree hash test --- eth2/utils/ssz/src/cached_tree_hash.rs | 31 ++++++++++++++------------ 1 file changed, 17 insertions(+), 14 deletions(-) diff --git a/eth2/utils/ssz/src/cached_tree_hash.rs b/eth2/utils/ssz/src/cached_tree_hash.rs index 328fdb394..a85da8fd9 100644 --- a/eth2/utils/ssz/src/cached_tree_hash.rs +++ b/eth2/utils/ssz/src/cached_tree_hash.rs @@ -71,14 +71,17 @@ impl<'a> TreeHashCache<'a> { pub fn just_the_leaves(&mut self, leaves: usize) -> Option { let nodes = num_nodes(leaves); let internal = nodes - leaves; - let leaves_start = (self.chunk_offset + internal) * HASHSIZE; + + let leaves_start = (self.chunk_offset + internal) * BYTES_PER_CHUNK; + let leaves_end = leaves_start + leaves * BYTES_PER_CHUNK; + + let modified_start = self.chunk_offset + internal; + let modified_end = modified_start + leaves; Some(TreeHashCache { chunk_offset: self.chunk_offset + internal, - cache: self.cache.get_mut(leaves_start..leaves * HASHSIZE)?, - chunk_modified: self - .chunk_modified - .get_mut(self.chunk_offset..self.chunk_offset + leaves)?, + cache: 
self.cache.get_mut(leaves_start..leaves_end)?, + chunk_modified: self.chunk_modified.get_mut(modified_start..modified_end)?, }) } @@ -108,7 +111,7 @@ impl CachedTreeHash for u64 { fn cached_hash_tree_root(&self, other: &Self, cache: &mut TreeHashCache) -> Option<()> { if self != other { - cache.modify_current_chunk(&hash(&ssz_encode(self))); + cache.modify_current_chunk(&merkleize(&int_to_bytes32(*self))); } cache.increment(); @@ -140,18 +143,18 @@ impl CachedTreeHash for Inner { fn cached_hash_tree_root(&self, other: &Self, cache: &mut TreeHashCache) -> Option<()> { let num_leaves = 4; - let mut leaf_cache = cache.just_the_leaves(num_leaves)?; - self.a.cached_hash_tree_root(&other.a, &mut leaf_cache)?; - self.b.cached_hash_tree_root(&other.b, &mut leaf_cache)?; - self.c.cached_hash_tree_root(&other.c, &mut leaf_cache)?; - self.d.cached_hash_tree_root(&other.d, &mut leaf_cache)?; - - dbg!(leaf_cache.into_slice()); + { + let mut leaf_cache = cache.just_the_leaves(num_leaves)?; + self.a.cached_hash_tree_root(&other.a, &mut leaf_cache)?; + self.b.cached_hash_tree_root(&other.b, &mut leaf_cache)?; + self.c.cached_hash_tree_root(&other.c, &mut leaf_cache)?; + self.d.cached_hash_tree_root(&other.d, &mut leaf_cache)?; + } let nodes = num_nodes(num_leaves); let internal_chunks = nodes - num_leaves; - for chunk in 0..internal_chunks { + for chunk in (0..internal_chunks).into_iter().rev() { if cache.children_modified(chunk)? 
{ cache.modify_chunk(chunk, &cache.hash_children(chunk)?)?; } From acb1dd47cd71187eb34ab74479b07299f3eab3ef Mon Sep 17 00:00:00 2001 From: Paul Hauner Date: Wed, 27 Mar 2019 19:31:02 +1100 Subject: [PATCH 06/89] Make tree hash pass tests --- eth2/utils/ssz/src/cached_tree_hash.rs | 98 +++++++++++++++++++------- 1 file changed, 71 insertions(+), 27 deletions(-) diff --git a/eth2/utils/ssz/src/cached_tree_hash.rs b/eth2/utils/ssz/src/cached_tree_hash.rs index a85da8fd9..757bfa9f7 100644 --- a/eth2/utils/ssz/src/cached_tree_hash.rs +++ b/eth2/utils/ssz/src/cached_tree_hash.rs @@ -10,6 +10,7 @@ pub struct TreeHashCache<'a> { chunk_offset: usize, cache: &'a mut [u8], chunk_modified: &'a mut [bool], + hash_count: &'a mut usize, } impl<'a> TreeHashCache<'a> { @@ -17,17 +18,20 @@ impl<'a> TreeHashCache<'a> { vec![false; bytes.len() / BYTES_PER_CHUNK] } - pub fn from_mut_slice(bytes: &'a mut [u8], changes: &'a mut [bool]) -> Option { + pub fn from_mut_slice( + bytes: &'a mut [u8], + changes: &'a mut [bool], + hash_count: &'a mut usize, + ) -> Option { if bytes.len() % BYTES_PER_CHUNK > 0 { return None; } - let chunk_modified = vec![false; bytes.len() / BYTES_PER_CHUNK]; - Some(Self { chunk_offset: 0, cache: bytes, chunk_modified: changes, + hash_count, }) } @@ -36,12 +40,13 @@ impl<'a> TreeHashCache<'a> { } pub fn modify_current_chunk(&mut self, to: &[u8]) -> Option<()> { - self.modify_chunk(0, to) + self.modify_chunk(self.chunk_offset, to) } pub fn modify_chunk(&mut self, chunk: usize, to: &[u8]) -> Option<()> { let start = chunk * BYTES_PER_CHUNK; let end = start + BYTES_PER_CHUNK; + self.cache.get_mut(start..end)?.copy_from_slice(to); self.chunk_modified[chunk] = true; @@ -79,9 +84,10 @@ impl<'a> TreeHashCache<'a> { let modified_end = modified_start + leaves; Some(TreeHashCache { - chunk_offset: self.chunk_offset + internal, + chunk_offset: 0, cache: self.cache.get_mut(leaves_start..leaves_end)?, chunk_modified: self.chunk_modified.get_mut(modified_start..modified_end)?, 
+ hash_count: self.hash_count, }) } @@ -111,7 +117,8 @@ impl CachedTreeHash for u64 { fn cached_hash_tree_root(&self, other: &Self, cache: &mut TreeHashCache) -> Option<()> { if self != other { - cache.modify_current_chunk(&merkleize(&int_to_bytes32(*self))); + *cache.hash_count += 1; + cache.modify_current_chunk(&merkleize(&int_to_bytes32(*self)))?; } cache.increment(); @@ -156,6 +163,7 @@ impl CachedTreeHash for Inner { for chunk in (0..internal_chunks).into_iter().rev() { if cache.children_modified(chunk)? { + *cache.hash_count += 1; cache.modify_chunk(chunk, &cache.hash_children(chunk)?)?; } } @@ -197,7 +205,7 @@ pub fn merkleize(values: &[u8]) -> Vec { mod tests { use super::*; - fn join(many: Vec<&[u8]>) -> Vec { + fn join(many: Vec>) -> Vec { let mut all = vec![]; for one in many { all.extend_from_slice(&mut one.clone()) @@ -205,8 +213,7 @@ mod tests { all } - #[test] - fn cached_hash_on_inner() { + fn generic_test(index: usize) { let inner = Inner { a: 1, b: 2, @@ -216,37 +223,69 @@ mod tests { let mut cache = inner.build_cache_bytes(); - let changed_inner = Inner { - a: 42, - ..inner.clone() + let changed_inner = match index { + 0 => Inner { + a: 42, + ..inner.clone() + }, + 1 => Inner { + b: 42, + ..inner.clone() + }, + 2 => Inner { + c: 42, + ..inner.clone() + }, + 3 => Inner { + d: 42, + ..inner.clone() + }, + _ => panic!("bad index"), }; let mut changes = TreeHashCache::build_changes_vec(&cache); - let mut cache_struct = TreeHashCache::from_mut_slice(&mut cache, &mut changes).unwrap(); + let mut hash_count = 0; + let mut cache_struct = + TreeHashCache::from_mut_slice(&mut cache, &mut changes, &mut hash_count).unwrap(); - changed_inner.cached_hash_tree_root(&inner, &mut cache_struct); + changed_inner + .cached_hash_tree_root(&inner, &mut cache_struct) + .unwrap(); + + assert_eq!(*cache_struct.hash_count, 3); let new_cache = cache_struct.into_slice(); - let data1 = &int_to_bytes32(42); - let data2 = &int_to_bytes32(2); - let data3 = &int_to_bytes32(3); - 
let data4 = &int_to_bytes32(4); + let data1 = int_to_bytes32(1); + let data2 = int_to_bytes32(2); + let data3 = int_to_bytes32(3); + let data4 = int_to_bytes32(4); - let data = join(vec![&data1, &data2, &data3, &data4]); - let expected = merkleize(&data); + let mut data = vec![data1, data2, data3, data4]; + + data[index] = int_to_bytes32(42); + + let expected = merkleize(&join(data)); assert_eq!(expected, new_cache); } #[test] - fn build_cache_matches_merkelize() { - let data1 = &int_to_bytes32(1); - let data2 = &int_to_bytes32(2); - let data3 = &int_to_bytes32(3); - let data4 = &int_to_bytes32(4); + fn cached_hash_on_inner() { + generic_test(0); + generic_test(1); + generic_test(2); + generic_test(3); + } - let data = join(vec![&data1, &data2, &data3, &data4]); + #[test] + fn build_cache_matches_merkelize() { + let data1 = int_to_bytes32(1); + let data2 = int_to_bytes32(2); + let data3 = int_to_bytes32(3); + let data4 = int_to_bytes32(4); + + let data = join(vec![data1, data2, data3, data4]); let expected = merkleize(&data); let inner = Inner { @@ -268,7 +307,12 @@ mod tests { let data3 = hash(&int_to_bytes32(3)); let data4 = hash(&int_to_bytes32(4)); - let data = join(vec![&data1, &data2, &data3, &data4]); + let data = join(vec![ + data1.clone(), + data2.clone(), + data3.clone(), + data4.clone(), + ]); let cache = merkleize(&data); From b05787207fb6cb79b7918071dc65e3ed140e018b Mon Sep 17 00:00:00 2001 From: Paul Hauner Date: Thu, 28 Mar 2019 09:33:44 +1100 Subject: [PATCH 07/89] Refactor CachedTreeHash into owned bytes Instead of slices --- eth2/utils/ssz/src/cached_tree_hash.rs | 150 +++++++++++++------------ 1 file changed, 77 insertions(+), 73 deletions(-) diff --git a/eth2/utils/ssz/src/cached_tree_hash.rs b/eth2/utils/ssz/src/cached_tree_hash.rs index 757bfa9f7..e72ff1ffd 100644 --- a/eth2/utils/ssz/src/cached_tree_hash.rs +++ b/eth2/utils/ssz/src/cached_tree_hash.rs @@ -6,43 +6,29 @@ const BYTES_PER_CHUNK: usize = 32; const HASHSIZE: usize = 32; const 
MERKLE_HASH_CHUNCK: usize = 2 * BYTES_PER_CHUNK; -pub struct TreeHashCache<'a> { - chunk_offset: usize, - cache: &'a mut [u8], - chunk_modified: &'a mut [bool], - hash_count: &'a mut usize, +pub struct TreeHashCache { + cache: Vec, + chunk_modified: Vec, } -impl<'a> TreeHashCache<'a> { - pub fn build_changes_vec(bytes: &[u8]) -> Vec { - vec![false; bytes.len() / BYTES_PER_CHUNK] +impl Into> for TreeHashCache { + fn into(self) -> Vec { + self.cache } +} - pub fn from_mut_slice( - bytes: &'a mut [u8], - changes: &'a mut [bool], - hash_count: &'a mut usize, - ) -> Option { +impl TreeHashCache { + pub fn from_bytes(bytes: Vec) -> Option { if bytes.len() % BYTES_PER_CHUNK > 0 { return None; } Some(Self { - chunk_offset: 0, + chunk_modified: vec![false; bytes.len() / BYTES_PER_CHUNK], cache: bytes, - chunk_modified: changes, - hash_count, }) } - pub fn increment(&mut self) { - self.chunk_offset += 1 - } - - pub fn modify_current_chunk(&mut self, to: &[u8]) -> Option<()> { - self.modify_chunk(self.chunk_offset, to) - } - pub fn modify_chunk(&mut self, chunk: usize, to: &[u8]) -> Option<()> { let start = chunk * BYTES_PER_CHUNK; let end = start + BYTES_PER_CHUNK; @@ -72,28 +58,6 @@ impl<'a> TreeHashCache<'a> { Some(hash(&self.cache.get(start..end)?)) } - - pub fn just_the_leaves(&mut self, leaves: usize) -> Option { - let nodes = num_nodes(leaves); - let internal = nodes - leaves; - - let leaves_start = (self.chunk_offset + internal) * BYTES_PER_CHUNK; - let leaves_end = leaves_start + leaves * BYTES_PER_CHUNK; - - let modified_start = self.chunk_offset + internal; - let modified_end = modified_start + leaves; - - Some(TreeHashCache { - chunk_offset: 0, - cache: self.cache.get_mut(leaves_start..leaves_end)?, - chunk_modified: self.chunk_modified.get_mut(modified_start..modified_end)?, - hash_count: self.hash_count, - }) - } - - pub fn into_slice(self) -> &'a [u8] { - self.cache - } } fn children(parent: usize) -> (usize, usize) { @@ -107,7 +71,16 @@ fn 
num_nodes(num_leaves: usize) -> usize { pub trait CachedTreeHash { fn build_cache_bytes(&self) -> Vec; - fn cached_hash_tree_root(&self, other: &Self, cache: &mut TreeHashCache) -> Option<()>; + fn num_bytes(&self) -> usize; + + fn max_num_leaves(&self) -> usize; + + fn cached_hash_tree_root( + &self, + other: &Self, + cache: &mut TreeHashCache, + chunk: usize, + ) -> Option; } impl CachedTreeHash for u64 { @@ -115,15 +88,25 @@ impl CachedTreeHash for u64 { merkleize(&int_to_bytes32(*self)) } - fn cached_hash_tree_root(&self, other: &Self, cache: &mut TreeHashCache) -> Option<()> { + fn num_bytes(&self) -> usize { + 8 + } + + fn max_num_leaves(&self) -> usize { + 1 + } + + fn cached_hash_tree_root( + &self, + other: &Self, + cache: &mut TreeHashCache, + chunk: usize, + ) -> Option { if self != other { - *cache.hash_count += 1; - cache.modify_current_chunk(&merkleize(&int_to_bytes32(*self)))?; + cache.modify_chunk(chunk, &merkleize(&int_to_bytes32(*self)))?; } - cache.increment(); - - Some(()) + Some(chunk + 1) } } @@ -147,28 +130,52 @@ impl CachedTreeHash for Inner { merkleize(&leaves) } - fn cached_hash_tree_root(&self, other: &Self, cache: &mut TreeHashCache) -> Option<()> { - let num_leaves = 4; + fn max_num_leaves(&self) -> usize { + let mut leaves = 0; + leaves += self.a.max_num_leaves(); + leaves += self.b.max_num_leaves(); + leaves += self.c.max_num_leaves(); + leaves += self.d.max_num_leaves(); + leaves + } + fn num_bytes(&self) -> usize { + let mut bytes = 0; + bytes += self.a.num_bytes(); + bytes += self.b.num_bytes(); + bytes += self.c.num_bytes(); + bytes += self.d.num_bytes(); + bytes + } + + fn cached_hash_tree_root( + &self, + other: &Self, + cache: &mut TreeHashCache, + chunk: usize, + ) -> Option { + let num_leaves = self.max_num_leaves(); + let num_nodes = num_nodes(num_leaves); + let num_internal_nodes = num_nodes - num_leaves; + + // Skip past the internal nodes and update any changed leaf nodes. 
{ - let mut leaf_cache = cache.just_the_leaves(num_leaves)?; - self.a.cached_hash_tree_root(&other.a, &mut leaf_cache)?; - self.b.cached_hash_tree_root(&other.b, &mut leaf_cache)?; - self.c.cached_hash_tree_root(&other.c, &mut leaf_cache)?; - self.d.cached_hash_tree_root(&other.d, &mut leaf_cache)?; + let chunk = chunk + num_internal_nodes; + let chunk = self.a.cached_hash_tree_root(&other.a, cache, chunk)?; + let chunk = self.b.cached_hash_tree_root(&other.b, cache, chunk)?; + let chunk = self.c.cached_hash_tree_root(&other.c, cache, chunk)?; + let _chunk = self.d.cached_hash_tree_root(&other.d, cache, chunk)?; } - let nodes = num_nodes(num_leaves); - let internal_chunks = nodes - num_leaves; - - for chunk in (0..internal_chunks).into_iter().rev() { + // Iterate backwards through the internal nodes, rehashing any node where it's children + // have changed. + for chunk in (0..num_internal_nodes).into_iter().rev() { if cache.children_modified(chunk)? { - *cache.hash_count += 1; cache.modify_chunk(chunk, &cache.hash_children(chunk)?)?; } } - Some(()) + Some(chunk + num_nodes) } } @@ -243,18 +250,15 @@ mod tests { _ => panic!("bad index"), }; - let mut changes = TreeHashCache::build_changes_vec(&cache); - let mut hash_count = 0; - let mut cache_struct = - TreeHashCache::from_mut_slice(&mut cache, &mut changes, &mut hash_count).unwrap(); + let mut cache_struct = TreeHashCache::from_bytes(cache.clone()).unwrap(); changed_inner - .cached_hash_tree_root(&inner, &mut cache_struct) + .cached_hash_tree_root(&inner, &mut cache_struct, 0) .unwrap(); - assert_eq!(*cache_struct.hash_count, 3); + // assert_eq!(*cache_struct.hash_count, 3); - let new_cache = cache_struct.into_slice(); + let new_cache: Vec = cache_struct.into(); let data1 = int_to_bytes32(1); let data2 = int_to_bytes32(2); From 3c7e18bdf3544bc049634aeca936ea42983b2636 Mon Sep 17 00:00:00 2001 From: Paul Hauner Date: Thu, 28 Mar 2019 10:56:20 +1100 Subject: [PATCH 08/89] Sanitize for odd leaf count --- 
eth2/utils/ssz/src/cached_tree_hash.rs | 38 +++++++++++++++++++++++++- 1 file changed, 37 insertions(+), 1 deletion(-) diff --git a/eth2/utils/ssz/src/cached_tree_hash.rs b/eth2/utils/ssz/src/cached_tree_hash.rs index e72ff1ffd..525f35db3 100644 --- a/eth2/utils/ssz/src/cached_tree_hash.rs +++ b/eth2/utils/ssz/src/cached_tree_hash.rs @@ -179,6 +179,29 @@ impl CachedTreeHash for Inner { } } +fn last_leaf_needs_padding(num_bytes: usize) -> bool { + num_bytes % HASHSIZE != 0 +} + +fn num_leaves(num_bytes: usize) -> usize { + num_bytes / HASHSIZE +} + +fn num_bytes(num_leaves: usize) -> usize { + num_leaves * HASHSIZE +} + +pub fn sanitise_bytes(mut bytes: Vec) -> Vec { + let present_leaves = num_leaves(bytes.len()); + let required_leaves = present_leaves.next_power_of_two(); + + if (present_leaves != required_leaves) | last_leaf_needs_padding(bytes.len()) { + bytes.resize(num_bytes(required_leaves), 0); + } + + bytes +} + /// A reference function to test against. pub fn merkleize(values: &[u8]) -> Vec { let leaves = values.len() / HASHSIZE; @@ -220,6 +243,19 @@ mod tests { all } + #[test] + fn merkleize_odd() { + let data = join(vec![ + int_to_bytes32(1), + int_to_bytes32(2), + int_to_bytes32(3), + int_to_bytes32(4), + int_to_bytes32(5), + ]); + + merkleize(&sanitise_bytes(data)); + } + fn generic_test(index: usize) { let inner = Inner { a: 1, @@ -228,7 +264,7 @@ mod tests { d: 4, }; - let mut cache = inner.build_cache_bytes(); + let cache = inner.build_cache_bytes(); let changed_inner = match index { 0 => Inner { From 1285f1e9f8b6efd4fee83e6a9acbc3cc46c86a48 Mon Sep 17 00:00:00 2001 From: Paul Hauner Date: Thu, 28 Mar 2019 11:11:20 +1100 Subject: [PATCH 09/89] Restructure cached tree hash files, breaks tests --- eth2/utils/ssz/src/cached_tree_hash.rs | 335 +++---------------- eth2/utils/ssz/src/cached_tree_hash/impls.rs | 30 ++ eth2/utils/ssz/src/cached_tree_hash/tests.rs | 227 +++++++++++++ 3 files changed, 299 insertions(+), 293 deletions(-) create mode 100644 
eth2/utils/ssz/src/cached_tree_hash/impls.rs create mode 100644 eth2/utils/ssz/src/cached_tree_hash/tests.rs diff --git a/eth2/utils/ssz/src/cached_tree_hash.rs b/eth2/utils/ssz/src/cached_tree_hash.rs index 525f35db3..6535e5cda 100644 --- a/eth2/utils/ssz/src/cached_tree_hash.rs +++ b/eth2/utils/ssz/src/cached_tree_hash.rs @@ -1,11 +1,27 @@ -use crate::ssz_encode; use hashing::hash; -use int_to_bytes::int_to_bytes32; + +mod impls; +mod tests; const BYTES_PER_CHUNK: usize = 32; const HASHSIZE: usize = 32; const MERKLE_HASH_CHUNCK: usize = 2 * BYTES_PER_CHUNK; +pub trait CachedTreeHash { + fn build_cache_bytes(&self) -> Vec; + + fn num_bytes(&self) -> usize; + + fn max_num_leaves(&self) -> usize; + + fn cached_hash_tree_root( + &self, + other: &Self, + cache: &mut TreeHashCache, + chunk: usize, + ) -> Option; +} + pub struct TreeHashCache { cache: Vec, chunk_modified: Vec, @@ -68,142 +84,13 @@ fn num_nodes(num_leaves: usize) -> usize { 2 * num_leaves - 1 } -pub trait CachedTreeHash { - fn build_cache_bytes(&self) -> Vec; +/// Split `values` into a power-of-two, identical-length chunks (padding with `0`) and merkleize +/// them, returning the entire merkle tree. +/// +/// The root hash is `merkleize(values)[0..BYTES_PER_CHUNK]`. 
+pub fn merkleize(values: Vec) -> Vec { + let values = sanitise_bytes(values); - fn num_bytes(&self) -> usize; - - fn max_num_leaves(&self) -> usize; - - fn cached_hash_tree_root( - &self, - other: &Self, - cache: &mut TreeHashCache, - chunk: usize, - ) -> Option; -} - -impl CachedTreeHash for u64 { - fn build_cache_bytes(&self) -> Vec { - merkleize(&int_to_bytes32(*self)) - } - - fn num_bytes(&self) -> usize { - 8 - } - - fn max_num_leaves(&self) -> usize { - 1 - } - - fn cached_hash_tree_root( - &self, - other: &Self, - cache: &mut TreeHashCache, - chunk: usize, - ) -> Option { - if self != other { - cache.modify_chunk(chunk, &merkleize(&int_to_bytes32(*self)))?; - } - - Some(chunk + 1) - } -} - -#[derive(Clone)] -pub struct Inner { - pub a: u64, - pub b: u64, - pub c: u64, - pub d: u64, -} - -impl CachedTreeHash for Inner { - fn build_cache_bytes(&self) -> Vec { - let mut leaves = vec![]; - - leaves.append(&mut self.a.build_cache_bytes()); - leaves.append(&mut self.b.build_cache_bytes()); - leaves.append(&mut self.c.build_cache_bytes()); - leaves.append(&mut self.d.build_cache_bytes()); - - merkleize(&leaves) - } - - fn max_num_leaves(&self) -> usize { - let mut leaves = 0; - leaves += self.a.max_num_leaves(); - leaves += self.b.max_num_leaves(); - leaves += self.c.max_num_leaves(); - leaves += self.d.max_num_leaves(); - leaves - } - - fn num_bytes(&self) -> usize { - let mut bytes = 0; - bytes += self.a.num_bytes(); - bytes += self.b.num_bytes(); - bytes += self.c.num_bytes(); - bytes += self.d.num_bytes(); - bytes - } - - fn cached_hash_tree_root( - &self, - other: &Self, - cache: &mut TreeHashCache, - chunk: usize, - ) -> Option { - let num_leaves = self.max_num_leaves(); - let num_nodes = num_nodes(num_leaves); - let num_internal_nodes = num_nodes - num_leaves; - - // Skip past the internal nodes and update any changed leaf nodes. 
- { - let chunk = chunk + num_internal_nodes; - let chunk = self.a.cached_hash_tree_root(&other.a, cache, chunk)?; - let chunk = self.b.cached_hash_tree_root(&other.b, cache, chunk)?; - let chunk = self.c.cached_hash_tree_root(&other.c, cache, chunk)?; - let _chunk = self.d.cached_hash_tree_root(&other.d, cache, chunk)?; - } - - // Iterate backwards through the internal nodes, rehashing any node where it's children - // have changed. - for chunk in (0..num_internal_nodes).into_iter().rev() { - if cache.children_modified(chunk)? { - cache.modify_chunk(chunk, &cache.hash_children(chunk)?)?; - } - } - - Some(chunk + num_nodes) - } -} - -fn last_leaf_needs_padding(num_bytes: usize) -> bool { - num_bytes % HASHSIZE != 0 -} - -fn num_leaves(num_bytes: usize) -> usize { - num_bytes / HASHSIZE -} - -fn num_bytes(num_leaves: usize) -> usize { - num_leaves * HASHSIZE -} - -pub fn sanitise_bytes(mut bytes: Vec) -> Vec { - let present_leaves = num_leaves(bytes.len()); - let required_leaves = present_leaves.next_power_of_two(); - - if (present_leaves != required_leaves) | last_leaf_needs_padding(bytes.len()) { - bytes.resize(num_bytes(required_leaves), 0); - } - - bytes -} - -/// A reference function to test against. 
-pub fn merkleize(values: &[u8]) -> Vec { let leaves = values.len() / HASHSIZE; if leaves == 0 { @@ -231,163 +118,25 @@ pub fn merkleize(values: &[u8]) -> Vec { o } -#[cfg(test)] -mod tests { - use super::*; +pub fn sanitise_bytes(mut bytes: Vec) -> Vec { + let present_leaves = num_leaves(bytes.len()); + let required_leaves = present_leaves.next_power_of_two(); - fn join(many: Vec>) -> Vec { - let mut all = vec![]; - for one in many { - all.extend_from_slice(&mut one.clone()) - } - all + if (present_leaves != required_leaves) | last_leaf_needs_padding(bytes.len()) { + bytes.resize(num_bytes(required_leaves), 0); } - #[test] - fn merkleize_odd() { - let data = join(vec![ - int_to_bytes32(1), - int_to_bytes32(2), - int_to_bytes32(3), - int_to_bytes32(4), - int_to_bytes32(5), - ]); - - merkleize(&sanitise_bytes(data)); - } - - fn generic_test(index: usize) { - let inner = Inner { - a: 1, - b: 2, - c: 3, - d: 4, - }; - - let cache = inner.build_cache_bytes(); - - let changed_inner = match index { - 0 => Inner { - a: 42, - ..inner.clone() - }, - 1 => Inner { - b: 42, - ..inner.clone() - }, - 2 => Inner { - c: 42, - ..inner.clone() - }, - 3 => Inner { - d: 42, - ..inner.clone() - }, - _ => panic!("bad index"), - }; - - let mut cache_struct = TreeHashCache::from_bytes(cache.clone()).unwrap(); - - changed_inner - .cached_hash_tree_root(&inner, &mut cache_struct, 0) - .unwrap(); - - // assert_eq!(*cache_struct.hash_count, 3); - - let new_cache: Vec = cache_struct.into(); - - let data1 = int_to_bytes32(1); - let data2 = int_to_bytes32(2); - let data3 = int_to_bytes32(3); - let data4 = int_to_bytes32(4); - - let mut data = vec![data1, data2, data3, data4]; - - data[index] = int_to_bytes32(42); - - let expected = merkleize(&join(data)); - - assert_eq!(expected, new_cache); - } - - #[test] - fn cached_hash_on_inner() { - generic_test(0); - generic_test(1); - generic_test(2); - generic_test(3); - } - - #[test] - fn build_cache_matches_merkelize() { - let data1 = 
int_to_bytes32(1); - let data2 = int_to_bytes32(2); - let data3 = int_to_bytes32(3); - let data4 = int_to_bytes32(4); - - let data = join(vec![data1, data2, data3, data4]); - let expected = merkleize(&data); - - let inner = Inner { - a: 1, - b: 2, - c: 3, - d: 4, - }; - - let cache = inner.build_cache_bytes(); - - assert_eq!(expected, cache); - } - - #[test] - fn merkleize_4_leaves() { - let data1 = hash(&int_to_bytes32(1)); - let data2 = hash(&int_to_bytes32(2)); - let data3 = hash(&int_to_bytes32(3)); - let data4 = hash(&int_to_bytes32(4)); - - let data = join(vec![ - data1.clone(), - data2.clone(), - data3.clone(), - data4.clone(), - ]); - - let cache = merkleize(&data); - - let hash_12 = { - let mut joined = vec![]; - joined.append(&mut data1.clone()); - joined.append(&mut data2.clone()); - hash(&joined) - }; - let hash_34 = { - let mut joined = vec![]; - joined.append(&mut data3.clone()); - joined.append(&mut data4.clone()); - hash(&joined) - }; - let hash_hash12_hash_34 = { - let mut joined = vec![]; - joined.append(&mut hash_12.clone()); - joined.append(&mut hash_34.clone()); - hash(&joined) - }; - - for (i, chunk) in cache.chunks(HASHSIZE).enumerate().rev() { - let expected = match i { - 0 => hash_hash12_hash_34.clone(), - 1 => hash_12.clone(), - 2 => hash_34.clone(), - 3 => data1.clone(), - 4 => data2.clone(), - 5 => data3.clone(), - 6 => data4.clone(), - _ => vec![], - }; - - assert_eq!(chunk, &expected[..], "failed at {}", i); - } - } + bytes +} + +fn last_leaf_needs_padding(num_bytes: usize) -> bool { + num_bytes % HASHSIZE != 0 +} + +fn num_leaves(num_bytes: usize) -> usize { + num_bytes / HASHSIZE +} + +fn num_bytes(num_leaves: usize) -> usize { + num_leaves * HASHSIZE } diff --git a/eth2/utils/ssz/src/cached_tree_hash/impls.rs b/eth2/utils/ssz/src/cached_tree_hash/impls.rs new file mode 100644 index 000000000..b6b0d463a --- /dev/null +++ b/eth2/utils/ssz/src/cached_tree_hash/impls.rs @@ -0,0 +1,30 @@ +use super::*; +use crate::ssz_encode; + +impl 
CachedTreeHash for u64 { + fn build_cache_bytes(&self) -> Vec { + merkleize(ssz_encode(self)) + } + + fn num_bytes(&self) -> usize { + 8 + } + + fn max_num_leaves(&self) -> usize { + 1 + } + + fn cached_hash_tree_root( + &self, + other: &Self, + cache: &mut TreeHashCache, + chunk: usize, + ) -> Option { + if self != other { + let leaf = merkleize(ssz_encode(self)); + cache.modify_chunk(chunk, &leaf)?; + } + + Some(chunk + 1) + } +} diff --git a/eth2/utils/ssz/src/cached_tree_hash/tests.rs b/eth2/utils/ssz/src/cached_tree_hash/tests.rs new file mode 100644 index 000000000..79665f89d --- /dev/null +++ b/eth2/utils/ssz/src/cached_tree_hash/tests.rs @@ -0,0 +1,227 @@ +use super::*; +use int_to_bytes::int_to_bytes32; + +#[derive(Clone)] +pub struct Inner { + pub a: u64, + pub b: u64, + pub c: u64, + pub d: u64, +} + +impl CachedTreeHash for Inner { + fn build_cache_bytes(&self) -> Vec { + let mut leaves = vec![]; + + leaves.append(&mut self.a.build_cache_bytes()); + leaves.append(&mut self.b.build_cache_bytes()); + leaves.append(&mut self.c.build_cache_bytes()); + leaves.append(&mut self.d.build_cache_bytes()); + + merkleize(leaves) + } + + fn max_num_leaves(&self) -> usize { + let mut leaves = 0; + leaves += self.a.max_num_leaves(); + leaves += self.b.max_num_leaves(); + leaves += self.c.max_num_leaves(); + leaves += self.d.max_num_leaves(); + leaves + } + + fn num_bytes(&self) -> usize { + let mut bytes = 0; + bytes += self.a.num_bytes(); + bytes += self.b.num_bytes(); + bytes += self.c.num_bytes(); + bytes += self.d.num_bytes(); + bytes + } + + fn cached_hash_tree_root( + &self, + other: &Self, + cache: &mut TreeHashCache, + chunk: usize, + ) -> Option { + let num_leaves = self.max_num_leaves(); + let num_nodes = num_nodes(num_leaves); + let num_internal_nodes = num_nodes - num_leaves; + + // Skip past the internal nodes and update any changed leaf nodes. 
+ { + let chunk = chunk + num_internal_nodes; + let chunk = self.a.cached_hash_tree_root(&other.a, cache, chunk)?; + let chunk = self.b.cached_hash_tree_root(&other.b, cache, chunk)?; + let chunk = self.c.cached_hash_tree_root(&other.c, cache, chunk)?; + let _chunk = self.d.cached_hash_tree_root(&other.d, cache, chunk)?; + } + + // Iterate backwards through the internal nodes, rehashing any node where it's children + // have changed. + for chunk in (0..num_internal_nodes).into_iter().rev() { + if cache.children_modified(chunk)? { + cache.modify_chunk(chunk, &cache.hash_children(chunk)?)?; + } + } + + Some(chunk + num_nodes) + } +} + +fn join(many: Vec>) -> Vec { + let mut all = vec![]; + for one in many { + all.extend_from_slice(&mut one.clone()) + } + all +} + +#[test] +fn merkleize_odd() { + let data = join(vec![ + int_to_bytes32(1), + int_to_bytes32(2), + int_to_bytes32(3), + int_to_bytes32(4), + int_to_bytes32(5), + ]); + + merkleize(sanitise_bytes(data)); +} + +fn generic_test(index: usize) { + let inner = Inner { + a: 1, + b: 2, + c: 3, + d: 4, + }; + + let cache = inner.build_cache_bytes(); + + let changed_inner = match index { + 0 => Inner { + a: 42, + ..inner.clone() + }, + 1 => Inner { + b: 42, + ..inner.clone() + }, + 2 => Inner { + c: 42, + ..inner.clone() + }, + 3 => Inner { + d: 42, + ..inner.clone() + }, + _ => panic!("bad index"), + }; + + let mut cache_struct = TreeHashCache::from_bytes(cache.clone()).unwrap(); + + changed_inner + .cached_hash_tree_root(&inner, &mut cache_struct, 0) + .unwrap(); + + // assert_eq!(*cache_struct.hash_count, 3); + + let new_cache: Vec = cache_struct.into(); + + let data1 = int_to_bytes32(1); + let data2 = int_to_bytes32(2); + let data3 = int_to_bytes32(3); + let data4 = int_to_bytes32(4); + + let mut data = vec![data1, data2, data3, data4]; + + data[index] = int_to_bytes32(42); + + let expected = merkleize(join(data)); + + assert_eq!(expected, new_cache); +} + +#[test] +fn cached_hash_on_inner() { + generic_test(0); + 
generic_test(1); + generic_test(2); + generic_test(3); +} + +#[test] +fn build_cache_matches_merkelize() { + let data1 = int_to_bytes32(1); + let data2 = int_to_bytes32(2); + let data3 = int_to_bytes32(3); + let data4 = int_to_bytes32(4); + + let data = join(vec![data1, data2, data3, data4]); + let expected = merkleize(data); + + let inner = Inner { + a: 1, + b: 2, + c: 3, + d: 4, + }; + + let cache = inner.build_cache_bytes(); + + assert_eq!(expected, cache); +} + +#[test] +fn merkleize_4_leaves() { + let data1 = hash(&int_to_bytes32(1)); + let data2 = hash(&int_to_bytes32(2)); + let data3 = hash(&int_to_bytes32(3)); + let data4 = hash(&int_to_bytes32(4)); + + let data = join(vec![ + data1.clone(), + data2.clone(), + data3.clone(), + data4.clone(), + ]); + + let cache = merkleize(data); + + let hash_12 = { + let mut joined = vec![]; + joined.append(&mut data1.clone()); + joined.append(&mut data2.clone()); + hash(&joined) + }; + let hash_34 = { + let mut joined = vec![]; + joined.append(&mut data3.clone()); + joined.append(&mut data4.clone()); + hash(&joined) + }; + let hash_hash12_hash_34 = { + let mut joined = vec![]; + joined.append(&mut hash_12.clone()); + joined.append(&mut hash_34.clone()); + hash(&joined) + }; + + for (i, chunk) in cache.chunks(HASHSIZE).enumerate().rev() { + let expected = match i { + 0 => hash_hash12_hash_34.clone(), + 1 => hash_12.clone(), + 2 => hash_34.clone(), + 3 => data1.clone(), + 4 => data2.clone(), + 5 => data3.clone(), + 6 => data4.clone(), + _ => vec![], + }; + + assert_eq!(chunk, &expected[..], "failed at {}", i); + } +} From 224a967cce660866e6dd379787871eac1d2f532c Mon Sep 17 00:00:00 2001 From: Paul Hauner Date: Thu, 28 Mar 2019 13:05:24 +1100 Subject: [PATCH 10/89] Implement basic vec tree hash cache --- eth2/utils/ssz/src/cached_tree_hash.rs | 40 +++++++++-- eth2/utils/ssz/src/cached_tree_hash/impls.rs | 70 ++++++++++++++++++-- eth2/utils/ssz/src/cached_tree_hash/tests.rs | 40 ++++++++--- 3 files changed, 128 insertions(+), 
22 deletions(-) diff --git a/eth2/utils/ssz/src/cached_tree_hash.rs b/eth2/utils/ssz/src/cached_tree_hash.rs index 6535e5cda..caafaa2cf 100644 --- a/eth2/utils/ssz/src/cached_tree_hash.rs +++ b/eth2/utils/ssz/src/cached_tree_hash.rs @@ -8,15 +8,17 @@ const HASHSIZE: usize = 32; const MERKLE_HASH_CHUNCK: usize = 2 * BYTES_PER_CHUNK; pub trait CachedTreeHash { + type Item: CachedTreeHash; + fn build_cache_bytes(&self) -> Vec; + /// Return the number of bytes when this element is encoded as raw SSZ _without_ length + /// prefixes. fn num_bytes(&self) -> usize; - fn max_num_leaves(&self) -> usize; - fn cached_hash_tree_root( &self, - other: &Self, + other: &Self::Item, cache: &mut TreeHashCache, chunk: usize, ) -> Option; @@ -45,6 +47,18 @@ impl TreeHashCache { }) } + pub fn maybe_update_chunk(&mut self, chunk: usize, to: &[u8]) -> Option<()> { + let start = chunk * BYTES_PER_CHUNK; + let end = start + BYTES_PER_CHUNK; + + if !self.chunk_equals(chunk, to)? { + self.cache.get_mut(start..end)?.copy_from_slice(to); + self.chunk_modified[chunk] = true; + } + + Some(()) + } + pub fn modify_chunk(&mut self, chunk: usize, to: &[u8]) -> Option<()> { let start = chunk * BYTES_PER_CHUNK; let end = start + BYTES_PER_CHUNK; @@ -56,6 +70,13 @@ impl TreeHashCache { Some(()) } + pub fn chunk_equals(&mut self, chunk: usize, other: &[u8]) -> Option { + let start = chunk * BYTES_PER_CHUNK; + let end = start + BYTES_PER_CHUNK; + + Some(self.cache.get(start..end)? 
== other) + } + pub fn changed(&self, chunk: usize) -> Option { self.chunk_modified.get(chunk).cloned() } @@ -119,7 +140,7 @@ pub fn merkleize(values: Vec) -> Vec { } pub fn sanitise_bytes(mut bytes: Vec) -> Vec { - let present_leaves = num_leaves(bytes.len()); + let present_leaves = num_unsanitized_leaves(bytes.len()); let required_leaves = present_leaves.next_power_of_two(); if (present_leaves != required_leaves) | last_leaf_needs_padding(bytes.len()) { @@ -133,8 +154,15 @@ fn last_leaf_needs_padding(num_bytes: usize) -> bool { num_bytes % HASHSIZE != 0 } -fn num_leaves(num_bytes: usize) -> usize { - num_bytes / HASHSIZE +/// Rounds up +fn num_unsanitized_leaves(num_bytes: usize) -> usize { + (num_bytes + HASHSIZE - 1) / HASHSIZE +} + +/// Rounds up +fn num_sanitized_leaves(num_bytes: usize) -> usize { + let leaves = (num_bytes + HASHSIZE - 1) / HASHSIZE; + leaves.next_power_of_two() } fn num_bytes(num_leaves: usize) -> usize { diff --git a/eth2/utils/ssz/src/cached_tree_hash/impls.rs b/eth2/utils/ssz/src/cached_tree_hash/impls.rs index b6b0d463a..b27d28c4b 100644 --- a/eth2/utils/ssz/src/cached_tree_hash/impls.rs +++ b/eth2/utils/ssz/src/cached_tree_hash/impls.rs @@ -1,7 +1,9 @@ use super::*; -use crate::ssz_encode; +use crate::{ssz_encode, Encodable}; impl CachedTreeHash for u64 { + type Item = Self; + fn build_cache_bytes(&self) -> Vec { merkleize(ssz_encode(self)) } @@ -10,10 +12,6 @@ impl CachedTreeHash for u64 { 8 } - fn max_num_leaves(&self) -> usize { - 1 - } - fn cached_hash_tree_root( &self, other: &Self, @@ -28,3 +26,65 @@ impl CachedTreeHash for u64 { Some(chunk + 1) } } + +impl CachedTreeHash for Vec +where + T: CachedTreeHash + Encodable, +{ + type Item = Self; + + fn build_cache_bytes(&self) -> Vec { + let num_packed_bytes = self.num_bytes(); + let num_leaves = num_sanitized_leaves(num_packed_bytes); + + let mut packed = Vec::with_capacity(num_leaves * HASHSIZE); + + for item in self { + packed.append(&mut ssz_encode(item)); + } + + let packed = 
sanitise_bytes(packed); + + merkleize(packed) + } + + fn num_bytes(&self) -> usize { + self.iter().fold(0, |acc, item| acc + item.num_bytes()) + } + + fn cached_hash_tree_root( + &self, + other: &Self::Item, + cache: &mut TreeHashCache, + chunk: usize, + ) -> Option { + let num_packed_bytes = self.num_bytes(); + let num_leaves = num_sanitized_leaves(num_packed_bytes); + + if num_leaves != num_sanitized_leaves(other.num_bytes()) { + panic!("Need to handle a change in leaf count"); + } + + let mut packed = Vec::with_capacity(num_leaves * HASHSIZE); + + // TODO: try and avoid fully encoding the whole list + for item in self { + packed.append(&mut ssz_encode(item)); + } + + let packed = sanitise_bytes(packed); + + let num_nodes = num_nodes(num_leaves); + let num_internal_nodes = num_nodes - num_leaves; + + { + let mut chunk = chunk + num_internal_nodes; + for new_chunk_bytes in packed.chunks(HASHSIZE) { + cache.maybe_update_chunk(chunk, new_chunk_bytes)?; + chunk += 1; + } + } + + Some(chunk + num_nodes) + } +} diff --git a/eth2/utils/ssz/src/cached_tree_hash/tests.rs b/eth2/utils/ssz/src/cached_tree_hash/tests.rs index 79665f89d..f4a4b1d46 100644 --- a/eth2/utils/ssz/src/cached_tree_hash/tests.rs +++ b/eth2/utils/ssz/src/cached_tree_hash/tests.rs @@ -1,5 +1,5 @@ use super::*; -use int_to_bytes::int_to_bytes32; +use int_to_bytes::{int_to_bytes32, int_to_bytes8}; #[derive(Clone)] pub struct Inner { @@ -10,6 +10,8 @@ pub struct Inner { } impl CachedTreeHash for Inner { + type Item = Self; + fn build_cache_bytes(&self) -> Vec { let mut leaves = vec![]; @@ -21,15 +23,6 @@ impl CachedTreeHash for Inner { merkleize(leaves) } - fn max_num_leaves(&self) -> usize { - let mut leaves = 0; - leaves += self.a.max_num_leaves(); - leaves += self.b.max_num_leaves(); - leaves += self.c.max_num_leaves(); - leaves += self.d.max_num_leaves(); - leaves - } - fn num_bytes(&self) -> usize { let mut bytes = 0; bytes += self.a.num_bytes(); @@ -45,7 +38,12 @@ impl CachedTreeHash for Inner { 
cache: &mut TreeHashCache, chunk: usize, ) -> Option { - let num_leaves = self.max_num_leaves(); + let mut num_leaves: usize = 0; + num_leaves += num_unsanitized_leaves(self.a.num_bytes()); + num_leaves += num_unsanitized_leaves(self.b.num_bytes()); + num_leaves += num_unsanitized_leaves(self.c.num_bytes()); + num_leaves += num_unsanitized_leaves(self.d.num_bytes()); + let num_nodes = num_nodes(num_leaves); let num_internal_nodes = num_nodes - num_leaves; @@ -78,6 +76,26 @@ fn join(many: Vec>) -> Vec { all } +#[test] +fn vec_of_u64() { + let data = join(vec![ + int_to_bytes8(1), + int_to_bytes8(2), + int_to_bytes8(3), + int_to_bytes8(4), + int_to_bytes8(5), + vec![0; 32 - 8], // padding + ]); + + let expected = merkleize(data); + + let my_vec = vec![1, 2, 3, 4, 5]; + + let cache = my_vec.build_cache_bytes(); + + assert_eq!(expected, cache); +} + #[test] fn merkleize_odd() { let data = join(vec![ From 0d8d3385bef62d4bcb2c094fd7b6ad9d7b6e0801 Mon Sep 17 00:00:00 2001 From: Paul Hauner Date: Thu, 28 Mar 2019 14:17:25 +1100 Subject: [PATCH 11/89] Pass tree hash caching tests --- eth2/utils/ssz/src/cached_tree_hash/impls.rs | 8 +++ eth2/utils/ssz/src/cached_tree_hash/tests.rs | 53 +++++++++++++++++++- 2 files changed, 59 insertions(+), 2 deletions(-) diff --git a/eth2/utils/ssz/src/cached_tree_hash/impls.rs b/eth2/utils/ssz/src/cached_tree_hash/impls.rs index b27d28c4b..1c2bf342e 100644 --- a/eth2/utils/ssz/src/cached_tree_hash/impls.rs +++ b/eth2/utils/ssz/src/cached_tree_hash/impls.rs @@ -85,6 +85,14 @@ where } } + // Iterate backwards through the internal nodes, rehashing any node where it's children + // have changed. + for chunk in (chunk..chunk + num_internal_nodes).into_iter().rev() { + if cache.children_modified(chunk)? 
{ + cache.modify_chunk(chunk, &cache.hash_children(chunk)?)?; + } + } + Some(chunk + num_nodes) } } diff --git a/eth2/utils/ssz/src/cached_tree_hash/tests.rs b/eth2/utils/ssz/src/cached_tree_hash/tests.rs index f4a4b1d46..6c7567250 100644 --- a/eth2/utils/ssz/src/cached_tree_hash/tests.rs +++ b/eth2/utils/ssz/src/cached_tree_hash/tests.rs @@ -58,7 +58,7 @@ impl CachedTreeHash for Inner { // Iterate backwards through the internal nodes, rehashing any node where it's children // have changed. - for chunk in (0..num_internal_nodes).into_iter().rev() { + for chunk in (chunk..chunk + num_internal_nodes).into_iter().rev() { if cache.children_modified(chunk)? { cache.modify_chunk(chunk, &cache.hash_children(chunk)?)?; } @@ -77,7 +77,56 @@ fn join(many: Vec>) -> Vec { } #[test] -fn vec_of_u64() { +fn partial_modification_u64_vec() { + let n: u64 = 50; + + let original_vec: Vec = (0..n).collect(); + + // Generate initial cache. + let original_cache = original_vec.build_cache_bytes(); + + // Modify the vec + let mut modified_vec = original_vec.clone(); + modified_vec[n as usize - 1] = 42; + + // Perform a differential hash + let mut cache_struct = TreeHashCache::from_bytes(original_cache.clone()).unwrap(); + modified_vec.cached_hash_tree_root(&original_vec, &mut cache_struct, 0); + let modified_cache: Vec = cache_struct.into(); + + // Generate reference data. + let mut data = vec![]; + for i in &modified_vec { + data.append(&mut int_to_bytes8(*i)); + } + let data = sanitise_bytes(data); + let expected = merkleize(data); + + assert_eq!(expected, modified_cache); +} + +#[test] +fn large_vec_of_u64_builds() { + let n: u64 = 50; + + let my_vec: Vec = (0..n).collect(); + + // Generate function output. + let cache = my_vec.build_cache_bytes(); + + // Generate reference data. 
+ let mut data = vec![]; + for i in &my_vec { + data.append(&mut int_to_bytes8(*i)); + } + let data = sanitise_bytes(data); + let expected = merkleize(data); + + assert_eq!(expected, cache); +} + +#[test] +fn vec_of_u64_builds() { let data = join(vec![ int_to_bytes8(1), int_to_bytes8(2), From f21409fee1d050dabb60752a6df63de86411dd00 Mon Sep 17 00:00:00 2001 From: Paul Hauner Date: Thu, 28 Mar 2019 14:44:10 +1100 Subject: [PATCH 12/89] Build breaking recursion tests for cache hashing --- eth2/utils/ssz/src/cached_tree_hash/tests.rs | 136 ++++++++++++++++++- 1 file changed, 134 insertions(+), 2 deletions(-) diff --git a/eth2/utils/ssz/src/cached_tree_hash/tests.rs b/eth2/utils/ssz/src/cached_tree_hash/tests.rs index 6c7567250..8b235b3b9 100644 --- a/eth2/utils/ssz/src/cached_tree_hash/tests.rs +++ b/eth2/utils/ssz/src/cached_tree_hash/tests.rs @@ -68,6 +68,67 @@ impl CachedTreeHash for Inner { } } +#[derive(Clone)] +pub struct Outer { + pub a: u64, + pub b: Inner, + pub c: u64, +} + +impl CachedTreeHash for Outer { + type Item = Self; + + fn build_cache_bytes(&self) -> Vec { + let mut leaves = vec![]; + + leaves.append(&mut self.a.build_cache_bytes()); + leaves.append(&mut self.b.build_cache_bytes()); + leaves.append(&mut self.c.build_cache_bytes()); + + merkleize(leaves) + } + + fn num_bytes(&self) -> usize { + let mut bytes = 0; + bytes += self.a.num_bytes(); + bytes += self.b.num_bytes(); + bytes + } + + fn cached_hash_tree_root( + &self, + other: &Self, + cache: &mut TreeHashCache, + chunk: usize, + ) -> Option { + let mut num_leaves: usize = 0; + num_leaves += num_unsanitized_leaves(self.a.num_bytes()); + num_leaves += num_unsanitized_leaves(self.b.num_bytes()); + num_leaves += num_unsanitized_leaves(self.c.num_bytes()); + + let num_nodes = num_nodes(num_leaves); + let num_internal_nodes = num_nodes - num_leaves; + + // Skip past the internal nodes and update any changed leaf nodes. 
+ { + let chunk = chunk + num_internal_nodes; + let chunk = self.a.cached_hash_tree_root(&other.a, cache, chunk)?; + let chunk = self.b.cached_hash_tree_root(&other.b, cache, chunk)?; + let _chunk = self.c.cached_hash_tree_root(&other.c, cache, chunk)?; + } + + // Iterate backwards through the internal nodes, rehashing any node where it's children + // have changed. + for chunk in (chunk..chunk + num_internal_nodes).into_iter().rev() { + if cache.children_modified(chunk)? { + cache.modify_chunk(chunk, &cache.hash_children(chunk)?)?; + } + } + + Some(chunk + num_nodes) + } +} + fn join(many: Vec>) -> Vec { let mut all = vec![]; for one in many { @@ -76,6 +137,73 @@ fn join(many: Vec>) -> Vec { all } +#[test] +fn partial_modification_to_outer() { + let inner = Inner { + a: 1, + b: 2, + c: 3, + d: 4, + }; + + let original_outer = Outer { + a: 0, + b: inner.clone(), + c: 5, + }; + + // Build the initial cache. + let original_cache = original_outer.build_cache_bytes(); + + // Modify outer + let modified_outer = Outer { + c: 42, + ..original_outer.clone() + }; + + // Perform a differential hash + let mut cache_struct = TreeHashCache::from_bytes(original_cache.clone()).unwrap(); + modified_outer.cached_hash_tree_root(&original_outer, &mut cache_struct, 0); + let modified_cache: Vec = cache_struct.into(); + + // Generate reference data. + let mut data = vec![]; + data.append(&mut int_to_bytes32(0)); + data.append(&mut inner.build_cache_bytes()); + data.append(&mut int_to_bytes32(42)); + let merkle = merkleize(data); + + assert_eq!(merkle, modified_cache); +} + +#[test] +fn outer_builds() { + let inner = Inner { + a: 1, + b: 2, + c: 3, + d: 4, + }; + + let outer = Outer { + a: 0, + b: inner.clone(), + c: 5, + }; + + // Build the function output. + let cache = outer.build_cache_bytes(); + + // Generate reference data. 
+ let mut data = vec![]; + data.append(&mut int_to_bytes32(0)); + data.append(&mut inner.build_cache_bytes()); + data.append(&mut int_to_bytes32(5)); + let merkle = merkleize(data); + + assert_eq!(merkle, cache); +} + #[test] fn partial_modification_u64_vec() { let n: u64 = 50; @@ -155,7 +283,11 @@ fn merkleize_odd() { int_to_bytes32(5), ]); - merkleize(sanitise_bytes(data)); + let merkle = merkleize(sanitise_bytes(data)); + + let expected_len = num_nodes(8) * BYTES_PER_CHUNK; + + assert_eq!(merkle.len(), expected_len); } fn generic_test(index: usize) { @@ -221,7 +353,7 @@ fn cached_hash_on_inner() { } #[test] -fn build_cache_matches_merkelize() { +fn inner_builds() { let data1 = int_to_bytes32(1); let data2 = int_to_bytes32(2); let data3 = int_to_bytes32(3); From 49639c40ee26ec31dccf03d9ec2dfa3d6dc92d2d Mon Sep 17 00:00:00 2001 From: Paul Hauner Date: Thu, 28 Mar 2019 19:01:31 +1100 Subject: [PATCH 13/89] Implement failing cache hash test --- eth2/utils/ssz/src/cached_tree_hash.rs | 111 +++++++++++- eth2/utils/ssz/src/cached_tree_hash/impls.rs | 6 + eth2/utils/ssz/src/cached_tree_hash/tests.rs | 181 ++++++++++++++----- 3 files changed, 248 insertions(+), 50 deletions(-) diff --git a/eth2/utils/ssz/src/cached_tree_hash.rs b/eth2/utils/ssz/src/cached_tree_hash.rs index caafaa2cf..75598f0b2 100644 --- a/eth2/utils/ssz/src/cached_tree_hash.rs +++ b/eth2/utils/ssz/src/cached_tree_hash.rs @@ -1,4 +1,5 @@ use hashing::hash; +use std::iter::Iterator; mod impls; mod tests; @@ -16,6 +17,8 @@ pub trait CachedTreeHash { /// prefixes. fn num_bytes(&self) -> usize; + fn num_child_nodes(&self) -> usize; + fn cached_hash_tree_root( &self, other: &Self::Item, @@ -81,15 +84,24 @@ impl TreeHashCache { self.chunk_modified.get(chunk).cloned() } - pub fn children_modified(&self, parent_chunk: usize) -> Option { - let children = children(parent_chunk); - - Some(self.changed(children.0)? | self.changed(children.1)?) 
+ pub fn either_modified(&self, children: (&usize, &usize)) -> Option { + dbg!(&self.chunk_modified.len()); + dbg!(&self.cache.len() / BYTES_PER_CHUNK); + Some(self.changed(*children.0)? | self.changed(*children.1)?) } - pub fn hash_children(&self, parent_chunk: usize) -> Option> { + /* + pub fn children_modified(&self, parent_chunk: usize, child_offsets: &[usize]) -> Option { let children = children(parent_chunk); + let a = *child_offsets.get(children.0)?; + let b = *child_offsets.get(children.1)?; + + Some(self.changed(a)? | self.changed(b)?) + } + */ + + pub fn hash_children(&self, children: (&usize, &usize)) -> Option> { let start = children.0 * BYTES_PER_CHUNK; let end = start + BYTES_PER_CHUNK * 2; @@ -97,6 +109,30 @@ impl TreeHashCache { } } +/* +pub struct LocalCache { + offsets: Vec, +} + +impl LocalCache { + +} + +pub struct OffsetBTree { + offsets: Vec, +} + +impl From> for OffsetBTree { + fn from(offsets: Vec) -> Self { + Self { offsets } + } +} + +impl OffsetBTree { + fn +} +*/ + fn children(parent: usize) -> (usize, usize) { ((2 * parent + 1), (2 * parent + 2)) } @@ -105,6 +141,71 @@ fn num_nodes(num_leaves: usize) -> usize { 2 * num_leaves - 1 } +pub struct OffsetHandler { + num_internal_nodes: usize, + num_leaf_nodes: usize, + next_node: usize, + offsets: Vec, +} + +impl OffsetHandler { + fn from_lengths(offset: usize, mut lengths: Vec) -> Self { + // Extend it to the next power-of-two, if it is not already. 
+ let num_leaf_nodes = if lengths.len().is_power_of_two() { + lengths.len() + } else { + let num_leaf_nodes = lengths.len().next_power_of_two(); + lengths.resize(num_leaf_nodes, 1); + num_leaf_nodes + }; + + let num_nodes = num_nodes(num_leaf_nodes); + let num_internal_nodes = num_nodes - num_leaf_nodes; + + let mut offsets = Vec::with_capacity(num_nodes); + offsets.append(&mut (offset..offset + num_internal_nodes).collect()); + + let mut next_node = num_internal_nodes + offset; + for i in 0..num_leaf_nodes { + offsets.push(next_node); + next_node += lengths[i]; + } + + Self { + num_internal_nodes, + num_leaf_nodes, + offsets, + next_node, + } + } + + pub fn total_nodes(&self) -> usize { + self.num_internal_nodes + self.num_leaf_nodes + } + + pub fn first_leaf_node(&self) -> Option { + self.offsets.get(self.num_internal_nodes).cloned() + } + + pub fn next_node(&self) -> usize { + self.next_node + } + + pub fn iter_internal_nodes<'a>( + &'a self, + ) -> impl DoubleEndedIterator { + let internal_nodes = &self.offsets[0..self.num_internal_nodes]; + + internal_nodes.iter().enumerate().map(move |(i, parent)| { + let children = children(i); + ( + parent, + (&self.offsets[children.0], &self.offsets[children.1]), + ) + }) + } +} + /// Split `values` into a power-of-two, identical-length chunks (padding with `0`) and merkleize /// them, returning the entire merkle tree. 
/// diff --git a/eth2/utils/ssz/src/cached_tree_hash/impls.rs b/eth2/utils/ssz/src/cached_tree_hash/impls.rs index 1c2bf342e..6fb2d8938 100644 --- a/eth2/utils/ssz/src/cached_tree_hash/impls.rs +++ b/eth2/utils/ssz/src/cached_tree_hash/impls.rs @@ -12,6 +12,10 @@ impl CachedTreeHash for u64 { 8 } + fn num_child_nodes(&self) -> usize { + 0 + } + fn cached_hash_tree_root( &self, other: &Self, @@ -27,6 +31,7 @@ impl CachedTreeHash for u64 { } } +/* impl CachedTreeHash for Vec where T: CachedTreeHash + Encodable, @@ -96,3 +101,4 @@ where Some(chunk + num_nodes) } } +*/ diff --git a/eth2/utils/ssz/src/cached_tree_hash/tests.rs b/eth2/utils/ssz/src/cached_tree_hash/tests.rs index 8b235b3b9..791bc17b6 100644 --- a/eth2/utils/ssz/src/cached_tree_hash/tests.rs +++ b/eth2/utils/ssz/src/cached_tree_hash/tests.rs @@ -13,58 +13,87 @@ impl CachedTreeHash for Inner { type Item = Self; fn build_cache_bytes(&self) -> Vec { + let cache_a = self.a.build_cache_bytes(); + let cache_b = self.b.build_cache_bytes(); + let cache_c = self.c.build_cache_bytes(); + let cache_d = self.d.build_cache_bytes(); + let mut leaves = vec![]; + leaves.extend_from_slice(&cache_a[0..32].to_vec()); + leaves.extend_from_slice(&cache_b[0..32].to_vec()); + leaves.extend_from_slice(&cache_c[0..32].to_vec()); + leaves.extend_from_slice(&cache_d[0..32].to_vec()); - leaves.append(&mut self.a.build_cache_bytes()); - leaves.append(&mut self.b.build_cache_bytes()); - leaves.append(&mut self.c.build_cache_bytes()); - leaves.append(&mut self.d.build_cache_bytes()); + let mut merkle = merkleize(leaves); - merkleize(leaves) + let num_leaves = 4; + let num_nodes = num_nodes(num_leaves); + let num_internal_nodes = num_nodes - num_leaves; + + let mut next_hash = num_internal_nodes * HASHSIZE; + merkle.splice(next_hash..next_hash + HASHSIZE, cache_a); + next_hash += HASHSIZE; + merkle.splice(next_hash..next_hash + HASHSIZE, cache_b); + next_hash += HASHSIZE; + merkle.splice(next_hash..next_hash + HASHSIZE, cache_c); + 
next_hash += HASHSIZE; + merkle.splice(next_hash..next_hash + HASHSIZE, cache_d); + + merkle } fn num_bytes(&self) -> usize { let mut bytes = 0; + bytes += self.a.num_bytes(); bytes += self.b.num_bytes(); bytes += self.c.num_bytes(); bytes += self.d.num_bytes(); + bytes } + fn num_child_nodes(&self) -> usize { + let mut children = 0; + let leaves = 4; + + children += self.a.num_child_nodes(); + children += self.b.num_child_nodes(); + children += self.c.num_child_nodes(); + children += self.d.num_child_nodes(); + + num_nodes(leaves) + children - 1 + } + fn cached_hash_tree_root( &self, other: &Self, cache: &mut TreeHashCache, chunk: usize, ) -> Option { - let mut num_leaves: usize = 0; - num_leaves += num_unsanitized_leaves(self.a.num_bytes()); - num_leaves += num_unsanitized_leaves(self.b.num_bytes()); - num_leaves += num_unsanitized_leaves(self.c.num_bytes()); - num_leaves += num_unsanitized_leaves(self.d.num_bytes()); - - let num_nodes = num_nodes(num_leaves); - let num_internal_nodes = num_nodes - num_leaves; + let mut offsets = vec![]; + offsets.push(self.a.num_child_nodes() + 1); + offsets.push(self.b.num_child_nodes() + 1); + offsets.push(self.c.num_child_nodes() + 1); + offsets.push(self.d.num_child_nodes() + 1); + let offset_handler = OffsetHandler::from_lengths(chunk, offsets); // Skip past the internal nodes and update any changed leaf nodes. { - let chunk = chunk + num_internal_nodes; + let chunk = offset_handler.first_leaf_node()?; let chunk = self.a.cached_hash_tree_root(&other.a, cache, chunk)?; let chunk = self.b.cached_hash_tree_root(&other.b, cache, chunk)?; let chunk = self.c.cached_hash_tree_root(&other.c, cache, chunk)?; let _chunk = self.d.cached_hash_tree_root(&other.d, cache, chunk)?; } - // Iterate backwards through the internal nodes, rehashing any node where it's children - // have changed. - for chunk in (chunk..chunk + num_internal_nodes).into_iter().rev() { - if cache.children_modified(chunk)? 
{ - cache.modify_chunk(chunk, &cache.hash_children(chunk)?)?; + for (&parent, children) in offset_handler.iter_internal_nodes().rev() { + if cache.either_modified(children)? { + cache.modify_chunk(parent, &cache.hash_children(children)?)?; } } - Some(chunk + num_nodes) + Some(offset_handler.next_node()) } } @@ -79,53 +108,79 @@ impl CachedTreeHash for Outer { type Item = Self; fn build_cache_bytes(&self) -> Vec { + let cache_a = self.a.build_cache_bytes(); + let cache_b = self.b.build_cache_bytes(); + let cache_c = self.c.build_cache_bytes(); + let mut leaves = vec![]; + leaves.extend_from_slice(&cache_a[0..32].to_vec()); + leaves.extend_from_slice(&cache_b[0..32].to_vec()); + leaves.extend_from_slice(&cache_c[0..32].to_vec()); - leaves.append(&mut self.a.build_cache_bytes()); - leaves.append(&mut self.b.build_cache_bytes()); - leaves.append(&mut self.c.build_cache_bytes()); + let mut merkle = merkleize(leaves); - merkleize(leaves) + let num_leaves = 4; + let num_nodes = num_nodes(num_leaves); + let num_internal_nodes = num_nodes - num_leaves; + + let mut next_hash = num_internal_nodes * HASHSIZE; + merkle.splice(next_hash..next_hash + HASHSIZE, cache_a); + next_hash += (self.a.num_child_nodes() + 1) * HASHSIZE; + merkle.splice(next_hash..next_hash + HASHSIZE, cache_b); + next_hash += (self.b.num_child_nodes() + 1) * HASHSIZE; + merkle.splice(next_hash..next_hash + HASHSIZE, cache_c); + + merkle } fn num_bytes(&self) -> usize { let mut bytes = 0; bytes += self.a.num_bytes(); bytes += self.b.num_bytes(); + bytes += self.c.num_bytes(); bytes } + fn num_child_nodes(&self) -> usize { + let mut children = 0; + let leaves = 3; + + children += self.a.num_child_nodes(); + children += self.b.num_child_nodes(); + children += self.c.num_child_nodes(); + + num_nodes(leaves) + children - 1 + } + fn cached_hash_tree_root( &self, other: &Self, cache: &mut TreeHashCache, chunk: usize, ) -> Option { - let mut num_leaves: usize = 0; - num_leaves += 
num_unsanitized_leaves(self.a.num_bytes()); - num_leaves += num_unsanitized_leaves(self.b.num_bytes()); - num_leaves += num_unsanitized_leaves(self.c.num_bytes()); - - let num_nodes = num_nodes(num_leaves); - let num_internal_nodes = num_nodes - num_leaves; + let mut offsets = vec![]; + offsets.push(self.a.num_child_nodes() + 1); + offsets.push(self.b.num_child_nodes() + 1); + offsets.push(self.c.num_child_nodes() + 1); + let offset_handler = OffsetHandler::from_lengths(chunk, offsets); // Skip past the internal nodes and update any changed leaf nodes. { - let chunk = chunk + num_internal_nodes; + let chunk = offset_handler.first_leaf_node()?; let chunk = self.a.cached_hash_tree_root(&other.a, cache, chunk)?; let chunk = self.b.cached_hash_tree_root(&other.b, cache, chunk)?; let _chunk = self.c.cached_hash_tree_root(&other.c, cache, chunk)?; } - // Iterate backwards through the internal nodes, rehashing any node where it's children - // have changed. - for chunk in (chunk..chunk + num_internal_nodes).into_iter().rev() { - if cache.children_modified(chunk)? { - cache.modify_chunk(chunk, &cache.hash_children(chunk)?)?; + for (&parent, children) in offset_handler.iter_internal_nodes().rev() { + if cache.either_modified(children)? { + dbg!(parent); + dbg!(children); + cache.modify_chunk(parent, &cache.hash_children(children)?)?; } } - Some(chunk + num_nodes) + Some(offset_handler.next_node()) } } @@ -163,15 +218,30 @@ fn partial_modification_to_outer() { // Perform a differential hash let mut cache_struct = TreeHashCache::from_bytes(original_cache.clone()).unwrap(); - modified_outer.cached_hash_tree_root(&original_outer, &mut cache_struct, 0); + + modified_outer + .cached_hash_tree_root(&original_outer, &mut cache_struct, 0) + .unwrap(); + let modified_cache: Vec = cache_struct.into(); // Generate reference data. 
let mut data = vec![]; data.append(&mut int_to_bytes32(0)); - data.append(&mut inner.build_cache_bytes()); - data.append(&mut int_to_bytes32(42)); - let merkle = merkleize(data); + let inner_bytes = inner.build_cache_bytes(); + data.append(&mut int_to_bytes32(5)); + + let leaves = vec![ + int_to_bytes32(0), + inner_bytes[0..32].to_vec(), + int_to_bytes32(5), + vec![0; 32], // padding + ]; + let mut merkle = merkleize(join(leaves)); + merkle.splice(4 * 32..5 * 32, inner_bytes); + + assert_eq!(merkle.len() / HASHSIZE, 13); + assert_eq!(modified_cache.len() / HASHSIZE, 13); assert_eq!(merkle, modified_cache); } @@ -197,13 +267,33 @@ fn outer_builds() { // Generate reference data. let mut data = vec![]; data.append(&mut int_to_bytes32(0)); - data.append(&mut inner.build_cache_bytes()); + let inner_bytes = inner.build_cache_bytes(); data.append(&mut int_to_bytes32(5)); - let merkle = merkleize(data); - assert_eq!(merkle, cache); + let leaves = vec![ + int_to_bytes32(0), + inner_bytes[0..32].to_vec(), + int_to_bytes32(5), + vec![0; 32], // padding + ]; + let mut merkle = merkleize(join(leaves)); + merkle.splice(4 * 32..5 * 32, inner_bytes); + + assert_eq!(merkle.len() / HASHSIZE, 13); + assert_eq!(cache.len() / HASHSIZE, 13); + + for (i, chunk) in cache.chunks(HASHSIZE).enumerate() { + assert_eq!( + merkle[i * HASHSIZE..(i + 1) * HASHSIZE], + *chunk, + "failed on {}", + i + ); + } + // assert_eq!(merkle, cache); } +/* #[test] fn partial_modification_u64_vec() { let n: u64 = 50; @@ -272,6 +362,7 @@ fn vec_of_u64_builds() { assert_eq!(expected, cache); } +*/ #[test] fn merkleize_odd() { From 2dcf1c857c893319464a5004240f22f9bf250582 Mon Sep 17 00:00:00 2001 From: Paul Hauner Date: Thu, 28 Mar 2019 23:21:24 +1100 Subject: [PATCH 14/89] Fix failing cache hashing test --- eth2/utils/ssz/src/cached_tree_hash.rs | 2 -- eth2/utils/ssz/src/cached_tree_hash/tests.rs | 14 ++------------ 2 files changed, 2 insertions(+), 14 deletions(-) diff --git 
a/eth2/utils/ssz/src/cached_tree_hash.rs b/eth2/utils/ssz/src/cached_tree_hash.rs index 75598f0b2..bf2e4b389 100644 --- a/eth2/utils/ssz/src/cached_tree_hash.rs +++ b/eth2/utils/ssz/src/cached_tree_hash.rs @@ -85,8 +85,6 @@ impl TreeHashCache { } pub fn either_modified(&self, children: (&usize, &usize)) -> Option { - dbg!(&self.chunk_modified.len()); - dbg!(&self.cache.len() / BYTES_PER_CHUNK); Some(self.changed(*children.0)? | self.changed(*children.1)?) } diff --git a/eth2/utils/ssz/src/cached_tree_hash/tests.rs b/eth2/utils/ssz/src/cached_tree_hash/tests.rs index 791bc17b6..13e8ef556 100644 --- a/eth2/utils/ssz/src/cached_tree_hash/tests.rs +++ b/eth2/utils/ssz/src/cached_tree_hash/tests.rs @@ -174,8 +174,6 @@ impl CachedTreeHash for Outer { for (&parent, children) in offset_handler.iter_internal_nodes().rev() { if cache.either_modified(children)? { - dbg!(parent); - dbg!(children); cache.modify_chunk(parent, &cache.hash_children(children)?)?; } } @@ -234,7 +232,7 @@ fn partial_modification_to_outer() { let leaves = vec![ int_to_bytes32(0), inner_bytes[0..32].to_vec(), - int_to_bytes32(5), + int_to_bytes32(42), vec![0; 32], // padding ]; let mut merkle = merkleize(join(leaves)); @@ -282,15 +280,7 @@ fn outer_builds() { assert_eq!(merkle.len() / HASHSIZE, 13); assert_eq!(cache.len() / HASHSIZE, 13); - for (i, chunk) in cache.chunks(HASHSIZE).enumerate() { - assert_eq!( - merkle[i * HASHSIZE..(i + 1) * HASHSIZE], - *chunk, - "failed on {}", - i - ); - } - // assert_eq!(merkle, cache); + assert_eq!(merkle, cache); } /* From 40bfd5a6c7423ea220222113255ba28c172829b4 Mon Sep 17 00:00:00 2001 From: Paul Hauner Date: Thu, 28 Mar 2019 23:58:27 +1100 Subject: [PATCH 15/89] Add offset manager method to cache hash trait --- eth2/utils/ssz/src/cached_tree_hash.rs | 32 +++++ eth2/utils/ssz/src/cached_tree_hash/impls.rs | 4 + eth2/utils/ssz/src/cached_tree_hash/tests.rs | 136 ++++++++++++++----- 3 files changed, 137 insertions(+), 35 deletions(-) diff --git 
a/eth2/utils/ssz/src/cached_tree_hash.rs b/eth2/utils/ssz/src/cached_tree_hash.rs index bf2e4b389..ce90afd33 100644 --- a/eth2/utils/ssz/src/cached_tree_hash.rs +++ b/eth2/utils/ssz/src/cached_tree_hash.rs @@ -1,5 +1,8 @@ use hashing::hash; +use std::iter::IntoIterator; use std::iter::Iterator; +use std::ops::Range; +use std::vec::Splice; mod impls; mod tests; @@ -17,6 +20,8 @@ pub trait CachedTreeHash { /// prefixes. fn num_bytes(&self) -> usize; + fn offset_handler(&self, initial_offset: usize) -> Option; + fn num_child_nodes(&self) -> usize; fn cached_hash_tree_root( @@ -50,6 +55,27 @@ impl TreeHashCache { }) } + pub fn single_chunk_splice(&mut self, chunk: usize, replace_with: I) -> Splice + where + I: IntoIterator, + { + self.chunk_splice(chunk..chunk + 1, replace_with) + } + + pub fn chunk_splice( + &mut self, + chunk_range: Range, + replace_with: I, + ) -> Splice + where + I: IntoIterator, + { + let byte_start = chunk_range.start * BYTES_PER_CHUNK; + let byte_end = chunk_range.end * BYTES_PER_CHUNK; + + self.cache.splice(byte_start..byte_end, replace_with) + } + pub fn maybe_update_chunk(&mut self, chunk: usize, to: &[u8]) -> Option<()> { let start = chunk * BYTES_PER_CHUNK; let end = start + BYTES_PER_CHUNK; @@ -202,6 +228,12 @@ impl OffsetHandler { ) }) } + + pub fn iter_leaf_nodes<'a>(&'a self) -> impl DoubleEndedIterator { + let leaf_nodes = &self.offsets[self.num_internal_nodes..]; + + leaf_nodes.iter() + } } /// Split `values` into a power-of-two, identical-length chunks (padding with `0`) and merkleize diff --git a/eth2/utils/ssz/src/cached_tree_hash/impls.rs b/eth2/utils/ssz/src/cached_tree_hash/impls.rs index 6fb2d8938..a4d1c7d1a 100644 --- a/eth2/utils/ssz/src/cached_tree_hash/impls.rs +++ b/eth2/utils/ssz/src/cached_tree_hash/impls.rs @@ -12,6 +12,10 @@ impl CachedTreeHash for u64 { 8 } + fn offset_handler(&self, _initial_offset: usize) -> Option { + None + } + fn num_child_nodes(&self) -> usize { 0 } diff --git 
a/eth2/utils/ssz/src/cached_tree_hash/tests.rs b/eth2/utils/ssz/src/cached_tree_hash/tests.rs index 13e8ef556..9db0a5906 100644 --- a/eth2/utils/ssz/src/cached_tree_hash/tests.rs +++ b/eth2/utils/ssz/src/cached_tree_hash/tests.rs @@ -24,22 +24,19 @@ impl CachedTreeHash for Inner { leaves.extend_from_slice(&cache_c[0..32].to_vec()); leaves.extend_from_slice(&cache_d[0..32].to_vec()); - let mut merkle = merkleize(leaves); + // TODO: fix unwrap + let mut cache = TreeHashCache::from_bytes(merkleize(leaves)).unwrap(); - let num_leaves = 4; - let num_nodes = num_nodes(num_leaves); - let num_internal_nodes = num_nodes - num_leaves; + // TODO: fix unwrap + let offset_handler = self.offset_handler(0).unwrap(); + let mut iter = offset_handler.iter_leaf_nodes(); - let mut next_hash = num_internal_nodes * HASHSIZE; - merkle.splice(next_hash..next_hash + HASHSIZE, cache_a); - next_hash += HASHSIZE; - merkle.splice(next_hash..next_hash + HASHSIZE, cache_b); - next_hash += HASHSIZE; - merkle.splice(next_hash..next_hash + HASHSIZE, cache_c); - next_hash += HASHSIZE; - merkle.splice(next_hash..next_hash + HASHSIZE, cache_d); + cache.single_chunk_splice(*iter.next().unwrap(), cache_a); + cache.single_chunk_splice(*iter.next().unwrap(), cache_b); + cache.single_chunk_splice(*iter.next().unwrap(), cache_c); + cache.single_chunk_splice(*iter.next().unwrap(), cache_d); - merkle + cache.into() } fn num_bytes(&self) -> usize { @@ -53,6 +50,17 @@ impl CachedTreeHash for Inner { bytes } + fn offset_handler(&self, initial_offset: usize) -> Option { + let mut offsets = vec![]; + + offsets.push(self.a.num_child_nodes() + 1); + offsets.push(self.b.num_child_nodes() + 1); + offsets.push(self.c.num_child_nodes() + 1); + offsets.push(self.d.num_child_nodes() + 1); + + Some(OffsetHandler::from_lengths(initial_offset, offsets)) + } + fn num_child_nodes(&self) -> usize { let mut children = 0; let leaves = 4; @@ -71,12 +79,7 @@ impl CachedTreeHash for Inner { cache: &mut TreeHashCache, chunk: usize, ) 
-> Option { - let mut offsets = vec![]; - offsets.push(self.a.num_child_nodes() + 1); - offsets.push(self.b.num_child_nodes() + 1); - offsets.push(self.c.num_child_nodes() + 1); - offsets.push(self.d.num_child_nodes() + 1); - let offset_handler = OffsetHandler::from_lengths(chunk, offsets); + let offset_handler = self.offset_handler(chunk)?; // Skip past the internal nodes and update any changed leaf nodes. { @@ -117,20 +120,18 @@ impl CachedTreeHash for Outer { leaves.extend_from_slice(&cache_b[0..32].to_vec()); leaves.extend_from_slice(&cache_c[0..32].to_vec()); - let mut merkle = merkleize(leaves); + // TODO: fix unwrap + let mut cache = TreeHashCache::from_bytes(merkleize(leaves)).unwrap(); - let num_leaves = 4; - let num_nodes = num_nodes(num_leaves); - let num_internal_nodes = num_nodes - num_leaves; + // TODO: fix unwrap + let offset_handler = self.offset_handler(0).unwrap(); + let mut iter = offset_handler.iter_leaf_nodes(); - let mut next_hash = num_internal_nodes * HASHSIZE; - merkle.splice(next_hash..next_hash + HASHSIZE, cache_a); - next_hash += (self.a.num_child_nodes() + 1) * HASHSIZE; - merkle.splice(next_hash..next_hash + HASHSIZE, cache_b); - next_hash += (self.b.num_child_nodes() + 1) * HASHSIZE; - merkle.splice(next_hash..next_hash + HASHSIZE, cache_c); + cache.single_chunk_splice(*iter.next().unwrap(), cache_a); + cache.single_chunk_splice(*iter.next().unwrap(), cache_b); + cache.single_chunk_splice(*iter.next().unwrap(), cache_c); - merkle + cache.into() } fn num_bytes(&self) -> usize { @@ -152,17 +153,23 @@ impl CachedTreeHash for Outer { num_nodes(leaves) + children - 1 } + fn offset_handler(&self, initial_offset: usize) -> Option { + let mut offsets = vec![]; + + offsets.push(self.a.num_child_nodes() + 1); + offsets.push(self.b.num_child_nodes() + 1); + offsets.push(self.c.num_child_nodes() + 1); + + Some(OffsetHandler::from_lengths(initial_offset, offsets)) + } + fn cached_hash_tree_root( &self, other: &Self, cache: &mut TreeHashCache, 
chunk: usize, ) -> Option { - let mut offsets = vec![]; - offsets.push(self.a.num_child_nodes() + 1); - offsets.push(self.b.num_child_nodes() + 1); - offsets.push(self.c.num_child_nodes() + 1); - let offset_handler = OffsetHandler::from_lengths(chunk, offsets); + let offset_handler = self.offset_handler(chunk)?; // Skip past the internal nodes and update any changed leaf nodes. { @@ -190,6 +197,65 @@ fn join(many: Vec>) -> Vec { all } +#[test] +fn partial_modification_to_inner_struct() { + let original_inner = Inner { + a: 1, + b: 2, + c: 3, + d: 4, + }; + + let original_outer = Outer { + a: 0, + b: original_inner.clone(), + c: 5, + }; + + let modified_inner = Inner { + a: 42, + ..original_inner.clone() + }; + + // Build the initial cache. + let original_cache = original_outer.build_cache_bytes(); + + // Modify outer + let modified_outer = Outer { + b: modified_inner.clone(), + ..original_outer.clone() + }; + + // Perform a differential hash + let mut cache_struct = TreeHashCache::from_bytes(original_cache.clone()).unwrap(); + + modified_outer + .cached_hash_tree_root(&original_outer, &mut cache_struct, 0) + .unwrap(); + + let modified_cache: Vec = cache_struct.into(); + + // Generate reference data. 
+ let mut data = vec![]; + data.append(&mut int_to_bytes32(0)); + let inner_bytes = modified_inner.build_cache_bytes(); + data.append(&mut int_to_bytes32(5)); + + let leaves = vec![ + int_to_bytes32(0), + inner_bytes[0..32].to_vec(), + int_to_bytes32(5), + vec![0; 32], // padding + ]; + let mut merkle = merkleize(join(leaves)); + merkle.splice(4 * 32..5 * 32, inner_bytes); + + assert_eq!(merkle.len() / HASHSIZE, 13); + assert_eq!(modified_cache.len() / HASHSIZE, 13); + + assert_eq!(merkle, modified_cache); +} + #[test] fn partial_modification_to_outer() { let inner = Inner { From 7b05c506df36317e07aaa751e556526b0719cbdd Mon Sep 17 00:00:00 2001 From: Paul Hauner Date: Fri, 29 Mar 2019 00:47:42 +1100 Subject: [PATCH 16/89] Add new build method for cached hashes --- eth2/utils/ssz/src/cached_tree_hash.rs | 33 +++++++++++++++ eth2/utils/ssz/src/cached_tree_hash/tests.rs | 44 ++++++-------------- 2 files changed, 46 insertions(+), 31 deletions(-) diff --git a/eth2/utils/ssz/src/cached_tree_hash.rs b/eth2/utils/ssz/src/cached_tree_hash.rs index ce90afd33..be3fe98de 100644 --- a/eth2/utils/ssz/src/cached_tree_hash.rs +++ b/eth2/utils/ssz/src/cached_tree_hash.rs @@ -44,6 +44,39 @@ impl Into> for TreeHashCache { } impl TreeHashCache { + pub fn new(mut leaves_and_subtrees: Vec, offset_handler: OffsetHandler) -> Option { + if leaves_and_subtrees.len() % BYTES_PER_CHUNK != 0 { + return None; + } + + // Allocate enough bytes to store the internal nodes and the leaves and subtrees, then fill + // all the to-be-built internal nodes with zeros and append the leaves and subtrees. + let internal_node_bytes = offset_handler.num_internal_nodes * BYTES_PER_CHUNK; + let mut cache = Vec::with_capacity(internal_node_bytes + leaves_and_subtrees.len()); + cache.resize(internal_node_bytes, 0); + cache.append(&mut leaves_and_subtrees); + + // Concat all the leaves into one big byte array, ready for `merkleize`. 
+ let mut leaves = vec![]; + for leaf_chunk in offset_handler.iter_leaf_nodes() { + let start = leaf_chunk * BYTES_PER_CHUNK; + let end = start + BYTES_PER_CHUNK; + + leaves.extend_from_slice(cache.get(start..end)?); + } + + // Merkleize the leaves, then split the leaf nodes off them. Then, replace all-zeros + // internal nodes created earlier with the internal nodes generated by `merkleize`. + let mut merkleized = merkleize(leaves); + merkleized.split_off(internal_node_bytes); + cache.splice(0..internal_node_bytes, merkleized); + + Some(Self { + chunk_modified: vec![false; cache.len() / BYTES_PER_CHUNK], + cache, + }) + } + pub fn from_bytes(bytes: Vec) -> Option { if bytes.len() % BYTES_PER_CHUNK > 0 { return None; diff --git a/eth2/utils/ssz/src/cached_tree_hash/tests.rs b/eth2/utils/ssz/src/cached_tree_hash/tests.rs index 9db0a5906..9cb012c79 100644 --- a/eth2/utils/ssz/src/cached_tree_hash/tests.rs +++ b/eth2/utils/ssz/src/cached_tree_hash/tests.rs @@ -13,28 +13,18 @@ impl CachedTreeHash for Inner { type Item = Self; fn build_cache_bytes(&self) -> Vec { - let cache_a = self.a.build_cache_bytes(); - let cache_b = self.b.build_cache_bytes(); - let cache_c = self.c.build_cache_bytes(); - let cache_d = self.d.build_cache_bytes(); + let mut leaves_and_subtrees = vec![]; - let mut leaves = vec![]; - leaves.extend_from_slice(&cache_a[0..32].to_vec()); - leaves.extend_from_slice(&cache_b[0..32].to_vec()); - leaves.extend_from_slice(&cache_c[0..32].to_vec()); - leaves.extend_from_slice(&cache_d[0..32].to_vec()); - - // TODO: fix unwrap - let mut cache = TreeHashCache::from_bytes(merkleize(leaves)).unwrap(); + leaves_and_subtrees.append(&mut self.a.build_cache_bytes()); + leaves_and_subtrees.append(&mut self.b.build_cache_bytes()); + leaves_and_subtrees.append(&mut self.c.build_cache_bytes()); + leaves_and_subtrees.append(&mut self.d.build_cache_bytes()); // TODO: fix unwrap let offset_handler = self.offset_handler(0).unwrap(); - let mut iter = 
offset_handler.iter_leaf_nodes(); - cache.single_chunk_splice(*iter.next().unwrap(), cache_a); - cache.single_chunk_splice(*iter.next().unwrap(), cache_b); - cache.single_chunk_splice(*iter.next().unwrap(), cache_c); - cache.single_chunk_splice(*iter.next().unwrap(), cache_d); + // TODO: fix unwrap + let cache = TreeHashCache::new(leaves_and_subtrees, offset_handler).unwrap(); cache.into() } @@ -111,25 +101,17 @@ impl CachedTreeHash for Outer { type Item = Self; fn build_cache_bytes(&self) -> Vec { - let cache_a = self.a.build_cache_bytes(); - let cache_b = self.b.build_cache_bytes(); - let cache_c = self.c.build_cache_bytes(); + let mut leaves_and_subtrees = vec![]; - let mut leaves = vec![]; - leaves.extend_from_slice(&cache_a[0..32].to_vec()); - leaves.extend_from_slice(&cache_b[0..32].to_vec()); - leaves.extend_from_slice(&cache_c[0..32].to_vec()); - - // TODO: fix unwrap - let mut cache = TreeHashCache::from_bytes(merkleize(leaves)).unwrap(); + leaves_and_subtrees.append(&mut self.a.build_cache_bytes()); + leaves_and_subtrees.append(&mut self.b.build_cache_bytes()); + leaves_and_subtrees.append(&mut self.c.build_cache_bytes()); // TODO: fix unwrap let offset_handler = self.offset_handler(0).unwrap(); - let mut iter = offset_handler.iter_leaf_nodes(); - cache.single_chunk_splice(*iter.next().unwrap(), cache_a); - cache.single_chunk_splice(*iter.next().unwrap(), cache_b); - cache.single_chunk_splice(*iter.next().unwrap(), cache_c); + // TODO: fix unwrap + let cache = TreeHashCache::new(leaves_and_subtrees, offset_handler).unwrap(); cache.into() } From 267c978abb2b1bc42513866cff54aa818f648a8a Mon Sep 17 00:00:00 2001 From: Paul Hauner Date: Fri, 29 Mar 2019 02:36:34 +1100 Subject: [PATCH 17/89] Tidy cache hash API --- eth2/utils/ssz/src/cached_tree_hash.rs | 137 ++++++++++--------- eth2/utils/ssz/src/cached_tree_hash/impls.rs | 12 +- eth2/utils/ssz/src/cached_tree_hash/tests.rs | 79 +++++------ 3 files changed, 119 insertions(+), 109 deletions(-) diff --git 
a/eth2/utils/ssz/src/cached_tree_hash.rs b/eth2/utils/ssz/src/cached_tree_hash.rs index be3fe98de..3b900e503 100644 --- a/eth2/utils/ssz/src/cached_tree_hash.rs +++ b/eth2/utils/ssz/src/cached_tree_hash.rs @@ -11,16 +11,27 @@ const BYTES_PER_CHUNK: usize = 32; const HASHSIZE: usize = 32; const MERKLE_HASH_CHUNCK: usize = 2 * BYTES_PER_CHUNK; +#[derive(Debug, PartialEq, Clone)] +pub enum Error { + LeavesAndSubtreesIncomplete(usize), + ShouldNotProduceOffsetHandler, + NoFirstNode, + BytesAreNotEvenChunks(usize), + NoModifiedFieldForChunk(usize), + NoBytesForChunk(usize), + NoChildrenForHashing((usize, usize)), +} + pub trait CachedTreeHash { type Item: CachedTreeHash; - fn build_cache_bytes(&self) -> Vec; + fn build_cache(&self) -> Result; /// Return the number of bytes when this element is encoded as raw SSZ _without_ length /// prefixes. fn num_bytes(&self) -> usize; - fn offset_handler(&self, initial_offset: usize) -> Option; + fn offset_handler(&self, initial_offset: usize) -> Result; fn num_child_nodes(&self) -> usize; @@ -29,9 +40,10 @@ pub trait CachedTreeHash { other: &Self::Item, cache: &mut TreeHashCache, chunk: usize, - ) -> Option; + ) -> Result; } +#[derive(Debug, PartialEq, Clone)] pub struct TreeHashCache { cache: Vec, chunk_modified: Vec, @@ -44,11 +56,17 @@ impl Into> for TreeHashCache { } impl TreeHashCache { - pub fn new(mut leaves_and_subtrees: Vec, offset_handler: OffsetHandler) -> Option { - if leaves_and_subtrees.len() % BYTES_PER_CHUNK != 0 { - return None; - } + pub fn new(item: &T) -> Result + where + T: CachedTreeHash, + { + item.build_cache() + } + pub fn from_leaves_and_subtrees( + mut leaves_and_subtrees: Vec, + offset_handler: OffsetHandler, + ) -> Result { // Allocate enough bytes to store the internal nodes and the leaves and subtrees, then fill // all the to-be-built internal nodes with zeros and append the leaves and subtrees. 
let internal_node_bytes = offset_handler.num_internal_nodes * BYTES_PER_CHUNK; @@ -56,13 +74,22 @@ impl TreeHashCache { cache.resize(internal_node_bytes, 0); cache.append(&mut leaves_and_subtrees); + dbg!(cache.len() / BYTES_PER_CHUNK); + // Concat all the leaves into one big byte array, ready for `merkleize`. let mut leaves = vec![]; for leaf_chunk in offset_handler.iter_leaf_nodes() { let start = leaf_chunk * BYTES_PER_CHUNK; let end = start + BYTES_PER_CHUNK; - leaves.extend_from_slice(cache.get(start..end)?); + dbg!(end); + dbg!(cache.len()); + + leaves.extend_from_slice( + cache + .get(start..end) + .ok_or_else(|| Error::LeavesAndSubtreesIncomplete(*leaf_chunk))?, + ); } // Merkleize the leaves, then split the leaf nodes off them. Then, replace all-zeros @@ -71,18 +98,18 @@ impl TreeHashCache { merkleized.split_off(internal_node_bytes); cache.splice(0..internal_node_bytes, merkleized); - Some(Self { + Ok(Self { chunk_modified: vec![false; cache.len() / BYTES_PER_CHUNK], cache, }) } - pub fn from_bytes(bytes: Vec) -> Option { + pub fn from_bytes(bytes: Vec) -> Result { if bytes.len() % BYTES_PER_CHUNK > 0 { - return None; + return Err(Error::BytesAreNotEvenChunks(bytes.len())); } - Some(Self { + Ok(Self { chunk_modified: vec![false; bytes.len() / BYTES_PER_CHUNK], cache: bytes, }) @@ -121,15 +148,18 @@ impl TreeHashCache { Some(()) } - pub fn modify_chunk(&mut self, chunk: usize, to: &[u8]) -> Option<()> { + pub fn modify_chunk(&mut self, chunk: usize, to: &[u8]) -> Result<(), Error> { let start = chunk * BYTES_PER_CHUNK; let end = start + BYTES_PER_CHUNK; - self.cache.get_mut(start..end)?.copy_from_slice(to); + self.cache + .get_mut(start..end) + .ok_or_else(|| Error::NoBytesForChunk(chunk))? + .copy_from_slice(to); self.chunk_modified[chunk] = true; - Some(()) + Ok(()) } pub fn chunk_equals(&mut self, chunk: usize, other: &[u8]) -> Option { @@ -139,57 +169,30 @@ impl TreeHashCache { Some(self.cache.get(start..end)? 
== other) } - pub fn changed(&self, chunk: usize) -> Option { - self.chunk_modified.get(chunk).cloned() + pub fn changed(&self, chunk: usize) -> Result { + self.chunk_modified + .get(chunk) + .cloned() + .ok_or_else(|| Error::NoModifiedFieldForChunk(chunk)) } - pub fn either_modified(&self, children: (&usize, &usize)) -> Option { - Some(self.changed(*children.0)? | self.changed(*children.1)?) + pub fn either_modified(&self, children: (&usize, &usize)) -> Result { + Ok(self.changed(*children.0)? | self.changed(*children.1)?) } - /* - pub fn children_modified(&self, parent_chunk: usize, child_offsets: &[usize]) -> Option { - let children = children(parent_chunk); - - let a = *child_offsets.get(children.0)?; - let b = *child_offsets.get(children.1)?; - - Some(self.changed(a)? | self.changed(b)?) - } - */ - - pub fn hash_children(&self, children: (&usize, &usize)) -> Option> { + pub fn hash_children(&self, children: (&usize, &usize)) -> Result, Error> { let start = children.0 * BYTES_PER_CHUNK; let end = start + BYTES_PER_CHUNK * 2; - Some(hash(&self.cache.get(start..end)?)) + let children = &self + .cache + .get(start..end) + .ok_or_else(|| Error::NoChildrenForHashing((*children.0, *children.1)))?; + + Ok(hash(children)) } } -/* -pub struct LocalCache { - offsets: Vec, -} - -impl LocalCache { - -} - -pub struct OffsetBTree { - offsets: Vec, -} - -impl From> for OffsetBTree { - fn from(offsets: Vec) -> Self { - Self { offsets } - } -} - -impl OffsetBTree { - fn -} -*/ - fn children(parent: usize) -> (usize, usize) { ((2 * parent + 1), (2 * parent + 2)) } @@ -206,7 +209,7 @@ pub struct OffsetHandler { } impl OffsetHandler { - fn from_lengths(offset: usize, mut lengths: Vec) -> Self { + fn from_lengths(offset: usize, mut lengths: Vec) -> Result { // Extend it to the next power-of-two, if it is not already. 
let num_leaf_nodes = if lengths.len().is_power_of_two() { lengths.len() @@ -228,20 +231,23 @@ impl OffsetHandler { next_node += lengths[i]; } - Self { + Ok(Self { num_internal_nodes, num_leaf_nodes, offsets, next_node, - } + }) } pub fn total_nodes(&self) -> usize { self.num_internal_nodes + self.num_leaf_nodes } - pub fn first_leaf_node(&self) -> Option { - self.offsets.get(self.num_internal_nodes).cloned() + pub fn first_leaf_node(&self) -> Result { + self.offsets + .get(self.num_internal_nodes) + .cloned() + .ok_or_else(|| Error::NoFirstNode) } pub fn next_node(&self) -> usize { @@ -314,6 +320,15 @@ pub fn sanitise_bytes(mut bytes: Vec) -> Vec { bytes } +fn pad_for_leaf_count(num_leaves: usize, bytes: &mut Vec) { + let required_leaves = num_leaves.next_power_of_two(); + + bytes.resize( + bytes.len() + (required_leaves - num_leaves) * BYTES_PER_CHUNK, + 0, + ); +} + fn last_leaf_needs_padding(num_bytes: usize) -> bool { num_bytes % HASHSIZE != 0 } diff --git a/eth2/utils/ssz/src/cached_tree_hash/impls.rs b/eth2/utils/ssz/src/cached_tree_hash/impls.rs index a4d1c7d1a..9c0a8ec6d 100644 --- a/eth2/utils/ssz/src/cached_tree_hash/impls.rs +++ b/eth2/utils/ssz/src/cached_tree_hash/impls.rs @@ -4,16 +4,16 @@ use crate::{ssz_encode, Encodable}; impl CachedTreeHash for u64 { type Item = Self; - fn build_cache_bytes(&self) -> Vec { - merkleize(ssz_encode(self)) + fn build_cache(&self) -> Result { + TreeHashCache::from_bytes(merkleize(ssz_encode(self))) } fn num_bytes(&self) -> usize { 8 } - fn offset_handler(&self, _initial_offset: usize) -> Option { - None + fn offset_handler(&self, _initial_offset: usize) -> Result { + Err(Error::ShouldNotProduceOffsetHandler) } fn num_child_nodes(&self) -> usize { @@ -25,13 +25,13 @@ impl CachedTreeHash for u64 { other: &Self, cache: &mut TreeHashCache, chunk: usize, - ) -> Option { + ) -> Result { if self != other { let leaf = merkleize(ssz_encode(self)); cache.modify_chunk(chunk, &leaf)?; } - Some(chunk + 1) + Ok(chunk + 1) } } diff 
--git a/eth2/utils/ssz/src/cached_tree_hash/tests.rs b/eth2/utils/ssz/src/cached_tree_hash/tests.rs index 9cb012c79..a6be7f9ae 100644 --- a/eth2/utils/ssz/src/cached_tree_hash/tests.rs +++ b/eth2/utils/ssz/src/cached_tree_hash/tests.rs @@ -1,5 +1,5 @@ use super::*; -use int_to_bytes::{int_to_bytes32, int_to_bytes8}; +use int_to_bytes::int_to_bytes32; #[derive(Clone)] pub struct Inner { @@ -12,21 +12,19 @@ pub struct Inner { impl CachedTreeHash for Inner { type Item = Self; - fn build_cache_bytes(&self) -> Vec { + fn build_cache(&self) -> Result { + let offset_handler = self.offset_handler(0)?; + let mut leaves_and_subtrees = vec![]; - leaves_and_subtrees.append(&mut self.a.build_cache_bytes()); - leaves_and_subtrees.append(&mut self.b.build_cache_bytes()); - leaves_and_subtrees.append(&mut self.c.build_cache_bytes()); - leaves_and_subtrees.append(&mut self.d.build_cache_bytes()); + leaves_and_subtrees.append(&mut self.a.build_cache()?.into()); + leaves_and_subtrees.append(&mut self.b.build_cache()?.into()); + leaves_and_subtrees.append(&mut self.c.build_cache()?.into()); + leaves_and_subtrees.append(&mut self.d.build_cache()?.into()); - // TODO: fix unwrap - let offset_handler = self.offset_handler(0).unwrap(); + pad_for_leaf_count(offset_handler.num_leaf_nodes, &mut leaves_and_subtrees); - // TODO: fix unwrap - let cache = TreeHashCache::new(leaves_and_subtrees, offset_handler).unwrap(); - - cache.into() + TreeHashCache::from_leaves_and_subtrees(leaves_and_subtrees, self.offset_handler(0)?) 
} fn num_bytes(&self) -> usize { @@ -40,7 +38,7 @@ impl CachedTreeHash for Inner { bytes } - fn offset_handler(&self, initial_offset: usize) -> Option { + fn offset_handler(&self, initial_offset: usize) -> Result { let mut offsets = vec![]; offsets.push(self.a.num_child_nodes() + 1); @@ -48,7 +46,7 @@ impl CachedTreeHash for Inner { offsets.push(self.c.num_child_nodes() + 1); offsets.push(self.d.num_child_nodes() + 1); - Some(OffsetHandler::from_lengths(initial_offset, offsets)) + OffsetHandler::from_lengths(initial_offset, offsets) } fn num_child_nodes(&self) -> usize { @@ -68,7 +66,7 @@ impl CachedTreeHash for Inner { other: &Self, cache: &mut TreeHashCache, chunk: usize, - ) -> Option { + ) -> Result { let offset_handler = self.offset_handler(chunk)?; // Skip past the internal nodes and update any changed leaf nodes. @@ -86,7 +84,7 @@ impl CachedTreeHash for Inner { } } - Some(offset_handler.next_node()) + Ok(offset_handler.next_node()) } } @@ -100,20 +98,18 @@ pub struct Outer { impl CachedTreeHash for Outer { type Item = Self; - fn build_cache_bytes(&self) -> Vec { + fn build_cache(&self) -> Result { + let offset_handler = self.offset_handler(0)?; + let mut leaves_and_subtrees = vec![]; - leaves_and_subtrees.append(&mut self.a.build_cache_bytes()); - leaves_and_subtrees.append(&mut self.b.build_cache_bytes()); - leaves_and_subtrees.append(&mut self.c.build_cache_bytes()); + leaves_and_subtrees.append(&mut self.a.build_cache()?.into()); + leaves_and_subtrees.append(&mut self.b.build_cache()?.into()); + leaves_and_subtrees.append(&mut self.c.build_cache()?.into()); - // TODO: fix unwrap - let offset_handler = self.offset_handler(0).unwrap(); + pad_for_leaf_count(offset_handler.num_leaf_nodes, &mut leaves_and_subtrees); - // TODO: fix unwrap - let cache = TreeHashCache::new(leaves_and_subtrees, offset_handler).unwrap(); - - cache.into() + TreeHashCache::from_leaves_and_subtrees(leaves_and_subtrees, self.offset_handler(0)?) 
} fn num_bytes(&self) -> usize { @@ -135,14 +131,14 @@ impl CachedTreeHash for Outer { num_nodes(leaves) + children - 1 } - fn offset_handler(&self, initial_offset: usize) -> Option { + fn offset_handler(&self, initial_offset: usize) -> Result { let mut offsets = vec![]; offsets.push(self.a.num_child_nodes() + 1); offsets.push(self.b.num_child_nodes() + 1); offsets.push(self.c.num_child_nodes() + 1); - Some(OffsetHandler::from_lengths(initial_offset, offsets)) + OffsetHandler::from_lengths(initial_offset, offsets) } fn cached_hash_tree_root( @@ -150,7 +146,7 @@ impl CachedTreeHash for Outer { other: &Self, cache: &mut TreeHashCache, chunk: usize, - ) -> Option { + ) -> Result { let offset_handler = self.offset_handler(chunk)?; // Skip past the internal nodes and update any changed leaf nodes. @@ -167,7 +163,7 @@ impl CachedTreeHash for Outer { } } - Some(offset_handler.next_node()) + Ok(offset_handler.next_node()) } } @@ -199,17 +195,16 @@ fn partial_modification_to_inner_struct() { ..original_inner.clone() }; - // Build the initial cache. - let original_cache = original_outer.build_cache_bytes(); - // Modify outer let modified_outer = Outer { b: modified_inner.clone(), ..original_outer.clone() }; + println!("AAAAAAAAA"); // Perform a differential hash - let mut cache_struct = TreeHashCache::from_bytes(original_cache.clone()).unwrap(); + let mut cache_struct = TreeHashCache::new(&original_outer).unwrap(); + println!("BBBBBBBBBB"); modified_outer .cached_hash_tree_root(&original_outer, &mut cache_struct, 0) @@ -220,7 +215,7 @@ fn partial_modification_to_inner_struct() { // Generate reference data. let mut data = vec![]; data.append(&mut int_to_bytes32(0)); - let inner_bytes = modified_inner.build_cache_bytes(); + let inner_bytes: Vec = TreeHashCache::new(&modified_inner).unwrap().into(); data.append(&mut int_to_bytes32(5)); let leaves = vec![ @@ -254,7 +249,7 @@ fn partial_modification_to_outer() { }; // Build the initial cache. 
- let original_cache = original_outer.build_cache_bytes(); + // let original_cache = original_outer.build_cache_bytes(); // Modify outer let modified_outer = Outer { @@ -263,7 +258,7 @@ fn partial_modification_to_outer() { }; // Perform a differential hash - let mut cache_struct = TreeHashCache::from_bytes(original_cache.clone()).unwrap(); + let mut cache_struct = TreeHashCache::new(&original_outer).unwrap(); modified_outer .cached_hash_tree_root(&original_outer, &mut cache_struct, 0) @@ -274,7 +269,7 @@ fn partial_modification_to_outer() { // Generate reference data. let mut data = vec![]; data.append(&mut int_to_bytes32(0)); - let inner_bytes = inner.build_cache_bytes(); + let inner_bytes: Vec = TreeHashCache::new(&inner).unwrap().into(); data.append(&mut int_to_bytes32(5)); let leaves = vec![ @@ -308,12 +303,12 @@ fn outer_builds() { }; // Build the function output. - let cache = outer.build_cache_bytes(); + let cache: Vec = TreeHashCache::new(&outer).unwrap().into(); // Generate reference data. 
let mut data = vec![]; data.append(&mut int_to_bytes32(0)); - let inner_bytes = inner.build_cache_bytes(); + let inner_bytes: Vec = inner.build_cache().unwrap().into(); data.append(&mut int_to_bytes32(5)); let leaves = vec![ @@ -427,7 +422,7 @@ fn generic_test(index: usize) { d: 4, }; - let cache = inner.build_cache_bytes(); + let cache: Vec = TreeHashCache::new(&inner).unwrap().into(); let changed_inner = match index { 0 => Inner { @@ -498,7 +493,7 @@ fn inner_builds() { d: 4, }; - let cache = inner.build_cache_bytes(); + let cache: Vec = TreeHashCache::new(&inner).unwrap().into(); assert_eq!(expected, cache); } From e0104e61997d6ca868829502c8775bae07952fef Mon Sep 17 00:00:00 2001 From: Paul Hauner Date: Fri, 29 Mar 2019 13:04:01 +1100 Subject: [PATCH 18/89] Move offset_handler construction into self --- eth2/utils/ssz/src/cached_tree_hash.rs | 17 ++++++-- eth2/utils/ssz/src/cached_tree_hash/impls.rs | 6 +-- eth2/utils/ssz/src/cached_tree_hash/tests.rs | 44 ++++++++------------ 3 files changed, 35 insertions(+), 32 deletions(-) diff --git a/eth2/utils/ssz/src/cached_tree_hash.rs b/eth2/utils/ssz/src/cached_tree_hash.rs index 3b900e503..83b516ac7 100644 --- a/eth2/utils/ssz/src/cached_tree_hash.rs +++ b/eth2/utils/ssz/src/cached_tree_hash.rs @@ -25,13 +25,13 @@ pub enum Error { pub trait CachedTreeHash { type Item: CachedTreeHash; - fn build_cache(&self) -> Result; + fn leaves_and_subtrees(&self) -> Vec; /// Return the number of bytes when this element is encoded as raw SSZ _without_ length /// prefixes. fn num_bytes(&self) -> usize; - fn offset_handler(&self, initial_offset: usize) -> Result; + fn offsets(&self) -> Result, Error>; fn num_child_nodes(&self) -> usize; @@ -60,13 +60,17 @@ impl TreeHashCache { where T: CachedTreeHash, { - item.build_cache() + Self::from_leaves_and_subtrees(item.leaves_and_subtrees(), OffsetHandler::new(item, 0)?) 
} pub fn from_leaves_and_subtrees( mut leaves_and_subtrees: Vec, offset_handler: OffsetHandler, ) -> Result { + // Pad the leaves with zeros if the number of immediate leaf-nodes (without recursing into + // sub-trees) is not an even power-of-two. + pad_for_leaf_count(offset_handler.num_leaf_nodes, &mut leaves_and_subtrees); + // Allocate enough bytes to store the internal nodes and the leaves and subtrees, then fill // all the to-be-built internal nodes with zeros and append the leaves and subtrees. let internal_node_bytes = offset_handler.num_internal_nodes * BYTES_PER_CHUNK; @@ -209,6 +213,13 @@ pub struct OffsetHandler { } impl OffsetHandler { + pub fn new(item: &T, initial_offset: usize) -> Result + where + T: CachedTreeHash, + { + Self::from_lengths(initial_offset, item.offsets()?) + } + fn from_lengths(offset: usize, mut lengths: Vec) -> Result { // Extend it to the next power-of-two, if it is not already. let num_leaf_nodes = if lengths.len().is_power_of_two() { diff --git a/eth2/utils/ssz/src/cached_tree_hash/impls.rs b/eth2/utils/ssz/src/cached_tree_hash/impls.rs index 9c0a8ec6d..54a690c6d 100644 --- a/eth2/utils/ssz/src/cached_tree_hash/impls.rs +++ b/eth2/utils/ssz/src/cached_tree_hash/impls.rs @@ -4,15 +4,15 @@ use crate::{ssz_encode, Encodable}; impl CachedTreeHash for u64 { type Item = Self; - fn build_cache(&self) -> Result { - TreeHashCache::from_bytes(merkleize(ssz_encode(self))) + fn leaves_and_subtrees(&self) -> Vec { + merkleize(ssz_encode(self)) } fn num_bytes(&self) -> usize { 8 } - fn offset_handler(&self, _initial_offset: usize) -> Result { + fn offsets(&self) -> Result, Error> { Err(Error::ShouldNotProduceOffsetHandler) } diff --git a/eth2/utils/ssz/src/cached_tree_hash/tests.rs b/eth2/utils/ssz/src/cached_tree_hash/tests.rs index a6be7f9ae..f6c52ef8f 100644 --- a/eth2/utils/ssz/src/cached_tree_hash/tests.rs +++ b/eth2/utils/ssz/src/cached_tree_hash/tests.rs @@ -12,19 +12,15 @@ pub struct Inner { impl CachedTreeHash for Inner { type Item = 
Self; - fn build_cache(&self) -> Result { - let offset_handler = self.offset_handler(0)?; - + fn leaves_and_subtrees(&self) -> Vec { let mut leaves_and_subtrees = vec![]; - leaves_and_subtrees.append(&mut self.a.build_cache()?.into()); - leaves_and_subtrees.append(&mut self.b.build_cache()?.into()); - leaves_and_subtrees.append(&mut self.c.build_cache()?.into()); - leaves_and_subtrees.append(&mut self.d.build_cache()?.into()); + leaves_and_subtrees.append(&mut self.a.leaves_and_subtrees()); + leaves_and_subtrees.append(&mut self.b.leaves_and_subtrees()); + leaves_and_subtrees.append(&mut self.c.leaves_and_subtrees()); + leaves_and_subtrees.append(&mut self.d.leaves_and_subtrees()); - pad_for_leaf_count(offset_handler.num_leaf_nodes, &mut leaves_and_subtrees); - - TreeHashCache::from_leaves_and_subtrees(leaves_and_subtrees, self.offset_handler(0)?) + leaves_and_subtrees } fn num_bytes(&self) -> usize { @@ -38,7 +34,7 @@ impl CachedTreeHash for Inner { bytes } - fn offset_handler(&self, initial_offset: usize) -> Result { + fn offsets(&self) -> Result, Error> { let mut offsets = vec![]; offsets.push(self.a.num_child_nodes() + 1); @@ -46,7 +42,7 @@ impl CachedTreeHash for Inner { offsets.push(self.c.num_child_nodes() + 1); offsets.push(self.d.num_child_nodes() + 1); - OffsetHandler::from_lengths(initial_offset, offsets) + Ok(offsets) } fn num_child_nodes(&self) -> usize { @@ -67,7 +63,7 @@ impl CachedTreeHash for Inner { cache: &mut TreeHashCache, chunk: usize, ) -> Result { - let offset_handler = self.offset_handler(chunk)?; + let offset_handler = OffsetHandler::new(self, chunk)?; // Skip past the internal nodes and update any changed leaf nodes. 
{ @@ -98,18 +94,14 @@ pub struct Outer { impl CachedTreeHash for Outer { type Item = Self; - fn build_cache(&self) -> Result { - let offset_handler = self.offset_handler(0)?; - + fn leaves_and_subtrees(&self) -> Vec { let mut leaves_and_subtrees = vec![]; - leaves_and_subtrees.append(&mut self.a.build_cache()?.into()); - leaves_and_subtrees.append(&mut self.b.build_cache()?.into()); - leaves_and_subtrees.append(&mut self.c.build_cache()?.into()); + leaves_and_subtrees.append(&mut self.a.leaves_and_subtrees()); + leaves_and_subtrees.append(&mut self.b.leaves_and_subtrees()); + leaves_and_subtrees.append(&mut self.c.leaves_and_subtrees()); - pad_for_leaf_count(offset_handler.num_leaf_nodes, &mut leaves_and_subtrees); - - TreeHashCache::from_leaves_and_subtrees(leaves_and_subtrees, self.offset_handler(0)?) + leaves_and_subtrees } fn num_bytes(&self) -> usize { @@ -131,14 +123,14 @@ impl CachedTreeHash for Outer { num_nodes(leaves) + children - 1 } - fn offset_handler(&self, initial_offset: usize) -> Result { + fn offsets(&self) -> Result, Error> { let mut offsets = vec![]; offsets.push(self.a.num_child_nodes() + 1); offsets.push(self.b.num_child_nodes() + 1); offsets.push(self.c.num_child_nodes() + 1); - OffsetHandler::from_lengths(initial_offset, offsets) + Ok(offsets) } fn cached_hash_tree_root( @@ -147,7 +139,7 @@ impl CachedTreeHash for Outer { cache: &mut TreeHashCache, chunk: usize, ) -> Result { - let offset_handler = self.offset_handler(chunk)?; + let offset_handler = OffsetHandler::new(self, chunk)?; // Skip past the internal nodes and update any changed leaf nodes. { @@ -308,7 +300,7 @@ fn outer_builds() { // Generate reference data. 
let mut data = vec![]; data.append(&mut int_to_bytes32(0)); - let inner_bytes: Vec = inner.build_cache().unwrap().into(); + let inner_bytes: Vec = TreeHashCache::new(&inner).unwrap().into(); data.append(&mut int_to_bytes32(5)); let leaves = vec![ From fc17d5fea4c46ae35ab11dc9c6b1ae7a93dfaccb Mon Sep 17 00:00:00 2001 From: Paul Hauner Date: Fri, 29 Mar 2019 14:37:27 +1100 Subject: [PATCH 19/89] Fix failing tree hash tests --- eth2/utils/ssz/src/cached_tree_hash.rs | 72 ++++++++++++-------- eth2/utils/ssz/src/cached_tree_hash/impls.rs | 4 +- eth2/utils/ssz/src/cached_tree_hash/tests.rs | 38 ++++++----- 3 files changed, 68 insertions(+), 46 deletions(-) diff --git a/eth2/utils/ssz/src/cached_tree_hash.rs b/eth2/utils/ssz/src/cached_tree_hash.rs index 83b516ac7..510185b40 100644 --- a/eth2/utils/ssz/src/cached_tree_hash.rs +++ b/eth2/utils/ssz/src/cached_tree_hash.rs @@ -13,9 +13,9 @@ const MERKLE_HASH_CHUNCK: usize = 2 * BYTES_PER_CHUNK; #[derive(Debug, PartialEq, Clone)] pub enum Error { - LeavesAndSubtreesIncomplete(usize), ShouldNotProduceOffsetHandler, NoFirstNode, + NoBytesForRoot, BytesAreNotEvenChunks(usize), NoModifiedFieldForChunk(usize), NoBytesForChunk(usize), @@ -25,7 +25,7 @@ pub enum Error { pub trait CachedTreeHash { type Item: CachedTreeHash; - fn leaves_and_subtrees(&self) -> Vec; + fn build_tree_hash_cache(&self) -> Result; /// Return the number of bytes when this element is encoded as raw SSZ _without_ length /// prefixes. @@ -60,42 +60,44 @@ impl TreeHashCache { where T: CachedTreeHash, { - Self::from_leaves_and_subtrees(item.leaves_and_subtrees(), OffsetHandler::new(item, 0)?) + item.build_tree_hash_cache() } - pub fn from_leaves_and_subtrees( - mut leaves_and_subtrees: Vec, - offset_handler: OffsetHandler, - ) -> Result { - // Pad the leaves with zeros if the number of immediate leaf-nodes (without recursing into - // sub-trees) is not an even power-of-two. 
- pad_for_leaf_count(offset_handler.num_leaf_nodes, &mut leaves_and_subtrees); + pub fn from_leaves_and_subtrees( + item: &T, + leaves_and_subtrees: Vec, + ) -> Result + where + T: CachedTreeHash, + { + let offset_handler = OffsetHandler::new(item, 0)?; + + // Note how many leaves were provided. If is not a power-of-two, we'll need to pad it out + // later. + let num_provided_leaf_nodes = leaves_and_subtrees.len(); // Allocate enough bytes to store the internal nodes and the leaves and subtrees, then fill // all the to-be-built internal nodes with zeros and append the leaves and subtrees. let internal_node_bytes = offset_handler.num_internal_nodes * BYTES_PER_CHUNK; - let mut cache = Vec::with_capacity(internal_node_bytes + leaves_and_subtrees.len()); + let leaves_and_subtrees_bytes = leaves_and_subtrees + .iter() + .fold(0, |acc, t| acc + t.bytes_len()); + let mut cache = Vec::with_capacity(leaves_and_subtrees_bytes + internal_node_bytes); cache.resize(internal_node_bytes, 0); - cache.append(&mut leaves_and_subtrees); - dbg!(cache.len() / BYTES_PER_CHUNK); + // Allocate enough bytes to store all the leaves. + let mut leaves = Vec::with_capacity(offset_handler.num_leaf_nodes * HASHSIZE); - // Concat all the leaves into one big byte array, ready for `merkleize`. - let mut leaves = vec![]; - for leaf_chunk in offset_handler.iter_leaf_nodes() { - let start = leaf_chunk * BYTES_PER_CHUNK; - let end = start + BYTES_PER_CHUNK; - - dbg!(end); - dbg!(cache.len()); - - leaves.extend_from_slice( - cache - .get(start..end) - .ok_or_else(|| Error::LeavesAndSubtreesIncomplete(*leaf_chunk))?, - ); + // Iterate through all of the leaves/subtrees, adding their root as a leaf node and then + // concatenating their merkle trees. + for t in leaves_and_subtrees { + leaves.append(&mut t.root()?); + cache.append(&mut t.into_merkle_tree()); } + // Pad the leaves to an even power-of-two, using zeros. 
+ pad_for_leaf_count(num_provided_leaf_nodes, &mut cache); + // Merkleize the leaves, then split the leaf nodes off them. Then, replace all-zeros // internal nodes created earlier with the internal nodes generated by `merkleize`. let mut merkleized = merkleize(leaves); @@ -108,6 +110,17 @@ impl TreeHashCache { }) } + pub fn bytes_len(&self) -> usize { + self.cache.len() + } + + pub fn root(&self) -> Result, Error> { + self.cache + .get(0..HASHSIZE) + .ok_or_else(|| Error::NoBytesForRoot) + .and_then(|slice| Ok(slice.to_vec())) + } + pub fn from_bytes(bytes: Vec) -> Result { if bytes.len() % BYTES_PER_CHUNK > 0 { return Err(Error::BytesAreNotEvenChunks(bytes.len())); @@ -195,6 +208,10 @@ impl TreeHashCache { Ok(hash(children)) } + + pub fn into_merkle_tree(self) -> Vec { + self.cache + } } fn children(parent: usize) -> (usize, usize) { @@ -205,6 +222,7 @@ fn num_nodes(num_leaves: usize) -> usize { 2 * num_leaves - 1 } +#[derive(Debug)] pub struct OffsetHandler { num_internal_nodes: usize, num_leaf_nodes: usize, diff --git a/eth2/utils/ssz/src/cached_tree_hash/impls.rs b/eth2/utils/ssz/src/cached_tree_hash/impls.rs index 54a690c6d..012a4a8be 100644 --- a/eth2/utils/ssz/src/cached_tree_hash/impls.rs +++ b/eth2/utils/ssz/src/cached_tree_hash/impls.rs @@ -4,8 +4,8 @@ use crate::{ssz_encode, Encodable}; impl CachedTreeHash for u64 { type Item = Self; - fn leaves_and_subtrees(&self) -> Vec { - merkleize(ssz_encode(self)) + fn build_tree_hash_cache(&self) -> Result { + Ok(TreeHashCache::from_bytes(merkleize(ssz_encode(self)))?) 
} fn num_bytes(&self) -> usize { diff --git a/eth2/utils/ssz/src/cached_tree_hash/tests.rs b/eth2/utils/ssz/src/cached_tree_hash/tests.rs index f6c52ef8f..0593b2bae 100644 --- a/eth2/utils/ssz/src/cached_tree_hash/tests.rs +++ b/eth2/utils/ssz/src/cached_tree_hash/tests.rs @@ -12,15 +12,18 @@ pub struct Inner { impl CachedTreeHash for Inner { type Item = Self; - fn leaves_and_subtrees(&self) -> Vec { - let mut leaves_and_subtrees = vec![]; + fn build_tree_hash_cache(&self) -> Result { + let tree = TreeHashCache::from_leaves_and_subtrees( + self, + vec![ + self.a.build_tree_hash_cache()?, + self.b.build_tree_hash_cache()?, + self.c.build_tree_hash_cache()?, + self.d.build_tree_hash_cache()?, + ], + )?; - leaves_and_subtrees.append(&mut self.a.leaves_and_subtrees()); - leaves_and_subtrees.append(&mut self.b.leaves_and_subtrees()); - leaves_and_subtrees.append(&mut self.c.leaves_and_subtrees()); - leaves_and_subtrees.append(&mut self.d.leaves_and_subtrees()); - - leaves_and_subtrees + Ok(tree) } fn num_bytes(&self) -> usize { @@ -94,14 +97,17 @@ pub struct Outer { impl CachedTreeHash for Outer { type Item = Self; - fn leaves_and_subtrees(&self) -> Vec { - let mut leaves_and_subtrees = vec![]; + fn build_tree_hash_cache(&self) -> Result { + let tree = TreeHashCache::from_leaves_and_subtrees( + self, + vec![ + self.a.build_tree_hash_cache()?, + self.b.build_tree_hash_cache()?, + self.c.build_tree_hash_cache()?, + ], + )?; - leaves_and_subtrees.append(&mut self.a.leaves_and_subtrees()); - leaves_and_subtrees.append(&mut self.b.leaves_and_subtrees()); - leaves_and_subtrees.append(&mut self.c.leaves_and_subtrees()); - - leaves_and_subtrees + Ok(tree) } fn num_bytes(&self) -> usize { @@ -193,10 +199,8 @@ fn partial_modification_to_inner_struct() { ..original_outer.clone() }; - println!("AAAAAAAAA"); // Perform a differential hash let mut cache_struct = TreeHashCache::new(&original_outer).unwrap(); - println!("BBBBBBBBBB"); modified_outer 
.cached_hash_tree_root(&original_outer, &mut cache_struct, 0) From 56fe15625bf349b84e7c40b84c9fcb13e8112ec8 Mon Sep 17 00:00:00 2001 From: Paul Hauner Date: Wed, 10 Apr 2019 15:47:42 +1000 Subject: [PATCH 20/89] Allow for building cached vec --- eth2/utils/ssz/src/cached_tree_hash.rs | 12 +++ eth2/utils/ssz/src/cached_tree_hash/impls.rs | 74 +++++++++++++++--- eth2/utils/ssz/src/cached_tree_hash/tests.rs | 80 +++++++++++++++++++- 3 files changed, 153 insertions(+), 13 deletions(-) diff --git a/eth2/utils/ssz/src/cached_tree_hash.rs b/eth2/utils/ssz/src/cached_tree_hash.rs index 510185b40..ba55fbf1b 100644 --- a/eth2/utils/ssz/src/cached_tree_hash.rs +++ b/eth2/utils/ssz/src/cached_tree_hash.rs @@ -22,9 +22,18 @@ pub enum Error { NoChildrenForHashing((usize, usize)), } +#[derive(Debug, PartialEq, Clone)] +pub enum ItemType { + Basic, + List, + Composite, +} + pub trait CachedTreeHash { type Item: CachedTreeHash; + fn item_type() -> ItemType; + fn build_tree_hash_cache(&self) -> Result; /// Return the number of bytes when this element is encoded as raw SSZ _without_ length @@ -35,6 +44,8 @@ pub trait CachedTreeHash { fn num_child_nodes(&self) -> usize; + fn packed_encoding(&self) -> Vec; + fn cached_hash_tree_root( &self, other: &Self::Item, @@ -101,6 +112,7 @@ impl TreeHashCache { // Merkleize the leaves, then split the leaf nodes off them. Then, replace all-zeros // internal nodes created earlier with the internal nodes generated by `merkleize`. 
let mut merkleized = merkleize(leaves); + dbg!(&merkleized); merkleized.split_off(internal_node_bytes); cache.splice(0..internal_node_bytes, merkleized); diff --git a/eth2/utils/ssz/src/cached_tree_hash/impls.rs b/eth2/utils/ssz/src/cached_tree_hash/impls.rs index 012a4a8be..e088d481d 100644 --- a/eth2/utils/ssz/src/cached_tree_hash/impls.rs +++ b/eth2/utils/ssz/src/cached_tree_hash/impls.rs @@ -4,6 +4,10 @@ use crate::{ssz_encode, Encodable}; impl CachedTreeHash for u64 { type Item = Self; + fn item_type() -> ItemType { + ItemType::Basic + } + fn build_tree_hash_cache(&self) -> Result { Ok(TreeHashCache::from_bytes(merkleize(ssz_encode(self)))?) } @@ -20,6 +24,10 @@ impl CachedTreeHash for u64 { 0 } + fn packed_encoding(&self) -> Vec { + ssz_encode(self) + } + fn cached_hash_tree_root( &self, other: &Self, @@ -35,38 +43,73 @@ impl CachedTreeHash for u64 { } } -/* impl CachedTreeHash for Vec where - T: CachedTreeHash + Encodable, + T: CachedTreeHash, { type Item = Self; - fn build_cache_bytes(&self) -> Vec { - let num_packed_bytes = self.num_bytes(); - let num_leaves = num_sanitized_leaves(num_packed_bytes); + fn item_type() -> ItemType { + ItemType::List + } - let mut packed = Vec::with_capacity(num_leaves * HASHSIZE); + fn build_tree_hash_cache(&self) -> Result { + match T::item_type() { + ItemType::Basic => { + let num_packed_bytes = self.num_bytes(); + let num_leaves = num_sanitized_leaves(num_packed_bytes); + + let mut packed = Vec::with_capacity(num_leaves * HASHSIZE); + + for item in self { + packed.append(&mut item.packed_encoding()); + } + + let packed = sanitise_bytes(packed); + + TreeHashCache::from_bytes(merkleize(packed)) + } + ItemType::Composite | ItemType::List => { + let subtrees = self + .iter() + .map(|item| TreeHashCache::new(item)) + .collect::, _>>()?; + + TreeHashCache::from_leaves_and_subtrees(self, subtrees) + } + } + } + + fn offsets(&self) -> Result, Error> { + let mut offsets = vec![]; for item in self { - packed.append(&mut 
ssz_encode(item)); + offsets.push(item.offsets()?.iter().sum()) } - let packed = sanitise_bytes(packed); + Ok(offsets) + } - merkleize(packed) + fn num_child_nodes(&self) -> usize { + // TODO + 42 } fn num_bytes(&self) -> usize { self.iter().fold(0, |acc, item| acc + item.num_bytes()) } + fn packed_encoding(&self) -> Vec { + panic!("List should never be packed") + } + fn cached_hash_tree_root( &self, other: &Self::Item, cache: &mut TreeHashCache, chunk: usize, - ) -> Option { + ) -> Result { + /* let num_packed_bytes = self.num_bytes(); let num_leaves = num_sanitized_leaves(num_packed_bytes); @@ -103,6 +146,17 @@ where } Some(chunk + num_nodes) + */ + // TODO + Ok(42) } } + +/* +fn get_packed_leaves(vec: Vec) -> Vec +where + T: Encodable, +{ + // +} */ diff --git a/eth2/utils/ssz/src/cached_tree_hash/tests.rs b/eth2/utils/ssz/src/cached_tree_hash/tests.rs index 0593b2bae..8124a8dd8 100644 --- a/eth2/utils/ssz/src/cached_tree_hash/tests.rs +++ b/eth2/utils/ssz/src/cached_tree_hash/tests.rs @@ -1,5 +1,6 @@ use super::*; -use int_to_bytes::int_to_bytes32; +use crate::Encodable; +use int_to_bytes::{int_to_bytes32, int_to_bytes8}; #[derive(Clone)] pub struct Inner { @@ -12,6 +13,10 @@ pub struct Inner { impl CachedTreeHash for Inner { type Item = Self; + fn item_type() -> ItemType { + ItemType::Composite + } + fn build_tree_hash_cache(&self) -> Result { let tree = TreeHashCache::from_leaves_and_subtrees( self, @@ -60,6 +65,10 @@ impl CachedTreeHash for Inner { num_nodes(leaves) + children - 1 } + fn packed_encoding(&self) -> Vec { + panic!("Struct should never be packed") + } + fn cached_hash_tree_root( &self, other: &Self, @@ -97,6 +106,10 @@ pub struct Outer { impl CachedTreeHash for Outer { type Item = Self; + fn item_type() -> ItemType { + ItemType::Composite + } + fn build_tree_hash_cache(&self) -> Result { let tree = TreeHashCache::from_leaves_and_subtrees( self, @@ -139,6 +152,10 @@ impl CachedTreeHash for Outer { Ok(offsets) } + fn packed_encoding(&self) -> Vec 
{ + panic!("Struct should never be packed") + } + fn cached_hash_tree_root( &self, other: &Self, @@ -371,6 +388,64 @@ fn large_vec_of_u64_builds() { assert_eq!(expected, cache); } +*/ + +#[test] +fn vec_of_inner_builds() { + let numbers: Vec = (0..12).collect(); + + let mut leaves = vec![]; + let mut full_bytes = vec![]; + + for n in numbers.chunks(4) { + let mut merkle = merkleize(join(vec![ + int_to_bytes32(n[0]), + int_to_bytes32(n[1]), + int_to_bytes32(n[2]), + int_to_bytes32(n[3]), + ])); + leaves.append(&mut merkle[0..HASHSIZE].to_vec()); + full_bytes.append(&mut merkle); + } + + let mut expected = merkleize(leaves); + expected.splice(3 * HASHSIZE.., full_bytes); + expected.append(&mut vec![0; HASHSIZE]); + + let my_vec = vec![ + Inner { + a: 0, + b: 1, + c: 2, + d: 3, + }, + Inner { + a: 4, + b: 5, + c: 6, + d: 7, + }, + Inner { + a: 8, + b: 9, + c: 10, + d: 11, + }, + ]; + + let cache: Vec = TreeHashCache::new(&my_vec).unwrap().into(); + + assert_trees_eq(&expected, &cache); +} + +/// Provides detailed assertions when comparing merkle trees. 
+fn assert_trees_eq(a: &[u8], b: &[u8]) { + assert_eq!(a.len(), b.len(), "Byte lens different"); + for i in 0..a.len() / HASHSIZE { + let range = i * HASHSIZE..(i + 1) * HASHSIZE; + assert_eq!(a[range.clone()], b[range], "Chunk {} different", i); + } +} #[test] fn vec_of_u64_builds() { @@ -387,11 +462,10 @@ fn vec_of_u64_builds() { let my_vec = vec![1, 2, 3, 4, 5]; - let cache = my_vec.build_cache_bytes(); + let cache: Vec = TreeHashCache::new(&my_vec).unwrap().into(); assert_eq!(expected, cache); } -*/ #[test] fn merkleize_odd() { From e5783d43a9c3bb8d389737b9ee3b676be70062b6 Mon Sep 17 00:00:00 2001 From: Paul Hauner Date: Wed, 10 Apr 2019 16:59:14 +1000 Subject: [PATCH 21/89] First passing vec modified cache test --- eth2/utils/ssz/src/cached_tree_hash.rs | 29 ++++- eth2/utils/ssz/src/cached_tree_hash/impls.rs | 110 +++++++++---------- eth2/utils/ssz/src/cached_tree_hash/tests.rs | 18 ++- 3 files changed, 90 insertions(+), 67 deletions(-) diff --git a/eth2/utils/ssz/src/cached_tree_hash.rs b/eth2/utils/ssz/src/cached_tree_hash.rs index ba55fbf1b..e7f2114e4 100644 --- a/eth2/utils/ssz/src/cached_tree_hash.rs +++ b/eth2/utils/ssz/src/cached_tree_hash.rs @@ -46,6 +46,8 @@ pub trait CachedTreeHash { fn packed_encoding(&self) -> Vec; + fn packing_factor() -> usize; + fn cached_hash_tree_root( &self, other: &Self::Item, @@ -165,16 +167,19 @@ impl TreeHashCache { self.cache.splice(byte_start..byte_end, replace_with) } - pub fn maybe_update_chunk(&mut self, chunk: usize, to: &[u8]) -> Option<()> { + pub fn maybe_update_chunk(&mut self, chunk: usize, to: &[u8]) -> Result<(), Error> { let start = chunk * BYTES_PER_CHUNK; let end = start + BYTES_PER_CHUNK; if !self.chunk_equals(chunk, to)? { - self.cache.get_mut(start..end)?.copy_from_slice(to); + self.cache + .get_mut(start..end) + .ok_or_else(|| Error::NoModifiedFieldForChunk(chunk))? 
+ .copy_from_slice(to); self.chunk_modified[chunk] = true; } - Some(()) + Ok(()) } pub fn modify_chunk(&mut self, chunk: usize, to: &[u8]) -> Result<(), Error> { @@ -191,11 +196,25 @@ impl TreeHashCache { Ok(()) } - pub fn chunk_equals(&mut self, chunk: usize, other: &[u8]) -> Option { + pub fn chunk_equals(&mut self, chunk: usize, other: &[u8]) -> Result { let start = chunk * BYTES_PER_CHUNK; let end = start + BYTES_PER_CHUNK; - Some(self.cache.get(start..end)? == other) + Ok(self + .cache + .get(start..end) + .ok_or_else(|| Error::NoModifiedFieldForChunk(chunk))? + == other) + } + + pub fn set_changed(&mut self, chunk: usize, to: bool) -> Result<(), Error> { + if chunk < self.chunk_modified.len() { + self.chunk_modified[chunk] = to; + + Ok(()) + } else { + Err(Error::NoModifiedFieldForChunk(chunk)) + } } pub fn changed(&self, chunk: usize) -> Result { diff --git a/eth2/utils/ssz/src/cached_tree_hash/impls.rs b/eth2/utils/ssz/src/cached_tree_hash/impls.rs index e088d481d..621c5d02b 100644 --- a/eth2/utils/ssz/src/cached_tree_hash/impls.rs +++ b/eth2/utils/ssz/src/cached_tree_hash/impls.rs @@ -28,6 +28,10 @@ impl CachedTreeHash for u64 { ssz_encode(self) } + fn packing_factor() -> usize { + 32 / 8 + } + fn cached_hash_tree_root( &self, other: &Self, @@ -55,20 +59,7 @@ where fn build_tree_hash_cache(&self) -> Result { match T::item_type() { - ItemType::Basic => { - let num_packed_bytes = self.num_bytes(); - let num_leaves = num_sanitized_leaves(num_packed_bytes); - - let mut packed = Vec::with_capacity(num_leaves * HASHSIZE); - - for item in self { - packed.append(&mut item.packed_encoding()); - } - - let packed = sanitise_bytes(packed); - - TreeHashCache::from_bytes(merkleize(packed)) - } + ItemType::Basic => TreeHashCache::from_bytes(merkleize(get_packed_leaves(self))), ItemType::Composite | ItemType::List => { let subtrees = self .iter() @@ -81,11 +72,18 @@ where } fn offsets(&self) -> Result, Error> { - let mut offsets = vec![]; + let offsets = match 
T::item_type() { + ItemType::Basic => vec![1; self.len() / T::packing_factor()], + ItemType::Composite | ItemType::List => { + let mut offsets = vec![]; - for item in self { - offsets.push(item.offsets()?.iter().sum()) - } + for item in self { + offsets.push(item.offsets()?.iter().sum()) + } + + offsets + } + }; Ok(offsets) } @@ -103,60 +101,58 @@ where panic!("List should never be packed") } + fn packing_factor() -> usize { + 1 + } + fn cached_hash_tree_root( &self, other: &Self::Item, cache: &mut TreeHashCache, chunk: usize, ) -> Result { - /* - let num_packed_bytes = self.num_bytes(); - let num_leaves = num_sanitized_leaves(num_packed_bytes); + let offset_handler = OffsetHandler::new(self, chunk)?; - if num_leaves != num_sanitized_leaves(other.num_bytes()) { - panic!("Need to handle a change in leaf count"); + match T::item_type() { + ItemType::Basic => { + let leaves = get_packed_leaves(self); + + for (i, chunk) in offset_handler.iter_leaf_nodes().enumerate() { + if let Some(latest) = leaves.get(i * HASHSIZE..(i + 1) * HASHSIZE) { + if !cache.chunk_equals(*chunk, latest)? { + dbg!(chunk); + cache.set_changed(*chunk, true)?; + } + } + } + let first_leaf_chunk = offset_handler.first_leaf_node()?; + cache.chunk_splice(first_leaf_chunk..offset_handler.next_node, leaves); + } + _ => panic!("not implemented"), } - let mut packed = Vec::with_capacity(num_leaves * HASHSIZE); - - // TODO: try and avoid fully encoding the whole list - for item in self { - packed.append(&mut ssz_encode(item)); - } - - let packed = sanitise_bytes(packed); - - let num_nodes = num_nodes(num_leaves); - let num_internal_nodes = num_nodes - num_leaves; - - { - let mut chunk = chunk + num_internal_nodes; - for new_chunk_bytes in packed.chunks(HASHSIZE) { - cache.maybe_update_chunk(chunk, new_chunk_bytes)?; - chunk += 1; + for (&parent, children) in offset_handler.iter_internal_nodes().rev() { + if cache.either_modified(children)? 
{ + cache.modify_chunk(parent, &cache.hash_children(children)?)?; } } - // Iterate backwards through the internal nodes, rehashing any node where it's children - // have changed. - for chunk in (chunk..chunk + num_internal_nodes).into_iter().rev() { - if cache.children_modified(chunk)? { - cache.modify_chunk(chunk, &cache.hash_children(chunk)?)?; - } - } - - Some(chunk + num_nodes) - */ - // TODO - Ok(42) + Ok(offset_handler.next_node()) } } -/* -fn get_packed_leaves(vec: Vec) -> Vec +fn get_packed_leaves(vec: &Vec) -> Vec where - T: Encodable, + T: CachedTreeHash, { - // + let num_packed_bytes = vec.num_bytes(); + let num_leaves = num_sanitized_leaves(num_packed_bytes); + + let mut packed = Vec::with_capacity(num_leaves * HASHSIZE); + + for item in vec { + packed.append(&mut item.packed_encoding()); + } + + sanitise_bytes(packed) } -*/ diff --git a/eth2/utils/ssz/src/cached_tree_hash/tests.rs b/eth2/utils/ssz/src/cached_tree_hash/tests.rs index 8124a8dd8..e65c87bbd 100644 --- a/eth2/utils/ssz/src/cached_tree_hash/tests.rs +++ b/eth2/utils/ssz/src/cached_tree_hash/tests.rs @@ -69,6 +69,10 @@ impl CachedTreeHash for Inner { panic!("Struct should never be packed") } + fn packing_factor() -> usize { + 1 + } + fn cached_hash_tree_root( &self, other: &Self, @@ -156,6 +160,10 @@ impl CachedTreeHash for Outer { panic!("Struct should never be packed") } + fn packing_factor() -> usize { + 1 + } + fn cached_hash_tree_root( &self, other: &Self, @@ -339,7 +347,6 @@ fn outer_builds() { assert_eq!(merkle, cache); } -/* #[test] fn partial_modification_u64_vec() { let n: u64 = 50; @@ -347,7 +354,7 @@ fn partial_modification_u64_vec() { let original_vec: Vec = (0..n).collect(); // Generate initial cache. 
- let original_cache = original_vec.build_cache_bytes(); + let original_cache: Vec = TreeHashCache::new(&original_vec).unwrap().into(); // Modify the vec let mut modified_vec = original_vec.clone(); @@ -355,7 +362,9 @@ fn partial_modification_u64_vec() { // Perform a differential hash let mut cache_struct = TreeHashCache::from_bytes(original_cache.clone()).unwrap(); - modified_vec.cached_hash_tree_root(&original_vec, &mut cache_struct, 0); + modified_vec + .cached_hash_tree_root(&original_vec, &mut cache_struct, 0) + .unwrap(); let modified_cache: Vec = cache_struct.into(); // Generate reference data. @@ -376,7 +385,7 @@ fn large_vec_of_u64_builds() { let my_vec: Vec = (0..n).collect(); // Generate function output. - let cache = my_vec.build_cache_bytes(); + let cache: Vec = TreeHashCache::new(&my_vec).unwrap().into(); // Generate reference data. let mut data = vec![]; @@ -388,7 +397,6 @@ fn large_vec_of_u64_builds() { assert_eq!(expected, cache); } -*/ #[test] fn vec_of_inner_builds() { From 0c0eebd7740fe9e021e83a799696fbdffdb93c4b Mon Sep 17 00:00:00 2001 From: Paul Hauner Date: Thu, 11 Apr 2019 12:57:36 +1000 Subject: [PATCH 22/89] Add progress on variable list hashing --- eth2/utils/ssz/src/cached_tree_hash.rs | 30 +++---- eth2/utils/ssz/src/cached_tree_hash/impls.rs | 45 +++++++---- eth2/utils/ssz/src/cached_tree_hash/tests.rs | 84 +++++++++++++++++--- 3 files changed, 115 insertions(+), 44 deletions(-) diff --git a/eth2/utils/ssz/src/cached_tree_hash.rs b/eth2/utils/ssz/src/cached_tree_hash.rs index e7f2114e4..0889718a2 100644 --- a/eth2/utils/ssz/src/cached_tree_hash.rs +++ b/eth2/utils/ssz/src/cached_tree_hash.rs @@ -1,4 +1,5 @@ use hashing::hash; +use std::fmt::Debug; use std::iter::IntoIterator; use std::iter::Iterator; use std::ops::Range; @@ -29,9 +30,8 @@ pub enum ItemType { Composite, } -pub trait CachedTreeHash { - type Item: CachedTreeHash; - +// TODO: remove debug requirement. 
+pub trait CachedTreeHash: Debug { fn item_type() -> ItemType; fn build_tree_hash_cache(&self) -> Result; @@ -50,7 +50,7 @@ pub trait CachedTreeHash { fn cached_hash_tree_root( &self, - other: &Self::Item, + other: &Item, cache: &mut TreeHashCache, chunk: usize, ) -> Result; @@ -71,7 +71,7 @@ impl Into> for TreeHashCache { impl TreeHashCache { pub fn new(item: &T) -> Result where - T: CachedTreeHash, + T: CachedTreeHash, { item.build_tree_hash_cache() } @@ -81,7 +81,7 @@ impl TreeHashCache { leaves_and_subtrees: Vec, ) -> Result where - T: CachedTreeHash, + T: CachedTreeHash, { let offset_handler = OffsetHandler::new(item, 0)?; @@ -114,7 +114,6 @@ impl TreeHashCache { // Merkleize the leaves, then split the leaf nodes off them. Then, replace all-zeros // internal nodes created earlier with the internal nodes generated by `merkleize`. let mut merkleized = merkleize(leaves); - dbg!(&merkleized); merkleized.split_off(internal_node_bytes); cache.splice(0..internal_node_bytes, merkleized); @@ -207,16 +206,6 @@ impl TreeHashCache { == other) } - pub fn set_changed(&mut self, chunk: usize, to: bool) -> Result<(), Error> { - if chunk < self.chunk_modified.len() { - self.chunk_modified[chunk] = to; - - Ok(()) - } else { - Err(Error::NoModifiedFieldForChunk(chunk)) - } - } - pub fn changed(&self, chunk: usize) -> Result { self.chunk_modified .get(chunk) @@ -256,7 +245,7 @@ fn num_nodes(num_leaves: usize) -> usize { #[derive(Debug)] pub struct OffsetHandler { num_internal_nodes: usize, - num_leaf_nodes: usize, + pub num_leaf_nodes: usize, next_node: usize, offsets: Vec, } @@ -264,7 +253,7 @@ pub struct OffsetHandler { impl OffsetHandler { pub fn new(item: &T, initial_offset: usize) -> Result where - T: CachedTreeHash, + T: CachedTreeHash, { Self::from_lengths(initial_offset, item.offsets()?) } @@ -314,6 +303,8 @@ impl OffsetHandler { self.next_node } + /// Returns an iterator visiting each internal node, providing the left and right child chunks + /// for the node. 
pub fn iter_internal_nodes<'a>( &'a self, ) -> impl DoubleEndedIterator { @@ -328,6 +319,7 @@ impl OffsetHandler { }) } + /// Returns an iterator visiting each leaf node, providing the chunk for that node. pub fn iter_leaf_nodes<'a>(&'a self) -> impl DoubleEndedIterator { let leaf_nodes = &self.offsets[self.num_internal_nodes..]; diff --git a/eth2/utils/ssz/src/cached_tree_hash/impls.rs b/eth2/utils/ssz/src/cached_tree_hash/impls.rs index 621c5d02b..58343de3a 100644 --- a/eth2/utils/ssz/src/cached_tree_hash/impls.rs +++ b/eth2/utils/ssz/src/cached_tree_hash/impls.rs @@ -1,9 +1,7 @@ use super::*; use crate::{ssz_encode, Encodable}; -impl CachedTreeHash for u64 { - type Item = Self; - +impl CachedTreeHash for u64 { fn item_type() -> ItemType { ItemType::Basic } @@ -47,12 +45,10 @@ impl CachedTreeHash for u64 { } } -impl CachedTreeHash for Vec +impl CachedTreeHash> for Vec where - T: CachedTreeHash, + T: CachedTreeHash, { - type Item = Self; - fn item_type() -> ItemType { ItemType::List } @@ -78,7 +74,7 @@ where let mut offsets = vec![]; for item in self { - offsets.push(item.offsets()?.iter().sum()) + offsets.push(OffsetHandler::new(item, 0)?.total_nodes()) } offsets @@ -107,32 +103,51 @@ where fn cached_hash_tree_root( &self, - other: &Self::Item, + other: &Vec, cache: &mut TreeHashCache, chunk: usize, ) -> Result { let offset_handler = OffsetHandler::new(self, chunk)?; + if self.len() != other.len() { + panic!("variable sized lists not implemented"); + } + match T::item_type() { ItemType::Basic => { let leaves = get_packed_leaves(self); for (i, chunk) in offset_handler.iter_leaf_nodes().enumerate() { if let Some(latest) = leaves.get(i * HASHSIZE..(i + 1) * HASHSIZE) { - if !cache.chunk_equals(*chunk, latest)? 
{ - dbg!(chunk); - cache.set_changed(*chunk, true)?; - } + cache.maybe_update_chunk(*chunk, latest)?; } } let first_leaf_chunk = offset_handler.first_leaf_node()?; cache.chunk_splice(first_leaf_chunk..offset_handler.next_node, leaves); } - _ => panic!("not implemented"), + ItemType::Composite | ItemType::List => { + let mut i = offset_handler.num_leaf_nodes; + for start_chunk in offset_handler.iter_leaf_nodes().rev() { + i -= 1; + match (other.get(i), self.get(i)) { + // The item existed in the previous list and exsits in the current list. + (Some(old), Some(new)) => { + new.cached_hash_tree_root(old, cache, *start_chunk)?; + }, + // The item didn't exist in the old list and doesn't exist in the new list, + // nothing to do. + (None, None) => {}, + _ => panic!("variable sized lists not implemented") + }; + } + // this thing + } } for (&parent, children) in offset_handler.iter_internal_nodes().rev() { if cache.either_modified(children)? { + dbg!(parent); + dbg!(children); cache.modify_chunk(parent, &cache.hash_children(children)?)?; } } @@ -143,7 +158,7 @@ where fn get_packed_leaves(vec: &Vec) -> Vec where - T: CachedTreeHash, + T: CachedTreeHash, { let num_packed_bytes = vec.num_bytes(); let num_leaves = num_sanitized_leaves(num_packed_bytes); diff --git a/eth2/utils/ssz/src/cached_tree_hash/tests.rs b/eth2/utils/ssz/src/cached_tree_hash/tests.rs index e65c87bbd..156e2c2e5 100644 --- a/eth2/utils/ssz/src/cached_tree_hash/tests.rs +++ b/eth2/utils/ssz/src/cached_tree_hash/tests.rs @@ -2,7 +2,7 @@ use super::*; use crate::Encodable; use int_to_bytes::{int_to_bytes32, int_to_bytes8}; -#[derive(Clone)] +#[derive(Clone, Debug)] pub struct Inner { pub a: u64, pub b: u64, @@ -10,9 +10,7 @@ pub struct Inner { pub d: u64, } -impl CachedTreeHash for Inner { - type Item = Self; - +impl CachedTreeHash for Inner { fn item_type() -> ItemType { ItemType::Composite } @@ -100,16 +98,14 @@ impl CachedTreeHash for Inner { } } -#[derive(Clone)] +#[derive(Clone, Debug)] pub struct 
Outer { pub a: u64, pub b: Inner, pub c: u64, } -impl CachedTreeHash for Outer { - type Item = Self; - +impl CachedTreeHash for Outer { fn item_type() -> ItemType { ItemType::Composite } @@ -398,6 +394,66 @@ fn large_vec_of_u64_builds() { assert_eq!(expected, cache); } +#[test] +fn partial_modification_of_vec_of_inner() { + let original_vec = vec![ + Inner { + a: 0, + b: 1, + c: 2, + d: 3, + }, + Inner { + a: 4, + b: 5, + c: 6, + d: 7, + }, + Inner { + a: 8, + b: 9, + c: 10, + d: 11, + }, + ]; + let mut cache = TreeHashCache::new(&original_vec).unwrap(); + + let mut modified_vec = original_vec.clone(); + modified_vec[1].a = 42; + + modified_vec + .cached_hash_tree_root(&original_vec, &mut cache, 0) + .unwrap(); + let modified_cache: Vec = cache.into(); + + // Build the reference vec. + + let mut numbers: Vec = (0..12).collect(); + numbers[4] = 42; + + let mut leaves = vec![]; + let mut full_bytes = vec![]; + + for n in numbers.chunks(4) { + let mut merkle = merkleize(join(vec![ + int_to_bytes32(n[0]), + int_to_bytes32(n[1]), + int_to_bytes32(n[2]), + int_to_bytes32(n[3]), + ])); + leaves.append(&mut merkle[0..HASHSIZE].to_vec()); + full_bytes.append(&mut merkle); + } + + let mut expected = merkleize(leaves); + expected.splice(3 * HASHSIZE.., full_bytes); + expected.append(&mut vec![0; HASHSIZE]); + + // Compare the cached tree to the reference tree. + + assert_trees_eq(&expected, &modified_cache); +} + #[test] fn vec_of_inner_builds() { let numbers: Vec = (0..12).collect(); @@ -449,9 +505,17 @@ fn vec_of_inner_builds() { /// Provides detailed assertions when comparing merkle trees. 
fn assert_trees_eq(a: &[u8], b: &[u8]) { assert_eq!(a.len(), b.len(), "Byte lens different"); - for i in 0..a.len() / HASHSIZE { + for i in (0..a.len() / HASHSIZE).rev() { let range = i * HASHSIZE..(i + 1) * HASHSIZE; - assert_eq!(a[range.clone()], b[range], "Chunk {} different", i); + assert_eq!( + a[range.clone()], + b[range], + "Chunk {}/{} different \n\n a: {:?} \n\n b: {:?}", + i, + a.len() / HASHSIZE, + a, + b, + ); } } From 0bdd61e564f62f62c85d8b3d64a3146ead38dc30 Mon Sep 17 00:00:00 2001 From: Paul Hauner Date: Thu, 11 Apr 2019 17:21:57 +1000 Subject: [PATCH 23/89] Fix failing vec hashing test --- eth2/utils/ssz/src/cached_tree_hash.rs | 21 ++++++++++---------- eth2/utils/ssz/src/cached_tree_hash/impls.rs | 7 +++---- 2 files changed, 13 insertions(+), 15 deletions(-) diff --git a/eth2/utils/ssz/src/cached_tree_hash.rs b/eth2/utils/ssz/src/cached_tree_hash.rs index 0889718a2..9960d1f6a 100644 --- a/eth2/utils/ssz/src/cached_tree_hash.rs +++ b/eth2/utils/ssz/src/cached_tree_hash.rs @@ -195,15 +195,18 @@ impl TreeHashCache { Ok(()) } - pub fn chunk_equals(&mut self, chunk: usize, other: &[u8]) -> Result { + pub fn get_chunk(&self, chunk: usize) -> Result<&[u8], Error> { let start = chunk * BYTES_PER_CHUNK; let end = start + BYTES_PER_CHUNK; Ok(self .cache .get(start..end) - .ok_or_else(|| Error::NoModifiedFieldForChunk(chunk))? - == other) + .ok_or_else(|| Error::NoModifiedFieldForChunk(chunk))?) + } + + pub fn chunk_equals(&mut self, chunk: usize, other: &[u8]) -> Result { + Ok(self.get_chunk(chunk)? 
== other) } pub fn changed(&self, chunk: usize) -> Result { @@ -218,15 +221,11 @@ impl TreeHashCache { } pub fn hash_children(&self, children: (&usize, &usize)) -> Result, Error> { - let start = children.0 * BYTES_PER_CHUNK; - let end = start + BYTES_PER_CHUNK * 2; + let mut child_bytes = Vec::with_capacity(BYTES_PER_CHUNK * 2); + child_bytes.append(&mut self.get_chunk(*children.0)?.to_vec()); + child_bytes.append(&mut self.get_chunk(*children.1)?.to_vec()); - let children = &self - .cache - .get(start..end) - .ok_or_else(|| Error::NoChildrenForHashing((*children.0, *children.1)))?; - - Ok(hash(children)) + Ok(hash(&child_bytes)) } pub fn into_merkle_tree(self) -> Vec { diff --git a/eth2/utils/ssz/src/cached_tree_hash/impls.rs b/eth2/utils/ssz/src/cached_tree_hash/impls.rs index 58343de3a..c6cd05cd9 100644 --- a/eth2/utils/ssz/src/cached_tree_hash/impls.rs +++ b/eth2/utils/ssz/src/cached_tree_hash/impls.rs @@ -133,14 +133,13 @@ where // The item existed in the previous list and exsits in the current list. (Some(old), Some(new)) => { new.cached_hash_tree_root(old, cache, *start_chunk)?; - }, + } // The item didn't exist in the old list and doesn't exist in the new list, // nothing to do. 
- (None, None) => {}, - _ => panic!("variable sized lists not implemented") + (None, None) => {} + _ => panic!("variable sized lists not implemented"), }; } - // this thing } } From 55ee8e20aefb0498f90383592fcca0a7b4fec7ea Mon Sep 17 00:00:00 2001 From: Paul Hauner Date: Thu, 11 Apr 2019 17:40:11 +1000 Subject: [PATCH 24/89] Add more passing tests for vec hash caching --- eth2/utils/ssz/src/cached_tree_hash/impls.rs | 4 +- eth2/utils/ssz/src/cached_tree_hash/tests.rs | 71 ++++++++++++++++---- 2 files changed, 59 insertions(+), 16 deletions(-) diff --git a/eth2/utils/ssz/src/cached_tree_hash/impls.rs b/eth2/utils/ssz/src/cached_tree_hash/impls.rs index c6cd05cd9..01e9e3130 100644 --- a/eth2/utils/ssz/src/cached_tree_hash/impls.rs +++ b/eth2/utils/ssz/src/cached_tree_hash/impls.rs @@ -109,8 +109,8 @@ where ) -> Result { let offset_handler = OffsetHandler::new(self, chunk)?; - if self.len() != other.len() { - panic!("variable sized lists not implemented"); + if self.len().next_power_of_two() != other.len().next_power_of_two() { + panic!("not implemented: vary between power-of-two boundary"); } match T::item_type() { diff --git a/eth2/utils/ssz/src/cached_tree_hash/tests.rs b/eth2/utils/ssz/src/cached_tree_hash/tests.rs index 156e2c2e5..62f387321 100644 --- a/eth2/utils/ssz/src/cached_tree_hash/tests.rs +++ b/eth2/utils/ssz/src/cached_tree_hash/tests.rs @@ -343,29 +343,27 @@ fn outer_builds() { assert_eq!(merkle, cache); } -#[test] -fn partial_modification_u64_vec() { - let n: u64 = 50; - - let original_vec: Vec = (0..n).collect(); - +/// Generic test that covers: +/// +/// 1. Produce a new cache from `original`. +/// 2. Do a differential hash between `original` and `modified`. +/// 3. Test that the cache generated matches the one we generate manually. +/// +/// In effect it ensures that we can do a differential hash between two `Vec`. +fn test_u64_vec_modifications(original: Vec, modified: Vec) { // Generate initial cache. 
- let original_cache: Vec = TreeHashCache::new(&original_vec).unwrap().into(); - - // Modify the vec - let mut modified_vec = original_vec.clone(); - modified_vec[n as usize - 1] = 42; + let original_cache: Vec = TreeHashCache::new(&original).unwrap().into(); // Perform a differential hash let mut cache_struct = TreeHashCache::from_bytes(original_cache.clone()).unwrap(); - modified_vec - .cached_hash_tree_root(&original_vec, &mut cache_struct, 0) + modified + .cached_hash_tree_root(&original, &mut cache_struct, 0) .unwrap(); let modified_cache: Vec = cache_struct.into(); // Generate reference data. let mut data = vec![]; - for i in &modified_vec { + for i in &modified { data.append(&mut int_to_bytes8(*i)); } let data = sanitise_bytes(data); @@ -374,6 +372,51 @@ fn partial_modification_u64_vec() { assert_eq!(expected, modified_cache); } +#[test] +fn partial_modification_u64_vec() { + let n: u64 = 2_u64.pow(5); + + let original_vec: Vec = (0..n).collect(); + + let mut modified_vec = original_vec.clone(); + modified_vec[n as usize - 1] = 42; + + test_u64_vec_modifications(original_vec, modified_vec); +} + +#[test] +fn shortened_u64_vec_len_within_pow_2_boundary() { + let n: u64 = 2_u64.pow(5) - 1; + + let original_vec: Vec = (0..n).collect(); + + let mut modified_vec = original_vec.clone(); + modified_vec.pop(); + + test_u64_vec_modifications(original_vec, modified_vec); +} + +#[test] +fn extended_u64_vec_len_within_pow_2_boundary() { + let n: u64 = 2_u64.pow(5) - 2; + + let original_vec: Vec = (0..n).collect(); + + let mut modified_vec = original_vec.clone(); + modified_vec.push(42); + + test_u64_vec_modifications(original_vec, modified_vec); +} + +#[test] +fn extended_u64_vec_len_outside_pow_2_boundary() { + let original_vec: Vec = (0..2_u64.pow(5)).collect(); + + let modified_vec: Vec = (0..2_u64.pow(6)).collect(); + + test_u64_vec_modifications(original_vec, modified_vec); +} + #[test] fn large_vec_of_u64_builds() { let n: u64 = 50; From 
48cf75e394eafe8afac52b835d50a71d3ee6f96c Mon Sep 17 00:00:00 2001 From: Paul Hauner Date: Fri, 12 Apr 2019 15:05:26 +1000 Subject: [PATCH 25/89] Add failing test for extending struct list --- eth2/utils/ssz/src/cached_tree_hash.rs | 5 + eth2/utils/ssz/src/cached_tree_hash/impls.rs | 20 ++- eth2/utils/ssz/src/cached_tree_hash/tests.rs | 145 +++++++++++++++---- 3 files changed, 140 insertions(+), 30 deletions(-) diff --git a/eth2/utils/ssz/src/cached_tree_hash.rs b/eth2/utils/ssz/src/cached_tree_hash.rs index 9960d1f6a..6e84233fc 100644 --- a/eth2/utils/ssz/src/cached_tree_hash.rs +++ b/eth2/utils/ssz/src/cached_tree_hash.rs @@ -163,6 +163,11 @@ impl TreeHashCache { let byte_start = chunk_range.start * BYTES_PER_CHUNK; let byte_end = chunk_range.end * BYTES_PER_CHUNK; + // Update the `chunk_modified` vec, marking all spliced-in nodes as changed. + self.chunk_modified.splice( + chunk_range.clone(), + vec![true; chunk_range.end - chunk_range.start], + ); self.cache.splice(byte_start..byte_end, replace_with) } diff --git a/eth2/utils/ssz/src/cached_tree_hash/impls.rs b/eth2/utils/ssz/src/cached_tree_hash/impls.rs index 01e9e3130..37a3678c2 100644 --- a/eth2/utils/ssz/src/cached_tree_hash/impls.rs +++ b/eth2/utils/ssz/src/cached_tree_hash/impls.rs @@ -127,12 +127,28 @@ where } ItemType::Composite | ItemType::List => { let mut i = offset_handler.num_leaf_nodes; - for start_chunk in offset_handler.iter_leaf_nodes().rev() { + for &start_chunk in offset_handler.iter_leaf_nodes().rev() { i -= 1; match (other.get(i), self.get(i)) { // The item existed in the previous list and exsits in the current list. (Some(old), Some(new)) => { - new.cached_hash_tree_root(old, cache, *start_chunk)?; + new.cached_hash_tree_root(old, cache, start_chunk)?; + } + // The item existed in the previous list but does not exist in this list. + // + // I.e., the list has been shortened. 
+ (Some(old), None) => { + // Splice out the entire tree of the removed node, replacing it with a + // single padding node. + let end_chunk = OffsetHandler::new(old, start_chunk)?.next_node(); + cache.chunk_splice(start_chunk..end_chunk, vec![0; HASHSIZE]); + } + // The item existed in the previous list but does exist in this list. + // + // I.e., the list has been lengthened. + (None, Some(new)) => { + let bytes: Vec = TreeHashCache::new(new)?.into(); + cache.chunk_splice(start_chunk..start_chunk + 1, bytes); } // The item didn't exist in the old list and doesn't exist in the new list, // nothing to do. diff --git a/eth2/utils/ssz/src/cached_tree_hash/tests.rs b/eth2/utils/ssz/src/cached_tree_hash/tests.rs index 62f387321..4110e29a1 100644 --- a/eth2/utils/ssz/src/cached_tree_hash/tests.rs +++ b/eth2/utils/ssz/src/cached_tree_hash/tests.rs @@ -408,6 +408,7 @@ fn extended_u64_vec_len_within_pow_2_boundary() { test_u64_vec_modifications(original_vec, modified_vec); } +/* #[test] fn extended_u64_vec_len_outside_pow_2_boundary() { let original_vec: Vec = (0..2_u64.pow(5)).collect(); @@ -416,6 +417,7 @@ fn extended_u64_vec_len_outside_pow_2_boundary() { test_u64_vec_modifications(original_vec, modified_vec); } +*/ #[test] fn large_vec_of_u64_builds() { @@ -437,9 +439,51 @@ fn large_vec_of_u64_builds() { assert_eq!(expected, cache); } +/// Generic test that covers: +/// +/// 1. Produce a new cache from `original`. +/// 2. Do a differential hash between `original` and `modified`. +/// 3. Test that the cache generated matches the one we generate manually. +/// +/// The `reference` vec is used to build the tree hash cache manually. `Inner` is just 4x `u64`, so +/// you can represent 2x `Inner` with a `reference` vec of len 8. +/// +/// In effect it ensures that we can do a differential hash between two `Vec`. 
+fn test_inner_vec_modifications(original: Vec, modified: Vec, reference: Vec) { + let mut cache = TreeHashCache::new(&original).unwrap(); + + modified + .cached_hash_tree_root(&original, &mut cache, 0) + .unwrap(); + let modified_cache: Vec = cache.into(); + + // Build the reference vec. + + let mut leaves = vec![]; + let mut full_bytes = vec![]; + + for n in reference.chunks(4) { + let mut merkle = merkleize(join(vec![ + int_to_bytes32(n[0]), + int_to_bytes32(n[1]), + int_to_bytes32(n[2]), + int_to_bytes32(n[3]), + ])); + leaves.append(&mut merkle[0..HASHSIZE].to_vec()); + full_bytes.append(&mut merkle); + } + + let mut expected = merkleize(leaves); + expected.splice(3 * HASHSIZE.., full_bytes); + expected.append(&mut vec![0; HASHSIZE]); + + // Compare the cached tree to the reference tree. + assert_trees_eq(&expected, &modified_cache); +} + #[test] fn partial_modification_of_vec_of_inner() { - let original_vec = vec![ + let original = vec![ Inner { a: 0, b: 1, @@ -459,42 +503,87 @@ fn partial_modification_of_vec_of_inner() { d: 11, }, ]; - let mut cache = TreeHashCache::new(&original_vec).unwrap(); - let mut modified_vec = original_vec.clone(); - modified_vec[1].a = 42; + let mut modified = original.clone(); + modified[1].a = 42; - modified_vec - .cached_hash_tree_root(&original_vec, &mut cache, 0) - .unwrap(); - let modified_cache: Vec = cache.into(); + let mut reference_vec: Vec = (0..12).collect(); + reference_vec[4] = 42; - // Build the reference vec. 
+ test_inner_vec_modifications(original, modified, reference_vec); +} - let mut numbers: Vec = (0..12).collect(); - numbers[4] = 42; +#[test] +fn shortened_vec_of_inner_within_power_of_two_boundary() { + let original = vec![ + Inner { + a: 0, + b: 1, + c: 2, + d: 3, + }, + Inner { + a: 4, + b: 5, + c: 6, + d: 7, + }, + Inner { + a: 8, + b: 9, + c: 10, + d: 11, + }, + Inner { + a: 12, + b: 13, + c: 14, + d: 15, + }, + ]; - let mut leaves = vec![]; - let mut full_bytes = vec![]; + let mut modified = original.clone(); + modified.pop(); // remove the last element from the list. - for n in numbers.chunks(4) { - let mut merkle = merkleize(join(vec![ - int_to_bytes32(n[0]), - int_to_bytes32(n[1]), - int_to_bytes32(n[2]), - int_to_bytes32(n[3]), - ])); - leaves.append(&mut merkle[0..HASHSIZE].to_vec()); - full_bytes.append(&mut merkle); - } + let reference_vec: Vec = (0..12).collect(); - let mut expected = merkleize(leaves); - expected.splice(3 * HASHSIZE.., full_bytes); - expected.append(&mut vec![0; HASHSIZE]); + test_inner_vec_modifications(original, modified, reference_vec); +} - // Compare the cached tree to the reference tree. 
+#[test] +fn lengthened_vec_of_inner_within_power_of_two_boundary() { + let original = vec![ + Inner { + a: 0, + b: 1, + c: 2, + d: 3, + }, + Inner { + a: 4, + b: 5, + c: 6, + d: 7, + }, + Inner { + a: 8, + b: 9, + c: 10, + d: 11, + }, + ]; - assert_trees_eq(&expected, &modified_cache); + let mut modified = original.clone(); + modified.push(Inner { + a: 12, + b: 13, + c: 14, + d: 15, + }); + + let reference_vec: Vec = (0..16).collect(); + + test_inner_vec_modifications(original, modified, reference_vec); } #[test] From d79616fee67a51954cbf2835a1f381aff638aa9a Mon Sep 17 00:00:00 2001 From: Paul Hauner Date: Fri, 12 Apr 2019 16:52:11 +1000 Subject: [PATCH 26/89] Fix failing struct vec vectors --- eth2/utils/ssz/src/cached_tree_hash.rs | 20 +++++--------------- eth2/utils/ssz/src/cached_tree_hash/impls.rs | 1 - eth2/utils/ssz/src/cached_tree_hash/tests.rs | 7 ++++++- 3 files changed, 11 insertions(+), 17 deletions(-) diff --git a/eth2/utils/ssz/src/cached_tree_hash.rs b/eth2/utils/ssz/src/cached_tree_hash.rs index 6e84233fc..d6ff884ef 100644 --- a/eth2/utils/ssz/src/cached_tree_hash.rs +++ b/eth2/utils/ssz/src/cached_tree_hash.rs @@ -145,30 +145,20 @@ impl TreeHashCache { }) } - pub fn single_chunk_splice(&mut self, chunk: usize, replace_with: I) -> Splice - where - I: IntoIterator, - { - self.chunk_splice(chunk..chunk + 1, replace_with) + pub fn single_chunk_splice(&mut self, chunk: usize, replace_with: Vec) { + self.chunk_splice(chunk..chunk + 1, replace_with); } - pub fn chunk_splice( - &mut self, - chunk_range: Range, - replace_with: I, - ) -> Splice - where - I: IntoIterator, - { + pub fn chunk_splice(&mut self, chunk_range: Range, replace_with: Vec) { let byte_start = chunk_range.start * BYTES_PER_CHUNK; let byte_end = chunk_range.end * BYTES_PER_CHUNK; // Update the `chunk_modified` vec, marking all spliced-in nodes as changed. 
self.chunk_modified.splice( chunk_range.clone(), - vec![true; chunk_range.end - chunk_range.start], + vec![true; replace_with.len() / HASHSIZE], ); - self.cache.splice(byte_start..byte_end, replace_with) + self.cache.splice(byte_start..byte_end, replace_with); } pub fn maybe_update_chunk(&mut self, chunk: usize, to: &[u8]) -> Result<(), Error> { diff --git a/eth2/utils/ssz/src/cached_tree_hash/impls.rs b/eth2/utils/ssz/src/cached_tree_hash/impls.rs index 37a3678c2..2d0ab5059 100644 --- a/eth2/utils/ssz/src/cached_tree_hash/impls.rs +++ b/eth2/utils/ssz/src/cached_tree_hash/impls.rs @@ -153,7 +153,6 @@ where // The item didn't exist in the old list and doesn't exist in the new list, // nothing to do. (None, None) => {} - _ => panic!("variable sized lists not implemented"), }; } } diff --git a/eth2/utils/ssz/src/cached_tree_hash/tests.rs b/eth2/utils/ssz/src/cached_tree_hash/tests.rs index 4110e29a1..d48ed9eb8 100644 --- a/eth2/utils/ssz/src/cached_tree_hash/tests.rs +++ b/eth2/utils/ssz/src/cached_tree_hash/tests.rs @@ -473,9 +473,14 @@ fn test_inner_vec_modifications(original: Vec, modified: Vec, refe full_bytes.append(&mut merkle); } + let num_leaves = leaves.len() / HASHSIZE; + let mut expected = merkleize(leaves); expected.splice(3 * HASHSIZE.., full_bytes); - expected.append(&mut vec![0; HASHSIZE]); + + for _ in num_leaves..num_leaves.next_power_of_two() { + expected.append(&mut vec![0; HASHSIZE]); + } // Compare the cached tree to the reference tree. 
assert_trees_eq(&expected, &modified_cache); From a124042e30eec4d6dd78168314c2b29255a2736e Mon Sep 17 00:00:00 2001 From: Paul Hauner Date: Sat, 13 Apr 2019 09:11:19 +1000 Subject: [PATCH 27/89] Start implementing grow merkle fn --- eth2/utils/ssz/src/cached_tree_hash/impls.rs | 17 +++++++++++++++++ 1 file changed, 17 insertions(+) diff --git a/eth2/utils/ssz/src/cached_tree_hash/impls.rs b/eth2/utils/ssz/src/cached_tree_hash/impls.rs index 2d0ab5059..14eab3180 100644 --- a/eth2/utils/ssz/src/cached_tree_hash/impls.rs +++ b/eth2/utils/ssz/src/cached_tree_hash/impls.rs @@ -170,6 +170,23 @@ where } } +/// New vec is bigger than old vec. +fn grow_merkle_cache(cache: Vec, to: usize) -> Vec { + let new = Vec::with_capacity(to * HASHSIZE); + + let i = cache.len() / HASHSIZE; + let j = to; + + assert_eq!(i.next_power_of_two(), i); + assert_eq!(j.next_power_of_two(), j); + + while i > 0 { + + } + + new +} + fn get_packed_leaves(vec: &Vec) -> Vec where T: CachedTreeHash, From 75177837d0717045fb4c9271ca2ee96989f47ab3 Mon Sep 17 00:00:00 2001 From: Paul Hauner Date: Sat, 13 Apr 2019 09:42:43 +1000 Subject: [PATCH 28/89] Add first pass of grow cache algo --- eth2/utils/ssz/src/cached_tree_hash.rs | 1 + eth2/utils/ssz/src/cached_tree_hash/impls.rs | 17 ---------- eth2/utils/ssz/src/cached_tree_hash/resize.rs | 33 +++++++++++++++++++ 3 files changed, 34 insertions(+), 17 deletions(-) create mode 100644 eth2/utils/ssz/src/cached_tree_hash/resize.rs diff --git a/eth2/utils/ssz/src/cached_tree_hash.rs b/eth2/utils/ssz/src/cached_tree_hash.rs index d6ff884ef..0588ab772 100644 --- a/eth2/utils/ssz/src/cached_tree_hash.rs +++ b/eth2/utils/ssz/src/cached_tree_hash.rs @@ -6,6 +6,7 @@ use std::ops::Range; use std::vec::Splice; mod impls; +mod resize; mod tests; const BYTES_PER_CHUNK: usize = 32; diff --git a/eth2/utils/ssz/src/cached_tree_hash/impls.rs b/eth2/utils/ssz/src/cached_tree_hash/impls.rs index 14eab3180..2d0ab5059 100644 --- a/eth2/utils/ssz/src/cached_tree_hash/impls.rs +++ 
b/eth2/utils/ssz/src/cached_tree_hash/impls.rs @@ -170,23 +170,6 @@ where } } -/// New vec is bigger than old vec. -fn grow_merkle_cache(cache: Vec, to: usize) -> Vec { - let new = Vec::with_capacity(to * HASHSIZE); - - let i = cache.len() / HASHSIZE; - let j = to; - - assert_eq!(i.next_power_of_two(), i); - assert_eq!(j.next_power_of_two(), j); - - while i > 0 { - - } - - new -} - fn get_packed_leaves(vec: &Vec) -> Vec where T: CachedTreeHash, diff --git a/eth2/utils/ssz/src/cached_tree_hash/resize.rs b/eth2/utils/ssz/src/cached_tree_hash/resize.rs new file mode 100644 index 000000000..d41453e9a --- /dev/null +++ b/eth2/utils/ssz/src/cached_tree_hash/resize.rs @@ -0,0 +1,33 @@ +use super::*; + +/// New vec is bigger than old vec. +fn grow_merkle_cache(old_bytes: &[u8], old_flags: &[bool], to: usize) -> Option> { + let mut bytes = Vec::with_capacity(to * HASHSIZE); + let mut flags = Vec::with_capacity(to); + + let from = old_bytes.len() / HASHSIZE; + let to = to; + + let distance = (from.leading_zeros() - to.leading_zeros()) as usize; + + let leading_zero_chunks = 1 >> distance; + + bytes.resize(leading_zero_chunks * HASHSIZE, 0); + flags.resize(leading_zero_chunks, true); // all new chunks are modified by default. 
+ + for i in 0..to.leading_zeros() as usize { + let new_slice = bytes.get_mut(1 >> i + distance..1 >> i + distance + 1)?; + let old_slice = old_bytes.get(1 >> i..1 >> i + 1)?; + new_slice.copy_from_slice(old_slice); + } + + Some(bytes) +} + +#[cfg(test)] +mod test { + #[test] + fn can_grow() { + // TODO + } +} From 0b186f772fdfe432bdcc707482539a1199aae346 Mon Sep 17 00:00:00 2001 From: Paul Hauner Date: Sat, 13 Apr 2019 12:12:56 +1000 Subject: [PATCH 29/89] Refactor resize functions for clarity --- eth2/utils/ssz/src/cached_tree_hash/resize.rs | 105 +++++++++++++++--- 1 file changed, 92 insertions(+), 13 deletions(-) diff --git a/eth2/utils/ssz/src/cached_tree_hash/resize.rs b/eth2/utils/ssz/src/cached_tree_hash/resize.rs index d41453e9a..a7bad0b04 100644 --- a/eth2/utils/ssz/src/cached_tree_hash/resize.rs +++ b/eth2/utils/ssz/src/cached_tree_hash/resize.rs @@ -1,33 +1,112 @@ use super::*; /// New vec is bigger than old vec. -fn grow_merkle_cache(old_bytes: &[u8], old_flags: &[bool], to: usize) -> Option> { - let mut bytes = Vec::with_capacity(to * HASHSIZE); - let mut flags = Vec::with_capacity(to); +fn grow_merkle_cache( + old_bytes: &[u8], + old_flags: &[bool], + from_height: usize, + to_height: usize, +) -> Option> { + let to_nodes = (1 << to_height.next_power_of_two()) - 1; - let from = old_bytes.len() / HASHSIZE; - let to = to; + // Determine the size of our new tree. It is not just a simple `1 << to_height` as there can be + // an arbitrary number of bytes in `old_bytes` leaves. 
+ let new_byte_count = { + let additional_from_nodes = old_bytes.len() / HASHSIZE - ((1 << from_height) - 1); + ((1 << to_height + additional_from_nodes) - 1) * HASHSIZE + }; + dbg!(new_byte_count / 32); - let distance = (from.leading_zeros() - to.leading_zeros()) as usize; + let mut bytes = vec![0; new_byte_count]; + let mut flags = vec![true; to_nodes]; - let leading_zero_chunks = 1 >> distance; + let leaf_level = from_height - 1; - bytes.resize(leading_zero_chunks * HASHSIZE, 0); - flags.resize(leading_zero_chunks, true); // all new chunks are modified by default. + // Loop through all internal levels of the tree (skipping the final, leaves level). + for i in 0..from_height - 1 as usize { + dbg!(i); + dbg!(bytes.len()); + // If we're on the leaf slice, grab the first byte and all the of the bytes after that. + // This is required because we can have an arbitrary number of bytes at the leaf level + // (e.g., the case where there are subtrees as leaves). + // + // If we're not on a leaf level, the number of nodes is fixed and known. + let old_slice = if i == leaf_level { + old_bytes.get(first_byte_at_height(i)..) + } else { + old_bytes.get(byte_range_at_height(i)) + }?; + + dbg!(byte_range_at_height(i + to_height - from_height)); + + let new_slice = bytes + .get_mut(byte_range_at_height(i + to_height - from_height))? 
+ .get_mut(0..old_slice.len())?; - for i in 0..to.leading_zeros() as usize { - let new_slice = bytes.get_mut(1 >> i + distance..1 >> i + distance + 1)?; - let old_slice = old_bytes.get(1 >> i..1 >> i + 1)?; new_slice.copy_from_slice(old_slice); } Some(bytes) } +fn byte_range_at_height(h: usize) -> Range { + first_byte_at_height(h)..last_node_at_height(h) * HASHSIZE +} + +fn first_byte_at_height(h: usize) -> usize { + first_node_at_height(h) * HASHSIZE +} + +fn first_node_at_height(h: usize) -> usize { + (1 << h) - 1 +} + +fn last_node_at_height(h: usize) -> usize { + (1 << (h + 1)) - 1 +} + #[cfg(test)] mod test { + use super::*; + #[test] fn can_grow() { - // TODO + let from: usize = 7; + let to: usize = 15; + + let old_bytes = vec![42; from * HASHSIZE]; + let old_flags = vec![false; from]; + + let new = grow_merkle_cache( + &old_bytes, + &old_flags, + (from + 1).trailing_zeros() as usize, + (to + 1).trailing_zeros() as usize, + ) + .unwrap(); + + println!("{:?}", new); + let mut expected = vec![]; + // First level + expected.append(&mut vec![0; 32]); + // Second level + expected.append(&mut vec![42; 32]); + expected.append(&mut vec![0; 32]); + // Third level + expected.append(&mut vec![42; 32]); + expected.append(&mut vec![42; 32]); + expected.append(&mut vec![0; 32]); + expected.append(&mut vec![0; 32]); + // Fourth level + expected.append(&mut vec![0; 32]); + expected.append(&mut vec![0; 32]); + expected.append(&mut vec![0; 32]); + expected.append(&mut vec![0; 32]); + expected.append(&mut vec![0; 32]); + expected.append(&mut vec![0; 32]); + expected.append(&mut vec![0; 32]); + expected.append(&mut vec![0; 32]); + + assert_eq!(expected, new); } } From 0420607ff130fc8f4b61458267479fb6ad16980b Mon Sep 17 00:00:00 2001 From: Paul Hauner Date: Sat, 13 Apr 2019 13:02:41 +1000 Subject: [PATCH 30/89] Tidy, remove debug prints --- eth2/utils/ssz/src/cached_tree_hash/resize.rs | 25 ++++++++++++++++--- 1 file changed, 21 insertions(+), 4 deletions(-) diff --git 
a/eth2/utils/ssz/src/cached_tree_hash/resize.rs b/eth2/utils/ssz/src/cached_tree_hash/resize.rs index a7bad0b04..21b729c9e 100644 --- a/eth2/utils/ssz/src/cached_tree_hash/resize.rs +++ b/eth2/utils/ssz/src/cached_tree_hash/resize.rs @@ -24,8 +24,6 @@ fn grow_merkle_cache( // Loop through all internal levels of the tree (skipping the final, leaves level). for i in 0..from_height - 1 as usize { - dbg!(i); - dbg!(bytes.len()); // If we're on the leaf slice, grab the first byte and all the of the bytes after that. // This is required because we can have an arbitrary number of bytes at the leaf level // (e.g., the case where there are subtrees as leaves). @@ -37,8 +35,6 @@ fn grow_merkle_cache( old_bytes.get(byte_range_at_height(i)) }?; - dbg!(byte_range_at_height(i + to_height - from_height)); - let new_slice = bytes .get_mut(byte_range_at_height(i + to_height - from_height))? .get_mut(0..old_slice.len())?; @@ -49,6 +45,27 @@ fn grow_merkle_cache( Some(bytes) } +/* +fn copy_bytes( + from_range: Range, + to_range: Range, + from: &[u8], + to: &mut Vec, +) -> Option<()> { + let from_slice = from.get(node_range_to_byte_range(from_range)); + + let to_slice = to + .get_mut(byte_range_at_height(i + to_height - from_height))? 
+ .get_mut(0..old_slice.len())?; + + Ok(()) +} +*/ + +fn node_range_to_byte_range(node_range: Range) -> Range { + node_range.start * HASHSIZE..node_range.end * HASHSIZE +} + fn byte_range_at_height(h: usize) -> Range { first_byte_at_height(h)..last_node_at_height(h) * HASHSIZE } From 42d6a39832d867bbaab4103e447b740e5ba3d49e Mon Sep 17 00:00:00 2001 From: Paul Hauner Date: Sat, 13 Apr 2019 13:18:18 +1000 Subject: [PATCH 31/89] Refactor TreeHashCache splice method --- eth2/utils/ssz/src/cached_tree_hash.rs | 43 ++++++++++++++------ eth2/utils/ssz/src/cached_tree_hash/impls.rs | 25 +++++++++--- eth2/utils/ssz/src/cached_tree_hash/tests.rs | 4 +- 3 files changed, 52 insertions(+), 20 deletions(-) diff --git a/eth2/utils/ssz/src/cached_tree_hash.rs b/eth2/utils/ssz/src/cached_tree_hash.rs index 0588ab772..b676ececc 100644 --- a/eth2/utils/ssz/src/cached_tree_hash.rs +++ b/eth2/utils/ssz/src/cached_tree_hash.rs @@ -124,6 +124,17 @@ impl TreeHashCache { }) } + pub fn from_bytes(bytes: Vec, initial_modified_state: bool) -> Result { + if bytes.len() % BYTES_PER_CHUNK > 0 { + return Err(Error::BytesAreNotEvenChunks(bytes.len())); + } + + Ok(Self { + chunk_modified: vec![initial_modified_state; bytes.len() / BYTES_PER_CHUNK], + cache: bytes, + }) + } + pub fn bytes_len(&self) -> usize { self.cache.len() } @@ -135,22 +146,19 @@ impl TreeHashCache { .and_then(|slice| Ok(slice.to_vec())) } - pub fn from_bytes(bytes: Vec) -> Result { - if bytes.len() % BYTES_PER_CHUNK > 0 { - return Err(Error::BytesAreNotEvenChunks(bytes.len())); - } + pub fn splice(&mut self, chunk_range: Range, replace_with: Self) { + let (bytes, bools) = replace_with.into_components(); - Ok(Self { - chunk_modified: vec![false; bytes.len() / BYTES_PER_CHUNK], - cache: bytes, - }) + // Update the `chunk_modified` vec, marking all spliced-in nodes as changed. 
+ self.chunk_modified.splice( + chunk_range.clone(), + bools, + ); + self.cache.splice(node_range_to_byte_range(chunk_range), bytes); } - pub fn single_chunk_splice(&mut self, chunk: usize, replace_with: Vec) { - self.chunk_splice(chunk..chunk + 1, replace_with); - } - - pub fn chunk_splice(&mut self, chunk_range: Range, replace_with: Vec) { + /* + pub fn byte_splice(&mut self, chunk_range: Range, replace_with: Vec) { let byte_start = chunk_range.start * BYTES_PER_CHUNK; let byte_end = chunk_range.end * BYTES_PER_CHUNK; @@ -161,6 +169,7 @@ impl TreeHashCache { ); self.cache.splice(byte_start..byte_end, replace_with); } + */ pub fn maybe_update_chunk(&mut self, chunk: usize, to: &[u8]) -> Result<(), Error> { let start = chunk * BYTES_PER_CHUNK; @@ -227,6 +236,10 @@ impl TreeHashCache { pub fn into_merkle_tree(self) -> Vec { self.cache } + + pub fn into_components(self) -> (Vec, Vec) { + (self.cache, self.chunk_modified) + } } fn children(parent: usize) -> (usize, usize) { @@ -237,6 +250,10 @@ fn num_nodes(num_leaves: usize) -> usize { 2 * num_leaves - 1 } +fn node_range_to_byte_range(node_range: Range) -> Range { + node_range.start * HASHSIZE..node_range.end * HASHSIZE +} + #[derive(Debug)] pub struct OffsetHandler { num_internal_nodes: usize, diff --git a/eth2/utils/ssz/src/cached_tree_hash/impls.rs b/eth2/utils/ssz/src/cached_tree_hash/impls.rs index 2d0ab5059..f598de79a 100644 --- a/eth2/utils/ssz/src/cached_tree_hash/impls.rs +++ b/eth2/utils/ssz/src/cached_tree_hash/impls.rs @@ -7,7 +7,10 @@ impl CachedTreeHash for u64 { } fn build_tree_hash_cache(&self) -> Result { - Ok(TreeHashCache::from_bytes(merkleize(ssz_encode(self)))?) + Ok(TreeHashCache::from_bytes( + merkleize(ssz_encode(self)), + false, + )?) 
} fn num_bytes(&self) -> usize { @@ -55,7 +58,7 @@ where fn build_tree_hash_cache(&self) -> Result { match T::item_type() { - ItemType::Basic => TreeHashCache::from_bytes(merkleize(get_packed_leaves(self))), + ItemType::Basic => TreeHashCache::from_bytes(merkleize(get_packed_leaves(self)), false), ItemType::Composite | ItemType::List => { let subtrees = self .iter() @@ -123,7 +126,11 @@ where } } let first_leaf_chunk = offset_handler.first_leaf_node()?; - cache.chunk_splice(first_leaf_chunk..offset_handler.next_node, leaves); + + cache.splice( + first_leaf_chunk..offset_handler.next_node, + TreeHashCache::from_bytes(leaves, true)?, + ); } ItemType::Composite | ItemType::List => { let mut i = offset_handler.num_leaf_nodes; @@ -141,14 +148,22 @@ where // Splice out the entire tree of the removed node, replacing it with a // single padding node. let end_chunk = OffsetHandler::new(old, start_chunk)?.next_node(); - cache.chunk_splice(start_chunk..end_chunk, vec![0; HASHSIZE]); + + cache.splice( + start_chunk..end_chunk, + TreeHashCache::from_bytes(vec![0; HASHSIZE], true)?, + ); } // The item existed in the previous list but does exist in this list. // // I.e., the list has been lengthened. (None, Some(new)) => { let bytes: Vec = TreeHashCache::new(new)?.into(); - cache.chunk_splice(start_chunk..start_chunk + 1, bytes); + + cache.splice( + start_chunk..start_chunk + 1, + TreeHashCache::from_bytes(bytes, true)?, + ); } // The item didn't exist in the old list and doesn't exist in the new list, // nothing to do. 
diff --git a/eth2/utils/ssz/src/cached_tree_hash/tests.rs b/eth2/utils/ssz/src/cached_tree_hash/tests.rs index d48ed9eb8..d784a0889 100644 --- a/eth2/utils/ssz/src/cached_tree_hash/tests.rs +++ b/eth2/utils/ssz/src/cached_tree_hash/tests.rs @@ -355,7 +355,7 @@ fn test_u64_vec_modifications(original: Vec, modified: Vec) { let original_cache: Vec = TreeHashCache::new(&original).unwrap().into(); // Perform a differential hash - let mut cache_struct = TreeHashCache::from_bytes(original_cache.clone()).unwrap(); + let mut cache_struct = TreeHashCache::from_bytes(original_cache.clone(), false).unwrap(); modified .cached_hash_tree_root(&original, &mut cache_struct, 0) .unwrap(); @@ -723,7 +723,7 @@ fn generic_test(index: usize) { _ => panic!("bad index"), }; - let mut cache_struct = TreeHashCache::from_bytes(cache.clone()).unwrap(); + let mut cache_struct = TreeHashCache::from_bytes(cache.clone(), false).unwrap(); changed_inner .cached_hash_tree_root(&inner, &mut cache_struct, 0) From 1ce1fce03c5bc31f120bcbf4650ea2c0acd53557 Mon Sep 17 00:00:00 2001 From: Paul Hauner Date: Sat, 13 Apr 2019 17:21:50 +1000 Subject: [PATCH 32/89] Fix failing grow tree test --- eth2/utils/ssz/src/cached_tree_hash.rs | 25 +-- eth2/utils/ssz/src/cached_tree_hash/impls.rs | 9 +- eth2/utils/ssz/src/cached_tree_hash/resize.rs | 174 ++++++++++++------ eth2/utils/ssz/src/cached_tree_hash/tests.rs | 2 +- 4 files changed, 129 insertions(+), 81 deletions(-) diff --git a/eth2/utils/ssz/src/cached_tree_hash.rs b/eth2/utils/ssz/src/cached_tree_hash.rs index b676ececc..84ef82233 100644 --- a/eth2/utils/ssz/src/cached_tree_hash.rs +++ b/eth2/utils/ssz/src/cached_tree_hash.rs @@ -1,9 +1,7 @@ use hashing::hash; use std::fmt::Debug; -use std::iter::IntoIterator; use std::iter::Iterator; use std::ops::Range; -use std::vec::Splice; mod impls; mod resize; @@ -21,7 +19,6 @@ pub enum Error { BytesAreNotEvenChunks(usize), NoModifiedFieldForChunk(usize), NoBytesForChunk(usize), - NoChildrenForHashing((usize, usize)), 
} #[derive(Debug, PartialEq, Clone)] @@ -150,27 +147,11 @@ impl TreeHashCache { let (bytes, bools) = replace_with.into_components(); // Update the `chunk_modified` vec, marking all spliced-in nodes as changed. - self.chunk_modified.splice( - chunk_range.clone(), - bools, - ); - self.cache.splice(node_range_to_byte_range(chunk_range), bytes); + self.chunk_modified.splice(chunk_range.clone(), bools); + self.cache + .splice(node_range_to_byte_range(chunk_range), bytes); } - /* - pub fn byte_splice(&mut self, chunk_range: Range, replace_with: Vec) { - let byte_start = chunk_range.start * BYTES_PER_CHUNK; - let byte_end = chunk_range.end * BYTES_PER_CHUNK; - - // Update the `chunk_modified` vec, marking all spliced-in nodes as changed. - self.chunk_modified.splice( - chunk_range.clone(), - vec![true; replace_with.len() / HASHSIZE], - ); - self.cache.splice(byte_start..byte_end, replace_with); - } - */ - pub fn maybe_update_chunk(&mut self, chunk: usize, to: &[u8]) -> Result<(), Error> { let start = chunk * BYTES_PER_CHUNK; let end = start + BYTES_PER_CHUNK; diff --git a/eth2/utils/ssz/src/cached_tree_hash/impls.rs b/eth2/utils/ssz/src/cached_tree_hash/impls.rs index f598de79a..dca00b6ba 100644 --- a/eth2/utils/ssz/src/cached_tree_hash/impls.rs +++ b/eth2/utils/ssz/src/cached_tree_hash/impls.rs @@ -1,5 +1,6 @@ +use super::resize::grow_merkle_cache; use super::*; -use crate::{ssz_encode, Encodable}; +use crate::ssz_encode; impl CachedTreeHash for u64 { fn item_type() -> ItemType { @@ -112,8 +113,10 @@ where ) -> Result { let offset_handler = OffsetHandler::new(self, chunk)?; - if self.len().next_power_of_two() != other.len().next_power_of_two() { - panic!("not implemented: vary between power-of-two boundary"); + if other.len().next_power_of_two() > self.len().next_power_of_two() { + // + } else if other.len().next_power_of_two() < self.len().next_power_of_two() { + panic!("shrinking below power of two is not implemented") } match T::item_type() { diff --git 
a/eth2/utils/ssz/src/cached_tree_hash/resize.rs b/eth2/utils/ssz/src/cached_tree_hash/resize.rs index 21b729c9e..bce722a5e 100644 --- a/eth2/utils/ssz/src/cached_tree_hash/resize.rs +++ b/eth2/utils/ssz/src/cached_tree_hash/resize.rs @@ -1,12 +1,12 @@ use super::*; /// New vec is bigger than old vec. -fn grow_merkle_cache( +pub fn grow_merkle_cache( old_bytes: &[u8], old_flags: &[bool], from_height: usize, to_height: usize, -) -> Option> { +) -> Option<(Vec, Vec)> { let to_nodes = (1 << to_height.next_power_of_two()) - 1; // Determine the size of our new tree. It is not just a simple `1 << to_height` as there can be @@ -15,7 +15,6 @@ fn grow_merkle_cache( let additional_from_nodes = old_bytes.len() / HASHSIZE - ((1 << from_height) - 1); ((1 << to_height + additional_from_nodes) - 1) * HASHSIZE }; - dbg!(new_byte_count / 32); let mut bytes = vec![0; new_byte_count]; let mut flags = vec![true; to_nodes]; @@ -23,53 +22,45 @@ fn grow_merkle_cache( let leaf_level = from_height - 1; // Loop through all internal levels of the tree (skipping the final, leaves level). - for i in 0..from_height - 1 as usize { + for i in 0..from_height as usize { // If we're on the leaf slice, grab the first byte and all the of the bytes after that. // This is required because we can have an arbitrary number of bytes at the leaf level // (e.g., the case where there are subtrees as leaves). // // If we're not on a leaf level, the number of nodes is fixed and known. - let old_slice = if i == leaf_level { - old_bytes.get(first_byte_at_height(i)..) + let (byte_slice, flag_slice) = if i == leaf_level { + ( + old_bytes.get(first_byte_at_height(i)..)?, + old_flags.get(first_node_at_height(i)..)?, + ) } else { - old_bytes.get(byte_range_at_height(i)) - }?; + ( + old_bytes.get(byte_range_at_height(i))?, + old_flags.get(node_range_at_height(i))? + ) + }; - let new_slice = bytes + bytes .get_mut(byte_range_at_height(i + to_height - from_height))? 
- .get_mut(0..old_slice.len())?; - - new_slice.copy_from_slice(old_slice); + .get_mut(0..byte_slice.len())? + .copy_from_slice(byte_slice); + flags + .get_mut(node_range_at_height(i + to_height - from_height))? + .get_mut(0..flag_slice.len())? + .copy_from_slice(flag_slice); } - Some(bytes) -} - -/* -fn copy_bytes( - from_range: Range, - to_range: Range, - from: &[u8], - to: &mut Vec, -) -> Option<()> { - let from_slice = from.get(node_range_to_byte_range(from_range)); - - let to_slice = to - .get_mut(byte_range_at_height(i + to_height - from_height))? - .get_mut(0..old_slice.len())?; - - Ok(()) -} -*/ - -fn node_range_to_byte_range(node_range: Range) -> Range { - node_range.start * HASHSIZE..node_range.end * HASHSIZE + Some((bytes, flags)) } fn byte_range_at_height(h: usize) -> Range { first_byte_at_height(h)..last_node_at_height(h) * HASHSIZE } +fn node_range_at_height(h: usize) -> Range { + first_node_at_height(h)..last_node_at_height(h) +} + fn first_byte_at_height(h: usize) -> usize { first_node_at_height(h) * HASHSIZE } @@ -87,14 +78,14 @@ mod test { use super::*; #[test] - fn can_grow() { - let from: usize = 7; + fn can_grow_three_levels() { + let from: usize = 1; let to: usize = 15; let old_bytes = vec![42; from * HASHSIZE]; let old_flags = vec![false; from]; - let new = grow_merkle_cache( + let (new_bytes, new_flags) = grow_merkle_cache( &old_bytes, &old_flags, (from + 1).trailing_zeros() as usize, @@ -102,28 +93,101 @@ mod test { ) .unwrap(); - println!("{:?}", new); - let mut expected = vec![]; + let mut expected_bytes = vec![]; + let mut expected_flags = vec![]; // First level - expected.append(&mut vec![0; 32]); + expected_bytes.append(&mut vec![0; 32]); + expected_flags.push(true); // Second level - expected.append(&mut vec![42; 32]); - expected.append(&mut vec![0; 32]); + expected_bytes.append(&mut vec![0; 32]); + expected_bytes.append(&mut vec![0; 32]); + expected_flags.push(true); + expected_flags.push(true); // Third level - expected.append(&mut 
vec![42; 32]); - expected.append(&mut vec![42; 32]); - expected.append(&mut vec![0; 32]); - expected.append(&mut vec![0; 32]); + expected_bytes.append(&mut vec![0; 32]); + expected_bytes.append(&mut vec![0; 32]); + expected_bytes.append(&mut vec![0; 32]); + expected_bytes.append(&mut vec![0; 32]); + expected_flags.push(true); + expected_flags.push(true); + expected_flags.push(true); + expected_flags.push(true); // Fourth level - expected.append(&mut vec![0; 32]); - expected.append(&mut vec![0; 32]); - expected.append(&mut vec![0; 32]); - expected.append(&mut vec![0; 32]); - expected.append(&mut vec![0; 32]); - expected.append(&mut vec![0; 32]); - expected.append(&mut vec![0; 32]); - expected.append(&mut vec![0; 32]); + expected_bytes.append(&mut vec![42; 32]); + expected_bytes.append(&mut vec![0; 32]); + expected_bytes.append(&mut vec![0; 32]); + expected_bytes.append(&mut vec![0; 32]); + expected_bytes.append(&mut vec![0; 32]); + expected_bytes.append(&mut vec![0; 32]); + expected_bytes.append(&mut vec![0; 32]); + expected_bytes.append(&mut vec![0; 32]); + expected_flags.push(false); + expected_flags.push(true); + expected_flags.push(true); + expected_flags.push(true); + expected_flags.push(true); + expected_flags.push(true); + expected_flags.push(true); + expected_flags.push(true); - assert_eq!(expected, new); + assert_eq!(expected_bytes, new_bytes); + assert_eq!(expected_flags, new_flags); + } + + #[test] + fn can_grow_one_level() { + let from: usize = 7; + let to: usize = 15; + + let old_bytes = vec![42; from * HASHSIZE]; + let old_flags = vec![false; from]; + + let (new_bytes, new_flags) = grow_merkle_cache( + &old_bytes, + &old_flags, + (from + 1).trailing_zeros() as usize, + (to + 1).trailing_zeros() as usize, + ) + .unwrap(); + + let mut expected_bytes = vec![]; + let mut expected_flags = vec![]; + // First level + expected_bytes.append(&mut vec![0; 32]); + expected_flags.push(true); + // Second level + expected_bytes.append(&mut vec![42; 32]); + 
expected_bytes.append(&mut vec![0; 32]); + expected_flags.push(false); + expected_flags.push(true); + // Third level + expected_bytes.append(&mut vec![42; 32]); + expected_bytes.append(&mut vec![42; 32]); + expected_bytes.append(&mut vec![0; 32]); + expected_bytes.append(&mut vec![0; 32]); + expected_flags.push(false); + expected_flags.push(false); + expected_flags.push(true); + expected_flags.push(true); + // Fourth level + expected_bytes.append(&mut vec![42; 32]); + expected_bytes.append(&mut vec![42; 32]); + expected_bytes.append(&mut vec![42; 32]); + expected_bytes.append(&mut vec![42; 32]); + expected_bytes.append(&mut vec![0; 32]); + expected_bytes.append(&mut vec![0; 32]); + expected_bytes.append(&mut vec![0; 32]); + expected_bytes.append(&mut vec![0; 32]); + expected_flags.push(false); + expected_flags.push(false); + expected_flags.push(false); + expected_flags.push(false); + expected_flags.push(true); + expected_flags.push(true); + expected_flags.push(true); + expected_flags.push(true); + + assert_eq!(expected_bytes, new_bytes); + assert_eq!(expected_flags, new_flags); } } diff --git a/eth2/utils/ssz/src/cached_tree_hash/tests.rs b/eth2/utils/ssz/src/cached_tree_hash/tests.rs index d784a0889..9b8e81bc9 100644 --- a/eth2/utils/ssz/src/cached_tree_hash/tests.rs +++ b/eth2/utils/ssz/src/cached_tree_hash/tests.rs @@ -1,5 +1,5 @@ +#![cfg(test)] use super::*; -use crate::Encodable; use int_to_bytes::{int_to_bytes32, int_to_bytes8}; #[derive(Clone, Debug)] From e038bd18b59033712746de8927dc23f8ce4d2430 Mon Sep 17 00:00:00 2001 From: Paul Hauner Date: Sun, 14 Apr 2019 10:34:54 +1000 Subject: [PATCH 33/89] Add failing test for grow merkle tree --- eth2/utils/ssz/src/cached_tree_hash.rs | 24 ++++++++++++++++++-- eth2/utils/ssz/src/cached_tree_hash/impls.rs | 24 +++++++++++++++++--- eth2/utils/ssz/src/cached_tree_hash/tests.rs | 2 -- 3 files changed, 43 insertions(+), 7 deletions(-) diff --git a/eth2/utils/ssz/src/cached_tree_hash.rs 
b/eth2/utils/ssz/src/cached_tree_hash.rs index 84ef82233..42faf1211 100644 --- a/eth2/utils/ssz/src/cached_tree_hash.rs +++ b/eth2/utils/ssz/src/cached_tree_hash.rs @@ -16,6 +16,8 @@ pub enum Error { ShouldNotProduceOffsetHandler, NoFirstNode, NoBytesForRoot, + UnableToObtainSlices, + UnableToGrowMerkleTree, BytesAreNotEvenChunks(usize), NoModifiedFieldForChunk(usize), NoBytesForChunk(usize), @@ -74,6 +76,13 @@ impl TreeHashCache { item.build_tree_hash_cache() } + pub fn from_elems(cache: Vec, chunk_modified: Vec) -> Self { + Self { + cache, + chunk_modified, + } + } + pub fn from_leaves_and_subtrees( item: &T, leaves_and_subtrees: Vec, @@ -149,7 +158,7 @@ impl TreeHashCache { // Update the `chunk_modified` vec, marking all spliced-in nodes as changed. self.chunk_modified.splice(chunk_range.clone(), bools); self.cache - .splice(node_range_to_byte_range(chunk_range), bytes); + .splice(node_range_to_byte_range(&chunk_range), bytes); } pub fn maybe_update_chunk(&mut self, chunk: usize, to: &[u8]) -> Result<(), Error> { @@ -167,6 +176,13 @@ impl TreeHashCache { Ok(()) } + pub fn slices(&self, chunk_range: Range) -> Option<(&[u8], &[bool])> { + Some(( + self.cache.get(node_range_to_byte_range(&chunk_range))?, + self.chunk_modified.get(chunk_range)?, + )) + } + pub fn modify_chunk(&mut self, chunk: usize, to: &[u8]) -> Result<(), Error> { let start = chunk * BYTES_PER_CHUNK; let end = start + BYTES_PER_CHUNK; @@ -231,7 +247,7 @@ fn num_nodes(num_leaves: usize) -> usize { 2 * num_leaves - 1 } -fn node_range_to_byte_range(node_range: Range) -> Range { +fn node_range_to_byte_range(node_range: &Range) -> Range { node_range.start * HASHSIZE..node_range.end * HASHSIZE } @@ -281,6 +297,10 @@ impl OffsetHandler { }) } + pub fn node_range(&self) -> Result, Error> { + Ok(*self.offsets.first().ok_or_else(|| Error::NoFirstNode)?..self.next_node()) + } + pub fn total_nodes(&self) -> usize { self.num_internal_nodes + self.num_leaf_nodes } diff --git 
a/eth2/utils/ssz/src/cached_tree_hash/impls.rs b/eth2/utils/ssz/src/cached_tree_hash/impls.rs index dca00b6ba..2010aeb0d 100644 --- a/eth2/utils/ssz/src/cached_tree_hash/impls.rs +++ b/eth2/utils/ssz/src/cached_tree_hash/impls.rs @@ -113,9 +113,27 @@ where ) -> Result { let offset_handler = OffsetHandler::new(self, chunk)?; - if other.len().next_power_of_two() > self.len().next_power_of_two() { - // - } else if other.len().next_power_of_two() < self.len().next_power_of_two() { + if self.len().next_power_of_two() > other.len().next_power_of_two() { + // Get slices of the exsiting tree from the cache. + let (old_bytes, old_flags) = cache + .slices(offset_handler.node_range()?) + .ok_or_else(|| Error::UnableToObtainSlices)?; + + // From the existing slices build new, expanded Vecs. + let (new_bytes, new_flags) = grow_merkle_cache( + old_bytes, + old_flags, + other.len().next_power_of_two().leading_zeros() as usize, + self.len().next_power_of_two().leading_zeros() as usize, + ).ok_or_else(|| Error::UnableToGrowMerkleTree)?; + + // Create a `TreeHashCache` from the raw elements. + let expanded_cache = TreeHashCache::from_elems(new_bytes, new_flags); + + // Splice the newly created `TreeHashCache` over the existing, smaller elements. 
+ cache.splice(offset_handler.node_range()?, expanded_cache); + // + } else if self.len().next_power_of_two() < other.len().next_power_of_two() { panic!("shrinking below power of two is not implemented") } diff --git a/eth2/utils/ssz/src/cached_tree_hash/tests.rs b/eth2/utils/ssz/src/cached_tree_hash/tests.rs index 9b8e81bc9..c402fd15b 100644 --- a/eth2/utils/ssz/src/cached_tree_hash/tests.rs +++ b/eth2/utils/ssz/src/cached_tree_hash/tests.rs @@ -408,7 +408,6 @@ fn extended_u64_vec_len_within_pow_2_boundary() { test_u64_vec_modifications(original_vec, modified_vec); } -/* #[test] fn extended_u64_vec_len_outside_pow_2_boundary() { let original_vec: Vec = (0..2_u64.pow(5)).collect(); @@ -417,7 +416,6 @@ fn extended_u64_vec_len_outside_pow_2_boundary() { test_u64_vec_modifications(original_vec, modified_vec); } -*/ #[test] fn large_vec_of_u64_builds() { From 737e6b9a866beaf1974249644c3474b7168f2901 Mon Sep 17 00:00:00 2001 From: Paul Hauner Date: Sun, 14 Apr 2019 13:54:04 +1000 Subject: [PATCH 34/89] Fix failing tree hash test --- eth2/utils/ssz/src/cached_tree_hash.rs | 10 ++++- eth2/utils/ssz/src/cached_tree_hash/impls.rs | 14 +++++-- eth2/utils/ssz/src/cached_tree_hash/resize.rs | 39 ++++++++++--------- 3 files changed, 39 insertions(+), 24 deletions(-) diff --git a/eth2/utils/ssz/src/cached_tree_hash.rs b/eth2/utils/ssz/src/cached_tree_hash.rs index 42faf1211..0e6bdf986 100644 --- a/eth2/utils/ssz/src/cached_tree_hash.rs +++ b/eth2/utils/ssz/src/cached_tree_hash.rs @@ -255,6 +255,7 @@ fn node_range_to_byte_range(node_range: &Range) -> Range { pub struct OffsetHandler { num_internal_nodes: usize, pub num_leaf_nodes: usize, + first_node: usize, next_node: usize, offsets: Vec, } @@ -293,12 +294,17 @@ impl OffsetHandler { num_internal_nodes, num_leaf_nodes, offsets, + first_node: offset, next_node, }) } - pub fn node_range(&self) -> Result, Error> { - Ok(*self.offsets.first().ok_or_else(|| Error::NoFirstNode)?..self.next_node()) + pub fn height(&self) -> usize { + 
self.num_leaf_nodes.trailing_zeros() as usize + } + + pub fn node_range(&self) -> Range { + self.first_node..self.next_node } pub fn total_nodes(&self) -> usize { diff --git a/eth2/utils/ssz/src/cached_tree_hash/impls.rs b/eth2/utils/ssz/src/cached_tree_hash/impls.rs index 2010aeb0d..c55415e54 100644 --- a/eth2/utils/ssz/src/cached_tree_hash/impls.rs +++ b/eth2/utils/ssz/src/cached_tree_hash/impls.rs @@ -113,25 +113,31 @@ where ) -> Result { let offset_handler = OffsetHandler::new(self, chunk)?; + // Check to see if the length of the list has changed length beyond a power-of-two + // boundary. In such a case we need to resize the merkle tree bytes. if self.len().next_power_of_two() > other.len().next_power_of_two() { + let old_offset_handler = OffsetHandler::new(other, chunk)?; + + dbg!(old_offset_handler.node_range()); + // Get slices of the exsiting tree from the cache. let (old_bytes, old_flags) = cache - .slices(offset_handler.node_range()?) + .slices(old_offset_handler.node_range()) .ok_or_else(|| Error::UnableToObtainSlices)?; // From the existing slices build new, expanded Vecs. let (new_bytes, new_flags) = grow_merkle_cache( old_bytes, old_flags, - other.len().next_power_of_two().leading_zeros() as usize, - self.len().next_power_of_two().leading_zeros() as usize, + old_offset_handler.height(), + offset_handler.height(), ).ok_or_else(|| Error::UnableToGrowMerkleTree)?; // Create a `TreeHashCache` from the raw elements. let expanded_cache = TreeHashCache::from_elems(new_bytes, new_flags); // Splice the newly created `TreeHashCache` over the existing, smaller elements. 
- cache.splice(offset_handler.node_range()?, expanded_cache); + cache.splice(old_offset_handler.node_range(), expanded_cache); // } else if self.len().next_power_of_two() < other.len().next_power_of_two() { panic!("shrinking below power of two is not implemented") diff --git a/eth2/utils/ssz/src/cached_tree_hash/resize.rs b/eth2/utils/ssz/src/cached_tree_hash/resize.rs index bce722a5e..2cdce4827 100644 --- a/eth2/utils/ssz/src/cached_tree_hash/resize.rs +++ b/eth2/utils/ssz/src/cached_tree_hash/resize.rs @@ -7,22 +7,20 @@ pub fn grow_merkle_cache( from_height: usize, to_height: usize, ) -> Option<(Vec, Vec)> { - let to_nodes = (1 << to_height.next_power_of_two()) - 1; - // Determine the size of our new tree. It is not just a simple `1 << to_height` as there can be - // an arbitrary number of bytes in `old_bytes` leaves. - let new_byte_count = { - let additional_from_nodes = old_bytes.len() / HASHSIZE - ((1 << from_height) - 1); - ((1 << to_height + additional_from_nodes) - 1) * HASHSIZE + // an arbitrary number of nodes in `old_bytes` leaves if those leaves are subtrees. + let to_nodes = { + let old_nodes = old_bytes.len() / HASHSIZE; + let additional_nodes = old_nodes - nodes_in_tree_of_height(from_height); + nodes_in_tree_of_height(to_height) + additional_nodes }; - let mut bytes = vec![0; new_byte_count]; + let mut bytes = vec![0; to_nodes * HASHSIZE]; let mut flags = vec![true; to_nodes]; - let leaf_level = from_height - 1; + let leaf_level = from_height; - // Loop through all internal levels of the tree (skipping the final, leaves level). - for i in 0..from_height as usize { + for i in 0..=from_height as usize { // If we're on the leaf slice, grab the first byte and all the of the bytes after that. // This is required because we can have an arbitrary number of bytes at the leaf level // (e.g., the case where there are subtrees as leaves). 
@@ -36,7 +34,7 @@ pub fn grow_merkle_cache( } else { ( old_bytes.get(byte_range_at_height(i))?, - old_flags.get(node_range_at_height(i))? + old_flags.get(node_range_at_height(i))?, ) }; @@ -53,12 +51,17 @@ pub fn grow_merkle_cache( Some((bytes, flags)) } +fn nodes_in_tree_of_height(h: usize) -> usize { + 2 * (1 << h) - 1 +} + fn byte_range_at_height(h: usize) -> Range { - first_byte_at_height(h)..last_node_at_height(h) * HASHSIZE + let node_range = node_range_at_height(h); + node_range.start * HASHSIZE..node_range.end * HASHSIZE } fn node_range_at_height(h: usize) -> Range { - first_node_at_height(h)..last_node_at_height(h) + first_node_at_height(h)..last_node_at_height(h) + 1 } fn first_byte_at_height(h: usize) -> usize { @@ -70,7 +73,7 @@ fn first_node_at_height(h: usize) -> usize { } fn last_node_at_height(h: usize) -> usize { - (1 << (h + 1)) - 1 + (1 << (h + 1)) - 2 } #[cfg(test)] @@ -88,8 +91,8 @@ mod test { let (new_bytes, new_flags) = grow_merkle_cache( &old_bytes, &old_flags, - (from + 1).trailing_zeros() as usize, - (to + 1).trailing_zeros() as usize, + (from + 1).trailing_zeros() as usize - 1, + (to + 1).trailing_zeros() as usize - 1, ) .unwrap(); @@ -145,8 +148,8 @@ mod test { let (new_bytes, new_flags) = grow_merkle_cache( &old_bytes, &old_flags, - (from + 1).trailing_zeros() as usize, - (to + 1).trailing_zeros() as usize, + (from + 1).trailing_zeros() as usize - 1, + (to + 1).trailing_zeros() as usize - 1, ) .unwrap(); From 582f465ffd9ff57bcad2933a47547bcace5aa0a6 Mon Sep 17 00:00:00 2001 From: Paul Hauner Date: Sun, 14 Apr 2019 14:20:33 +1000 Subject: [PATCH 35/89] Add test for growing vec of structs --- eth2/utils/ssz/src/cached_tree_hash/resize.rs | 29 ++++++++---- eth2/utils/ssz/src/cached_tree_hash/tests.rs | 47 ++++++++++++++++++- 2 files changed, 65 insertions(+), 11 deletions(-) diff --git a/eth2/utils/ssz/src/cached_tree_hash/resize.rs b/eth2/utils/ssz/src/cached_tree_hash/resize.rs index 2cdce4827..3c2d2c407 100644 --- 
a/eth2/utils/ssz/src/cached_tree_hash/resize.rs +++ b/eth2/utils/ssz/src/cached_tree_hash/resize.rs @@ -26,7 +26,7 @@ pub fn grow_merkle_cache( // (e.g., the case where there are subtrees as leaves). // // If we're not on a leaf level, the number of nodes is fixed and known. - let (byte_slice, flag_slice) = if i == leaf_level { + let (old_byte_slice, old_flag_slice) = if i == leaf_level { ( old_bytes.get(first_byte_at_height(i)..)?, old_flags.get(first_node_at_height(i)..)?, @@ -38,14 +38,25 @@ pub fn grow_merkle_cache( ) }; - bytes - .get_mut(byte_range_at_height(i + to_height - from_height))? - .get_mut(0..byte_slice.len())? - .copy_from_slice(byte_slice); - flags - .get_mut(node_range_at_height(i + to_height - from_height))? - .get_mut(0..flag_slice.len())? - .copy_from_slice(flag_slice); + let new_i = i + to_height - from_height; + let (new_byte_slice, new_flag_slice) = if i == leaf_level { + ( + bytes.get_mut(first_byte_at_height(new_i)..)?, + flags.get_mut(first_node_at_height(new_i)..)?, + ) + } else { + ( + bytes.get_mut(byte_range_at_height(new_i))?, + flags.get_mut(node_range_at_height(new_i))?, + ) + }; + + new_byte_slice + .get_mut(0..old_byte_slice.len())? + .copy_from_slice(old_byte_slice); + new_flag_slice + .get_mut(0..old_flag_slice.len())? 
+ .copy_from_slice(old_flag_slice); } Some((bytes, flags)) diff --git a/eth2/utils/ssz/src/cached_tree_hash/tests.rs b/eth2/utils/ssz/src/cached_tree_hash/tests.rs index c402fd15b..fb6ed9080 100644 --- a/eth2/utils/ssz/src/cached_tree_hash/tests.rs +++ b/eth2/utils/ssz/src/cached_tree_hash/tests.rs @@ -472,9 +472,10 @@ fn test_inner_vec_modifications(original: Vec, modified: Vec, refe } let num_leaves = leaves.len() / HASHSIZE; - let mut expected = merkleize(leaves); - expected.splice(3 * HASHSIZE.., full_bytes); + + let num_internal_nodes = num_leaves.next_power_of_two() - 1; + expected.splice(num_internal_nodes * HASHSIZE.., full_bytes); for _ in num_leaves..num_leaves.next_power_of_two() { expected.append(&mut vec![0; HASHSIZE]); @@ -589,6 +590,48 @@ fn lengthened_vec_of_inner_within_power_of_two_boundary() { test_inner_vec_modifications(original, modified, reference_vec); } +#[test] +fn lengthened_vec_of_inner_outside_power_of_two_boundary() { + let original = vec![ + Inner { + a: 0, + b: 1, + c: 2, + d: 3, + }, + Inner { + a: 4, + b: 5, + c: 6, + d: 7, + }, + Inner { + a: 8, + b: 9, + c: 10, + d: 11, + }, + Inner { + a: 12, + b: 13, + c: 14, + d: 15, + }, + ]; + + let mut modified = original.clone(); + modified.push(Inner { + a: 16, + b: 17, + c: 18, + d: 19, + }); + + let reference_vec: Vec = (0..20).collect(); + + test_inner_vec_modifications(original, modified, reference_vec); +} + #[test] fn vec_of_inner_builds() { let numbers: Vec = (0..12).collect(); From 9bc0519092e31a834719a706d0277eb022ad3678 Mon Sep 17 00:00:00 2001 From: Paul Hauner Date: Sun, 14 Apr 2019 16:31:47 +1000 Subject: [PATCH 36/89] Add tree shrink fn --- eth2/utils/ssz/src/cached_tree_hash/resize.rs | 118 ++++++++++++++---- 1 file changed, 94 insertions(+), 24 deletions(-) diff --git a/eth2/utils/ssz/src/cached_tree_hash/resize.rs b/eth2/utils/ssz/src/cached_tree_hash/resize.rs index 3c2d2c407..0b492770f 100644 --- a/eth2/utils/ssz/src/cached_tree_hash/resize.rs +++ 
b/eth2/utils/ssz/src/cached_tree_hash/resize.rs @@ -62,6 +62,52 @@ pub fn grow_merkle_cache( Some((bytes, flags)) } +/// New vec is smaller than old vec. +pub fn shrink_merkle_cache( + from_bytes: &[u8], + from_flags: &[bool], + from_height: usize, + to_height: usize, + to_nodes: usize, +) -> Option<(Vec, Vec)> { + let mut bytes = vec![0; to_nodes * HASHSIZE]; + let mut flags = vec![true; to_nodes]; + + let leaf_level = to_height; + + for i in 0..=leaf_level as usize { + let from_i = i + from_height - to_height; + let (from_byte_slice, from_flag_slice) = if from_i == leaf_level { + ( + from_bytes.get(first_byte_at_height(from_i)..)?, + from_flags.get(first_node_at_height(from_i)..)?, + ) + } else { + ( + from_bytes.get(byte_range_at_height(from_i))?, + from_flags.get(node_range_at_height(from_i))?, + ) + }; + + let (to_byte_slice, to_flag_slice) = if i == leaf_level { + ( + bytes.get_mut(first_byte_at_height(i)..)?, + flags.get_mut(first_node_at_height(i)..)?, + ) + } else { + ( + bytes.get_mut(byte_range_at_height(i))?, + flags.get_mut(node_range_at_height(i))?, + ) + }; + + to_byte_slice.copy_from_slice(from_byte_slice.get(0..to_byte_slice.len())?); + to_flag_slice.copy_from_slice(from_flag_slice.get(0..to_flag_slice.len())?); + } + + Some((bytes, flags)) +} + fn nodes_in_tree_of_height(h: usize) -> usize { 2 * (1 << h) - 1 } @@ -92,18 +138,18 @@ mod test { use super::*; #[test] - fn can_grow_three_levels() { - let from: usize = 1; - let to: usize = 15; + fn can_grow_and_shrink_three_levels() { + let small: usize = 1; + let big: usize = 15; - let old_bytes = vec![42; from * HASHSIZE]; - let old_flags = vec![false; from]; + let original_bytes = vec![42; small * HASHSIZE]; + let original_flags = vec![false; small]; - let (new_bytes, new_flags) = grow_merkle_cache( - &old_bytes, - &old_flags, - (from + 1).trailing_zeros() as usize - 1, - (to + 1).trailing_zeros() as usize - 1, + let (grown_bytes, grown_flags) = grow_merkle_cache( + &original_bytes, + 
&original_flags, + (small + 1).trailing_zeros() as usize - 1, + (big + 1).trailing_zeros() as usize - 1, ) .unwrap(); @@ -144,23 +190,35 @@ mod test { expected_flags.push(true); expected_flags.push(true); - assert_eq!(expected_bytes, new_bytes); - assert_eq!(expected_flags, new_flags); + assert_eq!(expected_bytes, grown_bytes); + assert_eq!(expected_flags, grown_flags); + + let (shrunk_bytes, shrunk_flags) = shrink_merkle_cache( + &grown_bytes, + &grown_flags, + (big + 1).trailing_zeros() as usize - 1, + (small + 1).trailing_zeros() as usize - 1, + small, + ) + .unwrap(); + + assert_eq!(original_bytes, shrunk_bytes); + assert_eq!(original_flags, shrunk_flags); } #[test] - fn can_grow_one_level() { - let from: usize = 7; - let to: usize = 15; + fn can_grow_and_shrink_one_level() { + let small: usize = 7; + let big: usize = 15; - let old_bytes = vec![42; from * HASHSIZE]; - let old_flags = vec![false; from]; + let original_bytes = vec![42; small * HASHSIZE]; + let original_flags = vec![false; small]; - let (new_bytes, new_flags) = grow_merkle_cache( - &old_bytes, - &old_flags, - (from + 1).trailing_zeros() as usize - 1, - (to + 1).trailing_zeros() as usize - 1, + let (grown_bytes, grown_flags) = grow_merkle_cache( + &original_bytes, + &original_flags, + (small + 1).trailing_zeros() as usize - 1, + (big + 1).trailing_zeros() as usize - 1, ) .unwrap(); @@ -201,7 +259,19 @@ mod test { expected_flags.push(true); expected_flags.push(true); - assert_eq!(expected_bytes, new_bytes); - assert_eq!(expected_flags, new_flags); + assert_eq!(expected_bytes, grown_bytes); + assert_eq!(expected_flags, grown_flags); + + let (shrunk_bytes, shrunk_flags) = shrink_merkle_cache( + &grown_bytes, + &grown_flags, + (big + 1).trailing_zeros() as usize - 1, + (small + 1).trailing_zeros() as usize - 1, + small, + ) + .unwrap(); + + assert_eq!(original_bytes, shrunk_bytes); + assert_eq!(original_flags, shrunk_flags); } } From da74c4ce74e4fff958434a01200aad6b25478c88 Mon Sep 17 00:00:00 2001 
From: Paul Hauner Date: Sun, 14 Apr 2019 16:50:00 +1000 Subject: [PATCH 37/89] Add tree shrinking for u64 vec --- eth2/utils/ssz/src/cached_tree_hash/impls.rs | 38 +++++++++++--------- eth2/utils/ssz/src/cached_tree_hash/tests.rs | 9 +++++ 2 files changed, 31 insertions(+), 16 deletions(-) diff --git a/eth2/utils/ssz/src/cached_tree_hash/impls.rs b/eth2/utils/ssz/src/cached_tree_hash/impls.rs index c55415e54..f16e6a62b 100644 --- a/eth2/utils/ssz/src/cached_tree_hash/impls.rs +++ b/eth2/utils/ssz/src/cached_tree_hash/impls.rs @@ -1,4 +1,4 @@ -use super::resize::grow_merkle_cache; +use super::resize::{grow_merkle_cache, shrink_merkle_cache}; use super::*; use crate::ssz_encode; @@ -112,35 +112,41 @@ where chunk: usize, ) -> Result { let offset_handler = OffsetHandler::new(self, chunk)?; + let old_offset_handler = OffsetHandler::new(other, chunk)?; - // Check to see if the length of the list has changed length beyond a power-of-two - // boundary. In such a case we need to resize the merkle tree bytes. - if self.len().next_power_of_two() > other.len().next_power_of_two() { + if offset_handler.num_leaf_nodes != old_offset_handler.num_leaf_nodes { let old_offset_handler = OffsetHandler::new(other, chunk)?; - dbg!(old_offset_handler.node_range()); - // Get slices of the exsiting tree from the cache. let (old_bytes, old_flags) = cache .slices(old_offset_handler.node_range()) .ok_or_else(|| Error::UnableToObtainSlices)?; - // From the existing slices build new, expanded Vecs. - let (new_bytes, new_flags) = grow_merkle_cache( - old_bytes, - old_flags, - old_offset_handler.height(), - offset_handler.height(), - ).ok_or_else(|| Error::UnableToGrowMerkleTree)?; + let (new_bytes, new_flags) = + if offset_handler.num_leaf_nodes > old_offset_handler.num_leaf_nodes { + grow_merkle_cache( + old_bytes, + old_flags, + old_offset_handler.height(), + offset_handler.height(), + ) + .ok_or_else(|| Error::UnableToGrowMerkleTree)? 
+ } else { + shrink_merkle_cache( + old_bytes, + old_flags, + old_offset_handler.height(), + offset_handler.height(), + offset_handler.total_nodes(), + ) + .ok_or_else(|| Error::UnableToGrowMerkleTree)? + }; // Create a `TreeHashCache` from the raw elements. let expanded_cache = TreeHashCache::from_elems(new_bytes, new_flags); // Splice the newly created `TreeHashCache` over the existing, smaller elements. cache.splice(old_offset_handler.node_range(), expanded_cache); - // - } else if self.len().next_power_of_two() < other.len().next_power_of_two() { - panic!("shrinking below power of two is not implemented") } match T::item_type() { diff --git a/eth2/utils/ssz/src/cached_tree_hash/tests.rs b/eth2/utils/ssz/src/cached_tree_hash/tests.rs index fb6ed9080..22d01ec1a 100644 --- a/eth2/utils/ssz/src/cached_tree_hash/tests.rs +++ b/eth2/utils/ssz/src/cached_tree_hash/tests.rs @@ -396,6 +396,15 @@ fn shortened_u64_vec_len_within_pow_2_boundary() { test_u64_vec_modifications(original_vec, modified_vec); } +#[test] +fn shortened_u64_vec_len_outside_pow_2_boundary() { + let original_vec: Vec = (0..2_u64.pow(6)).collect(); + + let modified_vec: Vec = (0..2_u64.pow(5)).collect(); + + test_u64_vec_modifications(original_vec, modified_vec); +} + #[test] fn extended_u64_vec_len_within_pow_2_boundary() { let n: u64 = 2_u64.pow(5) - 2; From 0632a00a48f6da3cd9a3e7ac1df332e6b12aee92 Mon Sep 17 00:00:00 2001 From: Paul Hauner Date: Sun, 14 Apr 2019 18:50:12 +1000 Subject: [PATCH 38/89] Fix failing test for shrinking vec of structs --- eth2/utils/ssz/src/cached_tree_hash.rs | 7 ++- eth2/utils/ssz/src/cached_tree_hash/impls.rs | 14 +++--- eth2/utils/ssz/src/cached_tree_hash/resize.rs | 9 ++-- eth2/utils/ssz/src/cached_tree_hash/tests.rs | 43 +++++++++++++++++++ 4 files changed, 59 insertions(+), 14 deletions(-) diff --git a/eth2/utils/ssz/src/cached_tree_hash.rs b/eth2/utils/ssz/src/cached_tree_hash.rs index 0e6bdf986..7a722766c 100644 --- a/eth2/utils/ssz/src/cached_tree_hash.rs +++ 
b/eth2/utils/ssz/src/cached_tree_hash.rs @@ -18,6 +18,7 @@ pub enum Error { NoBytesForRoot, UnableToObtainSlices, UnableToGrowMerkleTree, + UnableToShrinkMerkleTree, BytesAreNotEvenChunks(usize), NoModifiedFieldForChunk(usize), NoBytesForChunk(usize), @@ -303,10 +304,14 @@ impl OffsetHandler { self.num_leaf_nodes.trailing_zeros() as usize } - pub fn node_range(&self) -> Range { + pub fn chunk_range(&self) -> Range { self.first_node..self.next_node } + pub fn total_chunks(&self) -> usize { + self.next_node - self.first_node + } + pub fn total_nodes(&self) -> usize { self.num_internal_nodes + self.num_leaf_nodes } diff --git a/eth2/utils/ssz/src/cached_tree_hash/impls.rs b/eth2/utils/ssz/src/cached_tree_hash/impls.rs index f16e6a62b..0377649cb 100644 --- a/eth2/utils/ssz/src/cached_tree_hash/impls.rs +++ b/eth2/utils/ssz/src/cached_tree_hash/impls.rs @@ -119,7 +119,7 @@ where // Get slices of the exsiting tree from the cache. let (old_bytes, old_flags) = cache - .slices(old_offset_handler.node_range()) + .slices(old_offset_handler.chunk_range()) .ok_or_else(|| Error::UnableToObtainSlices)?; let (new_bytes, new_flags) = @@ -137,16 +137,16 @@ where old_flags, old_offset_handler.height(), offset_handler.height(), - offset_handler.total_nodes(), + offset_handler.total_chunks(), ) - .ok_or_else(|| Error::UnableToGrowMerkleTree)? + .ok_or_else(|| Error::UnableToShrinkMerkleTree)? }; // Create a `TreeHashCache` from the raw elements. - let expanded_cache = TreeHashCache::from_elems(new_bytes, new_flags); + let modified_cache = TreeHashCache::from_elems(new_bytes, new_flags); - // Splice the newly created `TreeHashCache` over the existing, smaller elements. - cache.splice(old_offset_handler.node_range(), expanded_cache); + // Splice the newly created `TreeHashCache` over the existing elements. 
+ cache.splice(old_offset_handler.chunk_range(), modified_cache); } match T::item_type() { @@ -208,8 +208,6 @@ where for (&parent, children) in offset_handler.iter_internal_nodes().rev() { if cache.either_modified(children)? { - dbg!(parent); - dbg!(children); cache.modify_chunk(parent, &cache.hash_children(children)?)?; } } diff --git a/eth2/utils/ssz/src/cached_tree_hash/resize.rs b/eth2/utils/ssz/src/cached_tree_hash/resize.rs index 0b492770f..44b3f0ea5 100644 --- a/eth2/utils/ssz/src/cached_tree_hash/resize.rs +++ b/eth2/utils/ssz/src/cached_tree_hash/resize.rs @@ -73,11 +73,10 @@ pub fn shrink_merkle_cache( let mut bytes = vec![0; to_nodes * HASHSIZE]; let mut flags = vec![true; to_nodes]; - let leaf_level = to_height; - - for i in 0..=leaf_level as usize { + for i in 0..=to_height as usize { let from_i = i + from_height - to_height; - let (from_byte_slice, from_flag_slice) = if from_i == leaf_level { + + let (from_byte_slice, from_flag_slice) = if from_i == from_height { ( from_bytes.get(first_byte_at_height(from_i)..)?, from_flags.get(first_node_at_height(from_i)..)?, @@ -89,7 +88,7 @@ pub fn shrink_merkle_cache( ) }; - let (to_byte_slice, to_flag_slice) = if i == leaf_level { + let (to_byte_slice, to_flag_slice) = if i == to_height { ( bytes.get_mut(first_byte_at_height(i)..)?, flags.get_mut(first_node_at_height(i)..)?, diff --git a/eth2/utils/ssz/src/cached_tree_hash/tests.rs b/eth2/utils/ssz/src/cached_tree_hash/tests.rs index 22d01ec1a..f09fac419 100644 --- a/eth2/utils/ssz/src/cached_tree_hash/tests.rs +++ b/eth2/utils/ssz/src/cached_tree_hash/tests.rs @@ -563,6 +563,49 @@ fn shortened_vec_of_inner_within_power_of_two_boundary() { test_inner_vec_modifications(original, modified, reference_vec); } +#[test] +fn shortened_vec_of_inner_outside_power_of_two_boundary() { + let original = vec![ + Inner { + a: 0, + b: 1, + c: 2, + d: 3, + }, + Inner { + a: 4, + b: 5, + c: 6, + d: 7, + }, + Inner { + a: 8, + b: 9, + c: 10, + d: 11, + }, + Inner { + a: 12, + b: 
13, + c: 14, + d: 15, + }, + Inner { + a: 16, + b: 17, + c: 18, + d: 19, + }, + ]; + + let mut modified = original.clone(); + modified.pop(); // remove the last element from the list. + + let reference_vec: Vec = (0..16).collect(); + + test_inner_vec_modifications(original, modified, reference_vec); +} + #[test] fn lengthened_vec_of_inner_within_power_of_two_boundary() { let original = vec![ From ab78a15313f3604c8d0116bdbda2ea81bb7750b7 Mon Sep 17 00:00:00 2001 From: Paul Hauner Date: Sun, 14 Apr 2019 21:39:36 +1000 Subject: [PATCH 39/89] Add mix-in-len to cached tree hash --- eth2/utils/ssz/src/cached_tree_hash.rs | 14 ++++++++++++++ eth2/utils/ssz/src/cached_tree_hash/impls.rs | 6 ++++++ eth2/utils/ssz/src/cached_tree_hash/tests.rs | 13 ++++++++++++- 3 files changed, 32 insertions(+), 1 deletion(-) diff --git a/eth2/utils/ssz/src/cached_tree_hash.rs b/eth2/utils/ssz/src/cached_tree_hash.rs index 7a722766c..8a7b07f50 100644 --- a/eth2/utils/ssz/src/cached_tree_hash.rs +++ b/eth2/utils/ssz/src/cached_tree_hash.rs @@ -1,4 +1,5 @@ use hashing::hash; +use int_to_bytes::int_to_bytes32; use std::fmt::Debug; use std::iter::Iterator; use std::ops::Range; @@ -231,6 +232,15 @@ impl TreeHashCache { Ok(hash(&child_bytes)) } + pub fn mix_in_length(&self, chunk: usize, length: usize) -> Result, Error> { + let mut bytes = Vec::with_capacity(2 * BYTES_PER_CHUNK); + + bytes.append(&mut self.get_chunk(chunk)?.to_vec()); + bytes.append(&mut int_to_bytes32(length as u64)); + + Ok(hash(&bytes)) + } + pub fn into_merkle_tree(self) -> Vec { self.cache } @@ -300,6 +310,10 @@ impl OffsetHandler { }) } + pub fn root(&self) -> usize { + self.first_node + } + pub fn height(&self) -> usize { self.num_leaf_nodes.trailing_zeros() as usize } diff --git a/eth2/utils/ssz/src/cached_tree_hash/impls.rs b/eth2/utils/ssz/src/cached_tree_hash/impls.rs index 0377649cb..558b4dde5 100644 --- a/eth2/utils/ssz/src/cached_tree_hash/impls.rs +++ b/eth2/utils/ssz/src/cached_tree_hash/impls.rs @@ -212,6 +212,12 
@@ where } } + // If the root node or the length has changed, mix in the length of the list. + let root_node = offset_handler.root(); + if cache.changed(root_node)? | (self.len() != other.len()) { + cache.modify_chunk(root_node, &cache.mix_in_length(root_node, self.len())?)?; + } + Ok(offset_handler.next_node()) } } diff --git a/eth2/utils/ssz/src/cached_tree_hash/tests.rs b/eth2/utils/ssz/src/cached_tree_hash/tests.rs index f09fac419..b85c16587 100644 --- a/eth2/utils/ssz/src/cached_tree_hash/tests.rs +++ b/eth2/utils/ssz/src/cached_tree_hash/tests.rs @@ -343,6 +343,13 @@ fn outer_builds() { assert_eq!(merkle, cache); } +fn mix_in_length(root: &mut [u8], len: usize) { + let mut bytes = root.to_vec(); + bytes.append(&mut int_to_bytes32(len as u64)); + + root.copy_from_slice(&hash(&bytes)); +} + /// Generic test that covers: /// /// 1. Produce a new cache from `original`. @@ -367,7 +374,9 @@ fn test_u64_vec_modifications(original: Vec, modified: Vec) { data.append(&mut int_to_bytes8(*i)); } let data = sanitise_bytes(data); - let expected = merkleize(data); + let mut expected = merkleize(data); + + mix_in_length(&mut expected[0..HASHSIZE], modified.len()); assert_eq!(expected, modified_cache); } @@ -490,6 +499,8 @@ fn test_inner_vec_modifications(original: Vec, modified: Vec, refe expected.append(&mut vec![0; HASHSIZE]); } + mix_in_length(&mut expected[0..HASHSIZE], modified.len()); + // Compare the cached tree to the reference tree. 
assert_trees_eq(&expected, &modified_cache); } From 7132ee59c0789c623896cd8439648f2fb24a0a6e Mon Sep 17 00:00:00 2001 From: Paul Hauner Date: Mon, 15 Apr 2019 09:06:19 +1000 Subject: [PATCH 40/89] Rename OffsetHandler -> BTreeOverlay --- eth2/utils/ssz/src/cached_tree_hash.rs | 8 ++++---- eth2/utils/ssz/src/cached_tree_hash/impls.rs | 12 ++++++------ eth2/utils/ssz/src/cached_tree_hash/tests.rs | 4 ++-- 3 files changed, 12 insertions(+), 12 deletions(-) diff --git a/eth2/utils/ssz/src/cached_tree_hash.rs b/eth2/utils/ssz/src/cached_tree_hash.rs index 8a7b07f50..f7d18c57c 100644 --- a/eth2/utils/ssz/src/cached_tree_hash.rs +++ b/eth2/utils/ssz/src/cached_tree_hash.rs @@ -14,7 +14,7 @@ const MERKLE_HASH_CHUNCK: usize = 2 * BYTES_PER_CHUNK; #[derive(Debug, PartialEq, Clone)] pub enum Error { - ShouldNotProduceOffsetHandler, + ShouldNotProduceBTreeOverlay, NoFirstNode, NoBytesForRoot, UnableToObtainSlices, @@ -92,7 +92,7 @@ impl TreeHashCache { where T: CachedTreeHash, { - let offset_handler = OffsetHandler::new(item, 0)?; + let offset_handler = BTreeOverlay::new(item, 0)?; // Note how many leaves were provided. If is not a power-of-two, we'll need to pad it out // later. 
@@ -263,7 +263,7 @@ fn node_range_to_byte_range(node_range: &Range) -> Range { } #[derive(Debug)] -pub struct OffsetHandler { +pub struct BTreeOverlay { num_internal_nodes: usize, pub num_leaf_nodes: usize, first_node: usize, @@ -271,7 +271,7 @@ pub struct OffsetHandler { offsets: Vec, } -impl OffsetHandler { +impl BTreeOverlay { pub fn new(item: &T, initial_offset: usize) -> Result where T: CachedTreeHash, diff --git a/eth2/utils/ssz/src/cached_tree_hash/impls.rs b/eth2/utils/ssz/src/cached_tree_hash/impls.rs index 558b4dde5..26905c667 100644 --- a/eth2/utils/ssz/src/cached_tree_hash/impls.rs +++ b/eth2/utils/ssz/src/cached_tree_hash/impls.rs @@ -19,7 +19,7 @@ impl CachedTreeHash for u64 { } fn offsets(&self) -> Result, Error> { - Err(Error::ShouldNotProduceOffsetHandler) + Err(Error::ShouldNotProduceBTreeOverlay) } fn num_child_nodes(&self) -> usize { @@ -78,7 +78,7 @@ where let mut offsets = vec![]; for item in self { - offsets.push(OffsetHandler::new(item, 0)?.total_nodes()) + offsets.push(BTreeOverlay::new(item, 0)?.total_nodes()) } offsets @@ -111,11 +111,11 @@ where cache: &mut TreeHashCache, chunk: usize, ) -> Result { - let offset_handler = OffsetHandler::new(self, chunk)?; - let old_offset_handler = OffsetHandler::new(other, chunk)?; + let offset_handler = BTreeOverlay::new(self, chunk)?; + let old_offset_handler = BTreeOverlay::new(other, chunk)?; if offset_handler.num_leaf_nodes != old_offset_handler.num_leaf_nodes { - let old_offset_handler = OffsetHandler::new(other, chunk)?; + let old_offset_handler = BTreeOverlay::new(other, chunk)?; // Get slices of the exsiting tree from the cache. let (old_bytes, old_flags) = cache @@ -180,7 +180,7 @@ where (Some(old), None) => { // Splice out the entire tree of the removed node, replacing it with a // single padding node. 
- let end_chunk = OffsetHandler::new(old, start_chunk)?.next_node(); + let end_chunk = BTreeOverlay::new(old, start_chunk)?.next_node(); cache.splice( start_chunk..end_chunk, diff --git a/eth2/utils/ssz/src/cached_tree_hash/tests.rs b/eth2/utils/ssz/src/cached_tree_hash/tests.rs index b85c16587..e6e2b1754 100644 --- a/eth2/utils/ssz/src/cached_tree_hash/tests.rs +++ b/eth2/utils/ssz/src/cached_tree_hash/tests.rs @@ -77,7 +77,7 @@ impl CachedTreeHash for Inner { cache: &mut TreeHashCache, chunk: usize, ) -> Result { - let offset_handler = OffsetHandler::new(self, chunk)?; + let offset_handler = BTreeOverlay::new(self, chunk)?; // Skip past the internal nodes and update any changed leaf nodes. { @@ -166,7 +166,7 @@ impl CachedTreeHash for Outer { cache: &mut TreeHashCache, chunk: usize, ) -> Result { - let offset_handler = OffsetHandler::new(self, chunk)?; + let offset_handler = BTreeOverlay::new(self, chunk)?; // Skip past the internal nodes and update any changed leaf nodes. { From 0b5c10212d7f08d42f5d7bfe73a640fc2cb431ed Mon Sep 17 00:00:00 2001 From: Paul Hauner Date: Mon, 15 Apr 2019 11:14:30 +1000 Subject: [PATCH 41/89] Move tree_hash from ssz into own crate --- Cargo.toml | 1 + eth2/utils/ssz/src/cached_tree_hash.rs | 439 ------------------ eth2/utils/ssz/src/lib.rs | 1 - eth2/utils/tree_hash/Cargo.toml | 11 + eth2/utils/tree_hash/src/btree_overlay.rs | 0 eth2/utils/tree_hash/src/cached_tree_hash.rs | 193 ++++++++ .../src}/impls.rs | 2 +- eth2/utils/tree_hash/src/lib.rs | 249 ++++++++++ .../src}/resize.rs | 0 .../tests}/tests.rs | 8 +- 10 files changed, 461 insertions(+), 443 deletions(-) delete mode 100644 eth2/utils/ssz/src/cached_tree_hash.rs create mode 100644 eth2/utils/tree_hash/Cargo.toml create mode 100644 eth2/utils/tree_hash/src/btree_overlay.rs create mode 100644 eth2/utils/tree_hash/src/cached_tree_hash.rs rename eth2/utils/{ssz/src/cached_tree_hash => tree_hash/src}/impls.rs (99%) create mode 100644 eth2/utils/tree_hash/src/lib.rs rename 
eth2/utils/{ssz/src/cached_tree_hash => tree_hash/src}/resize.rs (100%) rename eth2/utils/{ssz/src/cached_tree_hash => tree_hash/tests}/tests.rs (99%) diff --git a/Cargo.toml b/Cargo.toml index 5c9593f5a..2574d328f 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -18,6 +18,7 @@ members = [ "eth2/utils/ssz", "eth2/utils/ssz_derive", "eth2/utils/swap_or_not_shuffle", + "eth2/utils/tree_hash", "eth2/utils/fisher_yates_shuffle", "eth2/utils/test_random_derive", "beacon_node", diff --git a/eth2/utils/ssz/src/cached_tree_hash.rs b/eth2/utils/ssz/src/cached_tree_hash.rs deleted file mode 100644 index f7d18c57c..000000000 --- a/eth2/utils/ssz/src/cached_tree_hash.rs +++ /dev/null @@ -1,439 +0,0 @@ -use hashing::hash; -use int_to_bytes::int_to_bytes32; -use std::fmt::Debug; -use std::iter::Iterator; -use std::ops::Range; - -mod impls; -mod resize; -mod tests; - -const BYTES_PER_CHUNK: usize = 32; -const HASHSIZE: usize = 32; -const MERKLE_HASH_CHUNCK: usize = 2 * BYTES_PER_CHUNK; - -#[derive(Debug, PartialEq, Clone)] -pub enum Error { - ShouldNotProduceBTreeOverlay, - NoFirstNode, - NoBytesForRoot, - UnableToObtainSlices, - UnableToGrowMerkleTree, - UnableToShrinkMerkleTree, - BytesAreNotEvenChunks(usize), - NoModifiedFieldForChunk(usize), - NoBytesForChunk(usize), -} - -#[derive(Debug, PartialEq, Clone)] -pub enum ItemType { - Basic, - List, - Composite, -} - -// TODO: remove debug requirement. -pub trait CachedTreeHash: Debug { - fn item_type() -> ItemType; - - fn build_tree_hash_cache(&self) -> Result; - - /// Return the number of bytes when this element is encoded as raw SSZ _without_ length - /// prefixes. 
- fn num_bytes(&self) -> usize; - - fn offsets(&self) -> Result, Error>; - - fn num_child_nodes(&self) -> usize; - - fn packed_encoding(&self) -> Vec; - - fn packing_factor() -> usize; - - fn cached_hash_tree_root( - &self, - other: &Item, - cache: &mut TreeHashCache, - chunk: usize, - ) -> Result; -} - -#[derive(Debug, PartialEq, Clone)] -pub struct TreeHashCache { - cache: Vec, - chunk_modified: Vec, -} - -impl Into> for TreeHashCache { - fn into(self) -> Vec { - self.cache - } -} - -impl TreeHashCache { - pub fn new(item: &T) -> Result - where - T: CachedTreeHash, - { - item.build_tree_hash_cache() - } - - pub fn from_elems(cache: Vec, chunk_modified: Vec) -> Self { - Self { - cache, - chunk_modified, - } - } - - pub fn from_leaves_and_subtrees( - item: &T, - leaves_and_subtrees: Vec, - ) -> Result - where - T: CachedTreeHash, - { - let offset_handler = BTreeOverlay::new(item, 0)?; - - // Note how many leaves were provided. If is not a power-of-two, we'll need to pad it out - // later. - let num_provided_leaf_nodes = leaves_and_subtrees.len(); - - // Allocate enough bytes to store the internal nodes and the leaves and subtrees, then fill - // all the to-be-built internal nodes with zeros and append the leaves and subtrees. - let internal_node_bytes = offset_handler.num_internal_nodes * BYTES_PER_CHUNK; - let leaves_and_subtrees_bytes = leaves_and_subtrees - .iter() - .fold(0, |acc, t| acc + t.bytes_len()); - let mut cache = Vec::with_capacity(leaves_and_subtrees_bytes + internal_node_bytes); - cache.resize(internal_node_bytes, 0); - - // Allocate enough bytes to store all the leaves. - let mut leaves = Vec::with_capacity(offset_handler.num_leaf_nodes * HASHSIZE); - - // Iterate through all of the leaves/subtrees, adding their root as a leaf node and then - // concatenating their merkle trees. - for t in leaves_and_subtrees { - leaves.append(&mut t.root()?); - cache.append(&mut t.into_merkle_tree()); - } - - // Pad the leaves to an even power-of-two, using zeros. 
- pad_for_leaf_count(num_provided_leaf_nodes, &mut cache); - - // Merkleize the leaves, then split the leaf nodes off them. Then, replace all-zeros - // internal nodes created earlier with the internal nodes generated by `merkleize`. - let mut merkleized = merkleize(leaves); - merkleized.split_off(internal_node_bytes); - cache.splice(0..internal_node_bytes, merkleized); - - Ok(Self { - chunk_modified: vec![false; cache.len() / BYTES_PER_CHUNK], - cache, - }) - } - - pub fn from_bytes(bytes: Vec, initial_modified_state: bool) -> Result { - if bytes.len() % BYTES_PER_CHUNK > 0 { - return Err(Error::BytesAreNotEvenChunks(bytes.len())); - } - - Ok(Self { - chunk_modified: vec![initial_modified_state; bytes.len() / BYTES_PER_CHUNK], - cache: bytes, - }) - } - - pub fn bytes_len(&self) -> usize { - self.cache.len() - } - - pub fn root(&self) -> Result, Error> { - self.cache - .get(0..HASHSIZE) - .ok_or_else(|| Error::NoBytesForRoot) - .and_then(|slice| Ok(slice.to_vec())) - } - - pub fn splice(&mut self, chunk_range: Range, replace_with: Self) { - let (bytes, bools) = replace_with.into_components(); - - // Update the `chunk_modified` vec, marking all spliced-in nodes as changed. - self.chunk_modified.splice(chunk_range.clone(), bools); - self.cache - .splice(node_range_to_byte_range(&chunk_range), bytes); - } - - pub fn maybe_update_chunk(&mut self, chunk: usize, to: &[u8]) -> Result<(), Error> { - let start = chunk * BYTES_PER_CHUNK; - let end = start + BYTES_PER_CHUNK; - - if !self.chunk_equals(chunk, to)? { - self.cache - .get_mut(start..end) - .ok_or_else(|| Error::NoModifiedFieldForChunk(chunk))? 
- .copy_from_slice(to); - self.chunk_modified[chunk] = true; - } - - Ok(()) - } - - pub fn slices(&self, chunk_range: Range) -> Option<(&[u8], &[bool])> { - Some(( - self.cache.get(node_range_to_byte_range(&chunk_range))?, - self.chunk_modified.get(chunk_range)?, - )) - } - - pub fn modify_chunk(&mut self, chunk: usize, to: &[u8]) -> Result<(), Error> { - let start = chunk * BYTES_PER_CHUNK; - let end = start + BYTES_PER_CHUNK; - - self.cache - .get_mut(start..end) - .ok_or_else(|| Error::NoBytesForChunk(chunk))? - .copy_from_slice(to); - - self.chunk_modified[chunk] = true; - - Ok(()) - } - - pub fn get_chunk(&self, chunk: usize) -> Result<&[u8], Error> { - let start = chunk * BYTES_PER_CHUNK; - let end = start + BYTES_PER_CHUNK; - - Ok(self - .cache - .get(start..end) - .ok_or_else(|| Error::NoModifiedFieldForChunk(chunk))?) - } - - pub fn chunk_equals(&mut self, chunk: usize, other: &[u8]) -> Result { - Ok(self.get_chunk(chunk)? == other) - } - - pub fn changed(&self, chunk: usize) -> Result { - self.chunk_modified - .get(chunk) - .cloned() - .ok_or_else(|| Error::NoModifiedFieldForChunk(chunk)) - } - - pub fn either_modified(&self, children: (&usize, &usize)) -> Result { - Ok(self.changed(*children.0)? | self.changed(*children.1)?) 
- } - - pub fn hash_children(&self, children: (&usize, &usize)) -> Result, Error> { - let mut child_bytes = Vec::with_capacity(BYTES_PER_CHUNK * 2); - child_bytes.append(&mut self.get_chunk(*children.0)?.to_vec()); - child_bytes.append(&mut self.get_chunk(*children.1)?.to_vec()); - - Ok(hash(&child_bytes)) - } - - pub fn mix_in_length(&self, chunk: usize, length: usize) -> Result, Error> { - let mut bytes = Vec::with_capacity(2 * BYTES_PER_CHUNK); - - bytes.append(&mut self.get_chunk(chunk)?.to_vec()); - bytes.append(&mut int_to_bytes32(length as u64)); - - Ok(hash(&bytes)) - } - - pub fn into_merkle_tree(self) -> Vec { - self.cache - } - - pub fn into_components(self) -> (Vec, Vec) { - (self.cache, self.chunk_modified) - } -} - -fn children(parent: usize) -> (usize, usize) { - ((2 * parent + 1), (2 * parent + 2)) -} - -fn num_nodes(num_leaves: usize) -> usize { - 2 * num_leaves - 1 -} - -fn node_range_to_byte_range(node_range: &Range) -> Range { - node_range.start * HASHSIZE..node_range.end * HASHSIZE -} - -#[derive(Debug)] -pub struct BTreeOverlay { - num_internal_nodes: usize, - pub num_leaf_nodes: usize, - first_node: usize, - next_node: usize, - offsets: Vec, -} - -impl BTreeOverlay { - pub fn new(item: &T, initial_offset: usize) -> Result - where - T: CachedTreeHash, - { - Self::from_lengths(initial_offset, item.offsets()?) - } - - fn from_lengths(offset: usize, mut lengths: Vec) -> Result { - // Extend it to the next power-of-two, if it is not already. 
- let num_leaf_nodes = if lengths.len().is_power_of_two() { - lengths.len() - } else { - let num_leaf_nodes = lengths.len().next_power_of_two(); - lengths.resize(num_leaf_nodes, 1); - num_leaf_nodes - }; - - let num_nodes = num_nodes(num_leaf_nodes); - let num_internal_nodes = num_nodes - num_leaf_nodes; - - let mut offsets = Vec::with_capacity(num_nodes); - offsets.append(&mut (offset..offset + num_internal_nodes).collect()); - - let mut next_node = num_internal_nodes + offset; - for i in 0..num_leaf_nodes { - offsets.push(next_node); - next_node += lengths[i]; - } - - Ok(Self { - num_internal_nodes, - num_leaf_nodes, - offsets, - first_node: offset, - next_node, - }) - } - - pub fn root(&self) -> usize { - self.first_node - } - - pub fn height(&self) -> usize { - self.num_leaf_nodes.trailing_zeros() as usize - } - - pub fn chunk_range(&self) -> Range { - self.first_node..self.next_node - } - - pub fn total_chunks(&self) -> usize { - self.next_node - self.first_node - } - - pub fn total_nodes(&self) -> usize { - self.num_internal_nodes + self.num_leaf_nodes - } - - pub fn first_leaf_node(&self) -> Result { - self.offsets - .get(self.num_internal_nodes) - .cloned() - .ok_or_else(|| Error::NoFirstNode) - } - - pub fn next_node(&self) -> usize { - self.next_node - } - - /// Returns an iterator visiting each internal node, providing the left and right child chunks - /// for the node. - pub fn iter_internal_nodes<'a>( - &'a self, - ) -> impl DoubleEndedIterator { - let internal_nodes = &self.offsets[0..self.num_internal_nodes]; - - internal_nodes.iter().enumerate().map(move |(i, parent)| { - let children = children(i); - ( - parent, - (&self.offsets[children.0], &self.offsets[children.1]), - ) - }) - } - - /// Returns an iterator visiting each leaf node, providing the chunk for that node. 
- pub fn iter_leaf_nodes<'a>(&'a self) -> impl DoubleEndedIterator { - let leaf_nodes = &self.offsets[self.num_internal_nodes..]; - - leaf_nodes.iter() - } -} - -/// Split `values` into a power-of-two, identical-length chunks (padding with `0`) and merkleize -/// them, returning the entire merkle tree. -/// -/// The root hash is `merkleize(values)[0..BYTES_PER_CHUNK]`. -pub fn merkleize(values: Vec) -> Vec { - let values = sanitise_bytes(values); - - let leaves = values.len() / HASHSIZE; - - if leaves == 0 { - panic!("No full leaves"); - } - - if !leaves.is_power_of_two() { - panic!("leaves is not power of two"); - } - - let mut o: Vec = vec![0; (num_nodes(leaves) - leaves) * HASHSIZE]; - o.append(&mut values.to_vec()); - - let mut i = o.len(); - let mut j = o.len() - values.len(); - - while i >= MERKLE_HASH_CHUNCK { - i -= MERKLE_HASH_CHUNCK; - let hash = hash(&o[i..i + MERKLE_HASH_CHUNCK]); - - j -= HASHSIZE; - o[j..j + HASHSIZE].copy_from_slice(&hash); - } - - o -} - -pub fn sanitise_bytes(mut bytes: Vec) -> Vec { - let present_leaves = num_unsanitized_leaves(bytes.len()); - let required_leaves = present_leaves.next_power_of_two(); - - if (present_leaves != required_leaves) | last_leaf_needs_padding(bytes.len()) { - bytes.resize(num_bytes(required_leaves), 0); - } - - bytes -} - -fn pad_for_leaf_count(num_leaves: usize, bytes: &mut Vec) { - let required_leaves = num_leaves.next_power_of_two(); - - bytes.resize( - bytes.len() + (required_leaves - num_leaves) * BYTES_PER_CHUNK, - 0, - ); -} - -fn last_leaf_needs_padding(num_bytes: usize) -> bool { - num_bytes % HASHSIZE != 0 -} - -/// Rounds up -fn num_unsanitized_leaves(num_bytes: usize) -> usize { - (num_bytes + HASHSIZE - 1) / HASHSIZE -} - -/// Rounds up -fn num_sanitized_leaves(num_bytes: usize) -> usize { - let leaves = (num_bytes + HASHSIZE - 1) / HASHSIZE; - leaves.next_power_of_two() -} - -fn num_bytes(num_leaves: usize) -> usize { - num_leaves * HASHSIZE -} diff --git a/eth2/utils/ssz/src/lib.rs 
b/eth2/utils/ssz/src/lib.rs index f86749c66..cb3f63c48 100644 --- a/eth2/utils/ssz/src/lib.rs +++ b/eth2/utils/ssz/src/lib.rs @@ -10,7 +10,6 @@ extern crate bytes; extern crate ethereum_types; -mod cached_tree_hash; pub mod decode; pub mod encode; mod signed_root; diff --git a/eth2/utils/tree_hash/Cargo.toml b/eth2/utils/tree_hash/Cargo.toml new file mode 100644 index 000000000..243a49446 --- /dev/null +++ b/eth2/utils/tree_hash/Cargo.toml @@ -0,0 +1,11 @@ +[package] +name = "tree_hash" +version = "0.1.0" +authors = ["Paul Hauner "] +edition = "2018" + +[dependencies] +ethereum-types = "0.5" +hashing = { path = "../hashing" } +int_to_bytes = { path = "../int_to_bytes" } +ssz = { path = "../ssz" } diff --git a/eth2/utils/tree_hash/src/btree_overlay.rs b/eth2/utils/tree_hash/src/btree_overlay.rs new file mode 100644 index 000000000..e69de29bb diff --git a/eth2/utils/tree_hash/src/cached_tree_hash.rs b/eth2/utils/tree_hash/src/cached_tree_hash.rs new file mode 100644 index 000000000..556ba2d21 --- /dev/null +++ b/eth2/utils/tree_hash/src/cached_tree_hash.rs @@ -0,0 +1,193 @@ +use super::*; + +#[derive(Debug, PartialEq, Clone)] +pub struct TreeHashCache { + cache: Vec, + chunk_modified: Vec, +} + +impl Into> for TreeHashCache { + fn into(self) -> Vec { + self.cache + } +} + +impl TreeHashCache { + pub fn new(item: &T) -> Result + where + T: CachedTreeHash, + { + item.build_tree_hash_cache() + } + + pub fn from_elems(cache: Vec, chunk_modified: Vec) -> Self { + Self { + cache, + chunk_modified, + } + } + + pub fn from_leaves_and_subtrees( + item: &T, + leaves_and_subtrees: Vec, + ) -> Result + where + T: CachedTreeHash, + { + let offset_handler = BTreeOverlay::new(item, 0)?; + + // Note how many leaves were provided. If it is not a power-of-two, we'll need to pad it out + // later. 
+ let num_provided_leaf_nodes = leaves_and_subtrees.len(); + + // Allocate enough bytes to store the internal nodes and the leaves and subtrees, then fill + // all the to-be-built internal nodes with zeros and append the leaves and subtrees. + let internal_node_bytes = offset_handler.num_internal_nodes * BYTES_PER_CHUNK; + let leaves_and_subtrees_bytes = leaves_and_subtrees + .iter() + .fold(0, |acc, t| acc + t.bytes_len()); + let mut cache = Vec::with_capacity(leaves_and_subtrees_bytes + internal_node_bytes); + cache.resize(internal_node_bytes, 0); + + // Allocate enough bytes to store all the leaves. + let mut leaves = Vec::with_capacity(offset_handler.num_leaf_nodes * HASHSIZE); + + // Iterate through all of the leaves/subtrees, adding their root as a leaf node and then + // concatenating their merkle trees. + for t in leaves_and_subtrees { + leaves.append(&mut t.root()?); + cache.append(&mut t.into_merkle_tree()); + } + + // Pad the leaves to an even power-of-two, using zeros. + pad_for_leaf_count(num_provided_leaf_nodes, &mut cache); + + // Merkleize the leaves, then split the leaf nodes off them. Then, replace all-zeros + // internal nodes created earlier with the internal nodes generated by `merkleize`. 
+ let mut merkleized = merkleize(leaves); + merkleized.split_off(internal_node_bytes); + cache.splice(0..internal_node_bytes, merkleized); + + Ok(Self { + chunk_modified: vec![false; cache.len() / BYTES_PER_CHUNK], + cache, + }) + } + + pub fn from_bytes(bytes: Vec, initial_modified_state: bool) -> Result { + if bytes.len() % BYTES_PER_CHUNK > 0 { + return Err(Error::BytesAreNotEvenChunks(bytes.len())); + } + + Ok(Self { + chunk_modified: vec![initial_modified_state; bytes.len() / BYTES_PER_CHUNK], + cache: bytes, + }) + } + + pub fn bytes_len(&self) -> usize { + self.cache.len() + } + + pub fn root(&self) -> Result, Error> { + self.cache + .get(0..HASHSIZE) + .ok_or_else(|| Error::NoBytesForRoot) + .and_then(|slice| Ok(slice.to_vec())) + } + + pub fn splice(&mut self, chunk_range: Range, replace_with: Self) { + let (bytes, bools) = replace_with.into_components(); + + // Update the `chunk_modified` vec, marking all spliced-in nodes as changed. + self.chunk_modified.splice(chunk_range.clone(), bools); + self.cache + .splice(node_range_to_byte_range(&chunk_range), bytes); + } + + pub fn maybe_update_chunk(&mut self, chunk: usize, to: &[u8]) -> Result<(), Error> { + let start = chunk * BYTES_PER_CHUNK; + let end = start + BYTES_PER_CHUNK; + + if !self.chunk_equals(chunk, to)? { + self.cache + .get_mut(start..end) + .ok_or_else(|| Error::NoModifiedFieldForChunk(chunk))? + .copy_from_slice(to); + self.chunk_modified[chunk] = true; + } + + Ok(()) + } + + pub fn slices(&self, chunk_range: Range) -> Option<(&[u8], &[bool])> { + Some(( + self.cache.get(node_range_to_byte_range(&chunk_range))?, + self.chunk_modified.get(chunk_range)?, + )) + } + + pub fn modify_chunk(&mut self, chunk: usize, to: &[u8]) -> Result<(), Error> { + let start = chunk * BYTES_PER_CHUNK; + let end = start + BYTES_PER_CHUNK; + + self.cache + .get_mut(start..end) + .ok_or_else(|| Error::NoBytesForChunk(chunk))? 
+ .copy_from_slice(to); + + self.chunk_modified[chunk] = true; + + Ok(()) + } + + pub fn get_chunk(&self, chunk: usize) -> Result<&[u8], Error> { + let start = chunk * BYTES_PER_CHUNK; + let end = start + BYTES_PER_CHUNK; + + Ok(self + .cache + .get(start..end) + .ok_or_else(|| Error::NoModifiedFieldForChunk(chunk))?) + } + + pub fn chunk_equals(&mut self, chunk: usize, other: &[u8]) -> Result { + Ok(self.get_chunk(chunk)? == other) + } + + pub fn changed(&self, chunk: usize) -> Result { + self.chunk_modified + .get(chunk) + .cloned() + .ok_or_else(|| Error::NoModifiedFieldForChunk(chunk)) + } + + pub fn either_modified(&self, children: (&usize, &usize)) -> Result { + Ok(self.changed(*children.0)? | self.changed(*children.1)?) + } + + pub fn hash_children(&self, children: (&usize, &usize)) -> Result, Error> { + let mut child_bytes = Vec::with_capacity(BYTES_PER_CHUNK * 2); + child_bytes.append(&mut self.get_chunk(*children.0)?.to_vec()); + child_bytes.append(&mut self.get_chunk(*children.1)?.to_vec()); + + Ok(hash(&child_bytes)) + } + + pub fn mix_in_length(&self, chunk: usize, length: usize) -> Result, Error> { + let mut bytes = Vec::with_capacity(2 * BYTES_PER_CHUNK); + + bytes.append(&mut self.get_chunk(chunk)?.to_vec()); + bytes.append(&mut int_to_bytes32(length as u64)); + + Ok(hash(&bytes)) + } + + pub fn into_merkle_tree(self) -> Vec { + self.cache + } + + pub fn into_components(self) -> (Vec, Vec) { + (self.cache, self.chunk_modified) + } +} diff --git a/eth2/utils/ssz/src/cached_tree_hash/impls.rs b/eth2/utils/tree_hash/src/impls.rs similarity index 99% rename from eth2/utils/ssz/src/cached_tree_hash/impls.rs rename to eth2/utils/tree_hash/src/impls.rs index 26905c667..d5297c38e 100644 --- a/eth2/utils/ssz/src/cached_tree_hash/impls.rs +++ b/eth2/utils/tree_hash/src/impls.rs @@ -1,6 +1,6 @@ use super::resize::{grow_merkle_cache, shrink_merkle_cache}; use super::*; -use crate::ssz_encode; +use ssz::ssz_encode; impl CachedTreeHash for u64 { fn item_type() -> 
ItemType { diff --git a/eth2/utils/tree_hash/src/lib.rs b/eth2/utils/tree_hash/src/lib.rs new file mode 100644 index 000000000..1b085770d --- /dev/null +++ b/eth2/utils/tree_hash/src/lib.rs @@ -0,0 +1,249 @@ +use hashing::hash; +use int_to_bytes::int_to_bytes32; +use std::fmt::Debug; +use std::iter::Iterator; +use std::ops::Range; + +mod cached_tree_hash; +mod impls; +mod resize; + +pub use cached_tree_hash::TreeHashCache; + +pub const BYTES_PER_CHUNK: usize = 32; +pub const HASHSIZE: usize = 32; +pub const MERKLE_HASH_CHUNCK: usize = 2 * BYTES_PER_CHUNK; + +#[derive(Debug, PartialEq, Clone)] +pub enum Error { + ShouldNotProduceBTreeOverlay, + NoFirstNode, + NoBytesForRoot, + UnableToObtainSlices, + UnableToGrowMerkleTree, + UnableToShrinkMerkleTree, + BytesAreNotEvenChunks(usize), + NoModifiedFieldForChunk(usize), + NoBytesForChunk(usize), +} + +#[derive(Debug, PartialEq, Clone)] +pub enum ItemType { + Basic, + List, + Composite, +} + +// TODO: remove debug requirement. +pub trait CachedTreeHash: Debug { + fn item_type() -> ItemType; + + fn build_tree_hash_cache(&self) -> Result; + + /// Return the number of bytes when this element is encoded as raw SSZ _without_ length + /// prefixes. 
+ fn num_bytes(&self) -> usize; + + fn offsets(&self) -> Result, Error>; + + fn num_child_nodes(&self) -> usize; + + fn packed_encoding(&self) -> Vec; + + fn packing_factor() -> usize; + + fn cached_hash_tree_root( + &self, + other: &Item, + cache: &mut TreeHashCache, + chunk: usize, + ) -> Result; +} + +fn children(parent: usize) -> (usize, usize) { + ((2 * parent + 1), (2 * parent + 2)) +} + +fn num_nodes(num_leaves: usize) -> usize { + 2 * num_leaves - 1 +} + +fn node_range_to_byte_range(node_range: &Range) -> Range { + node_range.start * HASHSIZE..node_range.end * HASHSIZE +} + +#[derive(Debug)] +pub struct BTreeOverlay { + num_internal_nodes: usize, + pub num_leaf_nodes: usize, + first_node: usize, + next_node: usize, + offsets: Vec, +} + +impl BTreeOverlay { + pub fn new(item: &T, initial_offset: usize) -> Result + where + T: CachedTreeHash, + { + Self::from_lengths(initial_offset, item.offsets()?) + } + + fn from_lengths(offset: usize, mut lengths: Vec) -> Result { + // Extend it to the next power-of-two, if it is not already. 
+ let num_leaf_nodes = if lengths.len().is_power_of_two() { + lengths.len() + } else { + let num_leaf_nodes = lengths.len().next_power_of_two(); + lengths.resize(num_leaf_nodes, 1); + num_leaf_nodes + }; + + let num_nodes = num_nodes(num_leaf_nodes); + let num_internal_nodes = num_nodes - num_leaf_nodes; + + let mut offsets = Vec::with_capacity(num_nodes); + offsets.append(&mut (offset..offset + num_internal_nodes).collect()); + + let mut next_node = num_internal_nodes + offset; + for i in 0..num_leaf_nodes { + offsets.push(next_node); + next_node += lengths[i]; + } + + Ok(Self { + num_internal_nodes, + num_leaf_nodes, + offsets, + first_node: offset, + next_node, + }) + } + + pub fn root(&self) -> usize { + self.first_node + } + + pub fn height(&self) -> usize { + self.num_leaf_nodes.trailing_zeros() as usize + } + + pub fn chunk_range(&self) -> Range { + self.first_node..self.next_node + } + + pub fn total_chunks(&self) -> usize { + self.next_node - self.first_node + } + + pub fn total_nodes(&self) -> usize { + self.num_internal_nodes + self.num_leaf_nodes + } + + pub fn first_leaf_node(&self) -> Result { + self.offsets + .get(self.num_internal_nodes) + .cloned() + .ok_or_else(|| Error::NoFirstNode) + } + + pub fn next_node(&self) -> usize { + self.next_node + } + + /// Returns an iterator visiting each internal node, providing the left and right child chunks + /// for the node. + pub fn iter_internal_nodes<'a>( + &'a self, + ) -> impl DoubleEndedIterator { + let internal_nodes = &self.offsets[0..self.num_internal_nodes]; + + internal_nodes.iter().enumerate().map(move |(i, parent)| { + let children = children(i); + ( + parent, + (&self.offsets[children.0], &self.offsets[children.1]), + ) + }) + } + + /// Returns an iterator visiting each leaf node, providing the chunk for that node. 
+ pub fn iter_leaf_nodes<'a>(&'a self) -> impl DoubleEndedIterator { + let leaf_nodes = &self.offsets[self.num_internal_nodes..]; + + leaf_nodes.iter() + } +} + +/// Split `values` into a power-of-two, identical-length chunks (padding with `0`) and merkleize +/// them, returning the entire merkle tree. +/// +/// The root hash is `merkleize(values)[0..BYTES_PER_CHUNK]`. +pub fn merkleize(values: Vec) -> Vec { + let values = sanitise_bytes(values); + + let leaves = values.len() / HASHSIZE; + + if leaves == 0 { + panic!("No full leaves"); + } + + if !leaves.is_power_of_two() { + panic!("leaves is not power of two"); + } + + let mut o: Vec = vec![0; (num_nodes(leaves) - leaves) * HASHSIZE]; + o.append(&mut values.to_vec()); + + let mut i = o.len(); + let mut j = o.len() - values.len(); + + while i >= MERKLE_HASH_CHUNCK { + i -= MERKLE_HASH_CHUNCK; + let hash = hash(&o[i..i + MERKLE_HASH_CHUNCK]); + + j -= HASHSIZE; + o[j..j + HASHSIZE].copy_from_slice(&hash); + } + + o +} + +pub fn sanitise_bytes(mut bytes: Vec) -> Vec { + let present_leaves = num_unsanitized_leaves(bytes.len()); + let required_leaves = present_leaves.next_power_of_two(); + + if (present_leaves != required_leaves) | last_leaf_needs_padding(bytes.len()) { + bytes.resize(num_bytes(required_leaves), 0); + } + + bytes +} + +fn pad_for_leaf_count(num_leaves: usize, bytes: &mut Vec) { + let required_leaves = num_leaves.next_power_of_two(); + + bytes.resize( + bytes.len() + (required_leaves - num_leaves) * BYTES_PER_CHUNK, + 0, + ); +} + +fn last_leaf_needs_padding(num_bytes: usize) -> bool { + num_bytes % HASHSIZE != 0 +} + +/// Rounds up +fn num_unsanitized_leaves(num_bytes: usize) -> usize { + (num_bytes + HASHSIZE - 1) / HASHSIZE +} + +/// Rounds up +fn num_sanitized_leaves(num_bytes: usize) -> usize { + let leaves = (num_bytes + HASHSIZE - 1) / HASHSIZE; + leaves.next_power_of_two() +} + +fn num_bytes(num_leaves: usize) -> usize { + num_leaves * HASHSIZE +} diff --git 
a/eth2/utils/ssz/src/cached_tree_hash/resize.rs b/eth2/utils/tree_hash/src/resize.rs similarity index 100% rename from eth2/utils/ssz/src/cached_tree_hash/resize.rs rename to eth2/utils/tree_hash/src/resize.rs diff --git a/eth2/utils/ssz/src/cached_tree_hash/tests.rs b/eth2/utils/tree_hash/tests/tests.rs similarity index 99% rename from eth2/utils/ssz/src/cached_tree_hash/tests.rs rename to eth2/utils/tree_hash/tests/tests.rs index e6e2b1754..972eb1e00 100644 --- a/eth2/utils/ssz/src/cached_tree_hash/tests.rs +++ b/eth2/utils/tree_hash/tests/tests.rs @@ -1,6 +1,10 @@ -#![cfg(test)] -use super::*; +use hashing::hash; use int_to_bytes::{int_to_bytes32, int_to_bytes8}; +use tree_hash::*; + +fn num_nodes(num_leaves: usize) -> usize { + 2 * num_leaves - 1 +} #[derive(Clone, Debug)] pub struct Inner { From c87a0fc5882c3b718827bd4865688558ab8ae611 Mon Sep 17 00:00:00 2001 From: Paul Hauner Date: Mon, 15 Apr 2019 11:37:29 +1000 Subject: [PATCH 42/89] Tidy CachedTreeHash trait --- eth2/utils/tree_hash/src/btree_overlay.rs | 100 ++++++++++++++++++++ eth2/utils/tree_hash/src/impls.rs | 25 ++--- eth2/utils/tree_hash/src/lib.rs | 108 +--------------------- eth2/utils/tree_hash/tests/tests.rs | 30 +++--- 4 files changed, 131 insertions(+), 132 deletions(-) diff --git a/eth2/utils/tree_hash/src/btree_overlay.rs b/eth2/utils/tree_hash/src/btree_overlay.rs index e69de29bb..7d5602c0b 100644 --- a/eth2/utils/tree_hash/src/btree_overlay.rs +++ b/eth2/utils/tree_hash/src/btree_overlay.rs @@ -0,0 +1,100 @@ +use super::*; + +#[derive(Debug)] +pub struct BTreeOverlay { + pub num_internal_nodes: usize, + pub num_leaf_nodes: usize, + pub first_node: usize, + pub next_node: usize, + offsets: Vec, +} + +impl BTreeOverlay { + pub fn new(item: &T, initial_offset: usize) -> Result + where + T: CachedTreeHash, + { + item.btree_overlay(initial_offset) + } + + pub fn from_lengths(offset: usize, mut lengths: Vec) -> Result { + // Extend it to the next power-of-two, if it is not already. 
+ let num_leaf_nodes = if lengths.len().is_power_of_two() { + lengths.len() + } else { + let num_leaf_nodes = lengths.len().next_power_of_two(); + lengths.resize(num_leaf_nodes, 1); + num_leaf_nodes + }; + + let num_nodes = num_nodes(num_leaf_nodes); + let num_internal_nodes = num_nodes - num_leaf_nodes; + + let mut offsets = Vec::with_capacity(num_nodes); + offsets.append(&mut (offset..offset + num_internal_nodes).collect()); + + let mut next_node = num_internal_nodes + offset; + for i in 0..num_leaf_nodes { + offsets.push(next_node); + next_node += lengths[i]; + } + + Ok(Self { + num_internal_nodes, + num_leaf_nodes, + offsets, + first_node: offset, + next_node, + }) + } + + pub fn root(&self) -> usize { + self.first_node + } + + pub fn height(&self) -> usize { + self.num_leaf_nodes.trailing_zeros() as usize + } + + pub fn chunk_range(&self) -> Range { + self.first_node..self.next_node + } + + pub fn total_chunks(&self) -> usize { + self.next_node - self.first_node + } + + pub fn total_nodes(&self) -> usize { + self.num_internal_nodes + self.num_leaf_nodes + } + + pub fn first_leaf_node(&self) -> Result { + self.offsets + .get(self.num_internal_nodes) + .cloned() + .ok_or_else(|| Error::NoFirstNode) + } + + /// Returns an iterator visiting each internal node, providing the left and right child chunks + /// for the node. + pub fn iter_internal_nodes<'a>( + &'a self, + ) -> impl DoubleEndedIterator { + let internal_nodes = &self.offsets[0..self.num_internal_nodes]; + + internal_nodes.iter().enumerate().map(move |(i, parent)| { + let children = children(i); + ( + parent, + (&self.offsets[children.0], &self.offsets[children.1]), + ) + }) + } + + /// Returns an iterator visiting each leaf node, providing the chunk for that node. 
+ pub fn iter_leaf_nodes<'a>(&'a self) -> impl DoubleEndedIterator { + let leaf_nodes = &self.offsets[self.num_internal_nodes..]; + + leaf_nodes.iter() + } +} diff --git a/eth2/utils/tree_hash/src/impls.rs b/eth2/utils/tree_hash/src/impls.rs index d5297c38e..9149cf8aa 100644 --- a/eth2/utils/tree_hash/src/impls.rs +++ b/eth2/utils/tree_hash/src/impls.rs @@ -14,12 +14,12 @@ impl CachedTreeHash for u64 { )?) } - fn num_bytes(&self) -> usize { - 8 + fn btree_overlay(&self, _chunk_offset: usize) -> Result { + Err(Error::ShouldNotProduceBTreeOverlay) } - fn offsets(&self) -> Result, Error> { - Err(Error::ShouldNotProduceBTreeOverlay) + fn num_bytes(&self) -> usize { + 8 } fn num_child_nodes(&self) -> usize { @@ -71,21 +71,22 @@ where } } - fn offsets(&self) -> Result, Error> { - let offsets = match T::item_type() { + fn btree_overlay(&self, chunk_offset: usize) -> Result { + // + let lengths = match T::item_type() { ItemType::Basic => vec![1; self.len() / T::packing_factor()], ItemType::Composite | ItemType::List => { - let mut offsets = vec![]; + let mut lengths = vec![]; for item in self { - offsets.push(BTreeOverlay::new(item, 0)?.total_nodes()) + lengths.push(BTreeOverlay::new(item, 0)?.total_nodes()) } - offsets + lengths } }; - Ok(offsets) + BTreeOverlay::from_lengths(chunk_offset, lengths) } fn num_child_nodes(&self) -> usize { @@ -180,7 +181,7 @@ where (Some(old), None) => { // Splice out the entire tree of the removed node, replacing it with a // single padding node. 
- let end_chunk = BTreeOverlay::new(old, start_chunk)?.next_node(); + let end_chunk = BTreeOverlay::new(old, start_chunk)?.next_node; cache.splice( start_chunk..end_chunk, @@ -218,7 +219,7 @@ where cache.modify_chunk(root_node, &cache.mix_in_length(root_node, self.len())?)?; } - Ok(offset_handler.next_node()) + Ok(offset_handler.next_node) } } diff --git a/eth2/utils/tree_hash/src/lib.rs b/eth2/utils/tree_hash/src/lib.rs index 1b085770d..e356210a4 100644 --- a/eth2/utils/tree_hash/src/lib.rs +++ b/eth2/utils/tree_hash/src/lib.rs @@ -1,13 +1,14 @@ use hashing::hash; use int_to_bytes::int_to_bytes32; use std::fmt::Debug; -use std::iter::Iterator; use std::ops::Range; +mod btree_overlay; mod cached_tree_hash; mod impls; mod resize; +pub use btree_overlay::BTreeOverlay; pub use cached_tree_hash::TreeHashCache; pub const BYTES_PER_CHUNK: usize = 32; @@ -44,7 +45,7 @@ pub trait CachedTreeHash: Debug { /// prefixes. fn num_bytes(&self) -> usize; - fn offsets(&self) -> Result, Error>; + fn btree_overlay(&self, chunk_offset: usize) -> Result; fn num_child_nodes(&self) -> usize; @@ -72,109 +73,6 @@ fn node_range_to_byte_range(node_range: &Range) -> Range { node_range.start * HASHSIZE..node_range.end * HASHSIZE } -#[derive(Debug)] -pub struct BTreeOverlay { - num_internal_nodes: usize, - pub num_leaf_nodes: usize, - first_node: usize, - next_node: usize, - offsets: Vec, -} - -impl BTreeOverlay { - pub fn new(item: &T, initial_offset: usize) -> Result - where - T: CachedTreeHash, - { - Self::from_lengths(initial_offset, item.offsets()?) - } - - fn from_lengths(offset: usize, mut lengths: Vec) -> Result { - // Extend it to the next power-of-two, if it is not already. 
- let num_leaf_nodes = if lengths.len().is_power_of_two() { - lengths.len() - } else { - let num_leaf_nodes = lengths.len().next_power_of_two(); - lengths.resize(num_leaf_nodes, 1); - num_leaf_nodes - }; - - let num_nodes = num_nodes(num_leaf_nodes); - let num_internal_nodes = num_nodes - num_leaf_nodes; - - let mut offsets = Vec::with_capacity(num_nodes); - offsets.append(&mut (offset..offset + num_internal_nodes).collect()); - - let mut next_node = num_internal_nodes + offset; - for i in 0..num_leaf_nodes { - offsets.push(next_node); - next_node += lengths[i]; - } - - Ok(Self { - num_internal_nodes, - num_leaf_nodes, - offsets, - first_node: offset, - next_node, - }) - } - - pub fn root(&self) -> usize { - self.first_node - } - - pub fn height(&self) -> usize { - self.num_leaf_nodes.trailing_zeros() as usize - } - - pub fn chunk_range(&self) -> Range { - self.first_node..self.next_node - } - - pub fn total_chunks(&self) -> usize { - self.next_node - self.first_node - } - - pub fn total_nodes(&self) -> usize { - self.num_internal_nodes + self.num_leaf_nodes - } - - pub fn first_leaf_node(&self) -> Result { - self.offsets - .get(self.num_internal_nodes) - .cloned() - .ok_or_else(|| Error::NoFirstNode) - } - - pub fn next_node(&self) -> usize { - self.next_node - } - - /// Returns an iterator visiting each internal node, providing the left and right child chunks - /// for the node. - pub fn iter_internal_nodes<'a>( - &'a self, - ) -> impl DoubleEndedIterator { - let internal_nodes = &self.offsets[0..self.num_internal_nodes]; - - internal_nodes.iter().enumerate().map(move |(i, parent)| { - let children = children(i); - ( - parent, - (&self.offsets[children.0], &self.offsets[children.1]), - ) - }) - } - - /// Returns an iterator visiting each leaf node, providing the chunk for that node. 
- pub fn iter_leaf_nodes<'a>(&'a self) -> impl DoubleEndedIterator { - let leaf_nodes = &self.offsets[self.num_internal_nodes..]; - - leaf_nodes.iter() - } -} - /// Split `values` into a power-of-two, identical-length chunks (padding with `0`) and merkleize /// them, returning the entire merkle tree. /// diff --git a/eth2/utils/tree_hash/tests/tests.rs b/eth2/utils/tree_hash/tests/tests.rs index 972eb1e00..af4204cc2 100644 --- a/eth2/utils/tree_hash/tests/tests.rs +++ b/eth2/utils/tree_hash/tests/tests.rs @@ -44,15 +44,15 @@ impl CachedTreeHash for Inner { bytes } - fn offsets(&self) -> Result, Error> { - let mut offsets = vec![]; + fn btree_overlay(&self, chunk_offset: usize) -> Result { + let mut lengths = vec![]; - offsets.push(self.a.num_child_nodes() + 1); - offsets.push(self.b.num_child_nodes() + 1); - offsets.push(self.c.num_child_nodes() + 1); - offsets.push(self.d.num_child_nodes() + 1); + lengths.push(self.a.num_child_nodes() + 1); + lengths.push(self.b.num_child_nodes() + 1); + lengths.push(self.c.num_child_nodes() + 1); + lengths.push(self.d.num_child_nodes() + 1); - Ok(offsets) + BTreeOverlay::from_lengths(chunk_offset, lengths) } fn num_child_nodes(&self) -> usize { @@ -98,7 +98,7 @@ impl CachedTreeHash for Inner { } } - Ok(offset_handler.next_node()) + Ok(offset_handler.next_node) } } @@ -146,14 +146,14 @@ impl CachedTreeHash for Outer { num_nodes(leaves) + children - 1 } - fn offsets(&self) -> Result, Error> { - let mut offsets = vec![]; + fn btree_overlay(&self, chunk_offset: usize) -> Result { + let mut lengths = vec![]; - offsets.push(self.a.num_child_nodes() + 1); - offsets.push(self.b.num_child_nodes() + 1); - offsets.push(self.c.num_child_nodes() + 1); + lengths.push(self.a.num_child_nodes() + 1); + lengths.push(self.b.num_child_nodes() + 1); + lengths.push(self.c.num_child_nodes() + 1); - Ok(offsets) + BTreeOverlay::from_lengths(chunk_offset, lengths) } fn packed_encoding(&self) -> Vec { @@ -186,7 +186,7 @@ impl CachedTreeHash for Outer { } } 
- Ok(offset_handler.next_node()) + Ok(offset_handler.next_node) } } From e6c33e1b60e560fb7539c0965f441dccb6af277d Mon Sep 17 00:00:00 2001 From: Paul Hauner Date: Mon, 15 Apr 2019 11:44:44 +1000 Subject: [PATCH 43/89] Remove child_nodes method from CachedTreeHash --- eth2/utils/tree_hash/src/impls.rs | 14 ++--------- eth2/utils/tree_hash/src/lib.rs | 2 -- eth2/utils/tree_hash/tests/tests.rs | 37 ++++++----------------------- 3 files changed, 9 insertions(+), 44 deletions(-) diff --git a/eth2/utils/tree_hash/src/impls.rs b/eth2/utils/tree_hash/src/impls.rs index 9149cf8aa..4349d73d8 100644 --- a/eth2/utils/tree_hash/src/impls.rs +++ b/eth2/utils/tree_hash/src/impls.rs @@ -14,18 +14,14 @@ impl CachedTreeHash for u64 { )?) } - fn btree_overlay(&self, _chunk_offset: usize) -> Result { - Err(Error::ShouldNotProduceBTreeOverlay) + fn btree_overlay(&self, chunk_offset: usize) -> Result { + BTreeOverlay::from_lengths(chunk_offset, vec![1]) } fn num_bytes(&self) -> usize { 8 } - fn num_child_nodes(&self) -> usize { - 0 - } - fn packed_encoding(&self) -> Vec { ssz_encode(self) } @@ -72,7 +68,6 @@ where } fn btree_overlay(&self, chunk_offset: usize) -> Result { - // let lengths = match T::item_type() { ItemType::Basic => vec![1; self.len() / T::packing_factor()], ItemType::Composite | ItemType::List => { @@ -89,11 +84,6 @@ where BTreeOverlay::from_lengths(chunk_offset, lengths) } - fn num_child_nodes(&self) -> usize { - // TODO - 42 - } - fn num_bytes(&self) -> usize { self.iter().fold(0, |acc, item| acc + item.num_bytes()) } diff --git a/eth2/utils/tree_hash/src/lib.rs b/eth2/utils/tree_hash/src/lib.rs index e356210a4..4e1cff0e8 100644 --- a/eth2/utils/tree_hash/src/lib.rs +++ b/eth2/utils/tree_hash/src/lib.rs @@ -47,8 +47,6 @@ pub trait CachedTreeHash: Debug { fn btree_overlay(&self, chunk_offset: usize) -> Result; - fn num_child_nodes(&self) -> usize; - fn packed_encoding(&self) -> Vec; fn packing_factor() -> usize; diff --git a/eth2/utils/tree_hash/tests/tests.rs 
b/eth2/utils/tree_hash/tests/tests.rs index af4204cc2..a315feeed 100644 --- a/eth2/utils/tree_hash/tests/tests.rs +++ b/eth2/utils/tree_hash/tests/tests.rs @@ -47,26 +47,14 @@ impl CachedTreeHash for Inner { fn btree_overlay(&self, chunk_offset: usize) -> Result { let mut lengths = vec![]; - lengths.push(self.a.num_child_nodes() + 1); - lengths.push(self.b.num_child_nodes() + 1); - lengths.push(self.c.num_child_nodes() + 1); - lengths.push(self.d.num_child_nodes() + 1); + lengths.push(BTreeOverlay::new(&self.a, 0)?.total_nodes()); + lengths.push(BTreeOverlay::new(&self.b, 0)?.total_nodes()); + lengths.push(BTreeOverlay::new(&self.c, 0)?.total_nodes()); + lengths.push(BTreeOverlay::new(&self.d, 0)?.total_nodes()); BTreeOverlay::from_lengths(chunk_offset, lengths) } - fn num_child_nodes(&self) -> usize { - let mut children = 0; - let leaves = 4; - - children += self.a.num_child_nodes(); - children += self.b.num_child_nodes(); - children += self.c.num_child_nodes(); - children += self.d.num_child_nodes(); - - num_nodes(leaves) + children - 1 - } - fn packed_encoding(&self) -> Vec { panic!("Struct should never be packed") } @@ -135,23 +123,12 @@ impl CachedTreeHash for Outer { bytes } - fn num_child_nodes(&self) -> usize { - let mut children = 0; - let leaves = 3; - - children += self.a.num_child_nodes(); - children += self.b.num_child_nodes(); - children += self.c.num_child_nodes(); - - num_nodes(leaves) + children - 1 - } - fn btree_overlay(&self, chunk_offset: usize) -> Result { let mut lengths = vec![]; - lengths.push(self.a.num_child_nodes() + 1); - lengths.push(self.b.num_child_nodes() + 1); - lengths.push(self.c.num_child_nodes() + 1); + lengths.push(BTreeOverlay::new(&self.a, 0)?.total_nodes()); + lengths.push(BTreeOverlay::new(&self.b, 0)?.total_nodes()); + lengths.push(BTreeOverlay::new(&self.c, 0)?.total_nodes()); BTreeOverlay::from_lengths(chunk_offset, lengths) } From cb9b59b93d90a727c9a4e3eac0423a7a42d0eca0 Mon Sep 17 00:00:00 2001 From: Paul Hauner Date: 
Mon, 15 Apr 2019 11:49:50 +1000 Subject: [PATCH 44/89] Remove panics from packed_encoding --- eth2/utils/tree_hash/src/impls.rs | 20 +++++++++++--------- eth2/utils/tree_hash/src/lib.rs | 3 ++- eth2/utils/tree_hash/tests/tests.rs | 8 ++++---- 3 files changed, 17 insertions(+), 14 deletions(-) diff --git a/eth2/utils/tree_hash/src/impls.rs b/eth2/utils/tree_hash/src/impls.rs index 4349d73d8..05da2753a 100644 --- a/eth2/utils/tree_hash/src/impls.rs +++ b/eth2/utils/tree_hash/src/impls.rs @@ -22,8 +22,8 @@ impl CachedTreeHash for u64 { 8 } - fn packed_encoding(&self) -> Vec { - ssz_encode(self) + fn packed_encoding(&self) -> Result, Error> { + Ok(ssz_encode(self)) } fn packing_factor() -> usize { @@ -55,7 +55,9 @@ where fn build_tree_hash_cache(&self) -> Result { match T::item_type() { - ItemType::Basic => TreeHashCache::from_bytes(merkleize(get_packed_leaves(self)), false), + ItemType::Basic => { + TreeHashCache::from_bytes(merkleize(get_packed_leaves(self)?), false) + } ItemType::Composite | ItemType::List => { let subtrees = self .iter() @@ -88,8 +90,8 @@ where self.iter().fold(0, |acc, item| acc + item.num_bytes()) } - fn packed_encoding(&self) -> Vec { - panic!("List should never be packed") + fn packed_encoding(&self) -> Result, Error> { + Err(Error::ShouldNeverBePacked(Self::item_type())) } fn packing_factor() -> usize { @@ -142,7 +144,7 @@ where match T::item_type() { ItemType::Basic => { - let leaves = get_packed_leaves(self); + let leaves = get_packed_leaves(self)?; for (i, chunk) in offset_handler.iter_leaf_nodes().enumerate() { if let Some(latest) = leaves.get(i * HASHSIZE..(i + 1) * HASHSIZE) { @@ -213,7 +215,7 @@ where } } -fn get_packed_leaves(vec: &Vec) -> Vec +fn get_packed_leaves(vec: &Vec) -> Result, Error> where T: CachedTreeHash, { @@ -223,8 +225,8 @@ where let mut packed = Vec::with_capacity(num_leaves * HASHSIZE); for item in vec { - packed.append(&mut item.packed_encoding()); + packed.append(&mut item.packed_encoding()?); } - 
sanitise_bytes(packed) + Ok(sanitise_bytes(packed)) } diff --git a/eth2/utils/tree_hash/src/lib.rs b/eth2/utils/tree_hash/src/lib.rs index 4e1cff0e8..76752e5b2 100644 --- a/eth2/utils/tree_hash/src/lib.rs +++ b/eth2/utils/tree_hash/src/lib.rs @@ -23,6 +23,7 @@ pub enum Error { UnableToObtainSlices, UnableToGrowMerkleTree, UnableToShrinkMerkleTree, + ShouldNeverBePacked(ItemType), BytesAreNotEvenChunks(usize), NoModifiedFieldForChunk(usize), NoBytesForChunk(usize), @@ -47,7 +48,7 @@ pub trait CachedTreeHash: Debug { fn btree_overlay(&self, chunk_offset: usize) -> Result; - fn packed_encoding(&self) -> Vec; + fn packed_encoding(&self) -> Result, Error>; fn packing_factor() -> usize; diff --git a/eth2/utils/tree_hash/tests/tests.rs b/eth2/utils/tree_hash/tests/tests.rs index a315feeed..17ec121a8 100644 --- a/eth2/utils/tree_hash/tests/tests.rs +++ b/eth2/utils/tree_hash/tests/tests.rs @@ -55,8 +55,8 @@ impl CachedTreeHash for Inner { BTreeOverlay::from_lengths(chunk_offset, lengths) } - fn packed_encoding(&self) -> Vec { - panic!("Struct should never be packed") + fn packed_encoding(&self) -> Result, Error> { + Err(Error::ShouldNeverBePacked(Self::item_type())) } fn packing_factor() -> usize { @@ -133,8 +133,8 @@ impl CachedTreeHash for Outer { BTreeOverlay::from_lengths(chunk_offset, lengths) } - fn packed_encoding(&self) -> Vec { - panic!("Struct should never be packed") + fn packed_encoding(&self) -> Result, Error> { + Err(Error::ShouldNeverBePacked(Self::item_type())) } fn packing_factor() -> usize { From c18cdf2abf7478e4d9535aa9d2581ea35df5931a Mon Sep 17 00:00:00 2001 From: Paul Hauner Date: Mon, 15 Apr 2019 11:55:56 +1000 Subject: [PATCH 45/89] Remove num_bytes method --- eth2/utils/tree_hash/src/impls.rs | 12 ++---------- eth2/utils/tree_hash/src/lib.rs | 4 ---- eth2/utils/tree_hash/tests/tests.rs | 19 ------------------- 3 files changed, 2 insertions(+), 33 deletions(-) diff --git a/eth2/utils/tree_hash/src/impls.rs b/eth2/utils/tree_hash/src/impls.rs index 
05da2753a..53490551f 100644 --- a/eth2/utils/tree_hash/src/impls.rs +++ b/eth2/utils/tree_hash/src/impls.rs @@ -18,16 +18,12 @@ impl CachedTreeHash for u64 { BTreeOverlay::from_lengths(chunk_offset, vec![1]) } - fn num_bytes(&self) -> usize { - 8 - } - fn packed_encoding(&self) -> Result, Error> { Ok(ssz_encode(self)) } fn packing_factor() -> usize { - 32 / 8 + HASHSIZE / 8 } fn cached_hash_tree_root( @@ -86,10 +82,6 @@ where BTreeOverlay::from_lengths(chunk_offset, lengths) } - fn num_bytes(&self) -> usize { - self.iter().fold(0, |acc, item| acc + item.num_bytes()) - } - fn packed_encoding(&self) -> Result, Error> { Err(Error::ShouldNeverBePacked(Self::item_type())) } @@ -219,7 +211,7 @@ fn get_packed_leaves(vec: &Vec) -> Result, Error> where T: CachedTreeHash, { - let num_packed_bytes = vec.num_bytes(); + let num_packed_bytes = (BYTES_PER_CHUNK / T::packing_factor()) * vec.len(); let num_leaves = num_sanitized_leaves(num_packed_bytes); let mut packed = Vec::with_capacity(num_leaves * HASHSIZE); diff --git a/eth2/utils/tree_hash/src/lib.rs b/eth2/utils/tree_hash/src/lib.rs index 76752e5b2..0fd75dc5a 100644 --- a/eth2/utils/tree_hash/src/lib.rs +++ b/eth2/utils/tree_hash/src/lib.rs @@ -42,10 +42,6 @@ pub trait CachedTreeHash: Debug { fn build_tree_hash_cache(&self) -> Result; - /// Return the number of bytes when this element is encoded as raw SSZ _without_ length - /// prefixes. 
- fn num_bytes(&self) -> usize; - fn btree_overlay(&self, chunk_offset: usize) -> Result; fn packed_encoding(&self) -> Result, Error>; diff --git a/eth2/utils/tree_hash/tests/tests.rs b/eth2/utils/tree_hash/tests/tests.rs index 17ec121a8..701bf8ec1 100644 --- a/eth2/utils/tree_hash/tests/tests.rs +++ b/eth2/utils/tree_hash/tests/tests.rs @@ -33,17 +33,6 @@ impl CachedTreeHash for Inner { Ok(tree) } - fn num_bytes(&self) -> usize { - let mut bytes = 0; - - bytes += self.a.num_bytes(); - bytes += self.b.num_bytes(); - bytes += self.c.num_bytes(); - bytes += self.d.num_bytes(); - - bytes - } - fn btree_overlay(&self, chunk_offset: usize) -> Result { let mut lengths = vec![]; @@ -115,14 +104,6 @@ impl CachedTreeHash for Outer { Ok(tree) } - fn num_bytes(&self) -> usize { - let mut bytes = 0; - bytes += self.a.num_bytes(); - bytes += self.b.num_bytes(); - bytes += self.c.num_bytes(); - bytes - } - fn btree_overlay(&self, chunk_offset: usize) -> Result { let mut lengths = vec![]; From 8e5b79452ad5878e5f512d9e3cc35af1882b3bbf Mon Sep 17 00:00:00 2001 From: Paul Hauner Date: Mon, 15 Apr 2019 12:01:12 +1000 Subject: [PATCH 46/89] Further tidy cached tree hash --- eth2/utils/tree_hash/src/cached_tree_hash.rs | 2 +- eth2/utils/tree_hash/src/impls.rs | 188 +------------------ eth2/utils/tree_hash/src/impls/vec.rs | 183 ++++++++++++++++++ eth2/utils/tree_hash/src/lib.rs | 6 +- eth2/utils/tree_hash/tests/tests.rs | 48 +++-- 5 files changed, 214 insertions(+), 213 deletions(-) create mode 100644 eth2/utils/tree_hash/src/impls/vec.rs diff --git a/eth2/utils/tree_hash/src/cached_tree_hash.rs b/eth2/utils/tree_hash/src/cached_tree_hash.rs index 556ba2d21..4022f6b7b 100644 --- a/eth2/utils/tree_hash/src/cached_tree_hash.rs +++ b/eth2/utils/tree_hash/src/cached_tree_hash.rs @@ -17,7 +17,7 @@ impl TreeHashCache { where T: CachedTreeHash, { - item.build_tree_hash_cache() + item.new_cache() } pub fn from_elems(cache: Vec, chunk_modified: Vec) -> Self { diff --git 
a/eth2/utils/tree_hash/src/impls.rs b/eth2/utils/tree_hash/src/impls.rs index 53490551f..6849fd55c 100644 --- a/eth2/utils/tree_hash/src/impls.rs +++ b/eth2/utils/tree_hash/src/impls.rs @@ -2,12 +2,14 @@ use super::resize::{grow_merkle_cache, shrink_merkle_cache}; use super::*; use ssz::ssz_encode; +mod vec; + impl CachedTreeHash for u64 { fn item_type() -> ItemType { ItemType::Basic } - fn build_tree_hash_cache(&self) -> Result { + fn new_cache(&self) -> Result { Ok(TreeHashCache::from_bytes( merkleize(ssz_encode(self)), false, @@ -26,7 +28,7 @@ impl CachedTreeHash for u64 { HASHSIZE / 8 } - fn cached_hash_tree_root( + fn update_cache( &self, other: &Self, cache: &mut TreeHashCache, @@ -40,185 +42,3 @@ impl CachedTreeHash for u64 { Ok(chunk + 1) } } - -impl CachedTreeHash> for Vec -where - T: CachedTreeHash, -{ - fn item_type() -> ItemType { - ItemType::List - } - - fn build_tree_hash_cache(&self) -> Result { - match T::item_type() { - ItemType::Basic => { - TreeHashCache::from_bytes(merkleize(get_packed_leaves(self)?), false) - } - ItemType::Composite | ItemType::List => { - let subtrees = self - .iter() - .map(|item| TreeHashCache::new(item)) - .collect::, _>>()?; - - TreeHashCache::from_leaves_and_subtrees(self, subtrees) - } - } - } - - fn btree_overlay(&self, chunk_offset: usize) -> Result { - let lengths = match T::item_type() { - ItemType::Basic => vec![1; self.len() / T::packing_factor()], - ItemType::Composite | ItemType::List => { - let mut lengths = vec![]; - - for item in self { - lengths.push(BTreeOverlay::new(item, 0)?.total_nodes()) - } - - lengths - } - }; - - BTreeOverlay::from_lengths(chunk_offset, lengths) - } - - fn packed_encoding(&self) -> Result, Error> { - Err(Error::ShouldNeverBePacked(Self::item_type())) - } - - fn packing_factor() -> usize { - 1 - } - - fn cached_hash_tree_root( - &self, - other: &Vec, - cache: &mut TreeHashCache, - chunk: usize, - ) -> Result { - let offset_handler = BTreeOverlay::new(self, chunk)?; - let 
old_offset_handler = BTreeOverlay::new(other, chunk)?; - - if offset_handler.num_leaf_nodes != old_offset_handler.num_leaf_nodes { - let old_offset_handler = BTreeOverlay::new(other, chunk)?; - - // Get slices of the exsiting tree from the cache. - let (old_bytes, old_flags) = cache - .slices(old_offset_handler.chunk_range()) - .ok_or_else(|| Error::UnableToObtainSlices)?; - - let (new_bytes, new_flags) = - if offset_handler.num_leaf_nodes > old_offset_handler.num_leaf_nodes { - grow_merkle_cache( - old_bytes, - old_flags, - old_offset_handler.height(), - offset_handler.height(), - ) - .ok_or_else(|| Error::UnableToGrowMerkleTree)? - } else { - shrink_merkle_cache( - old_bytes, - old_flags, - old_offset_handler.height(), - offset_handler.height(), - offset_handler.total_chunks(), - ) - .ok_or_else(|| Error::UnableToShrinkMerkleTree)? - }; - - // Create a `TreeHashCache` from the raw elements. - let modified_cache = TreeHashCache::from_elems(new_bytes, new_flags); - - // Splice the newly created `TreeHashCache` over the existing elements. - cache.splice(old_offset_handler.chunk_range(), modified_cache); - } - - match T::item_type() { - ItemType::Basic => { - let leaves = get_packed_leaves(self)?; - - for (i, chunk) in offset_handler.iter_leaf_nodes().enumerate() { - if let Some(latest) = leaves.get(i * HASHSIZE..(i + 1) * HASHSIZE) { - cache.maybe_update_chunk(*chunk, latest)?; - } - } - let first_leaf_chunk = offset_handler.first_leaf_node()?; - - cache.splice( - first_leaf_chunk..offset_handler.next_node, - TreeHashCache::from_bytes(leaves, true)?, - ); - } - ItemType::Composite | ItemType::List => { - let mut i = offset_handler.num_leaf_nodes; - for &start_chunk in offset_handler.iter_leaf_nodes().rev() { - i -= 1; - match (other.get(i), self.get(i)) { - // The item existed in the previous list and exsits in the current list. 
- (Some(old), Some(new)) => { - new.cached_hash_tree_root(old, cache, start_chunk)?; - } - // The item existed in the previous list but does not exist in this list. - // - // I.e., the list has been shortened. - (Some(old), None) => { - // Splice out the entire tree of the removed node, replacing it with a - // single padding node. - let end_chunk = BTreeOverlay::new(old, start_chunk)?.next_node; - - cache.splice( - start_chunk..end_chunk, - TreeHashCache::from_bytes(vec![0; HASHSIZE], true)?, - ); - } - // The item existed in the previous list but does exist in this list. - // - // I.e., the list has been lengthened. - (None, Some(new)) => { - let bytes: Vec = TreeHashCache::new(new)?.into(); - - cache.splice( - start_chunk..start_chunk + 1, - TreeHashCache::from_bytes(bytes, true)?, - ); - } - // The item didn't exist in the old list and doesn't exist in the new list, - // nothing to do. - (None, None) => {} - }; - } - } - } - - for (&parent, children) in offset_handler.iter_internal_nodes().rev() { - if cache.either_modified(children)? { - cache.modify_chunk(parent, &cache.hash_children(children)?)?; - } - } - - // If the root node or the length has changed, mix in the length of the list. - let root_node = offset_handler.root(); - if cache.changed(root_node)? 
| (self.len() != other.len()) { - cache.modify_chunk(root_node, &cache.mix_in_length(root_node, self.len())?)?; - } - - Ok(offset_handler.next_node) - } -} - -fn get_packed_leaves(vec: &Vec) -> Result, Error> -where - T: CachedTreeHash, -{ - let num_packed_bytes = (BYTES_PER_CHUNK / T::packing_factor()) * vec.len(); - let num_leaves = num_sanitized_leaves(num_packed_bytes); - - let mut packed = Vec::with_capacity(num_leaves * HASHSIZE); - - for item in vec { - packed.append(&mut item.packed_encoding()?); - } - - Ok(sanitise_bytes(packed)) -} diff --git a/eth2/utils/tree_hash/src/impls/vec.rs b/eth2/utils/tree_hash/src/impls/vec.rs new file mode 100644 index 000000000..7c0993f43 --- /dev/null +++ b/eth2/utils/tree_hash/src/impls/vec.rs @@ -0,0 +1,183 @@ +use super::*; + +impl CachedTreeHash> for Vec +where + T: CachedTreeHash, +{ + fn item_type() -> ItemType { + ItemType::List + } + + fn new_cache(&self) -> Result { + match T::item_type() { + ItemType::Basic => { + TreeHashCache::from_bytes(merkleize(get_packed_leaves(self)?), false) + } + ItemType::Composite | ItemType::List => { + let subtrees = self + .iter() + .map(|item| TreeHashCache::new(item)) + .collect::, _>>()?; + + TreeHashCache::from_leaves_and_subtrees(self, subtrees) + } + } + } + + fn btree_overlay(&self, chunk_offset: usize) -> Result { + let lengths = match T::item_type() { + ItemType::Basic => vec![1; self.len() / T::packing_factor()], + ItemType::Composite | ItemType::List => { + let mut lengths = vec![]; + + for item in self { + lengths.push(BTreeOverlay::new(item, 0)?.total_nodes()) + } + + lengths + } + }; + + BTreeOverlay::from_lengths(chunk_offset, lengths) + } + + fn packed_encoding(&self) -> Result, Error> { + Err(Error::ShouldNeverBePacked(Self::item_type())) + } + + fn packing_factor() -> usize { + 1 + } + + fn update_cache( + &self, + other: &Vec, + cache: &mut TreeHashCache, + chunk: usize, + ) -> Result { + let offset_handler = BTreeOverlay::new(self, chunk)?; + let old_offset_handler 
= BTreeOverlay::new(other, chunk)?; + + if offset_handler.num_leaf_nodes != old_offset_handler.num_leaf_nodes { + let old_offset_handler = BTreeOverlay::new(other, chunk)?; + + // Get slices of the exsiting tree from the cache. + let (old_bytes, old_flags) = cache + .slices(old_offset_handler.chunk_range()) + .ok_or_else(|| Error::UnableToObtainSlices)?; + + let (new_bytes, new_flags) = + if offset_handler.num_leaf_nodes > old_offset_handler.num_leaf_nodes { + grow_merkle_cache( + old_bytes, + old_flags, + old_offset_handler.height(), + offset_handler.height(), + ) + .ok_or_else(|| Error::UnableToGrowMerkleTree)? + } else { + shrink_merkle_cache( + old_bytes, + old_flags, + old_offset_handler.height(), + offset_handler.height(), + offset_handler.total_chunks(), + ) + .ok_or_else(|| Error::UnableToShrinkMerkleTree)? + }; + + // Create a `TreeHashCache` from the raw elements. + let modified_cache = TreeHashCache::from_elems(new_bytes, new_flags); + + // Splice the newly created `TreeHashCache` over the existing elements. + cache.splice(old_offset_handler.chunk_range(), modified_cache); + } + + match T::item_type() { + ItemType::Basic => { + let leaves = get_packed_leaves(self)?; + + for (i, chunk) in offset_handler.iter_leaf_nodes().enumerate() { + if let Some(latest) = leaves.get(i * HASHSIZE..(i + 1) * HASHSIZE) { + cache.maybe_update_chunk(*chunk, latest)?; + } + } + let first_leaf_chunk = offset_handler.first_leaf_node()?; + + cache.splice( + first_leaf_chunk..offset_handler.next_node, + TreeHashCache::from_bytes(leaves, true)?, + ); + } + ItemType::Composite | ItemType::List => { + let mut i = offset_handler.num_leaf_nodes; + for &start_chunk in offset_handler.iter_leaf_nodes().rev() { + i -= 1; + match (other.get(i), self.get(i)) { + // The item existed in the previous list and exsits in the current list. + (Some(old), Some(new)) => { + new.update_cache(old, cache, start_chunk)?; + } + // The item existed in the previous list but does not exist in this list. 
+ // + // I.e., the list has been shortened. + (Some(old), None) => { + // Splice out the entire tree of the removed node, replacing it with a + // single padding node. + let end_chunk = BTreeOverlay::new(old, start_chunk)?.next_node; + + cache.splice( + start_chunk..end_chunk, + TreeHashCache::from_bytes(vec![0; HASHSIZE], true)?, + ); + } + // The item existed in the previous list but does exist in this list. + // + // I.e., the list has been lengthened. + (None, Some(new)) => { + let bytes: Vec = TreeHashCache::new(new)?.into(); + + cache.splice( + start_chunk..start_chunk + 1, + TreeHashCache::from_bytes(bytes, true)?, + ); + } + // The item didn't exist in the old list and doesn't exist in the new list, + // nothing to do. + (None, None) => {} + }; + } + } + } + + for (&parent, children) in offset_handler.iter_internal_nodes().rev() { + if cache.either_modified(children)? { + cache.modify_chunk(parent, &cache.hash_children(children)?)?; + } + } + + // If the root node or the length has changed, mix in the length of the list. + let root_node = offset_handler.root(); + if cache.changed(root_node)? 
| (self.len() != other.len()) { + cache.modify_chunk(root_node, &cache.mix_in_length(root_node, self.len())?)?; + } + + Ok(offset_handler.next_node) + } +} + +fn get_packed_leaves(vec: &Vec) -> Result, Error> +where + T: CachedTreeHash, +{ + let num_packed_bytes = (BYTES_PER_CHUNK / T::packing_factor()) * vec.len(); + let num_leaves = num_sanitized_leaves(num_packed_bytes); + + let mut packed = Vec::with_capacity(num_leaves * HASHSIZE); + + for item in vec { + packed.append(&mut item.packed_encoding()?); + } + + Ok(sanitise_bytes(packed)) +} diff --git a/eth2/utils/tree_hash/src/lib.rs b/eth2/utils/tree_hash/src/lib.rs index 0fd75dc5a..b3167a37d 100644 --- a/eth2/utils/tree_hash/src/lib.rs +++ b/eth2/utils/tree_hash/src/lib.rs @@ -40,15 +40,15 @@ pub enum ItemType { pub trait CachedTreeHash: Debug { fn item_type() -> ItemType; - fn build_tree_hash_cache(&self) -> Result; - fn btree_overlay(&self, chunk_offset: usize) -> Result; fn packed_encoding(&self) -> Result, Error>; fn packing_factor() -> usize; - fn cached_hash_tree_root( + fn new_cache(&self) -> Result; + + fn update_cache( &self, other: &Item, cache: &mut TreeHashCache, diff --git a/eth2/utils/tree_hash/tests/tests.rs b/eth2/utils/tree_hash/tests/tests.rs index 701bf8ec1..22780bcac 100644 --- a/eth2/utils/tree_hash/tests/tests.rs +++ b/eth2/utils/tree_hash/tests/tests.rs @@ -19,14 +19,14 @@ impl CachedTreeHash for Inner { ItemType::Composite } - fn build_tree_hash_cache(&self) -> Result { + fn new_cache(&self) -> Result { let tree = TreeHashCache::from_leaves_and_subtrees( self, vec![ - self.a.build_tree_hash_cache()?, - self.b.build_tree_hash_cache()?, - self.c.build_tree_hash_cache()?, - self.d.build_tree_hash_cache()?, + self.a.new_cache()?, + self.b.new_cache()?, + self.c.new_cache()?, + self.d.new_cache()?, ], )?; @@ -52,7 +52,7 @@ impl CachedTreeHash for Inner { 1 } - fn cached_hash_tree_root( + fn update_cache( &self, other: &Self, cache: &mut TreeHashCache, @@ -63,10 +63,10 @@ impl CachedTreeHash 
for Inner { // Skip past the internal nodes and update any changed leaf nodes. { let chunk = offset_handler.first_leaf_node()?; - let chunk = self.a.cached_hash_tree_root(&other.a, cache, chunk)?; - let chunk = self.b.cached_hash_tree_root(&other.b, cache, chunk)?; - let chunk = self.c.cached_hash_tree_root(&other.c, cache, chunk)?; - let _chunk = self.d.cached_hash_tree_root(&other.d, cache, chunk)?; + let chunk = self.a.update_cache(&other.a, cache, chunk)?; + let chunk = self.b.update_cache(&other.b, cache, chunk)?; + let chunk = self.c.update_cache(&other.c, cache, chunk)?; + let _chunk = self.d.update_cache(&other.d, cache, chunk)?; } for (&parent, children) in offset_handler.iter_internal_nodes().rev() { @@ -91,13 +91,13 @@ impl CachedTreeHash for Outer { ItemType::Composite } - fn build_tree_hash_cache(&self) -> Result { + fn new_cache(&self) -> Result { let tree = TreeHashCache::from_leaves_and_subtrees( self, vec![ - self.a.build_tree_hash_cache()?, - self.b.build_tree_hash_cache()?, - self.c.build_tree_hash_cache()?, + self.a.new_cache()?, + self.b.new_cache()?, + self.c.new_cache()?, ], )?; @@ -122,7 +122,7 @@ impl CachedTreeHash for Outer { 1 } - fn cached_hash_tree_root( + fn update_cache( &self, other: &Self, cache: &mut TreeHashCache, @@ -133,9 +133,9 @@ impl CachedTreeHash for Outer { // Skip past the internal nodes and update any changed leaf nodes. 
{ let chunk = offset_handler.first_leaf_node()?; - let chunk = self.a.cached_hash_tree_root(&other.a, cache, chunk)?; - let chunk = self.b.cached_hash_tree_root(&other.b, cache, chunk)?; - let _chunk = self.c.cached_hash_tree_root(&other.c, cache, chunk)?; + let chunk = self.a.update_cache(&other.a, cache, chunk)?; + let chunk = self.b.update_cache(&other.b, cache, chunk)?; + let _chunk = self.c.update_cache(&other.c, cache, chunk)?; } for (&parent, children) in offset_handler.iter_internal_nodes().rev() { @@ -186,7 +186,7 @@ fn partial_modification_to_inner_struct() { let mut cache_struct = TreeHashCache::new(&original_outer).unwrap(); modified_outer - .cached_hash_tree_root(&original_outer, &mut cache_struct, 0) + .update_cache(&original_outer, &mut cache_struct, 0) .unwrap(); let modified_cache: Vec = cache_struct.into(); @@ -240,7 +240,7 @@ fn partial_modification_to_outer() { let mut cache_struct = TreeHashCache::new(&original_outer).unwrap(); modified_outer - .cached_hash_tree_root(&original_outer, &mut cache_struct, 0) + .update_cache(&original_outer, &mut cache_struct, 0) .unwrap(); let modified_cache: Vec = cache_struct.into(); @@ -326,7 +326,7 @@ fn test_u64_vec_modifications(original: Vec, modified: Vec) { // Perform a differential hash let mut cache_struct = TreeHashCache::from_bytes(original_cache.clone(), false).unwrap(); modified - .cached_hash_tree_root(&original, &mut cache_struct, 0) + .update_cache(&original, &mut cache_struct, 0) .unwrap(); let modified_cache: Vec = cache_struct.into(); @@ -430,9 +430,7 @@ fn large_vec_of_u64_builds() { fn test_inner_vec_modifications(original: Vec, modified: Vec, reference: Vec) { let mut cache = TreeHashCache::new(&original).unwrap(); - modified - .cached_hash_tree_root(&original, &mut cache, 0) - .unwrap(); + modified.update_cache(&original, &mut cache, 0).unwrap(); let modified_cache: Vec = cache.into(); // Build the reference vec. 
@@ -792,7 +790,7 @@ fn generic_test(index: usize) { let mut cache_struct = TreeHashCache::from_bytes(cache.clone(), false).unwrap(); changed_inner - .cached_hash_tree_root(&inner, &mut cache_struct, 0) + .update_cache(&inner, &mut cache_struct, 0) .unwrap(); // assert_eq!(*cache_struct.hash_count, 3); From 354f823c1628e56de6627961dc3746a1f09c7a2b Mon Sep 17 00:00:00 2001 From: Paul Hauner Date: Mon, 15 Apr 2019 15:13:02 +1000 Subject: [PATCH 47/89] Tidy tree hash cache, add new trait --- eth2/utils/tree_hash/src/btree_overlay.rs | 2 +- eth2/utils/tree_hash/src/cached_tree_hash.rs | 13 +- eth2/utils/tree_hash/src/impls.rs | 2 +- eth2/utils/tree_hash/src/impls/vec.rs | 6 +- eth2/utils/tree_hash/src/lib.rs | 12 +- eth2/utils/tree_hash/tests/tests.rs | 121 ++++++++++++++++++- 6 files changed, 138 insertions(+), 18 deletions(-) diff --git a/eth2/utils/tree_hash/src/btree_overlay.rs b/eth2/utils/tree_hash/src/btree_overlay.rs index 7d5602c0b..8c859d046 100644 --- a/eth2/utils/tree_hash/src/btree_overlay.rs +++ b/eth2/utils/tree_hash/src/btree_overlay.rs @@ -12,7 +12,7 @@ pub struct BTreeOverlay { impl BTreeOverlay { pub fn new(item: &T, initial_offset: usize) -> Result where - T: CachedTreeHash, + T: CachedTreeHashSubtree, { item.btree_overlay(initial_offset) } diff --git a/eth2/utils/tree_hash/src/cached_tree_hash.rs b/eth2/utils/tree_hash/src/cached_tree_hash.rs index 4022f6b7b..97f9388a1 100644 --- a/eth2/utils/tree_hash/src/cached_tree_hash.rs +++ b/eth2/utils/tree_hash/src/cached_tree_hash.rs @@ -15,7 +15,7 @@ impl Into> for TreeHashCache { impl TreeHashCache { pub fn new(item: &T) -> Result where - T: CachedTreeHash, + T: CachedTreeHashSubtree, { item.new_cache() } @@ -32,7 +32,7 @@ impl TreeHashCache { leaves_and_subtrees: Vec, ) -> Result where - T: CachedTreeHash, + T: CachedTreeHashSubtree, { let offset_handler = BTreeOverlay::new(item, 0)?; @@ -55,7 +55,7 @@ impl TreeHashCache { // Iterate through all of the leaves/subtrees, adding their root as a leaf node 
and then // concatenating their merkle trees. for t in leaves_and_subtrees { - leaves.append(&mut t.root()?); + leaves.append(&mut t.root().ok_or_else(|| Error::NoBytesForRoot)?.to_vec()); cache.append(&mut t.into_merkle_tree()); } @@ -89,11 +89,8 @@ impl TreeHashCache { self.cache.len() } - pub fn root(&self) -> Result, Error> { - self.cache - .get(0..HASHSIZE) - .ok_or_else(|| Error::NoBytesForRoot) - .and_then(|slice| Ok(slice.to_vec())) + pub fn root(&self) -> Option<&[u8]> { + self.cache.get(0..HASHSIZE) } pub fn splice(&mut self, chunk_range: Range, replace_with: Self) { diff --git a/eth2/utils/tree_hash/src/impls.rs b/eth2/utils/tree_hash/src/impls.rs index 6849fd55c..bd5c352c9 100644 --- a/eth2/utils/tree_hash/src/impls.rs +++ b/eth2/utils/tree_hash/src/impls.rs @@ -4,7 +4,7 @@ use ssz::ssz_encode; mod vec; -impl CachedTreeHash for u64 { +impl CachedTreeHashSubtree for u64 { fn item_type() -> ItemType { ItemType::Basic } diff --git a/eth2/utils/tree_hash/src/impls/vec.rs b/eth2/utils/tree_hash/src/impls/vec.rs index 7c0993f43..c02460cf3 100644 --- a/eth2/utils/tree_hash/src/impls/vec.rs +++ b/eth2/utils/tree_hash/src/impls/vec.rs @@ -1,8 +1,8 @@ use super::*; -impl CachedTreeHash> for Vec +impl CachedTreeHashSubtree> for Vec where - T: CachedTreeHash, + T: CachedTreeHashSubtree, { fn item_type() -> ItemType { ItemType::List @@ -168,7 +168,7 @@ where fn get_packed_leaves(vec: &Vec) -> Result, Error> where - T: CachedTreeHash, + T: CachedTreeHashSubtree, { let num_packed_bytes = (BYTES_PER_CHUNK / T::packing_factor()) * vec.len(); let num_leaves = num_sanitized_leaves(num_packed_bytes); diff --git a/eth2/utils/tree_hash/src/lib.rs b/eth2/utils/tree_hash/src/lib.rs index b3167a37d..179f557ce 100644 --- a/eth2/utils/tree_hash/src/lib.rs +++ b/eth2/utils/tree_hash/src/lib.rs @@ -1,6 +1,5 @@ use hashing::hash; use int_to_bytes::int_to_bytes32; -use std::fmt::Debug; use std::ops::Range; mod btree_overlay; @@ -36,8 +35,15 @@ pub enum ItemType { Composite, } -// 
TODO: remove debug requirement. -pub trait CachedTreeHash: Debug { +pub trait CachedTreeHash: CachedTreeHashSubtree + Sized { + fn update_internal_tree_hash_cache(self, old: T) -> Result<(Self, Self), Error>; + + fn cached_tree_hash_root(&self) -> Option>; + + fn clone_without_tree_hash_cache(&self) -> Self; +} + +pub trait CachedTreeHashSubtree { fn item_type() -> ItemType; fn btree_overlay(&self, chunk_offset: usize) -> Result; diff --git a/eth2/utils/tree_hash/tests/tests.rs b/eth2/utils/tree_hash/tests/tests.rs index 22780bcac..c61a010ca 100644 --- a/eth2/utils/tree_hash/tests/tests.rs +++ b/eth2/utils/tree_hash/tests/tests.rs @@ -2,6 +2,123 @@ use hashing::hash; use int_to_bytes::{int_to_bytes32, int_to_bytes8}; use tree_hash::*; +#[derive(Clone, Debug)] +pub struct InternalCache { + pub a: u64, + pub b: u64, + pub cache: Option, +} + +impl CachedTreeHash for InternalCache { + fn update_internal_tree_hash_cache(mut self, mut old: Self) -> Result<(Self, Self), Error> { + let mut local_cache = old.cache; + old.cache = None; + + if let Some(ref mut local_cache) = local_cache { + self.update_cache(&old, local_cache, 0)?; + } else { + local_cache = Some(self.new_cache()?) 
+ } + + self.cache = local_cache; + + Ok((old, self)) + } + + fn cached_tree_hash_root(&self) -> Option> { + match &self.cache { + None => None, + Some(c) => Some(c.root()?.to_vec()), + } + } + + fn clone_without_tree_hash_cache(&self) -> Self { + Self { + a: self.a, + b: self.b, + cache: None, + } + } +} + +#[test] +fn works_when_embedded() { + let old = InternalCache { + a: 99, + b: 99, + cache: None, + }; + + let mut new = old.clone_without_tree_hash_cache(); + new.a = 1; + new.b = 2; + + let (_old, new) = new.update_internal_tree_hash_cache(old).unwrap(); + + let root = new.cached_tree_hash_root().unwrap(); + + let leaves = vec![int_to_bytes32(1), int_to_bytes32(2)]; + let merkle = merkleize(join(leaves)); + + assert_eq!(&merkle[0..32], &root[..]); +} + +impl CachedTreeHashSubtree for InternalCache { + fn item_type() -> ItemType { + ItemType::Composite + } + + fn new_cache(&self) -> Result { + let tree = TreeHashCache::from_leaves_and_subtrees( + self, + vec![self.a.new_cache()?, self.b.new_cache()?], + )?; + + Ok(tree) + } + + fn btree_overlay(&self, chunk_offset: usize) -> Result { + let mut lengths = vec![]; + + lengths.push(BTreeOverlay::new(&self.a, 0)?.total_nodes()); + lengths.push(BTreeOverlay::new(&self.b, 0)?.total_nodes()); + + BTreeOverlay::from_lengths(chunk_offset, lengths) + } + + fn packed_encoding(&self) -> Result, Error> { + Err(Error::ShouldNeverBePacked(Self::item_type())) + } + + fn packing_factor() -> usize { + 1 + } + + fn update_cache( + &self, + other: &Self, + cache: &mut TreeHashCache, + chunk: usize, + ) -> Result { + let offset_handler = BTreeOverlay::new(self, chunk)?; + + // Skip past the internal nodes and update any changed leaf nodes. + { + let chunk = offset_handler.first_leaf_node()?; + let chunk = self.a.update_cache(&other.a, cache, chunk)?; + let _chunk = self.b.update_cache(&other.b, cache, chunk)?; + } + + for (&parent, children) in offset_handler.iter_internal_nodes().rev() { + if cache.either_modified(children)? 
{ + cache.modify_chunk(parent, &cache.hash_children(children)?)?; + } + } + + Ok(offset_handler.next_node) + } +} + fn num_nodes(num_leaves: usize) -> usize { 2 * num_leaves - 1 } @@ -14,7 +131,7 @@ pub struct Inner { pub d: u64, } -impl CachedTreeHash for Inner { +impl CachedTreeHashSubtree for Inner { fn item_type() -> ItemType { ItemType::Composite } @@ -86,7 +203,7 @@ pub struct Outer { pub c: u64, } -impl CachedTreeHash for Outer { +impl CachedTreeHashSubtree for Outer { fn item_type() -> ItemType { ItemType::Composite } From 2be05a466f0e0cddc56fa807c6e70b28913fafe3 Mon Sep 17 00:00:00 2001 From: Paul Hauner Date: Mon, 15 Apr 2019 15:45:05 +1000 Subject: [PATCH 48/89] Add tree_hash_derive crate --- Cargo.toml | 1 + eth2/utils/tree_hash/src/btree_overlay.rs | 2 +- eth2/utils/tree_hash/src/cached_tree_hash.rs | 4 +- eth2/utils/tree_hash/src/impls.rs | 2 +- eth2/utils/tree_hash/src/impls/vec.rs | 6 +- eth2/utils/tree_hash/src/lib.rs | 4 +- eth2/utils/tree_hash/tests/tests.rs | 6 +- eth2/utils/tree_hash_derive/Cargo.toml | 16 +++ eth2/utils/tree_hash_derive/src/lib.rs | 125 +++++++++++++++++++ eth2/utils/tree_hash_derive/tests/tests.rs | 9 ++ 10 files changed, 163 insertions(+), 12 deletions(-) create mode 100644 eth2/utils/tree_hash_derive/Cargo.toml create mode 100644 eth2/utils/tree_hash_derive/src/lib.rs create mode 100644 eth2/utils/tree_hash_derive/tests/tests.rs diff --git a/Cargo.toml b/Cargo.toml index 2574d328f..b419d32e4 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -19,6 +19,7 @@ members = [ "eth2/utils/ssz_derive", "eth2/utils/swap_or_not_shuffle", "eth2/utils/tree_hash", + "eth2/utils/tree_hash_derive", "eth2/utils/fisher_yates_shuffle", "eth2/utils/test_random_derive", "beacon_node", diff --git a/eth2/utils/tree_hash/src/btree_overlay.rs b/eth2/utils/tree_hash/src/btree_overlay.rs index 8c859d046..1e188da60 100644 --- a/eth2/utils/tree_hash/src/btree_overlay.rs +++ b/eth2/utils/tree_hash/src/btree_overlay.rs @@ -12,7 +12,7 @@ pub struct BTreeOverlay { 
impl BTreeOverlay { pub fn new(item: &T, initial_offset: usize) -> Result where - T: CachedTreeHashSubtree, + T: CachedTreeHashSubTree, { item.btree_overlay(initial_offset) } diff --git a/eth2/utils/tree_hash/src/cached_tree_hash.rs b/eth2/utils/tree_hash/src/cached_tree_hash.rs index 97f9388a1..048d4bab5 100644 --- a/eth2/utils/tree_hash/src/cached_tree_hash.rs +++ b/eth2/utils/tree_hash/src/cached_tree_hash.rs @@ -15,7 +15,7 @@ impl Into> for TreeHashCache { impl TreeHashCache { pub fn new(item: &T) -> Result where - T: CachedTreeHashSubtree, + T: CachedTreeHashSubTree, { item.new_cache() } @@ -32,7 +32,7 @@ impl TreeHashCache { leaves_and_subtrees: Vec, ) -> Result where - T: CachedTreeHashSubtree, + T: CachedTreeHashSubTree, { let offset_handler = BTreeOverlay::new(item, 0)?; diff --git a/eth2/utils/tree_hash/src/impls.rs b/eth2/utils/tree_hash/src/impls.rs index bd5c352c9..982e98724 100644 --- a/eth2/utils/tree_hash/src/impls.rs +++ b/eth2/utils/tree_hash/src/impls.rs @@ -4,7 +4,7 @@ use ssz::ssz_encode; mod vec; -impl CachedTreeHashSubtree for u64 { +impl CachedTreeHashSubTree for u64 { fn item_type() -> ItemType { ItemType::Basic } diff --git a/eth2/utils/tree_hash/src/impls/vec.rs b/eth2/utils/tree_hash/src/impls/vec.rs index c02460cf3..a6fad9ba6 100644 --- a/eth2/utils/tree_hash/src/impls/vec.rs +++ b/eth2/utils/tree_hash/src/impls/vec.rs @@ -1,8 +1,8 @@ use super::*; -impl CachedTreeHashSubtree> for Vec +impl CachedTreeHashSubTree> for Vec where - T: CachedTreeHashSubtree, + T: CachedTreeHashSubTree, { fn item_type() -> ItemType { ItemType::List @@ -168,7 +168,7 @@ where fn get_packed_leaves(vec: &Vec) -> Result, Error> where - T: CachedTreeHashSubtree, + T: CachedTreeHashSubTree, { let num_packed_bytes = (BYTES_PER_CHUNK / T::packing_factor()) * vec.len(); let num_leaves = num_sanitized_leaves(num_packed_bytes); diff --git a/eth2/utils/tree_hash/src/lib.rs b/eth2/utils/tree_hash/src/lib.rs index 179f557ce..5ec2b0283 100644 --- 
a/eth2/utils/tree_hash/src/lib.rs +++ b/eth2/utils/tree_hash/src/lib.rs @@ -35,7 +35,7 @@ pub enum ItemType { Composite, } -pub trait CachedTreeHash: CachedTreeHashSubtree + Sized { +pub trait CachedTreeHash: CachedTreeHashSubTree + Sized { fn update_internal_tree_hash_cache(self, old: T) -> Result<(Self, Self), Error>; fn cached_tree_hash_root(&self) -> Option>; @@ -43,7 +43,7 @@ pub trait CachedTreeHash: CachedTreeHashSubtree + Sized { fn clone_without_tree_hash_cache(&self) -> Self; } -pub trait CachedTreeHashSubtree { +pub trait CachedTreeHashSubTree { fn item_type() -> ItemType; fn btree_overlay(&self, chunk_offset: usize) -> Result; diff --git a/eth2/utils/tree_hash/tests/tests.rs b/eth2/utils/tree_hash/tests/tests.rs index c61a010ca..ead6d8c00 100644 --- a/eth2/utils/tree_hash/tests/tests.rs +++ b/eth2/utils/tree_hash/tests/tests.rs @@ -63,7 +63,7 @@ fn works_when_embedded() { assert_eq!(&merkle[0..32], &root[..]); } -impl CachedTreeHashSubtree for InternalCache { +impl CachedTreeHashSubTree for InternalCache { fn item_type() -> ItemType { ItemType::Composite } @@ -131,7 +131,7 @@ pub struct Inner { pub d: u64, } -impl CachedTreeHashSubtree for Inner { +impl CachedTreeHashSubTree for Inner { fn item_type() -> ItemType { ItemType::Composite } @@ -203,7 +203,7 @@ pub struct Outer { pub c: u64, } -impl CachedTreeHashSubtree for Outer { +impl CachedTreeHashSubTree for Outer { fn item_type() -> ItemType { ItemType::Composite } diff --git a/eth2/utils/tree_hash_derive/Cargo.toml b/eth2/utils/tree_hash_derive/Cargo.toml new file mode 100644 index 000000000..f227d7954 --- /dev/null +++ b/eth2/utils/tree_hash_derive/Cargo.toml @@ -0,0 +1,16 @@ +[package] +name = "tree_hash_derive" +version = "0.1.0" +authors = ["Paul Hauner "] +edition = "2018" +description = "Procedural derive macros for SSZ tree hashing." 
+ +[lib] +proc-macro = true + +[dev-dependencies] +tree_hash = { path = "../tree_hash" } + +[dependencies] +syn = "0.15" +quote = "0.6" diff --git a/eth2/utils/tree_hash_derive/src/lib.rs b/eth2/utils/tree_hash_derive/src/lib.rs new file mode 100644 index 000000000..217e91c24 --- /dev/null +++ b/eth2/utils/tree_hash_derive/src/lib.rs @@ -0,0 +1,125 @@ +#![recursion_limit = "256"] +extern crate proc_macro; + +use proc_macro::TokenStream; +use quote::quote; +use syn::{parse_macro_input, DeriveInput}; + +/// Returns a Vec of `syn::Ident` for each named field in the struct, whilst filtering out fields +/// that should not be hashed. +/// +/// # Panics +/// Any unnamed struct field (like in a tuple struct) will raise a panic at compile time. +fn get_hashable_named_field_idents<'a>(struct_data: &'a syn::DataStruct) -> Vec<&'a syn::Ident> { + struct_data + .fields + .iter() + .filter_map(|f| { + if should_skip_hashing(&f) { + None + } else { + Some(match &f.ident { + Some(ref ident) => ident, + _ => panic!("tree_hash_derive only supports named struct fields."), + }) + } + }) + .collect() +} + +/// Returns true if some field has an attribute declaring it should not be hashedd. +/// +/// The field attribute is: `#[tree_hash(skip_hashing)]` +fn should_skip_hashing(field: &syn::Field) -> bool { + for attr in &field.attrs { + if attr.tts.to_string() == "( skip_hashing )" { + return true; + } + } + false +} + +/// Implements `ssz::Encodable` for some `struct`. +/// +/// Fields are encoded in the order they are defined. 
+#[proc_macro_derive(CachedTreeHashSubTree, attributes(tree_hash))] +pub fn subtree_derive(input: TokenStream) -> TokenStream { + let item = parse_macro_input!(input as DeriveInput); + + let name = &item.ident; + + let struct_data = match &item.data { + syn::Data::Struct(s) => s, + _ => panic!("tree_hash_derive only supports structs."), + }; + + let idents_a = get_hashable_named_field_idents(&struct_data); + let idents_b = idents_a.clone(); + let idents_c = idents_a.clone(); + let idents_d = idents_a.clone(); + + let output = quote! { + impl tree_hash::CachedTreeHashSubTree<#name> for #name { + fn item_type() -> tree_hash::ItemType { + tree_hash::ItemType::Composite + } + + fn new_cache(&self) -> Result { + let tree = tree_hash::TreeHashCache::from_leaves_and_subtrees( + self, + vec![ + #( + self.#idents_a.new_cache()?, + )* + ], + )?; + + Ok(tree) + } + + fn btree_overlay(&self, chunk_offset: usize) -> Result { + let mut lengths = vec![]; + + #( + lengths.push(tree_hash::BTreeOverlay::new(&self.#idents_b, 0)?.total_nodes()); + )* + + tree_hash::BTreeOverlay::from_lengths(chunk_offset, lengths) + } + + fn packed_encoding(&self) -> Result, tree_hash::Error> { + Err(tree_hash::Error::ShouldNeverBePacked(Self::item_type())) + } + + fn packing_factor() -> usize { + 1 + } + + fn update_cache( + &self, + other: &Self, + cache: &mut tree_hash::TreeHashCache, + chunk: usize, + ) -> Result { + let offset_handler = tree_hash::BTreeOverlay::new(self, chunk)?; + + // Skip past the internal nodes and update any changed leaf nodes. + { + let chunk = offset_handler.first_leaf_node()?; + #( + let chunk = self.#idents_c.update_cache(&other.#idents_d, cache, chunk)?; + )* + } + + for (&parent, children) in offset_handler.iter_internal_nodes().rev() { + if cache.either_modified(children)? 
{ + cache.modify_chunk(parent, &cache.hash_children(children)?)?; + } + } + + Ok(offset_handler.next_node) + } + } + }; + output.into() +} diff --git a/eth2/utils/tree_hash_derive/tests/tests.rs b/eth2/utils/tree_hash_derive/tests/tests.rs new file mode 100644 index 000000000..a5ab112a2 --- /dev/null +++ b/eth2/utils/tree_hash_derive/tests/tests.rs @@ -0,0 +1,9 @@ +use tree_hash_derive::CachedTreeHashSubTree; + +#[derive(Clone, Debug, CachedTreeHashSubTree)] +pub struct Inner { + pub a: u64, + pub b: u64, + pub c: u64, + pub d: u64, +} From 93f3fc858d97791563ab52eb1a1dea78b8f1ec46 Mon Sep 17 00:00:00 2001 From: Paul Hauner Date: Tue, 16 Apr 2019 09:14:33 +1000 Subject: [PATCH 49/89] Add uncached tree hashing --- eth2/utils/tree_hash/src/cached_tree_hash.rs | 125 ++++++++++++++++ .../{ => cached_tree_hash}/btree_overlay.rs | 0 .../src/{ => cached_tree_hash}/impls.rs | 0 .../src/{ => cached_tree_hash}/impls/vec.rs | 0 .../src/{ => cached_tree_hash}/resize.rs | 0 eth2/utils/tree_hash/src/lib.rs | 134 +----------------- .../utils/tree_hash/src/standard_tree_hash.rs | 114 +++++++++++++++ eth2/utils/tree_hash/tests/tests.rs | 25 ++++ 8 files changed, 268 insertions(+), 130 deletions(-) rename eth2/utils/tree_hash/src/{ => cached_tree_hash}/btree_overlay.rs (100%) rename eth2/utils/tree_hash/src/{ => cached_tree_hash}/impls.rs (100%) rename eth2/utils/tree_hash/src/{ => cached_tree_hash}/impls/vec.rs (100%) rename eth2/utils/tree_hash/src/{ => cached_tree_hash}/resize.rs (100%) create mode 100644 eth2/utils/tree_hash/src/standard_tree_hash.rs diff --git a/eth2/utils/tree_hash/src/cached_tree_hash.rs b/eth2/utils/tree_hash/src/cached_tree_hash.rs index 048d4bab5..fc12cfbba 100644 --- a/eth2/utils/tree_hash/src/cached_tree_hash.rs +++ b/eth2/utils/tree_hash/src/cached_tree_hash.rs @@ -1,4 +1,129 @@ use super::*; +use hashing::hash; +use int_to_bytes::int_to_bytes32; +use std::ops::Range; + +pub mod btree_overlay; +pub mod impls; +pub mod resize; + +pub use 
btree_overlay::BTreeOverlay; + +#[derive(Debug, PartialEq, Clone)] +pub enum Error { + ShouldNotProduceBTreeOverlay, + NoFirstNode, + NoBytesForRoot, + UnableToObtainSlices, + UnableToGrowMerkleTree, + UnableToShrinkMerkleTree, + ShouldNeverBePacked(ItemType), + BytesAreNotEvenChunks(usize), + NoModifiedFieldForChunk(usize), + NoBytesForChunk(usize), +} + +pub trait CachedTreeHash: CachedTreeHashSubTree + Sized { + fn update_internal_tree_hash_cache(self, old: T) -> Result<(Self, Self), Error>; + + fn cached_tree_hash_root(&self) -> Option>; + + fn clone_without_tree_hash_cache(&self) -> Self; +} + +pub trait CachedTreeHashSubTree { + fn item_type() -> ItemType; + + fn btree_overlay(&self, chunk_offset: usize) -> Result; + + fn packed_encoding(&self) -> Result, Error>; + + fn packing_factor() -> usize; + + fn new_cache(&self) -> Result; + + fn update_cache( + &self, + other: &Item, + cache: &mut TreeHashCache, + chunk: usize, + ) -> Result; +} + +fn children(parent: usize) -> (usize, usize) { + ((2 * parent + 1), (2 * parent + 2)) +} + +fn node_range_to_byte_range(node_range: &Range) -> Range { + node_range.start * HASHSIZE..node_range.end * HASHSIZE +} + +/// Split `values` into a power-of-two, identical-length chunks (padding with `0`) and merkleize +/// them, returning the entire merkle tree. +/// +/// The root hash is `merkleize(values)[0..BYTES_PER_CHUNK]`. 
+pub fn merkleize(values: Vec) -> Vec { + let values = sanitise_bytes(values); + + let leaves = values.len() / HASHSIZE; + + if leaves == 0 { + panic!("No full leaves"); + } + + if !leaves.is_power_of_two() { + panic!("leaves is not power of two"); + } + + let mut o: Vec = vec![0; (num_nodes(leaves) - leaves) * HASHSIZE]; + o.append(&mut values.to_vec()); + + let mut i = o.len(); + let mut j = o.len() - values.len(); + + while i >= MERKLE_HASH_CHUNCK { + i -= MERKLE_HASH_CHUNCK; + let hash = hash(&o[i..i + MERKLE_HASH_CHUNCK]); + + j -= HASHSIZE; + o[j..j + HASHSIZE].copy_from_slice(&hash); + } + + o +} + +pub fn sanitise_bytes(mut bytes: Vec) -> Vec { + let present_leaves = num_unsanitized_leaves(bytes.len()); + let required_leaves = present_leaves.next_power_of_two(); + + if (present_leaves != required_leaves) | last_leaf_needs_padding(bytes.len()) { + bytes.resize(num_bytes(required_leaves), 0); + } + + bytes +} + +fn pad_for_leaf_count(num_leaves: usize, bytes: &mut Vec) { + let required_leaves = num_leaves.next_power_of_two(); + + bytes.resize( + bytes.len() + (required_leaves - num_leaves) * BYTES_PER_CHUNK, + 0, + ); +} + +fn last_leaf_needs_padding(num_bytes: usize) -> bool { + num_bytes % HASHSIZE != 0 +} + +/// Rounds up +fn num_unsanitized_leaves(num_bytes: usize) -> usize { + (num_bytes + HASHSIZE - 1) / HASHSIZE +} + +fn num_bytes(num_leaves: usize) -> usize { + num_leaves * HASHSIZE +} #[derive(Debug, PartialEq, Clone)] pub struct TreeHashCache { diff --git a/eth2/utils/tree_hash/src/btree_overlay.rs b/eth2/utils/tree_hash/src/cached_tree_hash/btree_overlay.rs similarity index 100% rename from eth2/utils/tree_hash/src/btree_overlay.rs rename to eth2/utils/tree_hash/src/cached_tree_hash/btree_overlay.rs diff --git a/eth2/utils/tree_hash/src/impls.rs b/eth2/utils/tree_hash/src/cached_tree_hash/impls.rs similarity index 100% rename from eth2/utils/tree_hash/src/impls.rs rename to eth2/utils/tree_hash/src/cached_tree_hash/impls.rs diff --git 
a/eth2/utils/tree_hash/src/impls/vec.rs b/eth2/utils/tree_hash/src/cached_tree_hash/impls/vec.rs similarity index 100% rename from eth2/utils/tree_hash/src/impls/vec.rs rename to eth2/utils/tree_hash/src/cached_tree_hash/impls/vec.rs diff --git a/eth2/utils/tree_hash/src/resize.rs b/eth2/utils/tree_hash/src/cached_tree_hash/resize.rs similarity index 100% rename from eth2/utils/tree_hash/src/resize.rs rename to eth2/utils/tree_hash/src/cached_tree_hash/resize.rs diff --git a/eth2/utils/tree_hash/src/lib.rs b/eth2/utils/tree_hash/src/lib.rs index 5ec2b0283..4e5302bca 100644 --- a/eth2/utils/tree_hash/src/lib.rs +++ b/eth2/utils/tree_hash/src/lib.rs @@ -1,33 +1,10 @@ -use hashing::hash; -use int_to_bytes::int_to_bytes32; -use std::ops::Range; - -mod btree_overlay; -mod cached_tree_hash; -mod impls; -mod resize; - -pub use btree_overlay::BTreeOverlay; -pub use cached_tree_hash::TreeHashCache; +pub mod cached_tree_hash; +pub mod standard_tree_hash; pub const BYTES_PER_CHUNK: usize = 32; pub const HASHSIZE: usize = 32; pub const MERKLE_HASH_CHUNCK: usize = 2 * BYTES_PER_CHUNK; -#[derive(Debug, PartialEq, Clone)] -pub enum Error { - ShouldNotProduceBTreeOverlay, - NoFirstNode, - NoBytesForRoot, - UnableToObtainSlices, - UnableToGrowMerkleTree, - UnableToShrinkMerkleTree, - ShouldNeverBePacked(ItemType), - BytesAreNotEvenChunks(usize), - NoModifiedFieldForChunk(usize), - NoBytesForChunk(usize), -} - #[derive(Debug, PartialEq, Clone)] pub enum ItemType { Basic, @@ -35,114 +12,11 @@ pub enum ItemType { Composite, } -pub trait CachedTreeHash: CachedTreeHashSubTree + Sized { - fn update_internal_tree_hash_cache(self, old: T) -> Result<(Self, Self), Error>; - - fn cached_tree_hash_root(&self) -> Option>; - - fn clone_without_tree_hash_cache(&self) -> Self; -} - -pub trait CachedTreeHashSubTree { - fn item_type() -> ItemType; - - fn btree_overlay(&self, chunk_offset: usize) -> Result; - - fn packed_encoding(&self) -> Result, Error>; - - fn packing_factor() -> usize; - - fn 
new_cache(&self) -> Result; - - fn update_cache( - &self, - other: &Item, - cache: &mut TreeHashCache, - chunk: usize, - ) -> Result; -} - -fn children(parent: usize) -> (usize, usize) { - ((2 * parent + 1), (2 * parent + 2)) -} - -fn num_nodes(num_leaves: usize) -> usize { - 2 * num_leaves - 1 -} - -fn node_range_to_byte_range(node_range: &Range) -> Range { - node_range.start * HASHSIZE..node_range.end * HASHSIZE -} - -/// Split `values` into a power-of-two, identical-length chunks (padding with `0`) and merkleize -/// them, returning the entire merkle tree. -/// -/// The root hash is `merkleize(values)[0..BYTES_PER_CHUNK]`. -pub fn merkleize(values: Vec) -> Vec { - let values = sanitise_bytes(values); - - let leaves = values.len() / HASHSIZE; - - if leaves == 0 { - panic!("No full leaves"); - } - - if !leaves.is_power_of_two() { - panic!("leaves is not power of two"); - } - - let mut o: Vec = vec![0; (num_nodes(leaves) - leaves) * HASHSIZE]; - o.append(&mut values.to_vec()); - - let mut i = o.len(); - let mut j = o.len() - values.len(); - - while i >= MERKLE_HASH_CHUNCK { - i -= MERKLE_HASH_CHUNCK; - let hash = hash(&o[i..i + MERKLE_HASH_CHUNCK]); - - j -= HASHSIZE; - o[j..j + HASHSIZE].copy_from_slice(&hash); - } - - o -} - -pub fn sanitise_bytes(mut bytes: Vec) -> Vec { - let present_leaves = num_unsanitized_leaves(bytes.len()); - let required_leaves = present_leaves.next_power_of_two(); - - if (present_leaves != required_leaves) | last_leaf_needs_padding(bytes.len()) { - bytes.resize(num_bytes(required_leaves), 0); - } - - bytes -} - -fn pad_for_leaf_count(num_leaves: usize, bytes: &mut Vec) { - let required_leaves = num_leaves.next_power_of_two(); - - bytes.resize( - bytes.len() + (required_leaves - num_leaves) * BYTES_PER_CHUNK, - 0, - ); -} - -fn last_leaf_needs_padding(num_bytes: usize) -> bool { - num_bytes % HASHSIZE != 0 -} - -/// Rounds up -fn num_unsanitized_leaves(num_bytes: usize) -> usize { - (num_bytes + HASHSIZE - 1) / HASHSIZE -} - -/// Rounds 
up fn num_sanitized_leaves(num_bytes: usize) -> usize { let leaves = (num_bytes + HASHSIZE - 1) / HASHSIZE; leaves.next_power_of_two() } -fn num_bytes(num_leaves: usize) -> usize { - num_leaves * HASHSIZE +fn num_nodes(num_leaves: usize) -> usize { + 2 * num_leaves - 1 } diff --git a/eth2/utils/tree_hash/src/standard_tree_hash.rs b/eth2/utils/tree_hash/src/standard_tree_hash.rs new file mode 100644 index 000000000..c8119a790 --- /dev/null +++ b/eth2/utils/tree_hash/src/standard_tree_hash.rs @@ -0,0 +1,114 @@ +use super::*; +use hashing::hash; +use int_to_bytes::int_to_bytes32; +use ssz::ssz_encode; + +pub trait TreeHash { + fn tree_hash_item_type() -> ItemType; + + fn tree_hash_packed_encoding(&self) -> Vec; + + fn hash_tree_root(&self) -> Vec; +} + +impl TreeHash for u64 { + fn tree_hash_item_type() -> ItemType { + ItemType::Basic + } + + fn tree_hash_packed_encoding(&self) -> Vec { + ssz_encode(self) + } + + fn hash_tree_root(&self) -> Vec { + int_to_bytes32(*self) + } +} + +impl TreeHash for Vec +where + T: TreeHash, +{ + fn tree_hash_item_type() -> ItemType { + ItemType::List + } + + fn tree_hash_packed_encoding(&self) -> Vec { + unreachable!("List should never be packed.") + } + + fn hash_tree_root(&self) -> Vec { + let leaves = match T::tree_hash_item_type() { + ItemType::Basic => { + let mut leaves = vec![]; + + for item in self { + leaves.append(&mut item.tree_hash_packed_encoding()); + } + + leaves + } + ItemType::Composite | ItemType::List => { + let mut leaves = Vec::with_capacity(self.len() * HASHSIZE); + + for item in self { + leaves.append(&mut item.hash_tree_root()) + } + + leaves + } + }; + + // Mix in the length + let mut root_and_len = Vec::with_capacity(HASHSIZE * 2); + root_and_len.append(&mut efficient_merkleize(&leaves)[0..32].to_vec()); + root_and_len.append(&mut int_to_bytes32(self.len() as u64)); + + hash(&root_and_len) + } +} + +pub fn efficient_merkleize(bytes: &[u8]) -> Vec { + let leaves = num_sanitized_leaves(bytes.len()); + let nodes 
= num_nodes(leaves); + let internal_nodes = nodes - leaves; + + let num_bytes = internal_nodes * HASHSIZE + bytes.len(); + + let mut o: Vec = vec![0; internal_nodes * HASHSIZE]; + o.append(&mut bytes.to_vec()); + + assert_eq!(o.len(), num_bytes); + + let empty_chunk_hash = hash(&[0; MERKLE_HASH_CHUNCK]); + + let mut i = nodes * HASHSIZE; + let mut j = internal_nodes * HASHSIZE; + + while i >= MERKLE_HASH_CHUNCK { + i -= MERKLE_HASH_CHUNCK; + + j -= HASHSIZE; + let hash = match o.get(i..i + MERKLE_HASH_CHUNCK) { + // All bytes are available, hash as ususal. + Some(slice) => hash(slice), + // Unable to get all the bytes. + None => { + match o.get(i..) { + // Able to get some of the bytes, pad them out. + Some(slice) => { + let mut bytes = slice.to_vec(); + bytes.resize(MERKLE_HASH_CHUNCK, 0); + hash(&bytes) + } + // Unable to get any bytes, use the empty-chunk hash. + None => empty_chunk_hash.clone(), + } + } + }; + + o[j..j + HASHSIZE].copy_from_slice(&hash); + } + + o +} diff --git a/eth2/utils/tree_hash/tests/tests.rs b/eth2/utils/tree_hash/tests/tests.rs index ead6d8c00..d65192cd5 100644 --- a/eth2/utils/tree_hash/tests/tests.rs +++ b/eth2/utils/tree_hash/tests/tests.rs @@ -1,5 +1,7 @@ use hashing::hash; use int_to_bytes::{int_to_bytes32, int_to_bytes8}; +use tree_hash::cached_tree_hash::*; +use tree_hash::standard_tree_hash::*; use tree_hash::*; #[derive(Clone, Debug)] @@ -131,6 +133,27 @@ pub struct Inner { pub d: u64, } +impl TreeHash for Inner { + fn tree_hash_item_type() -> ItemType { + ItemType::Composite + } + + fn tree_hash_packed_encoding(&self) -> Vec { + unreachable!("Struct should never be packed.") + } + + fn hash_tree_root(&self) -> Vec { + let mut leaves = Vec::with_capacity(4 * HASHSIZE); + + leaves.append(&mut self.a.hash_tree_root()); + leaves.append(&mut self.b.hash_tree_root()); + leaves.append(&mut self.c.hash_tree_root()); + leaves.append(&mut self.d.hash_tree_root()); + + efficient_merkleize(&leaves)[0..32].to_vec() + } +} + impl 
CachedTreeHashSubTree for Inner { fn item_type() -> ItemType { ItemType::Composite @@ -458,6 +481,7 @@ fn test_u64_vec_modifications(original: Vec, modified: Vec) { mix_in_length(&mut expected[0..HASHSIZE], modified.len()); assert_eq!(expected, modified_cache); + assert_eq!(&expected[0..32], &modified.hash_tree_root()[..]); } #[test] @@ -580,6 +604,7 @@ fn test_inner_vec_modifications(original: Vec, modified: Vec, refe // Compare the cached tree to the reference tree. assert_trees_eq(&expected, &modified_cache); + assert_eq!(&expected[0..32], &modified.hash_tree_root()[..]); } #[test] From d311b48a9f35f4463939bbf50efbe65eea7a5261 Mon Sep 17 00:00:00 2001 From: Paul Hauner Date: Tue, 16 Apr 2019 09:34:23 +1000 Subject: [PATCH 50/89] Unify tree hash methods --- eth2/utils/tree_hash/src/cached_tree_hash.rs | 10 +- .../tree_hash/src/cached_tree_hash/impls.rs | 12 -- .../src/cached_tree_hash/impls/vec.rs | 36 ++---- eth2/utils/tree_hash/src/lib.rs | 5 +- .../utils/tree_hash/src/standard_tree_hash.rs | 37 ++++--- eth2/utils/tree_hash/tests/tests.rs | 103 ++++++++++-------- 6 files changed, 101 insertions(+), 102 deletions(-) diff --git a/eth2/utils/tree_hash/src/cached_tree_hash.rs b/eth2/utils/tree_hash/src/cached_tree_hash.rs index fc12cfbba..43c0ba2fe 100644 --- a/eth2/utils/tree_hash/src/cached_tree_hash.rs +++ b/eth2/utils/tree_hash/src/cached_tree_hash.rs @@ -17,7 +17,7 @@ pub enum Error { UnableToObtainSlices, UnableToGrowMerkleTree, UnableToShrinkMerkleTree, - ShouldNeverBePacked(ItemType), + ShouldNeverBePacked(TreeHashType), BytesAreNotEvenChunks(usize), NoModifiedFieldForChunk(usize), NoBytesForChunk(usize), @@ -31,15 +31,9 @@ pub trait CachedTreeHash: CachedTreeHashSubTree + Sized { fn clone_without_tree_hash_cache(&self) -> Self; } -pub trait CachedTreeHashSubTree { - fn item_type() -> ItemType; - +pub trait CachedTreeHashSubTree: TreeHash { fn btree_overlay(&self, chunk_offset: usize) -> Result; - fn packed_encoding(&self) -> Result, Error>; - - fn 
packing_factor() -> usize; - fn new_cache(&self) -> Result; fn update_cache( diff --git a/eth2/utils/tree_hash/src/cached_tree_hash/impls.rs b/eth2/utils/tree_hash/src/cached_tree_hash/impls.rs index 982e98724..190deaf27 100644 --- a/eth2/utils/tree_hash/src/cached_tree_hash/impls.rs +++ b/eth2/utils/tree_hash/src/cached_tree_hash/impls.rs @@ -5,10 +5,6 @@ use ssz::ssz_encode; mod vec; impl CachedTreeHashSubTree for u64 { - fn item_type() -> ItemType { - ItemType::Basic - } - fn new_cache(&self) -> Result { Ok(TreeHashCache::from_bytes( merkleize(ssz_encode(self)), @@ -20,14 +16,6 @@ impl CachedTreeHashSubTree for u64 { BTreeOverlay::from_lengths(chunk_offset, vec![1]) } - fn packed_encoding(&self) -> Result, Error> { - Ok(ssz_encode(self)) - } - - fn packing_factor() -> usize { - HASHSIZE / 8 - } - fn update_cache( &self, other: &Self, diff --git a/eth2/utils/tree_hash/src/cached_tree_hash/impls/vec.rs b/eth2/utils/tree_hash/src/cached_tree_hash/impls/vec.rs index a6fad9ba6..bc86e6054 100644 --- a/eth2/utils/tree_hash/src/cached_tree_hash/impls/vec.rs +++ b/eth2/utils/tree_hash/src/cached_tree_hash/impls/vec.rs @@ -2,18 +2,14 @@ use super::*; impl CachedTreeHashSubTree> for Vec where - T: CachedTreeHashSubTree, + T: CachedTreeHashSubTree + TreeHash, { - fn item_type() -> ItemType { - ItemType::List - } - fn new_cache(&self) -> Result { - match T::item_type() { - ItemType::Basic => { + match T::tree_hash_type() { + TreeHashType::Basic => { TreeHashCache::from_bytes(merkleize(get_packed_leaves(self)?), false) } - ItemType::Composite | ItemType::List => { + TreeHashType::Composite | TreeHashType::List => { let subtrees = self .iter() .map(|item| TreeHashCache::new(item)) @@ -25,9 +21,9 @@ where } fn btree_overlay(&self, chunk_offset: usize) -> Result { - let lengths = match T::item_type() { - ItemType::Basic => vec![1; self.len() / T::packing_factor()], - ItemType::Composite | ItemType::List => { + let lengths = match T::tree_hash_type() { + TreeHashType::Basic => 
vec![1; self.len() / T::tree_hash_packing_factor()], + TreeHashType::Composite | TreeHashType::List => { let mut lengths = vec![]; for item in self { @@ -41,14 +37,6 @@ where BTreeOverlay::from_lengths(chunk_offset, lengths) } - fn packed_encoding(&self) -> Result, Error> { - Err(Error::ShouldNeverBePacked(Self::item_type())) - } - - fn packing_factor() -> usize { - 1 - } - fn update_cache( &self, other: &Vec, @@ -93,8 +81,8 @@ where cache.splice(old_offset_handler.chunk_range(), modified_cache); } - match T::item_type() { - ItemType::Basic => { + match T::tree_hash_type() { + TreeHashType::Basic => { let leaves = get_packed_leaves(self)?; for (i, chunk) in offset_handler.iter_leaf_nodes().enumerate() { @@ -109,7 +97,7 @@ where TreeHashCache::from_bytes(leaves, true)?, ); } - ItemType::Composite | ItemType::List => { + TreeHashType::Composite | TreeHashType::List => { let mut i = offset_handler.num_leaf_nodes; for &start_chunk in offset_handler.iter_leaf_nodes().rev() { i -= 1; @@ -170,13 +158,13 @@ fn get_packed_leaves(vec: &Vec) -> Result, Error> where T: CachedTreeHashSubTree, { - let num_packed_bytes = (BYTES_PER_CHUNK / T::packing_factor()) * vec.len(); + let num_packed_bytes = (BYTES_PER_CHUNK / T::tree_hash_packing_factor()) * vec.len(); let num_leaves = num_sanitized_leaves(num_packed_bytes); let mut packed = Vec::with_capacity(num_leaves * HASHSIZE); for item in vec { - packed.append(&mut item.packed_encoding()?); + packed.append(&mut item.tree_hash_packed_encoding()); } Ok(sanitise_bytes(packed)) diff --git a/eth2/utils/tree_hash/src/lib.rs b/eth2/utils/tree_hash/src/lib.rs index 4e5302bca..0f0fb60f4 100644 --- a/eth2/utils/tree_hash/src/lib.rs +++ b/eth2/utils/tree_hash/src/lib.rs @@ -5,8 +5,11 @@ pub const BYTES_PER_CHUNK: usize = 32; pub const HASHSIZE: usize = 32; pub const MERKLE_HASH_CHUNCK: usize = 2 * BYTES_PER_CHUNK; +pub use cached_tree_hash::CachedTreeHashSubTree; +pub use standard_tree_hash::TreeHash; + #[derive(Debug, PartialEq, Clone)] -pub 
enum ItemType { +pub enum TreeHashType { Basic, List, Composite, diff --git a/eth2/utils/tree_hash/src/standard_tree_hash.rs b/eth2/utils/tree_hash/src/standard_tree_hash.rs index c8119a790..e7f94560b 100644 --- a/eth2/utils/tree_hash/src/standard_tree_hash.rs +++ b/eth2/utils/tree_hash/src/standard_tree_hash.rs @@ -4,23 +4,29 @@ use int_to_bytes::int_to_bytes32; use ssz::ssz_encode; pub trait TreeHash { - fn tree_hash_item_type() -> ItemType; + fn tree_hash_type() -> TreeHashType; fn tree_hash_packed_encoding(&self) -> Vec; - fn hash_tree_root(&self) -> Vec; + fn tree_hash_packing_factor() -> usize; + + fn tree_hash_root(&self) -> Vec; } impl TreeHash for u64 { - fn tree_hash_item_type() -> ItemType { - ItemType::Basic + fn tree_hash_type() -> TreeHashType { + TreeHashType::Basic } fn tree_hash_packed_encoding(&self) -> Vec { ssz_encode(self) } - fn hash_tree_root(&self) -> Vec { + fn tree_hash_packing_factor() -> usize { + HASHSIZE / 8 + } + + fn tree_hash_root(&self) -> Vec { int_to_bytes32(*self) } } @@ -29,18 +35,23 @@ impl TreeHash for Vec where T: TreeHash, { - fn tree_hash_item_type() -> ItemType { - ItemType::List + fn tree_hash_type() -> TreeHashType { + TreeHashType::List } fn tree_hash_packed_encoding(&self) -> Vec { unreachable!("List should never be packed.") } - fn hash_tree_root(&self) -> Vec { - let leaves = match T::tree_hash_item_type() { - ItemType::Basic => { - let mut leaves = vec![]; + fn tree_hash_packing_factor() -> usize { + unreachable!("List should never be packed.") + } + + fn tree_hash_root(&self) -> Vec { + let leaves = match T::tree_hash_type() { + TreeHashType::Basic => { + let mut leaves = + Vec::with_capacity((HASHSIZE / T::tree_hash_packing_factor()) * self.len()); for item in self { leaves.append(&mut item.tree_hash_packed_encoding()); @@ -48,11 +59,11 @@ where leaves } - ItemType::Composite | ItemType::List => { + TreeHashType::Composite | TreeHashType::List => { let mut leaves = Vec::with_capacity(self.len() * HASHSIZE); for 
item in self { - leaves.append(&mut item.hash_tree_root()) + leaves.append(&mut item.tree_hash_root()) } leaves diff --git a/eth2/utils/tree_hash/tests/tests.rs b/eth2/utils/tree_hash/tests/tests.rs index d65192cd5..f52a17272 100644 --- a/eth2/utils/tree_hash/tests/tests.rs +++ b/eth2/utils/tree_hash/tests/tests.rs @@ -11,6 +11,29 @@ pub struct InternalCache { pub cache: Option, } +impl TreeHash for InternalCache { + fn tree_hash_type() -> TreeHashType { + TreeHashType::Composite + } + + fn tree_hash_packed_encoding(&self) -> Vec { + unreachable!("Struct should never be packed.") + } + + fn tree_hash_packing_factor() -> usize { + unreachable!("Struct should never be packed.") + } + + fn tree_hash_root(&self) -> Vec { + let mut leaves = Vec::with_capacity(4 * HASHSIZE); + + leaves.append(&mut self.a.tree_hash_root()); + leaves.append(&mut self.b.tree_hash_root()); + + efficient_merkleize(&leaves)[0..32].to_vec() + } +} + impl CachedTreeHash for InternalCache { fn update_internal_tree_hash_cache(mut self, mut old: Self) -> Result<(Self, Self), Error> { let mut local_cache = old.cache; @@ -66,10 +89,6 @@ fn works_when_embedded() { } impl CachedTreeHashSubTree for InternalCache { - fn item_type() -> ItemType { - ItemType::Composite - } - fn new_cache(&self) -> Result { let tree = TreeHashCache::from_leaves_and_subtrees( self, @@ -88,14 +107,6 @@ impl CachedTreeHashSubTree for InternalCache { BTreeOverlay::from_lengths(chunk_offset, lengths) } - fn packed_encoding(&self) -> Result, Error> { - Err(Error::ShouldNeverBePacked(Self::item_type())) - } - - fn packing_factor() -> usize { - 1 - } - fn update_cache( &self, other: &Self, @@ -134,31 +145,31 @@ pub struct Inner { } impl TreeHash for Inner { - fn tree_hash_item_type() -> ItemType { - ItemType::Composite + fn tree_hash_type() -> TreeHashType { + TreeHashType::Composite } fn tree_hash_packed_encoding(&self) -> Vec { unreachable!("Struct should never be packed.") } - fn hash_tree_root(&self) -> Vec { + fn 
tree_hash_packing_factor() -> usize { + unreachable!("Struct should never be packed.") + } + + fn tree_hash_root(&self) -> Vec { let mut leaves = Vec::with_capacity(4 * HASHSIZE); - leaves.append(&mut self.a.hash_tree_root()); - leaves.append(&mut self.b.hash_tree_root()); - leaves.append(&mut self.c.hash_tree_root()); - leaves.append(&mut self.d.hash_tree_root()); + leaves.append(&mut self.a.tree_hash_root()); + leaves.append(&mut self.b.tree_hash_root()); + leaves.append(&mut self.c.tree_hash_root()); + leaves.append(&mut self.d.tree_hash_root()); efficient_merkleize(&leaves)[0..32].to_vec() } } impl CachedTreeHashSubTree for Inner { - fn item_type() -> ItemType { - ItemType::Composite - } - fn new_cache(&self) -> Result { let tree = TreeHashCache::from_leaves_and_subtrees( self, @@ -184,14 +195,6 @@ impl CachedTreeHashSubTree for Inner { BTreeOverlay::from_lengths(chunk_offset, lengths) } - fn packed_encoding(&self) -> Result, Error> { - Err(Error::ShouldNeverBePacked(Self::item_type())) - } - - fn packing_factor() -> usize { - 1 - } - fn update_cache( &self, other: &Self, @@ -226,11 +229,31 @@ pub struct Outer { pub c: u64, } -impl CachedTreeHashSubTree for Outer { - fn item_type() -> ItemType { - ItemType::Composite +impl TreeHash for Outer { + fn tree_hash_type() -> TreeHashType { + TreeHashType::Composite } + fn tree_hash_packed_encoding(&self) -> Vec { + unreachable!("Struct should never be packed.") + } + + fn tree_hash_packing_factor() -> usize { + unreachable!("Struct should never be packed.") + } + + fn tree_hash_root(&self) -> Vec { + let mut leaves = Vec::with_capacity(4 * HASHSIZE); + + leaves.append(&mut self.a.tree_hash_root()); + leaves.append(&mut self.b.tree_hash_root()); + leaves.append(&mut self.c.tree_hash_root()); + + efficient_merkleize(&leaves)[0..32].to_vec() + } +} + +impl CachedTreeHashSubTree for Outer { fn new_cache(&self) -> Result { let tree = TreeHashCache::from_leaves_and_subtrees( self, @@ -254,14 +277,6 @@ impl 
CachedTreeHashSubTree for Outer { BTreeOverlay::from_lengths(chunk_offset, lengths) } - fn packed_encoding(&self) -> Result, Error> { - Err(Error::ShouldNeverBePacked(Self::item_type())) - } - - fn packing_factor() -> usize { - 1 - } - fn update_cache( &self, other: &Self, @@ -481,7 +496,7 @@ fn test_u64_vec_modifications(original: Vec, modified: Vec) { mix_in_length(&mut expected[0..HASHSIZE], modified.len()); assert_eq!(expected, modified_cache); - assert_eq!(&expected[0..32], &modified.hash_tree_root()[..]); + assert_eq!(&expected[0..32], &modified.tree_hash_root()[..]); } #[test] @@ -604,7 +619,7 @@ fn test_inner_vec_modifications(original: Vec, modified: Vec, refe // Compare the cached tree to the reference tree. assert_trees_eq(&expected, &modified_cache); - assert_eq!(&expected[0..32], &modified.hash_tree_root()[..]); + assert_eq!(&expected[0..32], &modified.tree_hash_root()[..]); } #[test] From 8a1bde3e2f9d64f06000208e1940082007c241de Mon Sep 17 00:00:00 2001 From: Paul Hauner Date: Tue, 16 Apr 2019 10:47:58 +1000 Subject: [PATCH 51/89] Update naming for tree_hash fns/structs/traits --- eth2/utils/tree_hash/src/cached_tree_hash.rs | 8 +-- .../src/cached_tree_hash/btree_overlay.rs | 2 +- .../tree_hash/src/cached_tree_hash/impls.rs | 6 +- .../src/cached_tree_hash/impls/vec.rs | 8 +-- eth2/utils/tree_hash/src/lib.rs | 4 +- eth2/utils/tree_hash/tests/tests.rs | 72 ++++++++++--------- eth2/utils/tree_hash_derive/src/lib.rs | 70 +++++++++++++----- eth2/utils/tree_hash_derive/tests/tests.rs | 66 ++++++++++++++++- 8 files changed, 166 insertions(+), 70 deletions(-) diff --git a/eth2/utils/tree_hash/src/cached_tree_hash.rs b/eth2/utils/tree_hash/src/cached_tree_hash.rs index 43c0ba2fe..e093b2dd7 100644 --- a/eth2/utils/tree_hash/src/cached_tree_hash.rs +++ b/eth2/utils/tree_hash/src/cached_tree_hash.rs @@ -32,11 +32,11 @@ pub trait CachedTreeHash: CachedTreeHashSubTree + Sized { } pub trait CachedTreeHashSubTree: TreeHash { - fn btree_overlay(&self, chunk_offset: 
usize) -> Result; + fn tree_hash_cache_overlay(&self, chunk_offset: usize) -> Result; - fn new_cache(&self) -> Result; + fn new_tree_hash_cache(&self) -> Result; - fn update_cache( + fn update_tree_hash_cache( &self, other: &Item, cache: &mut TreeHashCache, @@ -136,7 +136,7 @@ impl TreeHashCache { where T: CachedTreeHashSubTree, { - item.new_cache() + item.new_tree_hash_cache() } pub fn from_elems(cache: Vec, chunk_modified: Vec) -> Self { diff --git a/eth2/utils/tree_hash/src/cached_tree_hash/btree_overlay.rs b/eth2/utils/tree_hash/src/cached_tree_hash/btree_overlay.rs index 1e188da60..e8c04a91e 100644 --- a/eth2/utils/tree_hash/src/cached_tree_hash/btree_overlay.rs +++ b/eth2/utils/tree_hash/src/cached_tree_hash/btree_overlay.rs @@ -14,7 +14,7 @@ impl BTreeOverlay { where T: CachedTreeHashSubTree, { - item.btree_overlay(initial_offset) + item.tree_hash_cache_overlay(initial_offset) } pub fn from_lengths(offset: usize, mut lengths: Vec) -> Result { diff --git a/eth2/utils/tree_hash/src/cached_tree_hash/impls.rs b/eth2/utils/tree_hash/src/cached_tree_hash/impls.rs index 190deaf27..62d013881 100644 --- a/eth2/utils/tree_hash/src/cached_tree_hash/impls.rs +++ b/eth2/utils/tree_hash/src/cached_tree_hash/impls.rs @@ -5,18 +5,18 @@ use ssz::ssz_encode; mod vec; impl CachedTreeHashSubTree for u64 { - fn new_cache(&self) -> Result { + fn new_tree_hash_cache(&self) -> Result { Ok(TreeHashCache::from_bytes( merkleize(ssz_encode(self)), false, )?) 
} - fn btree_overlay(&self, chunk_offset: usize) -> Result { + fn tree_hash_cache_overlay(&self, chunk_offset: usize) -> Result { BTreeOverlay::from_lengths(chunk_offset, vec![1]) } - fn update_cache( + fn update_tree_hash_cache( &self, other: &Self, cache: &mut TreeHashCache, diff --git a/eth2/utils/tree_hash/src/cached_tree_hash/impls/vec.rs b/eth2/utils/tree_hash/src/cached_tree_hash/impls/vec.rs index bc86e6054..6c0970cef 100644 --- a/eth2/utils/tree_hash/src/cached_tree_hash/impls/vec.rs +++ b/eth2/utils/tree_hash/src/cached_tree_hash/impls/vec.rs @@ -4,7 +4,7 @@ impl CachedTreeHashSubTree> for Vec where T: CachedTreeHashSubTree + TreeHash, { - fn new_cache(&self) -> Result { + fn new_tree_hash_cache(&self) -> Result { match T::tree_hash_type() { TreeHashType::Basic => { TreeHashCache::from_bytes(merkleize(get_packed_leaves(self)?), false) @@ -20,7 +20,7 @@ where } } - fn btree_overlay(&self, chunk_offset: usize) -> Result { + fn tree_hash_cache_overlay(&self, chunk_offset: usize) -> Result { let lengths = match T::tree_hash_type() { TreeHashType::Basic => vec![1; self.len() / T::tree_hash_packing_factor()], TreeHashType::Composite | TreeHashType::List => { @@ -37,7 +37,7 @@ where BTreeOverlay::from_lengths(chunk_offset, lengths) } - fn update_cache( + fn update_tree_hash_cache( &self, other: &Vec, cache: &mut TreeHashCache, @@ -104,7 +104,7 @@ where match (other.get(i), self.get(i)) { // The item existed in the previous list and exsits in the current list. (Some(old), Some(new)) => { - new.update_cache(old, cache, start_chunk)?; + new.update_tree_hash_cache(old, cache, start_chunk)?; } // The item existed in the previous list but does not exist in this list. 
// diff --git a/eth2/utils/tree_hash/src/lib.rs b/eth2/utils/tree_hash/src/lib.rs index 0f0fb60f4..04eb6d80f 100644 --- a/eth2/utils/tree_hash/src/lib.rs +++ b/eth2/utils/tree_hash/src/lib.rs @@ -5,8 +5,8 @@ pub const BYTES_PER_CHUNK: usize = 32; pub const HASHSIZE: usize = 32; pub const MERKLE_HASH_CHUNCK: usize = 2 * BYTES_PER_CHUNK; -pub use cached_tree_hash::CachedTreeHashSubTree; -pub use standard_tree_hash::TreeHash; +pub use cached_tree_hash::{BTreeOverlay, CachedTreeHashSubTree, Error, TreeHashCache}; +pub use standard_tree_hash::{efficient_merkleize, TreeHash}; #[derive(Debug, PartialEq, Clone)] pub enum TreeHashType { diff --git a/eth2/utils/tree_hash/tests/tests.rs b/eth2/utils/tree_hash/tests/tests.rs index f52a17272..db33709ac 100644 --- a/eth2/utils/tree_hash/tests/tests.rs +++ b/eth2/utils/tree_hash/tests/tests.rs @@ -40,9 +40,9 @@ impl CachedTreeHash for InternalCache { old.cache = None; if let Some(ref mut local_cache) = local_cache { - self.update_cache(&old, local_cache, 0)?; + self.update_tree_hash_cache(&old, local_cache, 0)?; } else { - local_cache = Some(self.new_cache()?) + local_cache = Some(self.new_tree_hash_cache()?) 
} self.cache = local_cache; @@ -89,16 +89,16 @@ fn works_when_embedded() { } impl CachedTreeHashSubTree for InternalCache { - fn new_cache(&self) -> Result { + fn new_tree_hash_cache(&self) -> Result { let tree = TreeHashCache::from_leaves_and_subtrees( self, - vec![self.a.new_cache()?, self.b.new_cache()?], + vec![self.a.new_tree_hash_cache()?, self.b.new_tree_hash_cache()?], )?; Ok(tree) } - fn btree_overlay(&self, chunk_offset: usize) -> Result { + fn tree_hash_cache_overlay(&self, chunk_offset: usize) -> Result { let mut lengths = vec![]; lengths.push(BTreeOverlay::new(&self.a, 0)?.total_nodes()); @@ -107,7 +107,7 @@ impl CachedTreeHashSubTree for InternalCache { BTreeOverlay::from_lengths(chunk_offset, lengths) } - fn update_cache( + fn update_tree_hash_cache( &self, other: &Self, cache: &mut TreeHashCache, @@ -118,8 +118,8 @@ impl CachedTreeHashSubTree for InternalCache { // Skip past the internal nodes and update any changed leaf nodes. { let chunk = offset_handler.first_leaf_node()?; - let chunk = self.a.update_cache(&other.a, cache, chunk)?; - let _chunk = self.b.update_cache(&other.b, cache, chunk)?; + let chunk = self.a.update_tree_hash_cache(&other.a, cache, chunk)?; + let _chunk = self.b.update_tree_hash_cache(&other.b, cache, chunk)?; } for (&parent, children) in offset_handler.iter_internal_nodes().rev() { @@ -170,21 +170,21 @@ impl TreeHash for Inner { } impl CachedTreeHashSubTree for Inner { - fn new_cache(&self) -> Result { + fn new_tree_hash_cache(&self) -> Result { let tree = TreeHashCache::from_leaves_and_subtrees( self, vec![ - self.a.new_cache()?, - self.b.new_cache()?, - self.c.new_cache()?, - self.d.new_cache()?, + self.a.new_tree_hash_cache()?, + self.b.new_tree_hash_cache()?, + self.c.new_tree_hash_cache()?, + self.d.new_tree_hash_cache()?, ], )?; Ok(tree) } - fn btree_overlay(&self, chunk_offset: usize) -> Result { + fn tree_hash_cache_overlay(&self, chunk_offset: usize) -> Result { let mut lengths = vec![]; 
lengths.push(BTreeOverlay::new(&self.a, 0)?.total_nodes()); @@ -195,7 +195,7 @@ impl CachedTreeHashSubTree for Inner { BTreeOverlay::from_lengths(chunk_offset, lengths) } - fn update_cache( + fn update_tree_hash_cache( &self, other: &Self, cache: &mut TreeHashCache, @@ -206,10 +206,10 @@ impl CachedTreeHashSubTree for Inner { // Skip past the internal nodes and update any changed leaf nodes. { let chunk = offset_handler.first_leaf_node()?; - let chunk = self.a.update_cache(&other.a, cache, chunk)?; - let chunk = self.b.update_cache(&other.b, cache, chunk)?; - let chunk = self.c.update_cache(&other.c, cache, chunk)?; - let _chunk = self.d.update_cache(&other.d, cache, chunk)?; + let chunk = self.a.update_tree_hash_cache(&other.a, cache, chunk)?; + let chunk = self.b.update_tree_hash_cache(&other.b, cache, chunk)?; + let chunk = self.c.update_tree_hash_cache(&other.c, cache, chunk)?; + let _chunk = self.d.update_tree_hash_cache(&other.d, cache, chunk)?; } for (&parent, children) in offset_handler.iter_internal_nodes().rev() { @@ -254,20 +254,20 @@ impl TreeHash for Outer { } impl CachedTreeHashSubTree for Outer { - fn new_cache(&self) -> Result { + fn new_tree_hash_cache(&self) -> Result { let tree = TreeHashCache::from_leaves_and_subtrees( self, vec![ - self.a.new_cache()?, - self.b.new_cache()?, - self.c.new_cache()?, + self.a.new_tree_hash_cache()?, + self.b.new_tree_hash_cache()?, + self.c.new_tree_hash_cache()?, ], )?; Ok(tree) } - fn btree_overlay(&self, chunk_offset: usize) -> Result { + fn tree_hash_cache_overlay(&self, chunk_offset: usize) -> Result { let mut lengths = vec![]; lengths.push(BTreeOverlay::new(&self.a, 0)?.total_nodes()); @@ -277,7 +277,7 @@ impl CachedTreeHashSubTree for Outer { BTreeOverlay::from_lengths(chunk_offset, lengths) } - fn update_cache( + fn update_tree_hash_cache( &self, other: &Self, cache: &mut TreeHashCache, @@ -288,9 +288,9 @@ impl CachedTreeHashSubTree for Outer { // Skip past the internal nodes and update any changed leaf 
nodes. { let chunk = offset_handler.first_leaf_node()?; - let chunk = self.a.update_cache(&other.a, cache, chunk)?; - let chunk = self.b.update_cache(&other.b, cache, chunk)?; - let _chunk = self.c.update_cache(&other.c, cache, chunk)?; + let chunk = self.a.update_tree_hash_cache(&other.a, cache, chunk)?; + let chunk = self.b.update_tree_hash_cache(&other.b, cache, chunk)?; + let _chunk = self.c.update_tree_hash_cache(&other.c, cache, chunk)?; } for (&parent, children) in offset_handler.iter_internal_nodes().rev() { @@ -341,7 +341,7 @@ fn partial_modification_to_inner_struct() { let mut cache_struct = TreeHashCache::new(&original_outer).unwrap(); modified_outer - .update_cache(&original_outer, &mut cache_struct, 0) + .update_tree_hash_cache(&original_outer, &mut cache_struct, 0) .unwrap(); let modified_cache: Vec = cache_struct.into(); @@ -395,7 +395,7 @@ fn partial_modification_to_outer() { let mut cache_struct = TreeHashCache::new(&original_outer).unwrap(); modified_outer - .update_cache(&original_outer, &mut cache_struct, 0) + .update_tree_hash_cache(&original_outer, &mut cache_struct, 0) .unwrap(); let modified_cache: Vec = cache_struct.into(); @@ -481,7 +481,7 @@ fn test_u64_vec_modifications(original: Vec, modified: Vec) { // Perform a differential hash let mut cache_struct = TreeHashCache::from_bytes(original_cache.clone(), false).unwrap(); modified - .update_cache(&original, &mut cache_struct, 0) + .update_tree_hash_cache(&original, &mut cache_struct, 0) .unwrap(); let modified_cache: Vec = cache_struct.into(); @@ -586,7 +586,9 @@ fn large_vec_of_u64_builds() { fn test_inner_vec_modifications(original: Vec, modified: Vec, reference: Vec) { let mut cache = TreeHashCache::new(&original).unwrap(); - modified.update_cache(&original, &mut cache, 0).unwrap(); + modified + .update_tree_hash_cache(&original, &mut cache, 0) + .unwrap(); let modified_cache: Vec = cache.into(); // Build the reference vec. 
@@ -947,12 +949,12 @@ fn generic_test(index: usize) { let mut cache_struct = TreeHashCache::from_bytes(cache.clone(), false).unwrap(); changed_inner - .update_cache(&inner, &mut cache_struct, 0) + .update_tree_hash_cache(&inner, &mut cache_struct, 0) .unwrap(); // assert_eq!(*cache_struct.hash_count, 3); - let new_cache: Vec = cache_struct.into(); + let new_tree_hash_cache: Vec = cache_struct.into(); let data1 = int_to_bytes32(1); let data2 = int_to_bytes32(2); @@ -965,7 +967,7 @@ fn generic_test(index: usize) { let expected = merkleize(join(data)); - assert_eq!(expected, new_cache); + assert_eq!(expected, new_tree_hash_cache); } #[test] diff --git a/eth2/utils/tree_hash_derive/src/lib.rs b/eth2/utils/tree_hash_derive/src/lib.rs index 217e91c24..b2afabaa9 100644 --- a/eth2/utils/tree_hash_derive/src/lib.rs +++ b/eth2/utils/tree_hash_derive/src/lib.rs @@ -39,9 +39,9 @@ fn should_skip_hashing(field: &syn::Field) -> bool { false } -/// Implements `ssz::Encodable` for some `struct`. +/// Implements `tree_hash::CachedTreeHashSubTree` for some `struct`. /// -/// Fields are encoded in the order they are defined. +/// Fields are hashed in the order they are defined. #[proc_macro_derive(CachedTreeHashSubTree, attributes(tree_hash))] pub fn subtree_derive(input: TokenStream) -> TokenStream { let item = parse_macro_input!(input as DeriveInput); @@ -60,16 +60,12 @@ pub fn subtree_derive(input: TokenStream) -> TokenStream { let output = quote! 
{ impl tree_hash::CachedTreeHashSubTree<#name> for #name { - fn item_type() -> tree_hash::ItemType { - tree_hash::ItemType::Composite - } - - fn new_cache(&self) -> Result { + fn new_tree_hash_cache(&self) -> Result { let tree = tree_hash::TreeHashCache::from_leaves_and_subtrees( self, vec![ #( - self.#idents_a.new_cache()?, + self.#idents_a.new_tree_hash_cache()?, )* ], )?; @@ -77,7 +73,7 @@ pub fn subtree_derive(input: TokenStream) -> TokenStream { Ok(tree) } - fn btree_overlay(&self, chunk_offset: usize) -> Result { + fn tree_hash_cache_overlay(&self, chunk_offset: usize) -> Result { let mut lengths = vec![]; #( @@ -87,15 +83,7 @@ pub fn subtree_derive(input: TokenStream) -> TokenStream { tree_hash::BTreeOverlay::from_lengths(chunk_offset, lengths) } - fn packed_encoding(&self) -> Result, tree_hash::Error> { - Err(tree_hash::Error::ShouldNeverBePacked(Self::item_type())) - } - - fn packing_factor() -> usize { - 1 - } - - fn update_cache( + fn update_tree_hash_cache( &self, other: &Self, cache: &mut tree_hash::TreeHashCache, @@ -107,7 +95,7 @@ pub fn subtree_derive(input: TokenStream) -> TokenStream { { let chunk = offset_handler.first_leaf_node()?; #( - let chunk = self.#idents_c.update_cache(&other.#idents_d, cache, chunk)?; + let chunk = self.#idents_c.update_tree_hash_cache(&other.#idents_d, cache, chunk)?; )* } @@ -123,3 +111,47 @@ pub fn subtree_derive(input: TokenStream) -> TokenStream { }; output.into() } + +/// Implements `tree_hash::TreeHash` for some `struct`. +/// +/// Fields are hashed in the order they are defined. +#[proc_macro_derive(TreeHash, attributes(tree_hash))] +pub fn tree_hash_derive(input: TokenStream) -> TokenStream { + let item = parse_macro_input!(input as DeriveInput); + + let name = &item.ident; + + let struct_data = match &item.data { + syn::Data::Struct(s) => s, + _ => panic!("tree_hash_derive only supports structs."), + }; + + let idents = get_hashable_named_field_idents(&struct_data); + + let output = quote! 
{ + impl tree_hash::TreeHash for #name { + fn tree_hash_type() -> tree_hash::TreeHashType { + tree_hash::TreeHashType::Composite + } + + fn tree_hash_packed_encoding(&self) -> Vec { + unreachable!("Struct should never be packed.") + } + + fn tree_hash_packing_factor() -> usize { + unreachable!("Struct should never be packed.") + } + + fn tree_hash_root(&self) -> Vec { + let mut leaves = Vec::with_capacity(4 * tree_hash::HASHSIZE); + + #( + leaves.append(&mut self.#idents.tree_hash_root()); + )* + + tree_hash::efficient_merkleize(&leaves)[0..32].to_vec() + } + } + }; + output.into() +} diff --git a/eth2/utils/tree_hash_derive/tests/tests.rs b/eth2/utils/tree_hash_derive/tests/tests.rs index a5ab112a2..5f065c982 100644 --- a/eth2/utils/tree_hash_derive/tests/tests.rs +++ b/eth2/utils/tree_hash_derive/tests/tests.rs @@ -1,9 +1,71 @@ -use tree_hash_derive::CachedTreeHashSubTree; +use tree_hash::CachedTreeHashSubTree; +use tree_hash_derive::{CachedTreeHashSubTree, TreeHash}; -#[derive(Clone, Debug, CachedTreeHashSubTree)] +#[derive(Clone, Debug, TreeHash, CachedTreeHashSubTree)] pub struct Inner { pub a: u64, pub b: u64, pub c: u64, pub d: u64, } + +fn test_standard_and_cached(original: &T, modified: &T) +where + T: CachedTreeHashSubTree, +{ + let mut cache = original.new_tree_hash_cache().unwrap(); + + let standard_root = original.tree_hash_root(); + let cached_root = cache.root().unwrap().to_vec(); + assert_eq!(standard_root, cached_root); + + // Test after a modification + modified + .update_tree_hash_cache(&original, &mut cache, 0) + .unwrap(); + let standard_root = modified.tree_hash_root(); + let cached_root = cache.root().unwrap().to_vec(); + assert_eq!(standard_root, cached_root); +} + +#[test] +fn inner_standard_vs_cached() { + let original = Inner { + a: 1, + b: 2, + c: 3, + d: 4, + }; + let modified = Inner { + b: 42, + ..original.clone() + }; + + test_standard_and_cached(&original, &modified); +} + +#[derive(Clone, Debug, TreeHash, CachedTreeHashSubTree)] 
+pub struct Uneven { + pub a: u64, + pub b: u64, + pub c: u64, + pub d: u64, + pub e: u64, +} + +#[test] +fn uneven_standard_vs_cached() { + let original = Uneven { + a: 1, + b: 2, + c: 3, + d: 4, + e: 5, + }; + let modified = Uneven { + e: 42, + ..original.clone() + }; + + test_standard_and_cached(&original, &modified); +} From 024b9e315ad2edcf71611fb8361b607d25a74dbe Mon Sep 17 00:00:00 2001 From: Paul Hauner Date: Tue, 16 Apr 2019 11:14:28 +1000 Subject: [PATCH 52/89] Add signed_root to tree_hash crate --- eth2/utils/tree_hash/src/lib.rs | 2 + eth2/utils/tree_hash/src/signed_root.rs | 5 ++ eth2/utils/tree_hash_derive/src/lib.rs | 68 +++++++++++++++++++++- eth2/utils/tree_hash_derive/tests/tests.rs | 33 ++++++++++- 4 files changed, 105 insertions(+), 3 deletions(-) create mode 100644 eth2/utils/tree_hash/src/signed_root.rs diff --git a/eth2/utils/tree_hash/src/lib.rs b/eth2/utils/tree_hash/src/lib.rs index 04eb6d80f..ac7e7633d 100644 --- a/eth2/utils/tree_hash/src/lib.rs +++ b/eth2/utils/tree_hash/src/lib.rs @@ -1,4 +1,5 @@ pub mod cached_tree_hash; +pub mod signed_root; pub mod standard_tree_hash; pub const BYTES_PER_CHUNK: usize = 32; @@ -6,6 +7,7 @@ pub const HASHSIZE: usize = 32; pub const MERKLE_HASH_CHUNCK: usize = 2 * BYTES_PER_CHUNK; pub use cached_tree_hash::{BTreeOverlay, CachedTreeHashSubTree, Error, TreeHashCache}; +pub use signed_root::SignedRoot; pub use standard_tree_hash::{efficient_merkleize, TreeHash}; #[derive(Debug, PartialEq, Clone)] diff --git a/eth2/utils/tree_hash/src/signed_root.rs b/eth2/utils/tree_hash/src/signed_root.rs new file mode 100644 index 000000000..f7aeca4af --- /dev/null +++ b/eth2/utils/tree_hash/src/signed_root.rs @@ -0,0 +1,5 @@ +use crate::TreeHash; + +pub trait SignedRoot: TreeHash { + fn signed_root(&self) -> Vec; +} diff --git a/eth2/utils/tree_hash_derive/src/lib.rs b/eth2/utils/tree_hash_derive/src/lib.rs index b2afabaa9..ff5bc0d47 100644 --- a/eth2/utils/tree_hash_derive/src/lib.rs +++ 
b/eth2/utils/tree_hash_derive/src/lib.rs @@ -2,7 +2,7 @@ extern crate proc_macro; use proc_macro::TokenStream; -use quote::quote; +use quote::{quote, ToTokens}; use syn::{parse_macro_input, DeriveInput}; /// Returns a Vec of `syn::Ident` for each named field in the struct, whilst filtering out fields @@ -155,3 +155,69 @@ pub fn tree_hash_derive(input: TokenStream) -> TokenStream { }; output.into() } + +/// Implements `tree_hash::TreeHash` for some `struct`, whilst excluding any fields following and +/// including a field that is of type "Signature" or "AggregateSignature". +/// +/// See: +/// https://github.com/ethereum/eth2.0-specs/blob/master/specs/simple-serialize.md#signed-roots +/// +/// This is a rather horrendous macro, it will read the type of the object as a string and decide +/// if it's a signature by matching that string against "Signature" or "AggregateSignature". So, +/// it's important that you use those exact words as your type -- don't alias it to something else. +/// +/// If you can think of a better way to do this, please make an issue! +/// +/// Fields are processed in the order they are defined. +#[proc_macro_derive(SignedRoot, attributes(signed_root))] +pub fn tree_hash_signed_root_derive(input: TokenStream) -> TokenStream { + let item = parse_macro_input!(input as DeriveInput); + + let name = &item.ident; + + let struct_data = match &item.data { + syn::Data::Struct(s) => s, + _ => panic!("tree_hash_derive only supports structs."), + }; + + let idents = get_signed_root_named_field_idents(&struct_data); + + let output = quote! 
{ + impl tree_hash::SignedRoot for #name { + fn signed_root(&self) -> Vec { + let mut leaves = Vec::with_capacity(4 * tree_hash::HASHSIZE); + + #( + leaves.append(&mut self.#idents.tree_hash_root()); + )* + + tree_hash::efficient_merkleize(&leaves)[0..32].to_vec() + } + } + }; + output.into() +} + +fn get_signed_root_named_field_idents(struct_data: &syn::DataStruct) -> Vec<&syn::Ident> { + struct_data + .fields + .iter() + .filter_map(|f| { + if should_skip_signed_root(&f) { + None + } else { + Some(match &f.ident { + Some(ref ident) => ident, + _ => panic!("tree_hash_derive only supports named struct fields"), + }) + } + }) + .collect() +} + +fn should_skip_signed_root(field: &syn::Field) -> bool { + field + .attrs + .iter() + .any(|attr| attr.into_token_stream().to_string() == "# [ signed_root ( skip_hashing ) ]") +} diff --git a/eth2/utils/tree_hash_derive/tests/tests.rs b/eth2/utils/tree_hash_derive/tests/tests.rs index 5f065c982..721e77715 100644 --- a/eth2/utils/tree_hash_derive/tests/tests.rs +++ b/eth2/utils/tree_hash_derive/tests/tests.rs @@ -1,5 +1,5 @@ -use tree_hash::CachedTreeHashSubTree; -use tree_hash_derive::{CachedTreeHashSubTree, TreeHash}; +use tree_hash::{CachedTreeHashSubTree, SignedRoot, TreeHash}; +use tree_hash_derive::{CachedTreeHashSubTree, SignedRoot, TreeHash}; #[derive(Clone, Debug, TreeHash, CachedTreeHashSubTree)] pub struct Inner { @@ -69,3 +69,32 @@ fn uneven_standard_vs_cached() { test_standard_and_cached(&original, &modified); } + +#[derive(Clone, Debug, TreeHash, SignedRoot)] +pub struct SignedInner { + pub a: u64, + pub b: u64, + pub c: u64, + pub d: u64, + #[signed_root(skip_hashing)] + pub e: u64, +} + +#[test] +fn signed_root() { + let unsigned = Inner { + a: 1, + b: 2, + c: 3, + d: 4, + }; + let signed = SignedInner { + a: 1, + b: 2, + c: 3, + d: 4, + e: 5, + }; + + assert_eq!(unsigned.tree_hash_root(), signed.signed_root()); +} From 3eaa06d758152cc69ce934622fcd887f28e6c9c8 Mon Sep 17 00:00:00 2001 From: Paul Hauner Date: 
Tue, 16 Apr 2019 12:29:39 +1000 Subject: [PATCH 53/89] Remove tree hashing from ssz crate --- eth2/utils/ssz/src/impl_tree_hash.rs | 85 ---------- eth2/utils/ssz/src/lib.rs | 5 - eth2/utils/ssz/src/signed_root.rs | 5 - eth2/utils/ssz/src/tree_hash.rs | 107 ------------ eth2/utils/ssz_derive/src/lib.rs | 154 ------------------ eth2/utils/tree_hash/src/lib.rs | 25 ++- .../utils/tree_hash/src/standard_tree_hash.rs | 69 +------- .../tree_hash/src/standard_tree_hash/impls.rs | 97 +++++++++++ eth2/utils/tree_hash_derive/src/lib.rs | 4 +- 9 files changed, 128 insertions(+), 423 deletions(-) delete mode 100644 eth2/utils/ssz/src/impl_tree_hash.rs delete mode 100644 eth2/utils/ssz/src/signed_root.rs delete mode 100644 eth2/utils/ssz/src/tree_hash.rs create mode 100644 eth2/utils/tree_hash/src/standard_tree_hash/impls.rs diff --git a/eth2/utils/ssz/src/impl_tree_hash.rs b/eth2/utils/ssz/src/impl_tree_hash.rs deleted file mode 100644 index 03976f637..000000000 --- a/eth2/utils/ssz/src/impl_tree_hash.rs +++ /dev/null @@ -1,85 +0,0 @@ -use super::ethereum_types::{Address, H256}; -use super::{merkle_hash, ssz_encode, TreeHash}; -use hashing::hash; - -impl TreeHash for u8 { - fn hash_tree_root(&self) -> Vec { - ssz_encode(self) - } -} - -impl TreeHash for u16 { - fn hash_tree_root(&self) -> Vec { - ssz_encode(self) - } -} - -impl TreeHash for u32 { - fn hash_tree_root(&self) -> Vec { - ssz_encode(self) - } -} - -impl TreeHash for u64 { - fn hash_tree_root(&self) -> Vec { - ssz_encode(self) - } -} - -impl TreeHash for usize { - fn hash_tree_root(&self) -> Vec { - ssz_encode(self) - } -} - -impl TreeHash for bool { - fn hash_tree_root(&self) -> Vec { - ssz_encode(self) - } -} - -impl TreeHash for Address { - fn hash_tree_root(&self) -> Vec { - ssz_encode(self) - } -} - -impl TreeHash for H256 { - fn hash_tree_root(&self) -> Vec { - ssz_encode(self) - } -} - -impl TreeHash for [u8] { - fn hash_tree_root(&self) -> Vec { - if self.len() > 32 { - return hash(&self); - } - self.to_vec() 
- } -} - -impl TreeHash for Vec -where - T: TreeHash, -{ - /// Returns the merkle_hash of a list of hash_tree_root values created - /// from the given list. - /// Note: A byte vector, Vec, must be converted to a slice (as_slice()) - /// to be handled properly (i.e. hashed) as byte array. - fn hash_tree_root(&self) -> Vec { - let mut tree_hashes = self.iter().map(|x| x.hash_tree_root()).collect(); - merkle_hash(&mut tree_hashes) - } -} - -#[cfg(test)] -mod tests { - use super::*; - - #[test] - fn test_impl_tree_hash_vec() { - let result = vec![1u32, 2, 3, 4, 5, 6, 7].hash_tree_root(); - assert_eq!(result.len(), 32); - } -} diff --git a/eth2/utils/ssz/src/lib.rs b/eth2/utils/ssz/src/lib.rs index cb3f63c48..0a00efa5d 100644 --- a/eth2/utils/ssz/src/lib.rs +++ b/eth2/utils/ssz/src/lib.rs @@ -12,17 +12,12 @@ extern crate ethereum_types; pub mod decode; pub mod encode; -mod signed_root; -pub mod tree_hash; mod impl_decode; mod impl_encode; -mod impl_tree_hash; pub use crate::decode::{decode, decode_ssz_list, Decodable, DecodeError}; pub use crate::encode::{Encodable, SszStream}; -pub use crate::signed_root::SignedRoot; -pub use crate::tree_hash::{merkle_hash, TreeHash}; pub use hashing::hash; diff --git a/eth2/utils/ssz/src/signed_root.rs b/eth2/utils/ssz/src/signed_root.rs deleted file mode 100644 index f7aeca4af..000000000 --- a/eth2/utils/ssz/src/signed_root.rs +++ /dev/null @@ -1,5 +0,0 @@ -use crate::TreeHash; - -pub trait SignedRoot: TreeHash { - fn signed_root(&self) -> Vec; -} diff --git a/eth2/utils/ssz/src/tree_hash.rs b/eth2/utils/ssz/src/tree_hash.rs deleted file mode 100644 index 85e56924c..000000000 --- a/eth2/utils/ssz/src/tree_hash.rs +++ /dev/null @@ -1,107 +0,0 @@ -use hashing::hash; - -const BYTES_PER_CHUNK: usize = 32; -const HASHSIZE: usize = 32; - -pub trait TreeHash { - fn hash_tree_root(&self) -> Vec; -} - -/// Returns a 32 byte hash of 'list' - a vector of byte vectors. -/// Note that this will consume 'list'. 
-pub fn merkle_hash(list: &mut Vec>) -> Vec { - // flatten list - let mut chunkz = list_to_blob(list); - - // get data_len as bytes. It will hashed will the merkle root - let mut datalen = list.len().to_le_bytes().to_vec(); - zpad(&mut datalen, 32); - - // merklelize - while chunkz.len() > HASHSIZE { - let mut new_chunkz: Vec = Vec::new(); - - for two_chunks in chunkz.chunks(BYTES_PER_CHUNK * 2) { - // Hash two chuncks together - new_chunkz.append(&mut hash(two_chunks)); - } - - chunkz = new_chunkz; - } - - chunkz.append(&mut datalen); - hash(&chunkz) -} - -fn list_to_blob(list: &mut Vec>) -> Vec { - // pack - fit as many many items per chunk as we can and then - // right pad to BYTES_PER_CHUNCK - let (items_per_chunk, chunk_count) = if list.is_empty() { - (1, 1) - } else { - let items_per_chunk = BYTES_PER_CHUNK / list[0].len(); - let chunk_count = list.len() / items_per_chunk; - (items_per_chunk, chunk_count) - }; - - let mut chunkz = Vec::new(); - if list.is_empty() { - // handle and empty list - chunkz.append(&mut vec![0; BYTES_PER_CHUNK * 2]); - } else if list[0].len() <= BYTES_PER_CHUNK { - // just create a blob here; we'll divide into - // chunked slices when we merklize - let mut chunk = Vec::with_capacity(BYTES_PER_CHUNK); - let mut item_count_in_chunk = 0; - chunkz.reserve(chunk_count * BYTES_PER_CHUNK); - for item in list.iter_mut() { - item_count_in_chunk += 1; - chunk.append(item); - - // completed chunk? - if item_count_in_chunk == items_per_chunk { - zpad(&mut chunk, BYTES_PER_CHUNK); - chunkz.append(&mut chunk); - item_count_in_chunk = 0; - } - } - - // left-over uncompleted chunk? 
- if item_count_in_chunk != 0 { - zpad(&mut chunk, BYTES_PER_CHUNK); - chunkz.append(&mut chunk); - } - } - - // extend the number of chunks to a power of two if necessary - if !chunk_count.is_power_of_two() { - let zero_chunks_count = chunk_count.next_power_of_two() - chunk_count; - chunkz.append(&mut vec![0; zero_chunks_count * BYTES_PER_CHUNK]); - } - - chunkz -} - -/// right pads with zeros making 'bytes' 'size' in length -fn zpad(bytes: &mut Vec, size: usize) { - if bytes.len() < size { - bytes.resize(size, 0); - } -} - -#[cfg(test)] -mod tests { - use super::*; - - #[test] - fn test_merkle_hash() { - let data1 = vec![1; 32]; - let data2 = vec![2; 32]; - let data3 = vec![3; 32]; - let mut list = vec![data1, data2, data3]; - let result = merkle_hash(&mut list); - - //note: should test againt a known test hash value - assert_eq!(HASHSIZE, result.len()); - } -} diff --git a/eth2/utils/ssz_derive/src/lib.rs b/eth2/utils/ssz_derive/src/lib.rs index ce2538785..f3475f5a7 100644 --- a/eth2/utils/ssz_derive/src/lib.rs +++ b/eth2/utils/ssz_derive/src/lib.rs @@ -188,157 +188,3 @@ pub fn ssz_decode_derive(input: TokenStream) -> TokenStream { }; output.into() } - -/// Returns a Vec of `syn::Ident` for each named field in the struct, whilst filtering out fields -/// that should not be tree hashed. -/// -/// # Panics -/// Any unnamed struct field (like in a tuple struct) will raise a panic at compile time. -fn get_tree_hashable_named_field_idents<'a>( - struct_data: &'a syn::DataStruct, -) -> Vec<&'a syn::Ident> { - struct_data - .fields - .iter() - .filter_map(|f| { - if should_skip_tree_hash(&f) { - None - } else { - Some(match &f.ident { - Some(ref ident) => ident, - _ => panic!("ssz_derive only supports named struct fields."), - }) - } - }) - .collect() -} - -/// Returns true if some field has an attribute declaring it should not be tree-hashed. 
-/// -/// The field attribute is: `#[tree_hash(skip_hashing)]` -fn should_skip_tree_hash(field: &syn::Field) -> bool { - for attr in &field.attrs { - if attr.into_token_stream().to_string() == "# [ tree_hash ( skip_hashing ) ]" { - return true; - } - } - false -} - -/// Implements `ssz::TreeHash` for some `struct`. -/// -/// Fields are processed in the order they are defined. -#[proc_macro_derive(TreeHash, attributes(tree_hash))] -pub fn ssz_tree_hash_derive(input: TokenStream) -> TokenStream { - let item = parse_macro_input!(input as DeriveInput); - - let name = &item.ident; - - let struct_data = match &item.data { - syn::Data::Struct(s) => s, - _ => panic!("ssz_derive only supports structs."), - }; - - let field_idents = get_tree_hashable_named_field_idents(&struct_data); - - let output = quote! { - impl ssz::TreeHash for #name { - fn hash_tree_root(&self) -> Vec { - let mut list: Vec> = Vec::new(); - #( - list.push(self.#field_idents.hash_tree_root()); - )* - - ssz::merkle_hash(&mut list) - } - } - }; - output.into() -} - -/// Returns `true` if some `Ident` should be considered to be a signature type. -fn type_ident_is_signature(ident: &syn::Ident) -> bool { - match ident.to_string().as_ref() { - "Signature" => true, - "AggregateSignature" => true, - _ => false, - } -} - -/// Takes a `Field` where the type (`ty`) portion is a path (e.g., `types::Signature`) and returns -/// the final `Ident` in that path. -/// -/// E.g., for `types::Signature` returns `Signature`. -fn final_type_ident(field: &syn::Field) -> &syn::Ident { - match &field.ty { - syn::Type::Path(path) => &path.path.segments.last().unwrap().value().ident, - _ => panic!("ssz_derive only supports Path types."), - } -} - -/// Implements `ssz::TreeHash` for some `struct`, whilst excluding any fields following and -/// including a field that is of type "Signature" or "AggregateSignature". 
-/// -/// See: -/// https://github.com/ethereum/eth2.0-specs/blob/master/specs/simple-serialize.md#signed-roots -/// -/// This is a rather horrendous macro, it will read the type of the object as a string and decide -/// if it's a signature by matching that string against "Signature" or "AggregateSignature". So, -/// it's important that you use those exact words as your type -- don't alias it to something else. -/// -/// If you can think of a better way to do this, please make an issue! -/// -/// Fields are processed in the order they are defined. -#[proc_macro_derive(SignedRoot, attributes(signed_root))] -pub fn ssz_signed_root_derive(input: TokenStream) -> TokenStream { - let item = parse_macro_input!(input as DeriveInput); - - let name = &item.ident; - - let struct_data = match &item.data { - syn::Data::Struct(s) => s, - _ => panic!("ssz_derive only supports structs."), - }; - - let mut field_idents: Vec<&syn::Ident> = vec![]; - - let field_idents = get_signed_root_named_field_idents(&struct_data); - - let output = quote! 
{ - impl ssz::SignedRoot for #name { - fn signed_root(&self) -> Vec { - let mut list: Vec> = Vec::new(); - #( - list.push(self.#field_idents.hash_tree_root()); - )* - - ssz::merkle_hash(&mut list) - } - } - }; - output.into() -} - -fn get_signed_root_named_field_idents(struct_data: &syn::DataStruct) -> Vec<&syn::Ident> { - struct_data - .fields - .iter() - .filter_map(|f| { - if should_skip_signed_root(&f) { - None - } else { - Some(match &f.ident { - Some(ref ident) => ident, - _ => panic!("ssz_derive only supports named struct fields"), - }) - } - }) - .collect() -} - -fn should_skip_signed_root(field: &syn::Field) -> bool { - field - .attrs - .iter() - .any(|attr| attr.into_token_stream().to_string() == "# [ signed_root ( skip_hashing ) ]") -} diff --git a/eth2/utils/tree_hash/src/lib.rs b/eth2/utils/tree_hash/src/lib.rs index ac7e7633d..7c74c9f97 100644 --- a/eth2/utils/tree_hash/src/lib.rs +++ b/eth2/utils/tree_hash/src/lib.rs @@ -8,7 +8,7 @@ pub const MERKLE_HASH_CHUNCK: usize = 2 * BYTES_PER_CHUNK; pub use cached_tree_hash::{BTreeOverlay, CachedTreeHashSubTree, Error, TreeHashCache}; pub use signed_root::SignedRoot; -pub use standard_tree_hash::{efficient_merkleize, TreeHash}; +pub use standard_tree_hash::{merkle_root, TreeHash}; #[derive(Debug, PartialEq, Clone)] pub enum TreeHashType { @@ -25,3 +25,26 @@ fn num_sanitized_leaves(num_bytes: usize) -> usize { fn num_nodes(num_leaves: usize) -> usize { 2 * num_leaves - 1 } + +#[macro_export] +macro_rules! 
impl_tree_hash_for_ssz_bytes { + ($type: ident) => { + impl tree_hash::TreeHash for $type { + fn tree_hash_type() -> tree_hash::TreeHashType { + tree_hash::TreeHashType::List + } + + fn tree_hash_packed_encoding(&self) -> Vec { + panic!("bytesN should never be packed.") + } + + fn tree_hash_packing_factor() -> usize { + panic!("bytesN should never be packed.") + } + + fn tree_hash_root(&self) -> Vec { + tree_hash::merkle_root(&ssz::ssz_encode(self)) + } + } + }; +} diff --git a/eth2/utils/tree_hash/src/standard_tree_hash.rs b/eth2/utils/tree_hash/src/standard_tree_hash.rs index e7f94560b..ea0677180 100644 --- a/eth2/utils/tree_hash/src/standard_tree_hash.rs +++ b/eth2/utils/tree_hash/src/standard_tree_hash.rs @@ -3,6 +3,8 @@ use hashing::hash; use int_to_bytes::int_to_bytes32; use ssz::ssz_encode; +mod impls; + pub trait TreeHash { fn tree_hash_type() -> TreeHashType; @@ -13,70 +15,9 @@ pub trait TreeHash { fn tree_hash_root(&self) -> Vec; } -impl TreeHash for u64 { - fn tree_hash_type() -> TreeHashType { - TreeHashType::Basic - } - - fn tree_hash_packed_encoding(&self) -> Vec { - ssz_encode(self) - } - - fn tree_hash_packing_factor() -> usize { - HASHSIZE / 8 - } - - fn tree_hash_root(&self) -> Vec { - int_to_bytes32(*self) - } -} - -impl TreeHash for Vec -where - T: TreeHash, -{ - fn tree_hash_type() -> TreeHashType { - TreeHashType::List - } - - fn tree_hash_packed_encoding(&self) -> Vec { - unreachable!("List should never be packed.") - } - - fn tree_hash_packing_factor() -> usize { - unreachable!("List should never be packed.") - } - - fn tree_hash_root(&self) -> Vec { - let leaves = match T::tree_hash_type() { - TreeHashType::Basic => { - let mut leaves = - Vec::with_capacity((HASHSIZE / T::tree_hash_packing_factor()) * self.len()); - - for item in self { - leaves.append(&mut item.tree_hash_packed_encoding()); - } - - leaves - } - TreeHashType::Composite | TreeHashType::List => { - let mut leaves = Vec::with_capacity(self.len() * HASHSIZE); - - for item in 
self { - leaves.append(&mut item.tree_hash_root()) - } - - leaves - } - }; - - // Mix in the length - let mut root_and_len = Vec::with_capacity(HASHSIZE * 2); - root_and_len.append(&mut efficient_merkleize(&leaves)[0..32].to_vec()); - root_and_len.append(&mut int_to_bytes32(self.len() as u64)); - - hash(&root_and_len) - } +pub fn merkle_root(bytes: &[u8]) -> Vec { + // TODO: replace this with a _more_ efficient fn which is more memory efficient. + efficient_merkleize(&bytes)[0..32].to_vec() } pub fn efficient_merkleize(bytes: &[u8]) -> Vec { diff --git a/eth2/utils/tree_hash/src/standard_tree_hash/impls.rs b/eth2/utils/tree_hash/src/standard_tree_hash/impls.rs new file mode 100644 index 000000000..070e314b8 --- /dev/null +++ b/eth2/utils/tree_hash/src/standard_tree_hash/impls.rs @@ -0,0 +1,97 @@ +use super::*; +use ethereum_types::H256; + +macro_rules! impl_for_bitsize { + ($type: ident, $bit_size: expr) => { + impl TreeHash for $type { + fn tree_hash_type() -> TreeHashType { + TreeHashType::Basic + } + + fn tree_hash_packed_encoding(&self) -> Vec { + ssz_encode(self) + } + + fn tree_hash_packing_factor() -> usize { + HASHSIZE / ($bit_size / 8) + } + + fn tree_hash_root(&self) -> Vec { + int_to_bytes32(*self as u64) + } + } + }; +} + +impl_for_bitsize!(u8, 8); +impl_for_bitsize!(u16, 16); +impl_for_bitsize!(u32, 32); +impl_for_bitsize!(u64, 64); +impl_for_bitsize!(usize, 64); +impl_for_bitsize!(bool, 8); + +impl TreeHash for H256 { + fn tree_hash_type() -> TreeHashType { + TreeHashType::Basic + } + + fn tree_hash_packed_encoding(&self) -> Vec { + ssz_encode(self) + } + + fn tree_hash_packing_factor() -> usize { + 1 + } + + fn tree_hash_root(&self) -> Vec { + ssz_encode(self) + } +} + +impl TreeHash for Vec +where + T: TreeHash, +{ + fn tree_hash_type() -> TreeHashType { + TreeHashType::List + } + + fn tree_hash_packed_encoding(&self) -> Vec { + unreachable!("List should never be packed.") + } + + fn tree_hash_packing_factor() -> usize { + unreachable!("List should 
never be packed.") + } + + fn tree_hash_root(&self) -> Vec { + let leaves = match T::tree_hash_type() { + TreeHashType::Basic => { + let mut leaves = + Vec::with_capacity((HASHSIZE / T::tree_hash_packing_factor()) * self.len()); + + for item in self { + leaves.append(&mut item.tree_hash_packed_encoding()); + } + + leaves + } + TreeHashType::Composite | TreeHashType::List => { + let mut leaves = Vec::with_capacity(self.len() * HASHSIZE); + + for item in self { + leaves.append(&mut item.tree_hash_root()) + } + + leaves + } + }; + + // Mix in the length + let mut root_and_len = Vec::with_capacity(HASHSIZE * 2); + root_and_len.append(&mut merkle_root(&leaves)); + root_and_len.append(&mut int_to_bytes32(self.len() as u64)); + + hash(&root_and_len) + } +} diff --git a/eth2/utils/tree_hash_derive/src/lib.rs b/eth2/utils/tree_hash_derive/src/lib.rs index ff5bc0d47..dc3702c72 100644 --- a/eth2/utils/tree_hash_derive/src/lib.rs +++ b/eth2/utils/tree_hash_derive/src/lib.rs @@ -149,7 +149,7 @@ pub fn tree_hash_derive(input: TokenStream) -> TokenStream { leaves.append(&mut self.#idents.tree_hash_root()); )* - tree_hash::efficient_merkleize(&leaves)[0..32].to_vec() + tree_hash::merkle_root(&leaves) } } }; @@ -191,7 +191,7 @@ pub fn tree_hash_signed_root_derive(input: TokenStream) -> TokenStream { leaves.append(&mut self.#idents.tree_hash_root()); )* - tree_hash::efficient_merkleize(&leaves)[0..32].to_vec() + tree_hash::merkle_root(&leaves) } } }; From b8c4c3308a4affc2196bd56a1666965d20e2813c Mon Sep 17 00:00:00 2001 From: Paul Hauner Date: Tue, 16 Apr 2019 14:14:38 +1000 Subject: [PATCH 54/89] Update `types` to new tree_hash crate --- beacon_node/beacon_chain/src/initialise.rs | 6 +- .../testing_beacon_chain_builder.rs | 4 +- .../test_harness/src/beacon_chain_harness.rs | 6 +- .../test_harness/src/test_case.rs | 2 +- beacon_node/network/src/sync/import_queue.rs | 10 +- beacon_node/network/src/sync/simple_sync.rs | 4 +- eth2/attester/src/lib.rs | 5 +- 
eth2/block_proposer/src/lib.rs | 2 +- .../benches/bench_block_processing.rs | 4 +- .../benches/bench_epoch_processing.rs | 4 +- .../state_processing/src/get_genesis_state.rs | 4 +- .../src/per_block_processing.rs | 2 +- .../validate_attestation.rs | 6 +- .../src/per_block_processing/verify_exit.rs | 2 +- .../verify_proposer_slashing.rs | 2 +- .../verify_slashable_attestation.rs | 6 +- .../per_block_processing/verify_transfer.rs | 2 +- .../src/per_epoch_processing.rs | 6 +- .../src/per_slot_processing.rs | 6 +- eth2/types/Cargo.toml | 2 + eth2/types/src/attestation.rs | 5 +- eth2/types/src/attestation_data.rs | 5 +- .../src/attestation_data_and_custody_bit.rs | 3 +- eth2/types/src/attester_slashing.rs | 3 +- eth2/types/src/beacon_block.rs | 11 ++- eth2/types/src/beacon_block_body.rs | 3 +- eth2/types/src/beacon_block_header.rs | 9 +- eth2/types/src/beacon_state.rs | 10 +- eth2/types/src/crosslink.rs | 3 +- eth2/types/src/crosslink_committee.rs | 3 +- eth2/types/src/deposit.rs | 3 +- eth2/types/src/deposit_data.rs | 3 +- eth2/types/src/deposit_input.rs | 5 +- eth2/types/src/epoch_cache.rs | 0 eth2/types/src/eth1_data.rs | 3 +- eth2/types/src/eth1_data_vote.rs | 3 +- eth2/types/src/fork.rs | 3 +- eth2/types/src/historical_batch.rs | 3 +- eth2/types/src/pending_attestation.rs | 3 +- eth2/types/src/proposer_slashing.rs | 3 +- eth2/types/src/slashable_attestation.rs | 5 +- eth2/types/src/slot_epoch.rs | 2 +- eth2/types/src/slot_epoch_macros.rs | 20 +++- eth2/types/src/slot_height.rs | 2 +- eth2/types/src/test_utils/macros.rs | 6 +- .../test_utils/testing_attestation_builder.rs | 4 +- .../testing_attester_slashing_builder.rs | 4 +- .../testing_beacon_block_builder.rs | 4 +- .../testing_proposer_slashing_builder.rs | 2 +- .../test_utils/testing_transfer_builder.rs | 2 +- .../testing_voluntary_exit_builder.rs | 2 +- eth2/types/src/transfer.rs | 5 +- eth2/types/src/validator.rs | 3 +- eth2/types/src/voluntary_exit.rs | 5 +- eth2/utils/bls/Cargo.toml | 1 + 
eth2/utils/bls/src/aggregate_signature.rs | 2 +- .../utils/bls/src/fake_aggregate_signature.rs | 9 +- eth2/utils/bls/src/fake_signature.rs | 9 +- eth2/utils/bls/src/public_key.rs | 9 +- eth2/utils/bls/src/secret_key.rs | 9 +- eth2/utils/bls/src/signature.rs | 2 +- eth2/utils/boolean-bitfield/Cargo.toml | 1 + eth2/utils/boolean-bitfield/src/lib.rs | 7 +- eth2/utils/ssz_derive/tests/test_derives.rs | 94 ------------------- .../utils/tree_hash/src/standard_tree_hash.rs | 7 +- .../tree_hash/src/standard_tree_hash/impls.rs | 35 +++++++ eth2/utils/tree_hash_derive/src/lib.rs | 10 +- eth2/utils/tree_hash_derive/tests/tests.rs | 82 ++++++++++++++++ .../src/attestation_producer/mod.rs | 4 +- validator_client/src/block_producer/mod.rs | 2 +- 70 files changed, 284 insertions(+), 234 deletions(-) delete mode 100644 eth2/types/src/epoch_cache.rs delete mode 100644 eth2/utils/ssz_derive/tests/test_derives.rs diff --git a/beacon_node/beacon_chain/src/initialise.rs b/beacon_node/beacon_chain/src/initialise.rs index 0951e06fb..c66dd63b1 100644 --- a/beacon_node/beacon_chain/src/initialise.rs +++ b/beacon_node/beacon_chain/src/initialise.rs @@ -7,9 +7,9 @@ use db::stores::{BeaconBlockStore, BeaconStateStore}; use db::{DiskDB, MemoryDB}; use fork_choice::BitwiseLMDGhost; use slot_clock::SystemTimeSlotClock; -use ssz::TreeHash; use std::path::PathBuf; use std::sync::Arc; +use tree_hash::TreeHash; use types::test_utils::TestingBeaconStateBuilder; use types::{BeaconBlock, ChainSpec, Hash256}; @@ -32,7 +32,7 @@ pub fn initialise_beacon_chain( let (genesis_state, _keypairs) = state_builder.build(); let mut genesis_block = BeaconBlock::empty(&spec); - genesis_block.state_root = Hash256::from_slice(&genesis_state.hash_tree_root()); + genesis_block.state_root = Hash256::from_slice(&genesis_state.tree_hash_root()); // Slot clock let slot_clock = SystemTimeSlotClock::new( @@ -73,7 +73,7 @@ pub fn initialise_test_beacon_chain( let (genesis_state, _keypairs) = state_builder.build(); let mut 
genesis_block = BeaconBlock::empty(spec); - genesis_block.state_root = Hash256::from_slice(&genesis_state.hash_tree_root()); + genesis_block.state_root = Hash256::from_slice(&genesis_state.tree_hash_root()); // Slot clock let slot_clock = SystemTimeSlotClock::new( diff --git a/beacon_node/beacon_chain/src/test_utils/testing_beacon_chain_builder.rs b/beacon_node/beacon_chain/src/test_utils/testing_beacon_chain_builder.rs index 5c5477e55..d174670c0 100644 --- a/beacon_node/beacon_chain/src/test_utils/testing_beacon_chain_builder.rs +++ b/beacon_node/beacon_chain/src/test_utils/testing_beacon_chain_builder.rs @@ -5,8 +5,8 @@ use db::{ }; use fork_choice::BitwiseLMDGhost; use slot_clock::TestingSlotClock; -use ssz::TreeHash; use std::sync::Arc; +use tree_hash::TreeHash; use types::test_utils::TestingBeaconStateBuilder; use types::*; @@ -27,7 +27,7 @@ impl TestingBeaconChainBuilder { let (genesis_state, _keypairs) = self.state_builder.build(); let mut genesis_block = BeaconBlock::empty(&spec); - genesis_block.state_root = Hash256::from_slice(&genesis_state.hash_tree_root()); + genesis_block.state_root = Hash256::from_slice(&genesis_state.tree_hash_root()); // Create the Beacon Chain BeaconChain::from_genesis( diff --git a/beacon_node/beacon_chain/test_harness/src/beacon_chain_harness.rs b/beacon_node/beacon_chain/test_harness/src/beacon_chain_harness.rs index aeb734a4e..34b559478 100644 --- a/beacon_node/beacon_chain/test_harness/src/beacon_chain_harness.rs +++ b/beacon_node/beacon_chain/test_harness/src/beacon_chain_harness.rs @@ -9,8 +9,8 @@ use fork_choice::BitwiseLMDGhost; use log::debug; use rayon::prelude::*; use slot_clock::TestingSlotClock; -use ssz::TreeHash; use std::sync::Arc; +use tree_hash::TreeHash; use types::{test_utils::TestingBeaconStateBuilder, *}; type TestingBeaconChain = BeaconChain>; @@ -54,7 +54,7 @@ impl BeaconChainHarness { let (mut genesis_state, keypairs) = state_builder.build(); let mut genesis_block = BeaconBlock::empty(&spec); - 
genesis_block.state_root = Hash256::from_slice(&genesis_state.hash_tree_root()); + genesis_block.state_root = Hash256::from_slice(&genesis_state.tree_hash_root()); genesis_state .build_epoch_cache(RelativeEpoch::Previous, &spec) @@ -163,7 +163,7 @@ impl BeaconChainHarness { data: data.clone(), custody_bit: false, } - .hash_tree_root(); + .tree_hash_root(); let domain = self.spec.get_domain( state.slot.epoch(self.spec.slots_per_epoch), Domain::Attestation, diff --git a/beacon_node/beacon_chain/test_harness/src/test_case.rs b/beacon_node/beacon_chain/test_harness/src/test_case.rs index f65b45505..28c7ae8a8 100644 --- a/beacon_node/beacon_chain/test_harness/src/test_case.rs +++ b/beacon_node/beacon_chain/test_harness/src/test_case.rs @@ -4,7 +4,7 @@ use crate::beacon_chain_harness::BeaconChainHarness; use beacon_chain::CheckPoint; use log::{info, warn}; -use ssz::SignedRoot; +use tree_hash::SignedRoot; use types::*; use types::test_utils::*; diff --git a/beacon_node/network/src/sync/import_queue.rs b/beacon_node/network/src/sync/import_queue.rs index 0026347eb..106e3eb66 100644 --- a/beacon_node/network/src/sync/import_queue.rs +++ b/beacon_node/network/src/sync/import_queue.rs @@ -2,9 +2,9 @@ use crate::beacon_chain::BeaconChain; use eth2_libp2p::rpc::methods::*; use eth2_libp2p::PeerId; use slog::{debug, error}; -use ssz::TreeHash; use std::sync::Arc; use std::time::{Duration, Instant}; +use tree_hash::TreeHash; use types::{BeaconBlock, BeaconBlockBody, BeaconBlockHeader, Hash256, Slot}; /// Provides a queue for fully and partially built `BeaconBlock`s. @@ -15,7 +15,7 @@ use types::{BeaconBlock, BeaconBlockBody, BeaconBlockHeader, Hash256, Slot}; /// /// - When we receive a `BeaconBlockBody`, the only way we can find it's matching /// `BeaconBlockHeader` is to find a header such that `header.beacon_block_body == -/// hash_tree_root(body)`. Therefore, if we used a `HashMap` we would need to use the root of +/// tree_hash_root(body)`. 
Therefore, if we used a `HashMap` we would need to use the root of /// `BeaconBlockBody` as the key. /// - It is possible for multiple distinct blocks to have identical `BeaconBlockBodies`. Therefore /// we cannot use a `HashMap` keyed by the root of `BeaconBlockBody`. @@ -166,7 +166,7 @@ impl ImportQueue { let mut required_bodies: Vec = vec![]; for header in headers { - let block_root = Hash256::from_slice(&header.hash_tree_root()[..]); + let block_root = Hash256::from_slice(&header.tree_hash_root()[..]); if self.chain_has_not_seen_block(&block_root) { self.insert_header(block_root, header, sender.clone()); @@ -230,7 +230,7 @@ impl ImportQueue { /// /// If the body already existed, the `inserted` time is set to `now`. fn insert_body(&mut self, body: BeaconBlockBody, sender: PeerId) { - let body_root = Hash256::from_slice(&body.hash_tree_root()[..]); + let body_root = Hash256::from_slice(&body.tree_hash_root()[..]); self.partials.iter_mut().for_each(|mut p| { if let Some(header) = &mut p.header { @@ -250,7 +250,7 @@ impl ImportQueue { /// /// If the partial already existed, the `inserted` time is set to `now`. 
fn insert_full_block(&mut self, block: BeaconBlock, sender: PeerId) { - let block_root = Hash256::from_slice(&block.hash_tree_root()[..]); + let block_root = Hash256::from_slice(&block.tree_hash_root()[..]); let partial = PartialBeaconBlock { slot: block.slot, diff --git a/beacon_node/network/src/sync/simple_sync.rs b/beacon_node/network/src/sync/simple_sync.rs index 824458b89..1b57fbc00 100644 --- a/beacon_node/network/src/sync/simple_sync.rs +++ b/beacon_node/network/src/sync/simple_sync.rs @@ -5,10 +5,10 @@ use eth2_libp2p::rpc::methods::*; use eth2_libp2p::rpc::{RPCRequest, RPCResponse, RequestId}; use eth2_libp2p::PeerId; use slog::{debug, error, info, o, warn}; -use ssz::TreeHash; use std::collections::HashMap; use std::sync::Arc; use std::time::Duration; +use tree_hash::TreeHash; use types::{Attestation, BeaconBlock, Epoch, Hash256, Slot}; /// The number of slots that we can import blocks ahead of us, before going into full Sync mode. @@ -565,7 +565,7 @@ impl SimpleSync { return false; } - let block_root = Hash256::from_slice(&block.hash_tree_root()); + let block_root = Hash256::from_slice(&block.tree_hash_root()); // Ignore any block that the chain already knows about. 
if self.chain_has_seen_block(&block_root) { diff --git a/eth2/attester/src/lib.rs b/eth2/attester/src/lib.rs index a4295f005..a9e3091af 100644 --- a/eth2/attester/src/lib.rs +++ b/eth2/attester/src/lib.rs @@ -2,8 +2,8 @@ pub mod test_utils; mod traits; use slot_clock::SlotClock; -use ssz::TreeHash; use std::sync::Arc; +use tree_hash::TreeHash; use types::{AttestationData, AttestationDataAndCustodyBit, FreeAttestation, Signature, Slot}; pub use self::traits::{ @@ -141,7 +141,8 @@ impl Attester BlockProducer Result< let active_index_root = Hash256::from_slice( &state .get_active_validator_indices(next_epoch + spec.activation_exit_delay) - .hash_tree_root()[..], + .tree_hash_root()[..], ); state.set_active_index_root(next_epoch, active_index_root, spec)?; @@ -261,7 +261,7 @@ pub fn finish_epoch_update(state: &mut BeaconState, spec: &ChainSpec) -> Result< let historical_batch: HistoricalBatch = state.historical_batch(); state .historical_roots - .push(Hash256::from_slice(&historical_batch.hash_tree_root()[..])); + .push(Hash256::from_slice(&historical_batch.tree_hash_root()[..])); } state.previous_epoch_attestations = state.current_epoch_attestations.clone(); diff --git a/eth2/state_processing/src/per_slot_processing.rs b/eth2/state_processing/src/per_slot_processing.rs index c6b5312c7..cd129a5f1 100644 --- a/eth2/state_processing/src/per_slot_processing.rs +++ b/eth2/state_processing/src/per_slot_processing.rs @@ -1,5 +1,5 @@ use crate::*; -use ssz::TreeHash; +use tree_hash::TreeHash; use types::*; #[derive(Debug, PartialEq)] @@ -32,7 +32,7 @@ fn cache_state( latest_block_header: &BeaconBlockHeader, spec: &ChainSpec, ) -> Result<(), Error> { - let previous_slot_state_root = Hash256::from_slice(&state.hash_tree_root()[..]); + let previous_slot_state_root = Hash256::from_slice(&state.tree_hash_root()[..]); // Note: increment the state slot here to allow use of our `state_root` and `block_root` // getter/setter functions. 
@@ -46,7 +46,7 @@ fn cache_state( state.latest_block_header.state_root = previous_slot_state_root } - let latest_block_root = Hash256::from_slice(&latest_block_header.hash_tree_root()[..]); + let latest_block_root = Hash256::from_slice(&latest_block_header.tree_hash_root()[..]); state.set_block_root(previous_slot, latest_block_root, spec)?; // Set the state slot back to what it should be. diff --git a/eth2/types/Cargo.toml b/eth2/types/Cargo.toml index 613eb7936..b88e1d4cf 100644 --- a/eth2/types/Cargo.toml +++ b/eth2/types/Cargo.toml @@ -26,6 +26,8 @@ ssz = { path = "../utils/ssz" } ssz_derive = { path = "../utils/ssz_derive" } swap_or_not_shuffle = { path = "../utils/swap_or_not_shuffle" } test_random_derive = { path = "../utils/test_random_derive" } +tree_hash = { path = "../utils/tree_hash" } +tree_hash_derive = { path = "../utils/tree_hash_derive" } libp2p = { git = "https://github.com/SigP/rust-libp2p", rev = "b3c32d9a821ae6cc89079499cc6e8a6bab0bffc3" } [dev-dependencies] diff --git a/eth2/types/src/attestation.rs b/eth2/types/src/attestation.rs index a8eeea909..c43692a7b 100644 --- a/eth2/types/src/attestation.rs +++ b/eth2/types/src/attestation.rs @@ -2,9 +2,10 @@ use super::{AggregateSignature, AttestationData, Bitfield}; use crate::test_utils::TestRandom; use rand::RngCore; use serde_derive::{Deserialize, Serialize}; -use ssz::TreeHash; -use ssz_derive::{Decode, Encode, SignedRoot, TreeHash}; +use ssz_derive::{Decode, Encode}; use test_random_derive::TestRandom; +use tree_hash::TreeHash; +use tree_hash_derive::{SignedRoot, TreeHash}; /// Details an attestation that can be slashable. 
/// diff --git a/eth2/types/src/attestation_data.rs b/eth2/types/src/attestation_data.rs index 4a6b57823..305ddafe0 100644 --- a/eth2/types/src/attestation_data.rs +++ b/eth2/types/src/attestation_data.rs @@ -2,9 +2,10 @@ use crate::test_utils::TestRandom; use crate::{Crosslink, Epoch, Hash256, Slot}; use rand::RngCore; use serde_derive::{Deserialize, Serialize}; -use ssz::TreeHash; -use ssz_derive::{Decode, Encode, SignedRoot, TreeHash}; +use ssz_derive::{Decode, Encode}; use test_random_derive::TestRandom; +use tree_hash::TreeHash; +use tree_hash_derive::{SignedRoot, TreeHash}; /// The data upon which an attestation is based. /// diff --git a/eth2/types/src/attestation_data_and_custody_bit.rs b/eth2/types/src/attestation_data_and_custody_bit.rs index 2cc6bc80c..59a4eee77 100644 --- a/eth2/types/src/attestation_data_and_custody_bit.rs +++ b/eth2/types/src/attestation_data_and_custody_bit.rs @@ -2,7 +2,8 @@ use super::AttestationData; use crate::test_utils::TestRandom; use rand::RngCore; use serde_derive::Serialize; -use ssz_derive::{Decode, Encode, TreeHash}; +use ssz_derive::{Decode, Encode}; +use tree_hash_derive::TreeHash; /// Used for pairing an attestation with a proof-of-custody. /// diff --git a/eth2/types/src/attester_slashing.rs b/eth2/types/src/attester_slashing.rs index 6fc404f42..0600e0ecc 100644 --- a/eth2/types/src/attester_slashing.rs +++ b/eth2/types/src/attester_slashing.rs @@ -1,8 +1,9 @@ use crate::{test_utils::TestRandom, SlashableAttestation}; use rand::RngCore; use serde_derive::{Deserialize, Serialize}; -use ssz_derive::{Decode, Encode, TreeHash}; +use ssz_derive::{Decode, Encode}; use test_random_derive::TestRandom; +use tree_hash_derive::TreeHash; /// Two conflicting attestations. 
/// diff --git a/eth2/types/src/beacon_block.rs b/eth2/types/src/beacon_block.rs index 77c1620f3..bc6ccb0d5 100644 --- a/eth2/types/src/beacon_block.rs +++ b/eth2/types/src/beacon_block.rs @@ -3,9 +3,10 @@ use crate::*; use bls::Signature; use rand::RngCore; use serde_derive::{Deserialize, Serialize}; -use ssz::TreeHash; -use ssz_derive::{Decode, Encode, SignedRoot, TreeHash}; +use ssz_derive::{Decode, Encode}; use test_random_derive::TestRandom; +use tree_hash::TreeHash; +use tree_hash_derive::{SignedRoot, TreeHash}; /// A block of the `BeaconChain`. /// @@ -57,11 +58,11 @@ impl BeaconBlock { } } - /// Returns the `hash_tree_root` of the block. + /// Returns the `tree_hash_root | update` of the block. /// /// Spec v0.5.0 pub fn canonical_root(&self) -> Hash256 { - Hash256::from_slice(&self.hash_tree_root()[..]) + Hash256::from_slice(&self.tree_hash_root()[..]) } /// Returns a full `BeaconBlockHeader` of this block. @@ -77,7 +78,7 @@ impl BeaconBlock { slot: self.slot, previous_block_root: self.previous_block_root, state_root: self.state_root, - block_body_root: Hash256::from_slice(&self.body.hash_tree_root()[..]), + block_body_root: Hash256::from_slice(&self.body.tree_hash_root()[..]), signature: self.signature.clone(), } } diff --git a/eth2/types/src/beacon_block_body.rs b/eth2/types/src/beacon_block_body.rs index 677e24cec..0414d0d72 100644 --- a/eth2/types/src/beacon_block_body.rs +++ b/eth2/types/src/beacon_block_body.rs @@ -2,8 +2,9 @@ use crate::test_utils::TestRandom; use crate::*; use rand::RngCore; use serde_derive::{Deserialize, Serialize}; -use ssz_derive::{Decode, Encode, TreeHash}; +use ssz_derive::{Decode, Encode}; use test_random_derive::TestRandom; +use tree_hash_derive::TreeHash; /// The body of a `BeaconChain` block, containing operations. 
/// diff --git a/eth2/types/src/beacon_block_header.rs b/eth2/types/src/beacon_block_header.rs index 090d0a965..9076437c0 100644 --- a/eth2/types/src/beacon_block_header.rs +++ b/eth2/types/src/beacon_block_header.rs @@ -3,9 +3,10 @@ use crate::*; use bls::Signature; use rand::RngCore; use serde_derive::{Deserialize, Serialize}; -use ssz::TreeHash; -use ssz_derive::{Decode, Encode, SignedRoot, TreeHash}; +use ssz_derive::{Decode, Encode}; use test_random_derive::TestRandom; +use tree_hash::TreeHash; +use tree_hash_derive::{SignedRoot, TreeHash}; /// A header of a `BeaconBlock`. /// @@ -32,11 +33,11 @@ pub struct BeaconBlockHeader { } impl BeaconBlockHeader { - /// Returns the `hash_tree_root` of the header. + /// Returns the `tree_hash_root` of the header. /// /// Spec v0.5.0 pub fn canonical_root(&self) -> Hash256 { - Hash256::from_slice(&self.hash_tree_root()[..]) + Hash256::from_slice(&self.tree_hash_root()[..]) } /// Given a `body`, consumes `self` and returns a complete `BeaconBlock`. diff --git a/eth2/types/src/beacon_state.rs b/eth2/types/src/beacon_state.rs index 774e8eb76..19c1b4c11 100644 --- a/eth2/types/src/beacon_state.rs +++ b/eth2/types/src/beacon_state.rs @@ -5,9 +5,11 @@ use int_to_bytes::int_to_bytes32; use pubkey_cache::PubkeyCache; use rand::RngCore; use serde_derive::{Deserialize, Serialize}; -use ssz::{hash, ssz_encode, TreeHash}; -use ssz_derive::{Decode, Encode, TreeHash}; +use ssz::{hash, ssz_encode}; +use ssz_derive::{Decode, Encode}; use test_random_derive::TestRandom; +use tree_hash::TreeHash; +use tree_hash_derive::TreeHash; mod epoch_cache; mod pubkey_cache; @@ -186,11 +188,11 @@ impl BeaconState { } } - /// Returns the `hash_tree_root` of the state. + /// Returns the `tree_hash_root` of the state. 
/// /// Spec v0.5.0 pub fn canonical_root(&self) -> Hash256 { - Hash256::from_slice(&self.hash_tree_root()[..]) + Hash256::from_slice(&self.tree_hash_root()[..]) } pub fn historical_batch(&self) -> HistoricalBatch { diff --git a/eth2/types/src/crosslink.rs b/eth2/types/src/crosslink.rs index f91680c75..a0fd7e0b3 100644 --- a/eth2/types/src/crosslink.rs +++ b/eth2/types/src/crosslink.rs @@ -2,8 +2,9 @@ use crate::test_utils::TestRandom; use crate::{Epoch, Hash256}; use rand::RngCore; use serde_derive::{Deserialize, Serialize}; -use ssz_derive::{Decode, Encode, TreeHash}; +use ssz_derive::{Decode, Encode}; use test_random_derive::TestRandom; +use tree_hash_derive::TreeHash; /// Specifies the block hash for a shard at an epoch. /// diff --git a/eth2/types/src/crosslink_committee.rs b/eth2/types/src/crosslink_committee.rs index af1778a1b..e8fc1b96d 100644 --- a/eth2/types/src/crosslink_committee.rs +++ b/eth2/types/src/crosslink_committee.rs @@ -1,6 +1,7 @@ use crate::*; use serde_derive::{Deserialize, Serialize}; -use ssz_derive::{Decode, Encode, TreeHash}; +use ssz_derive::{Decode, Encode}; +use tree_hash_derive::TreeHash; #[derive(Default, Clone, Debug, PartialEq, Serialize, Deserialize, Decode, Encode, TreeHash)] pub struct CrosslinkCommittee { diff --git a/eth2/types/src/deposit.rs b/eth2/types/src/deposit.rs index ff8d83d77..5eb565c2b 100644 --- a/eth2/types/src/deposit.rs +++ b/eth2/types/src/deposit.rs @@ -2,8 +2,9 @@ use super::{DepositData, Hash256}; use crate::test_utils::TestRandom; use rand::RngCore; use serde_derive::{Deserialize, Serialize}; -use ssz_derive::{Decode, Encode, TreeHash}; +use ssz_derive::{Decode, Encode}; use test_random_derive::TestRandom; +use tree_hash_derive::TreeHash; /// A deposit to potentially become a beacon chain validator. 
/// diff --git a/eth2/types/src/deposit_data.rs b/eth2/types/src/deposit_data.rs index a1e30032f..f8726e95d 100644 --- a/eth2/types/src/deposit_data.rs +++ b/eth2/types/src/deposit_data.rs @@ -2,8 +2,9 @@ use super::DepositInput; use crate::test_utils::TestRandom; use rand::RngCore; use serde_derive::{Deserialize, Serialize}; -use ssz_derive::{Decode, Encode, TreeHash}; +use ssz_derive::{Decode, Encode}; use test_random_derive::TestRandom; +use tree_hash_derive::TreeHash; /// Data generated by the deposit contract. /// diff --git a/eth2/types/src/deposit_input.rs b/eth2/types/src/deposit_input.rs index 380528dc0..828496293 100644 --- a/eth2/types/src/deposit_input.rs +++ b/eth2/types/src/deposit_input.rs @@ -3,9 +3,10 @@ use crate::*; use bls::{PublicKey, Signature}; use rand::RngCore; use serde_derive::{Deserialize, Serialize}; -use ssz::{SignedRoot, TreeHash}; -use ssz_derive::{Decode, Encode, SignedRoot, TreeHash}; +use ssz_derive::{Decode, Encode}; use test_random_derive::TestRandom; +use tree_hash::{SignedRoot, TreeHash}; +use tree_hash_derive::{SignedRoot, TreeHash}; /// The data supplied by the user to the deposit contract. /// diff --git a/eth2/types/src/epoch_cache.rs b/eth2/types/src/epoch_cache.rs deleted file mode 100644 index e69de29bb..000000000 diff --git a/eth2/types/src/eth1_data.rs b/eth2/types/src/eth1_data.rs index deced19fb..c1348cfba 100644 --- a/eth2/types/src/eth1_data.rs +++ b/eth2/types/src/eth1_data.rs @@ -2,8 +2,9 @@ use super::Hash256; use crate::test_utils::TestRandom; use rand::RngCore; use serde_derive::{Deserialize, Serialize}; -use ssz_derive::{Decode, Encode, TreeHash}; +use ssz_derive::{Decode, Encode}; use test_random_derive::TestRandom; +use tree_hash_derive::TreeHash; /// Contains data obtained from the Eth1 chain. 
/// diff --git a/eth2/types/src/eth1_data_vote.rs b/eth2/types/src/eth1_data_vote.rs index 2f3a1ade1..a9741f065 100644 --- a/eth2/types/src/eth1_data_vote.rs +++ b/eth2/types/src/eth1_data_vote.rs @@ -2,8 +2,9 @@ use super::Eth1Data; use crate::test_utils::TestRandom; use rand::RngCore; use serde_derive::{Deserialize, Serialize}; -use ssz_derive::{Decode, Encode, TreeHash}; +use ssz_derive::{Decode, Encode}; use test_random_derive::TestRandom; +use tree_hash_derive::TreeHash; /// A summation of votes for some `Eth1Data`. /// diff --git a/eth2/types/src/fork.rs b/eth2/types/src/fork.rs index b9d16c333..99908e9ed 100644 --- a/eth2/types/src/fork.rs +++ b/eth2/types/src/fork.rs @@ -5,8 +5,9 @@ use crate::{ use int_to_bytes::int_to_bytes4; use rand::RngCore; use serde_derive::{Deserialize, Serialize}; -use ssz_derive::{Decode, Encode, TreeHash}; +use ssz_derive::{Decode, Encode}; use test_random_derive::TestRandom; +use tree_hash_derive::TreeHash; /// Specifies a fork of the `BeaconChain`, to prevent replay attacks. /// diff --git a/eth2/types/src/historical_batch.rs b/eth2/types/src/historical_batch.rs index 77859ed1a..33dc9c450 100644 --- a/eth2/types/src/historical_batch.rs +++ b/eth2/types/src/historical_batch.rs @@ -2,8 +2,9 @@ use crate::test_utils::TestRandom; use crate::Hash256; use rand::RngCore; use serde_derive::{Deserialize, Serialize}; -use ssz_derive::{Decode, Encode, TreeHash}; +use ssz_derive::{Decode, Encode}; use test_random_derive::TestRandom; +use tree_hash_derive::TreeHash; /// Historical block and state roots. 
/// diff --git a/eth2/types/src/pending_attestation.rs b/eth2/types/src/pending_attestation.rs index 938e59bef..5cbe1edeb 100644 --- a/eth2/types/src/pending_attestation.rs +++ b/eth2/types/src/pending_attestation.rs @@ -2,8 +2,9 @@ use crate::test_utils::TestRandom; use crate::{Attestation, AttestationData, Bitfield, Slot}; use rand::RngCore; use serde_derive::{Deserialize, Serialize}; -use ssz_derive::{Decode, Encode, TreeHash}; +use ssz_derive::{Decode, Encode}; use test_random_derive::TestRandom; +use tree_hash_derive::TreeHash; /// An attestation that has been included in the state but not yet fully processed. /// diff --git a/eth2/types/src/proposer_slashing.rs b/eth2/types/src/proposer_slashing.rs index 02216a2fc..901f02388 100644 --- a/eth2/types/src/proposer_slashing.rs +++ b/eth2/types/src/proposer_slashing.rs @@ -2,8 +2,9 @@ use super::BeaconBlockHeader; use crate::test_utils::TestRandom; use rand::RngCore; use serde_derive::{Deserialize, Serialize}; -use ssz_derive::{Decode, Encode, TreeHash}; +use ssz_derive::{Decode, Encode}; use test_random_derive::TestRandom; +use tree_hash_derive::TreeHash; /// Two conflicting proposals from the same proposer (validator). /// diff --git a/eth2/types/src/slashable_attestation.rs b/eth2/types/src/slashable_attestation.rs index e557285b8..37462f006 100644 --- a/eth2/types/src/slashable_attestation.rs +++ b/eth2/types/src/slashable_attestation.rs @@ -1,9 +1,10 @@ use crate::{test_utils::TestRandom, AggregateSignature, AttestationData, Bitfield, ChainSpec}; use rand::RngCore; use serde_derive::{Deserialize, Serialize}; -use ssz::TreeHash; -use ssz_derive::{Decode, Encode, SignedRoot, TreeHash}; +use ssz_derive::{Decode, Encode}; use test_random_derive::TestRandom; +use tree_hash::TreeHash; +use tree_hash_derive::{SignedRoot, TreeHash}; /// Details an attestation that can be slashable. 
/// diff --git a/eth2/types/src/slot_epoch.rs b/eth2/types/src/slot_epoch.rs index d334177e5..6c6a92ecb 100644 --- a/eth2/types/src/slot_epoch.rs +++ b/eth2/types/src/slot_epoch.rs @@ -14,7 +14,7 @@ use crate::test_utils::TestRandom; use rand::RngCore; use serde_derive::{Deserialize, Serialize}; use slog; -use ssz::{hash, ssz_encode, Decodable, DecodeError, Encodable, SszStream, TreeHash}; +use ssz::{ssz_encode, Decodable, DecodeError, Encodable, SszStream}; use std::cmp::{Ord, Ordering}; use std::fmt; use std::hash::{Hash, Hasher}; diff --git a/eth2/types/src/slot_epoch_macros.rs b/eth2/types/src/slot_epoch_macros.rs index 300ad3f6f..b3ca5c4bc 100644 --- a/eth2/types/src/slot_epoch_macros.rs +++ b/eth2/types/src/slot_epoch_macros.rs @@ -206,11 +206,21 @@ macro_rules! impl_ssz { } } - impl TreeHash for $type { - fn hash_tree_root(&self) -> Vec { - let mut result: Vec = vec![]; - result.append(&mut self.0.hash_tree_root()); - hash(&result) + impl tree_hash::TreeHash for $type { + fn tree_hash_type() -> tree_hash::TreeHashType { + tree_hash::TreeHashType::Basic + } + + fn tree_hash_packed_encoding(&self) -> Vec { + ssz_encode(self) + } + + fn tree_hash_packing_factor() -> usize { + 32 / 8 + } + + fn tree_hash_root(&self) -> Vec { + int_to_bytes::int_to_bytes32(self.0) } } diff --git a/eth2/types/src/slot_height.rs b/eth2/types/src/slot_height.rs index 4a783d4a0..f7a34cbba 100644 --- a/eth2/types/src/slot_height.rs +++ b/eth2/types/src/slot_height.rs @@ -2,7 +2,7 @@ use crate::slot_epoch::{Epoch, Slot}; use crate::test_utils::TestRandom; use rand::RngCore; use serde_derive::Serialize; -use ssz::{hash, ssz_encode, Decodable, DecodeError, Encodable, SszStream, TreeHash}; +use ssz::{ssz_encode, Decodable, DecodeError, Encodable, SszStream}; use std::cmp::{Ord, Ordering}; use std::fmt; use std::hash::{Hash, Hasher}; diff --git a/eth2/types/src/test_utils/macros.rs b/eth2/types/src/test_utils/macros.rs index d580fd818..d5711e96e 100644 --- 
a/eth2/types/src/test_utils/macros.rs +++ b/eth2/types/src/test_utils/macros.rs @@ -17,14 +17,14 @@ macro_rules! ssz_tests { } #[test] - pub fn test_hash_tree_root() { + pub fn test_tree_hash_root() { use crate::test_utils::{SeedableRng, TestRandom, XorShiftRng}; - use ssz::TreeHash; + use tree_hash::TreeHash; let mut rng = XorShiftRng::from_seed([42; 16]); let original = $type::random_for_test(&mut rng); - let result = original.hash_tree_root(); + let result = original.tree_hash_root(); assert_eq!(result.len(), 32); // TODO: Add further tests diff --git a/eth2/types/src/test_utils/testing_attestation_builder.rs b/eth2/types/src/test_utils/testing_attestation_builder.rs index 60624b48d..162facc8e 100644 --- a/eth2/types/src/test_utils/testing_attestation_builder.rs +++ b/eth2/types/src/test_utils/testing_attestation_builder.rs @@ -1,6 +1,6 @@ use crate::test_utils::TestingAttestationDataBuilder; use crate::*; -use ssz::TreeHash; +use tree_hash::TreeHash; /// Builds an attestation to be used for testing purposes. /// @@ -74,7 +74,7 @@ impl TestingAttestationBuilder { data: self.attestation.data.clone(), custody_bit: false, } - .hash_tree_root(); + .tree_hash_root(); let domain = spec.get_domain( self.attestation.data.slot.epoch(spec.slots_per_epoch), diff --git a/eth2/types/src/test_utils/testing_attester_slashing_builder.rs b/eth2/types/src/test_utils/testing_attester_slashing_builder.rs index fcaa3285b..dc01f7fb0 100644 --- a/eth2/types/src/test_utils/testing_attester_slashing_builder.rs +++ b/eth2/types/src/test_utils/testing_attester_slashing_builder.rs @@ -1,5 +1,5 @@ use crate::*; -use ssz::TreeHash; +use tree_hash::TreeHash; /// Builds an `AttesterSlashing`. 
/// @@ -66,7 +66,7 @@ impl TestingAttesterSlashingBuilder { data: attestation.data.clone(), custody_bit: false, }; - let message = attestation_data_and_custody_bit.hash_tree_root(); + let message = attestation_data_and_custody_bit.tree_hash_root(); for (i, validator_index) in validator_indices.iter().enumerate() { attestation.custody_bitfield.set(i, false); diff --git a/eth2/types/src/test_utils/testing_beacon_block_builder.rs b/eth2/types/src/test_utils/testing_beacon_block_builder.rs index c5cd22ed4..549c00ac0 100644 --- a/eth2/types/src/test_utils/testing_beacon_block_builder.rs +++ b/eth2/types/src/test_utils/testing_beacon_block_builder.rs @@ -6,7 +6,7 @@ use crate::{ *, }; use rayon::prelude::*; -use ssz::{SignedRoot, TreeHash}; +use tree_hash::{SignedRoot, TreeHash}; /// Builds a beacon block to be used for testing purposes. /// @@ -43,7 +43,7 @@ impl TestingBeaconBlockBuilder { /// Modifying the block's slot after signing may invalidate the signature. pub fn set_randao_reveal(&mut self, sk: &SecretKey, fork: &Fork, spec: &ChainSpec) { let epoch = self.block.slot.epoch(spec.slots_per_epoch); - let message = epoch.hash_tree_root(); + let message = epoch.tree_hash_root(); let domain = spec.get_domain(epoch, Domain::Randao, fork); self.block.body.randao_reveal = Signature::new(&message, domain, sk); } diff --git a/eth2/types/src/test_utils/testing_proposer_slashing_builder.rs b/eth2/types/src/test_utils/testing_proposer_slashing_builder.rs index 2cfebd915..03c257b2d 100644 --- a/eth2/types/src/test_utils/testing_proposer_slashing_builder.rs +++ b/eth2/types/src/test_utils/testing_proposer_slashing_builder.rs @@ -1,5 +1,5 @@ use crate::*; -use ssz::SignedRoot; +use tree_hash::SignedRoot; /// Builds a `ProposerSlashing`. 
/// diff --git a/eth2/types/src/test_utils/testing_transfer_builder.rs b/eth2/types/src/test_utils/testing_transfer_builder.rs index 354e29aa5..2680f7b66 100644 --- a/eth2/types/src/test_utils/testing_transfer_builder.rs +++ b/eth2/types/src/test_utils/testing_transfer_builder.rs @@ -1,5 +1,5 @@ use crate::*; -use ssz::SignedRoot; +use tree_hash::SignedRoot; /// Builds a transfer to be used for testing purposes. /// diff --git a/eth2/types/src/test_utils/testing_voluntary_exit_builder.rs b/eth2/types/src/test_utils/testing_voluntary_exit_builder.rs index fe5c8325a..8583bc451 100644 --- a/eth2/types/src/test_utils/testing_voluntary_exit_builder.rs +++ b/eth2/types/src/test_utils/testing_voluntary_exit_builder.rs @@ -1,5 +1,5 @@ use crate::*; -use ssz::SignedRoot; +use tree_hash::SignedRoot; /// Builds an exit to be used for testing purposes. /// diff --git a/eth2/types/src/transfer.rs b/eth2/types/src/transfer.rs index f291190b2..f40050bc4 100644 --- a/eth2/types/src/transfer.rs +++ b/eth2/types/src/transfer.rs @@ -4,9 +4,10 @@ use bls::{PublicKey, Signature}; use derivative::Derivative; use rand::RngCore; use serde_derive::{Deserialize, Serialize}; -use ssz::TreeHash; -use ssz_derive::{Decode, Encode, SignedRoot, TreeHash}; +use ssz_derive::{Decode, Encode}; use test_random_derive::TestRandom; +use tree_hash::TreeHash; +use tree_hash_derive::{SignedRoot, TreeHash}; /// The data submitted to the deposit contract. /// diff --git a/eth2/types/src/validator.rs b/eth2/types/src/validator.rs index f57261175..67b4e85df 100644 --- a/eth2/types/src/validator.rs +++ b/eth2/types/src/validator.rs @@ -1,8 +1,9 @@ use crate::{test_utils::TestRandom, Epoch, Hash256, PublicKey}; use rand::RngCore; use serde_derive::{Deserialize, Serialize}; -use ssz_derive::{Decode, Encode, TreeHash}; +use ssz_derive::{Decode, Encode}; use test_random_derive::TestRandom; +use tree_hash_derive::TreeHash; /// Information about a `BeaconChain` validator. 
/// diff --git a/eth2/types/src/voluntary_exit.rs b/eth2/types/src/voluntary_exit.rs index 0cdc63149..16d22c544 100644 --- a/eth2/types/src/voluntary_exit.rs +++ b/eth2/types/src/voluntary_exit.rs @@ -2,9 +2,10 @@ use crate::{test_utils::TestRandom, Epoch}; use bls::Signature; use rand::RngCore; use serde_derive::{Deserialize, Serialize}; -use ssz::TreeHash; -use ssz_derive::{Decode, Encode, SignedRoot, TreeHash}; +use ssz_derive::{Decode, Encode}; use test_random_derive::TestRandom; +use tree_hash::TreeHash; +use tree_hash_derive::{SignedRoot, TreeHash}; /// An exit voluntarily submitted a validator who wishes to withdraw. /// diff --git a/eth2/utils/bls/Cargo.toml b/eth2/utils/bls/Cargo.toml index 4230a06ea..439debdcb 100644 --- a/eth2/utils/bls/Cargo.toml +++ b/eth2/utils/bls/Cargo.toml @@ -12,3 +12,4 @@ serde = "1.0" serde_derive = "1.0" serde_hex = { path = "../serde_hex" } ssz = { path = "../ssz" } +tree_hash = { path = "../tree_hash" } diff --git a/eth2/utils/bls/src/aggregate_signature.rs b/eth2/utils/bls/src/aggregate_signature.rs index 8c7ae5222..156e362e2 100644 --- a/eth2/utils/bls/src/aggregate_signature.rs +++ b/eth2/utils/bls/src/aggregate_signature.rs @@ -166,7 +166,7 @@ impl<'de> Deserialize<'de> for AggregateSignature { } impl TreeHash for AggregateSignature { - fn hash_tree_root(&self) -> Vec { + fn tree_hash_root(&self) -> Vec { hash(&self.as_bytes()) } } diff --git a/eth2/utils/bls/src/fake_aggregate_signature.rs b/eth2/utils/bls/src/fake_aggregate_signature.rs index 3f0ec0d6d..602639b6b 100644 --- a/eth2/utils/bls/src/fake_aggregate_signature.rs +++ b/eth2/utils/bls/src/fake_aggregate_signature.rs @@ -2,7 +2,8 @@ use super::{fake_signature::FakeSignature, AggregatePublicKey, BLS_AGG_SIG_BYTE_ use serde::de::{Deserialize, Deserializer}; use serde::ser::{Serialize, Serializer}; use serde_hex::{encode as hex_encode, PrefixedHexVisitor}; -use ssz::{hash, ssz_encode, Decodable, DecodeError, Encodable, SszStream, TreeHash}; +use ssz::{ssz_encode, 
Decodable, DecodeError, Encodable, SszStream}; +use tree_hash::impl_tree_hash_for_ssz_bytes; /// A BLS aggregate signature. /// @@ -98,11 +99,7 @@ impl<'de> Deserialize<'de> for FakeAggregateSignature { } } -impl TreeHash for FakeAggregateSignature { - fn hash_tree_root(&self) -> Vec { - hash(&self.bytes) - } -} +impl_tree_hash_for_ssz_bytes!(FakeAggregateSignature); #[cfg(test)] mod tests { diff --git a/eth2/utils/bls/src/fake_signature.rs b/eth2/utils/bls/src/fake_signature.rs index 3c9f3a9f4..b07dd66a5 100644 --- a/eth2/utils/bls/src/fake_signature.rs +++ b/eth2/utils/bls/src/fake_signature.rs @@ -3,7 +3,8 @@ use hex::encode as hex_encode; use serde::de::{Deserialize, Deserializer}; use serde::ser::{Serialize, Serializer}; use serde_hex::HexVisitor; -use ssz::{hash, ssz_encode, Decodable, DecodeError, Encodable, SszStream, TreeHash}; +use ssz::{ssz_encode, Decodable, DecodeError, Encodable, SszStream}; +use tree_hash::impl_tree_hash_for_ssz_bytes; /// A single BLS signature. /// @@ -73,11 +74,7 @@ impl Decodable for FakeSignature { } } -impl TreeHash for FakeSignature { - fn hash_tree_root(&self) -> Vec { - hash(&self.bytes) - } -} +impl_tree_hash_for_ssz_bytes!(FakeSignature); impl Serialize for FakeSignature { fn serialize(&self, serializer: S) -> Result diff --git a/eth2/utils/bls/src/public_key.rs b/eth2/utils/bls/src/public_key.rs index 177a735c4..a553ee888 100644 --- a/eth2/utils/bls/src/public_key.rs +++ b/eth2/utils/bls/src/public_key.rs @@ -3,10 +3,11 @@ use bls_aggregates::PublicKey as RawPublicKey; use serde::de::{Deserialize, Deserializer}; use serde::ser::{Serialize, Serializer}; use serde_hex::{encode as hex_encode, HexVisitor}; -use ssz::{decode, hash, ssz_encode, Decodable, DecodeError, Encodable, SszStream, TreeHash}; +use ssz::{decode, ssz_encode, Decodable, DecodeError, Encodable, SszStream}; use std::default; use std::fmt; use std::hash::{Hash, Hasher}; +use tree_hash::impl_tree_hash_for_ssz_bytes; /// A single BLS signature. 
/// @@ -104,11 +105,7 @@ impl<'de> Deserialize<'de> for PublicKey { } } -impl TreeHash for PublicKey { - fn hash_tree_root(&self) -> Vec { - hash(&self.0.as_bytes()) - } -} +impl_tree_hash_for_ssz_bytes!(PublicKey); impl PartialEq for PublicKey { fn eq(&self, other: &PublicKey) -> bool { diff --git a/eth2/utils/bls/src/secret_key.rs b/eth2/utils/bls/src/secret_key.rs index 40c469513..38fd2d379 100644 --- a/eth2/utils/bls/src/secret_key.rs +++ b/eth2/utils/bls/src/secret_key.rs @@ -4,7 +4,8 @@ use hex::encode as hex_encode; use serde::de::{Deserialize, Deserializer}; use serde::ser::{Serialize, Serializer}; use serde_hex::HexVisitor; -use ssz::{decode, ssz_encode, Decodable, DecodeError, Encodable, SszStream, TreeHash}; +use ssz::{decode, ssz_encode, Decodable, DecodeError, Encodable, SszStream}; +use tree_hash::impl_tree_hash_for_ssz_bytes; /// A single BLS signature. /// @@ -69,11 +70,7 @@ impl<'de> Deserialize<'de> for SecretKey { } } -impl TreeHash for SecretKey { - fn hash_tree_root(&self) -> Vec { - self.0.as_bytes().clone() - } -} +impl_tree_hash_for_ssz_bytes!(SecretKey); #[cfg(test)] mod tests { diff --git a/eth2/utils/bls/src/signature.rs b/eth2/utils/bls/src/signature.rs index d19af545f..30b55a787 100644 --- a/eth2/utils/bls/src/signature.rs +++ b/eth2/utils/bls/src/signature.rs @@ -115,7 +115,7 @@ impl Decodable for Signature { } impl TreeHash for Signature { - fn hash_tree_root(&self) -> Vec { + fn tree_hash_root(&self) -> Vec { hash(&self.as_bytes()) } } diff --git a/eth2/utils/boolean-bitfield/Cargo.toml b/eth2/utils/boolean-bitfield/Cargo.toml index cf037c5d7..f08695bd1 100644 --- a/eth2/utils/boolean-bitfield/Cargo.toml +++ b/eth2/utils/boolean-bitfield/Cargo.toml @@ -10,3 +10,4 @@ ssz = { path = "../ssz" } bit-vec = "0.5.0" serde = "1.0" serde_derive = "1.0" +tree_hash = { path = "../tree_hash" } diff --git a/eth2/utils/boolean-bitfield/src/lib.rs b/eth2/utils/boolean-bitfield/src/lib.rs index d04516dba..fbd0e2ecd 100644 --- 
a/eth2/utils/boolean-bitfield/src/lib.rs +++ b/eth2/utils/boolean-bitfield/src/lib.rs @@ -9,6 +9,7 @@ use serde_hex::{encode, PrefixedHexVisitor}; use ssz::{Decodable, Encodable}; use std::cmp; use std::default; +use tree_hash::impl_tree_hash_for_ssz_bytes; /// A BooleanBitfield represents a set of booleans compactly stored as a vector of bits. /// The BooleanBitfield is given a fixed size during construction. Reads outside of the current size return an out-of-bounds error. Writes outside of the current size expand the size of the set. @@ -256,11 +257,7 @@ impl<'de> Deserialize<'de> for BooleanBitfield { } } -impl ssz::TreeHash for BooleanBitfield { - fn hash_tree_root(&self) -> Vec { - self.to_bytes().hash_tree_root() - } -} +impl_tree_hash_for_ssz_bytes!(BooleanBitfield); #[cfg(test)] mod tests { diff --git a/eth2/utils/ssz_derive/tests/test_derives.rs b/eth2/utils/ssz_derive/tests/test_derives.rs deleted file mode 100644 index e025dc3a5..000000000 --- a/eth2/utils/ssz_derive/tests/test_derives.rs +++ /dev/null @@ -1,94 +0,0 @@ -use ssz::{SignedRoot, TreeHash}; -use ssz_derive::{SignedRoot, TreeHash}; - -#[derive(TreeHash, SignedRoot)] -struct CryptoKitties { - best_kitty: u64, - worst_kitty: u8, - kitties: Vec, -} - -impl CryptoKitties { - fn new() -> Self { - CryptoKitties { - best_kitty: 9999, - worst_kitty: 1, - kitties: vec![2, 3, 5, 7, 11, 13, 17, 19, 23, 29, 31, 37, 41, 43], - } - } - - fn hash(&self) -> Vec { - let mut list: Vec> = Vec::new(); - list.push(self.best_kitty.hash_tree_root()); - list.push(self.worst_kitty.hash_tree_root()); - list.push(self.kitties.hash_tree_root()); - ssz::merkle_hash(&mut list) - } -} - -#[test] -fn test_cryptokitties_hash() { - let kitties = CryptoKitties::new(); - let expected_hash = vec![ - 201, 9, 139, 14, 24, 247, 21, 55, 132, 211, 51, 125, 183, 186, 177, 33, 147, 210, 42, 108, - 174, 162, 221, 227, 157, 179, 15, 7, 97, 239, 82, 220, - ]; - assert_eq!(kitties.hash(), expected_hash); -} - -#[test] -fn 
test_simple_tree_hash_derive() { - let kitties = CryptoKitties::new(); - assert_eq!(kitties.hash_tree_root(), kitties.hash()); -} - -#[test] -fn test_simple_signed_root_derive() { - let kitties = CryptoKitties::new(); - assert_eq!(kitties.signed_root(), kitties.hash()); -} - -#[derive(TreeHash, SignedRoot)] -struct Casper { - friendly: bool, - #[tree_hash(skip_hashing)] - friends: Vec, - #[signed_root(skip_hashing)] - dead: bool, -} - -impl Casper { - fn new() -> Self { - Casper { - friendly: true, - friends: vec![1, 2, 3, 4, 5, 6, 7, 8, 9, 10], - dead: true, - } - } - - fn expected_signed_hash(&self) -> Vec { - let mut list = Vec::new(); - list.push(self.friendly.hash_tree_root()); - list.push(self.friends.hash_tree_root()); - ssz::merkle_hash(&mut list) - } - - fn expected_tree_hash(&self) -> Vec { - let mut list = Vec::new(); - list.push(self.friendly.hash_tree_root()); - list.push(self.dead.hash_tree_root()); - ssz::merkle_hash(&mut list) - } -} - -#[test] -fn test_annotated_tree_hash_derive() { - let casper = Casper::new(); - assert_eq!(casper.hash_tree_root(), casper.expected_tree_hash()); -} - -#[test] -fn test_annotated_signed_root_derive() { - let casper = Casper::new(); - assert_eq!(casper.signed_root(), casper.expected_signed_hash()); -} diff --git a/eth2/utils/tree_hash/src/standard_tree_hash.rs b/eth2/utils/tree_hash/src/standard_tree_hash.rs index ea0677180..473d2a5f0 100644 --- a/eth2/utils/tree_hash/src/standard_tree_hash.rs +++ b/eth2/utils/tree_hash/src/standard_tree_hash.rs @@ -25,9 +25,14 @@ pub fn efficient_merkleize(bytes: &[u8]) -> Vec { let nodes = num_nodes(leaves); let internal_nodes = nodes - leaves; - let num_bytes = internal_nodes * HASHSIZE + bytes.len(); + let num_bytes = std::cmp::max(internal_nodes, 1) * HASHSIZE + bytes.len(); let mut o: Vec = vec![0; internal_nodes * HASHSIZE]; + + if o.len() < HASHSIZE { + o.resize(HASHSIZE, 0); + } + o.append(&mut bytes.to_vec()); assert_eq!(o.len(), num_bytes); diff --git 
a/eth2/utils/tree_hash/src/standard_tree_hash/impls.rs b/eth2/utils/tree_hash/src/standard_tree_hash/impls.rs index 070e314b8..749d5b3bb 100644 --- a/eth2/utils/tree_hash/src/standard_tree_hash/impls.rs +++ b/eth2/utils/tree_hash/src/standard_tree_hash/impls.rs @@ -30,6 +30,24 @@ impl_for_bitsize!(u64, 64); impl_for_bitsize!(usize, 64); impl_for_bitsize!(bool, 8); +impl TreeHash for [u8; 4] { + fn tree_hash_type() -> TreeHashType { + TreeHashType::List + } + + fn tree_hash_packed_encoding(&self) -> Vec { + panic!("bytesN should never be packed.") + } + + fn tree_hash_packing_factor() -> usize { + panic!("bytesN should never be packed.") + } + + fn tree_hash_root(&self) -> Vec { + merkle_root(&ssz::ssz_encode(self)) + } +} + impl TreeHash for H256 { fn tree_hash_type() -> TreeHashType { TreeHashType::Basic @@ -95,3 +113,20 @@ where hash(&root_and_len) } } + +#[cfg(test)] +mod test { + use super::*; + + #[test] + fn bool() { + let mut true_bytes: Vec = vec![1]; + true_bytes.append(&mut vec![0; 31]); + + let false_bytes: Vec = vec![0; 32]; + + assert_eq!(true.tree_hash_root(), true_bytes); + assert_eq!(false.tree_hash_root(), false_bytes); + } + +} diff --git a/eth2/utils/tree_hash_derive/src/lib.rs b/eth2/utils/tree_hash_derive/src/lib.rs index dc3702c72..e3a7b4aaa 100644 --- a/eth2/utils/tree_hash_derive/src/lib.rs +++ b/eth2/utils/tree_hash_derive/src/lib.rs @@ -31,12 +31,10 @@ fn get_hashable_named_field_idents<'a>(struct_data: &'a syn::DataStruct) -> Vec< /// /// The field attribute is: `#[tree_hash(skip_hashing)]` fn should_skip_hashing(field: &syn::Field) -> bool { - for attr in &field.attrs { - if attr.tts.to_string() == "( skip_hashing )" { - return true; - } - } - false + field + .attrs + .iter() + .any(|attr| attr.into_token_stream().to_string() == "# [ tree_hash ( skip_hashing ) ]") } /// Implements `tree_hash::CachedTreeHashSubTree` for some `struct`. 
diff --git a/eth2/utils/tree_hash_derive/tests/tests.rs b/eth2/utils/tree_hash_derive/tests/tests.rs index 721e77715..a7c74b23e 100644 --- a/eth2/utils/tree_hash_derive/tests/tests.rs +++ b/eth2/utils/tree_hash_derive/tests/tests.rs @@ -98,3 +98,85 @@ fn signed_root() { assert_eq!(unsigned.tree_hash_root(), signed.signed_root()); } + +#[derive(TreeHash, SignedRoot)] +struct CryptoKitties { + best_kitty: u64, + worst_kitty: u8, + kitties: Vec, +} + +impl CryptoKitties { + fn new() -> Self { + CryptoKitties { + best_kitty: 9999, + worst_kitty: 1, + kitties: vec![2, 3, 5, 7, 11, 13, 17, 19, 23, 29, 31, 37, 41, 43], + } + } + + fn hash(&self) -> Vec { + let mut leaves = vec![]; + leaves.append(&mut self.best_kitty.tree_hash_root()); + leaves.append(&mut self.worst_kitty.tree_hash_root()); + leaves.append(&mut self.kitties.tree_hash_root()); + tree_hash::merkle_root(&leaves) + } +} + +#[test] +fn test_simple_tree_hash_derive() { + let kitties = CryptoKitties::new(); + assert_eq!(kitties.tree_hash_root(), kitties.hash()); +} + +#[test] +fn test_simple_signed_root_derive() { + let kitties = CryptoKitties::new(); + assert_eq!(kitties.signed_root(), kitties.hash()); +} + +#[derive(TreeHash, SignedRoot)] +struct Casper { + friendly: bool, + #[tree_hash(skip_hashing)] + friends: Vec, + #[signed_root(skip_hashing)] + dead: bool, +} + +impl Casper { + fn new() -> Self { + Casper { + friendly: true, + friends: vec![1, 2, 3, 4, 5, 6, 7, 8, 9, 10], + dead: true, + } + } + + fn expected_signed_hash(&self) -> Vec { + let mut list = Vec::new(); + list.append(&mut self.friendly.tree_hash_root()); + list.append(&mut self.friends.tree_hash_root()); + tree_hash::merkle_root(&list) + } + + fn expected_tree_hash(&self) -> Vec { + let mut list = Vec::new(); + list.append(&mut self.friendly.tree_hash_root()); + list.append(&mut self.dead.tree_hash_root()); + tree_hash::merkle_root(&list) + } +} + +#[test] +fn test_annotated_tree_hash_derive() { + let casper = Casper::new(); + 
assert_eq!(casper.tree_hash_root(), casper.expected_tree_hash()); +} + +#[test] +fn test_annotated_signed_root_derive() { + let casper = Casper::new(); + assert_eq!(casper.signed_root(), casper.expected_signed_hash()); +} diff --git a/validator_client/src/attestation_producer/mod.rs b/validator_client/src/attestation_producer/mod.rs index 0fbc7bcba..d2dbdf2e2 100644 --- a/validator_client/src/attestation_producer/mod.rs +++ b/validator_client/src/attestation_producer/mod.rs @@ -8,7 +8,7 @@ use super::block_producer::{BeaconNodeError, PublishOutcome, ValidatorEvent}; use crate::signer::Signer; use beacon_node_attestation::BeaconNodeAttestation; use slog::{error, info, warn}; -use ssz::TreeHash; +use tree_hash::TreeHash; use types::{ AggregateSignature, Attestation, AttestationData, AttestationDataAndCustodyBit, AttestationDuty, Bitfield, @@ -123,7 +123,7 @@ impl<'a, B: BeaconNodeAttestation, S: Signer> AttestationProducer<'a, B, S> { data: attestation.clone(), custody_bit: false, } - .hash_tree_root(); + .tree_hash_root(); let sig = self.signer.sign_message(&message, domain)?; diff --git a/validator_client/src/block_producer/mod.rs b/validator_client/src/block_producer/mod.rs index 8b4f5abda..9cc0460c7 100644 --- a/validator_client/src/block_producer/mod.rs +++ b/validator_client/src/block_producer/mod.rs @@ -86,7 +86,7 @@ impl<'a, B: BeaconNodeBlock, S: Signer> BlockProducer<'a, B, S> { pub fn produce_block(&mut self) -> Result { let epoch = self.slot.epoch(self.spec.slots_per_epoch); - let message = epoch.hash_tree_root(); + let message = epoch.tree_hash_root(); let randao_reveal = match self.signer.sign_message( &message, self.spec.get_domain(epoch, Domain::Randao, &self.fork), From f69b56ad6032d906b79aa086e55ee9097ca685df Mon Sep 17 00:00:00 2001 From: Paul Hauner Date: Tue, 16 Apr 2019 14:25:43 +1000 Subject: [PATCH 55/89] Add new `tree_hash` crate project wide. 
--- beacon_node/beacon_chain/Cargo.toml | 1 + beacon_node/beacon_chain/test_harness/Cargo.toml | 1 + beacon_node/network/Cargo.toml | 1 + eth2/attester/Cargo.toml | 1 + eth2/attester/src/lib.rs | 3 +-- eth2/block_proposer/Cargo.toml | 1 + eth2/block_proposer/src/lib.rs | 2 +- eth2/state_processing/Cargo.toml | 2 ++ eth2/state_processing/src/per_block_processing.rs | 2 +- eth2/utils/bls/src/aggregate_signature.rs | 9 +++------ eth2/utils/bls/src/signature.rs | 9 +++------ validator_client/Cargo.toml | 1 + validator_client/src/block_producer/mod.rs | 2 +- 13 files changed, 18 insertions(+), 17 deletions(-) diff --git a/beacon_node/beacon_chain/Cargo.toml b/beacon_node/beacon_chain/Cargo.toml index 55d4bacfd..e2a4527a9 100644 --- a/beacon_node/beacon_chain/Cargo.toml +++ b/beacon_node/beacon_chain/Cargo.toml @@ -23,4 +23,5 @@ serde_json = "1.0" slot_clock = { path = "../../eth2/utils/slot_clock" } ssz = { path = "../../eth2/utils/ssz" } state_processing = { path = "../../eth2/state_processing" } +tree_hash = { path = "../../eth2/utils/tree_hash" } types = { path = "../../eth2/types" } diff --git a/beacon_node/beacon_chain/test_harness/Cargo.toml b/beacon_node/beacon_chain/test_harness/Cargo.toml index 50d154732..a2abf6c5a 100644 --- a/beacon_node/beacon_chain/test_harness/Cargo.toml +++ b/beacon_node/beacon_chain/test_harness/Cargo.toml @@ -38,5 +38,6 @@ serde_json = "1.0" serde_yaml = "0.8" slot_clock = { path = "../../../eth2/utils/slot_clock" } ssz = { path = "../../../eth2/utils/ssz" } +tree_hash = { path = "../../../eth2/utils/tree_hash" } types = { path = "../../../eth2/types" } yaml-rust = "0.4.2" diff --git a/beacon_node/network/Cargo.toml b/beacon_node/network/Cargo.toml index cd2c2269a..36bf1f141 100644 --- a/beacon_node/network/Cargo.toml +++ b/beacon_node/network/Cargo.toml @@ -15,6 +15,7 @@ version = { path = "../version" } types = { path = "../../eth2/types" } slog = { version = "^2.2.3" , features = ["max_level_trace", "release_max_level_debug"] } ssz = 
{ path = "../../eth2/utils/ssz" } +tree_hash = { path = "../../eth2/utils/tree_hash" } futures = "0.1.25" error-chain = "0.12.0" crossbeam-channel = "0.3.8" diff --git a/eth2/attester/Cargo.toml b/eth2/attester/Cargo.toml index 956ecf565..41824274d 100644 --- a/eth2/attester/Cargo.toml +++ b/eth2/attester/Cargo.toml @@ -7,4 +7,5 @@ edition = "2018" [dependencies] slot_clock = { path = "../../eth2/utils/slot_clock" } ssz = { path = "../../eth2/utils/ssz" } +tree_hash = { path = "../../eth2/utils/tree_hash" } types = { path = "../../eth2/types" } diff --git a/eth2/attester/src/lib.rs b/eth2/attester/src/lib.rs index a9e3091af..1bbbd6b43 100644 --- a/eth2/attester/src/lib.rs +++ b/eth2/attester/src/lib.rs @@ -141,8 +141,7 @@ impl Attester Deserialize<'de> for AggregateSignature { } } -impl TreeHash for AggregateSignature { - fn tree_hash_root(&self) -> Vec { - hash(&self.as_bytes()) - } -} +impl_tree_hash_for_ssz_bytes!(AggregateSignature); #[cfg(test)] mod tests { diff --git a/eth2/utils/bls/src/signature.rs b/eth2/utils/bls/src/signature.rs index 30b55a787..cf6c8fe5a 100644 --- a/eth2/utils/bls/src/signature.rs +++ b/eth2/utils/bls/src/signature.rs @@ -4,7 +4,8 @@ use hex::encode as hex_encode; use serde::de::{Deserialize, Deserializer}; use serde::ser::{Serialize, Serializer}; use serde_hex::HexVisitor; -use ssz::{decode, hash, ssz_encode, Decodable, DecodeError, Encodable, SszStream, TreeHash}; +use ssz::{decode, ssz_encode, Decodable, DecodeError, Encodable, SszStream}; +use tree_hash::impl_tree_hash_for_ssz_bytes; /// A single BLS signature. /// @@ -114,11 +115,7 @@ impl Decodable for Signature { } } -impl TreeHash for Signature { - fn tree_hash_root(&self) -> Vec { - hash(&self.as_bytes()) - } -} +impl_tree_hash_for_ssz_bytes!(Signature); impl Serialize for Signature { /// Serde serialization is compliant the Ethereum YAML test format. 
diff --git a/validator_client/Cargo.toml b/validator_client/Cargo.toml index 80477c8ea..7f6b0cee9 100644 --- a/validator_client/Cargo.toml +++ b/validator_client/Cargo.toml @@ -17,6 +17,7 @@ block_proposer = { path = "../eth2/block_proposer" } attester = { path = "../eth2/attester" } bls = { path = "../eth2/utils/bls" } ssz = { path = "../eth2/utils/ssz" } +tree_hash = { path = "../eth2/utils/tree_hash" } clap = "2.32.0" dirs = "1.0.3" grpcio = { version = "0.4", default-features = false, features = ["protobuf-codec"] } diff --git a/validator_client/src/block_producer/mod.rs b/validator_client/src/block_producer/mod.rs index 9cc0460c7..2689b302d 100644 --- a/validator_client/src/block_producer/mod.rs +++ b/validator_client/src/block_producer/mod.rs @@ -6,8 +6,8 @@ pub use self::beacon_node_block::{BeaconNodeError, PublishOutcome}; pub use self::grpc::BeaconBlockGrpcClient; use crate::signer::Signer; use slog::{error, info, warn}; -use ssz::{SignedRoot, TreeHash}; use std::sync::Arc; +use tree_hash::{SignedRoot, TreeHash}; use types::{BeaconBlock, ChainSpec, Domain, Fork, Slot}; #[derive(Debug, PartialEq)] From 49d066015b08901f5729148c4a27fb8872d42e8f Mon Sep 17 00:00:00 2001 From: Paul Hauner Date: Wed, 17 Apr 2019 10:33:31 +1000 Subject: [PATCH 56/89] Make genesis beacon state return a beacon state --- eth2/state_processing/src/get_genesis_state.rs | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/eth2/state_processing/src/get_genesis_state.rs b/eth2/state_processing/src/get_genesis_state.rs index 21cdafcf9..2bde8ce0c 100644 --- a/eth2/state_processing/src/get_genesis_state.rs +++ b/eth2/state_processing/src/get_genesis_state.rs @@ -15,7 +15,7 @@ pub fn get_genesis_state( genesis_time: u64, genesis_eth1_data: Eth1Data, spec: &ChainSpec, -) -> Result<(), BlockProcessingError> { +) -> Result { // Get the genesis `BeaconState` let mut state = BeaconState::genesis(genesis_time, genesis_eth1_data, spec); @@ -42,7 +42,7 @@ pub fn get_genesis_state( 
// Generate the current shuffling seed. state.current_shuffling_seed = state.generate_seed(spec.genesis_epoch, spec)?; - Ok(()) + Ok(state) } impl From for GenesisError { From af39f096e7a635f39acf9851f84f96cda6af8750 Mon Sep 17 00:00:00 2001 From: Paul Hauner Date: Wed, 17 Apr 2019 10:57:36 +1000 Subject: [PATCH 57/89] Add vector type to tree hashing --- eth2/types/src/beacon_state.rs | 28 ++++--- eth2/types/src/historical_batch.rs | 6 +- eth2/types/src/lib.rs | 2 + eth2/types/src/test_utils/mod.rs | 5 +- eth2/types/src/tree_hash_vector.rs | 82 +++++++++++++++++++ .../src/cached_tree_hash/impls/vec.rs | 6 +- eth2/utils/tree_hash/src/lib.rs | 32 +++++++- .../utils/tree_hash/src/standard_tree_hash.rs | 15 ++-- .../tree_hash/src/standard_tree_hash/impls.rs | 58 +++++++------ eth2/utils/tree_hash/tests/tests.rs | 34 +++++++- eth2/utils/tree_hash_derive/src/lib.rs | 15 +--- 11 files changed, 211 insertions(+), 72 deletions(-) create mode 100644 eth2/types/src/tree_hash_vector.rs diff --git a/eth2/types/src/beacon_state.rs b/eth2/types/src/beacon_state.rs index 19c1b4c11..c068c4e03 100644 --- a/eth2/types/src/beacon_state.rs +++ b/eth2/types/src/beacon_state.rs @@ -60,7 +60,7 @@ pub struct BeaconState { pub validator_registry_update_epoch: Epoch, // Randomness and committees - pub latest_randao_mixes: Vec, + pub latest_randao_mixes: TreeHashVector, pub previous_shuffling_start_shard: u64, pub current_shuffling_start_shard: u64, pub previous_shuffling_epoch: Epoch, @@ -80,11 +80,11 @@ pub struct BeaconState { pub finalized_root: Hash256, // Recent state - pub latest_crosslinks: Vec, - latest_block_roots: Vec, - latest_state_roots: Vec, - latest_active_index_roots: Vec, - latest_slashed_balances: Vec, + pub latest_crosslinks: TreeHashVector, + latest_block_roots: TreeHashVector, + latest_state_roots: TreeHashVector, + latest_active_index_roots: TreeHashVector, + latest_slashed_balances: TreeHashVector, pub latest_block_header: BeaconBlockHeader, pub historical_roots: Vec, 
@@ -139,7 +139,8 @@ impl BeaconState { validator_registry_update_epoch: spec.genesis_epoch, // Randomness and committees - latest_randao_mixes: vec![spec.zero_hash; spec.latest_randao_mixes_length as usize], + latest_randao_mixes: vec![spec.zero_hash; spec.latest_randao_mixes_length as usize] + .into(), previous_shuffling_start_shard: spec.genesis_start_shard, current_shuffling_start_shard: spec.genesis_start_shard, previous_shuffling_epoch: spec.genesis_epoch, @@ -159,11 +160,12 @@ impl BeaconState { finalized_root: spec.zero_hash, // Recent state - latest_crosslinks: vec![initial_crosslink; spec.shard_count as usize], - latest_block_roots: vec![spec.zero_hash; spec.slots_per_historical_root], - latest_state_roots: vec![spec.zero_hash; spec.slots_per_historical_root], - latest_active_index_roots: vec![spec.zero_hash; spec.latest_active_index_roots_length], - latest_slashed_balances: vec![0; spec.latest_slashed_exit_length], + latest_crosslinks: vec![initial_crosslink; spec.shard_count as usize].into(), + latest_block_roots: vec![spec.zero_hash; spec.slots_per_historical_root].into(), + latest_state_roots: vec![spec.zero_hash; spec.slots_per_historical_root].into(), + latest_active_index_roots: vec![spec.zero_hash; spec.latest_active_index_roots_length] + .into(), + latest_slashed_balances: vec![0; spec.latest_slashed_exit_length].into(), latest_block_header: BeaconBlock::empty(spec).temporary_block_header(spec), historical_roots: vec![], @@ -505,7 +507,7 @@ impl BeaconState { /// Spec v0.5.0 pub fn fill_active_index_roots_with(&mut self, index_root: Hash256, spec: &ChainSpec) { self.latest_active_index_roots = - vec![index_root; spec.latest_active_index_roots_length as usize] + vec![index_root; spec.latest_active_index_roots_length as usize].into() } /// Safely obtains the index for latest state roots, given some `slot`. 
diff --git a/eth2/types/src/historical_batch.rs b/eth2/types/src/historical_batch.rs index 33dc9c450..23c26901e 100644 --- a/eth2/types/src/historical_batch.rs +++ b/eth2/types/src/historical_batch.rs @@ -1,5 +1,5 @@ use crate::test_utils::TestRandom; -use crate::Hash256; +use crate::{Hash256, TreeHashVector}; use rand::RngCore; use serde_derive::{Deserialize, Serialize}; use ssz_derive::{Decode, Encode}; @@ -11,8 +11,8 @@ use tree_hash_derive::TreeHash; /// Spec v0.5.0 #[derive(Debug, Clone, PartialEq, Serialize, Deserialize, Encode, Decode, TreeHash, TestRandom)] pub struct HistoricalBatch { - pub block_roots: Vec, - pub state_roots: Vec, + pub block_roots: TreeHashVector, + pub state_roots: TreeHashVector, } #[cfg(test)] diff --git a/eth2/types/src/lib.rs b/eth2/types/src/lib.rs index 118e862e8..070ed6745 100644 --- a/eth2/types/src/lib.rs +++ b/eth2/types/src/lib.rs @@ -27,6 +27,7 @@ pub mod pending_attestation; pub mod proposer_slashing; pub mod slashable_attestation; pub mod transfer; +pub mod tree_hash_vector; pub mod voluntary_exit; #[macro_use] pub mod slot_epoch_macros; @@ -65,6 +66,7 @@ pub use crate::slashable_attestation::SlashableAttestation; pub use crate::slot_epoch::{Epoch, Slot}; pub use crate::slot_height::SlotHeight; pub use crate::transfer::Transfer; +pub use crate::tree_hash_vector::TreeHashVector; pub use crate::validator::Validator; pub use crate::voluntary_exit::VoluntaryExit; diff --git a/eth2/types/src/test_utils/mod.rs b/eth2/types/src/test_utils/mod.rs index 018b70d15..9d69a48f6 100644 --- a/eth2/types/src/test_utils/mod.rs +++ b/eth2/types/src/test_utils/mod.rs @@ -17,7 +17,10 @@ mod testing_voluntary_exit_builder; pub use generate_deterministic_keypairs::generate_deterministic_keypairs; pub use keypairs_file::KeypairsFile; -pub use rand::{prng::XorShiftRng, SeedableRng}; +pub use rand::{ + RngCore, + {prng::XorShiftRng, SeedableRng}, +}; pub use serde_utils::{fork_from_hex_str, u8_from_hex_str}; pub use test_random::TestRandom; pub 
use testing_attestation_builder::TestingAttestationBuilder; diff --git a/eth2/types/src/tree_hash_vector.rs b/eth2/types/src/tree_hash_vector.rs new file mode 100644 index 000000000..1cc8e40a5 --- /dev/null +++ b/eth2/types/src/tree_hash_vector.rs @@ -0,0 +1,82 @@ +use crate::test_utils::{RngCore, TestRandom}; +use serde_derive::{Deserialize, Serialize}; +use ssz::{Decodable, DecodeError, Encodable, SszStream}; +use std::ops::{Deref, DerefMut}; +use tree_hash::TreeHash; + +#[derive(Debug, PartialEq, Clone, Serialize, Deserialize)] +pub struct TreeHashVector(Vec); + +impl From> for TreeHashVector { + fn from(vec: Vec) -> TreeHashVector { + TreeHashVector(vec) + } +} + +impl Into> for TreeHashVector { + fn into(self) -> Vec { + self.0 + } +} + +impl Deref for TreeHashVector { + type Target = Vec; + + fn deref(&self) -> &Vec { + &self.0 + } +} + +impl DerefMut for TreeHashVector { + fn deref_mut(&mut self) -> &mut Vec { + &mut self.0 + } +} + +impl tree_hash::TreeHash for TreeHashVector +where + T: TreeHash, +{ + fn tree_hash_type() -> tree_hash::TreeHashType { + tree_hash::TreeHashType::Vector + } + + fn tree_hash_packed_encoding(&self) -> Vec { + unreachable!("Vector should never be packed.") + } + + fn tree_hash_packing_factor() -> usize { + unreachable!("Vector should never be packed.") + } + + fn tree_hash_root(&self) -> Vec { + tree_hash::standard_tree_hash::vec_tree_hash_root(self) + } +} + +impl Encodable for TreeHashVector +where + T: Encodable, +{ + fn ssz_append(&self, s: &mut SszStream) { + s.append_vec(self) + } +} + +impl Decodable for TreeHashVector +where + T: Decodable, +{ + fn ssz_decode(bytes: &[u8], index: usize) -> Result<(Self, usize), DecodeError> { + ssz::decode_ssz_list(bytes, index).and_then(|(vec, i)| Ok((vec.into(), i))) + } +} + +impl TestRandom for TreeHashVector +where + U: TestRandom, +{ + fn random_for_test(rng: &mut T) -> Self { + Vec::random_for_test(rng).into() + } +} diff --git 
a/eth2/utils/tree_hash/src/cached_tree_hash/impls/vec.rs b/eth2/utils/tree_hash/src/cached_tree_hash/impls/vec.rs index 6c0970cef..1cd7eb902 100644 --- a/eth2/utils/tree_hash/src/cached_tree_hash/impls/vec.rs +++ b/eth2/utils/tree_hash/src/cached_tree_hash/impls/vec.rs @@ -9,7 +9,7 @@ where TreeHashType::Basic => { TreeHashCache::from_bytes(merkleize(get_packed_leaves(self)?), false) } - TreeHashType::Composite | TreeHashType::List => { + TreeHashType::Container | TreeHashType::List | TreeHashType::Vector => { let subtrees = self .iter() .map(|item| TreeHashCache::new(item)) @@ -23,7 +23,7 @@ where fn tree_hash_cache_overlay(&self, chunk_offset: usize) -> Result { let lengths = match T::tree_hash_type() { TreeHashType::Basic => vec![1; self.len() / T::tree_hash_packing_factor()], - TreeHashType::Composite | TreeHashType::List => { + TreeHashType::Container | TreeHashType::List | TreeHashType::Vector => { let mut lengths = vec![]; for item in self { @@ -97,7 +97,7 @@ where TreeHashCache::from_bytes(leaves, true)?, ); } - TreeHashType::Composite | TreeHashType::List => { + TreeHashType::Container | TreeHashType::List | TreeHashType::Vector => { let mut i = offset_handler.num_leaf_nodes; for &start_chunk in offset_handler.iter_leaf_nodes().rev() { i -= 1; diff --git a/eth2/utils/tree_hash/src/lib.rs b/eth2/utils/tree_hash/src/lib.rs index 7c74c9f97..fe2001002 100644 --- a/eth2/utils/tree_hash/src/lib.rs +++ b/eth2/utils/tree_hash/src/lib.rs @@ -13,8 +13,9 @@ pub use standard_tree_hash::{merkle_root, TreeHash}; #[derive(Debug, PartialEq, Clone)] pub enum TreeHashType { Basic, + Vector, List, - Composite, + Container, } fn num_sanitized_leaves(num_bytes: usize) -> usize { @@ -31,15 +32,15 @@ macro_rules! 
impl_tree_hash_for_ssz_bytes { ($type: ident) => { impl tree_hash::TreeHash for $type { fn tree_hash_type() -> tree_hash::TreeHashType { - tree_hash::TreeHashType::List + tree_hash::TreeHashType::Vector } fn tree_hash_packed_encoding(&self) -> Vec { - panic!("bytesN should never be packed.") + unreachable!("Vector should never be packed.") } fn tree_hash_packing_factor() -> usize { - panic!("bytesN should never be packed.") + unreachable!("Vector should never be packed.") } fn tree_hash_root(&self) -> Vec { @@ -48,3 +49,26 @@ macro_rules! impl_tree_hash_for_ssz_bytes { } }; } + +#[macro_export] +macro_rules! impl_vec_as_fixed_len { + ($type: ty) => { + impl tree_hash::TreeHash for $type { + fn tree_hash_type() -> tree_hash::TreeHashType { + tree_hash::TreeHashType::Vector + } + + fn tree_hash_packed_encoding(&self) -> Vec { + unreachable!("Vector should never be packed.") + } + + fn tree_hash_packing_factor() -> usize { + unreachable!("Vector should never be packed.") + } + + fn tree_hash_root(&self) -> Vec { + tree_hash::standard_tree_hash::vec_tree_hash_root(self) + } + } + }; +} diff --git a/eth2/utils/tree_hash/src/standard_tree_hash.rs b/eth2/utils/tree_hash/src/standard_tree_hash.rs index 473d2a5f0..130c360ed 100644 --- a/eth2/utils/tree_hash/src/standard_tree_hash.rs +++ b/eth2/utils/tree_hash/src/standard_tree_hash.rs @@ -3,6 +3,8 @@ use hashing::hash; use int_to_bytes::int_to_bytes32; use ssz::ssz_encode; +pub use impls::vec_tree_hash_root; + mod impls; pub trait TreeHash { @@ -16,11 +18,18 @@ pub trait TreeHash { } pub fn merkle_root(bytes: &[u8]) -> Vec { - // TODO: replace this with a _more_ efficient fn which is more memory efficient. + // TODO: replace this with a more memory efficient method. efficient_merkleize(&bytes)[0..32].to_vec() } pub fn efficient_merkleize(bytes: &[u8]) -> Vec { + // If the bytes are just one chunk (or less than one chunk) just return them. 
+ if bytes.len() <= HASHSIZE { + let mut o = bytes.to_vec(); + o.resize(HASHSIZE, 0); + return o; + } + let leaves = num_sanitized_leaves(bytes.len()); let nodes = num_nodes(leaves); let internal_nodes = nodes - leaves; @@ -29,10 +38,6 @@ pub fn efficient_merkleize(bytes: &[u8]) -> Vec { let mut o: Vec = vec![0; internal_nodes * HASHSIZE]; - if o.len() < HASHSIZE { - o.resize(HASHSIZE, 0); - } - o.append(&mut bytes.to_vec()); assert_eq!(o.len(), num_bytes); diff --git a/eth2/utils/tree_hash/src/standard_tree_hash/impls.rs b/eth2/utils/tree_hash/src/standard_tree_hash/impls.rs index 749d5b3bb..c3be8d55b 100644 --- a/eth2/utils/tree_hash/src/standard_tree_hash/impls.rs +++ b/eth2/utils/tree_hash/src/standard_tree_hash/impls.rs @@ -50,7 +50,7 @@ impl TreeHash for [u8; 4] { impl TreeHash for H256 { fn tree_hash_type() -> TreeHashType { - TreeHashType::Basic + TreeHashType::Vector } fn tree_hash_packed_encoding(&self) -> Vec { @@ -62,7 +62,7 @@ impl TreeHash for H256 { } fn tree_hash_root(&self) -> Vec { - ssz_encode(self) + merkle_root(&ssz::ssz_encode(self)) } } @@ -83,37 +83,43 @@ where } fn tree_hash_root(&self) -> Vec { - let leaves = match T::tree_hash_type() { - TreeHashType::Basic => { - let mut leaves = - Vec::with_capacity((HASHSIZE / T::tree_hash_packing_factor()) * self.len()); - - for item in self { - leaves.append(&mut item.tree_hash_packed_encoding()); - } - - leaves - } - TreeHashType::Composite | TreeHashType::List => { - let mut leaves = Vec::with_capacity(self.len() * HASHSIZE); - - for item in self { - leaves.append(&mut item.tree_hash_root()) - } - - leaves - } - }; - - // Mix in the length let mut root_and_len = Vec::with_capacity(HASHSIZE * 2); - root_and_len.append(&mut merkle_root(&leaves)); + root_and_len.append(&mut vec_tree_hash_root(self)); root_and_len.append(&mut int_to_bytes32(self.len() as u64)); hash(&root_and_len) } } +pub fn vec_tree_hash_root(vec: &[T]) -> Vec +where + T: TreeHash, +{ + let leaves = match T::tree_hash_type() { + 
TreeHashType::Basic => { + let mut leaves = + Vec::with_capacity((HASHSIZE / T::tree_hash_packing_factor()) * vec.len()); + + for item in vec { + leaves.append(&mut item.tree_hash_packed_encoding()); + } + + leaves + } + TreeHashType::Container | TreeHashType::List | TreeHashType::Vector => { + let mut leaves = Vec::with_capacity(vec.len() * HASHSIZE); + + for item in vec { + leaves.append(&mut item.tree_hash_root()) + } + + leaves + } + }; + + merkle_root(&leaves) +} + #[cfg(test)] mod test { use super::*; diff --git a/eth2/utils/tree_hash/tests/tests.rs b/eth2/utils/tree_hash/tests/tests.rs index db33709ac..4d2c6f282 100644 --- a/eth2/utils/tree_hash/tests/tests.rs +++ b/eth2/utils/tree_hash/tests/tests.rs @@ -13,7 +13,7 @@ pub struct InternalCache { impl TreeHash for InternalCache { fn tree_hash_type() -> TreeHashType { - TreeHashType::Composite + TreeHashType::Container } fn tree_hash_packed_encoding(&self) -> Vec { @@ -146,7 +146,7 @@ pub struct Inner { impl TreeHash for Inner { fn tree_hash_type() -> TreeHashType { - TreeHashType::Composite + TreeHashType::Container } fn tree_hash_packed_encoding(&self) -> Vec { @@ -231,7 +231,7 @@ pub struct Outer { impl TreeHash for Outer { fn tree_hash_type() -> TreeHashType { - TreeHashType::Composite + TreeHashType::Container } fn tree_hash_packed_encoding(&self) -> Vec { @@ -894,11 +894,39 @@ fn vec_of_u64_builds() { let my_vec = vec![1, 2, 3, 4, 5]; + // + // Note: the length is not mixed-in in this example. The user must ensure the length is + // mixed-in. 
+ // + let cache: Vec = TreeHashCache::new(&my_vec).unwrap().into(); assert_eq!(expected, cache); } +#[test] +fn vec_does_mix_in_len() { + let data = join(vec![ + int_to_bytes8(1), + int_to_bytes8(2), + int_to_bytes8(3), + int_to_bytes8(4), + int_to_bytes8(5), + vec![0; 32 - 8], // padding + ]); + + let tree = merkleize(data); + + let my_vec: Vec = vec![1, 2, 3, 4, 5]; + + let mut expected = vec![0; 32]; + expected.copy_from_slice(&tree[0..HASHSIZE]); + expected.append(&mut int_to_bytes32(my_vec.len() as u64)); + let expected = hash(&expected); + + assert_eq!(&expected[0..HASHSIZE], &my_vec.tree_hash_root()[..]); +} + #[test] fn merkleize_odd() { let data = join(vec![ diff --git a/eth2/utils/tree_hash_derive/src/lib.rs b/eth2/utils/tree_hash_derive/src/lib.rs index e3a7b4aaa..4b7761f91 100644 --- a/eth2/utils/tree_hash_derive/src/lib.rs +++ b/eth2/utils/tree_hash_derive/src/lib.rs @@ -129,7 +129,7 @@ pub fn tree_hash_derive(input: TokenStream) -> TokenStream { let output = quote! { impl tree_hash::TreeHash for #name { fn tree_hash_type() -> tree_hash::TreeHashType { - tree_hash::TreeHashType::Composite + tree_hash::TreeHashType::Container } fn tree_hash_packed_encoding(&self) -> Vec { @@ -154,19 +154,6 @@ pub fn tree_hash_derive(input: TokenStream) -> TokenStream { output.into() } -/// Implements `tree_hash::TreeHash` for some `struct`, whilst excluding any fields following and -/// including a field that is of type "Signature" or "AggregateSignature". -/// -/// See: -/// https://github.com/ethereum/eth2.0-specs/blob/master/specs/simple-serialize.md#signed-roots -/// -/// This is a rather horrendous macro, it will read the type of the object as a string and decide -/// if it's a signature by matching that string against "Signature" or "AggregateSignature". So, -/// it's important that you use those exact words as your type -- don't alias it to something else. -/// -/// If you can think of a better way to do this, please make an issue! 
-/// -/// Fields are processed in the order they are defined. #[proc_macro_derive(SignedRoot, attributes(signed_root))] pub fn tree_hash_signed_root_derive(input: TokenStream) -> TokenStream { let item = parse_macro_input!(input as DeriveInput); From 10eeced227fe1be279e2ef92dea1ee8a02d86f5c Mon Sep 17 00:00:00 2001 From: Paul Hauner Date: Wed, 17 Apr 2019 11:18:00 +1000 Subject: [PATCH 58/89] Remove SSZ dep from `tree_hash` --- eth2/utils/tree_hash/Cargo.toml | 1 - .../tree_hash/src/cached_tree_hash/impls.rs | 5 ++-- eth2/utils/tree_hash/src/lib.rs | 23 ---------------- .../utils/tree_hash/src/standard_tree_hash.rs | 1 - .../tree_hash/src/standard_tree_hash/impls.rs | 27 +++++++++++++++---- 5 files changed, 24 insertions(+), 33 deletions(-) diff --git a/eth2/utils/tree_hash/Cargo.toml b/eth2/utils/tree_hash/Cargo.toml index 243a49446..328d91577 100644 --- a/eth2/utils/tree_hash/Cargo.toml +++ b/eth2/utils/tree_hash/Cargo.toml @@ -8,4 +8,3 @@ edition = "2018" ethereum-types = "0.5" hashing = { path = "../hashing" } int_to_bytes = { path = "../int_to_bytes" } -ssz = { path = "../ssz" } diff --git a/eth2/utils/tree_hash/src/cached_tree_hash/impls.rs b/eth2/utils/tree_hash/src/cached_tree_hash/impls.rs index 62d013881..6500e4eff 100644 --- a/eth2/utils/tree_hash/src/cached_tree_hash/impls.rs +++ b/eth2/utils/tree_hash/src/cached_tree_hash/impls.rs @@ -1,13 +1,12 @@ use super::resize::{grow_merkle_cache, shrink_merkle_cache}; use super::*; -use ssz::ssz_encode; mod vec; impl CachedTreeHashSubTree for u64 { fn new_tree_hash_cache(&self) -> Result { Ok(TreeHashCache::from_bytes( - merkleize(ssz_encode(self)), + merkleize(self.to_le_bytes().to_vec()), false, )?) 
} @@ -23,7 +22,7 @@ impl CachedTreeHashSubTree for u64 { chunk: usize, ) -> Result { if self != other { - let leaf = merkleize(ssz_encode(self)); + let leaf = merkleize(self.to_le_bytes().to_vec()); cache.modify_chunk(chunk, &leaf)?; } diff --git a/eth2/utils/tree_hash/src/lib.rs b/eth2/utils/tree_hash/src/lib.rs index fe2001002..fd1708a2d 100644 --- a/eth2/utils/tree_hash/src/lib.rs +++ b/eth2/utils/tree_hash/src/lib.rs @@ -49,26 +49,3 @@ macro_rules! impl_tree_hash_for_ssz_bytes { } }; } - -#[macro_export] -macro_rules! impl_vec_as_fixed_len { - ($type: ty) => { - impl tree_hash::TreeHash for $type { - fn tree_hash_type() -> tree_hash::TreeHashType { - tree_hash::TreeHashType::Vector - } - - fn tree_hash_packed_encoding(&self) -> Vec { - unreachable!("Vector should never be packed.") - } - - fn tree_hash_packing_factor() -> usize { - unreachable!("Vector should never be packed.") - } - - fn tree_hash_root(&self) -> Vec { - tree_hash::standard_tree_hash::vec_tree_hash_root(self) - } - } - }; -} diff --git a/eth2/utils/tree_hash/src/standard_tree_hash.rs b/eth2/utils/tree_hash/src/standard_tree_hash.rs index 130c360ed..812a2c352 100644 --- a/eth2/utils/tree_hash/src/standard_tree_hash.rs +++ b/eth2/utils/tree_hash/src/standard_tree_hash.rs @@ -1,7 +1,6 @@ use super::*; use hashing::hash; use int_to_bytes::int_to_bytes32; -use ssz::ssz_encode; pub use impls::vec_tree_hash_root; diff --git a/eth2/utils/tree_hash/src/standard_tree_hash/impls.rs b/eth2/utils/tree_hash/src/standard_tree_hash/impls.rs index c3be8d55b..be6b4ba07 100644 --- a/eth2/utils/tree_hash/src/standard_tree_hash/impls.rs +++ b/eth2/utils/tree_hash/src/standard_tree_hash/impls.rs @@ -9,7 +9,7 @@ macro_rules! 
impl_for_bitsize { } fn tree_hash_packed_encoding(&self) -> Vec { - ssz_encode(self) + self.to_le_bytes().to_vec() } fn tree_hash_packing_factor() -> usize { @@ -28,7 +28,24 @@ impl_for_bitsize!(u16, 16); impl_for_bitsize!(u32, 32); impl_for_bitsize!(u64, 64); impl_for_bitsize!(usize, 64); -impl_for_bitsize!(bool, 8); + +impl TreeHash for bool { + fn tree_hash_type() -> TreeHashType { + TreeHashType::Basic + } + + fn tree_hash_packed_encoding(&self) -> Vec { + (*self as u8).tree_hash_packed_encoding() + } + + fn tree_hash_packing_factor() -> usize { + u8::tree_hash_packing_factor() + } + + fn tree_hash_root(&self) -> Vec { + int_to_bytes32(*self as u64) + } +} impl TreeHash for [u8; 4] { fn tree_hash_type() -> TreeHashType { @@ -44,7 +61,7 @@ impl TreeHash for [u8; 4] { } fn tree_hash_root(&self) -> Vec { - merkle_root(&ssz::ssz_encode(self)) + merkle_root(&self[..]) } } @@ -54,7 +71,7 @@ impl TreeHash for H256 { } fn tree_hash_packed_encoding(&self) -> Vec { - ssz_encode(self) + self.as_bytes().to_vec() } fn tree_hash_packing_factor() -> usize { @@ -62,7 +79,7 @@ impl TreeHash for H256 { } fn tree_hash_root(&self) -> Vec { - merkle_root(&ssz::ssz_encode(self)) + merkle_root(&self.as_bytes().to_vec()) } } From ea8d5a3db9856eb54f5f8926253448e79834faff Mon Sep 17 00:00:00 2001 From: Paul Hauner Date: Wed, 17 Apr 2019 11:57:34 +1000 Subject: [PATCH 59/89] Ensure deposit uses correct list type --- eth2/types/src/deposit.rs | 4 ++-- eth2/types/src/test_utils/testing_deposit_builder.rs | 2 +- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/eth2/types/src/deposit.rs b/eth2/types/src/deposit.rs index 5eb565c2b..bd3355a3f 100644 --- a/eth2/types/src/deposit.rs +++ b/eth2/types/src/deposit.rs @@ -1,4 +1,4 @@ -use super::{DepositData, Hash256}; +use super::{DepositData, Hash256, TreeHashVector}; use crate::test_utils::TestRandom; use rand::RngCore; use serde_derive::{Deserialize, Serialize}; @@ -11,7 +11,7 @@ use tree_hash_derive::TreeHash; /// Spec v0.5.0 
#[derive(Debug, PartialEq, Clone, Serialize, Deserialize, Encode, Decode, TreeHash, TestRandom)] pub struct Deposit { - pub proof: Vec, + pub proof: TreeHashVector, pub index: u64, pub deposit_data: DepositData, } diff --git a/eth2/types/src/test_utils/testing_deposit_builder.rs b/eth2/types/src/test_utils/testing_deposit_builder.rs index 326858c31..080ed5cfb 100644 --- a/eth2/types/src/test_utils/testing_deposit_builder.rs +++ b/eth2/types/src/test_utils/testing_deposit_builder.rs @@ -12,7 +12,7 @@ impl TestingDepositBuilder { /// Instantiates a new builder. pub fn new(pubkey: PublicKey, amount: u64) -> Self { let deposit = Deposit { - proof: vec![], + proof: vec![].into(), index: 0, deposit_data: DepositData { amount, From 10a5d2657caf402347d257c8b4429716c27355d8 Mon Sep 17 00:00:00 2001 From: Paul Hauner Date: Wed, 17 Apr 2019 11:57:57 +1000 Subject: [PATCH 60/89] Encode bitfield as list not vector --- eth2/utils/bls/src/aggregate_signature.rs | 4 ++-- .../utils/bls/src/fake_aggregate_signature.rs | 4 ++-- eth2/utils/bls/src/fake_signature.rs | 4 ++-- eth2/utils/bls/src/public_key.rs | 4 ++-- eth2/utils/bls/src/secret_key.rs | 4 ++-- eth2/utils/bls/src/signature.rs | 4 ++-- eth2/utils/boolean-bitfield/src/lib.rs | 4 ++-- eth2/utils/tree_hash/src/lib.rs | 24 ++++++++++++++++++- 8 files changed, 37 insertions(+), 15 deletions(-) diff --git a/eth2/utils/bls/src/aggregate_signature.rs b/eth2/utils/bls/src/aggregate_signature.rs index f26bd2db6..0fbcc3493 100644 --- a/eth2/utils/bls/src/aggregate_signature.rs +++ b/eth2/utils/bls/src/aggregate_signature.rs @@ -6,7 +6,7 @@ use serde::de::{Deserialize, Deserializer}; use serde::ser::{Serialize, Serializer}; use serde_hex::{encode as hex_encode, HexVisitor}; use ssz::{decode, Decodable, DecodeError, Encodable, SszStream}; -use tree_hash::impl_tree_hash_for_ssz_bytes; +use tree_hash::tree_hash_ssz_encoding_as_vector; /// A BLS aggregate signature. 
/// @@ -166,7 +166,7 @@ impl<'de> Deserialize<'de> for AggregateSignature { } } -impl_tree_hash_for_ssz_bytes!(AggregateSignature); +tree_hash_ssz_encoding_as_vector!(AggregateSignature); #[cfg(test)] mod tests { diff --git a/eth2/utils/bls/src/fake_aggregate_signature.rs b/eth2/utils/bls/src/fake_aggregate_signature.rs index 602639b6b..f201eba3e 100644 --- a/eth2/utils/bls/src/fake_aggregate_signature.rs +++ b/eth2/utils/bls/src/fake_aggregate_signature.rs @@ -3,7 +3,7 @@ use serde::de::{Deserialize, Deserializer}; use serde::ser::{Serialize, Serializer}; use serde_hex::{encode as hex_encode, PrefixedHexVisitor}; use ssz::{ssz_encode, Decodable, DecodeError, Encodable, SszStream}; -use tree_hash::impl_tree_hash_for_ssz_bytes; +use tree_hash::tree_hash_ssz_encoding_as_vector; /// A BLS aggregate signature. /// @@ -99,7 +99,7 @@ impl<'de> Deserialize<'de> for FakeAggregateSignature { } } -impl_tree_hash_for_ssz_bytes!(FakeAggregateSignature); +tree_hash_ssz_encoding_as_vector!(FakeAggregateSignature); #[cfg(test)] mod tests { diff --git a/eth2/utils/bls/src/fake_signature.rs b/eth2/utils/bls/src/fake_signature.rs index b07dd66a5..3208ed992 100644 --- a/eth2/utils/bls/src/fake_signature.rs +++ b/eth2/utils/bls/src/fake_signature.rs @@ -4,7 +4,7 @@ use serde::de::{Deserialize, Deserializer}; use serde::ser::{Serialize, Serializer}; use serde_hex::HexVisitor; use ssz::{ssz_encode, Decodable, DecodeError, Encodable, SszStream}; -use tree_hash::impl_tree_hash_for_ssz_bytes; +use tree_hash::tree_hash_ssz_encoding_as_vector; /// A single BLS signature. 
/// @@ -74,7 +74,7 @@ impl Decodable for FakeSignature { } } -impl_tree_hash_for_ssz_bytes!(FakeSignature); +tree_hash_ssz_encoding_as_vector!(FakeSignature); impl Serialize for FakeSignature { fn serialize(&self, serializer: S) -> Result diff --git a/eth2/utils/bls/src/public_key.rs b/eth2/utils/bls/src/public_key.rs index a553ee888..dcbbc622a 100644 --- a/eth2/utils/bls/src/public_key.rs +++ b/eth2/utils/bls/src/public_key.rs @@ -7,7 +7,7 @@ use ssz::{decode, ssz_encode, Decodable, DecodeError, Encodable, SszStream}; use std::default; use std::fmt; use std::hash::{Hash, Hasher}; -use tree_hash::impl_tree_hash_for_ssz_bytes; +use tree_hash::tree_hash_ssz_encoding_as_vector; /// A single BLS signature. /// @@ -105,7 +105,7 @@ impl<'de> Deserialize<'de> for PublicKey { } } -impl_tree_hash_for_ssz_bytes!(PublicKey); +tree_hash_ssz_encoding_as_vector!(PublicKey); impl PartialEq for PublicKey { fn eq(&self, other: &PublicKey) -> bool { diff --git a/eth2/utils/bls/src/secret_key.rs b/eth2/utils/bls/src/secret_key.rs index 38fd2d379..d1aaa96da 100644 --- a/eth2/utils/bls/src/secret_key.rs +++ b/eth2/utils/bls/src/secret_key.rs @@ -5,7 +5,7 @@ use serde::de::{Deserialize, Deserializer}; use serde::ser::{Serialize, Serializer}; use serde_hex::HexVisitor; use ssz::{decode, ssz_encode, Decodable, DecodeError, Encodable, SszStream}; -use tree_hash::impl_tree_hash_for_ssz_bytes; +use tree_hash::tree_hash_ssz_encoding_as_vector; /// A single BLS signature. 
/// @@ -70,7 +70,7 @@ impl<'de> Deserialize<'de> for SecretKey { } } -impl_tree_hash_for_ssz_bytes!(SecretKey); +tree_hash_ssz_encoding_as_vector!(SecretKey); #[cfg(test)] mod tests { diff --git a/eth2/utils/bls/src/signature.rs b/eth2/utils/bls/src/signature.rs index cf6c8fe5a..3fb68dc53 100644 --- a/eth2/utils/bls/src/signature.rs +++ b/eth2/utils/bls/src/signature.rs @@ -5,7 +5,7 @@ use serde::de::{Deserialize, Deserializer}; use serde::ser::{Serialize, Serializer}; use serde_hex::HexVisitor; use ssz::{decode, ssz_encode, Decodable, DecodeError, Encodable, SszStream}; -use tree_hash::impl_tree_hash_for_ssz_bytes; +use tree_hash::tree_hash_ssz_encoding_as_vector; /// A single BLS signature. /// @@ -115,7 +115,7 @@ impl Decodable for Signature { } } -impl_tree_hash_for_ssz_bytes!(Signature); +tree_hash_ssz_encoding_as_vector!(Signature); impl Serialize for Signature { /// Serde serialization is compliant the Ethereum YAML test format. diff --git a/eth2/utils/boolean-bitfield/src/lib.rs b/eth2/utils/boolean-bitfield/src/lib.rs index fbd0e2ecd..d35d87c5c 100644 --- a/eth2/utils/boolean-bitfield/src/lib.rs +++ b/eth2/utils/boolean-bitfield/src/lib.rs @@ -9,7 +9,7 @@ use serde_hex::{encode, PrefixedHexVisitor}; use ssz::{Decodable, Encodable}; use std::cmp; use std::default; -use tree_hash::impl_tree_hash_for_ssz_bytes; +use tree_hash::tree_hash_ssz_encoding_as_list; /// A BooleanBitfield represents a set of booleans compactly stored as a vector of bits. /// The BooleanBitfield is given a fixed size during construction. Reads outside of the current size return an out-of-bounds error. Writes outside of the current size expand the size of the set. 
@@ -257,7 +257,7 @@ impl<'de> Deserialize<'de> for BooleanBitfield { } } -impl_tree_hash_for_ssz_bytes!(BooleanBitfield); +tree_hash_ssz_encoding_as_list!(BooleanBitfield); #[cfg(test)] mod tests { diff --git a/eth2/utils/tree_hash/src/lib.rs b/eth2/utils/tree_hash/src/lib.rs index fd1708a2d..ed60079c8 100644 --- a/eth2/utils/tree_hash/src/lib.rs +++ b/eth2/utils/tree_hash/src/lib.rs @@ -28,7 +28,7 @@ fn num_nodes(num_leaves: usize) -> usize { } #[macro_export] -macro_rules! impl_tree_hash_for_ssz_bytes { +macro_rules! tree_hash_ssz_encoding_as_vector { ($type: ident) => { impl tree_hash::TreeHash for $type { fn tree_hash_type() -> tree_hash::TreeHashType { @@ -49,3 +49,25 @@ macro_rules! impl_tree_hash_for_ssz_bytes { } }; } +#[macro_export] +macro_rules! tree_hash_ssz_encoding_as_list { + ($type: ident) => { + impl tree_hash::TreeHash for $type { + fn tree_hash_type() -> tree_hash::TreeHashType { + tree_hash::TreeHashType::List + } + + fn tree_hash_packed_encoding(&self) -> Vec { + unreachable!("List should never be packed.") + } + + fn tree_hash_packing_factor() -> usize { + unreachable!("List should never be packed.") + } + + fn tree_hash_root(&self) -> Vec { + ssz::ssz_encode(self).tree_hash_root() + } + } + }; +} From 8da8730dca4fa43edf0cca37963f475c108fdbf3 Mon Sep 17 00:00:00 2001 From: Michael Sproul Date: Tue, 2 Apr 2019 10:22:19 +1100 Subject: [PATCH 61/89] spec: check ProposalSlashing epochs, not slots As per v0.5.{0,1} of the spec, we only need to check that the epochs of two proposal slashings are equal, not their slots. 
--- eth2/state_processing/src/per_block_processing/errors.rs | 4 ++-- .../src/per_block_processing/verify_proposer_slashing.rs | 5 +++-- 2 files changed, 5 insertions(+), 4 deletions(-) diff --git a/eth2/state_processing/src/per_block_processing/errors.rs b/eth2/state_processing/src/per_block_processing/errors.rs index 6614f6f60..9c36e0238 100644 --- a/eth2/state_processing/src/per_block_processing/errors.rs +++ b/eth2/state_processing/src/per_block_processing/errors.rs @@ -271,10 +271,10 @@ pub enum ProposerSlashingValidationError { pub enum ProposerSlashingInvalid { /// The proposer index is not a known validator. ProposerUnknown(u64), - /// The two proposal have different slots. + /// The two proposal have different epochs. /// /// (proposal_1_slot, proposal_2_slot) - ProposalSlotMismatch(Slot, Slot), + ProposalEpochMismatch(Slot, Slot), /// The proposals are identical and therefore not slashable. ProposalsIdentical, /// The specified proposer has already been slashed. diff --git a/eth2/state_processing/src/per_block_processing/verify_proposer_slashing.rs b/eth2/state_processing/src/per_block_processing/verify_proposer_slashing.rs index 8e0a70f96..b5113863e 100644 --- a/eth2/state_processing/src/per_block_processing/verify_proposer_slashing.rs +++ b/eth2/state_processing/src/per_block_processing/verify_proposer_slashing.rs @@ -21,8 +21,9 @@ pub fn verify_proposer_slashing( })?; verify!( - proposer_slashing.header_1.slot == proposer_slashing.header_2.slot, - Invalid::ProposalSlotMismatch( + proposer_slashing.header_1.slot.epoch(spec.slots_per_epoch) + == proposer_slashing.header_2.slot.epoch(spec.slots_per_epoch), + Invalid::ProposalEpochMismatch( proposer_slashing.header_1.slot, proposer_slashing.header_2.slot ) From 0a02567440f1e77dda40e0904b63077fb238d4e0 Mon Sep 17 00:00:00 2001 From: Michael Sproul Date: Tue, 2 Apr 2019 14:14:57 +1100 Subject: [PATCH 62/89] bitfield: fix bit ordering issue with YAML parsing --- .../src/common/verify_bitfield.rs | 13 ++++++ 
eth2/utils/boolean-bitfield/Cargo.toml | 4 ++ eth2/utils/boolean-bitfield/src/lib.rs | 44 ++++++++++++++++--- 3 files changed, 56 insertions(+), 5 deletions(-) diff --git a/eth2/state_processing/src/common/verify_bitfield.rs b/eth2/state_processing/src/common/verify_bitfield.rs index 03fcdbb67..71c9f9c3e 100644 --- a/eth2/state_processing/src/common/verify_bitfield.rs +++ b/eth2/state_processing/src/common/verify_bitfield.rs @@ -18,3 +18,16 @@ pub fn verify_bitfield_length(bitfield: &Bitfield, committee_size: usize) -> boo true } + +#[cfg(test)] +mod test { + use super::*; + + #[test] + fn bitfield_length() { + assert!(verify_bitfield_length( + &Bitfield::from_bytes(&[0b10000000]), + 4 + )); + } +} diff --git a/eth2/utils/boolean-bitfield/Cargo.toml b/eth2/utils/boolean-bitfield/Cargo.toml index f08695bd1..61bbc60a8 100644 --- a/eth2/utils/boolean-bitfield/Cargo.toml +++ b/eth2/utils/boolean-bitfield/Cargo.toml @@ -8,6 +8,10 @@ edition = "2018" serde_hex = { path = "../serde_hex" } ssz = { path = "../ssz" } bit-vec = "0.5.0" +bit_reverse = "0.1" serde = "1.0" serde_derive = "1.0" tree_hash = { path = "../tree_hash" } + +[dev-dependencies] +serde_yaml = "0.8" diff --git a/eth2/utils/boolean-bitfield/src/lib.rs b/eth2/utils/boolean-bitfield/src/lib.rs index d35d87c5c..c19702ec9 100644 --- a/eth2/utils/boolean-bitfield/src/lib.rs +++ b/eth2/utils/boolean-bitfield/src/lib.rs @@ -1,8 +1,8 @@ extern crate bit_vec; extern crate ssz; +use bit_reverse::LookupReverse; use bit_vec::BitVec; - use serde::de::{Deserialize, Deserializer}; use serde::ser::{Serialize, Serializer}; use serde_hex::{encode, PrefixedHexVisitor}; @@ -236,24 +236,36 @@ impl Decodable for BooleanBitfield { } } +// Reverse the bit order of a whole byte vec, so that the ith bit +// of the input vec is placed in the (N - i)th bit of the output vec. +// This function is necessary for converting bitfields to and from YAML, +// as the BitVec library and the hex-parser use opposing bit orders. 
+fn reverse_bit_order(mut bytes: Vec) -> Vec { + bytes.reverse(); + bytes.into_iter().map(|b| b.swap_bits()).collect() +} + impl Serialize for BooleanBitfield { - /// Serde serialization is compliant the Ethereum YAML test format. + /// Serde serialization is compliant with the Ethereum YAML test format. fn serialize(&self, serializer: S) -> Result where S: Serializer, { - serializer.serialize_str(&encode(&self.to_bytes())) + serializer.serialize_str(&encode(&reverse_bit_order(self.to_bytes()))) } } impl<'de> Deserialize<'de> for BooleanBitfield { - /// Serde serialization is compliant the Ethereum YAML test format. + /// Serde serialization is compliant with the Ethereum YAML test format. fn deserialize(deserializer: D) -> Result where D: Deserializer<'de>, { + // We reverse the bit-order so that the BitVec library can read its 0th + // bit from the end of the hex string, e.g. + // "0xef01" => [0xef, 0x01] => [0b1000_0000, 0b1111_1110] let bytes = deserializer.deserialize_str(PrefixedHexVisitor)?; - Ok(BooleanBitfield::from_bytes(&bytes)) + Ok(BooleanBitfield::from_bytes(&reverse_bit_order(bytes))) } } @@ -262,6 +274,7 @@ tree_hash_ssz_encoding_as_list!(BooleanBitfield); #[cfg(test)] mod tests { use super::*; + use serde_yaml; use ssz::{decode, ssz_encode, SszStream}; #[test] @@ -462,6 +475,27 @@ mod tests { assert_eq!(field, expected); } + #[test] + fn test_serialize_deserialize() { + use serde_yaml::Value; + + let data: &[(_, &[_])] = &[ + ("0x01", &[0b10000000]), + ("0xf301", &[0b10000000, 0b11001111]), + ]; + for (hex_data, bytes) in data { + let bitfield = BooleanBitfield::from_bytes(bytes); + assert_eq!( + serde_yaml::from_str::(hex_data).unwrap(), + bitfield + ); + assert_eq!( + serde_yaml::to_value(&bitfield).unwrap(), + Value::String(hex_data.to_string()) + ); + } + } + #[test] fn test_ssz_round_trip() { let original = BooleanBitfield::from_bytes(&vec![18; 12][..]); From 300fcd6ec3587ab6377acc210693c2f31bc94395 Mon Sep 17 00:00:00 2001 From: Michael 
Sproul Date: Tue, 2 Apr 2019 14:17:41 +1100 Subject: [PATCH 63/89] state transition test progress --- eth2/state_processing/tests/tests.rs | 74 +++++++++------------ eth2/state_processing/yaml_utils/Cargo.toml | 1 - eth2/state_processing/yaml_utils/build.rs | 1 - 3 files changed, 32 insertions(+), 44 deletions(-) diff --git a/eth2/state_processing/tests/tests.rs b/eth2/state_processing/tests/tests.rs index 1359508dc..03401b2c7 100644 --- a/eth2/state_processing/tests/tests.rs +++ b/eth2/state_processing/tests/tests.rs @@ -27,61 +27,28 @@ pub struct TestDoc { pub test_cases: Vec, } -#[test] -fn test_read_yaml() { - // Test sanity-check_small-config_32-vals.yaml +fn load_test_case(test_name: &str) -> TestDoc { let mut file = { let mut file_path_buf = PathBuf::from(env!("CARGO_MANIFEST_DIR")); - file_path_buf.push("yaml_utils/specs/sanity-check_small-config_32-vals.yaml"); + file_path_buf.push(format!("yaml_utils/specs/{}", test_name)); File::open(file_path_buf).unwrap() }; - let mut yaml_str = String::new(); - file.read_to_string(&mut yaml_str).unwrap(); - yaml_str = yaml_str.to_lowercase(); - let _doc: TestDoc = serde_yaml::from_str(&yaml_str.as_str()).unwrap(); - - // Test sanity-check_default-config_100-vals.yaml - file = { - let mut file_path_buf = PathBuf::from(env!("CARGO_MANIFEST_DIR")); - file_path_buf.push("yaml_utils/specs/sanity-check_default-config_100-vals.yaml"); - - File::open(file_path_buf).unwrap() - }; - - yaml_str = String::new(); - - file.read_to_string(&mut yaml_str).unwrap(); - - yaml_str = yaml_str.to_lowercase(); - - let _doc: TestDoc = serde_yaml::from_str(&yaml_str.as_str()).unwrap(); + serde_yaml::from_str(&yaml_str.as_str()).unwrap() } -#[test] -#[cfg(not(debug_assertions))] -fn run_state_transition_tests_small() { - // Test sanity-check_small-config_32-vals.yaml - let mut file = { - let mut file_path_buf = PathBuf::from(env!("CARGO_MANIFEST_DIR")); - file_path_buf.push("yaml_utils/specs/sanity-check_small-config_32-vals.yaml"); - - 
File::open(file_path_buf).unwrap() - }; - let mut yaml_str = String::new(); - file.read_to_string(&mut yaml_str).unwrap(); - yaml_str = yaml_str.to_lowercase(); - - let doc: TestDoc = serde_yaml::from_str(&yaml_str.as_str()).unwrap(); +fn run_state_transition_test(test_name: &str) { + let doc = load_test_case(test_name); // Run Tests + let mut ok = true; for (i, test_case) in doc.test_cases.iter().enumerate() { let mut state = test_case.initial_state.clone(); - for block in test_case.blocks.iter() { + for (j, block) in test_case.blocks.iter().enumerate() { while block.slot > state.slot { let latest_block_header = state.latest_block_header.clone(); per_slot_processing(&mut state, &latest_block_header, &test_case.config).unwrap(); @@ -89,8 +56,9 @@ fn run_state_transition_tests_small() { if test_case.verify_signatures { let res = per_block_processing(&mut state, &block, &test_case.config); if res.is_err() { - println!("{:?}", i); + println!("Error in {} (#{}), on block {}", test_case.name, i, j); println!("{:?}", res); + ok = false; }; } else { let res = per_block_processing_without_verifying_block_signature( @@ -99,10 +67,32 @@ fn run_state_transition_tests_small() { &test_case.config, ); if res.is_err() { - println!("{:?}", i); + println!("Error in {} (#{}), on block {}", test_case.name, i, j); println!("{:?}", res); + ok = false; } } } } + + assert!(ok, "one or more tests failed, see above"); +} + +#[test] +#[cfg(not(debug_assertions))] +fn test_read_yaml() { + load_test_case("sanity-check_small-config_32-vals.yaml"); + load_test_case("sanity-check_default-config_100-vals.yaml"); +} + +#[test] +#[cfg(not(debug_assertions))] +fn run_state_transition_tests_small() { + run_state_transition_test("sanity-check_small-config_32-vals.yaml"); +} + +#[test] +#[cfg(not(debug_assertions))] +fn run_state_transition_tests_large() { + run_state_transition_test("sanity-check_default-config_100-vals.yaml"); } diff --git a/eth2/state_processing/yaml_utils/Cargo.toml 
b/eth2/state_processing/yaml_utils/Cargo.toml index 4a7ae5b89..5f216fe1a 100644 --- a/eth2/state_processing/yaml_utils/Cargo.toml +++ b/eth2/state_processing/yaml_utils/Cargo.toml @@ -6,7 +6,6 @@ edition = "2018" [build-dependencies] reqwest = "0.9" -tempdir = "0.3" [dependencies] diff --git a/eth2/state_processing/yaml_utils/build.rs b/eth2/state_processing/yaml_utils/build.rs index 3b7f31471..7fb652cc1 100644 --- a/eth2/state_processing/yaml_utils/build.rs +++ b/eth2/state_processing/yaml_utils/build.rs @@ -1,5 +1,4 @@ extern crate reqwest; -extern crate tempdir; use std::fs::File; use std::io::copy; From 71a0fed8eb08eba866f553a658e142c8321594a0 Mon Sep 17 00:00:00 2001 From: Michael Sproul Date: Tue, 2 Apr 2019 17:51:12 +1100 Subject: [PATCH 64/89] testing: add a `fake_crypto` feature --- eth2/state_processing/Cargo.toml | 3 +++ eth2/state_processing/build.rs | 1 + eth2/state_processing/tests/tests.rs | 35 +++++++++++++--------------- eth2/utils/bls/Cargo.toml | 3 +++ eth2/utils/bls/build.rs | 19 +++++++++++++++ eth2/utils/bls/src/lib.rs | 16 ++++++------- 6 files changed, 50 insertions(+), 27 deletions(-) create mode 120000 eth2/state_processing/build.rs create mode 100644 eth2/utils/bls/build.rs diff --git a/eth2/state_processing/Cargo.toml b/eth2/state_processing/Cargo.toml index 1bc7a6c45..a2ae11aa8 100644 --- a/eth2/state_processing/Cargo.toml +++ b/eth2/state_processing/Cargo.toml @@ -30,3 +30,6 @@ tree_hash = { path = "../utils/tree_hash" } tree_hash_derive = { path = "../utils/tree_hash_derive" } types = { path = "../types" } rayon = "1.0" + +[features] +fake_crypto = ["bls/fake_crypto"] diff --git a/eth2/state_processing/build.rs b/eth2/state_processing/build.rs new file mode 120000 index 000000000..70d6c75b9 --- /dev/null +++ b/eth2/state_processing/build.rs @@ -0,0 +1 @@ +../utils/bls/build.rs \ No newline at end of file diff --git a/eth2/state_processing/tests/tests.rs b/eth2/state_processing/tests/tests.rs index 03401b2c7..6ea8863b8 100644 --- 
a/eth2/state_processing/tests/tests.rs +++ b/eth2/state_processing/tests/tests.rs @@ -47,31 +47,28 @@ fn run_state_transition_test(test_name: &str) { // Run Tests let mut ok = true; for (i, test_case) in doc.test_cases.iter().enumerate() { + let fake_crypto = cfg!(feature = "fake_crypto"); + if !test_case.verify_signatures == fake_crypto { + println!("Running {}", test_case.name); + } else { + println!( + "Skipping {} (fake_crypto: {}, need fake: {})", + test_case.name, fake_crypto, !test_case.verify_signatures + ); + continue; + } let mut state = test_case.initial_state.clone(); for (j, block) in test_case.blocks.iter().enumerate() { while block.slot > state.slot { let latest_block_header = state.latest_block_header.clone(); per_slot_processing(&mut state, &latest_block_header, &test_case.config).unwrap(); } - if test_case.verify_signatures { - let res = per_block_processing(&mut state, &block, &test_case.config); - if res.is_err() { - println!("Error in {} (#{}), on block {}", test_case.name, i, j); - println!("{:?}", res); - ok = false; - }; - } else { - let res = per_block_processing_without_verifying_block_signature( - &mut state, - &block, - &test_case.config, - ); - if res.is_err() { - println!("Error in {} (#{}), on block {}", test_case.name, i, j); - println!("{:?}", res); - ok = false; - } - } + let res = per_block_processing(&mut state, &block, &test_case.config); + if res.is_err() { + println!("Error in {} (#{}), on block {}", test_case.name, i, j); + println!("{:?}", res); + ok = false; + }; } } diff --git a/eth2/utils/bls/Cargo.toml b/eth2/utils/bls/Cargo.toml index 439debdcb..4ce499580 100644 --- a/eth2/utils/bls/Cargo.toml +++ b/eth2/utils/bls/Cargo.toml @@ -13,3 +13,6 @@ serde_derive = "1.0" serde_hex = { path = "../serde_hex" } ssz = { path = "../ssz" } tree_hash = { path = "../tree_hash" } + +[features] +fake_crypto = [] diff --git a/eth2/utils/bls/build.rs b/eth2/utils/bls/build.rs new file mode 100644 index 000000000..7f08a1ed5 --- /dev/null 
+++ b/eth2/utils/bls/build.rs @@ -0,0 +1,19 @@ +// This build script is symlinked from each project that requires BLS's "fake crypto", +// so that the `fake_crypto` feature of every sub-crate can be turned on by running +// with FAKE_CRYPTO=1 from the top-level workspace. +// At some point in the future it might be possible to do: +// $ cargo test --all --release --features fake_crypto +// but at the present time this doesn't work. +// Related: https://github.com/rust-lang/cargo/issues/5364 +fn main() { + if let Ok(fake_crypto) = std::env::var("FAKE_CRYPTO") { + if fake_crypto == "1" { + println!("cargo:rustc-cfg=feature=\"fake_crypto\""); + println!("cargo:rerun-if-env-changed=FAKE_CRYPTO"); + println!( + "cargo:warning=[{}]: Compiled with fake BLS cryptography. DO NOT USE, TESTING ONLY", + std::env::var("CARGO_PKG_NAME").unwrap() + ); + } + } +} diff --git a/eth2/utils/bls/src/lib.rs b/eth2/utils/bls/src/lib.rs index b9a4d5c1d..fae41aeed 100644 --- a/eth2/utils/bls/src/lib.rs +++ b/eth2/utils/bls/src/lib.rs @@ -6,22 +6,22 @@ mod keypair; mod public_key; mod secret_key; -#[cfg(not(debug_assertions))] +#[cfg(not(feature = "fake_crypto"))] mod aggregate_signature; -#[cfg(not(debug_assertions))] +#[cfg(not(feature = "fake_crypto"))] mod signature; -#[cfg(not(debug_assertions))] +#[cfg(not(feature = "fake_crypto"))] pub use crate::aggregate_signature::AggregateSignature; -#[cfg(not(debug_assertions))] +#[cfg(not(feature = "fake_crypto"))] pub use crate::signature::Signature; -#[cfg(debug_assertions)] +#[cfg(feature = "fake_crypto")] mod fake_aggregate_signature; -#[cfg(debug_assertions)] +#[cfg(feature = "fake_crypto")] mod fake_signature; -#[cfg(debug_assertions)] +#[cfg(feature = "fake_crypto")] pub use crate::fake_aggregate_signature::FakeAggregateSignature as AggregateSignature; -#[cfg(debug_assertions)] +#[cfg(feature = "fake_crypto")] pub use crate::fake_signature::FakeSignature as Signature; pub use crate::aggregate_public_key::AggregatePublicKey; From 
b21cc64949e5abeb6f9d9805daa4e5449477367e Mon Sep 17 00:00:00 2001 From: Michael Sproul Date: Tue, 2 Apr 2019 18:46:08 +1100 Subject: [PATCH 65/89] state transition tests: check expected state --- eth2/state_processing/tests/tests.rs | 52 ++++++++++++++++++++++++++++ 1 file changed, 52 insertions(+) diff --git a/eth2/state_processing/tests/tests.rs b/eth2/state_processing/tests/tests.rs index 6ea8863b8..193511852 100644 --- a/eth2/state_processing/tests/tests.rs +++ b/eth2/state_processing/tests/tests.rs @@ -10,6 +10,45 @@ use types::*; #[allow(unused_imports)] use yaml_utils; +#[derive(Debug, Deserialize)] +pub struct ExpectedState { + pub slot: Option, + pub genesis_time: Option, + pub fork: Option, + pub validator_registry: Option>, + pub validator_balances: Option>, +} + +impl ExpectedState { + // Return a list of fields that differ, and a string representation of the beacon state's field. + fn check(&self, state: &BeaconState) -> Vec<(&str, String)> { + // Check field equality + macro_rules! 
cfe { + ($field_name:ident) => { + if self.$field_name.as_ref().map_or(true, |$field_name| { + println!(" > Checking {}", stringify!($field_name)); + $field_name == &state.$field_name + }) { + vec![] + } else { + vec![(stringify!($field_name), format!("{:#?}", state.$field_name))] + } + }; + } + + vec![ + cfe!(slot), + cfe!(genesis_time), + cfe!(fork), + cfe!(validator_registry), + cfe!(validator_balances), + ] + .into_iter() + .flat_map(|x| x) + .collect() + } +} + #[derive(Debug, Deserialize)] pub struct TestCase { pub name: String, @@ -17,6 +56,7 @@ pub struct TestCase { pub verify_signatures: bool, pub initial_state: BeaconState, pub blocks: Vec, + pub expected_state: ExpectedState, } #[derive(Debug, Deserialize)] @@ -70,6 +110,18 @@ fn run_state_transition_test(test_name: &str) { ok = false; }; } + + let mismatched_fields = test_case.expected_state.check(&state); + if !mismatched_fields.is_empty() { + println!( + "Error in expected state, these fields didn't match: {:?}", + mismatched_fields.iter().map(|(f, _)| f).collect::>() + ); + for (field_name, state_val) in mismatched_fields { + println!("state.{} was: {}", field_name, state_val); + } + ok = false; + } } assert!(ok, "one or more tests failed, see above"); From 19fad1012f414de90f261bbfcc4beb7805af3b92 Mon Sep 17 00:00:00 2001 From: Michael Sproul Date: Wed, 3 Apr 2019 17:14:11 +1100 Subject: [PATCH 66/89] state transitions tests: check more fields --- eth2/state_processing/tests/tests.rs | 19 ++++++++++++------- .../yaml_utils/expected_state_fields.py | 15 +++++++++++++++ eth2/types/src/beacon_state.rs | 2 +- 3 files changed, 28 insertions(+), 8 deletions(-) create mode 100755 eth2/state_processing/yaml_utils/expected_state_fields.py diff --git a/eth2/state_processing/tests/tests.rs b/eth2/state_processing/tests/tests.rs index 193511852..54fd6bf8d 100644 --- a/eth2/state_processing/tests/tests.rs +++ b/eth2/state_processing/tests/tests.rs @@ -1,14 +1,9 @@ use serde_derive::Deserialize; use serde_yaml; 
#[cfg(not(debug_assertions))] -use state_processing::{ - per_block_processing, per_block_processing_without_verifying_block_signature, - per_slot_processing, -}; +use state_processing::{per_block_processing, per_slot_processing}; use std::{fs::File, io::prelude::*, path::PathBuf}; use types::*; -#[allow(unused_imports)] -use yaml_utils; #[derive(Debug, Deserialize)] pub struct ExpectedState { @@ -17,6 +12,11 @@ pub struct ExpectedState { pub fork: Option, pub validator_registry: Option>, pub validator_balances: Option>, + pub previous_epoch_attestations: Option>, + pub current_epoch_attestations: Option>, + pub historical_roots: Option>, + pub finalized_epoch: Option, + pub latest_block_roots: Option>, } impl ExpectedState { @@ -42,6 +42,11 @@ impl ExpectedState { cfe!(fork), cfe!(validator_registry), cfe!(validator_balances), + cfe!(previous_epoch_attestations), + cfe!(current_epoch_attestations), + cfe!(historical_roots), + cfe!(finalized_epoch), + cfe!(latest_block_roots), ] .into_iter() .flat_map(|x| x) @@ -108,7 +113,7 @@ fn run_state_transition_test(test_name: &str) { println!("Error in {} (#{}), on block {}", test_case.name, i, j); println!("{:?}", res); ok = false; - }; + } } let mismatched_fields = test_case.expected_state.check(&state); diff --git a/eth2/state_processing/yaml_utils/expected_state_fields.py b/eth2/state_processing/yaml_utils/expected_state_fields.py new file mode 100755 index 000000000..df4cb83f7 --- /dev/null +++ b/eth2/state_processing/yaml_utils/expected_state_fields.py @@ -0,0 +1,15 @@ +#!/usr/bin/env python3 + +# Script to extract all the fields of the state mentioned in `expected_state` fields of tests +# in the `spec` directory. These fields can then be added to the `ExpectedState` struct. +# Might take a while to run. 
+ +import os, yaml + +if __name__ == "__main__": + yaml_files = (filename for filename in os.listdir("specs") if filename.endswith(".yaml")) + parsed_yaml = (yaml.load(open("specs/" + filename, "r")) for filename in yaml_files) + all_fields = set() + for y in parsed_yaml: + all_fields.update(*({key for key in case["expected_state"]} for case in y["test_cases"])) + print(all_fields) diff --git a/eth2/types/src/beacon_state.rs b/eth2/types/src/beacon_state.rs index c068c4e03..0461e947b 100644 --- a/eth2/types/src/beacon_state.rs +++ b/eth2/types/src/beacon_state.rs @@ -81,7 +81,7 @@ pub struct BeaconState { // Recent state pub latest_crosslinks: TreeHashVector, - latest_block_roots: TreeHashVector, + pub latest_block_roots: TreeHashVector, latest_state_roots: TreeHashVector, latest_active_index_roots: TreeHashVector, latest_slashed_balances: TreeHashVector, From b801303374bd49f0985a7d63eb965ffc26e6c57d Mon Sep 17 00:00:00 2001 From: Michael Sproul Date: Wed, 3 Apr 2019 17:15:07 +1100 Subject: [PATCH 67/89] spec: fix shuffle direction in get_crosslink_committees_at_slot --- eth2/types/src/beacon_state/epoch_cache.rs | 2 +- eth2/types/src/beacon_state/epoch_cache/tests.rs | 2 +- eth2/utils/swap_or_not_shuffle/src/shuffle_list.rs | 2 ++ 3 files changed, 4 insertions(+), 2 deletions(-) diff --git a/eth2/types/src/beacon_state/epoch_cache.rs b/eth2/types/src/beacon_state/epoch_cache.rs index 62df90271..dd9ae3403 100644 --- a/eth2/types/src/beacon_state/epoch_cache.rs +++ b/eth2/types/src/beacon_state/epoch_cache.rs @@ -288,7 +288,7 @@ impl EpochCrosslinkCommitteesBuilder { self.active_validator_indices, spec.shuffle_round_count, &self.shuffling_seed[..], - true, + false, ) .ok_or_else(|| Error::UnableToShuffle)? 
}; diff --git a/eth2/types/src/beacon_state/epoch_cache/tests.rs b/eth2/types/src/beacon_state/epoch_cache/tests.rs index 5643776e2..5b1e53338 100644 --- a/eth2/types/src/beacon_state/epoch_cache/tests.rs +++ b/eth2/types/src/beacon_state/epoch_cache/tests.rs @@ -27,7 +27,7 @@ fn do_sane_cache_test( active_indices, spec.shuffle_round_count, &expected_seed[..], - true, + false, ) .unwrap(); diff --git a/eth2/utils/swap_or_not_shuffle/src/shuffle_list.rs b/eth2/utils/swap_or_not_shuffle/src/shuffle_list.rs index e7e1e18e6..f60d793f2 100644 --- a/eth2/utils/swap_or_not_shuffle/src/shuffle_list.rs +++ b/eth2/utils/swap_or_not_shuffle/src/shuffle_list.rs @@ -18,6 +18,8 @@ const TOTAL_SIZE: usize = SEED_SIZE + ROUND_SIZE + POSITION_WINDOW_SIZE; /// Credits to [@protolambda](https://github.com/protolambda) for defining this algorithm. /// /// Shuffles if `forwards == true`, otherwise un-shuffles. +/// It holds that: shuffle_list(shuffle_list(l, r, s, true), r, s, false) == l +/// and: shuffle_list(shuffle_list(l, r, s, false), r, s, true) == l /// /// Returns `None` under any of the following conditions: /// - `list_size == 0` From 32547373e528e8ba584f349f99f7523e8d14a175 Mon Sep 17 00:00:00 2001 From: Michael Sproul Date: Mon, 8 Apr 2019 14:37:01 +1000 Subject: [PATCH 68/89] spec: simplify `cache_state` The `latest_block_root` input argument was unnecessary as we were always setting it to something almost equivalent to `state.latest_block_root` anyway, and more importantly, it was messing up the caching of the state root. Previously it was possible for the function to update the state's latest block root, and then hash the outdated block root that was passed in as an argument. 
--- beacon_node/beacon_chain/src/beacon_chain.rs | 11 +++-------- eth2/state_processing/src/per_slot_processing.rs | 16 ++++------------ eth2/state_processing/tests/tests.rs | 3 +-- 3 files changed, 8 insertions(+), 22 deletions(-) diff --git a/beacon_node/beacon_chain/src/beacon_chain.rs b/beacon_node/beacon_chain/src/beacon_chain.rs index a22f4179e..41a718655 100644 --- a/beacon_node/beacon_chain/src/beacon_chain.rs +++ b/beacon_node/beacon_chain/src/beacon_chain.rs @@ -303,8 +303,6 @@ where /// then having it iteratively updated -- in such a case it's possible for another thread to /// find the state at an old slot. pub fn update_state(&self, mut state: BeaconState) -> Result<(), Error> { - let latest_block_header = self.head().beacon_block.block_header(); - let present_slot = match self.slot_clock.present_slot() { Ok(Some(slot)) => slot, _ => return Err(Error::UnableToReadSlot), @@ -312,7 +310,7 @@ where // If required, transition the new state to the present slot. for _ in state.slot.as_u64()..present_slot.as_u64() { - per_slot_processing(&mut state, &latest_block_header, &self.spec)?; + per_slot_processing(&mut state, &self.spec)?; } state.build_all_caches(&self.spec)?; @@ -324,8 +322,6 @@ where /// Ensures the current canonical `BeaconState` has been transitioned to match the `slot_clock`. pub fn catchup_state(&self) -> Result<(), Error> { - let latest_block_header = self.head().beacon_block.block_header(); - let present_slot = match self.slot_clock.present_slot() { Ok(Some(slot)) => slot, _ => return Err(Error::UnableToReadSlot), @@ -339,7 +335,7 @@ where state.build_epoch_cache(RelativeEpoch::NextWithoutRegistryChange, &self.spec)?; state.build_epoch_cache(RelativeEpoch::NextWithRegistryChange, &self.spec)?; - per_slot_processing(&mut *state, &latest_block_header, &self.spec)?; + per_slot_processing(&mut *state, &self.spec)?; } state.build_all_caches(&self.spec)?; @@ -617,9 +613,8 @@ where // Transition the parent state to the block slot. 
let mut state = parent_state; - let previous_block_header = parent_block.block_header(); for _ in state.slot.as_u64()..block.slot.as_u64() { - if let Err(e) = per_slot_processing(&mut state, &previous_block_header, &self.spec) { + if let Err(e) = per_slot_processing(&mut state, &self.spec) { return Ok(BlockProcessingOutcome::InvalidBlock( InvalidBlock::SlotProcessingError(e), )); diff --git a/eth2/state_processing/src/per_slot_processing.rs b/eth2/state_processing/src/per_slot_processing.rs index cd129a5f1..7d2bb468f 100644 --- a/eth2/state_processing/src/per_slot_processing.rs +++ b/eth2/state_processing/src/per_slot_processing.rs @@ -11,12 +11,8 @@ pub enum Error { /// Advances a state forward by one slot, performing per-epoch processing if required. /// /// Spec v0.5.0 -pub fn per_slot_processing( - state: &mut BeaconState, - latest_block_header: &BeaconBlockHeader, - spec: &ChainSpec, -) -> Result<(), Error> { - cache_state(state, latest_block_header, spec)?; +pub fn per_slot_processing(state: &mut BeaconState, spec: &ChainSpec) -> Result<(), Error> { + cache_state(state, spec)?; if (state.slot + 1) % spec.slots_per_epoch == 0 { per_epoch_processing(state, spec)?; @@ -27,11 +23,7 @@ pub fn per_slot_processing( Ok(()) } -fn cache_state( - state: &mut BeaconState, - latest_block_header: &BeaconBlockHeader, - spec: &ChainSpec, -) -> Result<(), Error> { +fn cache_state(state: &mut BeaconState, spec: &ChainSpec) -> Result<(), Error> { let previous_slot_state_root = Hash256::from_slice(&state.tree_hash_root()[..]); // Note: increment the state slot here to allow use of our `state_root` and `block_root` @@ -46,7 +38,7 @@ fn cache_state( state.latest_block_header.state_root = previous_slot_state_root } - let latest_block_root = Hash256::from_slice(&latest_block_header.tree_hash_root()[..]); + let latest_block_root = Hash256::from_slice(&state.latest_block_header.tree_hash_root()[..]); state.set_block_root(previous_slot, latest_block_root, spec)?; // Set the state slot 
back to what it should be. diff --git a/eth2/state_processing/tests/tests.rs b/eth2/state_processing/tests/tests.rs index 54fd6bf8d..d305b2d3c 100644 --- a/eth2/state_processing/tests/tests.rs +++ b/eth2/state_processing/tests/tests.rs @@ -105,8 +105,7 @@ fn run_state_transition_test(test_name: &str) { let mut state = test_case.initial_state.clone(); for (j, block) in test_case.blocks.iter().enumerate() { while block.slot > state.slot { - let latest_block_header = state.latest_block_header.clone(); - per_slot_processing(&mut state, &latest_block_header, &test_case.config).unwrap(); + per_slot_processing(&mut state, &test_case.config).unwrap(); } let res = per_block_processing(&mut state, &block, &test_case.config); if res.is_err() { From a19f8580f51aca0d1e608342b7a89f94c35dda02 Mon Sep 17 00:00:00 2001 From: Michael Sproul Date: Mon, 15 Apr 2019 09:33:54 +1000 Subject: [PATCH 69/89] travis: state transition tests --- .travis.yml | 1 + eth2/state_processing/tests/tests.rs | 3 ++- 2 files changed, 3 insertions(+), 1 deletion(-) diff --git a/.travis.yml b/.travis.yml index e725aa0ba..6233ea68b 100644 --- a/.travis.yml +++ b/.travis.yml @@ -11,6 +11,7 @@ script: - cargo build --verbose --release --all - cargo test --verbose --all - cargo test --verbose --release --all + - cargo test --manifest-path eth2/state_processing/Cargo.toml --verbose --release --features fake_crypto - cargo fmt --all -- --check # No clippy until later... 
#- cargo clippy diff --git a/eth2/state_processing/tests/tests.rs b/eth2/state_processing/tests/tests.rs index d305b2d3c..cdad99062 100644 --- a/eth2/state_processing/tests/tests.rs +++ b/eth2/state_processing/tests/tests.rs @@ -144,8 +144,9 @@ fn run_state_transition_tests_small() { run_state_transition_test("sanity-check_small-config_32-vals.yaml"); } +// Run with --ignored to run this test #[test] -#[cfg(not(debug_assertions))] +#[ignored] fn run_state_transition_tests_large() { run_state_transition_test("sanity-check_default-config_100-vals.yaml"); } From 4f63c89bb649209b9228512533eaf0365cee544a Mon Sep 17 00:00:00 2001 From: Michael Sproul Date: Mon, 15 Apr 2019 09:38:04 +1000 Subject: [PATCH 70/89] jenkins: run all state tests --- Jenkinsfile | 3 +++ eth2/state_processing/tests/tests.rs | 2 +- 2 files changed, 4 insertions(+), 1 deletion(-) diff --git a/Jenkinsfile b/Jenkinsfile index d12189941..48a07e1e7 100644 --- a/Jenkinsfile +++ b/Jenkinsfile @@ -23,6 +23,9 @@ pipeline { steps { sh 'cargo test --verbose --all' sh 'cargo test --verbose --all --release' + sh 'cargo test --manifest-path eth2/state_processing/Cargo.toml --verbose \ + --release --features fake_crypto --ignored' + } } } diff --git a/eth2/state_processing/tests/tests.rs b/eth2/state_processing/tests/tests.rs index cdad99062..6491e255a 100644 --- a/eth2/state_processing/tests/tests.rs +++ b/eth2/state_processing/tests/tests.rs @@ -146,7 +146,7 @@ fn run_state_transition_tests_small() { // Run with --ignored to run this test #[test] -#[ignored] +#[ignore] fn run_state_transition_tests_large() { run_state_transition_test("sanity-check_default-config_100-vals.yaml"); } From 2914d77cd38d19abc88249955cb8b0c1ee3392b2 Mon Sep 17 00:00:00 2001 From: Michael Sproul Date: Tue, 2 Apr 2019 14:18:07 +1100 Subject: [PATCH 71/89] spec: update to v0.5.1 --- eth2/state_processing/src/per_block_processing.rs | 7 ++----- 1 file changed, 2 insertions(+), 5 deletions(-) diff --git 
a/eth2/state_processing/src/per_block_processing.rs b/eth2/state_processing/src/per_block_processing.rs index 5afddc74e..6eafcb937 100644 --- a/eth2/state_processing/src/per_block_processing.rs +++ b/eth2/state_processing/src/per_block_processing.rs @@ -99,7 +99,7 @@ fn per_block_processing_signature_optional( /// Processes the block header. /// -/// Spec v0.5.0 +/// Spec v0.5.1 pub fn process_block_header( state: &mut BeaconState, block: &BeaconBlock, @@ -107,11 +107,8 @@ pub fn process_block_header( ) -> Result<(), Error> { verify!(block.slot == state.slot, Invalid::StateSlotMismatch); - // NOTE: this is not to spec. I think spec is broken. See: - // - // https://github.com/ethereum/eth2.0-specs/issues/797 verify!( - block.previous_block_root == *state.get_block_root(state.slot - 1, spec)?, + block.previous_block_root == Hash256::from_slice(&state.latest_block_header.signed_root()), Invalid::ParentBlockRootMismatch ); From d95ae95ce8a882b8a33f676262f5549765be9bf3 Mon Sep 17 00:00:00 2001 From: Michael Sproul Date: Mon, 15 Apr 2019 10:51:20 +1000 Subject: [PATCH 72/89] spec: update tags to v0.5.1 --- .../src/common/exit_validator.rs | 2 +- .../src/common/slash_validator.rs | 2 +- .../src/common/verify_bitfield.rs | 2 +- .../state_processing/src/get_genesis_state.rs | 2 +- .../src/per_block_processing.rs | 24 +++---- .../validate_attestation.rs | 10 +-- .../verify_attester_slashing.rs | 4 +- .../per_block_processing/verify_deposit.rs | 10 +-- .../src/per_block_processing/verify_exit.rs | 2 +- .../verify_proposer_slashing.rs | 4 +- .../verify_slashable_attestation.rs | 2 +- .../per_block_processing/verify_transfer.rs | 4 +- .../src/per_epoch_processing.rs | 10 +-- .../src/per_epoch_processing/apply_rewards.rs | 18 ++--- .../get_attestation_participants.rs | 2 +- .../inclusion_distance.rs | 6 +- .../per_epoch_processing/process_ejections.rs | 2 +- .../process_exit_queue.rs | 4 +- .../per_epoch_processing/process_slashings.rs | 2 +- 
.../update_registry_and_shuffling_data.rs | 8 +-- .../validator_statuses.rs | 14 ++-- .../src/per_epoch_processing/winning_root.rs | 8 +-- .../src/per_slot_processing.rs | 2 +- eth2/types/src/attestation.rs | 2 +- eth2/types/src/attestation_data.rs | 2 +- .../src/attestation_data_and_custody_bit.rs | 2 +- eth2/types/src/attester_slashing.rs | 2 +- eth2/types/src/beacon_block.rs | 10 +-- eth2/types/src/beacon_block_body.rs | 2 +- eth2/types/src/beacon_block_header.rs | 6 +- eth2/types/src/beacon_state.rs | 68 +++++++++---------- eth2/types/src/beacon_state/epoch_cache.rs | 2 +- eth2/types/src/chain_spec.rs | 10 +-- eth2/types/src/crosslink.rs | 2 +- eth2/types/src/deposit.rs | 2 +- eth2/types/src/deposit_data.rs | 2 +- eth2/types/src/deposit_input.rs | 6 +- eth2/types/src/eth1_data.rs | 2 +- eth2/types/src/eth1_data_vote.rs | 2 +- eth2/types/src/fork.rs | 6 +- eth2/types/src/historical_batch.rs | 2 +- eth2/types/src/pending_attestation.rs | 2 +- eth2/types/src/proposer_slashing.rs | 2 +- eth2/types/src/relative_epoch.rs | 6 +- eth2/types/src/slashable_attestation.rs | 6 +- eth2/types/src/transfer.rs | 2 +- eth2/types/src/validator.rs | 2 +- eth2/types/src/voluntary_exit.rs | 2 +- 48 files changed, 148 insertions(+), 148 deletions(-) diff --git a/eth2/state_processing/src/common/exit_validator.rs b/eth2/state_processing/src/common/exit_validator.rs index 8ab530b18..a6cfb395e 100644 --- a/eth2/state_processing/src/common/exit_validator.rs +++ b/eth2/state_processing/src/common/exit_validator.rs @@ -2,7 +2,7 @@ use types::{BeaconStateError as Error, *}; /// Exit the validator of the given `index`. 
/// -/// Spec v0.5.0 +/// Spec v0.5.1 pub fn exit_validator( state: &mut BeaconState, validator_index: usize, diff --git a/eth2/state_processing/src/common/slash_validator.rs b/eth2/state_processing/src/common/slash_validator.rs index 9be87b978..c1aad7da1 100644 --- a/eth2/state_processing/src/common/slash_validator.rs +++ b/eth2/state_processing/src/common/slash_validator.rs @@ -3,7 +3,7 @@ use types::{BeaconStateError as Error, *}; /// Slash the validator with index ``index``. /// -/// Spec v0.5.0 +/// Spec v0.5.1 pub fn slash_validator( state: &mut BeaconState, validator_index: usize, diff --git a/eth2/state_processing/src/common/verify_bitfield.rs b/eth2/state_processing/src/common/verify_bitfield.rs index 71c9f9c3e..7b3c07086 100644 --- a/eth2/state_processing/src/common/verify_bitfield.rs +++ b/eth2/state_processing/src/common/verify_bitfield.rs @@ -4,7 +4,7 @@ use types::*; /// /// Is title `verify_bitfield` in spec. /// -/// Spec v0.5.0 +/// Spec v0.5.1 pub fn verify_bitfield_length(bitfield: &Bitfield, committee_size: usize) -> bool { if bitfield.num_bytes() != ((committee_size + 7) / 8) { return false; diff --git a/eth2/state_processing/src/get_genesis_state.rs b/eth2/state_processing/src/get_genesis_state.rs index 2bde8ce0c..4e9fb6caf 100644 --- a/eth2/state_processing/src/get_genesis_state.rs +++ b/eth2/state_processing/src/get_genesis_state.rs @@ -9,7 +9,7 @@ pub enum GenesisError { /// Returns the genesis `BeaconState` /// -/// Spec v0.5.0 +/// Spec v0.5.1 pub fn get_genesis_state( genesis_validator_deposits: &[Deposit], genesis_time: u64, diff --git a/eth2/state_processing/src/per_block_processing.rs b/eth2/state_processing/src/per_block_processing.rs index 6eafcb937..257d92acf 100644 --- a/eth2/state_processing/src/per_block_processing.rs +++ b/eth2/state_processing/src/per_block_processing.rs @@ -39,7 +39,7 @@ const VERIFY_DEPOSIT_MERKLE_PROOFS: bool = false; /// Returns `Ok(())` if the block is valid and the state was successfully updated. 
Otherwise /// returns an error describing why the block was invalid or how the function failed to execute. /// -/// Spec v0.5.0 +/// Spec v0.5.1 pub fn per_block_processing( state: &mut BeaconState, block: &BeaconBlock, @@ -54,7 +54,7 @@ pub fn per_block_processing( /// Returns `Ok(())` if the block is valid and the state was successfully updated. Otherwise /// returns an error describing why the block was invalid or how the function failed to execute. /// -/// Spec v0.5.0 +/// Spec v0.5.1 pub fn per_block_processing_without_verifying_block_signature( state: &mut BeaconState, block: &BeaconBlock, @@ -69,7 +69,7 @@ pub fn per_block_processing_without_verifying_block_signature( /// Returns `Ok(())` if the block is valid and the state was successfully updated. Otherwise /// returns an error describing why the block was invalid or how the function failed to execute. /// -/// Spec v0.5.0 +/// Spec v0.5.1 fn per_block_processing_signature_optional( mut state: &mut BeaconState, block: &BeaconBlock, @@ -119,7 +119,7 @@ pub fn process_block_header( /// Verifies the signature of a block. /// -/// Spec v0.5.0 +/// Spec v0.5.1 pub fn verify_block_signature( state: &BeaconState, block: &BeaconBlock, @@ -147,7 +147,7 @@ pub fn verify_block_signature( /// Verifies the `randao_reveal` against the block's proposer pubkey and updates /// `state.latest_randao_mixes`. /// -/// Spec v0.5.0 +/// Spec v0.5.1 pub fn process_randao( state: &mut BeaconState, block: &BeaconBlock, @@ -178,7 +178,7 @@ pub fn process_randao( /// Update the `state.eth1_data_votes` based upon the `eth1_data` provided. /// -/// Spec v0.5.0 +/// Spec v0.5.1 pub fn process_eth1_data(state: &mut BeaconState, eth1_data: &Eth1Data) -> Result<(), Error> { // Attempt to find a `Eth1DataVote` with matching `Eth1Data`. 
let matching_eth1_vote_index = state @@ -204,7 +204,7 @@ pub fn process_eth1_data(state: &mut BeaconState, eth1_data: &Eth1Data) -> Resul /// Returns `Ok(())` if the validation and state updates completed successfully, otherwise returns /// an `Err` describing the invalid object or cause of failure. /// -/// Spec v0.5.0 +/// Spec v0.5.1 pub fn process_proposer_slashings( state: &mut BeaconState, proposer_slashings: &[ProposerSlashing], @@ -237,7 +237,7 @@ pub fn process_proposer_slashings( /// Returns `Ok(())` if the validation and state updates completed successfully, otherwise returns /// an `Err` describing the invalid object or cause of failure. /// -/// Spec v0.5.0 +/// Spec v0.5.1 pub fn process_attester_slashings( state: &mut BeaconState, attester_slashings: &[AttesterSlashing], @@ -295,7 +295,7 @@ pub fn process_attester_slashings( /// Returns `Ok(())` if the validation and state updates completed successfully, otherwise returns /// an `Err` describing the invalid object or cause of failure. /// -/// Spec v0.5.0 +/// Spec v0.5.1 pub fn process_attestations( state: &mut BeaconState, attestations: &[Attestation], @@ -337,7 +337,7 @@ pub fn process_attestations( /// Returns `Ok(())` if the validation and state updates completed successfully, otherwise returns /// an `Err` describing the invalid object or cause of failure. /// -/// Spec v0.5.0 +/// Spec v0.5.1 pub fn process_deposits( state: &mut BeaconState, deposits: &[Deposit], @@ -407,7 +407,7 @@ pub fn process_deposits( /// Returns `Ok(())` if the validation and state updates completed successfully, otherwise returns /// an `Err` describing the invalid object or cause of failure. /// -/// Spec v0.5.0 +/// Spec v0.5.1 pub fn process_exits( state: &mut BeaconState, voluntary_exits: &[VoluntaryExit], @@ -439,7 +439,7 @@ pub fn process_exits( /// Returns `Ok(())` if the validation and state updates completed successfully, otherwise returns /// an `Err` describing the invalid object or cause of failure. 
/// -/// Spec v0.5.0 +/// Spec v0.5.1 pub fn process_transfers( state: &mut BeaconState, transfers: &[Transfer], diff --git a/eth2/state_processing/src/per_block_processing/validate_attestation.rs b/eth2/state_processing/src/per_block_processing/validate_attestation.rs index c9d0b38a4..438a75c94 100644 --- a/eth2/state_processing/src/per_block_processing/validate_attestation.rs +++ b/eth2/state_processing/src/per_block_processing/validate_attestation.rs @@ -8,7 +8,7 @@ use types::*; /// /// Returns `Ok(())` if the `Attestation` is valid, otherwise indicates the reason for invalidity. /// -/// Spec v0.5.0 +/// Spec v0.5.1 pub fn validate_attestation( state: &BeaconState, attestation: &Attestation, @@ -31,7 +31,7 @@ pub fn validate_attestation_time_independent_only( /// /// Returns `Ok(())` if the `Attestation` is valid, otherwise indicates the reason for invalidity. /// -/// Spec v0.5.0 +/// Spec v0.5.1 pub fn validate_attestation_without_signature( state: &BeaconState, attestation: &Attestation, @@ -44,7 +44,7 @@ pub fn validate_attestation_without_signature( /// given state, optionally validating the aggregate signature. /// /// -/// Spec v0.5.0 +/// Spec v0.5.1 fn validate_attestation_parametric( state: &BeaconState, attestation: &Attestation, @@ -167,7 +167,7 @@ fn validate_attestation_parametric( /// Verify that the `source_epoch` and `source_root` of an `Attestation` correctly /// match the current (or previous) justified epoch and root from the state. /// -/// Spec v0.5.0 +/// Spec v0.5.1 fn verify_justified_epoch_and_root( attestation: &Attestation, state: &BeaconState, @@ -222,7 +222,7 @@ fn verify_justified_epoch_and_root( /// - `custody_bitfield` does not have a bit for each index of `committee`. /// - A `validator_index` in `committee` is not in `state.validator_registry`. 
/// -/// Spec v0.5.0 +/// Spec v0.5.1 fn verify_attestation_signature( state: &BeaconState, committee: &[usize], diff --git a/eth2/state_processing/src/per_block_processing/verify_attester_slashing.rs b/eth2/state_processing/src/per_block_processing/verify_attester_slashing.rs index abf99da64..3527b62e3 100644 --- a/eth2/state_processing/src/per_block_processing/verify_attester_slashing.rs +++ b/eth2/state_processing/src/per_block_processing/verify_attester_slashing.rs @@ -7,7 +7,7 @@ use types::*; /// /// Returns `Ok(())` if the `AttesterSlashing` is valid, otherwise indicates the reason for invalidity. /// -/// Spec v0.5.0 +/// Spec v0.5.1 pub fn verify_attester_slashing( state: &BeaconState, attester_slashing: &AttesterSlashing, @@ -41,7 +41,7 @@ pub fn verify_attester_slashing( /// /// Returns Ok(indices) if `indices.len() > 0`. /// -/// Spec v0.5.0 +/// Spec v0.5.1 pub fn gather_attester_slashing_indices( state: &BeaconState, attester_slashing: &AttesterSlashing, diff --git a/eth2/state_processing/src/per_block_processing/verify_deposit.rs b/eth2/state_processing/src/per_block_processing/verify_deposit.rs index a3a0f5734..22a62a321 100644 --- a/eth2/state_processing/src/per_block_processing/verify_deposit.rs +++ b/eth2/state_processing/src/per_block_processing/verify_deposit.rs @@ -15,7 +15,7 @@ use types::*; /// /// Note: this function is incomplete. /// -/// Spec v0.5.0 +/// Spec v0.5.1 pub fn verify_deposit( state: &BeaconState, deposit: &Deposit, @@ -46,7 +46,7 @@ pub fn verify_deposit( /// Verify that the `Deposit` index is correct. /// -/// Spec v0.5.0 +/// Spec v0.5.1 pub fn verify_deposit_index(state: &BeaconState, deposit: &Deposit) -> Result<(), Error> { verify!( deposit.index == state.deposit_index, @@ -88,7 +88,7 @@ pub fn get_existing_validator_index( /// Verify that a deposit is included in the state's eth1 deposit root. 
/// -/// Spec v0.5.0 +/// Spec v0.5.1 fn verify_deposit_merkle_proof(state: &BeaconState, deposit: &Deposit, spec: &ChainSpec) -> bool { let leaf = hash(&get_serialized_deposit_data(deposit)); verify_merkle_proof( @@ -102,7 +102,7 @@ fn verify_deposit_merkle_proof(state: &BeaconState, deposit: &Deposit, spec: &Ch /// Helper struct for easily getting the serialized data generated by the deposit contract. /// -/// Spec v0.5.0 +/// Spec v0.5.1 #[derive(Encode)] struct SerializedDepositData { amount: u64, @@ -113,7 +113,7 @@ struct SerializedDepositData { /// Return the serialized data generated by the deposit contract that is used to generate the /// merkle proof. /// -/// Spec v0.5.0 +/// Spec v0.5.1 fn get_serialized_deposit_data(deposit: &Deposit) -> Vec { let serialized_deposit_data = SerializedDepositData { amount: deposit.deposit_data.amount, diff --git a/eth2/state_processing/src/per_block_processing/verify_exit.rs b/eth2/state_processing/src/per_block_processing/verify_exit.rs index c5b8ebcb4..697188ee9 100644 --- a/eth2/state_processing/src/per_block_processing/verify_exit.rs +++ b/eth2/state_processing/src/per_block_processing/verify_exit.rs @@ -7,7 +7,7 @@ use types::*; /// /// Returns `Ok(())` if the `Exit` is valid, otherwise indicates the reason for invalidity. /// -/// Spec v0.5.0 +/// Spec v0.5.1 pub fn verify_exit( state: &BeaconState, exit: &VoluntaryExit, diff --git a/eth2/state_processing/src/per_block_processing/verify_proposer_slashing.rs b/eth2/state_processing/src/per_block_processing/verify_proposer_slashing.rs index b5113863e..bbc03dd62 100644 --- a/eth2/state_processing/src/per_block_processing/verify_proposer_slashing.rs +++ b/eth2/state_processing/src/per_block_processing/verify_proposer_slashing.rs @@ -7,7 +7,7 @@ use types::*; /// /// Returns `Ok(())` if the `ProposerSlashing` is valid, otherwise indicates the reason for invalidity. 
/// -/// Spec v0.5.0 +/// Spec v0.5.1 pub fn verify_proposer_slashing( proposer_slashing: &ProposerSlashing, state: &BeaconState, @@ -67,7 +67,7 @@ pub fn verify_proposer_slashing( /// /// Returns `true` if the signature is valid. /// -/// Spec v0.5.0 +/// Spec v0.5.1 fn verify_header_signature( header: &BeaconBlockHeader, pubkey: &PublicKey, diff --git a/eth2/state_processing/src/per_block_processing/verify_slashable_attestation.rs b/eth2/state_processing/src/per_block_processing/verify_slashable_attestation.rs index d39ac6759..89cb93ce5 100644 --- a/eth2/state_processing/src/per_block_processing/verify_slashable_attestation.rs +++ b/eth2/state_processing/src/per_block_processing/verify_slashable_attestation.rs @@ -10,7 +10,7 @@ use types::*; /// /// Returns `Ok(())` if the `SlashableAttestation` is valid, otherwise indicates the reason for invalidity. /// -/// Spec v0.5.0 +/// Spec v0.5.1 pub fn verify_slashable_attestation( state: &BeaconState, slashable_attestation: &SlashableAttestation, diff --git a/eth2/state_processing/src/per_block_processing/verify_transfer.rs b/eth2/state_processing/src/per_block_processing/verify_transfer.rs index 978d0cfce..8b0415508 100644 --- a/eth2/state_processing/src/per_block_processing/verify_transfer.rs +++ b/eth2/state_processing/src/per_block_processing/verify_transfer.rs @@ -10,7 +10,7 @@ use types::*; /// /// Note: this function is incomplete. /// -/// Spec v0.5.0 +/// Spec v0.5.1 pub fn verify_transfer( state: &BeaconState, transfer: &Transfer, @@ -122,7 +122,7 @@ fn verify_transfer_parametric( /// /// Does not check that the transfer is valid, however checks for overflow in all actions. 
/// -/// Spec v0.5.0 +/// Spec v0.5.1 pub fn execute_transfer( state: &mut BeaconState, transfer: &Transfer, diff --git a/eth2/state_processing/src/per_epoch_processing.rs b/eth2/state_processing/src/per_epoch_processing.rs index f4d6452a4..87c9b9398 100644 --- a/eth2/state_processing/src/per_epoch_processing.rs +++ b/eth2/state_processing/src/per_epoch_processing.rs @@ -32,7 +32,7 @@ pub type WinningRootHashSet = HashMap; /// Mutates the given `BeaconState`, returning early if an error is encountered. If an error is /// returned, a state might be "half-processed" and therefore in an invalid state. /// -/// Spec v0.5.0 +/// Spec v0.5.1 pub fn per_epoch_processing(state: &mut BeaconState, spec: &ChainSpec) -> Result<(), Error> { // Ensure the previous and next epoch caches are built. state.build_epoch_cache(RelativeEpoch::Previous, spec)?; @@ -86,7 +86,7 @@ pub fn per_epoch_processing(state: &mut BeaconState, spec: &ChainSpec) -> Result /// Maybe resets the eth1 period. /// -/// Spec v0.5.0 +/// Spec v0.5.1 pub fn maybe_reset_eth1_period(state: &mut BeaconState, spec: &ChainSpec) { let next_epoch = state.next_epoch(spec); let voting_period = spec.epochs_per_eth1_voting_period; @@ -108,7 +108,7 @@ pub fn maybe_reset_eth1_period(state: &mut BeaconState, spec: &ChainSpec) { /// - `justified_epoch` /// - `previous_justified_epoch` /// -/// Spec v0.5.0 +/// Spec v0.5.1 pub fn update_justification_and_finalization( state: &mut BeaconState, total_balances: &TotalBalances, @@ -178,7 +178,7 @@ pub fn update_justification_and_finalization( /// /// Also returns a `WinningRootHashSet` for later use during epoch processing. /// -/// Spec v0.5.0 +/// Spec v0.5.1 pub fn process_crosslinks( state: &mut BeaconState, spec: &ChainSpec, @@ -221,7 +221,7 @@ pub fn process_crosslinks( /// Finish up an epoch update. 
/// -/// Spec v0.5.0 +/// Spec v0.5.1 pub fn finish_epoch_update(state: &mut BeaconState, spec: &ChainSpec) -> Result<(), Error> { let current_epoch = state.current_epoch(spec); let next_epoch = state.next_epoch(spec); diff --git a/eth2/state_processing/src/per_epoch_processing/apply_rewards.rs b/eth2/state_processing/src/per_epoch_processing/apply_rewards.rs index ce5fccb21..9af1ee8c3 100644 --- a/eth2/state_processing/src/per_epoch_processing/apply_rewards.rs +++ b/eth2/state_processing/src/per_epoch_processing/apply_rewards.rs @@ -32,7 +32,7 @@ impl std::ops::AddAssign for Delta { /// Apply attester and proposer rewards. /// -/// Spec v0.5.0 +/// Spec v0.5.1 pub fn apply_rewards( state: &mut BeaconState, validator_statuses: &mut ValidatorStatuses, @@ -79,7 +79,7 @@ pub fn apply_rewards( /// Applies the attestation inclusion reward to each proposer for every validator who included an /// attestation in the previous epoch. /// -/// Spec v0.5.0 +/// Spec v0.5.1 fn get_proposer_deltas( deltas: &mut Vec, state: &mut BeaconState, @@ -120,7 +120,7 @@ fn get_proposer_deltas( /// Apply rewards for participation in attestations during the previous epoch. /// -/// Spec v0.5.0 +/// Spec v0.5.1 fn get_justification_and_finalization_deltas( deltas: &mut Vec, state: &BeaconState, @@ -163,7 +163,7 @@ fn get_justification_and_finalization_deltas( /// Determine the delta for a single validator, if the chain is finalizing normally. /// -/// Spec v0.5.0 +/// Spec v0.5.1 fn compute_normal_justification_and_finalization_delta( validator: &ValidatorStatus, total_balances: &TotalBalances, @@ -215,7 +215,7 @@ fn compute_normal_justification_and_finalization_delta( /// Determine the delta for a single delta, assuming the chain is _not_ finalizing normally. 
/// -/// Spec v0.5.0 +/// Spec v0.5.1 fn compute_inactivity_leak_delta( validator: &ValidatorStatus, base_reward: u64, @@ -261,7 +261,7 @@ fn compute_inactivity_leak_delta( /// Calculate the deltas based upon the winning roots for attestations during the previous epoch. /// -/// Spec v0.5.0 +/// Spec v0.5.1 fn get_crosslink_deltas( deltas: &mut Vec, state: &BeaconState, @@ -295,7 +295,7 @@ fn get_crosslink_deltas( /// Returns the base reward for some validator. /// -/// Spec v0.5.0 +/// Spec v0.5.1 fn get_base_reward( state: &BeaconState, index: usize, @@ -312,7 +312,7 @@ fn get_base_reward( /// Returns the inactivity penalty for some validator. /// -/// Spec v0.5.0 +/// Spec v0.5.1 fn get_inactivity_penalty( state: &BeaconState, index: usize, @@ -328,7 +328,7 @@ fn get_inactivity_penalty( /// Returns the epochs since the last finalized epoch. /// -/// Spec v0.5.0 +/// Spec v0.5.1 fn epochs_since_finality(state: &BeaconState, spec: &ChainSpec) -> Epoch { state.current_epoch(spec) + 1 - state.finalized_epoch } diff --git a/eth2/state_processing/src/per_epoch_processing/get_attestation_participants.rs b/eth2/state_processing/src/per_epoch_processing/get_attestation_participants.rs index 52ba0274b..bea772204 100644 --- a/eth2/state_processing/src/per_epoch_processing/get_attestation_participants.rs +++ b/eth2/state_processing/src/per_epoch_processing/get_attestation_participants.rs @@ -3,7 +3,7 @@ use types::*; /// Returns validator indices which participated in the attestation. 
/// -/// Spec v0.5.0 +/// Spec v0.5.1 pub fn get_attestation_participants( state: &BeaconState, attestation_data: &AttestationData, diff --git a/eth2/state_processing/src/per_epoch_processing/inclusion_distance.rs b/eth2/state_processing/src/per_epoch_processing/inclusion_distance.rs index b52485947..6b221f513 100644 --- a/eth2/state_processing/src/per_epoch_processing/inclusion_distance.rs +++ b/eth2/state_processing/src/per_epoch_processing/inclusion_distance.rs @@ -5,7 +5,7 @@ use types::*; /// Returns the distance between the first included attestation for some validator and this /// slot. /// -/// Spec v0.5.0 +/// Spec v0.5.1 pub fn inclusion_distance( state: &BeaconState, attestations: &[&PendingAttestation], @@ -18,7 +18,7 @@ pub fn inclusion_distance( /// Returns the slot of the earliest included attestation for some validator. /// -/// Spec v0.5.0 +/// Spec v0.5.1 pub fn inclusion_slot( state: &BeaconState, attestations: &[&PendingAttestation], @@ -31,7 +31,7 @@ pub fn inclusion_slot( /// Finds the earliest included attestation for some validator. /// -/// Spec v0.5.0 +/// Spec v0.5.1 fn earliest_included_attestation( state: &BeaconState, attestations: &[&PendingAttestation], diff --git a/eth2/state_processing/src/per_epoch_processing/process_ejections.rs b/eth2/state_processing/src/per_epoch_processing/process_ejections.rs index a60d92187..6f64c46f7 100644 --- a/eth2/state_processing/src/per_epoch_processing/process_ejections.rs +++ b/eth2/state_processing/src/per_epoch_processing/process_ejections.rs @@ -4,7 +4,7 @@ use types::{BeaconStateError as Error, *}; /// Iterate through the validator registry and eject active validators with balance below /// ``EJECTION_BALANCE``. /// -/// Spec v0.5.0 +/// Spec v0.5.1 pub fn process_ejections(state: &mut BeaconState, spec: &ChainSpec) -> Result<(), Error> { // There is an awkward double (triple?) loop here because we can't loop across the borrowed // active validator indices and mutate state in the one loop. 
diff --git a/eth2/state_processing/src/per_epoch_processing/process_exit_queue.rs b/eth2/state_processing/src/per_epoch_processing/process_exit_queue.rs index 074db1d08..a6362188d 100644 --- a/eth2/state_processing/src/per_epoch_processing/process_exit_queue.rs +++ b/eth2/state_processing/src/per_epoch_processing/process_exit_queue.rs @@ -2,7 +2,7 @@ use types::*; /// Process the exit queue. /// -/// Spec v0.5.0 +/// Spec v0.5.1 pub fn process_exit_queue(state: &mut BeaconState, spec: &ChainSpec) { let current_epoch = state.current_epoch(spec); @@ -31,7 +31,7 @@ pub fn process_exit_queue(state: &mut BeaconState, spec: &ChainSpec) { /// Initiate an exit for the validator of the given `index`. /// -/// Spec v0.5.0 +/// Spec v0.5.1 fn prepare_validator_for_withdrawal( state: &mut BeaconState, validator_index: usize, diff --git a/eth2/state_processing/src/per_epoch_processing/process_slashings.rs b/eth2/state_processing/src/per_epoch_processing/process_slashings.rs index 88777472c..89a7dd484 100644 --- a/eth2/state_processing/src/per_epoch_processing/process_slashings.rs +++ b/eth2/state_processing/src/per_epoch_processing/process_slashings.rs @@ -2,7 +2,7 @@ use types::{BeaconStateError as Error, *}; /// Process slashings. /// -/// Spec v0.5.0 +/// Spec v0.5.1 pub fn process_slashings( state: &mut BeaconState, current_total_balance: u64, diff --git a/eth2/state_processing/src/per_epoch_processing/update_registry_and_shuffling_data.rs b/eth2/state_processing/src/per_epoch_processing/update_registry_and_shuffling_data.rs index 0b18c2571..d290d2987 100644 --- a/eth2/state_processing/src/per_epoch_processing/update_registry_and_shuffling_data.rs +++ b/eth2/state_processing/src/per_epoch_processing/update_registry_and_shuffling_data.rs @@ -4,7 +4,7 @@ use types::*; /// Peforms a validator registry update, if required. 
/// -/// Spec v0.5.0 +/// Spec v0.5.1 pub fn update_registry_and_shuffling_data( state: &mut BeaconState, current_total_balance: u64, @@ -49,7 +49,7 @@ pub fn update_registry_and_shuffling_data( /// Returns `true` if the validator registry should be updated during an epoch processing. /// -/// Spec v0.5.0 +/// Spec v0.5.1 pub fn should_update_validator_registry( state: &BeaconState, spec: &ChainSpec, @@ -78,7 +78,7 @@ pub fn should_update_validator_registry( /// /// Note: Utilizes the cache and will fail if the appropriate cache is not initialized. /// -/// Spec v0.5.0 +/// Spec v0.5.1 pub fn update_validator_registry( state: &mut BeaconState, current_total_balance: u64, @@ -133,7 +133,7 @@ pub fn update_validator_registry( /// Activate the validator of the given ``index``. /// -/// Spec v0.5.0 +/// Spec v0.5.1 pub fn activate_validator( state: &mut BeaconState, validator_index: usize, diff --git a/eth2/state_processing/src/per_epoch_processing/validator_statuses.rs b/eth2/state_processing/src/per_epoch_processing/validator_statuses.rs index 02149cc5a..afa78c9c0 100644 --- a/eth2/state_processing/src/per_epoch_processing/validator_statuses.rs +++ b/eth2/state_processing/src/per_epoch_processing/validator_statuses.rs @@ -160,7 +160,7 @@ impl ValidatorStatuses { /// - Active validators /// - Total balances for the current and previous epochs. /// - /// Spec v0.5.0 + /// Spec v0.5.1 pub fn new(state: &BeaconState, spec: &ChainSpec) -> Result { let mut statuses = Vec::with_capacity(state.validator_registry.len()); let mut total_balances = TotalBalances::default(); @@ -195,7 +195,7 @@ impl ValidatorStatuses { /// Process some attestations from the given `state` updating the `statuses` and /// `total_balances` fields. 
/// - /// Spec v0.5.0 + /// Spec v0.5.1 pub fn process_attestations( &mut self, state: &BeaconState, @@ -261,7 +261,7 @@ impl ValidatorStatuses { /// Update the `statuses` for each validator based upon whether or not they attested to the /// "winning" shard block root for the previous epoch. /// - /// Spec v0.5.0 + /// Spec v0.5.1 pub fn process_winning_roots( &mut self, state: &BeaconState, @@ -297,14 +297,14 @@ impl ValidatorStatuses { /// Returns the distance between when the attestation was created and when it was included in a /// block. /// -/// Spec v0.5.0 +/// Spec v0.5.1 fn inclusion_distance(a: &PendingAttestation) -> Slot { a.inclusion_slot - a.data.slot } /// Returns `true` if some `PendingAttestation` is from the supplied `epoch`. /// -/// Spec v0.5.0 +/// Spec v0.5.1 fn is_from_epoch(a: &PendingAttestation, epoch: Epoch, spec: &ChainSpec) -> bool { a.data.slot.epoch(spec.slots_per_epoch) == epoch } @@ -312,7 +312,7 @@ fn is_from_epoch(a: &PendingAttestation, epoch: Epoch, spec: &ChainSpec) -> bool /// Returns `true` if a `PendingAttestation` and `BeaconState` share the same beacon block hash for /// the first slot of the given epoch. /// -/// Spec v0.5.0 +/// Spec v0.5.1 fn has_common_epoch_boundary_root( a: &PendingAttestation, state: &BeaconState, @@ -328,7 +328,7 @@ fn has_common_epoch_boundary_root( /// Returns `true` if a `PendingAttestation` and `BeaconState` share the same beacon block hash for /// the current slot of the `PendingAttestation`. 
/// -/// Spec v0.5.0 +/// Spec v0.5.1 fn has_common_beacon_block_root( a: &PendingAttestation, state: &BeaconState, diff --git a/eth2/state_processing/src/per_epoch_processing/winning_root.rs b/eth2/state_processing/src/per_epoch_processing/winning_root.rs index 97cff3e13..5d31dff31 100644 --- a/eth2/state_processing/src/per_epoch_processing/winning_root.rs +++ b/eth2/state_processing/src/per_epoch_processing/winning_root.rs @@ -16,7 +16,7 @@ impl WinningRoot { /// A winning root is "better" than another if it has a higher `total_attesting_balance`. Ties /// are broken by favouring the higher `crosslink_data_root` value. /// - /// Spec v0.5.0 + /// Spec v0.5.1 pub fn is_better_than(&self, other: &Self) -> bool { if self.total_attesting_balance > other.total_attesting_balance { true @@ -34,7 +34,7 @@ impl WinningRoot { /// The `WinningRoot` object also contains additional fields that are useful in later stages of /// per-epoch processing. /// -/// Spec v0.5.0 +/// Spec v0.5.1 pub fn winning_root( state: &BeaconState, shard: u64, @@ -89,7 +89,7 @@ pub fn winning_root( /// Returns `true` if pending attestation `a` is eligible to become a winning root. /// -/// Spec v0.5.0 +/// Spec v0.5.1 fn is_eligible_for_winning_root(state: &BeaconState, a: &PendingAttestation, shard: Shard) -> bool { if shard >= state.latest_crosslinks.len() as u64 { return false; @@ -100,7 +100,7 @@ fn is_eligible_for_winning_root(state: &BeaconState, a: &PendingAttestation, sha /// Returns all indices which voted for a given crosslink. Does not contain duplicates. 
/// -/// Spec v0.5.0 +/// Spec v0.5.1 fn get_attesting_validator_indices( state: &BeaconState, shard: u64, diff --git a/eth2/state_processing/src/per_slot_processing.rs b/eth2/state_processing/src/per_slot_processing.rs index 7d2bb468f..378d5dd2e 100644 --- a/eth2/state_processing/src/per_slot_processing.rs +++ b/eth2/state_processing/src/per_slot_processing.rs @@ -10,7 +10,7 @@ pub enum Error { /// Advances a state forward by one slot, performing per-epoch processing if required. /// -/// Spec v0.5.0 +/// Spec v0.5.1 pub fn per_slot_processing(state: &mut BeaconState, spec: &ChainSpec) -> Result<(), Error> { cache_state(state, spec)?; diff --git a/eth2/types/src/attestation.rs b/eth2/types/src/attestation.rs index c43692a7b..f7bfdaab9 100644 --- a/eth2/types/src/attestation.rs +++ b/eth2/types/src/attestation.rs @@ -9,7 +9,7 @@ use tree_hash_derive::{SignedRoot, TreeHash}; /// Details an attestation that can be slashable. /// -/// Spec v0.5.0 +/// Spec v0.5.1 #[derive( Debug, Clone, diff --git a/eth2/types/src/attestation_data.rs b/eth2/types/src/attestation_data.rs index 305ddafe0..f8a0ecd15 100644 --- a/eth2/types/src/attestation_data.rs +++ b/eth2/types/src/attestation_data.rs @@ -9,7 +9,7 @@ use tree_hash_derive::{SignedRoot, TreeHash}; /// The data upon which an attestation is based. /// -/// Spec v0.5.0 +/// Spec v0.5.1 #[derive( Debug, Clone, diff --git a/eth2/types/src/attestation_data_and_custody_bit.rs b/eth2/types/src/attestation_data_and_custody_bit.rs index 59a4eee77..e5dc920dc 100644 --- a/eth2/types/src/attestation_data_and_custody_bit.rs +++ b/eth2/types/src/attestation_data_and_custody_bit.rs @@ -7,7 +7,7 @@ use tree_hash_derive::TreeHash; /// Used for pairing an attestation with a proof-of-custody. 
/// -/// Spec v0.5.0 +/// Spec v0.5.1 #[derive(Debug, Clone, PartialEq, Default, Serialize, Encode, Decode, TreeHash)] pub struct AttestationDataAndCustodyBit { pub data: AttestationData, diff --git a/eth2/types/src/attester_slashing.rs b/eth2/types/src/attester_slashing.rs index 0600e0ecc..b5e851dbd 100644 --- a/eth2/types/src/attester_slashing.rs +++ b/eth2/types/src/attester_slashing.rs @@ -7,7 +7,7 @@ use tree_hash_derive::TreeHash; /// Two conflicting attestations. /// -/// Spec v0.5.0 +/// Spec v0.5.1 #[derive(Debug, PartialEq, Clone, Serialize, Deserialize, Encode, Decode, TreeHash, TestRandom)] pub struct AttesterSlashing { pub slashable_attestation_1: SlashableAttestation, diff --git a/eth2/types/src/beacon_block.rs b/eth2/types/src/beacon_block.rs index bc6ccb0d5..b4d2752d6 100644 --- a/eth2/types/src/beacon_block.rs +++ b/eth2/types/src/beacon_block.rs @@ -10,7 +10,7 @@ use tree_hash_derive::{SignedRoot, TreeHash}; /// A block of the `BeaconChain`. /// -/// Spec v0.5.0 +/// Spec v0.5.1 #[derive( Debug, PartialEq, @@ -35,7 +35,7 @@ pub struct BeaconBlock { impl BeaconBlock { /// Returns an empty block to be used during genesis. /// - /// Spec v0.5.0 + /// Spec v0.5.1 pub fn empty(spec: &ChainSpec) -> BeaconBlock { BeaconBlock { slot: spec.genesis_slot, @@ -60,7 +60,7 @@ impl BeaconBlock { /// Returns the `tree_hash_root | update` of the block. /// - /// Spec v0.5.0 + /// Spec v0.5.1 pub fn canonical_root(&self) -> Hash256 { Hash256::from_slice(&self.tree_hash_root()[..]) } @@ -72,7 +72,7 @@ impl BeaconBlock { /// /// Note: performs a full tree-hash of `self.body`. /// - /// Spec v0.5.0 + /// Spec v0.5.1 pub fn block_header(&self) -> BeaconBlockHeader { BeaconBlockHeader { slot: self.slot, @@ -85,7 +85,7 @@ impl BeaconBlock { /// Returns a "temporary" header, where the `state_root` is `spec.zero_hash`. 
/// - /// Spec v0.5.0 + /// Spec v0.5.1 pub fn temporary_block_header(&self, spec: &ChainSpec) -> BeaconBlockHeader { BeaconBlockHeader { state_root: spec.zero_hash, diff --git a/eth2/types/src/beacon_block_body.rs b/eth2/types/src/beacon_block_body.rs index 0414d0d72..de4951f1f 100644 --- a/eth2/types/src/beacon_block_body.rs +++ b/eth2/types/src/beacon_block_body.rs @@ -8,7 +8,7 @@ use tree_hash_derive::TreeHash; /// The body of a `BeaconChain` block, containing operations. /// -/// Spec v0.5.0 +/// Spec v0.5.1 #[derive(Debug, PartialEq, Clone, Serialize, Deserialize, Encode, Decode, TreeHash, TestRandom)] pub struct BeaconBlockBody { pub randao_reveal: Signature, diff --git a/eth2/types/src/beacon_block_header.rs b/eth2/types/src/beacon_block_header.rs index 9076437c0..fa71bd26b 100644 --- a/eth2/types/src/beacon_block_header.rs +++ b/eth2/types/src/beacon_block_header.rs @@ -10,7 +10,7 @@ use tree_hash_derive::{SignedRoot, TreeHash}; /// A header of a `BeaconBlock`. /// -/// Spec v0.5.0 +/// Spec v0.5.1 #[derive( Debug, PartialEq, @@ -35,14 +35,14 @@ pub struct BeaconBlockHeader { impl BeaconBlockHeader { /// Returns the `tree_hash_root` of the header. /// - /// Spec v0.5.0 + /// Spec v0.5.1 pub fn canonical_root(&self) -> Hash256 { Hash256::from_slice(&self.tree_hash_root()[..]) } /// Given a `body`, consumes `self` and returns a complete `BeaconBlock`. /// - /// Spec v0.5.0 + /// Spec v0.5.1 pub fn into_block(self, body: BeaconBlockBody) -> BeaconBlock { BeaconBlock { slot: self.slot, diff --git a/eth2/types/src/beacon_state.rs b/eth2/types/src/beacon_state.rs index 0461e947b..eef408308 100644 --- a/eth2/types/src/beacon_state.rs +++ b/eth2/types/src/beacon_state.rs @@ -46,7 +46,7 @@ pub enum Error { /// The state of the `BeaconChain` at some slot. 
/// -/// Spec v0.5.0 +/// Spec v0.5.1 #[derive(Debug, PartialEq, Clone, Serialize, Deserialize, TestRandom, Encode, Decode, TreeHash)] pub struct BeaconState { // Misc @@ -120,7 +120,7 @@ impl BeaconState { /// This does not fully build a genesis beacon state, it omits processing of initial validator /// deposits. To obtain a full genesis beacon state, use the `BeaconStateBuilder`. /// - /// Spec v0.5.0 + /// Spec v0.5.1 pub fn genesis(genesis_time: u64, latest_eth1_data: Eth1Data, spec: &ChainSpec) -> BeaconState { let initial_crosslink = Crosslink { epoch: spec.genesis_epoch, @@ -192,7 +192,7 @@ impl BeaconState { /// Returns the `tree_hash_root` of the state. /// - /// Spec v0.5.0 + /// Spec v0.5.1 pub fn canonical_root(&self) -> Hash256 { Hash256::from_slice(&self.tree_hash_root()[..]) } @@ -221,7 +221,7 @@ impl BeaconState { /// The epoch corresponding to `self.slot`. /// - /// Spec v0.5.0 + /// Spec v0.5.1 pub fn current_epoch(&self, spec: &ChainSpec) -> Epoch { self.slot.epoch(spec.slots_per_epoch) } @@ -230,14 +230,14 @@ impl BeaconState { /// /// If the current epoch is the genesis epoch, the genesis_epoch is returned. /// - /// Spec v0.5.0 + /// Spec v0.5.1 pub fn previous_epoch(&self, spec: &ChainSpec) -> Epoch { self.current_epoch(&spec) - 1 } /// The epoch following `self.current_epoch()`. /// - /// Spec v0.5.0 + /// Spec v0.5.1 pub fn next_epoch(&self, spec: &ChainSpec) -> Epoch { self.current_epoch(spec) + 1 } @@ -250,7 +250,7 @@ impl BeaconState { /// /// Note: Utilizes the cache and will fail if the appropriate cache is not initialized. /// - /// Spec v0.5.0 + /// Spec v0.5.1 pub fn get_cached_active_validator_indices( &self, relative_epoch: RelativeEpoch, @@ -265,7 +265,7 @@ impl BeaconState { /// /// Does not utilize the cache, performs a full iteration over the validator registry. 
/// - /// Spec v0.5.0 + /// Spec v0.5.1 pub fn get_active_validator_indices(&self, epoch: Epoch) -> Vec { get_active_validator_indices(&self.validator_registry, epoch) } @@ -274,7 +274,7 @@ impl BeaconState { /// /// Note: Utilizes the cache and will fail if the appropriate cache is not initialized. /// - /// Spec v0.5.0 + /// Spec v0.5.1 pub fn get_crosslink_committees_at_slot( &self, slot: Slot, @@ -299,7 +299,7 @@ impl BeaconState { /// /// Note: Utilizes the cache and will fail if the appropriate cache is not initialized. /// - /// Spec v0.5.0 + /// Spec v0.5.1 pub fn get_crosslink_committee_for_shard( &self, epoch: Epoch, @@ -325,7 +325,7 @@ impl BeaconState { /// /// If the state does not contain an index for a beacon proposer at the requested `slot`, then `None` is returned. /// - /// Spec v0.5.0 + /// Spec v0.5.1 pub fn get_beacon_proposer_index( &self, slot: Slot, @@ -354,7 +354,7 @@ impl BeaconState { /// Safely obtains the index for latest block roots, given some `slot`. /// - /// Spec v0.5.0 + /// Spec v0.5.1 fn get_latest_block_roots_index(&self, slot: Slot, spec: &ChainSpec) -> Result { if (slot < self.slot) && (self.slot <= slot + spec.slots_per_historical_root as u64) { let i = slot.as_usize() % spec.slots_per_historical_root; @@ -370,7 +370,7 @@ impl BeaconState { /// Return the block root at a recent `slot`. /// - /// Spec v0.5.0 + /// Spec v0.5.1 pub fn get_block_root( &self, slot: Slot, @@ -382,7 +382,7 @@ impl BeaconState { /// Sets the block root for some given slot. /// - /// Spec v0.5.0 + /// Spec v0.5.1 pub fn set_block_root( &mut self, slot: Slot, @@ -396,7 +396,7 @@ impl BeaconState { /// Safely obtains the index for `latest_randao_mixes` /// - /// Spec v0.5.0 + /// Spec v0.5.1 fn get_randao_mix_index(&self, epoch: Epoch, spec: &ChainSpec) -> Result { let current_epoch = self.current_epoch(spec); @@ -420,7 +420,7 @@ impl BeaconState { /// /// See `Self::get_randao_mix`. 
/// - /// Spec v0.5.0 + /// Spec v0.5.1 pub fn update_randao_mix( &mut self, epoch: Epoch, @@ -438,7 +438,7 @@ impl BeaconState { /// Return the randao mix at a recent ``epoch``. /// - /// Spec v0.5.0 + /// Spec v0.5.1 pub fn get_randao_mix(&self, epoch: Epoch, spec: &ChainSpec) -> Result<&Hash256, Error> { let i = self.get_randao_mix_index(epoch, spec)?; Ok(&self.latest_randao_mixes[i]) @@ -446,7 +446,7 @@ impl BeaconState { /// Set the randao mix at a recent ``epoch``. /// - /// Spec v0.5.0 + /// Spec v0.5.1 pub fn set_randao_mix( &mut self, epoch: Epoch, @@ -460,7 +460,7 @@ impl BeaconState { /// Safely obtains the index for `latest_active_index_roots`, given some `epoch`. /// - /// Spec v0.5.0 + /// Spec v0.5.1 fn get_active_index_root_index(&self, epoch: Epoch, spec: &ChainSpec) -> Result { let current_epoch = self.current_epoch(spec); @@ -482,7 +482,7 @@ impl BeaconState { /// Return the `active_index_root` at a recent `epoch`. /// - /// Spec v0.5.0 + /// Spec v0.5.1 pub fn get_active_index_root(&self, epoch: Epoch, spec: &ChainSpec) -> Result { let i = self.get_active_index_root_index(epoch, spec)?; Ok(self.latest_active_index_roots[i]) @@ -490,7 +490,7 @@ impl BeaconState { /// Set the `active_index_root` at a recent `epoch`. /// - /// Spec v0.5.0 + /// Spec v0.5.1 pub fn set_active_index_root( &mut self, epoch: Epoch, @@ -504,7 +504,7 @@ impl BeaconState { /// Replace `active_index_roots` with clones of `index_root`. /// - /// Spec v0.5.0 + /// Spec v0.5.1 pub fn fill_active_index_roots_with(&mut self, index_root: Hash256, spec: &ChainSpec) { self.latest_active_index_roots = vec![index_root; spec.latest_active_index_roots_length as usize].into() @@ -512,7 +512,7 @@ impl BeaconState { /// Safely obtains the index for latest state roots, given some `slot`. 
/// - /// Spec v0.5.0 + /// Spec v0.5.1 fn get_latest_state_roots_index(&self, slot: Slot, spec: &ChainSpec) -> Result { if (slot < self.slot) && (self.slot <= slot + spec.slots_per_historical_root as u64) { let i = slot.as_usize() % spec.slots_per_historical_root; @@ -528,7 +528,7 @@ impl BeaconState { /// Gets the state root for some slot. /// - /// Spec v0.5.0 + /// Spec v0.5.1 pub fn get_state_root(&mut self, slot: Slot, spec: &ChainSpec) -> Result<&Hash256, Error> { let i = self.get_latest_state_roots_index(slot, spec)?; Ok(&self.latest_state_roots[i]) @@ -536,7 +536,7 @@ impl BeaconState { /// Sets the latest state root for slot. /// - /// Spec v0.5.0 + /// Spec v0.5.1 pub fn set_state_root( &mut self, slot: Slot, @@ -550,7 +550,7 @@ impl BeaconState { /// Safely obtains the index for `latest_slashed_balances`, given some `epoch`. /// - /// Spec v0.5.0 + /// Spec v0.5.1 fn get_slashed_balance_index(&self, epoch: Epoch, spec: &ChainSpec) -> Result { let i = epoch.as_usize() % spec.latest_slashed_exit_length; @@ -565,7 +565,7 @@ impl BeaconState { /// Gets the total slashed balances for some epoch. /// - /// Spec v0.5.0 + /// Spec v0.5.1 pub fn get_slashed_balance(&self, epoch: Epoch, spec: &ChainSpec) -> Result { let i = self.get_slashed_balance_index(epoch, spec)?; Ok(self.latest_slashed_balances[i]) @@ -573,7 +573,7 @@ impl BeaconState { /// Sets the total slashed balances for some epoch. /// - /// Spec v0.5.0 + /// Spec v0.5.1 pub fn set_slashed_balance( &mut self, epoch: Epoch, @@ -587,7 +587,7 @@ impl BeaconState { /// Generate a seed for the given `epoch`. /// - /// Spec v0.5.0 + /// Spec v0.5.1 pub fn generate_seed(&self, epoch: Epoch, spec: &ChainSpec) -> Result { let mut input = self .get_randao_mix(epoch - spec.min_seed_lookahead, spec)? @@ -603,7 +603,7 @@ impl BeaconState { /// Return the effective balance (also known as "balance at stake") for a validator with the given ``index``. 
/// - /// Spec v0.5.0 + /// Spec v0.5.1 pub fn get_effective_balance( &self, validator_index: usize, @@ -618,14 +618,14 @@ impl BeaconState { /// Return the epoch at which an activation or exit triggered in ``epoch`` takes effect. /// - /// Spec v0.5.0 + /// Spec v0.5.1 pub fn get_delayed_activation_exit_epoch(&self, epoch: Epoch, spec: &ChainSpec) -> Epoch { epoch + 1 + spec.activation_exit_delay } /// Initiate an exit for the validator of the given `index`. /// - /// Spec v0.5.0 + /// Spec v0.5.1 pub fn initiate_validator_exit(&mut self, validator_index: usize) { self.validator_registry[validator_index].initiated_exit = true; } @@ -637,7 +637,7 @@ impl BeaconState { /// /// Note: Utilizes the cache and will fail if the appropriate cache is not initialized. /// - /// Spec v0.5.0 + /// Spec v0.5.1 pub fn get_attestation_duties( &self, validator_index: usize, @@ -653,7 +653,7 @@ impl BeaconState { /// Return the combined effective balance of an array of validators. /// - /// Spec v0.5.0 + /// Spec v0.5.1 pub fn get_total_balance( &self, validator_indices: &[usize], diff --git a/eth2/types/src/beacon_state/epoch_cache.rs b/eth2/types/src/beacon_state/epoch_cache.rs index dd9ae3403..1a63e9eb9 100644 --- a/eth2/types/src/beacon_state/epoch_cache.rs +++ b/eth2/types/src/beacon_state/epoch_cache.rs @@ -138,7 +138,7 @@ impl EpochCache { /// Returns a list of all `validator_registry` indices where the validator is active at the given /// `epoch`. /// -/// Spec v0.5.0 +/// Spec v0.5.1 pub fn get_active_validator_indices(validators: &[Validator], epoch: Epoch) -> Vec { let mut active = Vec::with_capacity(validators.len()); diff --git a/eth2/types/src/chain_spec.rs b/eth2/types/src/chain_spec.rs index 0042304f8..f3c92b42c 100644 --- a/eth2/types/src/chain_spec.rs +++ b/eth2/types/src/chain_spec.rs @@ -8,7 +8,7 @@ const GWEI: u64 = 1_000_000_000; /// Each of the BLS signature domains. 
/// -/// Spec v0.5.0 +/// Spec v0.5.1 pub enum Domain { BeaconBlock, Randao, @@ -20,7 +20,7 @@ pub enum Domain { /// Holds all the "constants" for a BeaconChain. /// -/// Spec v0.5.0 +/// Spec v0.5.1 #[derive(PartialEq, Debug, Clone, Deserialize)] #[serde(default)] pub struct ChainSpec { @@ -126,7 +126,7 @@ pub struct ChainSpec { impl ChainSpec { /// Return the number of committees in one epoch. /// - /// Spec v0.5.0 + /// Spec v0.5.1 pub fn get_epoch_committee_count(&self, active_validator_count: usize) -> u64 { std::cmp::max( 1, @@ -139,7 +139,7 @@ impl ChainSpec { /// Get the domain number that represents the fork meta and signature domain. /// - /// Spec v0.5.0 + /// Spec v0.5.1 pub fn get_domain(&self, epoch: Epoch, domain: Domain, fork: &Fork) -> u64 { let domain_constant = match domain { Domain::BeaconBlock => self.domain_beacon_block, @@ -161,7 +161,7 @@ impl ChainSpec { /// Returns a `ChainSpec` compatible with the Ethereum Foundation specification. /// - /// Spec v0.5.0 + /// Spec v0.5.1 pub fn foundation() -> Self { let genesis_slot = Slot::new(2_u64.pow(32)); let slots_per_epoch = 64; diff --git a/eth2/types/src/crosslink.rs b/eth2/types/src/crosslink.rs index a0fd7e0b3..623226ad6 100644 --- a/eth2/types/src/crosslink.rs +++ b/eth2/types/src/crosslink.rs @@ -8,7 +8,7 @@ use tree_hash_derive::TreeHash; /// Specifies the block hash for a shard at an epoch. /// -/// Spec v0.5.0 +/// Spec v0.5.1 #[derive( Debug, Clone, diff --git a/eth2/types/src/deposit.rs b/eth2/types/src/deposit.rs index bd3355a3f..291173d34 100644 --- a/eth2/types/src/deposit.rs +++ b/eth2/types/src/deposit.rs @@ -8,7 +8,7 @@ use tree_hash_derive::TreeHash; /// A deposit to potentially become a beacon chain validator. 
/// -/// Spec v0.5.0 +/// Spec v0.5.1 #[derive(Debug, PartialEq, Clone, Serialize, Deserialize, Encode, Decode, TreeHash, TestRandom)] pub struct Deposit { pub proof: TreeHashVector, diff --git a/eth2/types/src/deposit_data.rs b/eth2/types/src/deposit_data.rs index f8726e95d..bc96ac7c4 100644 --- a/eth2/types/src/deposit_data.rs +++ b/eth2/types/src/deposit_data.rs @@ -8,7 +8,7 @@ use tree_hash_derive::TreeHash; /// Data generated by the deposit contract. /// -/// Spec v0.5.0 +/// Spec v0.5.1 #[derive(Debug, PartialEq, Clone, Serialize, Deserialize, Encode, Decode, TreeHash, TestRandom)] pub struct DepositData { pub amount: u64, diff --git a/eth2/types/src/deposit_input.rs b/eth2/types/src/deposit_input.rs index 828496293..be2106cb4 100644 --- a/eth2/types/src/deposit_input.rs +++ b/eth2/types/src/deposit_input.rs @@ -10,7 +10,7 @@ use tree_hash_derive::{SignedRoot, TreeHash}; /// The data supplied by the user to the deposit contract. /// -/// Spec v0.5.0 +/// Spec v0.5.1 #[derive( Debug, PartialEq, @@ -33,7 +33,7 @@ pub struct DepositInput { impl DepositInput { /// Generate the 'proof_of_posession' signature for a given DepositInput details. /// - /// Spec v0.5.0 + /// Spec v0.5.1 pub fn create_proof_of_possession( &self, secret_key: &SecretKey, @@ -49,7 +49,7 @@ impl DepositInput { /// Verify that proof-of-possession is valid. /// - /// Spec v0.5.0 + /// Spec v0.5.1 pub fn validate_proof_of_possession( &self, epoch: Epoch, diff --git a/eth2/types/src/eth1_data.rs b/eth2/types/src/eth1_data.rs index c1348cfba..2ad460d13 100644 --- a/eth2/types/src/eth1_data.rs +++ b/eth2/types/src/eth1_data.rs @@ -8,7 +8,7 @@ use tree_hash_derive::TreeHash; /// Contains data obtained from the Eth1 chain. 
/// -/// Spec v0.5.0 +/// Spec v0.5.1 #[derive( Debug, PartialEq, Clone, Default, Serialize, Deserialize, Encode, Decode, TreeHash, TestRandom, )] diff --git a/eth2/types/src/eth1_data_vote.rs b/eth2/types/src/eth1_data_vote.rs index a9741f065..7a77c8ff0 100644 --- a/eth2/types/src/eth1_data_vote.rs +++ b/eth2/types/src/eth1_data_vote.rs @@ -8,7 +8,7 @@ use tree_hash_derive::TreeHash; /// A summation of votes for some `Eth1Data`. /// -/// Spec v0.5.0 +/// Spec v0.5.1 #[derive( Debug, PartialEq, Clone, Default, Serialize, Deserialize, Encode, Decode, TreeHash, TestRandom, )] diff --git a/eth2/types/src/fork.rs b/eth2/types/src/fork.rs index 99908e9ed..d99842855 100644 --- a/eth2/types/src/fork.rs +++ b/eth2/types/src/fork.rs @@ -11,7 +11,7 @@ use tree_hash_derive::TreeHash; /// Specifies a fork of the `BeaconChain`, to prevent replay attacks. /// -/// Spec v0.5.0 +/// Spec v0.5.1 #[derive( Debug, Clone, PartialEq, Default, Serialize, Deserialize, Encode, Decode, TreeHash, TestRandom, )] @@ -26,7 +26,7 @@ pub struct Fork { impl Fork { /// Initialize the `Fork` from the genesis parameters in the `spec`. /// - /// Spec v0.5.0 + /// Spec v0.5.1 pub fn genesis(spec: &ChainSpec) -> Self { let mut current_version: [u8; 4] = [0; 4]; current_version.copy_from_slice(&int_to_bytes4(spec.genesis_fork_version)); @@ -40,7 +40,7 @@ impl Fork { /// Return the fork version of the given ``epoch``. /// - /// Spec v0.5.0 + /// Spec v0.5.1 pub fn get_fork_version(&self, epoch: Epoch) -> [u8; 4] { if epoch < self.epoch { return self.previous_version; diff --git a/eth2/types/src/historical_batch.rs b/eth2/types/src/historical_batch.rs index 23c26901e..c4f62fcfc 100644 --- a/eth2/types/src/historical_batch.rs +++ b/eth2/types/src/historical_batch.rs @@ -8,7 +8,7 @@ use tree_hash_derive::TreeHash; /// Historical block and state roots. 
/// -/// Spec v0.5.0 +/// Spec v0.5.1 #[derive(Debug, Clone, PartialEq, Serialize, Deserialize, Encode, Decode, TreeHash, TestRandom)] pub struct HistoricalBatch { pub block_roots: TreeHashVector, diff --git a/eth2/types/src/pending_attestation.rs b/eth2/types/src/pending_attestation.rs index 5cbe1edeb..ce9ce3d77 100644 --- a/eth2/types/src/pending_attestation.rs +++ b/eth2/types/src/pending_attestation.rs @@ -8,7 +8,7 @@ use tree_hash_derive::TreeHash; /// An attestation that has been included in the state but not yet fully processed. /// -/// Spec v0.5.0 +/// Spec v0.5.1 #[derive(Debug, Clone, PartialEq, Serialize, Deserialize, Encode, Decode, TreeHash, TestRandom)] pub struct PendingAttestation { pub aggregation_bitfield: Bitfield, diff --git a/eth2/types/src/proposer_slashing.rs b/eth2/types/src/proposer_slashing.rs index 901f02388..a3501a5bd 100644 --- a/eth2/types/src/proposer_slashing.rs +++ b/eth2/types/src/proposer_slashing.rs @@ -8,7 +8,7 @@ use tree_hash_derive::TreeHash; /// Two conflicting proposals from the same proposer (validator). /// -/// Spec v0.5.0 +/// Spec v0.5.1 #[derive(Debug, PartialEq, Clone, Serialize, Deserialize, Encode, Decode, TreeHash, TestRandom)] pub struct ProposerSlashing { pub proposer_index: u64, diff --git a/eth2/types/src/relative_epoch.rs b/eth2/types/src/relative_epoch.rs index 8f895e97a..6538ca4aa 100644 --- a/eth2/types/src/relative_epoch.rs +++ b/eth2/types/src/relative_epoch.rs @@ -10,7 +10,7 @@ pub enum Error { /// Defines the epochs relative to some epoch. Most useful when referring to the committees prior /// to and following some epoch. /// -/// Spec v0.5.0 +/// Spec v0.5.1 #[derive(Debug, PartialEq, Clone, Copy)] pub enum RelativeEpoch { /// The prior epoch. @@ -32,7 +32,7 @@ pub enum RelativeEpoch { impl RelativeEpoch { /// Returns the `epoch` that `self` refers to, with respect to the `base` epoch. 
/// - /// Spec v0.5.0 + /// Spec v0.5.1 pub fn into_epoch(self, base: Epoch) -> Epoch { match self { RelativeEpoch::Previous => base - 1, @@ -51,7 +51,7 @@ impl RelativeEpoch { /// - `AmbiguiousNextEpoch` whenever `other` is one after `base`, because it's unknowable if /// there will be a registry change. /// - /// Spec v0.5.0 + /// Spec v0.5.1 pub fn from_epoch(base: Epoch, other: Epoch) -> Result { if other == base - 1 { Ok(RelativeEpoch::Previous) diff --git a/eth2/types/src/slashable_attestation.rs b/eth2/types/src/slashable_attestation.rs index 37462f006..9c460e482 100644 --- a/eth2/types/src/slashable_attestation.rs +++ b/eth2/types/src/slashable_attestation.rs @@ -10,7 +10,7 @@ use tree_hash_derive::{SignedRoot, TreeHash}; /// /// To be included in an `AttesterSlashing`. /// -/// Spec v0.5.0 +/// Spec v0.5.1 #[derive( Debug, PartialEq, @@ -35,14 +35,14 @@ pub struct SlashableAttestation { impl SlashableAttestation { /// Check if ``attestation_data_1`` and ``attestation_data_2`` have the same target. /// - /// Spec v0.5.0 + /// Spec v0.5.1 pub fn is_double_vote(&self, other: &SlashableAttestation, spec: &ChainSpec) -> bool { self.data.slot.epoch(spec.slots_per_epoch) == other.data.slot.epoch(spec.slots_per_epoch) } /// Check if ``attestation_data_1`` surrounds ``attestation_data_2``. /// - /// Spec v0.5.0 + /// Spec v0.5.1 pub fn is_surround_vote(&self, other: &SlashableAttestation, spec: &ChainSpec) -> bool { let source_epoch_1 = self.data.source_epoch; let source_epoch_2 = other.data.source_epoch; diff --git a/eth2/types/src/transfer.rs b/eth2/types/src/transfer.rs index f40050bc4..82ead03d5 100644 --- a/eth2/types/src/transfer.rs +++ b/eth2/types/src/transfer.rs @@ -11,7 +11,7 @@ use tree_hash_derive::{SignedRoot, TreeHash}; /// The data submitted to the deposit contract. 
/// -/// Spec v0.5.0 +/// Spec v0.5.1 #[derive( Debug, Clone, diff --git a/eth2/types/src/validator.rs b/eth2/types/src/validator.rs index 67b4e85df..bbd68ed2b 100644 --- a/eth2/types/src/validator.rs +++ b/eth2/types/src/validator.rs @@ -7,7 +7,7 @@ use tree_hash_derive::TreeHash; /// Information about a `BeaconChain` validator. /// -/// Spec v0.5.0 +/// Spec v0.5.1 #[derive(Debug, Clone, PartialEq, Serialize, Deserialize, Encode, Decode, TestRandom, TreeHash)] pub struct Validator { pub pubkey: PublicKey, diff --git a/eth2/types/src/voluntary_exit.rs b/eth2/types/src/voluntary_exit.rs index 16d22c544..cb872cb98 100644 --- a/eth2/types/src/voluntary_exit.rs +++ b/eth2/types/src/voluntary_exit.rs @@ -9,7 +9,7 @@ use tree_hash_derive::{SignedRoot, TreeHash}; /// An exit voluntarily submitted a validator who wishes to withdraw. /// -/// Spec v0.5.0 +/// Spec v0.5.1 #[derive( Debug, PartialEq, From 701cc00d08029343dd6b298e1f42f84d56934316 Mon Sep 17 00:00:00 2001 From: Michael Sproul Date: Wed, 17 Apr 2019 11:29:06 +1000 Subject: [PATCH 73/89] questionable patch for TreeHashVector --- eth2/state_processing/tests/tests.rs | 2 +- eth2/types/src/tree_hash_vector.rs | 6 ++++++ 2 files changed, 7 insertions(+), 1 deletion(-) diff --git a/eth2/state_processing/tests/tests.rs b/eth2/state_processing/tests/tests.rs index 6491e255a..ccad198bb 100644 --- a/eth2/state_processing/tests/tests.rs +++ b/eth2/state_processing/tests/tests.rs @@ -27,7 +27,7 @@ impl ExpectedState { ($field_name:ident) => { if self.$field_name.as_ref().map_or(true, |$field_name| { println!(" > Checking {}", stringify!($field_name)); - $field_name == &state.$field_name + &state.$field_name == $field_name }) { vec![] } else { diff --git a/eth2/types/src/tree_hash_vector.rs b/eth2/types/src/tree_hash_vector.rs index 1cc8e40a5..9b77e13dc 100644 --- a/eth2/types/src/tree_hash_vector.rs +++ b/eth2/types/src/tree_hash_vector.rs @@ -33,6 +33,12 @@ impl DerefMut for TreeHashVector { } } +impl PartialEq> for 
TreeHashVector { + fn eq(&self, other: &Vec) -> bool { + &self.0 == other + } +} + impl tree_hash::TreeHash for TreeHashVector where T: TreeHash, From f592183aa9153ebe4aba05f6a9905469e71dbe65 Mon Sep 17 00:00:00 2001 From: Michael Sproul Date: Wed, 17 Apr 2019 11:59:40 +1000 Subject: [PATCH 74/89] Fix signed_root vs tree_hash_root in per_slot --- eth2/state_processing/src/per_slot_processing.rs | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/eth2/state_processing/src/per_slot_processing.rs b/eth2/state_processing/src/per_slot_processing.rs index 378d5dd2e..194e0d6c9 100644 --- a/eth2/state_processing/src/per_slot_processing.rs +++ b/eth2/state_processing/src/per_slot_processing.rs @@ -1,5 +1,5 @@ use crate::*; -use tree_hash::TreeHash; +use tree_hash::{SignedRoot, TreeHash}; use types::*; #[derive(Debug, PartialEq)] @@ -38,7 +38,7 @@ fn cache_state(state: &mut BeaconState, spec: &ChainSpec) -> Result<(), Error> { state.latest_block_header.state_root = previous_slot_state_root } - let latest_block_root = Hash256::from_slice(&state.latest_block_header.tree_hash_root()[..]); + let latest_block_root = Hash256::from_slice(&state.latest_block_header.signed_root()[..]); state.set_block_root(previous_slot, latest_block_root, spec)?; // Set the state slot back to what it should be. From c3779caedefd86b9ecb24eba8cd553be065d9bb5 Mon Sep 17 00:00:00 2001 From: Paul Hauner Date: Wed, 17 Apr 2019 13:59:40 +1000 Subject: [PATCH 75/89] Add extra info to block proc. 
error message --- eth2/state_processing/src/per_block_processing.rs | 9 +++++++-- eth2/state_processing/src/per_block_processing/errors.rs | 5 ++++- 2 files changed, 11 insertions(+), 3 deletions(-) diff --git a/eth2/state_processing/src/per_block_processing.rs b/eth2/state_processing/src/per_block_processing.rs index 257d92acf..58b948f62 100644 --- a/eth2/state_processing/src/per_block_processing.rs +++ b/eth2/state_processing/src/per_block_processing.rs @@ -107,9 +107,14 @@ pub fn process_block_header( ) -> Result<(), Error> { verify!(block.slot == state.slot, Invalid::StateSlotMismatch); + let expected_previous_block_root = + Hash256::from_slice(&state.latest_block_header.signed_root()); verify!( - block.previous_block_root == Hash256::from_slice(&state.latest_block_header.signed_root()), - Invalid::ParentBlockRootMismatch + block.previous_block_root == expected_previous_block_root, + Invalid::ParentBlockRootMismatch { + state: expected_previous_block_root, + block: block.previous_block_root, + } ); state.latest_block_header = block.temporary_block_header(spec); diff --git a/eth2/state_processing/src/per_block_processing/errors.rs b/eth2/state_processing/src/per_block_processing/errors.rs index 9c36e0238..d8627d359 100644 --- a/eth2/state_processing/src/per_block_processing/errors.rs +++ b/eth2/state_processing/src/per_block_processing/errors.rs @@ -67,7 +67,10 @@ impl_from_beacon_state_error!(BlockProcessingError); #[derive(Debug, PartialEq)] pub enum BlockInvalid { StateSlotMismatch, - ParentBlockRootMismatch, + ParentBlockRootMismatch { + state: Hash256, + block: Hash256, + }, BadSignature, BadRandaoSignature, MaxAttestationsExceeded, From bf1a93f44422d6f048f185d36d30f5acb76d5d94 Mon Sep 17 00:00:00 2001 From: Paul Hauner Date: Wed, 17 Apr 2019 14:00:00 +1000 Subject: [PATCH 76/89] Allocate correctly for tree hash --- eth2/utils/tree_hash_derive/src/lib.rs | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git 
a/eth2/utils/tree_hash_derive/src/lib.rs b/eth2/utils/tree_hash_derive/src/lib.rs index 4b7761f91..343287313 100644 --- a/eth2/utils/tree_hash_derive/src/lib.rs +++ b/eth2/utils/tree_hash_derive/src/lib.rs @@ -166,11 +166,12 @@ pub fn tree_hash_signed_root_derive(input: TokenStream) -> TokenStream { }; let idents = get_signed_root_named_field_idents(&struct_data); + let num_elems = idents.len(); let output = quote! { impl tree_hash::SignedRoot for #name { fn signed_root(&self) -> Vec { - let mut leaves = Vec::with_capacity(4 * tree_hash::HASHSIZE); + let mut leaves = Vec::with_capacity(#num_elems * tree_hash::HASHSIZE); #( leaves.append(&mut self.#idents.tree_hash_root()); From 343909ef31010a1e5b49e6c255d3474fb6ef9a32 Mon Sep 17 00:00:00 2001 From: Paul Hauner Date: Wed, 17 Apr 2019 17:17:07 +1000 Subject: [PATCH 77/89] Fix boolean-bitfield serialization --- .../src/common/verify_bitfield.rs | 54 ++++++++++- eth2/utils/boolean-bitfield/src/lib.rs | 93 ++++++++++--------- 2 files changed, 98 insertions(+), 49 deletions(-) diff --git a/eth2/state_processing/src/common/verify_bitfield.rs b/eth2/state_processing/src/common/verify_bitfield.rs index 7b3c07086..570a240f1 100644 --- a/eth2/state_processing/src/common/verify_bitfield.rs +++ b/eth2/state_processing/src/common/verify_bitfield.rs @@ -25,9 +25,55 @@ mod test { #[test] fn bitfield_length() { - assert!(verify_bitfield_length( - &Bitfield::from_bytes(&[0b10000000]), - 4 - )); + assert_eq!( + verify_bitfield_length(&Bitfield::from_bytes(&[0b0000_0001]), 4), + true + ); + + assert_eq!( + verify_bitfield_length(&Bitfield::from_bytes(&[0b0001_0001]), 4), + false + ); + + assert_eq!( + verify_bitfield_length(&Bitfield::from_bytes(&[0b0000_0000]), 4), + true + ); + + assert_eq!( + verify_bitfield_length(&Bitfield::from_bytes(&[0b1000_0000]), 8), + true + ); + + assert_eq!( + verify_bitfield_length(&Bitfield::from_bytes(&[0b1000_0000, 0b0000_0000]), 16), + true + ); + + assert_eq!( + 
verify_bitfield_length(&Bitfield::from_bytes(&[0b1000_0000, 0b0000_0000]), 15), + false + ); + + assert_eq!( + verify_bitfield_length(&Bitfield::from_bytes(&[0b0000_0000, 0b0000_0000]), 8), + false + ); + + assert_eq!( + verify_bitfield_length( + &Bitfield::from_bytes(&[0b0000_0000, 0b0000_0000, 0b0000_0000]), + 8 + ), + false + ); + + assert_eq!( + verify_bitfield_length( + &Bitfield::from_bytes(&[0b0000_0000, 0b0000_0000, 0b0000_0000]), + 24 + ), + true + ); } } diff --git a/eth2/utils/boolean-bitfield/src/lib.rs b/eth2/utils/boolean-bitfield/src/lib.rs index c19702ec9..a744c9498 100644 --- a/eth2/utils/boolean-bitfield/src/lib.rs +++ b/eth2/utils/boolean-bitfield/src/lib.rs @@ -54,10 +54,15 @@ impl BooleanBitfield { /// Create a new bitfield using the supplied `bytes` as input pub fn from_bytes(bytes: &[u8]) -> Self { Self { - 0: BitVec::from_bytes(bytes), + 0: BitVec::from_bytes(&reverse_bit_order(bytes.to_vec())), } } + /// Returns a vector of bytes representing the bitfield + pub fn to_bytes(&self) -> Vec { + reverse_bit_order(self.0.to_bytes().to_vec()) + } + /// Read the value of a bit. /// /// If the index is in bounds, then result is Ok(value) where value is `true` if the bit is 1 and `false` if the bit is 0. @@ -86,11 +91,6 @@ impl BooleanBitfield { previous } - /// Returns the index of the highest set bit. Some(n) if some bit is set, None otherwise. - pub fn highest_set_bit(&self) -> Option { - self.0.iter().rposition(|bit| bit) - } - /// Returns the number of bits in this bitfield. pub fn len(&self) -> usize { self.0.len() @@ -116,12 +116,6 @@ impl BooleanBitfield { self.0.iter().filter(|&bit| bit).count() } - /// Returns a vector of bytes representing the bitfield - /// Note that this returns the bit layout of the underlying implementation in the `bit-vec` crate. - pub fn to_bytes(&self) -> Vec { - self.0.to_bytes() - } - /// Compute the intersection (binary-and) of this bitfield with another. Lengths must match. 
pub fn intersection(&self, other: &Self) -> Self { let mut res = self.clone(); @@ -218,17 +212,7 @@ impl Decodable for BooleanBitfield { Ok((BooleanBitfield::new(), index + ssz::LENGTH_BYTES)) } else { let bytes = &bytes[(index + 4)..(index + len + 4)]; - - let count = len * 8; - let mut field = BooleanBitfield::with_capacity(count); - for (byte_index, byte) in bytes.iter().enumerate() { - for i in 0..8 { - let bit = byte & (128 >> i); - if bit != 0 { - field.set(8 * byte_index + i, true); - } - } - } + let field = BooleanBitfield::from_bytes(bytes); let index = index + ssz::LENGTH_BYTES + len; Ok((field, index)) @@ -251,7 +235,7 @@ impl Serialize for BooleanBitfield { where S: Serializer, { - serializer.serialize_str(&encode(&reverse_bit_order(self.to_bytes()))) + serializer.serialize_str(&encode(self.to_bytes())) } } @@ -265,11 +249,27 @@ impl<'de> Deserialize<'de> for BooleanBitfield { // bit from the end of the hex string, e.g. // "0xef01" => [0xef, 0x01] => [0b1000_0000, 0b1111_1110] let bytes = deserializer.deserialize_str(PrefixedHexVisitor)?; - Ok(BooleanBitfield::from_bytes(&reverse_bit_order(bytes))) + Ok(BooleanBitfield::from_bytes(&bytes)) } } -tree_hash_ssz_encoding_as_list!(BooleanBitfield); +impl tree_hash::TreeHash for BooleanBitfield { + fn tree_hash_type() -> tree_hash::TreeHashType { + tree_hash::TreeHashType::List + } + + fn tree_hash_packed_encoding(&self) -> Vec { + unreachable!("List should never be packed.") + } + + fn tree_hash_packing_factor() -> usize { + unreachable!("List should never be packed.") + } + + fn tree_hash_root(&self) -> Vec { + self.to_bytes().tree_hash_root() + } +} #[cfg(test)] mod tests { @@ -322,7 +322,7 @@ mod tests { assert_eq!(field.num_set_bits(), 100); } - const INPUT: &[u8] = &[0b0000_0010, 0b0000_0010]; + const INPUT: &[u8] = &[0b0100_0000, 0b0100_0000]; #[test] fn test_get_from_bitfield() { @@ -348,18 +348,6 @@ mod tests { assert!(!previous); } - #[test] - fn test_highest_set_bit() { - let field = 
BooleanBitfield::from_bytes(INPUT); - assert_eq!(field.highest_set_bit().unwrap(), 14); - - let field = BooleanBitfield::from_bytes(&[0b0000_0011]); - assert_eq!(field.highest_set_bit().unwrap(), 7); - - let field = BooleanBitfield::new(); - assert_eq!(field.highest_set_bit(), None); - } - #[test] fn test_len() { let field = BooleanBitfield::from_bytes(INPUT); @@ -440,15 +428,30 @@ mod tests { #[test] fn test_ssz_encode() { let field = create_test_bitfield(); - let mut stream = SszStream::new(); stream.append(&field); - assert_eq!(stream.drain(), vec![2, 0, 0, 0, 225, 192]); + assert_eq!(stream.drain(), vec![2, 0, 0, 0, 0b0000_0011, 0b1000_0111]); let field = BooleanBitfield::from_elem(18, true); let mut stream = SszStream::new(); stream.append(&field); - assert_eq!(stream.drain(), vec![3, 0, 0, 0, 255, 255, 192]); + assert_eq!( + stream.drain(), + vec![3, 0, 0, 0, 0b0000_0011, 0b1111_1111, 0b1111_1111] + ); + + let mut b = BooleanBitfield::new(); + b.set(1, true); + assert_eq!( + ssz_encode(&b), + vec![ + 0b0000_0001, + 0b0000_0000, + 0b0000_0000, + 0b0000_0000, + 0b0000_0010 + ] + ); } fn create_test_bitfield() -> BooleanBitfield { @@ -464,7 +467,7 @@ mod tests { #[test] fn test_ssz_decode() { - let encoded = vec![2, 0, 0, 0, 225, 192]; + let encoded = vec![2, 0, 0, 0, 0b0000_0011, 0b1000_0111]; let field = decode::(&encoded).unwrap(); let expected = create_test_bitfield(); assert_eq!(field, expected); @@ -480,8 +483,8 @@ mod tests { use serde_yaml::Value; let data: &[(_, &[_])] = &[ - ("0x01", &[0b10000000]), - ("0xf301", &[0b10000000, 0b11001111]), + ("0x01", &[0b00000001]), + ("0xf301", &[0b11110011, 0b00000001]), ]; for (hex_data, bytes) in data { let bitfield = BooleanBitfield::from_bytes(bytes); From 745d3605669705b3e2b74742e5a961ed364682fc Mon Sep 17 00:00:00 2001 From: Paul Hauner Date: Wed, 17 Apr 2019 17:17:43 +1000 Subject: [PATCH 78/89] Store state roots during slot processing --- eth2/state_processing/src/per_slot_processing.rs | 3 +++ 1 file 
changed, 3 insertions(+) diff --git a/eth2/state_processing/src/per_slot_processing.rs b/eth2/state_processing/src/per_slot_processing.rs index 194e0d6c9..a68f98c6d 100644 --- a/eth2/state_processing/src/per_slot_processing.rs +++ b/eth2/state_processing/src/per_slot_processing.rs @@ -38,6 +38,9 @@ fn cache_state(state: &mut BeaconState, spec: &ChainSpec) -> Result<(), Error> { state.latest_block_header.state_root = previous_slot_state_root } + // Store the previous slot's post state transition root. + state.set_state_root(previous_slot, previous_slot_state_root, spec)?; + let latest_block_root = Hash256::from_slice(&state.latest_block_header.signed_root()[..]); state.set_block_root(previous_slot, latest_block_root, spec)?; From 332795e8b7e14444871ff4bfc7f492cd0902f546 Mon Sep 17 00:00:00 2001 From: Michael Sproul Date: Wed, 17 Apr 2019 18:00:14 +1000 Subject: [PATCH 79/89] Revert "questionable patch for TreeHashVector" This reverts commit 701cc00d08029343dd6b298e1f42f84d56934316. --- eth2/state_processing/tests/tests.rs | 2 +- eth2/types/src/tree_hash_vector.rs | 6 ------ 2 files changed, 1 insertion(+), 7 deletions(-) diff --git a/eth2/state_processing/tests/tests.rs b/eth2/state_processing/tests/tests.rs index ccad198bb..6491e255a 100644 --- a/eth2/state_processing/tests/tests.rs +++ b/eth2/state_processing/tests/tests.rs @@ -27,7 +27,7 @@ impl ExpectedState { ($field_name:ident) => { if self.$field_name.as_ref().map_or(true, |$field_name| { println!(" > Checking {}", stringify!($field_name)); - &state.$field_name == $field_name + $field_name == &state.$field_name }) { vec![] } else { diff --git a/eth2/types/src/tree_hash_vector.rs b/eth2/types/src/tree_hash_vector.rs index 9b77e13dc..1cc8e40a5 100644 --- a/eth2/types/src/tree_hash_vector.rs +++ b/eth2/types/src/tree_hash_vector.rs @@ -33,12 +33,6 @@ impl DerefMut for TreeHashVector { } } -impl PartialEq> for TreeHashVector { - fn eq(&self, other: &Vec) -> bool { - &self.0 == other - } -} - impl 
tree_hash::TreeHash for TreeHashVector where T: TreeHash, From b201c52140134ddefb7b19ad8597f4b6054ebc09 Mon Sep 17 00:00:00 2001 From: Michael Sproul Date: Wed, 17 Apr 2019 18:07:28 +1000 Subject: [PATCH 80/89] state transition tests: use TreeHashVector --- eth2/state_processing/tests/tests.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/eth2/state_processing/tests/tests.rs b/eth2/state_processing/tests/tests.rs index 6491e255a..fcd034158 100644 --- a/eth2/state_processing/tests/tests.rs +++ b/eth2/state_processing/tests/tests.rs @@ -16,7 +16,7 @@ pub struct ExpectedState { pub current_epoch_attestations: Option>, pub historical_roots: Option>, pub finalized_epoch: Option, - pub latest_block_roots: Option>, + pub latest_block_roots: Option>, } impl ExpectedState { From 2155e3e293390539d2391eeda906f573a65fa59f Mon Sep 17 00:00:00 2001 From: Paul Hauner Date: Wed, 17 Apr 2019 18:54:21 +1000 Subject: [PATCH 81/89] Fix non-compiling tests --- eth2/state_processing/tests/tests.rs | 1 - 1 file changed, 1 deletion(-) diff --git a/eth2/state_processing/tests/tests.rs b/eth2/state_processing/tests/tests.rs index ccad198bb..dd611b459 100644 --- a/eth2/state_processing/tests/tests.rs +++ b/eth2/state_processing/tests/tests.rs @@ -1,6 +1,5 @@ use serde_derive::Deserialize; use serde_yaml; -#[cfg(not(debug_assertions))] use state_processing::{per_block_processing, per_slot_processing}; use std::{fs::File, io::prelude::*, path::PathBuf}; use types::*; From 5e81a995ea4b9d7e623c3d7720aa935be58a2aca Mon Sep 17 00:00:00 2001 From: Paul Hauner Date: Wed, 17 Apr 2019 18:54:39 +1000 Subject: [PATCH 82/89] Use signed_root for canonical header ID --- eth2/types/src/beacon_block_header.rs | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/eth2/types/src/beacon_block_header.rs b/eth2/types/src/beacon_block_header.rs index fa71bd26b..e4db3a721 100644 --- a/eth2/types/src/beacon_block_header.rs +++ b/eth2/types/src/beacon_block_header.rs @@ -5,7 +5,7 
@@ use rand::RngCore; use serde_derive::{Deserialize, Serialize}; use ssz_derive::{Decode, Encode}; use test_random_derive::TestRandom; -use tree_hash::TreeHash; +use tree_hash::{SignedRoot, TreeHash}; use tree_hash_derive::{SignedRoot, TreeHash}; /// A header of a `BeaconBlock`. @@ -37,7 +37,7 @@ impl BeaconBlockHeader { /// /// Spec v0.5.1 pub fn canonical_root(&self) -> Hash256 { - Hash256::from_slice(&self.tree_hash_root()[..]) + Hash256::from_slice(&self.signed_root()[..]) } /// Given a `body`, consumes `self` and returns a complete `BeaconBlock`. From 7b853b33d54052c448020f4ffef3a630241f666b Mon Sep 17 00:00:00 2001 From: Paul Hauner Date: Wed, 17 Apr 2019 21:57:48 +1000 Subject: [PATCH 83/89] Add env vars to travis --- .travis.yml | 9 +++++---- 1 file changed, 5 insertions(+), 4 deletions(-) diff --git a/.travis.yml b/.travis.yml index 6233ea68b..f75f9e6ea 100644 --- a/.travis.yml +++ b/.travis.yml @@ -6,11 +6,12 @@ before_install: - sudo mv protoc3/include/* /usr/local/include/ - sudo chown $USER /usr/local/bin/protoc - sudo chown -R $USER /usr/local/include/google +env: + - BUILD= + - BUILD=--release script: - - cargo build --verbose --all - - cargo build --verbose --release --all - - cargo test --verbose --all - - cargo test --verbose --release --all + - cargo build --verbose $BUILD --all + - cargo test --verbose $BUILD --all - cargo test --manifest-path eth2/state_processing/Cargo.toml --verbose --release --features fake_crypto - cargo fmt --all -- --check # No clippy until later... 
From 381388d9c2ee5f12035958f36e147ef8952da503 Mon Sep 17 00:00:00 2001 From: Paul Hauner Date: Thu, 18 Apr 2019 06:45:25 +1000 Subject: [PATCH 84/89] Move state processing test into own build --- .travis.yml | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/.travis.yml b/.travis.yml index f75f9e6ea..7a0849894 100644 --- a/.travis.yml +++ b/.travis.yml @@ -7,12 +7,12 @@ before_install: - sudo chown $USER /usr/local/bin/protoc - sudo chown -R $USER /usr/local/include/google env: - - BUILD= - - BUILD=--release + - BUILD=--all + - BUILD=--release --all + - BUILD= --manifest-path eth2/state_processing/Cargo.toml --release --features fake_crypto script: - - cargo build --verbose $BUILD --all - - cargo test --verbose $BUILD --all - - cargo test --manifest-path eth2/state_processing/Cargo.toml --verbose --release --features fake_crypto + - cargo build --verbose $BUILD + - cargo test --verbose $BUILD - cargo fmt --all -- --check # No clippy until later... #- cargo clippy From 2ee3b05bd382afcbeaf62934f390d8a17befd5e0 Mon Sep 17 00:00:00 2001 From: Paul Hauner Date: Thu, 18 Apr 2019 19:10:13 +1000 Subject: [PATCH 85/89] Only build in debug for beta and nightly --- .travis.yml | 9 +++++++++ 1 file changed, 9 insertions(+) diff --git a/.travis.yml b/.travis.yml index 7a0849894..70b9d2133 100644 --- a/.travis.yml +++ b/.travis.yml @@ -24,6 +24,15 @@ matrix: allow_failures: - rust: nightly fast_finish: true + exclude: + - rust: beta + env: BUILD=--release --all + - rust: beta + env: BUILD= --manifest-path eth2/state_processing/Cargo.toml --release --features fake_crypto + - rust: nightly + env: BUILD=--release --all + - rust: nightly + env: BUILD= --manifest-path eth2/state_processing/Cargo.toml --release --features fake_crypto install: - rustup component add rustfmt - rustup component add clippy From ab75f7cbc713f85fa56d2f41e7c928818c8d005c Mon Sep 17 00:00:00 2001 From: Paul Hauner Date: Wed, 24 Apr 2019 11:37:19 +1000 Subject: [PATCH 86/89] Fix cargo 
cmd in Jenkinsfile --- Jenkinsfile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Jenkinsfile b/Jenkinsfile index 48a07e1e7..845cd357f 100644 --- a/Jenkinsfile +++ b/Jenkinsfile @@ -24,7 +24,7 @@ pipeline { sh 'cargo test --verbose --all' sh 'cargo test --verbose --all --release' sh 'cargo test --manifest-path eth2/state_processing/Cargo.toml --verbose \ - --release --features fake_crypto --ignored' + --release --features fake_crypto -- --include-ignored' } } From 6ae00838437a932add676cbb821763d99c99d5af Mon Sep 17 00:00:00 2001 From: Paul Hauner Date: Wed, 24 Apr 2019 11:41:33 +1000 Subject: [PATCH 87/89] Add travis caching. Reference for commands: https://levans.fr/rust_travis_cache.html --- .travis.yml | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/.travis.yml b/.travis.yml index 70b9d2133..f89db54c9 100644 --- a/.travis.yml +++ b/.travis.yml @@ -1,4 +1,9 @@ language: rust +cache: + directories: + - /home/travis/.cargo +before_cache: + - rm -rf /home/travis/.cargo/registry before_install: - curl -OL https://github.com/google/protobuf/releases/download/v3.4.0/protoc-3.4.0-linux-x86_64.zip - unzip protoc-3.4.0-linux-x86_64.zip -d protoc3 From 0bb9c59b4764f95ab07bbc4cacb0f2b95fae57b7 Mon Sep 17 00:00:00 2001 From: Paul Hauner Date: Thu, 25 Apr 2019 12:24:45 +1000 Subject: [PATCH 88/89] Add ignored and non-ignored state-trans tests --- Jenkinsfile | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/Jenkinsfile b/Jenkinsfile index 845cd357f..11cbf0abe 100644 --- a/Jenkinsfile +++ b/Jenkinsfile @@ -24,7 +24,9 @@ pipeline { sh 'cargo test --verbose --all' sh 'cargo test --verbose --all --release' sh 'cargo test --manifest-path eth2/state_processing/Cargo.toml --verbose \ - --release --features fake_crypto -- --include-ignored' + --release --features fake_crypto' + sh 'cargo test --manifest-path eth2/state_processing/Cargo.toml --verbose \ + --release --features fake_crypto -- --ignored' } } From 
a76b24e274a30694004f919372698e707cfb9dff Mon Sep 17 00:00:00 2001 From: Paul Hauner Date: Thu, 25 Apr 2019 12:25:01 +1000 Subject: [PATCH 89/89] Disable running docs example for test harness --- beacon_node/beacon_chain/test_harness/src/lib.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/beacon_node/beacon_chain/test_harness/src/lib.rs b/beacon_node/beacon_chain/test_harness/src/lib.rs index 0703fd4a5..e93fa7003 100644 --- a/beacon_node/beacon_chain/test_harness/src/lib.rs +++ b/beacon_node/beacon_chain/test_harness/src/lib.rs @@ -8,7 +8,7 @@ //! producing blocks and attestations. //! //! Example: -//! ``` +//! ```rust,no_run //! use test_harness::BeaconChainHarness; //! use types::ChainSpec; //!