diff --git a/eth2/utils/tree_hash/src/cached_tree_hash.rs b/eth2/utils/tree_hash/src/cached_tree_hash.rs
index 46190ff3c..66ccbb680 100644
--- a/eth2/utils/tree_hash/src/cached_tree_hash.rs
+++ b/eth2/utils/tree_hash/src/cached_tree_hash.rs
@@ -6,8 +6,10 @@ use std::ops::Range;
 pub mod btree_overlay;
 pub mod impls;
 pub mod resize;
+pub mod tree_hash_cache;
 
 pub use btree_overlay::BTreeOverlay;
+pub use tree_hash_cache::TreeHashCache;
 
 #[derive(Debug, PartialEq)]
 pub struct CachedTreeHasher {
@@ -79,6 +81,8 @@ pub trait CachedTreeHashSubTree<Item>: TreeHash {
         depth: usize,
     ) -> Result<BTreeOverlay, Error>;
 
+    fn num_tree_hash_cache_chunks(&self) -> usize;
+
     fn new_tree_hash_cache(&self, depth: usize) -> Result<TreeHashCache, Error>;
 
     fn update_tree_hash_cache(&self, cache: &mut TreeHashCache) -> Result<(), Error>;
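Editor's note on the trait addition above: `num_tree_hash_cache_chunks` lets a value report how many 32-byte chunks it occupies in its parent's cache without building a full `BTreeOverlay`. A minimal, standalone sketch of the counting convention this diff adopts (the free functions below are illustrative, not part of the crate):

```rust
// Sketch only: mirrors the convention used by the impls later in this diff.
// A basic fixed-width type (e.g. u64) occupies exactly one 32-byte chunk.
fn basic_chunks() -> usize {
    1
}

// A list occupies its merkle-tree chunks plus two extra nodes: the
// length-mixed-in root that precedes the subtree and the length chunk
// that follows it (see `add_length_nodes` further down in this diff).
fn list_chunks(subtree_chunks: usize) -> usize {
    subtree_chunks + 2
}

fn main() {
    assert_eq!(basic_chunks(), 1);
    // A list whose merkle tree spans three chunks takes five in its parent.
    assert_eq!(list_chunks(3), 5);
}
```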
@@ -158,301 +162,3 @@ fn num_unsanitized_leaves(num_bytes: usize) -> usize {
 fn num_bytes(num_leaves: usize) -> usize {
     num_leaves * HASHSIZE
 }
-
-#[derive(Debug, PartialEq, Clone)]
-pub struct TreeHashCache {
-    cache: Vec<u8>,
-    chunk_modified: Vec<bool>,
-    overlays: Vec<BTreeOverlay>,
-
-    pub chunk_index: usize,
-    pub overlay_index: usize,
-}
-
-impl Into<Vec<u8>> for TreeHashCache {
-    fn into(self) -> Vec<u8> {
-        self.cache
-    }
-}
-
-impl TreeHashCache {
-    pub fn new<T>(item: &T, depth: usize) -> Result<Self, Error>
-    where
-        T: CachedTreeHashSubTree<T>,
-    {
-        item.new_tree_hash_cache(depth)
-    }
-
-    pub fn from_leaves_and_subtrees<T>(
-        item: &T,
-        leaves_and_subtrees: Vec<Self>,
-        depth: usize,
-    ) -> Result<Self, Error>
-    where
-        T: CachedTreeHashSubTree<T>,
-    {
-        let overlay = BTreeOverlay::new(item, 0, depth)?;
-
-        // Note how many leaves were provided. If is not a power-of-two, we'll need to pad it out
-        // later.
-        let num_provided_leaf_nodes = leaves_and_subtrees.len();
-
-        // Allocate enough bytes to store the internal nodes and the leaves and subtrees, then fill
-        // all the to-be-built internal nodes with zeros and append the leaves and subtrees.
-        let internal_node_bytes = overlay.num_internal_nodes() * BYTES_PER_CHUNK;
-        let leaves_and_subtrees_bytes = leaves_and_subtrees
-            .iter()
-            .fold(0, |acc, t| acc + t.bytes_len());
-        let mut cache = Vec::with_capacity(leaves_and_subtrees_bytes + internal_node_bytes);
-        cache.resize(internal_node_bytes, 0);
-
-        // Allocate enough bytes to store all the leaves.
-        let mut leaves = Vec::with_capacity(overlay.num_leaf_nodes() * HASHSIZE);
-        let mut overlays = Vec::with_capacity(leaves_and_subtrees.len());
-
-        if T::tree_hash_type() == TreeHashType::List {
-            overlays.push(overlay);
-        }
-
-        // Iterate through all of the leaves/subtrees, adding their root as a leaf node and then
-        // concatenating their merkle trees.
-        for t in leaves_and_subtrees {
-            leaves.append(&mut t.root()?.to_vec());
-
-            let (mut bytes, _bools, mut t_overlays) = t.into_components();
-            cache.append(&mut bytes);
-            overlays.append(&mut t_overlays);
-        }
-
-        // Pad the leaves to an even power-of-two, using zeros.
-        pad_for_leaf_count(num_provided_leaf_nodes, &mut cache);
-
-        // Merkleize the leaves, then split the leaf nodes off them. Then, replace all-zeros
-        // internal nodes created earlier with the internal nodes generated by `merkleize`.
-        let mut merkleized = merkleize(leaves);
-        merkleized.split_off(internal_node_bytes);
-        cache.splice(0..internal_node_bytes, merkleized);
-
-        Ok(Self {
-            chunk_modified: vec![false; cache.len() / BYTES_PER_CHUNK],
-            cache,
-            overlays,
-            chunk_index: 0,
-            overlay_index: 0,
-        })
-    }
-
-    pub fn from_bytes(
-        bytes: Vec<u8>,
-        initial_modified_state: bool,
-        overlay: Option<BTreeOverlay>,
-    ) -> Result<Self, Error> {
-        if bytes.len() % BYTES_PER_CHUNK > 0 {
-            return Err(Error::BytesAreNotEvenChunks(bytes.len()));
-        }
-
-        let overlays = match overlay {
-            Some(overlay) => vec![overlay],
-            None => vec![],
-        };
-
-        Ok(Self {
-            chunk_modified: vec![initial_modified_state; bytes.len() / BYTES_PER_CHUNK],
-            cache: bytes,
-            overlays,
-            chunk_index: 0,
-            overlay_index: 0,
-        })
-    }
-
-    pub fn get_overlay(
-        &self,
-        overlay_index: usize,
-        chunk_index: usize,
-    ) -> Result<BTreeOverlay, Error> {
-        let mut overlay = self
-            .overlays
-            .get(overlay_index)
-            .ok_or_else(|| Error::NoOverlayForIndex(overlay_index))?
-            .clone();
-
-        overlay.offset = chunk_index;
-
-        Ok(overlay)
-    }
-
-    pub fn reset_modifications(&mut self) {
-        for chunk_modified in &mut self.chunk_modified {
-            *chunk_modified = false;
-        }
-    }
-
-    pub fn replace_overlay(
-        &mut self,
-        overlay_index: usize,
-        chunk_index: usize,
-        new_overlay: BTreeOverlay,
-    ) -> Result<BTreeOverlay, Error> {
-        let old_overlay = self.get_overlay(overlay_index, chunk_index)?;
-
-        // If the merkle tree required to represent the new list is of a different size to the one
-        // required for the previous list, then update our cache.
-        //
-        // This grows/shrinks the bytes to accomodate the new tree, preserving as much of the tree
-        // as possible.
-        if new_overlay.num_leaf_nodes() != old_overlay.num_leaf_nodes() {
-            // Get slices of the exsiting tree from the cache.
-            let (old_bytes, old_flags) = self
-                .slices(old_overlay.chunk_range())
-                .ok_or_else(|| Error::UnableToObtainSlices)?;
-
-            let (new_bytes, new_bools) =
-                if new_overlay.num_leaf_nodes() > old_overlay.num_leaf_nodes() {
-                    resize::grow_merkle_cache(
-                        old_bytes,
-                        old_flags,
-                        old_overlay.height(),
-                        new_overlay.height(),
-                    )
-                    .ok_or_else(|| Error::UnableToGrowMerkleTree)?
-                } else {
-                    resize::shrink_merkle_cache(
-                        old_bytes,
-                        old_flags,
-                        old_overlay.height(),
-                        new_overlay.height(),
-                        new_overlay.num_chunks(),
-                    )
-                    .ok_or_else(|| Error::UnableToShrinkMerkleTree)?
-                };
-
-            // Splice the newly created `TreeHashCache` over the existing elements.
-            self.splice(old_overlay.chunk_range(), new_bytes, new_bools);
-        }
-
-        Ok(std::mem::replace(
-            &mut self.overlays[overlay_index],
-            new_overlay,
-        ))
-    }
-
-    pub fn remove_proceeding_child_overlays(&mut self, overlay_index: usize, depth: usize) {
-        let end = self
-            .overlays
-            .iter()
-            .skip(overlay_index)
-            .position(|o| o.depth <= depth)
-            .unwrap_or_else(|| self.overlays.len());
-
-        self.overlays.splice(overlay_index..end, vec![]);
-    }
-
-    pub fn update_internal_nodes(&mut self, overlay: &BTreeOverlay) -> Result<(), Error> {
-        for (parent, children) in overlay.internal_parents_and_children().into_iter().rev() {
-            if self.either_modified(children)? {
-                self.modify_chunk(parent, &self.hash_children(children)?)?;
-            }
-        }
-
-        Ok(())
-    }
-
-    fn bytes_len(&self) -> usize {
-        self.cache.len()
-    }
-
-    pub fn root(&self) -> Result<&[u8], Error> {
-        self.cache
-            .get(0..HASHSIZE)
-            .ok_or_else(|| Error::NoBytesForRoot)
-    }
-
-    fn splice(&mut self, chunk_range: Range<usize>, bytes: Vec<u8>, bools: Vec<bool>) {
-        // Update the `chunk_modified` vec, marking all spliced-in nodes as changed.
-        self.chunk_modified.splice(chunk_range.clone(), bools);
-        self.cache
-            .splice(node_range_to_byte_range(&chunk_range), bytes);
-    }
-
-    pub fn maybe_update_chunk(&mut self, chunk: usize, to: &[u8]) -> Result<(), Error> {
-        let start = chunk * BYTES_PER_CHUNK;
-        let end = start + BYTES_PER_CHUNK;
-
-        if !self.chunk_equals(chunk, to)? {
-            self.cache
-                .get_mut(start..end)
-                .ok_or_else(|| Error::NoModifiedFieldForChunk(chunk))?
-                .copy_from_slice(to);
-            self.chunk_modified[chunk] = true;
-        }
-
-        Ok(())
-    }
-
-    fn slices(&self, chunk_range: Range<usize>) -> Option<(&[u8], &[bool])> {
-        Some((
-            self.cache.get(node_range_to_byte_range(&chunk_range))?,
-            self.chunk_modified.get(chunk_range)?,
-        ))
-    }
-
-    fn modify_chunk(&mut self, chunk: usize, to: &[u8]) -> Result<(), Error> {
-        let start = chunk * BYTES_PER_CHUNK;
-        let end = start + BYTES_PER_CHUNK;
-
-        self.cache
-            .get_mut(start..end)
-            .ok_or_else(|| Error::NoBytesForChunk(chunk))?
-            .copy_from_slice(to);
-
-        self.chunk_modified[chunk] = true;
-
-        Ok(())
-    }
-
-    fn get_chunk(&self, chunk: usize) -> Result<&[u8], Error> {
-        let start = chunk * BYTES_PER_CHUNK;
-        let end = start + BYTES_PER_CHUNK;
-
-        Ok(self
-            .cache
-            .get(start..end)
-            .ok_or_else(|| Error::NoModifiedFieldForChunk(chunk))?)
-    }
-
-    fn chunk_equals(&mut self, chunk: usize, other: &[u8]) -> Result<bool, Error> {
-        Ok(self.get_chunk(chunk)? == other)
-    }
-
-    pub fn changed(&self, chunk: usize) -> Result<bool, Error> {
-        self.chunk_modified
-            .get(chunk)
-            .cloned()
-            .ok_or_else(|| Error::NoModifiedFieldForChunk(chunk))
-    }
-
-    fn either_modified(&self, children: (usize, usize)) -> Result<bool, Error> {
-        Ok(self.changed(children.0)? | self.changed(children.1)?)
-    }
-
-    fn hash_children(&self, children: (usize, usize)) -> Result<Vec<u8>, Error> {
-        let mut child_bytes = Vec::with_capacity(BYTES_PER_CHUNK * 2);
-        child_bytes.append(&mut self.get_chunk(children.0)?.to_vec());
-        child_bytes.append(&mut self.get_chunk(children.1)?.to_vec());
-
-        Ok(hash(&child_bytes))
-    }
-
-    pub fn mix_in_length(&self, chunk: usize, length: usize) -> Result<Vec<u8>, Error> {
-        let mut bytes = Vec::with_capacity(2 * BYTES_PER_CHUNK);
-
-        bytes.append(&mut self.get_chunk(chunk)?.to_vec());
-        bytes.append(&mut int_to_bytes32(length as u64));
-
-        Ok(hash(&bytes))
-    }
-
-    pub fn into_components(self) -> (Vec<u8>, Vec<bool>, Vec<BTreeOverlay>) {
-        (self.cache, self.chunk_modified, self.overlays)
-    }
-}
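Editor's note: the removal above is a move, not a deletion; the same `TreeHashCache` reappears in the new `tree_hash_cache` submodule later in this diff. Ahead of the `impls.rs` changes below, a standalone sketch of what the one-chunk cache of a basic type looks like, assuming (as the real code relies on) that `merkleize` of fewer than 32 bytes reduces to zero-padding a single chunk. Names here are illustrative, not the crate's API.

```rust
const BYTES_PER_CHUNK: usize = 32;

/// Illustrative stand-in for the single-chunk cache of a `u64`/`usize`:
/// the little-endian bytes, zero-padded to one 32-byte chunk.
fn basic_leaf_chunk(value: u64) -> [u8; BYTES_PER_CHUNK] {
    let mut chunk = [0u8; BYTES_PER_CHUNK];
    chunk[..8].copy_from_slice(&value.to_le_bytes());
    chunk
}

fn main() {
    let chunk = basic_leaf_chunk(42);
    assert_eq!(&chunk[..8], &42u64.to_le_bytes());
    assert!(chunk[8..].iter().all(|b| *b == 0));
}
```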
diff --git a/eth2/utils/tree_hash/src/cached_tree_hash/impls.rs b/eth2/utils/tree_hash/src/cached_tree_hash/impls.rs
index 42d77d11d..34902f062 100644
--- a/eth2/utils/tree_hash/src/cached_tree_hash/impls.rs
+++ b/eth2/utils/tree_hash/src/cached_tree_hash/impls.rs
@@ -3,21 +3,58 @@ use super::*;
 mod vec;
 
 impl CachedTreeHashSubTree<u64> for u64 {
-    fn new_tree_hash_cache(&self, depth: usize) -> Result<TreeHashCache, Error> {
+    fn new_tree_hash_cache(&self, _depth: usize) -> Result<TreeHashCache, Error> {
         Ok(TreeHashCache::from_bytes(
             merkleize(self.to_le_bytes().to_vec()),
             false,
-            // self.tree_hash_cache_overlay(0, depth)?,
             None,
         )?)
     }
 
+    fn num_tree_hash_cache_chunks(&self) -> usize {
+        1
+    }
+
     fn tree_hash_cache_overlay(
         &self,
         chunk_offset: usize,
         depth: usize,
     ) -> Result<BTreeOverlay, Error> {
-        BTreeOverlay::from_lengths(chunk_offset, 1, depth, vec![1])
+        panic!("Basic should not produce overlay");
+        // BTreeOverlay::from_lengths(chunk_offset, 1, depth, vec![1])
+    }
+
+    fn update_tree_hash_cache(&self, cache: &mut TreeHashCache) -> Result<(), Error> {
+        let leaf = merkleize(self.to_le_bytes().to_vec());
+        cache.maybe_update_chunk(cache.chunk_index, &leaf)?;
+
+        cache.chunk_index += 1;
+        // cache.overlay_index += 1;
+
+        Ok(())
+    }
+}
+
+impl CachedTreeHashSubTree<usize> for usize {
+    fn new_tree_hash_cache(&self, _depth: usize) -> Result<TreeHashCache, Error> {
+        Ok(TreeHashCache::from_bytes(
+            merkleize(self.to_le_bytes().to_vec()),
+            false,
+            None,
+        )?)
+    }
+
+    fn num_tree_hash_cache_chunks(&self) -> usize {
+        1
+    }
+
+    fn tree_hash_cache_overlay(
+        &self,
+        chunk_offset: usize,
+        depth: usize,
+    ) -> Result<BTreeOverlay, Error> {
+        panic!("Basic should not produce overlay");
+        // BTreeOverlay::from_lengths(chunk_offset, 1, depth, vec![1])
     }
 
     fn update_tree_hash_cache(&self, cache: &mut TreeHashCache) -> Result<(), Error> {
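Editor's note for the `vec.rs` changes that follow: a list's cache now carries two bookkeeping nodes around its merkle subtree, laid out as `[mixed-in root][subtree chunks...][length]`. That is why `update_tree_hash_cache` bumps `chunk_index` past the first node on entry and past the length node on exit. A rough index sketch under that assumed layout (illustrative helper, not crate API):

```rust
use std::ops::Range;

// Sketch: chunk indices for a list whose merkle subtree occupies
// `subtree_chunks` chunks at a parent-assigned `offset`, assuming one
// mixed-in-root node before the subtree and one length node after it.
fn list_chunk_layout(offset: usize, subtree_chunks: usize) -> (usize, Range<usize>, usize) {
    let mixed_in_root = offset;
    let subtree = offset + 1..offset + 1 + subtree_chunks;
    let length_node = offset + 1 + subtree_chunks;
    (mixed_in_root, subtree, length_node)
}

fn main() {
    let (root, subtree, len_node) = list_chunk_layout(10, 3);
    assert_eq!(root, 10);
    assert_eq!(subtree, 11..14);
    assert_eq!(len_node, 14);
    // After updating, the caller resumes at the chunk past the length node.
    assert_eq!(len_node + 1, 15);
}
```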
diff --git a/eth2/utils/tree_hash/src/cached_tree_hash/impls/vec.rs b/eth2/utils/tree_hash/src/cached_tree_hash/impls/vec.rs
index 5ab7d06e4..fc43cc9b8 100644
--- a/eth2/utils/tree_hash/src/cached_tree_hash/impls/vec.rs
+++ b/eth2/utils/tree_hash/src/cached_tree_hash/impls/vec.rs
@@ -5,7 +5,7 @@ where
     T: CachedTreeHashSubTree<T> + TreeHash,
 {
     fn new_tree_hash_cache(&self, depth: usize) -> Result<TreeHashCache, Error> {
-        let overlay = self.tree_hash_cache_overlay(0, depth)?;
+        let mut overlay = self.tree_hash_cache_overlay(0, depth)?;
 
         let mut cache = match T::tree_hash_type() {
             TreeHashType::Basic => TreeHashCache::from_bytes(
@@ -23,13 +23,18 @@ where
             }
         }?;
 
-        // Mix in the length of the list.
-        let root_node = overlay.root();
-        cache.modify_chunk(root_node, &cache.mix_in_length(root_node, self.len())?)?;
+        cache.add_length_nodes(overlay.chunk_range(), self.len())?;
 
         Ok(cache)
     }
 
+    fn num_tree_hash_cache_chunks(&self) -> usize {
+        BTreeOverlay::new(self, 0, 0)
+            .and_then(|o| Ok(o.num_chunks()))
+            .unwrap_or_else(|_| 1)
+            + 2
+    }
+
     fn tree_hash_cache_overlay(
         &self,
         chunk_offset: usize,
@@ -48,7 +53,7 @@ where
         let mut lengths = vec![];
 
         for item in self {
-            lengths.push(BTreeOverlay::new(item, 0, depth)?.num_chunks())
+            lengths.push(item.num_tree_hash_cache_chunks())
         }
 
         // Disallow zero-length as an empty list still has one all-padding node.
@@ -64,6 +69,9 @@ where
     }
 
     fn update_tree_hash_cache(&self, cache: &mut TreeHashCache) -> Result<(), Error> {
+        // Skip the length-mixed-in root node.
+        cache.chunk_index += 1;
+
         let old_overlay = cache.get_overlay(cache.overlay_index, cache.chunk_index)?;
         let new_overlay = BTreeOverlay::new(self, cache.chunk_index, old_overlay.depth)?;
 
@@ -113,9 +121,6 @@ where
                 // The item existed in the previous list and exists in the current list.
                 (Some(_old), Some(new)) => {
                     cache.chunk_index = new.start;
-                    if cache.chunk_index + 1 < cache.chunk_modified.len() {
-                        cache.chunk_modified[cache.chunk_index + 1] = true;
-                    }
 
                     self[i].update_tree_hash_cache(cache)?;
                 }
@@ -169,21 +174,11 @@ where
 
         cache.update_internal_nodes(&new_overlay)?;
 
-        // Mix in length.
-        let root_node = new_overlay.root();
-        if cache.changed(root_node)? {
-            cache.modify_chunk(root_node, &cache.mix_in_length(root_node, self.len())?)?;
-        } else if old_overlay.num_items != new_overlay.num_items {
-            if new_overlay.num_internal_nodes() == 0 {
-                cache.modify_chunk(root_node, &cache.mix_in_length(root_node, self.len())?)?;
-            } else {
-                let children = new_overlay.child_chunks(0);
-                cache.modify_chunk(root_node, &cache.hash_children(children)?)?;
-                cache.modify_chunk(root_node, &cache.mix_in_length(root_node, self.len())?)?;
-            }
-        }
+        // Mix in length
+        cache.mix_in_length(new_overlay.chunk_range(), self.len())?;
 
-        cache.chunk_index = new_overlay.next_node();
+        // Skip an extra node to clear the length node.
+        cache.chunk_index = new_overlay.next_node() + 1;
 
         Ok(())
     }
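Editor's note on the new `mix_in_length` used above and defined in the file below: it updates the length chunk and then rewrites the node one position before the subtree root as `hash(root || length)`, following the usual SSZ convention of mixing a list's length into its root. A hedged, self-contained sketch of that computation; the digest is injected as a closure so the example compiles without committing to a hash crate (the real code uses the crate's own `hash` and `int_to_bytes32` helpers):

```rust
// new_root = H(subtree_root || le_bytes_32(len))
fn int_to_bytes32(n: u64) -> [u8; 32] {
    // Assumed to match the crate's helper: little-endian u64, zero-padded to 32 bytes.
    let mut out = [0u8; 32];
    out[..8].copy_from_slice(&n.to_le_bytes());
    out
}

fn mix_in_length(subtree_root: &[u8; 32], len: u64, hash: impl Fn(&[u8]) -> [u8; 32]) -> [u8; 32] {
    let mut preimage = Vec::with_capacity(64);
    preimage.extend_from_slice(subtree_root);
    preimage.extend_from_slice(&int_to_bytes32(len));
    hash(&preimage)
}

fn main() {
    // Toy "hash" (XOR fold) purely to exercise the plumbing; not cryptographic.
    let fake_hash = |bytes: &[u8]| {
        let mut out = [0u8; 32];
        for (i, b) in bytes.iter().enumerate() {
            out[i % 32] ^= b;
        }
        out
    };
    let root = [1u8; 32];
    let mixed = mix_in_length(&root, 5, fake_hash);
    assert_ne!(mixed, root);
}
```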
diff --git a/eth2/utils/tree_hash/src/cached_tree_hash/tree_hash_cache.rs b/eth2/utils/tree_hash/src/cached_tree_hash/tree_hash_cache.rs
new file mode 100644
index 000000000..8fa08e306
--- /dev/null
+++ b/eth2/utils/tree_hash/src/cached_tree_hash/tree_hash_cache.rs
@@ -0,0 +1,329 @@
+use super::*;
+
+#[derive(Debug, PartialEq, Clone)]
+pub struct TreeHashCache {
+    pub cache: Vec<u8>,
+    pub chunk_modified: Vec<bool>,
+    pub overlays: Vec<BTreeOverlay>,
+
+    pub chunk_index: usize,
+    pub overlay_index: usize,
+}
+
+impl Into<Vec<u8>> for TreeHashCache {
+    fn into(self) -> Vec<u8> {
+        self.cache
+    }
+}
+
+impl TreeHashCache {
+    pub fn new<T>(item: &T, depth: usize) -> Result<Self, Error>
+    where
+        T: CachedTreeHashSubTree<T>,
+    {
+        item.new_tree_hash_cache(depth)
+    }
+
+    pub fn from_leaves_and_subtrees<T>(
+        item: &T,
+        leaves_and_subtrees: Vec<Self>,
+        depth: usize,
+    ) -> Result<Self, Error>
+    where
+        T: CachedTreeHashSubTree<T>,
+    {
+        let overlay = BTreeOverlay::new(item, 0, depth)?;
+
+        // Note how many leaves were provided. If is not a power-of-two, we'll need to pad it out
+        // later.
+        let num_provided_leaf_nodes = leaves_and_subtrees.len();
+
+        // Allocate enough bytes to store the internal nodes and the leaves and subtrees, then fill
+        // all the to-be-built internal nodes with zeros and append the leaves and subtrees.
+        let internal_node_bytes = overlay.num_internal_nodes() * BYTES_PER_CHUNK;
+        let leaves_and_subtrees_bytes = leaves_and_subtrees
+            .iter()
+            .fold(0, |acc, t| acc + t.bytes_len());
+        let mut cache = Vec::with_capacity(leaves_and_subtrees_bytes + internal_node_bytes);
+        cache.resize(internal_node_bytes, 0);
+
+        // Allocate enough bytes to store all the leaves.
+        let mut leaves = Vec::with_capacity(overlay.num_leaf_nodes() * HASHSIZE);
+        let mut overlays = Vec::with_capacity(leaves_and_subtrees.len());
+
+        if T::tree_hash_type() == TreeHashType::List {
+            overlays.push(overlay);
+        }
+
+        // Iterate through all of the leaves/subtrees, adding their root as a leaf node and then
+        // concatenating their merkle trees.
+        for t in leaves_and_subtrees {
+            leaves.append(&mut t.root()?.to_vec());
+
+            let (mut bytes, _bools, mut t_overlays) = t.into_components();
+            cache.append(&mut bytes);
+            overlays.append(&mut t_overlays);
+        }
+
+        // Pad the leaves to an even power-of-two, using zeros.
+        pad_for_leaf_count(num_provided_leaf_nodes, &mut cache);
+
+        // Merkleize the leaves, then split the leaf nodes off them. Then, replace all-zeros
+        // internal nodes created earlier with the internal nodes generated by `merkleize`.
+        let mut merkleized = merkleize(leaves);
+        merkleized.split_off(internal_node_bytes);
+        cache.splice(0..internal_node_bytes, merkleized);
+
+        Ok(Self {
+            chunk_modified: vec![false; cache.len() / BYTES_PER_CHUNK],
+            cache,
+            overlays,
+            chunk_index: 0,
+            overlay_index: 0,
+        })
+    }
+
+    pub fn from_bytes(
+        bytes: Vec<u8>,
+        initial_modified_state: bool,
+        overlay: Option<BTreeOverlay>,
+    ) -> Result<Self, Error> {
+        if bytes.len() % BYTES_PER_CHUNK > 0 {
+            return Err(Error::BytesAreNotEvenChunks(bytes.len()));
+        }
+
+        let overlays = match overlay {
+            Some(overlay) => vec![overlay],
+            None => vec![],
+        };
+
+        Ok(Self {
+            chunk_modified: vec![initial_modified_state; bytes.len() / BYTES_PER_CHUNK],
+            cache: bytes,
+            overlays,
+            chunk_index: 0,
+            overlay_index: 0,
+        })
+    }
+
+    pub fn get_overlay(
+        &self,
+        overlay_index: usize,
+        chunk_index: usize,
+    ) -> Result<BTreeOverlay, Error> {
+        let mut overlay = self
+            .overlays
+            .get(overlay_index)
+            .ok_or_else(|| Error::NoOverlayForIndex(overlay_index))?
+            .clone();
+
+        overlay.offset = chunk_index;
+
+        Ok(overlay)
+    }
+
+    pub fn reset_modifications(&mut self) {
+        for chunk_modified in &mut self.chunk_modified {
+            *chunk_modified = false;
+        }
+    }
+
+    pub fn replace_overlay(
+        &mut self,
+        overlay_index: usize,
+        chunk_index: usize,
+        new_overlay: BTreeOverlay,
+    ) -> Result<BTreeOverlay, Error> {
+        let old_overlay = self.get_overlay(overlay_index, chunk_index)?;
+
+        // If the merkle tree required to represent the new list is of a different size to the one
+        // required for the previous list, then update our cache.
+        //
+        // This grows/shrinks the bytes to accomodate the new tree, preserving as much of the tree
+        // as possible.
+        if new_overlay.num_leaf_nodes() != old_overlay.num_leaf_nodes() {
+            // Get slices of the exsiting tree from the cache.
+            let (old_bytes, old_flags) = self
+                .slices(old_overlay.chunk_range())
+                .ok_or_else(|| Error::UnableToObtainSlices)?;
+
+            let (new_bytes, new_bools) =
+                if new_overlay.num_leaf_nodes() > old_overlay.num_leaf_nodes() {
+                    resize::grow_merkle_cache(
+                        old_bytes,
+                        old_flags,
+                        old_overlay.height(),
+                        new_overlay.height(),
+                    )
+                    .ok_or_else(|| Error::UnableToGrowMerkleTree)?
+                } else {
+                    resize::shrink_merkle_cache(
+                        old_bytes,
+                        old_flags,
+                        old_overlay.height(),
+                        new_overlay.height(),
+                        new_overlay.num_chunks(),
+                    )
+                    .ok_or_else(|| Error::UnableToShrinkMerkleTree)?
+                };
+
+            // Splice the newly created `TreeHashCache` over the existing elements.
+            self.splice(old_overlay.chunk_range(), new_bytes, new_bools);
+        }
+
+        Ok(std::mem::replace(
+            &mut self.overlays[overlay_index],
+            new_overlay,
+        ))
+    }
+
+    pub fn remove_proceeding_child_overlays(&mut self, overlay_index: usize, depth: usize) {
+        let end = self
+            .overlays
+            .iter()
+            .skip(overlay_index)
+            .position(|o| o.depth <= depth)
+            .unwrap_or_else(|| self.overlays.len());
+
+        self.overlays.splice(overlay_index..end, vec![]);
+    }
+
+    pub fn update_internal_nodes(&mut self, overlay: &BTreeOverlay) -> Result<(), Error> {
+        for (parent, children) in overlay.internal_parents_and_children().into_iter().rev() {
+            if self.either_modified(children)? {
+                self.modify_chunk(parent, &self.hash_children(children)?)?;
+            }
+        }
+
+        Ok(())
+    }
+
+    fn bytes_len(&self) -> usize {
+        self.cache.len()
+    }
+
+    pub fn root(&self) -> Result<&[u8], Error> {
+        self.cache
+            .get(0..HASHSIZE)
+            .ok_or_else(|| Error::NoBytesForRoot)
+    }
+
+    pub fn splice(&mut self, chunk_range: Range<usize>, bytes: Vec<u8>, bools: Vec<bool>) {
+        // Update the `chunk_modified` vec, marking all spliced-in nodes as changed.
+        self.chunk_modified.splice(chunk_range.clone(), bools);
+        self.cache
+            .splice(node_range_to_byte_range(&chunk_range), bytes);
+    }
+
+    pub fn maybe_update_chunk(&mut self, chunk: usize, to: &[u8]) -> Result<(), Error> {
+        let start = chunk * BYTES_PER_CHUNK;
+        let end = start + BYTES_PER_CHUNK;
+
+        if !self.chunk_equals(chunk, to)? {
+            self.cache
+                .get_mut(start..end)
+                .ok_or_else(|| Error::NoModifiedFieldForChunk(chunk))?
+                .copy_from_slice(to);
+            self.chunk_modified[chunk] = true;
+        }
+
+        Ok(())
+    }
+
+    fn slices(&self, chunk_range: Range<usize>) -> Option<(&[u8], &[bool])> {
+        Some((
+            self.cache.get(node_range_to_byte_range(&chunk_range))?,
+            self.chunk_modified.get(chunk_range)?,
+        ))
+    }
+
+    pub fn modify_chunk(&mut self, chunk: usize, to: &[u8]) -> Result<(), Error> {
+        let start = chunk * BYTES_PER_CHUNK;
+        let end = start + BYTES_PER_CHUNK;
+
+        self.cache
+            .get_mut(start..end)
+            .ok_or_else(|| Error::NoBytesForChunk(chunk))?
+            .copy_from_slice(to);
+
+        self.chunk_modified[chunk] = true;
+
+        Ok(())
+    }
+
+    fn get_chunk(&self, chunk: usize) -> Result<&[u8], Error> {
+        let start = chunk * BYTES_PER_CHUNK;
+        let end = start + BYTES_PER_CHUNK;
+
+        Ok(self
+            .cache
+            .get(start..end)
+            .ok_or_else(|| Error::NoModifiedFieldForChunk(chunk))?)
+    }
+
+    fn chunk_equals(&mut self, chunk: usize, other: &[u8]) -> Result<bool, Error> {
+        Ok(self.get_chunk(chunk)? == other)
+    }
+
+    pub fn changed(&self, chunk: usize) -> Result<bool, Error> {
+        self.chunk_modified
+            .get(chunk)
+            .cloned()
+            .ok_or_else(|| Error::NoModifiedFieldForChunk(chunk))
+    }
+
+    fn either_modified(&self, children: (usize, usize)) -> Result<bool, Error> {
+        Ok(self.changed(children.0)? | self.changed(children.1)?)
+    }
+
+    pub fn hash_children(&self, children: (usize, usize)) -> Result<Vec<u8>, Error> {
+        let mut child_bytes = Vec::with_capacity(BYTES_PER_CHUNK * 2);
+        child_bytes.append(&mut self.get_chunk(children.0)?.to_vec());
+        child_bytes.append(&mut self.get_chunk(children.1)?.to_vec());
+
+        Ok(hash(&child_bytes))
+    }
+
+    pub fn add_length_nodes(
+        &mut self,
+        chunk_range: Range<usize>,
+        length: usize,
+    ) -> Result<(), Error> {
+        self.chunk_modified[chunk_range.start] = true;
+
+        let byte_range = node_range_to_byte_range(&chunk_range);
+
+        // Add the last node.
+        self.cache
+            .splice(byte_range.end..byte_range.end, vec![0; HASHSIZE]);
+        self.chunk_modified
+            .splice(chunk_range.end..chunk_range.end, vec![false]);
+
+        // Add the first node.
+        self.cache
+            .splice(byte_range.start..byte_range.start, vec![0; HASHSIZE]);
+        self.chunk_modified
+            .splice(chunk_range.start..chunk_range.start, vec![false]);
+
+        self.mix_in_length(chunk_range.start + 1..chunk_range.end + 1, length)?;
+
+        Ok(())
+    }
+
+    pub fn mix_in_length(&mut self, chunk_range: Range<usize>, length: usize) -> Result<(), Error> {
+        // Update the length chunk.
+        self.maybe_update_chunk(chunk_range.end, &int_to_bytes32(length as u64))?;
+
+        // Update the mixed-in root if the main root or the length have changed.
+        let children = (chunk_range.start, chunk_range.end);
+        if self.either_modified(children)? {
+            self.modify_chunk(chunk_range.start - 1, &self.hash_children(children)?)?;
+        }
+
+        Ok(())
+    }
+
+    pub fn into_components(self) -> (Vec<u8>, Vec<bool>, Vec<BTreeOverlay>) {
+        (self.cache, self.chunk_modified, self.overlays)
+    }
+}
diff --git a/eth2/utils/tree_hash/tests/tests.rs b/eth2/utils/tree_hash/tests/tests.rs
index bc3c5538f..cef366da4 100644
--- a/eth2/utils/tree_hash/tests/tests.rs
+++ b/eth2/utils/tree_hash/tests/tests.rs
@@ -74,9 +74,9 @@ fn test_inner() {
 
 #[test]
 fn test_vec() {
-    let original = vec![1, 2, 3, 4, 5];
+    let original: Vec<u64> = vec![1, 2, 3, 4, 5];
 
-    let modified = vec![
+    let modified: Vec<Vec<u64>> = vec![
         vec![1, 2, 3, 4, 42],
         vec![1, 2, 3, 4],
         vec![],
@@ -93,7 +93,7 @@ fn test_vec() {
 
 #[test]
 fn test_nested_list_of_u64() {
-    let original: Vec<Vec<u64>> = vec![vec![1]];
+    let original: Vec<Vec<u64>> = vec![vec![42]];
 
     let modified = vec![
         vec![vec![1]],
diff --git a/eth2/utils/tree_hash_derive/src/lib.rs b/eth2/utils/tree_hash_derive/src/lib.rs
index 38a72f4fa..272ea7e96 100644
--- a/eth2/utils/tree_hash_derive/src/lib.rs
+++ b/eth2/utils/tree_hash_derive/src/lib.rs
@@ -73,11 +73,17 @@ pub fn subtree_derive(input: TokenStream) -> TokenStream {
             Ok(tree)
         }
 
+        fn num_tree_hash_cache_chunks(&self) -> usize {
+            tree_hash::BTreeOverlay::new(self, 0, 0)
+                .and_then(|o| Ok(o.num_chunks()))
+                .unwrap_or_else(|_| 1)
+        }
+
        fn tree_hash_cache_overlay(&self, chunk_offset: usize, depth: usize) -> Result<tree_hash::BTreeOverlay, tree_hash::Error> {
            let mut lengths = vec![];

            #(
-               lengths.push(tree_hash::BTreeOverlay::new(&self.#idents_b, 0, depth)?.num_chunks());
+               lengths.push(self.#idents_b.num_tree_hash_cache_chunks());
            )*

            tree_hash::BTreeOverlay::from_lengths(chunk_offset, #num_items, depth, lengths)
@@ -97,10 +103,7 @@ pub fn subtree_derive(input: TokenStream) -> TokenStream {
            )*

            // Iterate through the internal nodes, updating them if their children have changed.
-           dbg!("START");
-           dbg!(overlay.offset);
            cache.update_internal_nodes(&overlay)?;
-           dbg!("END");

            Ok(())
        }
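Editor's note on the derive change above: for a struct, the generated `num_tree_hash_cache_chunks` measures the struct's own overlay, while `tree_hash_cache_overlay` now asks each field for its chunk count instead of building a throwaway overlay per field. A hedged, hand-written approximation of what that means for a hypothetical two-field struct (the trait and impls below are stand-ins to keep the sketch self-contained; they are not the macro's actual output):

```rust
// Hypothetical struct used only for illustration.
struct Foo {
    a: u64,
    b: Vec<u64>,
}

// Stand-in for the crate's trait method, to keep the sketch compilable.
trait ChunkCount {
    fn num_tree_hash_cache_chunks(&self) -> usize;
}

impl ChunkCount for u64 {
    fn num_tree_hash_cache_chunks(&self) -> usize {
        1 // basic type: one chunk
    }
}

impl ChunkCount for Vec<u64> {
    fn num_tree_hash_cache_chunks(&self) -> usize {
        // Assumed: a short list of u64s packs into one leaf chunk, plus the
        // two length nodes introduced in this diff.
        1 + 2
    }
}

impl ChunkCount for Foo {
    fn num_tree_hash_cache_chunks(&self) -> usize {
        // The real derive measures BTreeOverlay::new(self, 0, 0).num_chunks();
        // for a two-field struct, one internal (root) node plus the per-field
        // counts approximates that quantity.
        1 + self.a.num_tree_hash_cache_chunks() + self.b.num_tree_hash_cache_chunks()
    }
}

fn main() {
    let foo = Foo { a: 1, b: vec![2, 3] };
    assert_eq!(foo.num_tree_hash_cache_chunks(), 5);
}
```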