Fix mix-in-length issue
This commit is contained in:
parent e12fa58e6e
commit 7563755b15
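Background for the commit title, as an editorial aside: in SSZ tree hashing, "mixing in the length" means hashing a list's Merkle root together with the list's length to produce the final root. A minimal sketch of that operation, assuming the `sha2` crate for hashing (this crate's own `hash` helper and cache types are deliberately not used here):

```rust
// Sketch only: SSZ-style length mix-in, independent of the TreeHashCache in this commit.
use sha2::{Digest, Sha256};

const BYTES_PER_CHUNK: usize = 32;

/// Hash two 32-byte chunks concatenated together.
fn hash_concat(a: &[u8], b: &[u8]) -> Vec<u8> {
    let mut hasher = Sha256::new();
    hasher.update(a);
    hasher.update(b);
    hasher.finalize().to_vec()
}

/// `mix_in_length(root, len)` = `hash(root || len_as_32_le_bytes)`.
fn mix_in_length(root: &[u8], length: u64) -> Vec<u8> {
    let mut length_chunk = [0u8; BYTES_PER_CHUNK];
    length_chunk[..8].copy_from_slice(&length.to_le_bytes());
    hash_concat(root, &length_chunk)
}
```

In the diff below, the old per-chunk `mix_in_length(chunk, length)` helper, which returned a fresh hash for the caller to write back into the root chunk, is replaced by `add_length_nodes` plus a range-based `mix_in_length(chunk_range, length)` that keep the mixed-in root and the length in dedicated cache chunks.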
@@ -6,8 +6,10 @@ use std::ops::Range;
pub mod btree_overlay;
pub mod impls;
pub mod resize;
pub mod tree_hash_cache;

pub use btree_overlay::BTreeOverlay;
pub use tree_hash_cache::TreeHashCache;

#[derive(Debug, PartialEq)]
pub struct CachedTreeHasher {
@@ -79,6 +81,8 @@ pub trait CachedTreeHashSubTree<Item>: TreeHash {
        depth: usize,
    ) -> Result<BTreeOverlay, Error>;

    fn num_tree_hash_cache_chunks(&self) -> usize;

    fn new_tree_hash_cache(&self, depth: usize) -> Result<TreeHashCache, Error>;

    fn update_tree_hash_cache(&self, cache: &mut TreeHashCache) -> Result<(), Error>;
@@ -158,301 +162,3 @@ fn num_unsanitized_leaves(num_bytes: usize) -> usize {
fn num_bytes(num_leaves: usize) -> usize {
    num_leaves * HASHSIZE
}

#[derive(Debug, PartialEq, Clone)]
pub struct TreeHashCache {
    cache: Vec<u8>,
    chunk_modified: Vec<bool>,
    overlays: Vec<BTreeOverlay>,

    pub chunk_index: usize,
    pub overlay_index: usize,
}

impl Into<Vec<u8>> for TreeHashCache {
    fn into(self) -> Vec<u8> {
        self.cache
    }
}

impl TreeHashCache {
    pub fn new<T>(item: &T, depth: usize) -> Result<Self, Error>
    where
        T: CachedTreeHashSubTree<T>,
    {
        item.new_tree_hash_cache(depth)
    }
    pub fn from_leaves_and_subtrees<T>(
        item: &T,
        leaves_and_subtrees: Vec<Self>,
        depth: usize,
    ) -> Result<Self, Error>
    where
        T: CachedTreeHashSubTree<T>,
    {
        let overlay = BTreeOverlay::new(item, 0, depth)?;

        // Note how many leaves were provided. If it is not a power-of-two, we'll need to pad it out
        // later.
        let num_provided_leaf_nodes = leaves_and_subtrees.len();

        // Allocate enough bytes to store the internal nodes and the leaves and subtrees, then fill
        // all the to-be-built internal nodes with zeros and append the leaves and subtrees.
        let internal_node_bytes = overlay.num_internal_nodes() * BYTES_PER_CHUNK;
        let leaves_and_subtrees_bytes = leaves_and_subtrees
            .iter()
            .fold(0, |acc, t| acc + t.bytes_len());
        let mut cache = Vec::with_capacity(leaves_and_subtrees_bytes + internal_node_bytes);
        cache.resize(internal_node_bytes, 0);

        // Allocate enough bytes to store all the leaves.
        let mut leaves = Vec::with_capacity(overlay.num_leaf_nodes() * HASHSIZE);
        let mut overlays = Vec::with_capacity(leaves_and_subtrees.len());

        if T::tree_hash_type() == TreeHashType::List {
            overlays.push(overlay);
        }

        // Iterate through all of the leaves/subtrees, adding their root as a leaf node and then
        // concatenating their merkle trees.
        for t in leaves_and_subtrees {
            leaves.append(&mut t.root()?.to_vec());

            let (mut bytes, _bools, mut t_overlays) = t.into_components();
            cache.append(&mut bytes);
            overlays.append(&mut t_overlays);
        }

        // Pad the leaves to an even power-of-two, using zeros.
        pad_for_leaf_count(num_provided_leaf_nodes, &mut cache);

        // Merkleize the leaves, then split the leaf nodes off them. Then, replace all-zeros
        // internal nodes created earlier with the internal nodes generated by `merkleize`.
        let mut merkleized = merkleize(leaves);
        merkleized.split_off(internal_node_bytes);
        cache.splice(0..internal_node_bytes, merkleized);

        Ok(Self {
            chunk_modified: vec![false; cache.len() / BYTES_PER_CHUNK],
            cache,
            overlays,
            chunk_index: 0,
            overlay_index: 0,
        })
    }
    pub fn from_bytes(
        bytes: Vec<u8>,
        initial_modified_state: bool,
        overlay: Option<BTreeOverlay>,
    ) -> Result<Self, Error> {
        if bytes.len() % BYTES_PER_CHUNK > 0 {
            return Err(Error::BytesAreNotEvenChunks(bytes.len()));
        }

        let overlays = match overlay {
            Some(overlay) => vec![overlay],
            None => vec![],
        };

        Ok(Self {
            chunk_modified: vec![initial_modified_state; bytes.len() / BYTES_PER_CHUNK],
            cache: bytes,
            overlays,
            chunk_index: 0,
            overlay_index: 0,
        })
    }

    pub fn get_overlay(
        &self,
        overlay_index: usize,
        chunk_index: usize,
    ) -> Result<BTreeOverlay, Error> {
        let mut overlay = self
            .overlays
            .get(overlay_index)
            .ok_or_else(|| Error::NoOverlayForIndex(overlay_index))?
            .clone();

        overlay.offset = chunk_index;

        Ok(overlay)
    }

    pub fn reset_modifications(&mut self) {
        for chunk_modified in &mut self.chunk_modified {
            *chunk_modified = false;
        }
    }
    pub fn replace_overlay(
        &mut self,
        overlay_index: usize,
        chunk_index: usize,
        new_overlay: BTreeOverlay,
    ) -> Result<BTreeOverlay, Error> {
        let old_overlay = self.get_overlay(overlay_index, chunk_index)?;

        // If the merkle tree required to represent the new list is of a different size to the one
        // required for the previous list, then update our cache.
        //
        // This grows/shrinks the bytes to accommodate the new tree, preserving as much of the tree
        // as possible.
        if new_overlay.num_leaf_nodes() != old_overlay.num_leaf_nodes() {
            // Get slices of the existing tree from the cache.
            let (old_bytes, old_flags) = self
                .slices(old_overlay.chunk_range())
                .ok_or_else(|| Error::UnableToObtainSlices)?;

            let (new_bytes, new_bools) =
                if new_overlay.num_leaf_nodes() > old_overlay.num_leaf_nodes() {
                    resize::grow_merkle_cache(
                        old_bytes,
                        old_flags,
                        old_overlay.height(),
                        new_overlay.height(),
                    )
                    .ok_or_else(|| Error::UnableToGrowMerkleTree)?
                } else {
                    resize::shrink_merkle_cache(
                        old_bytes,
                        old_flags,
                        old_overlay.height(),
                        new_overlay.height(),
                        new_overlay.num_chunks(),
                    )
                    .ok_or_else(|| Error::UnableToShrinkMerkleTree)?
                };

            // Splice the newly created `TreeHashCache` over the existing elements.
            self.splice(old_overlay.chunk_range(), new_bytes, new_bools);
        }

        Ok(std::mem::replace(
            &mut self.overlays[overlay_index],
            new_overlay,
        ))
    }
    pub fn remove_proceeding_child_overlays(&mut self, overlay_index: usize, depth: usize) {
        let end = self
            .overlays
            .iter()
            .skip(overlay_index)
            .position(|o| o.depth <= depth)
            .unwrap_or_else(|| self.overlays.len());

        self.overlays.splice(overlay_index..end, vec![]);
    }

    pub fn update_internal_nodes(&mut self, overlay: &BTreeOverlay) -> Result<(), Error> {
        for (parent, children) in overlay.internal_parents_and_children().into_iter().rev() {
            if self.either_modified(children)? {
                self.modify_chunk(parent, &self.hash_children(children)?)?;
            }
        }

        Ok(())
    }

    fn bytes_len(&self) -> usize {
        self.cache.len()
    }

    pub fn root(&self) -> Result<&[u8], Error> {
        self.cache
            .get(0..HASHSIZE)
            .ok_or_else(|| Error::NoBytesForRoot)
    }

    fn splice(&mut self, chunk_range: Range<usize>, bytes: Vec<u8>, bools: Vec<bool>) {
        // Update the `chunk_modified` vec, marking all spliced-in nodes as changed.
        self.chunk_modified.splice(chunk_range.clone(), bools);
        self.cache
            .splice(node_range_to_byte_range(&chunk_range), bytes);
    }

    pub fn maybe_update_chunk(&mut self, chunk: usize, to: &[u8]) -> Result<(), Error> {
        let start = chunk * BYTES_PER_CHUNK;
        let end = start + BYTES_PER_CHUNK;

        if !self.chunk_equals(chunk, to)? {
            self.cache
                .get_mut(start..end)
                .ok_or_else(|| Error::NoModifiedFieldForChunk(chunk))?
                .copy_from_slice(to);
            self.chunk_modified[chunk] = true;
        }

        Ok(())
    }

    fn slices(&self, chunk_range: Range<usize>) -> Option<(&[u8], &[bool])> {
        Some((
            self.cache.get(node_range_to_byte_range(&chunk_range))?,
            self.chunk_modified.get(chunk_range)?,
        ))
    }

    fn modify_chunk(&mut self, chunk: usize, to: &[u8]) -> Result<(), Error> {
        let start = chunk * BYTES_PER_CHUNK;
        let end = start + BYTES_PER_CHUNK;

        self.cache
            .get_mut(start..end)
            .ok_or_else(|| Error::NoBytesForChunk(chunk))?
            .copy_from_slice(to);

        self.chunk_modified[chunk] = true;

        Ok(())
    }

    fn get_chunk(&self, chunk: usize) -> Result<&[u8], Error> {
        let start = chunk * BYTES_PER_CHUNK;
        let end = start + BYTES_PER_CHUNK;

        Ok(self
            .cache
            .get(start..end)
            .ok_or_else(|| Error::NoModifiedFieldForChunk(chunk))?)
    }

    fn chunk_equals(&mut self, chunk: usize, other: &[u8]) -> Result<bool, Error> {
        Ok(self.get_chunk(chunk)? == other)
    }

    pub fn changed(&self, chunk: usize) -> Result<bool, Error> {
        self.chunk_modified
            .get(chunk)
            .cloned()
            .ok_or_else(|| Error::NoModifiedFieldForChunk(chunk))
    }

    fn either_modified(&self, children: (usize, usize)) -> Result<bool, Error> {
        Ok(self.changed(children.0)? | self.changed(children.1)?)
    }

    fn hash_children(&self, children: (usize, usize)) -> Result<Vec<u8>, Error> {
        let mut child_bytes = Vec::with_capacity(BYTES_PER_CHUNK * 2);
        child_bytes.append(&mut self.get_chunk(children.0)?.to_vec());
        child_bytes.append(&mut self.get_chunk(children.1)?.to_vec());

        Ok(hash(&child_bytes))
    }

    pub fn mix_in_length(&self, chunk: usize, length: usize) -> Result<Vec<u8>, Error> {
        let mut bytes = Vec::with_capacity(2 * BYTES_PER_CHUNK);

        bytes.append(&mut self.get_chunk(chunk)?.to_vec());
        bytes.append(&mut int_to_bytes32(length as u64));

        Ok(hash(&bytes))
    }

    pub fn into_components(self) -> (Vec<u8>, Vec<bool>, Vec<BTreeOverlay>) {
        (self.cache, self.chunk_modified, self.overlays)
    }
}
@@ -3,21 +3,58 @@ use super::*;
mod vec;

impl CachedTreeHashSubTree<u64> for u64 {
    fn new_tree_hash_cache(&self, depth: usize) -> Result<TreeHashCache, Error> {
    fn new_tree_hash_cache(&self, _depth: usize) -> Result<TreeHashCache, Error> {
        Ok(TreeHashCache::from_bytes(
            merkleize(self.to_le_bytes().to_vec()),
            false,
            // self.tree_hash_cache_overlay(0, depth)?,
            None,
        )?)
    }

    fn num_tree_hash_cache_chunks(&self) -> usize {
        1
    }

    fn tree_hash_cache_overlay(
        &self,
        chunk_offset: usize,
        depth: usize,
    ) -> Result<BTreeOverlay, Error> {
        BTreeOverlay::from_lengths(chunk_offset, 1, depth, vec![1])
        panic!("Basic should not produce overlay");
        // BTreeOverlay::from_lengths(chunk_offset, 1, depth, vec![1])
    }

    fn update_tree_hash_cache(&self, cache: &mut TreeHashCache) -> Result<(), Error> {
        let leaf = merkleize(self.to_le_bytes().to_vec());
        cache.maybe_update_chunk(cache.chunk_index, &leaf)?;

        cache.chunk_index += 1;
        // cache.overlay_index += 1;

        Ok(())
    }
}

impl CachedTreeHashSubTree<usize> for usize {
    fn new_tree_hash_cache(&self, _depth: usize) -> Result<TreeHashCache, Error> {
        Ok(TreeHashCache::from_bytes(
            merkleize(self.to_le_bytes().to_vec()),
            false,
            None,
        )?)
    }

    fn num_tree_hash_cache_chunks(&self) -> usize {
        1
    }

    fn tree_hash_cache_overlay(
        &self,
        chunk_offset: usize,
        depth: usize,
    ) -> Result<BTreeOverlay, Error> {
        panic!("Basic should not produce overlay");
        // BTreeOverlay::from_lengths(chunk_offset, 1, depth, vec![1])
    }

    fn update_tree_hash_cache(&self, cache: &mut TreeHashCache) -> Result<(), Error> {
@@ -5,7 +5,7 @@ where
    T: CachedTreeHashSubTree<T> + TreeHash,
{
    fn new_tree_hash_cache(&self, depth: usize) -> Result<TreeHashCache, Error> {
        let overlay = self.tree_hash_cache_overlay(0, depth)?;
        let mut overlay = self.tree_hash_cache_overlay(0, depth)?;

        let mut cache = match T::tree_hash_type() {
            TreeHashType::Basic => TreeHashCache::from_bytes(
@@ -23,13 +23,18 @@ where
            }
        }?;

        // Mix in the length of the list.
        let root_node = overlay.root();
        cache.modify_chunk(root_node, &cache.mix_in_length(root_node, self.len())?)?;
        cache.add_length_nodes(overlay.chunk_range(), self.len())?;

        Ok(cache)
    }

    fn num_tree_hash_cache_chunks(&self) -> usize {
        BTreeOverlay::new(self, 0, 0)
            .and_then(|o| Ok(o.num_chunks()))
            .unwrap_or_else(|_| 1)
            + 2
    }

    fn tree_hash_cache_overlay(
        &self,
        chunk_offset: usize,
@@ -48,7 +53,7 @@ where
        let mut lengths = vec![];

        for item in self {
            lengths.push(BTreeOverlay::new(item, 0, depth)?.num_chunks())
            lengths.push(item.num_tree_hash_cache_chunks())
        }

        // Disallow zero-length as an empty list still has one all-padding node.
@@ -64,6 +69,9 @@ where
    }

    fn update_tree_hash_cache(&self, cache: &mut TreeHashCache) -> Result<(), Error> {
        // Skip the length-mixed-in root node.
        cache.chunk_index += 1;

        let old_overlay = cache.get_overlay(cache.overlay_index, cache.chunk_index)?;
        let new_overlay = BTreeOverlay::new(self, cache.chunk_index, old_overlay.depth)?;
@@ -113,9 +121,6 @@ where
            // The item existed in the previous list and exists in the current list.
            (Some(_old), Some(new)) => {
                cache.chunk_index = new.start;
                if cache.chunk_index + 1 < cache.chunk_modified.len() {
                    cache.chunk_modified[cache.chunk_index + 1] = true;
                }

                self[i].update_tree_hash_cache(cache)?;
            }
@@ -169,21 +174,11 @@ where

        cache.update_internal_nodes(&new_overlay)?;

        // Mix in length.
        let root_node = new_overlay.root();
        if cache.changed(root_node)? {
            cache.modify_chunk(root_node, &cache.mix_in_length(root_node, self.len())?)?;
        } else if old_overlay.num_items != new_overlay.num_items {
            if new_overlay.num_internal_nodes() == 0 {
                cache.modify_chunk(root_node, &cache.mix_in_length(root_node, self.len())?)?;
            } else {
                let children = new_overlay.child_chunks(0);
                cache.modify_chunk(root_node, &cache.hash_children(children)?)?;
                cache.modify_chunk(root_node, &cache.mix_in_length(root_node, self.len())?)?;
            }
        }
        // Mix in length
        cache.mix_in_length(new_overlay.chunk_range(), self.len())?;

        cache.chunk_index = new_overlay.next_node();
        // Skip an extra node to clear the length node.
        cache.chunk_index = new_overlay.next_node() + 1;

        Ok(())
    }
329 eth2/utils/tree_hash/src/cached_tree_hash/tree_hash_cache.rs Normal file
@@ -0,0 +1,329 @@
use super::*;

#[derive(Debug, PartialEq, Clone)]
pub struct TreeHashCache {
    pub cache: Vec<u8>,
    pub chunk_modified: Vec<bool>,
    pub overlays: Vec<BTreeOverlay>,

    pub chunk_index: usize,
    pub overlay_index: usize,
}

impl Into<Vec<u8>> for TreeHashCache {
    fn into(self) -> Vec<u8> {
        self.cache
    }
}

impl TreeHashCache {
    pub fn new<T>(item: &T, depth: usize) -> Result<Self, Error>
    where
        T: CachedTreeHashSubTree<T>,
    {
        item.new_tree_hash_cache(depth)
    }
    pub fn from_leaves_and_subtrees<T>(
        item: &T,
        leaves_and_subtrees: Vec<Self>,
        depth: usize,
    ) -> Result<Self, Error>
    where
        T: CachedTreeHashSubTree<T>,
    {
        let overlay = BTreeOverlay::new(item, 0, depth)?;

        // Note how many leaves were provided. If it is not a power-of-two, we'll need to pad it out
        // later.
        let num_provided_leaf_nodes = leaves_and_subtrees.len();

        // Allocate enough bytes to store the internal nodes and the leaves and subtrees, then fill
        // all the to-be-built internal nodes with zeros and append the leaves and subtrees.
        let internal_node_bytes = overlay.num_internal_nodes() * BYTES_PER_CHUNK;
        let leaves_and_subtrees_bytes = leaves_and_subtrees
            .iter()
            .fold(0, |acc, t| acc + t.bytes_len());
        let mut cache = Vec::with_capacity(leaves_and_subtrees_bytes + internal_node_bytes);
        cache.resize(internal_node_bytes, 0);

        // Allocate enough bytes to store all the leaves.
        let mut leaves = Vec::with_capacity(overlay.num_leaf_nodes() * HASHSIZE);
        let mut overlays = Vec::with_capacity(leaves_and_subtrees.len());

        if T::tree_hash_type() == TreeHashType::List {
            overlays.push(overlay);
        }

        // Iterate through all of the leaves/subtrees, adding their root as a leaf node and then
        // concatenating their merkle trees.
        for t in leaves_and_subtrees {
            leaves.append(&mut t.root()?.to_vec());

            let (mut bytes, _bools, mut t_overlays) = t.into_components();
            cache.append(&mut bytes);
            overlays.append(&mut t_overlays);
        }

        // Pad the leaves to an even power-of-two, using zeros.
        pad_for_leaf_count(num_provided_leaf_nodes, &mut cache);

        // Merkleize the leaves, then split the leaf nodes off them. Then, replace all-zeros
        // internal nodes created earlier with the internal nodes generated by `merkleize`.
        let mut merkleized = merkleize(leaves);
        merkleized.split_off(internal_node_bytes);
        cache.splice(0..internal_node_bytes, merkleized);

        Ok(Self {
            chunk_modified: vec![false; cache.len() / BYTES_PER_CHUNK],
            cache,
            overlays,
            chunk_index: 0,
            overlay_index: 0,
        })
    }
    pub fn from_bytes(
        bytes: Vec<u8>,
        initial_modified_state: bool,
        overlay: Option<BTreeOverlay>,
    ) -> Result<Self, Error> {
        if bytes.len() % BYTES_PER_CHUNK > 0 {
            return Err(Error::BytesAreNotEvenChunks(bytes.len()));
        }

        let overlays = match overlay {
            Some(overlay) => vec![overlay],
            None => vec![],
        };

        Ok(Self {
            chunk_modified: vec![initial_modified_state; bytes.len() / BYTES_PER_CHUNK],
            cache: bytes,
            overlays,
            chunk_index: 0,
            overlay_index: 0,
        })
    }

    pub fn get_overlay(
        &self,
        overlay_index: usize,
        chunk_index: usize,
    ) -> Result<BTreeOverlay, Error> {
        let mut overlay = self
            .overlays
            .get(overlay_index)
            .ok_or_else(|| Error::NoOverlayForIndex(overlay_index))?
            .clone();

        overlay.offset = chunk_index;

        Ok(overlay)
    }

    pub fn reset_modifications(&mut self) {
        for chunk_modified in &mut self.chunk_modified {
            *chunk_modified = false;
        }
    }
    pub fn replace_overlay(
        &mut self,
        overlay_index: usize,
        chunk_index: usize,
        new_overlay: BTreeOverlay,
    ) -> Result<BTreeOverlay, Error> {
        let old_overlay = self.get_overlay(overlay_index, chunk_index)?;

        // If the merkle tree required to represent the new list is of a different size to the one
        // required for the previous list, then update our cache.
        //
        // This grows/shrinks the bytes to accommodate the new tree, preserving as much of the tree
        // as possible.
        if new_overlay.num_leaf_nodes() != old_overlay.num_leaf_nodes() {
            // Get slices of the existing tree from the cache.
            let (old_bytes, old_flags) = self
                .slices(old_overlay.chunk_range())
                .ok_or_else(|| Error::UnableToObtainSlices)?;

            let (new_bytes, new_bools) =
                if new_overlay.num_leaf_nodes() > old_overlay.num_leaf_nodes() {
                    resize::grow_merkle_cache(
                        old_bytes,
                        old_flags,
                        old_overlay.height(),
                        new_overlay.height(),
                    )
                    .ok_or_else(|| Error::UnableToGrowMerkleTree)?
                } else {
                    resize::shrink_merkle_cache(
                        old_bytes,
                        old_flags,
                        old_overlay.height(),
                        new_overlay.height(),
                        new_overlay.num_chunks(),
                    )
                    .ok_or_else(|| Error::UnableToShrinkMerkleTree)?
                };

            // Splice the newly created `TreeHashCache` over the existing elements.
            self.splice(old_overlay.chunk_range(), new_bytes, new_bools);
        }

        Ok(std::mem::replace(
            &mut self.overlays[overlay_index],
            new_overlay,
        ))
    }
    pub fn remove_proceeding_child_overlays(&mut self, overlay_index: usize, depth: usize) {
        let end = self
            .overlays
            .iter()
            .skip(overlay_index)
            .position(|o| o.depth <= depth)
            .unwrap_or_else(|| self.overlays.len());

        self.overlays.splice(overlay_index..end, vec![]);
    }

    pub fn update_internal_nodes(&mut self, overlay: &BTreeOverlay) -> Result<(), Error> {
        for (parent, children) in overlay.internal_parents_and_children().into_iter().rev() {
            if self.either_modified(children)? {
                self.modify_chunk(parent, &self.hash_children(children)?)?;
            }
        }

        Ok(())
    }

    fn bytes_len(&self) -> usize {
        self.cache.len()
    }

    pub fn root(&self) -> Result<&[u8], Error> {
        self.cache
            .get(0..HASHSIZE)
            .ok_or_else(|| Error::NoBytesForRoot)
    }

    pub fn splice(&mut self, chunk_range: Range<usize>, bytes: Vec<u8>, bools: Vec<bool>) {
        // Update the `chunk_modified` vec, marking all spliced-in nodes as changed.
        self.chunk_modified.splice(chunk_range.clone(), bools);
        self.cache
            .splice(node_range_to_byte_range(&chunk_range), bytes);
    }

    pub fn maybe_update_chunk(&mut self, chunk: usize, to: &[u8]) -> Result<(), Error> {
        let start = chunk * BYTES_PER_CHUNK;
        let end = start + BYTES_PER_CHUNK;

        if !self.chunk_equals(chunk, to)? {
            self.cache
                .get_mut(start..end)
                .ok_or_else(|| Error::NoModifiedFieldForChunk(chunk))?
                .copy_from_slice(to);
            self.chunk_modified[chunk] = true;
        }

        Ok(())
    }

    fn slices(&self, chunk_range: Range<usize>) -> Option<(&[u8], &[bool])> {
        Some((
            self.cache.get(node_range_to_byte_range(&chunk_range))?,
            self.chunk_modified.get(chunk_range)?,
        ))
    }

    pub fn modify_chunk(&mut self, chunk: usize, to: &[u8]) -> Result<(), Error> {
        let start = chunk * BYTES_PER_CHUNK;
        let end = start + BYTES_PER_CHUNK;

        self.cache
            .get_mut(start..end)
            .ok_or_else(|| Error::NoBytesForChunk(chunk))?
            .copy_from_slice(to);

        self.chunk_modified[chunk] = true;

        Ok(())
    }

    fn get_chunk(&self, chunk: usize) -> Result<&[u8], Error> {
        let start = chunk * BYTES_PER_CHUNK;
        let end = start + BYTES_PER_CHUNK;

        Ok(self
            .cache
            .get(start..end)
            .ok_or_else(|| Error::NoModifiedFieldForChunk(chunk))?)
    }

    fn chunk_equals(&mut self, chunk: usize, other: &[u8]) -> Result<bool, Error> {
        Ok(self.get_chunk(chunk)? == other)
    }

    pub fn changed(&self, chunk: usize) -> Result<bool, Error> {
        self.chunk_modified
            .get(chunk)
            .cloned()
            .ok_or_else(|| Error::NoModifiedFieldForChunk(chunk))
    }

    fn either_modified(&self, children: (usize, usize)) -> Result<bool, Error> {
        Ok(self.changed(children.0)? | self.changed(children.1)?)
    }

    pub fn hash_children(&self, children: (usize, usize)) -> Result<Vec<u8>, Error> {
        let mut child_bytes = Vec::with_capacity(BYTES_PER_CHUNK * 2);
        child_bytes.append(&mut self.get_chunk(children.0)?.to_vec());
        child_bytes.append(&mut self.get_chunk(children.1)?.to_vec());

        Ok(hash(&child_bytes))
    }
    pub fn add_length_nodes(
        &mut self,
        chunk_range: Range<usize>,
        length: usize,
    ) -> Result<(), Error> {
        self.chunk_modified[chunk_range.start] = true;

        let byte_range = node_range_to_byte_range(&chunk_range);

        // Add the last node.
        self.cache
            .splice(byte_range.end..byte_range.end, vec![0; HASHSIZE]);
        self.chunk_modified
            .splice(chunk_range.end..chunk_range.end, vec![false]);

        // Add the first node.
        self.cache
            .splice(byte_range.start..byte_range.start, vec![0; HASHSIZE]);
        self.chunk_modified
            .splice(chunk_range.start..chunk_range.start, vec![false]);

        self.mix_in_length(chunk_range.start + 1..chunk_range.end + 1, length)?;

        Ok(())
    }

    pub fn mix_in_length(&mut self, chunk_range: Range<usize>, length: usize) -> Result<(), Error> {
        // Update the length chunk.
        self.maybe_update_chunk(chunk_range.end, &int_to_bytes32(length as u64))?;

        // Update the mixed-in root if the main root or the length have changed.
        let children = (chunk_range.start, chunk_range.end);
        if self.either_modified(children)? {
            self.modify_chunk(chunk_range.start - 1, &self.hash_children(children)?)?;
        }

        Ok(())
    }

    pub fn into_components(self) -> (Vec<u8>, Vec<bool>, Vec<BTreeOverlay>) {
        (self.cache, self.chunk_modified, self.overlays)
    }
}
@@ -74,9 +74,9 @@ fn test_inner() {

#[test]
fn test_vec() {
    let original = vec![1, 2, 3, 4, 5];
    let original: Vec<u64> = vec![1, 2, 3, 4, 5];

    let modified = vec![
    let modified: Vec<Vec<u64>> = vec![
        vec![1, 2, 3, 4, 42],
        vec![1, 2, 3, 4],
        vec![],
@@ -93,7 +93,7 @@ fn test_vec() {

#[test]
fn test_nested_list_of_u64() {
    let original: Vec<Vec<u64>> = vec![vec![1]];
    let original: Vec<Vec<u64>> = vec![vec![42]];

    let modified = vec![
        vec![vec![1]],
@@ -73,11 +73,17 @@ pub fn subtree_derive(input: TokenStream) -> TokenStream {
            Ok(tree)
        }

        fn num_tree_hash_cache_chunks(&self) -> usize {
            tree_hash::BTreeOverlay::new(self, 0, 0)
                .and_then(|o| Ok(o.num_chunks()))
                .unwrap_or_else(|_| 1)
        }

        fn tree_hash_cache_overlay(&self, chunk_offset: usize, depth: usize) -> Result<tree_hash::BTreeOverlay, tree_hash::Error> {
            let mut lengths = vec![];

            #(
                lengths.push(tree_hash::BTreeOverlay::new(&self.#idents_b, 0, depth)?.num_chunks());
                lengths.push(self.#idents_b.num_tree_hash_cache_chunks());
            )*

            tree_hash::BTreeOverlay::from_lengths(chunk_offset, #num_items, depth, lengths)
@@ -97,10 +103,7 @@ pub fn subtree_derive(input: TokenStream) -> TokenStream {
            )*

            // Iterate through the internal nodes, updating them if their children have changed.
            dbg!("START");
            dbg!(overlay.offset);
            cache.update_internal_nodes(&overlay)?;
            dbg!("END");

            Ok(())
        }
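Editorial note on the new layout shown above: `add_length_nodes` wraps a list's subtree (the chunks in `chunk_range`) with two extra cache chunks, one just before the range for the length-mixed-in root and one just after it for the length itself, and the range-based `mix_in_length` then recomputes that leading chunk only when the subtree root or the length chunk has changed. A hedged sketch of the resulting index arithmetic, using the names from the diff (the helper itself is hypothetical):

```rust
// Sketch only: the chunk layout implied by `add_length_nodes` / `mix_in_length`.
// For a subtree that originally occupied `chunk_range`:
//
//   [ mixed-in root ][ ...subtree, shifted right by one chunk... ][ length ]
//
use std::ops::Range;

fn length_node_layout(chunk_range: Range<usize>) -> (usize, Range<usize>, usize) {
    // Range the subtree occupies after the two splices.
    let shifted = chunk_range.start + 1..chunk_range.end + 1;
    // The mixed-in root sits immediately before it; the length immediately after.
    let mixed_in_root_chunk = shifted.start - 1;
    let length_chunk = shifted.end;
    (mixed_in_root_chunk, shifted, length_chunk)
}
```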