Add failing test for grow merkle tree

Paul Hauner 2019-04-14 10:34:54 +10:00
parent 1ce1fce03c
commit e038bd18b5
3 changed files with 43 additions and 7 deletions

View File

@@ -16,6 +16,8 @@ pub enum Error {
     ShouldNotProduceOffsetHandler,
     NoFirstNode,
     NoBytesForRoot,
+    UnableToObtainSlices,
+    UnableToGrowMerkleTree,
     BytesAreNotEvenChunks(usize),
     NoModifiedFieldForChunk(usize),
     NoBytesForChunk(usize),
@@ -74,6 +76,13 @@ impl TreeHashCache {
         item.build_tree_hash_cache()
     }
 
+    pub fn from_elems(cache: Vec<u8>, chunk_modified: Vec<bool>) -> Self {
+        Self {
+            cache,
+            chunk_modified,
+        }
+    }
+
     pub fn from_leaves_and_subtrees<T>(
         item: &T,
         leaves_and_subtrees: Vec<Self>,
@@ -149,7 +158,7 @@ impl TreeHashCache {
         // Update the `chunk_modified` vec, marking all spliced-in nodes as changed.
         self.chunk_modified.splice(chunk_range.clone(), bools);
         self.cache
-            .splice(node_range_to_byte_range(chunk_range), bytes);
+            .splice(node_range_to_byte_range(&chunk_range), bytes);
     }
 
     pub fn maybe_update_chunk(&mut self, chunk: usize, to: &[u8]) -> Result<(), Error> {
@@ -167,6 +176,13 @@ impl TreeHashCache {
         Ok(())
     }
 
+    pub fn slices(&self, chunk_range: Range<usize>) -> Option<(&[u8], &[bool])> {
+        Some((
+            self.cache.get(node_range_to_byte_range(&chunk_range))?,
+            self.chunk_modified.get(chunk_range)?,
+        ))
+    }
+
     pub fn modify_chunk(&mut self, chunk: usize, to: &[u8]) -> Result<(), Error> {
         let start = chunk * BYTES_PER_CHUNK;
         let end = start + BYTES_PER_CHUNK;
@@ -231,7 +247,7 @@ fn num_nodes(num_leaves: usize) -> usize {
     2 * num_leaves - 1
 }
 
-fn node_range_to_byte_range(node_range: Range<usize>) -> Range<usize> {
+fn node_range_to_byte_range(node_range: &Range<usize>) -> Range<usize> {
     node_range.start * HASHSIZE..node_range.end * HASHSIZE
 }
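
Note: the switch to `&Range<usize>` above lets callers reuse the chunk range after computing byte offsets, which the new `slices` method relies on. A minimal standalone sketch of that chunk-to-byte arithmetic (not the crate's code; `HASHSIZE == 32` is an assumption here, since the constant's definition is outside this diff):

use std::ops::Range;

// Assumed value; the real constant is defined elsewhere in the crate.
const HASHSIZE: usize = 32;

fn node_range_to_byte_range(node_range: &Range<usize>) -> Range<usize> {
    node_range.start * HASHSIZE..node_range.end * HASHSIZE
}

fn main() {
    // A two-chunk cache: 64 bytes of hash data, one modified-flag per chunk.
    let cache: Vec<u8> = vec![0; 2 * HASHSIZE];
    let chunk_modified: Vec<bool> = vec![false; 2];

    // Chunk range 0..2 maps to byte range 0..64, mirroring the pair of
    // slices the new `TreeHashCache::slices` hands back for that range.
    let byte_range = node_range_to_byte_range(&(0..2));
    assert_eq!(byte_range, 0..64);
    assert_eq!(cache.get(byte_range).unwrap().len(), 64);
    assert_eq!(chunk_modified.get(0..2).unwrap().len(), 2);
}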
@@ -281,6 +297,10 @@ impl OffsetHandler {
         })
     }
 
+    pub fn node_range(&self) -> Result<Range<usize>, Error> {
+        Ok(*self.offsets.first().ok_or_else(|| Error::NoFirstNode)?..self.next_node())
+    }
+
     pub fn total_nodes(&self) -> usize {
         self.num_internal_nodes + self.num_leaf_nodes
}

View File

@@ -113,9 +113,27 @@ where
     ) -> Result<usize, Error> {
         let offset_handler = OffsetHandler::new(self, chunk)?;
 
-        if other.len().next_power_of_two() > self.len().next_power_of_two() {
+        if self.len().next_power_of_two() > other.len().next_power_of_two() {
+            // Get slices of the existing tree from the cache.
+            let (old_bytes, old_flags) = cache
+                .slices(offset_handler.node_range()?)
+                .ok_or_else(|| Error::UnableToObtainSlices)?;
+
+            // From the existing slices, build new, expanded Vecs.
+            let (new_bytes, new_flags) = grow_merkle_cache(
+                old_bytes,
+                old_flags,
+                other.len().next_power_of_two().leading_zeros() as usize,
+                self.len().next_power_of_two().leading_zeros() as usize,
+            )
+            .ok_or_else(|| Error::UnableToGrowMerkleTree)?;
+
+            // Create a `TreeHashCache` from the raw elements.
+            let expanded_cache = TreeHashCache::from_elems(new_bytes, new_flags);
+
+            // Splice the newly created `TreeHashCache` over the existing, smaller elements.
+            cache.splice(offset_handler.node_range()?, expanded_cache);
-        } else if other.len().next_power_of_two() < self.len().next_power_of_two() {
+        } else if self.len().next_power_of_two() < other.len().next_power_of_two() {
             panic!("shrinking below power of two is not implemented")
         }
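
Note on the grow path above: the old and new tree sizes reach `grow_merkle_cache` as `leading_zeros()` of the power-of-two-padded lengths, which encodes size inversely (the larger tree yields the smaller argument). A worked example of that arithmetic, with hypothetical lengths and assuming a 64-bit target:

fn main() {
    let old_len: usize = 5; // hypothetical other.len()
    let new_len: usize = 9; // hypothetical self.len()

    // Lengths are first padded up to the next power of two...
    assert_eq!(old_len.next_power_of_two(), 8);
    assert_eq!(new_len.next_power_of_two(), 16);

    // ...then encoded via leading_zeros(); note the inversion: the larger
    // tree produces the smaller value (values assume 64-bit usize).
    assert_eq!(old_len.next_power_of_two().leading_zeros(), 60); // 64 - 4
    assert_eq!(new_len.next_power_of_two().leading_zeros(), 59); // 64 - 5
}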

View File

@@ -408,7 +408,6 @@ fn extended_u64_vec_len_within_pow_2_boundary() {
     test_u64_vec_modifications(original_vec, modified_vec);
 }
 
-/*
 #[test]
 fn extended_u64_vec_len_outside_pow_2_boundary() {
     let original_vec: Vec<u64> = (0..2_u64.pow(5)).collect();
@@ -417,7 +416,6 @@ fn extended_u64_vec_len_outside_pow_2_boundary() {
     test_u64_vec_modifications(original_vec, modified_vec);
 }
 
-*/
 #[test]
 fn large_vec_of_u64_builds() {
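
The test uncommented above is the failing test this commit adds: it extends a vec of 2^5 elements past its power-of-two boundary, forcing the grow path in the second file. The hunk elides the actual `modified_vec`, so the length below is a hypothetical illustration of the scenario:

fn main() {
    let original_vec: Vec<u64> = (0..2_u64.pow(5)).collect();
    // Hypothetical extension; the real modified_vec is outside this hunk.
    let modified_vec: Vec<u64> = (0..2_u64.pow(5) + 1).collect();

    // 33 leaves no longer fit under a 32-leaf root, so the cached tree
    // must grow to the next power of two.
    assert!(
        modified_vec.len().next_power_of_two() > original_vec.len().next_power_of_two()
    );
}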