Merge branch 'master' into attestation-processing

This commit is contained in:
Paul Hauner 2019-08-08 17:11:19 +10:00
commit 284166c7f8
90 changed files with 227 additions and 2672 deletions

View File

@@ -5,13 +5,12 @@ members = [
     "eth2/state_processing",
     "eth2/types",
     "eth2/utils/bls",
-    "eth2/utils/cached_tree_hash",
     "eth2/utils/compare_fields",
     "eth2/utils/compare_fields_derive",
     "eth2/utils/eth2_config",
     "eth2/utils/eth2_interop_keypairs",
-    "eth2/utils/hashing",
     "eth2/utils/logging",
+    "eth2/utils/eth2_hashing",
     "eth2/utils/merkle_proof",
     "eth2/utils/int_to_bytes",
     "eth2/utils/serde_hex",
@@ -39,3 +38,11 @@ members = [
     "validator_client",
     "account_manager",
 ]
+
+[patch]
+[patch.crates-io]
+tree_hash = { path = "eth2/utils/tree_hash" }
+tree_hash_derive = { path = "eth2/utils/tree_hash_derive" }
+eth2_ssz = { path = "eth2/utils/ssz" }
+eth2_ssz_derive = { path = "eth2/utils/ssz_derive" }
+eth2_ssz_types = { path = "eth2/utils/ssz_types" }

View File

@@ -69,7 +69,7 @@ In this example we use the `account_manager` to create some keys, launch two
 `beacon_nodes` should stay in sync and build a Beacon Chain.

 First, clone this repository, [setup a development
-environment](docs/installation.md) and navigate to the root directory of this repository.
+environment](docs/env.md) and navigate to the root directory of this repository.

 Then, run `$ cargo build --all --release` and navigate to the `target/release`
 directory and follow the steps:

View File

@@ -15,10 +15,10 @@ serde_derive = "1.0"
 slog = { version = "^2.2.3" , features = ["max_level_trace"] }
 sloggers = { version = "^0.3" }
 slot_clock = { path = "../../eth2/utils/slot_clock" }
-eth2_ssz = { path = "../../eth2/utils/ssz" }
-eth2_ssz_derive = { path = "../../eth2/utils/ssz_derive" }
+eth2_ssz = "0.1"
+eth2_ssz_derive = "0.1"
 state_processing = { path = "../../eth2/state_processing" }
-tree_hash = { path = "../../eth2/utils/tree_hash" }
+tree_hash = "0.1"
 types = { path = "../../eth2/types" }
 lmd_ghost = { path = "../../eth2/lmd_ghost" }

View File

@@ -236,18 +236,17 @@ impl<T: BeaconChainTypes> BeaconChain<T> {
         Ok(headers?)
     }

-    /// Iterates through all the `BeaconBlock` roots and slots, first returning
-    /// `self.head().beacon_block` then all prior blocks until either genesis or if the database
-    /// fails to return a prior block.
+    /// Iterates across all `(block_root, slot)` pairs from the head of the chain (inclusive) to
+    /// the earliest reachable ancestor (may or may not be genesis).
     ///
-    /// Returns duplicate roots for skip-slots.
+    /// ## Notes
     ///
-    /// Iterator returns `(Hash256, Slot)`.
-    ///
-    /// ## Note
-    ///
-    /// Because this iterator starts at the `head` of the chain (viz., the best block), the first slot
+    /// `slot` always decreases by `1`.
+    /// - Skipped slots contain the root of the closest prior
+    ///   non-skipped slot (identical to the way they are stored in `state.block_roots`) .
+    /// - Iterator returns `(Hash256, Slot)`.
+    /// - As this iterator starts at the `head` of the chain (viz., the best block), the first slot
     /// returned may be earlier than the wall-clock slot.
     pub fn rev_iter_block_roots(&self) -> ReverseBlockRootIterator<T::EthSpec, T::Store> {
         let state = &self.head().beacon_state;
         let block_root = self.head().beacon_block_root;
@@ -258,16 +257,15 @@ impl<T: BeaconChainTypes> BeaconChain<T> {
         ReverseBlockRootIterator::new((block_root, block_slot), iter)
     }

-    /// Iterates through all the `BeaconState` roots and slots, first returning
-    /// `self.head().beacon_state` then all prior states until either genesis or if the database
-    /// fails to return a prior state.
+    /// Iterates across all `(state_root, slot)` pairs from the head of the chain (inclusive) to
+    /// the earliest reachable ancestor (may or may not be genesis).
     ///
-    /// Iterator returns `(Hash256, Slot)`.
+    /// ## Notes
     ///
-    /// ## Note
-    ///
-    /// Because this iterator starts at the `head` of the chain (viz., the best block), the first slot
+    /// `slot` always decreases by `1`.
+    /// - Iterator returns `(Hash256, Slot)`.
+    /// - As this iterator starts at the `head` of the chain (viz., the best block), the first slot
     /// returned may be earlier than the wall-clock slot.
     pub fn rev_iter_state_roots(&self) -> ReverseStateRootIterator<T::EthSpec, T::Store> {
         let state = &self.head().beacon_state;
         let state_root = self.head().beacon_state_root;
@@ -293,8 +291,6 @@ impl<T: BeaconChainTypes> BeaconChain<T> {
     /// Returns a read-lock guarded `BeaconState` which is the `canonical_head` that has been
     /// updated to match the current slot clock.
     pub fn speculative_state(&self) -> Result<RwLockReadGuard<BeaconState<T::EthSpec>>, Error> {
-        // TODO: ensure the state has done a catch-up.
-
         Ok(self.state.read())
     }
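
The reworked iterators always start at the head and walk backwards one slot per step. As a rough usage sketch (the helper name and imports are illustrative, not part of this change; compare `root_at_slot` in the sync service further down):

```rust
use beacon_chain::{BeaconChain, BeaconChainTypes};
use types::{Hash256, Slot};

/// Illustrative helper: find the block root at `target_slot` by walking back
/// from the head. Skipped slots repeat the root of the closest prior
/// non-skipped slot, so any slot reachable from the head yields a root.
fn ancestor_root_at_slot<T: BeaconChainTypes>(
    chain: &BeaconChain<T>,
    target_slot: Slot,
) -> Option<Hash256> {
    chain
        .rev_iter_block_roots()
        .find(|(_root, slot)| *slot == target_slot)
        .map(|(root, _slot)| root)
}
```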

View File

@@ -12,7 +12,7 @@ rpc = { path = "../rpc" }
 rest_api = { path = "../rest_api" }
 prometheus = "^0.6"
 types = { path = "../../eth2/types" }
-tree_hash = { path = "../../eth2/utils/tree_hash" }
+tree_hash = "0.1"
 eth2_config = { path = "../../eth2/utils/eth2_config" }
 slot_clock = { path = "../../eth2/utils/slot_clock" }
 serde = "1.0.93"

View File

@@ -12,8 +12,8 @@ enr = { git = "https://github.com/SigP/rust-libp2p/", rev = "be5710bbde69d8c5be
 types = { path = "../../eth2/types" }
 serde = "1.0"
 serde_derive = "1.0"
-eth2_ssz = { path = "../../eth2/utils/ssz" }
-eth2_ssz_derive = { path = "../../eth2/utils/ssz_derive" }
+eth2_ssz = "0.1"
+eth2_ssz_derive = "0.1"
 slog = { version = "^2.4.1" , features = ["max_level_trace"] }
 version = { path = "../version" }
 tokio = "0.1.16"

View File

@@ -13,8 +13,8 @@ store = { path = "../store" }
 eth2-libp2p = { path = "../eth2-libp2p" }
 types = { path = "../../eth2/types" }
 slog = { version = "^2.2.3" , features = ["max_level_trace"] }
-eth2_ssz = { path = "../../eth2/utils/ssz" }
-tree_hash = { path = "../../eth2/utils/tree_hash" }
+eth2_ssz = "0.1"
+tree_hash = "0.1"
 futures = "0.1.25"
 error-chain = "0.12.0"
 tokio = "0.1.16"

View File

@@ -266,8 +266,7 @@ impl<T: BeaconChainTypes> SimpleSync<T> {
     fn root_at_slot(&self, target_slot: Slot) -> Option<Hash256> {
         self.chain
-            .rev_iter_block_roots(target_slot)
-            .take(1)
+            .rev_iter_block_roots()
             .find(|(_root, slot)| *slot == target_slot)
             .map(|(root, _slot)| root)
     }
@@ -280,8 +279,6 @@ impl<T: BeaconChainTypes> SimpleSync<T> {
         req: BeaconBlockRootsRequest,
         network: &mut NetworkContext<T::EthSpec>,
     ) {
-        let state = &self.chain.head().beacon_state;
-
         debug!(
             self.log,
             "BlockRootsRequest";
@@ -292,8 +289,9 @@ impl<T: BeaconChainTypes> SimpleSync<T> {
         let mut roots: Vec<BlockRootSlot> = self
             .chain
-            .rev_iter_block_roots(std::cmp::min(req.start_slot + req.count, state.slot))
+            .rev_iter_block_roots()
             .take_while(|(_root, slot)| req.start_slot <= *slot)
+            .filter(|(_root, slot)| *slot < req.start_slot + req.count)
             .map(|(block_root, slot)| BlockRootSlot { slot, block_root })
             .collect();
@@ -391,8 +389,6 @@ impl<T: BeaconChainTypes> SimpleSync<T> {
         req: BeaconBlockHeadersRequest,
         network: &mut NetworkContext<T::EthSpec>,
     ) {
-        let state = &self.chain.head().beacon_state;
-
         debug!(
             self.log,
             "BlockHeadersRequest";
@@ -405,8 +401,9 @@ impl<T: BeaconChainTypes> SimpleSync<T> {
         // Collect the block roots.
         let mut roots: Vec<Hash256> = self
             .chain
-            .rev_iter_block_roots(std::cmp::min(req.start_slot + count, state.slot))
+            .rev_iter_block_roots()
             .take_while(|(_root, slot)| req.start_slot <= *slot)
+            .filter(|(_root, slot)| *slot < req.start_slot + count)
             .map(|(root, _slot)| root)
             .collect();
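
Because the iterator no longer accepts a starting slot, both request handlers above carve the requested window out of the head-anchored stream with `take_while` (lower bound) and `filter` (upper bound). The same pattern in isolation, as a hedged sketch with illustrative names:

```rust
use beacon_chain::{BeaconChain, BeaconChainTypes};
use types::{Hash256, Slot};

/// Illustrative only: collect `(root, slot)` pairs for the slot window
/// `[start_slot, start_slot + count)`, walking back from the head.
fn roots_in_window<T: BeaconChainTypes>(
    chain: &BeaconChain<T>,
    start_slot: Slot,
    count: u64,
) -> Vec<(Hash256, Slot)> {
    chain
        .rev_iter_block_roots()
        // Slots decrease monotonically, so stop once we fall below the window.
        .take_while(|(_root, slot)| start_slot <= *slot)
        // Drop anything at or above the window's upper bound.
        .filter(|(_root, slot)| *slot < start_slot + count)
        .collect()
}
```

Note the collected pairs arrive in descending slot order, since iteration proceeds from the head backwards.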

View File

@@ -11,7 +11,7 @@ network = { path = "../network" }
 eth2-libp2p = { path = "../eth2-libp2p" }
 version = { path = "../version" }
 types = { path = "../../eth2/types" }
-eth2_ssz = { path = "../../eth2/utils/ssz" }
+eth2_ssz = "0.1"
 protos = { path = "../../protos" }
 grpcio = { version = "0.4", default-features = false, features = ["protobuf-codec"] }
 clap = "2.32.0"

View File

@@ -11,7 +11,7 @@ tempfile = "3"
 db-key = "0.0.5"
 leveldb = "0.8.4"
 parking_lot = "0.7"
-eth2_ssz = { path = "../../eth2/utils/ssz" }
-eth2_ssz_derive = { path = "../../eth2/utils/ssz_derive" }
-tree_hash = { path = "../../eth2/utils/tree_hash" }
+eth2_ssz = "0.1"
+eth2_ssz_derive = "0.1"
+tree_hash = "0.1"
 types = { path = "../../eth2/types" }

View File

@@ -10,7 +10,7 @@ itertools = "0.8"
 parking_lot = "0.7"
 types = { path = "../types" }
 state_processing = { path = "../state_processing" }
-eth2_ssz = { path = "../utils/ssz" }
+eth2_ssz = "0.1"
 eth2_ssz_derive = { path = "../utils/ssz_derive" }

 [dev-dependencies]

View File

@@ -16,8 +16,8 @@ integer-sqrt = "0.1"
 itertools = "0.8"
 eth2_ssz_types = { path = "../utils/ssz_types" }
 merkle_proof = { path = "../utils/merkle_proof" }
-tree_hash = { path = "../utils/tree_hash" }
-tree_hash_derive = { path = "../utils/tree_hash_derive" }
+tree_hash = "0.1"
+tree_hash_derive = "0.2"
 types = { path = "../types" }
 rayon = "1.0"

View File

@@ -6,14 +6,13 @@ edition = "2018"
 [dependencies]
 bls = { path = "../utils/bls" }
-cached_tree_hash = { path = "../utils/cached_tree_hash" }
 compare_fields = { path = "../utils/compare_fields" }
 compare_fields_derive = { path = "../utils/compare_fields_derive" }
 dirs = "1.0"
 derivative = "1.0"
 eth2_interop_keypairs = { path = "../utils/eth2_interop_keypairs" }
 ethereum-types = "0.6"
-hashing = { path = "../utils/hashing" }
+eth2_hashing = { path = "../utils/eth2_hashing" }
 hex = "0.3"
 int_to_bytes = { path = "../utils/int_to_bytes" }
 log = "0.4"
@@ -22,13 +21,13 @@ rand = "0.5.5"
 serde = "1.0"
 serde_derive = "1.0"
 slog = "^2.2.3"
-eth2_ssz = { path = "../utils/ssz" }
-eth2_ssz_derive = { path = "../utils/ssz_derive" }
+eth2_ssz = "0.1"
+eth2_ssz_derive = "0.1"
 eth2_ssz_types = { path = "../utils/ssz_types" }
 swap_or_not_shuffle = { path = "../utils/swap_or_not_shuffle" }
 test_random_derive = { path = "../utils/test_random_derive" }
-tree_hash = { path = "../utils/tree_hash" }
-tree_hash_derive = { path = "../utils/tree_hash_derive" }
+tree_hash = "0.1"
+tree_hash_derive = "0.2"

 [dev-dependencies]
 env_logger = "0.6.0"

View File

@@ -5,7 +5,7 @@ use serde_derive::{Deserialize, Serialize};
 use ssz_derive::{Decode, Encode};
 use test_random_derive::TestRandom;
 use tree_hash::TreeHash;
-use tree_hash_derive::{CachedTreeHash, SignedRoot, TreeHash};
+use tree_hash_derive::{SignedRoot, TreeHash};

 /// Details an attestation that can be slashable.
 ///
@@ -19,7 +19,6 @@ use tree_hash_derive::{CachedTreeHash, SignedRoot, TreeHash};
     Encode,
     Decode,
     TreeHash,
-    CachedTreeHash,
     TestRandom,
     SignedRoot,
 )]
@@ -59,5 +58,5 @@ mod tests {
     use crate::*;

     ssz_tests!(Attestation<MainnetEthSpec>);
-    cached_tree_hash_tests!(Attestation<MainnetEthSpec>);
 }

View File

@@ -5,7 +5,7 @@ use serde_derive::{Deserialize, Serialize};
 use ssz_derive::{Decode, Encode};
 use test_random_derive::TestRandom;
 use tree_hash::TreeHash;
-use tree_hash_derive::{CachedTreeHash, SignedRoot, TreeHash};
+use tree_hash_derive::{SignedRoot, TreeHash};

 /// The data upon which an attestation is based.
 ///
@@ -21,7 +21,6 @@ use tree_hash_derive::{CachedTreeHash, SignedRoot, TreeHash};
     Encode,
     Decode,
     TreeHash,
-    CachedTreeHash,
     TestRandom,
     SignedRoot,
 )]
@@ -42,5 +41,4 @@ mod tests {
     use super::*;

     ssz_tests!(AttestationData);
-    cached_tree_hash_tests!(AttestationData);
 }

View File

@@ -3,23 +3,12 @@ use crate::test_utils::TestRandom;
 use serde_derive::{Deserialize, Serialize};
 use ssz_derive::{Decode, Encode};
 use test_random_derive::TestRandom;
-use tree_hash_derive::{CachedTreeHash, TreeHash};
+use tree_hash_derive::TreeHash;

 /// Used for pairing an attestation with a proof-of-custody.
 ///
 /// Spec v0.8.1
-#[derive(
-    Debug,
-    Clone,
-    PartialEq,
-    Serialize,
-    Deserialize,
-    Encode,
-    Decode,
-    TreeHash,
-    CachedTreeHash,
-    TestRandom,
-)]
+#[derive(Debug, Clone, PartialEq, Serialize, Deserialize, Encode, Decode, TreeHash, TestRandom)]
 pub struct AttestationDataAndCustodyBit {
     pub data: AttestationData,
     pub custody_bit: bool,
@@ -30,5 +19,5 @@ mod test {
     use super::*;

     ssz_tests!(AttestationDataAndCustodyBit);
-    cached_tree_hash_tests!(AttestationDataAndCustodyBit);
 }

View File

@@ -3,23 +3,12 @@ use crate::{test_utils::TestRandom, EthSpec, IndexedAttestation};
 use serde_derive::{Deserialize, Serialize};
 use ssz_derive::{Decode, Encode};
 use test_random_derive::TestRandom;
-use tree_hash_derive::{CachedTreeHash, TreeHash};
+use tree_hash_derive::TreeHash;

 /// Two conflicting attestations.
 ///
 /// Spec v0.8.0
-#[derive(
-    Debug,
-    PartialEq,
-    Clone,
-    Serialize,
-    Deserialize,
-    Encode,
-    Decode,
-    TreeHash,
-    CachedTreeHash,
-    TestRandom,
-)]
+#[derive(Debug, PartialEq, Clone, Serialize, Deserialize, Encode, Decode, TreeHash, TestRandom)]
 #[serde(bound = "T: EthSpec")]
 pub struct AttesterSlashing<T: EthSpec> {
     pub attestation_1: IndexedAttestation<T>,
@@ -32,5 +21,5 @@ mod tests {
     use crate::*;

     ssz_tests!(AttesterSlashing<MainnetEthSpec>);
-    cached_tree_hash_tests!(AttesterSlashing<MainnetEthSpec>);
 }

View File

@@ -6,7 +6,7 @@ use serde_derive::{Deserialize, Serialize};
 use ssz_derive::{Decode, Encode};
 use test_random_derive::TestRandom;
 use tree_hash::{SignedRoot, TreeHash};
-use tree_hash_derive::{CachedTreeHash, SignedRoot, TreeHash};
+use tree_hash_derive::{SignedRoot, TreeHash};

 /// A block of the `BeaconChain`.
 ///
@@ -20,7 +20,6 @@ use tree_hash_derive::{CachedTreeHash, SignedRoot, TreeHash};
     Encode,
     Decode,
     TreeHash,
-    CachedTreeHash,
     TestRandom,
     SignedRoot,
 )]
@@ -109,5 +108,4 @@ mod tests {
     use super::*;

     ssz_tests!(BeaconBlock<MainnetEthSpec>);
-    cached_tree_hash_tests!(BeaconBlock<MainnetEthSpec>);
 }

View File

@@ -6,23 +6,12 @@ use serde_derive::{Deserialize, Serialize};
 use ssz_derive::{Decode, Encode};
 use ssz_types::VariableList;
 use test_random_derive::TestRandom;
-use tree_hash_derive::{CachedTreeHash, TreeHash};
+use tree_hash_derive::TreeHash;

 /// The body of a `BeaconChain` block, containing operations.
 ///
 /// Spec v0.8.0
-#[derive(
-    Debug,
-    PartialEq,
-    Clone,
-    Serialize,
-    Deserialize,
-    Encode,
-    Decode,
-    TreeHash,
-    CachedTreeHash,
-    TestRandom,
-)]
+#[derive(Debug, PartialEq, Clone, Serialize, Deserialize, Encode, Decode, TreeHash, TestRandom)]
 #[serde(bound = "T: EthSpec")]
 pub struct BeaconBlockBody<T: EthSpec> {
     pub randao_reveal: Signature,
@@ -42,5 +31,4 @@ mod tests {
     use super::*;

     ssz_tests!(BeaconBlockBody<MainnetEthSpec>);
-    cached_tree_hash_tests!(BeaconBlockBody<MainnetEthSpec>);
 }

View File

@@ -6,7 +6,7 @@ use serde_derive::{Deserialize, Serialize};
 use ssz_derive::{Decode, Encode};
 use test_random_derive::TestRandom;
 use tree_hash::{SignedRoot, TreeHash};
-use tree_hash_derive::{CachedTreeHash, SignedRoot, TreeHash};
+use tree_hash_derive::{SignedRoot, TreeHash};

 /// A header of a `BeaconBlock`.
 ///
@@ -20,7 +20,6 @@ use tree_hash_derive::{CachedTreeHash, SignedRoot, TreeHash};
     Encode,
     Decode,
     TreeHash,
-    CachedTreeHash,
     TestRandom,
     SignedRoot,
 )]
@@ -60,5 +59,4 @@ mod tests {
     use super::*;

     ssz_tests!(BeaconBlockHeader);
-    cached_tree_hash_tests!(BeaconBlockHeader);
 }

View File

@@ -2,9 +2,8 @@ use self::committee_cache::get_active_validator_indices;
 use self::exit_cache::ExitCache;
 use crate::test_utils::TestRandom;
 use crate::*;
-use cached_tree_hash::{Error as TreeHashCacheError, TreeHashCache};
 use compare_fields_derive::CompareFields;
-use hashing::hash;
+use eth2_hashing::hash;
 use int_to_bytes::{int_to_bytes32, int_to_bytes8};
 use pubkey_cache::PubkeyCache;
 use serde_derive::{Deserialize, Serialize};
@@ -13,7 +12,7 @@ use ssz_derive::{Decode, Encode};
 use ssz_types::{typenum::Unsigned, BitVector, FixedVector};
 use test_random_derive::TestRandom;
 use tree_hash::TreeHash;
-use tree_hash_derive::{CachedTreeHash, TreeHash};
+use tree_hash_derive::TreeHash;

 pub use self::committee_cache::CommitteeCache;
 pub use beacon_state_types::*;
@@ -58,7 +57,6 @@ pub enum Error {
     CurrentCommitteeCacheUninitialized,
     RelativeEpochError(RelativeEpochError),
     CommitteeCacheUninitialized(RelativeEpoch),
-    TreeHashCacheError(TreeHashCacheError),
     SszTypesError(ssz_types::Error),
 }
@@ -76,7 +74,6 @@ pub enum Error {
     Decode,
     TreeHash,
     CompareFields,
-    CachedTreeHash,
 )]
 #[serde(bound = "T: EthSpec")]
 pub struct BeaconState<T>
@@ -151,12 +148,6 @@ where
     #[ssz(skip_deserializing)]
     #[tree_hash(skip_hashing)]
     #[test_random(default)]
-    pub tree_hash_cache: TreeHashCache,
-    #[serde(skip_serializing, skip_deserializing)]
-    #[ssz(skip_serializing)]
-    #[ssz(skip_deserializing)]
-    #[tree_hash(skip_hashing)]
-    #[test_random(default)]
     pub exit_cache: ExitCache,
 }
@@ -218,7 +209,6 @@ impl<T: EthSpec> BeaconState<T> {
                 CommitteeCache::default(),
             ],
             pubkey_cache: PubkeyCache::default(),
-            tree_hash_cache: TreeHashCache::default(),
             exit_cache: ExitCache::default(),
         }
     }
@@ -929,22 +919,12 @@ impl<T: EthSpec> BeaconState<T> {
     ///
     /// Returns the `tree_hash_root` resulting from the update. This root can be considered the
     /// canonical root of `self`.
+    ///
+    /// ## Note
+    ///
+    /// Cache not currently implemented, just performs a full tree hash.
     pub fn update_tree_hash_cache(&mut self) -> Result<Hash256, Error> {
-        /* TODO(#440): re-enable cached tree hash
-        if self.tree_hash_cache.is_empty() {
-            self.tree_hash_cache = TreeHashCache::new(self)?;
-        } else {
-            // Move the cache outside of `self` to satisfy the borrow checker.
-            let mut cache = std::mem::replace(&mut self.tree_hash_cache, TreeHashCache::default());
-            cache.update(self)?;
-            // Move the updated cache back into `self`.
-            self.tree_hash_cache = cache
-        }
-        self.cached_tree_hash_root()
-        */
+        // TODO(#440): re-enable cached tree hash
         Ok(Hash256::from_slice(&self.tree_hash_root()))
     }
@@ -954,19 +934,22 @@ impl<T: EthSpec> BeaconState<T> {
     ///
     /// Returns an error if the cache is not initialized or if an error is encountered during the
     /// cache update.
+    ///
+    /// ## Note
+    ///
+    /// Cache not currently implemented, just performs a full tree hash.
     pub fn cached_tree_hash_root(&self) -> Result<Hash256, Error> {
-        /* TODO(#440): re-enable cached tree hash
-        self.tree_hash_cache
-            .tree_hash_root()
-            .and_then(|b| Ok(Hash256::from_slice(b)))
-            .map_err(Into::into)
-        */
+        // TODO(#440): re-enable cached tree hash
         Ok(Hash256::from_slice(&self.tree_hash_root()))
     }

     /// Completely drops the tree hash cache, replacing it with a new, empty cache.
+    ///
+    /// ## Note
+    ///
+    /// Cache not currently implemented, is a no-op.
     pub fn drop_tree_hash_cache(&mut self) {
-        self.tree_hash_cache = TreeHashCache::default()
+        // TODO(#440): re-enable cached tree hash
     }
 }
@@ -976,12 +959,6 @@ impl From<RelativeEpochError> for Error {
     }
 }

-impl From<TreeHashCacheError> for Error {
-    fn from(e: TreeHashCacheError) -> Error {
-        Error::TreeHashCacheError(e)
-    }
-}
-
 impl From<ssz_types::Error> for Error {
     fn from(e: ssz_types::Error) -> Error {
         Error::SszTypesError(e)
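
Until the cache is reinstated (see TODO #440), both `update_tree_hash_cache` and `cached_tree_hash_root` reduce to a plain full tree hash. A minimal sketch of what a caller effectively gets, assuming a mutable `BeaconState` (the helper name is illustrative, not part of this change):

```rust
use tree_hash::TreeHash;
use types::{BeaconState, EthSpec, Hash256};

/// Illustrative only: with the cache stubbed out, the "cached" root is just a
/// freshly computed full tree hash of the state on every call.
fn canonical_state_root<T: EthSpec>(state: &mut BeaconState<T>) -> Hash256 {
    let root = state
        .update_tree_hash_cache()
        .expect("full tree hash does not error while the cache is disabled");
    debug_assert_eq!(root, Hash256::from_slice(&state.tree_hash_root()));
    root
}
```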

View File

@@ -4,7 +4,6 @@ use crate::test_utils::*;
 use std::ops::RangeInclusive;

 ssz_tests!(FoundationBeaconState);
-cached_tree_hash_tests!(FoundationBeaconState);

 fn test_beacon_proposer_index<T: EthSpec>() {
     let spec = T::default_spec();

View File

@@ -4,7 +4,7 @@ use serde_derive::{Deserialize, Serialize};
 use ssz_derive::{Decode, Encode};
 use test_random_derive::TestRandom;
 use tree_hash::TreeHash;
-use tree_hash_derive::{CachedTreeHash, SignedRoot, TreeHash};
+use tree_hash_derive::{SignedRoot, TreeHash};

 /// Casper FFG checkpoint, used in attestations.
 ///
@@ -21,7 +21,6 @@ use tree_hash_derive::{CachedTreeHash, SignedRoot, TreeHash};
     Encode,
     Decode,
     TreeHash,
-    CachedTreeHash,
     TestRandom,
     SignedRoot,
 )]
@@ -35,5 +34,5 @@ mod tests {
     use super::*;

     ssz_tests!(Checkpoint);
-    cached_tree_hash_tests!(Checkpoint);
 }

View File

@@ -4,21 +4,10 @@ use serde_derive::{Deserialize, Serialize};
 use ssz_derive::{Decode, Encode};
 use ssz_types::VariableList;
 use test_random_derive::TestRandom;
-use tree_hash_derive::{CachedTreeHash, TreeHash};
+use tree_hash_derive::TreeHash;

 /// Spec v0.8.0
-#[derive(
-    Clone,
-    Debug,
-    PartialEq,
-    TreeHash,
-    CachedTreeHash,
-    Encode,
-    Decode,
-    Serialize,
-    Deserialize,
-    TestRandom,
-)]
+#[derive(Clone, Debug, PartialEq, TreeHash, Encode, Decode, Serialize, Deserialize, TestRandom)]
 #[serde(bound = "T: EthSpec")]
 pub struct CompactCommittee<T: EthSpec> {
     pub pubkeys: VariableList<PublicKey, T::MaxValidatorsPerCommittee>,

View File

@@ -4,7 +4,7 @@ use crate::{Epoch, Hash256};
 use serde_derive::{Deserialize, Serialize};
 use ssz_derive::{Decode, Encode};
 use test_random_derive::TestRandom;
-use tree_hash_derive::{CachedTreeHash, TreeHash};
+use tree_hash_derive::TreeHash;

 /// Specifies the block hash for a shard at an epoch.
 ///
@@ -21,7 +21,6 @@ use tree_hash_derive::{CachedTreeHash, TreeHash};
     Encode,
     Decode,
     TreeHash,
-    CachedTreeHash,
     TestRandom,
 )]
 pub struct Crosslink {
@@ -38,5 +37,5 @@ mod tests {
     use super::*;

     ssz_tests!(Crosslink);
-    cached_tree_hash_tests!(Crosslink);
 }

View File

@@ -1,7 +1,7 @@
 use crate::*;
-use tree_hash_derive::{CachedTreeHash, TreeHash};
+use tree_hash_derive::TreeHash;

-#[derive(Default, Clone, Debug, PartialEq, TreeHash, CachedTreeHash)]
+#[derive(Default, Clone, Debug, PartialEq, TreeHash)]
 pub struct CrosslinkCommittee<'a> {
     pub slot: Slot,
     pub shard: Shard,
@@ -18,7 +18,7 @@ impl<'a> CrosslinkCommittee<'a> {
     }
 }

-#[derive(Default, Clone, Debug, PartialEq, TreeHash, CachedTreeHash)]
+#[derive(Default, Clone, Debug, PartialEq, TreeHash)]
 pub struct OwnedCrosslinkCommittee {
     pub slot: Slot,
     pub shard: Shard,

View File

@@ -5,23 +5,12 @@ use ssz_types::typenum::U33;
 use serde_derive::{Deserialize, Serialize};
 use ssz_derive::{Decode, Encode};
 use test_random_derive::TestRandom;
-use tree_hash_derive::{CachedTreeHash, TreeHash};
+use tree_hash_derive::TreeHash;

 /// A deposit to potentially become a beacon chain validator.
 ///
 /// Spec v0.8.0
-#[derive(
-    Debug,
-    PartialEq,
-    Clone,
-    Serialize,
-    Deserialize,
-    Encode,
-    Decode,
-    TreeHash,
-    CachedTreeHash,
-    TestRandom,
-)]
+#[derive(Debug, PartialEq, Clone, Serialize, Deserialize, Encode, Decode, TreeHash, TestRandom)]
 pub struct Deposit {
     pub proof: FixedVector<Hash256, U33>,
     pub data: DepositData,
@@ -32,5 +21,5 @@ mod tests {
     use super::*;

     ssz_tests!(Deposit);
-    cached_tree_hash_tests!(Deposit);
 }

View File

@@ -7,7 +7,7 @@ use serde_derive::{Deserialize, Serialize};
 use ssz_derive::{Decode, Encode};
 use test_random_derive::TestRandom;
 use tree_hash::{SignedRoot, TreeHash};
-use tree_hash_derive::{CachedTreeHash, SignedRoot, TreeHash};
+use tree_hash_derive::{SignedRoot, TreeHash};

 /// The data supplied by the user to the deposit contract.
 ///
@@ -22,7 +22,6 @@ use tree_hash_derive::{CachedTreeHash, SignedRoot, TreeHash};
     Decode,
     SignedRoot,
     TreeHash,
-    CachedTreeHash,
     TestRandom,
 )]
 pub struct DepositData {
@@ -56,5 +55,5 @@ mod tests {
     use super::*;

     ssz_tests!(DepositData);
-    cached_tree_hash_tests!(DepositData);
 }

View File

@@ -4,23 +4,13 @@ use crate::test_utils::TestRandom;
 use serde_derive::{Deserialize, Serialize};
 use ssz_derive::{Decode, Encode};
 use test_random_derive::TestRandom;
-use tree_hash_derive::{CachedTreeHash, TreeHash};
+use tree_hash_derive::TreeHash;

 /// Contains data obtained from the Eth1 chain.
 ///
 /// Spec v0.8.1
 #[derive(
-    Debug,
-    PartialEq,
-    Clone,
-    Default,
-    Serialize,
-    Deserialize,
-    Encode,
-    Decode,
-    TreeHash,
-    CachedTreeHash,
-    TestRandom,
+    Debug, PartialEq, Clone, Default, Serialize, Deserialize, Encode, Decode, TreeHash, TestRandom,
 )]
 pub struct Eth1Data {
     pub deposit_root: Hash256,
@@ -33,5 +23,5 @@ mod tests {
     use super::*;

     ssz_tests!(Eth1Data);
-    cached_tree_hash_tests!(Eth1Data);
 }

View File

@@ -5,23 +5,13 @@ use crate::Epoch;
 use serde_derive::{Deserialize, Serialize};
 use ssz_derive::{Decode, Encode};
 use test_random_derive::TestRandom;
-use tree_hash_derive::{CachedTreeHash, TreeHash};
+use tree_hash_derive::TreeHash;

 /// Specifies a fork of the `BeaconChain`, to prevent replay attacks.
 ///
 /// Spec v0.8.1
 #[derive(
-    Debug,
-    Clone,
-    PartialEq,
-    Default,
-    Serialize,
-    Deserialize,
-    Encode,
-    Decode,
-    TreeHash,
-    CachedTreeHash,
-    TestRandom,
+    Debug, Clone, PartialEq, Default, Serialize, Deserialize, Encode, Decode, TreeHash, TestRandom,
 )]
 pub struct Fork {
     #[serde(
@@ -65,7 +55,6 @@ mod tests {
     use super::*;

     ssz_tests!(Fork);
-    cached_tree_hash_tests!(Fork);

     fn test_genesis(epoch: Epoch) {
         let fork = Fork::genesis(epoch);
View File

@@ -5,23 +5,12 @@ use serde_derive::{Deserialize, Serialize};
 use ssz_derive::{Decode, Encode};
 use ssz_types::FixedVector;
 use test_random_derive::TestRandom;
-use tree_hash_derive::{CachedTreeHash, TreeHash};
+use tree_hash_derive::TreeHash;

 /// Historical block and state roots.
 ///
 /// Spec v0.8.1
-#[derive(
-    Debug,
-    Clone,
-    PartialEq,
-    Serialize,
-    Deserialize,
-    Encode,
-    Decode,
-    TreeHash,
-    CachedTreeHash,
-    TestRandom,
-)]
+#[derive(Debug, Clone, PartialEq, Serialize, Deserialize, Encode, Decode, TreeHash, TestRandom)]
 pub struct HistoricalBatch<T: EthSpec> {
     pub block_roots: FixedVector<Hash256, T::SlotsPerHistoricalRoot>,
     pub state_roots: FixedVector<Hash256, T::SlotsPerHistoricalRoot>,
@@ -34,5 +23,5 @@ mod tests {
     pub type FoundationHistoricalBatch = HistoricalBatch<MainnetEthSpec>;

     ssz_tests!(FoundationHistoricalBatch);
-    cached_tree_hash_tests!(FoundationHistoricalBatch);
 }

View File

@@ -3,7 +3,7 @@ use serde_derive::{Deserialize, Serialize};
 use ssz_derive::{Decode, Encode};
 use test_random_derive::TestRandom;
 use tree_hash::TreeHash;
-use tree_hash_derive::{CachedTreeHash, SignedRoot, TreeHash};
+use tree_hash_derive::{SignedRoot, TreeHash};

 /// Details an attestation that can be slashable.
 ///
@@ -19,7 +19,6 @@ use tree_hash_derive::{CachedTreeHash, SignedRoot, TreeHash};
     Encode,
     Decode,
     TreeHash,
-    CachedTreeHash,
     TestRandom,
     SignedRoot,
 )]
@@ -124,7 +123,6 @@ mod tests {
     }

     ssz_tests!(IndexedAttestation<MainnetEthSpec>);
-    cached_tree_hash_tests!(IndexedAttestation<MainnetEthSpec>);

     fn create_indexed_attestation(
         target_epoch: u64,
View File

@@ -4,23 +4,12 @@ use crate::{AttestationData, BitList, EthSpec};
 use serde_derive::{Deserialize, Serialize};
 use ssz_derive::{Decode, Encode};
 use test_random_derive::TestRandom;
-use tree_hash_derive::{CachedTreeHash, TreeHash};
+use tree_hash_derive::TreeHash;

 /// An attestation that has been included in the state but not yet fully processed.
 ///
 /// Spec v0.8.0
-#[derive(
-    Debug,
-    Clone,
-    PartialEq,
-    Serialize,
-    Deserialize,
-    Encode,
-    Decode,
-    TreeHash,
-    CachedTreeHash,
-    TestRandom,
-)]
+#[derive(Debug, Clone, PartialEq, Serialize, Deserialize, Encode, Decode, TreeHash, TestRandom)]
 pub struct PendingAttestation<T: EthSpec> {
     pub aggregation_bits: BitList<T::MaxValidatorsPerCommittee>,
     pub data: AttestationData,
@@ -34,5 +23,5 @@ mod tests {
     use crate::*;

     ssz_tests!(PendingAttestation<MainnetEthSpec>);
-    cached_tree_hash_tests!(PendingAttestation<MainnetEthSpec>);
 }

View File

@@ -4,23 +4,12 @@ use crate::test_utils::TestRandom;
 use serde_derive::{Deserialize, Serialize};
 use ssz_derive::{Decode, Encode};
 use test_random_derive::TestRandom;
-use tree_hash_derive::{CachedTreeHash, TreeHash};
+use tree_hash_derive::TreeHash;

 /// Two conflicting proposals from the same proposer (validator).
 ///
 /// Spec v0.8.1
-#[derive(
-    Debug,
-    PartialEq,
-    Clone,
-    Serialize,
-    Deserialize,
-    Encode,
-    Decode,
-    TreeHash,
-    CachedTreeHash,
-    TestRandom,
-)]
+#[derive(Debug, PartialEq, Clone, Serialize, Deserialize, Encode, Decode, TreeHash, TestRandom)]
 pub struct ProposerSlashing {
     pub proposer_index: u64,
     pub header_1: BeaconBlockHeader,
@@ -32,5 +21,5 @@ mod tests {
     use super::*;

     ssz_tests!(ProposerSlashing);
-    cached_tree_hash_tests!(ProposerSlashing);
 }

View File

@@ -238,26 +238,6 @@ macro_rules! impl_ssz {
             }
         }

-        impl cached_tree_hash::CachedTreeHash for $type {
-            fn new_tree_hash_cache(
-                &self,
-                depth: usize,
-            ) -> Result<cached_tree_hash::TreeHashCache, cached_tree_hash::Error> {
-                self.0.new_tree_hash_cache(depth)
-            }
-
-            fn tree_hash_cache_schema(&self, depth: usize) -> cached_tree_hash::BTreeSchema {
-                self.0.tree_hash_cache_schema(depth)
-            }
-
-            fn update_tree_hash_cache(
-                &self,
-                cache: &mut cached_tree_hash::TreeHashCache,
-            ) -> Result<(), cached_tree_hash::Error> {
-                self.0.update_tree_hash_cache(cache)
-            }
-        }
-
         impl TestRandom for $type {
             fn random_for_test(rng: &mut impl RngCore) -> Self {
                 $type::from(u64::random_for_test(rng))
@@ -579,7 +559,6 @@ macro_rules! all_tests {
         math_between_tests!($type, $type);
         math_tests!($type);
         ssz_tests!($type);
-        cached_tree_hash_tests!($type);

         mod u64_tests {
             use super::*;

View File

@@ -33,53 +33,3 @@ macro_rules! ssz_tests {
         }
     };
 }
#[cfg(test)]
#[macro_export]
macro_rules! cached_tree_hash_tests {
($type: ty) => {
#[test]
#[ignore]
// FIXME: re-enable https://github.com/sigp/lighthouse/issues/440
pub fn test_cached_tree_hash() {
use crate::test_utils::{SeedableRng, TestRandom, XorShiftRng};
use tree_hash::TreeHash;
let mut rng = XorShiftRng::from_seed([42; 16]);
// Test the original hash
let original = <$type>::random_for_test(&mut rng);
let mut cache = cached_tree_hash::TreeHashCache::new(&original).unwrap();
assert_eq!(
cache.tree_hash_root().unwrap().to_vec(),
original.tree_hash_root(),
"Original hash failed."
);
// Test the updated hash
let modified = <$type>::random_for_test(&mut rng);
cache.update(&modified).unwrap();
assert_eq!(
cache.tree_hash_root().unwrap().to_vec(),
modified.tree_hash_root(),
"Modification hash failed"
);
// Produce a new cache for the modified object and compare it to the updated cache.
let mut modified_cache = cached_tree_hash::TreeHashCache::new(&modified).unwrap();
// Reset the caches.
cache.reset_modifications();
modified_cache.reset_modifications();
// Ensure the modified cache is the same as a newly created cache. This is a sanity
// check to make sure there are no artifacts of the original cache remaining after an
// update.
assert_eq!(
modified_cache, cache,
"The modified cache does not match a new cache."
)
}
};
}

View File

@@ -7,7 +7,7 @@ use serde_derive::{Deserialize, Serialize};
 use ssz_derive::{Decode, Encode};
 use test_random_derive::TestRandom;
 use tree_hash::TreeHash;
-use tree_hash_derive::{CachedTreeHash, SignedRoot, TreeHash};
+use tree_hash_derive::{SignedRoot, TreeHash};

 /// The data submitted to the deposit contract.
 ///
@@ -20,7 +20,6 @@ use tree_hash_derive::{CachedTreeHash, SignedRoot, TreeHash};
     Encode,
     Decode,
     TreeHash,
-    CachedTreeHash,
     TestRandom,
     SignedRoot,
     Derivative,
@@ -43,5 +42,5 @@ mod tests {
     use super::*;

     ssz_tests!(Transfer);
-    cached_tree_hash_tests!(Transfer);
 }

View File

@@ -3,23 +3,12 @@ use crate::{test_utils::TestRandom, Epoch, Hash256, PublicKey};
 use serde_derive::{Deserialize, Serialize};
 use ssz_derive::{Decode, Encode};
 use test_random_derive::TestRandom;
-use tree_hash_derive::{CachedTreeHash, TreeHash};
+use tree_hash_derive::TreeHash;

 /// Information about a `BeaconChain` validator.
 ///
 /// Spec v0.8.0
-#[derive(
-    Debug,
-    Clone,
-    PartialEq,
-    Serialize,
-    Deserialize,
-    Encode,
-    Decode,
-    TestRandom,
-    TreeHash,
-    CachedTreeHash,
-)]
+#[derive(Debug, Clone, PartialEq, Serialize, Deserialize, Encode, Decode, TestRandom, TreeHash)]
 pub struct Validator {
     pub pubkey: PublicKey,
     pub withdrawal_credentials: Hash256,
@@ -128,5 +117,5 @@ mod tests {
     }

     ssz_tests!(Validator);
-    cached_tree_hash_tests!(Validator);
 }

View File

@@ -5,7 +5,7 @@ use serde_derive::{Deserialize, Serialize};
 use ssz_derive::{Decode, Encode};
 use test_random_derive::TestRandom;
 use tree_hash::TreeHash;
-use tree_hash_derive::{CachedTreeHash, SignedRoot, TreeHash};
+use tree_hash_derive::{SignedRoot, TreeHash};

 /// An exit voluntarily submitted a validator who wishes to withdraw.
 ///
@@ -19,7 +19,6 @@ use tree_hash_derive::{CachedTreeHash, SignedRoot, TreeHash};
     Encode,
     Decode,
     TreeHash,
-    CachedTreeHash,
     TestRandom,
     SignedRoot,
 )]
@@ -36,5 +35,5 @@ mod tests {
     use super::*;

     ssz_tests!(VoluntaryExit);
-    cached_tree_hash_tests!(VoluntaryExit);
 }
} }

View File

@@ -6,16 +6,15 @@ edition = "2018"
 [dependencies]
 milagro_bls = { git = "https://github.com/sigp/milagro_bls", tag = "v0.9.0" }
-cached_tree_hash = { path = "../cached_tree_hash" }
-hashing = { path = "../hashing" }
+eth2_hashing = { path = "../eth2_hashing" }
 hex = "0.3"
 rand = "^0.5"
 serde = "1.0"
 serde_derive = "1.0"
 serde_hex = { path = "../serde_hex" }
-eth2_ssz = { path = "../ssz" }
+eth2_ssz = "0.1"
 eth2_ssz_types = { path = "../ssz_types" }
-tree_hash = { path = "../tree_hash" }
+tree_hash = "0.1"

 [features]
 fake_crypto = []

View File

@@ -143,8 +143,6 @@ impl_ssz!(

 impl_tree_hash!(AggregateSignature, U96);

-impl_cached_tree_hash!(AggregateSignature, U96);
-
 impl Serialize for AggregateSignature {
     /// Serde serialization is compliant the Ethereum YAML test format.
     fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>

View File

@@ -86,8 +86,6 @@ impl_ssz!(

 impl_tree_hash!(FakeAggregateSignature, U96);

-impl_cached_tree_hash!(FakeAggregateSignature, U96);
-
 impl Serialize for FakeAggregateSignature {
     fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
     where

View File

@@ -84,8 +84,6 @@ impl_ssz!(FakePublicKey, BLS_PUBLIC_KEY_BYTE_SIZE, "FakePublicKey");

 impl_tree_hash!(FakePublicKey, U48);

-impl_cached_tree_hash!(FakePublicKey, U48);
-
 impl Serialize for FakePublicKey {
     fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
     where
@@ -129,7 +127,6 @@ impl Hash for FakePublicKey {
 mod tests {
     use super::*;
     use ssz::ssz_encode;
-    use tree_hash::TreeHash;

     #[test]
     pub fn test_ssz_round_trip() {
@@ -141,27 +138,4 @@ mod tests {

         assert_eq!(original, decoded);
     }
-
-    #[test]
-    pub fn test_cached_tree_hash() {
-        let sk = SecretKey::random();
-        let original = FakePublicKey::from_secret_key(&sk);
-
-        let mut cache = cached_tree_hash::TreeHashCache::new(&original).unwrap();
-
-        assert_eq!(
-            cache.tree_hash_root().unwrap().to_vec(),
-            original.tree_hash_root()
-        );
-
-        let sk = SecretKey::random();
-        let modified = FakePublicKey::from_secret_key(&sk);
-
-        cache.update(&modified).unwrap();
-
-        assert_eq!(
-            cache.tree_hash_root().unwrap().to_vec(),
-            modified.tree_hash_root()
-        );
-    }
 }

View File

@@ -84,8 +84,6 @@ impl_ssz!(FakeSignature, BLS_SIG_BYTE_SIZE, "FakeSignature");

 impl_tree_hash!(FakeSignature, U96);

-impl_cached_tree_hash!(FakeSignature, U96);
-
 impl Serialize for FakeSignature {
     fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
     where

View File

@@ -57,7 +57,7 @@ pub const BLS_SIG_BYTE_SIZE: usize = 96;
 pub const BLS_SECRET_KEY_BYTE_SIZE: usize = 48;
 pub const BLS_PUBLIC_KEY_BYTE_SIZE: usize = 48;

-use hashing::hash;
+use eth2_hashing::hash;
 use ssz::ssz_encode;

 /// Returns the withdrawal credentials for a given public key.

View File

@@ -61,30 +61,6 @@ macro_rules! impl_tree_hash {
     };
 }

-macro_rules! impl_cached_tree_hash {
-    ($type: ty, $byte_size: ident) => {
-        impl cached_tree_hash::CachedTreeHash for $type {
-            fn new_tree_hash_cache(
-                &self,
-                _depth: usize,
-            ) -> Result<cached_tree_hash::TreeHashCache, cached_tree_hash::Error> {
-                unimplemented!("CachedTreeHash is not implemented for BLS types")
-            }
-
-            fn tree_hash_cache_schema(&self, _depth: usize) -> cached_tree_hash::BTreeSchema {
-                unimplemented!("CachedTreeHash is not implemented for BLS types")
-            }
-
-            fn update_tree_hash_cache(
-                &self,
-                _cache: &mut cached_tree_hash::TreeHashCache,
-            ) -> Result<(), cached_tree_hash::Error> {
-                unimplemented!("CachedTreeHash is not implemented for BLS types")
-            }
-        }
-    };
-}
-
 macro_rules! bytes_struct {
     ($name: ident, $type: ty, $byte_size: expr, $small_name: expr, $ssz_type_size: ident,
     $type_str: expr, $byte_size_str: expr) => {
@@ -166,8 +142,6 @@ macro_rules! bytes_struct {

         impl_tree_hash!($name, $ssz_type_size);

-        impl_cached_tree_hash!($name, $ssz_type_size);
-
         impl serde::ser::Serialize for $name {
             /// Serde serialization is compliant the Ethereum YAML test format.
             fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>

View File

@@ -92,8 +92,6 @@ impl_ssz!(PublicKey, BLS_PUBLIC_KEY_BYTE_SIZE, "PublicKey");

 impl_tree_hash!(PublicKey, U48);

-impl_cached_tree_hash!(PublicKey, U48);
-
 impl Serialize for PublicKey {
     fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
     where
@@ -137,7 +135,6 @@ impl Hash for PublicKey {
 mod tests {
     use super::*;
     use ssz::ssz_encode;
-    use tree_hash::TreeHash;

     #[test]
     pub fn test_ssz_round_trip() {
@@ -158,29 +155,4 @@ mod tests {
         let bytes = ssz_encode(&original);
         assert_eq!(bytes.len(), BLS_PUBLIC_KEY_BYTE_SIZE);
     }
-
-    #[test]
-    // TODO: once `CachedTreeHash` is fixed, this test should _not_ panic.
-    #[should_panic]
-    pub fn test_cached_tree_hash() {
-        let sk = SecretKey::random();
-        let original = PublicKey::from_secret_key(&sk);
-
-        let mut cache = cached_tree_hash::TreeHashCache::new(&original).unwrap();
-
-        assert_eq!(
-            cache.tree_hash_root().unwrap().to_vec(),
-            original.tree_hash_root()
-        );
-
-        let sk = SecretKey::random();
-        let modified = PublicKey::from_secret_key(&sk);
-
-        cache.update(&modified).unwrap();
-
-        assert_eq!(
-            cache.tree_hash_root().unwrap().to_vec(),
-            modified.tree_hash_root()
-        );
-    }
 }

View File

@@ -47,8 +47,6 @@ impl_ssz!(SecretKey, BLS_SECRET_KEY_BYTE_SIZE, "SecretKey");

 impl_tree_hash!(SecretKey, U48);

-impl_cached_tree_hash!(SecretKey, U48);
-
 impl Serialize for SecretKey {
     fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
     where

View File

@@ -111,8 +111,6 @@ impl_ssz!(Signature, BLS_SIG_BYTE_SIZE, "Signature");

 impl_tree_hash!(Signature, U96);

-impl_cached_tree_hash!(Signature, U96);
-
 impl Serialize for Signature {
     /// Serde serialization is compliant the Ethereum YAML test format.
     fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
@@ -141,7 +139,6 @@ mod tests {
     use super::super::Keypair;
     use super::*;
     use ssz::ssz_encode;
-    use tree_hash::TreeHash;

     #[test]
     pub fn test_ssz_round_trip() {
@@ -164,30 +161,6 @@ mod tests {
         assert_eq!(bytes.len(), BLS_SIG_BYTE_SIZE);
     }

-    #[test]
-    // TODO: once `CachedTreeHash` is fixed, this test should _not_ panic.
-    #[should_panic]
-    pub fn test_cached_tree_hash() {
-        let keypair = Keypair::random();
-        let original = Signature::new(&[42, 42], 0, &keypair.sk);
-
-        let mut cache = cached_tree_hash::TreeHashCache::new(&original).unwrap();
-
-        assert_eq!(
-            cache.tree_hash_root().unwrap().to_vec(),
-            original.tree_hash_root()
-        );
-
-        let modified = Signature::new(&[99, 99], 0, &keypair.sk);
-
-        cache.update(&modified).unwrap();
-
-        assert_eq!(
-            cache.tree_hash_root().unwrap().to_vec(),
-            modified.tree_hash_root()
-        );
-    }
-
     #[test]
     pub fn test_empty_signature() {
         let sig = Signature::empty_signature();
View File

@@ -1,14 +0,0 @@
[package]
name = "cached_tree_hash"
version = "0.1.0"
authors = ["Paul Hauner <paul@paulhauner.com>"]
edition = "2018"
[dev-dependencies]
tree_hash_derive = { path = "../tree_hash_derive" }
[dependencies]
tree_hash = { path = "../tree_hash" }
ethereum-types = "0.6"
hashing = { path = "../hashing" }
int_to_bytes = { path = "../int_to_bytes" }

View File

@@ -1,76 +0,0 @@
# Tree hashing
Provides both cached and non-cached tree hashing methods.
## Standard Tree Hash
```rust
use tree_hash_derive::TreeHash;
#[derive(TreeHash)]
struct Foo {
a: u64,
b: Vec<u64>,
}
fn main() {
let foo = Foo {
a: 42,
b: vec![1, 2, 3]
};
println!("root: {}", foo.tree_hash_root());
}
```
## Cached Tree Hash
```rust
use tree_hash_derive::{TreeHash, CachedTreeHash};
#[derive(TreeHash, CachedTreeHash)]
struct Foo {
a: u64,
b: Vec<u64>,
}
#[derive(TreeHash, CachedTreeHash)]
struct Bar {
a: Vec<Foo>,
b: u64,
}
fn main() {
let bar = Bar {
a: vec![
Foo {
a: 42,
b: vec![1, 2, 3]
}
],
b: 42
};
let modified_bar = Bar {
a: vec![
Foo {
a: 100,
b: vec![1, 2, 3, 4, 5, 6]
},
Foo {
a: 42,
b: vec![]
}
],
b: 99
};
let mut hasher = CachedTreeHasher::new(&bar).unwrap();
hasher.update(&modified_bar).unwrap();
// Assert that the cached tree hash matches a standard tree hash.
assert_eq!(hasher.tree_hash_root(), modified_bar.tree_hash_root());
}
```

View File

@@ -1,340 +0,0 @@
use super::*;
/// A schema defining a binary tree over a `TreeHashCache`.
///
/// This structure is used for succinct storage; run-time functionality is gained by converting a
/// `BTreeSchema` into a `BTreeOverlay`.
#[derive(Debug, PartialEq, Clone)]
pub struct BTreeSchema {
/// The depth of a schema defines how far it is nested within other fixed-length items.
///
/// Each time a new variable-length object is created all items within it are assigned a depth
/// of `depth + 1`.
///
/// When storing the schemas in a list, the depth parameter allows for removing all schemas
/// belonging to a specific variable-length item without removing schemas related to adjacent
/// variable-length items.
pub depth: usize,
lengths: Vec<usize>,
}
impl BTreeSchema {
pub fn from_lengths(depth: usize, lengths: Vec<usize>) -> Self {
Self { depth, lengths }
}
pub fn into_overlay(self, offset: usize) -> BTreeOverlay {
BTreeOverlay::from_schema(self, offset)
}
}
impl Into<BTreeSchema> for BTreeOverlay {
fn into(self) -> BTreeSchema {
BTreeSchema {
depth: self.depth,
lengths: self.lengths,
}
}
}
/// Provides a status for some leaf-node in binary tree.
#[derive(Debug, PartialEq, Clone)]
pub enum LeafNode {
/// The leaf node does not exist in this tree.
DoesNotExist,
/// The leaf node exists in the tree and has a real value within the given `chunk` range.
Exists(Range<usize>),
/// The leaf node exists in the tree only as padding.
Padding,
}
/// Instantiated from a `BTreeSchema`, a `BTreeOverlay` allows for interpreting some
/// non-consecutive chunks of a `TreeHashCache` as a perfect binary tree.
///
/// The primary purpose of this struct is to map from binary tree "nodes" to `TreeHashCache`
/// "chunks". Each tree has nodes `0..n` where `n` is the number of nodes and `0` is the root node.
/// Each of these nodes is mapped to a chunk, starting from `self.offset` and increasing in steps
/// of `1` for internal nodes and arbitrary steps for leaf-nodes.
#[derive(Debug, PartialEq, Clone)]
pub struct BTreeOverlay {
offset: usize,
/// See `BTreeSchema.depth` for a description.
pub depth: usize,
lengths: Vec<usize>,
}
impl BTreeOverlay {
/// Instantiates a new instance for `item`, where it's first chunk is `initial_offset` and has
/// the specified `depth`.
pub fn new<T>(item: &T, initial_offset: usize, depth: usize) -> Self
where
T: CachedTreeHash,
{
Self::from_schema(item.tree_hash_cache_schema(depth), initial_offset)
}
/// Instantiates a new instance from a schema, where it's first chunk is `offset`.
pub fn from_schema(schema: BTreeSchema, offset: usize) -> Self {
Self {
offset,
depth: schema.depth,
lengths: schema.lengths,
}
}
/// Returns a `LeafNode` for each of the `n` leaves of the tree.
///
/// `LeafNode::DoesNotExist` is returned for each element `i` in `0..n` where `i >=
/// self.num_leaf_nodes()`.
pub fn get_leaf_nodes(&self, n: usize) -> Vec<LeafNode> {
let mut running_offset = self.offset + self.num_internal_nodes();
let mut leaf_nodes: Vec<LeafNode> = self
.lengths
.iter()
.map(|length| {
let range = running_offset..running_offset + length;
running_offset += length;
LeafNode::Exists(range)
})
.collect();
leaf_nodes.resize(self.num_leaf_nodes(), LeafNode::Padding);
leaf_nodes.resize(n, LeafNode::DoesNotExist);
leaf_nodes
}
/// Returns the number of leaf nodes in the tree.
pub fn num_leaf_nodes(&self) -> usize {
self.lengths.len().next_power_of_two()
}
/// Returns the number of leaves in the tree that are padding.
pub fn num_padding_leaves(&self) -> usize {
self.num_leaf_nodes() - self.lengths.len()
}
/// Returns the number of nodes in the tree.
///
/// Note: this is distinct from `num_chunks`, which returns the total number of chunks in
/// this tree.
pub fn num_nodes(&self) -> usize {
2 * self.num_leaf_nodes() - 1
}
/// Returns the number of internal (non-leaf) nodes in the tree.
pub fn num_internal_nodes(&self) -> usize {
self.num_leaf_nodes() - 1
}
/// Returns the chunk of the first node of the tree.
fn first_node(&self) -> usize {
self.offset
}
/// Returns the root chunk of the tree (the zero-th node)
pub fn root(&self) -> usize {
self.first_node()
}
/// Returns the first chunk outside of the boundary of this tree. It is the root node chunk
/// plus the total number of chunks in the tree.
pub fn next_node(&self) -> usize {
self.first_node() + self.num_internal_nodes() + self.num_leaf_nodes() - self.lengths.len()
+ self.lengths.iter().sum::<usize>()
}
/// Returns the height of the tree, i.e. the base-two logarithm of the number of leaf nodes (a
/// tree with a single leaf node has a height of `0`).
pub fn height(&self) -> usize {
self.num_leaf_nodes().trailing_zeros() as usize
}
/// Returns the range of chunks that belong to the internal nodes of the tree.
pub fn internal_chunk_range(&self) -> Range<usize> {
self.offset..self.offset + self.num_internal_nodes()
}
/// Returns all of the chunks that are encompassed by the tree.
pub fn chunk_range(&self) -> Range<usize> {
self.first_node()..self.next_node()
}
/// Returns the number of chunks inside this tree (including subtrees).
///
/// Note: this is distinct from `num_nodes` which returns the number of nodes in the binary
/// tree.
pub fn num_chunks(&self) -> usize {
self.next_node() - self.first_node()
}
/// Returns the first chunk of the first leaf node in the tree.
pub fn first_leaf_node(&self) -> usize {
self.offset + self.num_internal_nodes()
}
/// Returns the chunks for some given parent node.
///
/// Note: it is a parent _node_ not a parent _chunk_.
pub fn child_chunks(&self, parent: usize) -> (usize, usize) {
let children = children(parent);
if children.1 < self.num_internal_nodes() {
(children.0 + self.offset, children.1 + self.offset)
} else {
let chunks = self.n_leaf_node_chunks(children.1);
(chunks[chunks.len() - 2], chunks[chunks.len() - 1])
}
}
/// Returns a vec of (parent_chunk, (left_child_chunk, right_child_chunk)).
pub fn internal_parents_and_children(&self) -> Vec<(usize, (usize, usize))> {
let mut chunks = Vec::with_capacity(self.num_nodes());
chunks.append(&mut self.internal_node_chunks());
chunks.append(&mut self.leaf_node_chunks());
(0..self.num_internal_nodes())
.map(|parent| {
let children = children(parent);
(chunks[parent], (chunks[children.0], chunks[children.1]))
})
.collect()
}
/// Returns a vec of chunk indices for each internal node of the tree.
pub fn internal_node_chunks(&self) -> Vec<usize> {
(self.offset..self.offset + self.num_internal_nodes()).collect()
}
/// Returns a vec of the first chunk for each leaf node of the tree.
pub fn leaf_node_chunks(&self) -> Vec<usize> {
self.n_leaf_node_chunks(self.num_leaf_nodes())
}
/// Returns a vec of the first chunk index for the first `n` leaf nodes of the tree.
fn n_leaf_node_chunks(&self, n: usize) -> Vec<usize> {
let mut chunks = Vec::with_capacity(n);
let mut chunk = self.offset + self.num_internal_nodes();
for i in 0..n {
chunks.push(chunk);
match self.lengths.get(i) {
Some(len) => {
chunk += len;
}
None => chunk += 1,
}
}
chunks
}
}
fn children(parent: usize) -> (usize, usize) {
((2 * parent + 1), (2 * parent + 2))
}
#[cfg(test)]
mod test {
use super::*;
fn get_tree_a(n: usize) -> BTreeOverlay {
BTreeSchema::from_lengths(0, vec![1; n]).into_overlay(0)
}
#[test]
fn leaf_node_chunks() {
let tree = get_tree_a(4);
assert_eq!(tree.leaf_node_chunks(), vec![3, 4, 5, 6])
}
#[test]
fn internal_node_chunks() {
let tree = get_tree_a(4);
assert_eq!(tree.internal_node_chunks(), vec![0, 1, 2])
}
#[test]
fn internal_parents_and_children() {
let tree = get_tree_a(4);
assert_eq!(
tree.internal_parents_and_children(),
vec![(0, (1, 2)), (1, (3, 4)), (2, (5, 6))]
)
}
#[test]
fn chunk_range() {
let tree = get_tree_a(4);
assert_eq!(tree.chunk_range(), 0..7);
let tree = get_tree_a(1);
assert_eq!(tree.chunk_range(), 0..1);
let tree = get_tree_a(2);
assert_eq!(tree.chunk_range(), 0..3);
let tree = BTreeSchema::from_lengths(0, vec![1, 1]).into_overlay(11);
assert_eq!(tree.chunk_range(), 11..14);
let tree = BTreeSchema::from_lengths(0, vec![7, 7, 7]).into_overlay(0);
assert_eq!(tree.chunk_range(), 0..25);
}
#[test]
fn get_leaf_node() {
let tree = get_tree_a(4);
let leaves = tree.get_leaf_nodes(5);
assert_eq!(leaves[0], LeafNode::Exists(3..4));
assert_eq!(leaves[1], LeafNode::Exists(4..5));
assert_eq!(leaves[2], LeafNode::Exists(5..6));
assert_eq!(leaves[3], LeafNode::Exists(6..7));
assert_eq!(leaves[4], LeafNode::DoesNotExist);
let tree = get_tree_a(3);
let leaves = tree.get_leaf_nodes(5);
assert_eq!(leaves[0], LeafNode::Exists(3..4));
assert_eq!(leaves[1], LeafNode::Exists(4..5));
assert_eq!(leaves[2], LeafNode::Exists(5..6));
assert_eq!(leaves[3], LeafNode::Padding);
assert_eq!(leaves[4], LeafNode::DoesNotExist);
let tree = get_tree_a(0);
let leaves = tree.get_leaf_nodes(2);
assert_eq!(leaves[0], LeafNode::Padding);
assert_eq!(leaves[1], LeafNode::DoesNotExist);
let tree = BTreeSchema::from_lengths(0, vec![3]).into_overlay(0);
let leaves = tree.get_leaf_nodes(2);
assert_eq!(leaves[0], LeafNode::Exists(0..3));
assert_eq!(leaves[1], LeafNode::DoesNotExist);
let tree = BTreeSchema::from_lengths(0, vec![3]).into_overlay(10);
let leaves = tree.get_leaf_nodes(2);
assert_eq!(leaves[0], LeafNode::Exists(10..13));
assert_eq!(leaves[1], LeafNode::DoesNotExist);
}
#[test]
fn root_of_one_node() {
let tree = get_tree_a(1);
assert_eq!(tree.root(), 0);
assert_eq!(tree.num_internal_nodes(), 0);
assert_eq!(tree.num_leaf_nodes(), 1);
}
#[test]
fn child_chunks() {
let tree = get_tree_a(4);
assert_eq!(tree.child_chunks(0), (1, 2))
}
}

View File

@ -1,19 +0,0 @@
use tree_hash::TreeHashType;
#[derive(Debug, PartialEq, Clone)]
pub enum Error {
ShouldNotProduceBTreeOverlay,
NoFirstNode,
NoBytesForRoot,
UnableToObtainSlices,
UnableToGrowMerkleTree,
UnableToShrinkMerkleTree,
TreeCannotHaveZeroNodes,
CacheNotInitialized,
ShouldNeverBePacked(TreeHashType),
BytesAreNotEvenChunks(usize),
NoModifiedFieldForChunk(usize),
NoBytesForChunk(usize),
NoSchemaForIndex(usize),
NotLeafNode(usize),
}

View File

@ -1,117 +0,0 @@
use super::*;
use crate::merkleize::merkleize;
use ethereum_types::H256;
pub mod vec;
macro_rules! impl_for_single_leaf_int {
($type: ident) => {
impl CachedTreeHash for $type {
fn new_tree_hash_cache(&self, _depth: usize) -> Result<TreeHashCache, Error> {
Ok(TreeHashCache::from_bytes(
merkleize(self.to_le_bytes().to_vec()),
false,
None,
)?)
}
fn tree_hash_cache_schema(&self, depth: usize) -> BTreeSchema {
BTreeSchema::from_lengths(depth, vec![1])
}
fn update_tree_hash_cache(&self, cache: &mut TreeHashCache) -> Result<(), Error> {
let leaf = merkleize(self.to_le_bytes().to_vec());
cache.maybe_update_chunk(cache.chunk_index, &leaf)?;
cache.chunk_index += 1;
Ok(())
}
}
};
}
impl_for_single_leaf_int!(u8);
impl_for_single_leaf_int!(u16);
impl_for_single_leaf_int!(u32);
impl_for_single_leaf_int!(u64);
impl_for_single_leaf_int!(usize);
impl CachedTreeHash for bool {
fn new_tree_hash_cache(&self, _depth: usize) -> Result<TreeHashCache, Error> {
Ok(TreeHashCache::from_bytes(
merkleize((*self as u8).to_le_bytes().to_vec()),
false,
None,
)?)
}
fn tree_hash_cache_schema(&self, depth: usize) -> BTreeSchema {
BTreeSchema::from_lengths(depth, vec![1])
}
fn update_tree_hash_cache(&self, cache: &mut TreeHashCache) -> Result<(), Error> {
let leaf = merkleize((*self as u8).to_le_bytes().to_vec());
cache.maybe_update_chunk(cache.chunk_index, &leaf)?;
cache.chunk_index += 1;
Ok(())
}
}
macro_rules! impl_for_u8_array {
($len: expr) => {
impl CachedTreeHash for [u8; $len] {
fn new_tree_hash_cache(&self, _depth: usize) -> Result<TreeHashCache, Error> {
Ok(TreeHashCache::from_bytes(
merkleize(self.to_vec()),
false,
None,
)?)
}
fn tree_hash_cache_schema(&self, depth: usize) -> BTreeSchema {
BTreeSchema::from_lengths(depth, vec![1])
}
fn update_tree_hash_cache(&self, cache: &mut TreeHashCache) -> Result<(), Error> {
let leaf = merkleize(self.to_vec());
cache.maybe_update_chunk(cache.chunk_index, &leaf)?;
cache.chunk_index += 1;
Ok(())
}
}
};
}
impl_for_u8_array!(4);
impl_for_u8_array!(32);
impl CachedTreeHash for H256 {
fn new_tree_hash_cache(&self, _depth: usize) -> Result<TreeHashCache, Error> {
Ok(TreeHashCache::from_bytes(
self.as_bytes().to_vec(),
false,
None,
)?)
}
fn num_tree_hash_cache_chunks(&self) -> usize {
1
}
fn tree_hash_cache_schema(&self, depth: usize) -> BTreeSchema {
BTreeSchema::from_lengths(depth, vec![1])
}
fn update_tree_hash_cache(&self, cache: &mut TreeHashCache) -> Result<(), Error> {
cache.maybe_update_chunk(cache.chunk_index, self.as_bytes())?;
cache.chunk_index += 1;
Ok(())
}
}
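// Illustrative sketch (not part of the original file): with the impls above, a basic int
// occupies a single 32-byte chunk holding its zero-padded little-endian bytes, so a cache
// built for a `u64` should have that chunk as its root.
#[cfg(test)]
mod basic_impl_example {
    use super::*;

    #[test]
    fn u64_root_is_padded_le_bytes() {
        let x: u64 = 42;
        let cache = TreeHashCache::new(&x).unwrap();

        // Expected root: the little-endian bytes of `x`, padded to a full 32-byte chunk.
        let mut expected = x.to_le_bytes().to_vec();
        expected.resize(32, 0);

        assert_eq!(cache.tree_hash_root().unwrap(), &expected[..]);
    }
}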

View File

@ -1,338 +0,0 @@
use super::*;
use crate::btree_overlay::LeafNode;
use crate::merkleize::{merkleize, num_sanitized_leaves, sanitise_bytes};
macro_rules! impl_for_list {
($type: ty) => {
impl<T> CachedTreeHash for $type
where
T: CachedTreeHash + TreeHash,
{
fn new_tree_hash_cache(&self, depth: usize) -> Result<TreeHashCache, Error> {
let (mut cache, schema) = new_tree_hash_cache(self, depth)?;
cache.add_length_nodes(schema.into_overlay(0).chunk_range(), self.len())?;
Ok(cache)
}
fn num_tree_hash_cache_chunks(&self) -> usize {
// Add two extra nodes to cater for the node before and after to allow mixing-in length.
BTreeOverlay::new(self, 0, 0).num_chunks() + 2
}
fn tree_hash_cache_schema(&self, depth: usize) -> BTreeSchema {
produce_schema(self, depth)
}
fn update_tree_hash_cache(&self, cache: &mut TreeHashCache) -> Result<(), Error> {
// Skip the length-mixed-in root node.
cache.chunk_index += 1;
// Update the cache, returning the new overlay.
let new_overlay = update_tree_hash_cache(&self, cache)?;
// Mix in length
cache.mix_in_length(new_overlay.chunk_range(), self.len())?;
// Skip an extra node to clear the length node.
cache.chunk_index += 1;
Ok(())
}
}
};
}
impl_for_list!(Vec<T>);
impl_for_list!(&[T]);
/// Build a new tree hash cache for some slice.
///
/// Valid for both variable- and fixed-length slices. Does _not_ mix-in the length of the list,
/// the caller must do this.
pub fn new_tree_hash_cache<T: CachedTreeHash>(
vec: &[T],
depth: usize,
) -> Result<(TreeHashCache, BTreeSchema), Error> {
let schema = vec.tree_hash_cache_schema(depth);
let cache = match T::tree_hash_type() {
TreeHashType::Basic => TreeHashCache::from_bytes(
merkleize(get_packed_leaves(vec)?),
false,
Some(schema.clone()),
),
TreeHashType::Container | TreeHashType::List | TreeHashType::Vector => {
let subtrees = vec
.iter()
.map(|item| TreeHashCache::new_at_depth(item, depth + 1))
.collect::<Result<Vec<TreeHashCache>, _>>()?;
TreeHashCache::from_subtrees(&vec, subtrees, depth)
}
}?;
Ok((cache, schema))
}
/// Produce a schema for some slice.
///
/// Valid for both variable- and fixed-length slices. Does _not_ add the mix-in length nodes, the
/// caller must do this.
pub fn produce_schema<T: CachedTreeHash>(vec: &[T], depth: usize) -> BTreeSchema {
let lengths = match T::tree_hash_type() {
TreeHashType::Basic => {
// Ceil division.
let num_leaves =
(vec.len() + T::tree_hash_packing_factor() - 1) / T::tree_hash_packing_factor();
// Disallow zero-length as an empty list still has one all-padding node.
vec![1; std::cmp::max(1, num_leaves)]
}
TreeHashType::Container | TreeHashType::List | TreeHashType::Vector => {
let mut lengths = vec![];
for item in vec {
lengths.push(item.num_tree_hash_cache_chunks())
}
lengths
}
};
BTreeSchema::from_lengths(depth, lengths)
}
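// Worked example (illustrative, not part of the original file): `u64` has a packing factor
// of four (32 / 8 bytes), so five items need ceil(5 / 4) = 2 basic leaves, while an empty
// list still gets a single all-padding leaf.
#[cfg(test)]
mod produce_schema_example {
    use super::*;

    #[test]
    fn basic_leaf_counts() {
        let five: Vec<u64> = vec![0; 5];
        let empty: Vec<u64> = vec![];

        assert_eq!(produce_schema(&five, 0).into_overlay(0).num_leaf_nodes(), 2);
        assert_eq!(produce_schema(&empty, 0).into_overlay(0).num_leaf_nodes(), 1);
    }
}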
/// Updates the cache for some slice.
///
/// Valid for both variable- and fixed-length slices. Does _not_ cater for the mix-in length nodes,
/// the caller must do this.
#[allow(clippy::range_plus_one)] // Minor readability lint requiring structural changes; not worth it.
pub fn update_tree_hash_cache<T: CachedTreeHash>(
vec: &[T],
cache: &mut TreeHashCache,
) -> Result<BTreeOverlay, Error> {
let old_overlay = cache.get_overlay(cache.schema_index, cache.chunk_index)?;
let new_overlay = BTreeOverlay::new(&vec, cache.chunk_index, old_overlay.depth);
cache.replace_overlay(cache.schema_index, cache.chunk_index, new_overlay.clone())?;
cache.schema_index += 1;
match T::tree_hash_type() {
TreeHashType::Basic => {
let mut buf = vec![0; HASHSIZE];
let item_bytes = HASHSIZE / T::tree_hash_packing_factor();
// If the number of leaf nodes has changed, resize the cache.
if new_overlay.num_leaf_nodes() < old_overlay.num_leaf_nodes() {
let start = new_overlay.next_node();
let end = start + (old_overlay.num_leaf_nodes() - new_overlay.num_leaf_nodes());
cache.splice(start..end, vec![], vec![]);
} else if new_overlay.num_leaf_nodes() > old_overlay.num_leaf_nodes() {
let start = old_overlay.next_node();
let new_nodes = new_overlay.num_leaf_nodes() - old_overlay.num_leaf_nodes();
cache.splice(
start..start,
vec![0; new_nodes * HASHSIZE],
vec![true; new_nodes],
);
}
// Iterate through each of the leaf nodes in the new list.
for i in 0..new_overlay.num_leaf_nodes() {
// Iterate through the number of items that may be packed into the leaf node.
for j in 0..T::tree_hash_packing_factor() {
// Create a mut slice that can be filled with either a serialized item or
// padding.
let buf_slice = &mut buf[j * item_bytes..(j + 1) * item_bytes];
// Attempt to get the item for this portion of the chunk. If it exists,
// update `buf` with its serialized bytes. If it doesn't exist, update
// `buf` with padding.
match vec.get(i * T::tree_hash_packing_factor() + j) {
Some(item) => {
buf_slice.copy_from_slice(&item.tree_hash_packed_encoding());
}
None => buf_slice.copy_from_slice(&vec![0; item_bytes]),
}
}
// Update the chunk if the generated `buf` is not the same as the cache.
let chunk = new_overlay.first_leaf_node() + i;
cache.maybe_update_chunk(chunk, &buf)?;
}
}
TreeHashType::Container | TreeHashType::List | TreeHashType::Vector => {
let longest_len =
std::cmp::max(new_overlay.num_leaf_nodes(), old_overlay.num_leaf_nodes());
let old_leaf_nodes = old_overlay.get_leaf_nodes(longest_len);
let new_leaf_nodes = if old_overlay == new_overlay {
old_leaf_nodes.clone()
} else {
new_overlay.get_leaf_nodes(longest_len)
};
for i in 0..longest_len {
match (&old_leaf_nodes[i], &new_leaf_nodes[i]) {
// The item existed in the previous list and exists in the current list.
//
// Update the item.
(LeafNode::Exists(_old), LeafNode::Exists(new)) => {
cache.chunk_index = new.start;
vec[i].update_tree_hash_cache(cache)?;
}
// The list has been lengthened and this is a new item that did not exist in
// the previous list.
//
// Splice the tree for the new item into the current chunk_index.
(LeafNode::DoesNotExist, LeafNode::Exists(new)) => {
splice_in_new_tree(
&vec[i],
new.start..new.start,
new_overlay.depth + 1,
cache,
)?;
cache.chunk_index = new.end;
}
// The list has been lengthened and this is a new item that was previously a
// padding item.
//
// Splice the tree for the new item over the padding chunk.
(LeafNode::Padding, LeafNode::Exists(new)) => {
splice_in_new_tree(
&vec[i],
new.start..new.start + 1,
new_overlay.depth + 1,
cache,
)?;
cache.chunk_index = new.end;
}
// The list has been shortened and this item was removed from the list and made
// into padding.
//
// Splice a padding node over the number of nodes the previous item occupied,
// starting at the current chunk_index.
(LeafNode::Exists(old), LeafNode::Padding) => {
let num_chunks = old.end - old.start;
cache.splice(
cache.chunk_index..cache.chunk_index + num_chunks,
vec![0; HASHSIZE],
vec![true],
);
cache.chunk_index += 1;
}
// The list has been shortened and the item for this leaf existed in the
// previous list, but does not exist in this list.
//
// Remove the number of nodes the previous item occupied, starting at the
// current chunk_index.
(LeafNode::Exists(old), LeafNode::DoesNotExist) => {
let num_chunks = old.end - old.start;
cache.splice(
cache.chunk_index..cache.chunk_index + num_chunks,
vec![],
vec![],
);
}
// The list has been shortened and this leaf was padding in the previous list,
// however it should not exist in this list.
//
// Remove one node, starting at the current `chunk_index`.
(LeafNode::Padding, LeafNode::DoesNotExist) => {
cache.splice(cache.chunk_index..cache.chunk_index + 1, vec![], vec![]);
}
// The list has been lengthened and this leaf did not exist in the previous
// list, but should be padding for this list.
//
// Splice in a new padding node at the current chunk_index.
(LeafNode::DoesNotExist, LeafNode::Padding) => {
cache.splice(
cache.chunk_index..cache.chunk_index,
vec![0; HASHSIZE],
vec![true],
);
cache.chunk_index += 1;
}
// This leaf was padding in both lists, there's nothing to do.
(LeafNode::Padding, LeafNode::Padding) => (),
// As we are looping through the larger of the lists of leaf nodes, it should
// be impossible for either leaf to be non-existent.
(LeafNode::DoesNotExist, LeafNode::DoesNotExist) => unreachable!(),
}
}
// Clean out any excess schemas that may remain if the list was shortened.
cache.remove_proceeding_child_schemas(cache.schema_index, new_overlay.depth);
}
}
cache.update_internal_nodes(&new_overlay)?;
cache.chunk_index = new_overlay.next_node();
Ok(new_overlay)
}
/// Create a new `TreeHashCache` from `item` and splice it over the `chunks_to_replace` chunks of
/// the given `cache`.
///
/// Useful for the case where a new element is added to a list.
///
/// The schemas created for `item` will have the given `depth`.
fn splice_in_new_tree<T>(
item: &T,
chunks_to_replace: Range<usize>,
depth: usize,
cache: &mut TreeHashCache,
) -> Result<(), Error>
where
T: CachedTreeHash,
{
let (bytes, mut bools, schemas) = TreeHashCache::new_at_depth(item, depth)?.into_components();
// Record the number of schemas, this will be used later in the fn.
let num_schemas = schemas.len();
// Flag the root node of the new tree as dirty.
bools[0] = true;
cache.splice(chunks_to_replace, bytes, bools);
cache
.schemas
.splice(cache.schema_index..cache.schema_index, schemas);
cache.schema_index += num_schemas;
Ok(())
}
/// Packs all of the leaves of `vec` into a single byte-array, appending `0` to ensure the number
/// of chunks in the byte-array is a power-of-two.
fn get_packed_leaves<T>(vec: &[T]) -> Result<Vec<u8>, Error>
where
T: CachedTreeHash,
{
let num_packed_bytes = (BYTES_PER_CHUNK / T::tree_hash_packing_factor()) * vec.len();
let num_leaves = num_sanitized_leaves(num_packed_bytes);
let mut packed = Vec::with_capacity(num_leaves * HASHSIZE);
for item in vec {
packed.append(&mut item.tree_hash_packed_encoding());
}
Ok(sanitise_bytes(packed))
}
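// Illustrative end-to-end sketch (not part of the original file): build a cache for a
// `Vec<u64>`, lengthen the list, update the cache, and check the cached root against a
// fresh (non-cached) tree hash of the new list.
#[cfg(test)]
mod vec_update_example {
    use super::*;

    #[test]
    fn grow_list_and_update() {
        let original: Vec<u64> = vec![1, 2, 3];
        let modified: Vec<u64> = vec![1, 2, 3, 4, 5];

        let mut cache = TreeHashCache::new(&original).unwrap();
        cache.update(&modified).unwrap();

        assert_eq!(cache.tree_hash_root().unwrap(), &modified.tree_hash_root()[..]);
    }
}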

View File

@ -1,150 +0,0 @@
//! Performs cached merkle-hashing adhering to the Ethereum 2.0 specification defined
//! [here](https://github.com/ethereum/eth2.0-specs/blob/v0.5.1/specs/simple-serialize.md#merkleization).
//!
//! Caching allows for reduced hashing when some object has only been partially modified, which
//! consumes less CPU-time at the cost of additional storage. For example,
//! determining the root of a list of 1024 items with a single modification has been observed to
//! run in 1/25th of the time of a full merkle hash.
//!
//!
//! # Example:
//!
//! ```
//! use cached_tree_hash::TreeHashCache;
//! use tree_hash_derive::{TreeHash, CachedTreeHash};
//!
//! #[derive(TreeHash, CachedTreeHash)]
//! struct Foo {
//! bar: u64,
//! baz: Vec<u64>
//! }
//!
//! let mut foo = Foo {
//! bar: 1,
//! baz: vec![0, 1, 2]
//! };
//!
//! let mut cache = TreeHashCache::new(&foo).unwrap();
//!
//! foo.baz[1] = 0;
//!
//! cache.update(&foo).unwrap();
//!
//! println!("Root is: {:?}", cache.tree_hash_root().unwrap());
//! ```
use hashing::hash;
use std::ops::Range;
use tree_hash::{TreeHash, TreeHashType, BYTES_PER_CHUNK, HASHSIZE};
mod btree_overlay;
mod errors;
mod impls;
pub mod merkleize;
mod resize;
mod tree_hash_cache;
pub use btree_overlay::{BTreeOverlay, BTreeSchema};
pub use errors::Error;
pub use impls::vec;
pub use tree_hash_cache::TreeHashCache;
pub trait CachedTreeHash: TreeHash {
fn tree_hash_cache_schema(&self, depth: usize) -> BTreeSchema;
fn num_tree_hash_cache_chunks(&self) -> usize {
self.tree_hash_cache_schema(0).into_overlay(0).num_chunks()
}
fn new_tree_hash_cache(&self, depth: usize) -> Result<TreeHashCache, Error>;
fn update_tree_hash_cache(&self, cache: &mut TreeHashCache) -> Result<(), Error>;
}
/// Implements `CachedTreeHash` on `$type`, where `$type` is a fixed-length vector and each item in
/// the `$type` is encoded as bytes using `ssz_encode`.
#[macro_export]
macro_rules! cached_tree_hash_ssz_encoding_as_vector {
($type: ident, $num_bytes: expr) => {
impl cached_tree_hash::CachedTreeHash for $type {
fn new_tree_hash_cache(
&self,
depth: usize,
) -> Result<cached_tree_hash::TreeHashCache, cached_tree_hash::Error> {
let (cache, _schema) =
cached_tree_hash::vec::new_tree_hash_cache(&ssz::ssz_encode(self), depth)?;
Ok(cache)
}
fn tree_hash_cache_schema(&self, depth: usize) -> cached_tree_hash::BTreeSchema {
let lengths =
vec![1; cached_tree_hash::merkleize::num_unsanitized_leaves($num_bytes)];
cached_tree_hash::BTreeSchema::from_lengths(depth, lengths)
}
fn update_tree_hash_cache(
&self,
cache: &mut cached_tree_hash::TreeHashCache,
) -> Result<(), cached_tree_hash::Error> {
cached_tree_hash::vec::update_tree_hash_cache(&ssz::ssz_encode(self), cache)?;
Ok(())
}
}
};
}
/// Implements `CachedTreeHash` on `$type`, where `$type` is a variable-length list and each item
/// in `$type` is encoded as bytes by calling `item.to_bytes()`.
#[macro_export]
macro_rules! cached_tree_hash_bytes_as_list {
($type: ident) => {
impl cached_tree_hash::CachedTreeHash for $type {
fn new_tree_hash_cache(
&self,
depth: usize,
) -> Result<cached_tree_hash::TreeHashCache, cached_tree_hash::Error> {
let bytes = self.to_bytes();
let (mut cache, schema) =
cached_tree_hash::vec::new_tree_hash_cache(&bytes, depth)?;
cache.add_length_nodes(schema.into_overlay(0).chunk_range(), bytes.len())?;
Ok(cache)
}
fn num_tree_hash_cache_chunks(&self) -> usize {
// Add two extra nodes to cater for the node before and after to allow mixing-in length.
cached_tree_hash::BTreeOverlay::new(self, 0, 0).num_chunks() + 2
}
fn tree_hash_cache_schema(&self, depth: usize) -> cached_tree_hash::BTreeSchema {
let bytes = self.to_bytes();
cached_tree_hash::vec::produce_schema(&bytes, depth)
}
fn update_tree_hash_cache(
&self,
cache: &mut cached_tree_hash::TreeHashCache,
) -> Result<(), cached_tree_hash::Error> {
let bytes = self.to_bytes();
// Skip the length-mixed-in root node.
cache.chunk_index += 1;
// Update the cache, returning the new overlay.
let new_overlay = cached_tree_hash::vec::update_tree_hash_cache(&bytes, cache)?;
// Mix in length
cache.mix_in_length(new_overlay.chunk_range(), bytes.len())?;
// Skip an extra node to clear the length node.
cache.chunk_index += 1;
Ok(())
}
}
};
}
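// Illustrative usage sketch (hypothetical types, not part of the original file): a type
// that SSZ-encodes to a fixed 96 bytes, and a variable-length type exposing `to_bytes`,
// could opt in to cached hashing with:
//
//     cached_tree_hash_ssz_encoding_as_vector!(Signature, 96);
//     cached_tree_hash_bytes_as_list!(Bitfield);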

View File

@ -1,83 +0,0 @@
use hashing::hash;
use tree_hash::{BYTES_PER_CHUNK, HASHSIZE, MERKLE_HASH_CHUNK};
/// Split `values` into a power-of-two number of identical-length chunks (padding with `0`) and merkleize
/// them, returning the entire merkle tree.
///
/// The root hash is `merkleize(values)[0..BYTES_PER_CHUNK]`.
pub fn merkleize(values: Vec<u8>) -> Vec<u8> {
let values = sanitise_bytes(values);
let leaves = values.len() / HASHSIZE;
if leaves == 0 {
panic!("No full leaves");
}
if !leaves.is_power_of_two() {
panic!("leaves is not power of two");
}
let mut o: Vec<u8> = vec![0; (num_nodes(leaves) - leaves) * HASHSIZE];
o.append(&mut values.to_vec());
let mut i = o.len();
let mut j = o.len() - values.len();
while i >= MERKLE_HASH_CHUNK {
i -= MERKLE_HASH_CHUNK;
let hash = hash(&o[i..i + MERKLE_HASH_CHUNK]);
j -= HASHSIZE;
o[j..j + HASHSIZE].copy_from_slice(&hash);
}
o
}
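// Illustrative sketch (not part of the original file): merkleizing two 32-byte leaves
// produces a three-chunk tree whose first chunk (the root) is the hash of the two leaves
// concatenated.
#[cfg(test)]
mod merkleize_example {
    use super::*;

    #[test]
    fn root_of_two_leaves() {
        let leaf_a = vec![1_u8; HASHSIZE];
        let leaf_b = vec![2_u8; HASHSIZE];

        let mut values = leaf_a.clone();
        values.extend_from_slice(&leaf_b);

        let tree = merkleize(values);

        // One internal (root) node plus two leaves.
        assert_eq!(tree.len(), 3 * HASHSIZE);

        let mut concat = leaf_a;
        concat.extend_from_slice(&leaf_b);
        assert_eq!(&tree[0..BYTES_PER_CHUNK], &hash(&concat)[..]);
    }
}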
/// Ensures that the given `bytes` form a power-of-two number of chunks, padding with zeros if not.
pub fn sanitise_bytes(mut bytes: Vec<u8>) -> Vec<u8> {
let present_leaves = num_unsanitized_leaves(bytes.len());
let required_leaves = present_leaves.next_power_of_two();
if (present_leaves != required_leaves) | last_leaf_needs_padding(bytes.len()) {
bytes.resize(num_bytes(required_leaves), 0);
}
bytes
}
/// Pads out `bytes` with zero chunks so that `num_leaves` is raised to the next power of two.
pub fn pad_for_leaf_count(num_leaves: usize, bytes: &mut Vec<u8>) {
let required_leaves = num_leaves.next_power_of_two();
bytes.resize(
bytes.len() + (required_leaves - num_leaves) * BYTES_PER_CHUNK,
0,
);
}
fn last_leaf_needs_padding(num_bytes: usize) -> bool {
num_bytes % HASHSIZE != 0
}
/// Returns the number of leaves for a given `bytes_len` number of bytes, rounding up if
/// `bytes_len` is not an even multiple of the chunk size.
pub fn num_unsanitized_leaves(bytes_len: usize) -> usize {
(bytes_len + HASHSIZE - 1) / HASHSIZE
}
fn num_bytes(num_leaves: usize) -> usize {
num_leaves * HASHSIZE
}
fn num_nodes(num_leaves: usize) -> usize {
2 * num_leaves - 1
}
/// Returns the power-of-two number of leaves that would result from the given `bytes_len` number
/// of bytes.
pub fn num_sanitized_leaves(bytes_len: usize) -> usize {
let leaves = (bytes_len + HASHSIZE - 1) / HASHSIZE;
leaves.next_power_of_two()
}
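// Worked example (illustrative, not part of the original file): 33 bytes span two chunks
// before padding, three present chunks are rounded up to four (the next power of two), and
// sanitising a 33-byte input pads it to two whole chunks.
#[cfg(test)]
mod leaf_count_example {
    use super::*;

    #[test]
    fn leaf_counts() {
        assert_eq!(num_unsanitized_leaves(33), 2);
        assert_eq!(num_sanitized_leaves(3 * HASHSIZE), 4);
        assert_eq!(sanitise_bytes(vec![0; 33]).len(), 2 * HASHSIZE);
    }
}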

View File

@ -1,223 +0,0 @@
#![allow(clippy::range_plus_one)] // Minor readability lint requiring structural changes; not worth it.
use super::*;
/// New vec is bigger than old vec.
pub fn grow_merkle_tree(
old_bytes: &[u8],
old_flags: &[bool],
from_height: usize,
to_height: usize,
) -> Option<(Vec<u8>, Vec<bool>)> {
let to_nodes = nodes_in_tree_of_height(to_height);
let mut bytes = vec![0; to_nodes * HASHSIZE];
let mut flags = vec![true; to_nodes];
for i in 0..=from_height {
let old_byte_slice = old_bytes.get(byte_range_at_height(i))?;
let old_flag_slice = old_flags.get(node_range_at_height(i))?;
let offset = i + to_height - from_height;
let new_byte_slice = bytes.get_mut(byte_range_at_height(offset))?;
let new_flag_slice = flags.get_mut(node_range_at_height(offset))?;
new_byte_slice
.get_mut(0..old_byte_slice.len())?
.copy_from_slice(old_byte_slice);
new_flag_slice
.get_mut(0..old_flag_slice.len())?
.copy_from_slice(old_flag_slice);
}
Some((bytes, flags))
}
/// New vec is smaller than old vec.
pub fn shrink_merkle_tree(
from_bytes: &[u8],
from_flags: &[bool],
from_height: usize,
to_height: usize,
) -> Option<(Vec<u8>, Vec<bool>)> {
let to_nodes = nodes_in_tree_of_height(to_height);
let mut bytes = vec![0; to_nodes * HASHSIZE];
let mut flags = vec![true; to_nodes];
for i in 0..=to_height as usize {
let offset = i + from_height - to_height;
let from_byte_slice = from_bytes.get(byte_range_at_height(offset))?;
let from_flag_slice = from_flags.get(node_range_at_height(offset))?;
let to_byte_slice = bytes.get_mut(byte_range_at_height(i))?;
let to_flag_slice = flags.get_mut(node_range_at_height(i))?;
to_byte_slice.copy_from_slice(from_byte_slice.get(0..to_byte_slice.len())?);
to_flag_slice.copy_from_slice(from_flag_slice.get(0..to_flag_slice.len())?);
}
Some((bytes, flags))
}
pub fn nodes_in_tree_of_height(h: usize) -> usize {
2 * (1 << h) - 1
}
fn byte_range_at_height(h: usize) -> Range<usize> {
let node_range = node_range_at_height(h);
node_range.start * HASHSIZE..node_range.end * HASHSIZE
}
fn node_range_at_height(h: usize) -> Range<usize> {
first_node_at_height(h)..last_node_at_height(h) + 1
}
fn first_node_at_height(h: usize) -> usize {
(1 << h) - 1
}
fn last_node_at_height(h: usize) -> usize {
(1 << (h + 1)) - 2
}
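// Illustrative sketch (not part of the original file): the helpers above index a perfect
// binary tree stored breadth-first, so height `h` spans nodes `(2^h - 1)..(2^(h + 1) - 1)`.
#[cfg(test)]
mod range_helper_example {
    use super::*;

    #[test]
    fn node_and_byte_ranges() {
        assert_eq!(node_range_at_height(0), 0..1);
        assert_eq!(node_range_at_height(1), 1..3);
        assert_eq!(node_range_at_height(2), 3..7);
        assert_eq!(byte_range_at_height(1), HASHSIZE..3 * HASHSIZE);
        assert_eq!(nodes_in_tree_of_height(3), 15);
    }
}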
#[cfg(test)]
mod test {
use super::*;
#[test]
fn can_grow_and_shrink_three_levels() {
let small: usize = 1;
let big: usize = 15;
let original_bytes = vec![42; small * HASHSIZE];
let original_flags = vec![false; small];
let (grown_bytes, grown_flags) = grow_merkle_tree(
&original_bytes,
&original_flags,
(small + 1).trailing_zeros() as usize - 1,
(big + 1).trailing_zeros() as usize - 1,
)
.unwrap();
let mut expected_bytes = vec![];
let mut expected_flags = vec![];
// First level
expected_bytes.append(&mut vec![0; 32]);
expected_flags.push(true);
// Second level
expected_bytes.append(&mut vec![0; 32]);
expected_bytes.append(&mut vec![0; 32]);
expected_flags.push(true);
expected_flags.push(true);
// Third level
expected_bytes.append(&mut vec![0; 32]);
expected_bytes.append(&mut vec![0; 32]);
expected_bytes.append(&mut vec![0; 32]);
expected_bytes.append(&mut vec![0; 32]);
expected_flags.push(true);
expected_flags.push(true);
expected_flags.push(true);
expected_flags.push(true);
// Fourth level
expected_bytes.append(&mut vec![42; 32]);
expected_bytes.append(&mut vec![0; 32]);
expected_bytes.append(&mut vec![0; 32]);
expected_bytes.append(&mut vec![0; 32]);
expected_bytes.append(&mut vec![0; 32]);
expected_bytes.append(&mut vec![0; 32]);
expected_bytes.append(&mut vec![0; 32]);
expected_bytes.append(&mut vec![0; 32]);
expected_flags.push(false);
expected_flags.push(true);
expected_flags.push(true);
expected_flags.push(true);
expected_flags.push(true);
expected_flags.push(true);
expected_flags.push(true);
expected_flags.push(true);
assert_eq!(expected_bytes, grown_bytes);
assert_eq!(expected_flags, grown_flags);
let (shrunk_bytes, shrunk_flags) = shrink_merkle_tree(
&grown_bytes,
&grown_flags,
(big + 1).trailing_zeros() as usize - 1,
(small + 1).trailing_zeros() as usize - 1,
)
.unwrap();
assert_eq!(original_bytes, shrunk_bytes);
assert_eq!(original_flags, shrunk_flags);
}
#[test]
fn can_grow_and_shrink_one_level() {
let small: usize = 7;
let big: usize = 15;
let original_bytes = vec![42; small * HASHSIZE];
let original_flags = vec![false; small];
let (grown_bytes, grown_flags) = grow_merkle_tree(
&original_bytes,
&original_flags,
(small + 1).trailing_zeros() as usize - 1,
(big + 1).trailing_zeros() as usize - 1,
)
.unwrap();
let mut expected_bytes = vec![];
let mut expected_flags = vec![];
// First level
expected_bytes.append(&mut vec![0; 32]);
expected_flags.push(true);
// Second level
expected_bytes.append(&mut vec![42; 32]);
expected_bytes.append(&mut vec![0; 32]);
expected_flags.push(false);
expected_flags.push(true);
// Third level
expected_bytes.append(&mut vec![42; 32]);
expected_bytes.append(&mut vec![42; 32]);
expected_bytes.append(&mut vec![0; 32]);
expected_bytes.append(&mut vec![0; 32]);
expected_flags.push(false);
expected_flags.push(false);
expected_flags.push(true);
expected_flags.push(true);
// Fourth level
expected_bytes.append(&mut vec![42; 32]);
expected_bytes.append(&mut vec![42; 32]);
expected_bytes.append(&mut vec![42; 32]);
expected_bytes.append(&mut vec![42; 32]);
expected_bytes.append(&mut vec![0; 32]);
expected_bytes.append(&mut vec![0; 32]);
expected_bytes.append(&mut vec![0; 32]);
expected_bytes.append(&mut vec![0; 32]);
expected_flags.push(false);
expected_flags.push(false);
expected_flags.push(false);
expected_flags.push(false);
expected_flags.push(true);
expected_flags.push(true);
expected_flags.push(true);
expected_flags.push(true);
assert_eq!(expected_bytes, grown_bytes);
assert_eq!(expected_flags, grown_flags);
let (shrunk_bytes, shrunk_flags) = shrink_merkle_tree(
&grown_bytes,
&grown_flags,
(big + 1).trailing_zeros() as usize - 1,
(small + 1).trailing_zeros() as usize - 1,
)
.unwrap();
assert_eq!(original_bytes, shrunk_bytes);
assert_eq!(original_flags, shrunk_flags);
}
}

View File

@ -1,446 +0,0 @@
#![allow(clippy::range_plus_one)] // Minor readability lint requiring structural changes; not worth it.
use super::*;
use crate::merkleize::{merkleize, pad_for_leaf_count};
use int_to_bytes::int_to_bytes32;
/// Provides cached tree hashing for some object implementing `CachedTreeHash`.
///
/// Caching allows for doing minimal internal-node hashing when an object has only been partially
/// changed.
///
/// See the crate root for an example.
#[derive(Debug, PartialEq, Clone)]
pub struct TreeHashCache {
/// Stores the binary-tree in 32-byte chunks.
pub bytes: Vec<u8>,
/// Maps to each chunk of `self.bytes`, indicating if the chunk is dirty.
pub chunk_modified: Vec<bool>,
/// Contains a schema for each variable-length item stored in the cache.
pub schemas: Vec<BTreeSchema>,
/// A counter used during updates.
pub chunk_index: usize,
/// A counter used during updates.
pub schema_index: usize,
}
impl Default for TreeHashCache {
/// Create an empty cache.
///
/// Note: an empty cache is effectively useless, an error will be raised if `self.update` is
/// called.
fn default() -> TreeHashCache {
TreeHashCache {
bytes: vec![],
chunk_modified: vec![],
schemas: vec![],
chunk_index: 0,
schema_index: 0,
}
}
}
impl TreeHashCache {
/// Instantiates a new cache from `item` at a depth of `0`.
///
/// The returned cache is fully-built and will return an accurate tree-hash root.
pub fn new<T>(item: &T) -> Result<Self, Error>
where
T: CachedTreeHash,
{
Self::new_at_depth(item, 0)
}
/// Instantiates a new cache from `item` at the specified `depth`.
///
/// The returned cache is fully-built and will return an accurate tree-hash root.
pub fn new_at_depth<T>(item: &T, depth: usize) -> Result<Self, Error>
where
T: CachedTreeHash,
{
item.new_tree_hash_cache(depth)
}
/// Updates the cache with `item`.
///
/// `item` _must_ be of the same type as the `item` used to build the cache, otherwise an error
/// may be returned.
///
/// After calling `update`, the cache will return an accurate tree-hash root using
/// `self.tree_hash_root()`.
pub fn update<T>(&mut self, item: &T) -> Result<(), Error>
where
T: CachedTreeHash,
{
if self.is_empty() {
Err(Error::CacheNotInitialized)
} else {
self.reset_modifications();
item.update_tree_hash_cache(self)
}
}
/// Builds a new cache for `item`, given that `subtrees` contains a `Self` for each field/item of
/// `item`.
///
/// Each subtree in `subtrees` will become a leaf-node of the merkle-tree of `item`.
pub fn from_subtrees<T>(item: &T, subtrees: Vec<Self>, depth: usize) -> Result<Self, Error>
where
T: CachedTreeHash,
{
let overlay = BTreeOverlay::new(item, 0, depth);
// Note how many leaves were provided. If it is not a power-of-two, we'll need to pad it out
// later.
let num_provided_leaf_nodes = subtrees.len();
// Allocate enough bytes to store the internal nodes and the leaves and subtrees, then fill
// all the to-be-built internal nodes with zeros and append the leaves and subtrees.
let internal_node_bytes = overlay.num_internal_nodes() * BYTES_PER_CHUNK;
let subtrees_bytes = subtrees.iter().fold(0, |acc, t| acc + t.bytes.len());
let mut bytes = Vec::with_capacity(subtrees_bytes + internal_node_bytes);
bytes.resize(internal_node_bytes, 0);
// Allocate enough bytes to store all the leaves.
let mut leaves = Vec::with_capacity(overlay.num_leaf_nodes() * HASHSIZE);
let mut schemas = Vec::with_capacity(subtrees.len());
if T::tree_hash_type() == TreeHashType::List {
schemas.push(overlay.into());
}
// Iterate through all of the leaves/subtrees, adding their root as a leaf node and then
// concatenating their merkle trees.
for t in subtrees {
leaves.append(&mut t.tree_hash_root()?.to_vec());
let (mut t_bytes, _bools, mut t_schemas) = t.into_components();
bytes.append(&mut t_bytes);
schemas.append(&mut t_schemas);
}
// Pad the leaves to an even power-of-two, using zeros.
pad_for_leaf_count(num_provided_leaf_nodes, &mut bytes);
// Merkleize the leaves, then split the leaf nodes off them. Then, replace all-zeros
// internal nodes created earlier with the internal nodes generated by `merkleize`.
let mut merkleized = merkleize(leaves);
merkleized.split_off(internal_node_bytes);
bytes.splice(0..internal_node_bytes, merkleized);
Ok(Self {
chunk_modified: vec![true; bytes.len() / BYTES_PER_CHUNK],
bytes,
schemas,
chunk_index: 0,
schema_index: 0,
})
}
/// Instantiate a new cache from the pre-built `bytes` where each `self.chunk_modified` will be
/// set to `initial_modified_state`.
///
/// Note: `bytes.len()` must be a multiple of 32
pub fn from_bytes(
bytes: Vec<u8>,
initial_modified_state: bool,
schema: Option<BTreeSchema>,
) -> Result<Self, Error> {
if bytes.len() % BYTES_PER_CHUNK > 0 {
return Err(Error::BytesAreNotEvenChunks(bytes.len()));
}
let schemas = match schema {
Some(schema) => vec![schema],
None => vec![],
};
Ok(Self {
chunk_modified: vec![initial_modified_state; bytes.len() / BYTES_PER_CHUNK],
bytes,
schemas,
chunk_index: 0,
schema_index: 0,
})
}
/// Returns `true` if this cache is empty (i.e., it has never been built for some item).
///
/// Note: an empty cache is effectively useless, an error will be raised if `self.update` is
/// called.
pub fn is_empty(&self) -> bool {
self.chunk_modified.is_empty()
}
/// Return an overlay, built from the schema at `schema_index` with an offset of `chunk_index`.
pub fn get_overlay(
&self,
schema_index: usize,
chunk_index: usize,
) -> Result<BTreeOverlay, Error> {
Ok(self
.schemas
.get(schema_index)
.ok_or_else(|| Error::NoSchemaForIndex(schema_index))?
.clone()
.into_overlay(chunk_index))
}
/// Resets the per-update counters, allowing a new update to start.
///
/// Note: this does _not_ delete the contents of the cache.
pub fn reset_modifications(&mut self) {
// Reset the per-hash counters.
self.chunk_index = 0;
self.schema_index = 0;
for chunk_modified in &mut self.chunk_modified {
*chunk_modified = false;
}
}
/// Replace the schema at `schema_index` with the schema derived from `new_overlay`.
///
/// If the `new_overlay` schema has a different number of internal nodes from the schema at
/// `schema_index`, the cache will be updated to add or remove internal nodes accordingly.
pub fn replace_overlay(
&mut self,
schema_index: usize,
// TODO: remove chunk index (if possible)
chunk_index: usize,
new_overlay: BTreeOverlay,
) -> Result<BTreeOverlay, Error> {
let old_overlay = self.get_overlay(schema_index, chunk_index)?;
// If the merkle tree required to represent the new list is of a different size to the one
// required for the previous list, then update the internal nodes.
//
// Leaf nodes are not touched, they should be updated externally to this function.
//
// This grows/shrinks the bytes to accommodate the new tree, preserving as much of the tree
// as possible.
if new_overlay.num_internal_nodes() != old_overlay.num_internal_nodes() {
// Get slices of the existing tree from the cache.
let (old_bytes, old_flags) = self
.slices(old_overlay.internal_chunk_range())
.ok_or_else(|| Error::UnableToObtainSlices)?;
let (new_bytes, new_flags) = if new_overlay.num_internal_nodes() == 0 {
// The new tree has zero internal nodes, simply return empty lists.
(vec![], vec![])
} else if old_overlay.num_internal_nodes() == 0 {
// The old tree has zero nodes and the new tree has some nodes. Create new nodes to
// suit.
let nodes = resize::nodes_in_tree_of_height(new_overlay.height() - 1);
(vec![0; nodes * HASHSIZE], vec![true; nodes])
} else if new_overlay.num_internal_nodes() > old_overlay.num_internal_nodes() {
// The new tree is bigger than the old tree.
//
// Grow the internal nodes, preserving any existing nodes.
resize::grow_merkle_tree(
old_bytes,
old_flags,
old_overlay.height() - 1,
new_overlay.height() - 1,
)
.ok_or_else(|| Error::UnableToGrowMerkleTree)?
} else {
// The new tree is smaller than the old tree.
//
// Shrink the internal nodes, preserving any existing nodes.
resize::shrink_merkle_tree(
old_bytes,
old_flags,
old_overlay.height() - 1,
new_overlay.height() - 1,
)
.ok_or_else(|| Error::UnableToShrinkMerkleTree)?
};
// Splice the resized created elements over the existing elements, effectively updating
// the number of stored internal nodes for this tree.
self.splice(old_overlay.internal_chunk_range(), new_bytes, new_flags);
}
let old_schema = std::mem::replace(&mut self.schemas[schema_index], new_overlay.into());
Ok(old_schema.into_overlay(chunk_index))
}
/// Remove all of the child schemas following `schema_index`.
///
/// Schema `a` is a child of schema `b` if `a.depth > b.depth`.
pub fn remove_proceeding_child_schemas(&mut self, schema_index: usize, depth: usize) {
let end = self
.schemas
.iter()
.skip(schema_index)
.position(|o| o.depth <= depth)
.and_then(|i| Some(i + schema_index))
.unwrap_or_else(|| self.schemas.len());
self.schemas.splice(schema_index..end, vec![]);
}
/// Iterate through the internal node chunks of `overlay`, updating each chunk with the
/// merkle-root of its children if either of those children is dirty.
pub fn update_internal_nodes(&mut self, overlay: &BTreeOverlay) -> Result<(), Error> {
for (parent, children) in overlay.internal_parents_and_children().into_iter().rev() {
if self.either_modified(children)? {
self.modify_chunk(parent, &self.hash_children(children)?)?;
}
}
Ok(())
}
/// Returns the tree-hash root of the cache.
pub fn tree_hash_root(&self) -> Result<&[u8], Error> {
if self.is_empty() {
Err(Error::CacheNotInitialized)
} else {
self.bytes
.get(0..HASHSIZE)
.ok_or_else(|| Error::NoBytesForRoot)
}
}
/// Splices the given `bytes` over `self.bytes` and `bools` over `self.chunk_modified` at the
/// specified `chunk_range`.
pub fn splice(&mut self, chunk_range: Range<usize>, bytes: Vec<u8>, bools: Vec<bool>) {
// Update the `chunk_modified` vec, marking all spliced-in nodes as changed.
self.chunk_modified.splice(chunk_range.clone(), bools);
self.bytes
.splice(node_range_to_byte_range(&chunk_range), bytes);
}
/// If the bytes at `chunk` are not the same as `to`, `self.bytes` is updated and
/// `self.chunk_modified` is set to `true`.
pub fn maybe_update_chunk(&mut self, chunk: usize, to: &[u8]) -> Result<(), Error> {
let start = chunk * BYTES_PER_CHUNK;
let end = start + BYTES_PER_CHUNK;
if !self.chunk_equals(chunk, to)? {
self.bytes
.get_mut(start..end)
.ok_or_else(|| Error::NoModifiedFieldForChunk(chunk))?
.copy_from_slice(to);
self.chunk_modified[chunk] = true;
}
Ok(())
}
/// Returns the slices of `self.bytes` and `self.chunk_modified` at the given `chunk_range`.
fn slices(&self, chunk_range: Range<usize>) -> Option<(&[u8], &[bool])> {
Some((
self.bytes.get(node_range_to_byte_range(&chunk_range))?,
self.chunk_modified.get(chunk_range)?,
))
}
/// Updates `self.bytes` at `chunk` and sets `self.chunk_modified` for the `chunk` to `true`.
pub fn modify_chunk(&mut self, chunk: usize, to: &[u8]) -> Result<(), Error> {
let start = chunk * BYTES_PER_CHUNK;
let end = start + BYTES_PER_CHUNK;
self.bytes
.get_mut(start..end)
.ok_or_else(|| Error::NoBytesForChunk(chunk))?
.copy_from_slice(to);
self.chunk_modified[chunk] = true;
Ok(())
}
/// Returns the bytes at `chunk`.
fn get_chunk(&self, chunk: usize) -> Result<&[u8], Error> {
let start = chunk * BYTES_PER_CHUNK;
let end = start + BYTES_PER_CHUNK;
Ok(self
.bytes
.get(start..end)
.ok_or_else(|| Error::NoModifiedFieldForChunk(chunk))?)
}
/// Returns `true` if the bytes at `chunk` are equal to `other`.
fn chunk_equals(&mut self, chunk: usize, other: &[u8]) -> Result<bool, Error> {
Ok(self.get_chunk(chunk)? == other)
}
/// Returns `true` if `chunk` is dirty.
pub fn changed(&self, chunk: usize) -> Result<bool, Error> {
self.chunk_modified
.get(chunk)
.cloned()
.ok_or_else(|| Error::NoModifiedFieldForChunk(chunk))
}
/// Returns `true` if either of the `children` chunks is dirty.
fn either_modified(&self, children: (usize, usize)) -> Result<bool, Error> {
Ok(self.changed(children.0)? | self.changed(children.1)?)
}
/// Returns the hash of the concatenation of the given `children`.
pub fn hash_children(&self, children: (usize, usize)) -> Result<Vec<u8>, Error> {
let mut child_bytes = Vec::with_capacity(BYTES_PER_CHUNK * 2);
child_bytes.append(&mut self.get_chunk(children.0)?.to_vec());
child_bytes.append(&mut self.get_chunk(children.1)?.to_vec());
Ok(hash(&child_bytes))
}
/// Adds a chunk before and after the given `chunk` range and calls `self.mix_in_length()`.
pub fn add_length_nodes(
&mut self,
chunk_range: Range<usize>,
length: usize,
) -> Result<(), Error> {
self.chunk_modified[chunk_range.start] = true;
let byte_range = node_range_to_byte_range(&chunk_range);
// Add the last node.
self.bytes
.splice(byte_range.end..byte_range.end, vec![0; HASHSIZE]);
self.chunk_modified
.splice(chunk_range.end..chunk_range.end, vec![false]);
// Add the first node.
self.bytes
.splice(byte_range.start..byte_range.start, vec![0; HASHSIZE]);
self.chunk_modified
.splice(chunk_range.start..chunk_range.start, vec![false]);
self.mix_in_length(chunk_range.start + 1..chunk_range.end + 1, length)?;
Ok(())
}
/// Sets the chunk at `chunk_range.end` equal to the little-endian serialization of `length`. Sets
/// the chunk at `chunk_range.start - 1` equal to `self.hash_children((chunk_range.start, chunk_range.end))`.
pub fn mix_in_length(&mut self, chunk_range: Range<usize>, length: usize) -> Result<(), Error> {
// Update the length chunk.
self.maybe_update_chunk(chunk_range.end, &int_to_bytes32(length as u64))?;
// Update the mixed-in root if the main root or the length have changed.
let children = (chunk_range.start, chunk_range.end);
if self.either_modified(children)? {
self.modify_chunk(chunk_range.start - 1, &self.hash_children(children)?)?;
}
Ok(())
}
/// Returns `(self.bytes, self.chunk_modified, self.schemas)`.
pub fn into_components(self) -> (Vec<u8>, Vec<bool>, Vec<BTreeSchema>) {
(self.bytes, self.chunk_modified, self.schemas)
}
}
fn node_range_to_byte_range(node_range: &Range<usize>) -> Range<usize> {
node_range.start * HASHSIZE..node_range.end * HASHSIZE
}
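// Illustrative sketch (not part of the original file): after `add_length_nodes` and
// `mix_in_length`, the chunk immediately before the data range holds the SSZ-style
// mix-in-length root, i.e. the hash of the data root concatenated with the little-endian
// length chunk.
#[allow(dead_code)]
fn example_mixed_in_root(data_root: &[u8], len: usize) -> Vec<u8> {
    let mut preimage = data_root.to_vec();
    preimage.append(&mut int_to_bytes32(len as u64));
    hash(&preimage)
}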

View File

@ -1,13 +1,14 @@
[package] [package]
name = "hashing" name = "eth2_hashing"
version = "0.1.0" version = "0.1.0"
authors = ["Paul Hauner <paul@paulhauner.com>"] authors = ["Paul Hauner <paul@paulhauner.com>"]
edition = "2018" edition = "2018"
license = "Apache-2.0"
description = "Hashing primitives used in Ethereum 2.0"
[target.'cfg(not(target_arch = "wasm32"))'.dependencies] [target.'cfg(not(target_arch = "wasm32"))'.dependencies]
ring = "0.14.6" ring = "0.14.6"
[dev-dependencies] [dev-dependencies]
rustc-hex = "2.0.1" rustc-hex = "2.0.1"

View File

@ -0,0 +1,44 @@
//! Provides a simple hash function utilizing `ring::digest::SHA256`.
//!
//! The purpose of this crate is to provide an abstraction to whatever hash function Ethereum
//! 2.0 is using. The hash function has been subject to change during the specification process, so
//! defining it once in this crate makes it easy to replace.
#[cfg(not(target_arch = "wasm32"))]
use ring::digest::{digest, SHA256};
#[cfg(target_arch = "wasm32")]
use sha2::{Digest, Sha256};
/// Returns the digest of `input`.
///
/// Uses `ring::digest::SHA256`, or `sha2::Sha256` when compiled for `wasm32`.
pub fn hash(input: &[u8]) -> Vec<u8> {
#[cfg(not(target_arch = "wasm32"))]
let h = digest(&SHA256, input).as_ref().into();
#[cfg(target_arch = "wasm32")]
let h = Sha256::digest(input).as_ref().into();
h
}
#[cfg(test)]
mod tests {
use super::*;
use rustc_hex::FromHex;
#[cfg(target_arch = "wasm32")]
use wasm_bindgen_test::*;
#[cfg_attr(not(target_arch = "wasm32"), test)]
#[cfg_attr(target_arch = "wasm32", wasm_bindgen_test)]
fn test_hashing() {
let input: Vec<u8> = b"hello world".as_ref().into();
let output = hash(input.as_ref());
let expected_hex = "b94d27b9934d3e08a52e52d7da7dabfac484efe37a5380ee9088f7ace2efcde9";
let expected: Vec<u8> = expected_hex.from_hex().unwrap();
assert_eq!(expected, output);
}
}

View File

@ -1,117 +0,0 @@
#[cfg(not(target_arch = "wasm32"))]
use ring::digest::{digest, SHA256};
#[cfg(target_arch = "wasm32")]
use sha2::{Digest, Sha256};
pub fn hash(input: &[u8]) -> Vec<u8> {
#[cfg(not(target_arch = "wasm32"))]
let h = digest(&SHA256, input).as_ref().into();
#[cfg(target_arch = "wasm32")]
let h = Sha256::digest(input).as_ref().into();
h
}
/// Gets the merkle root of some hashed values - the input leaf nodes are expected to already be
/// hashed. Outputs a `Vec<u8>` byte array of the merkle root given a set of leaf-node values.
pub fn merkle_root(values: &[Vec<u8>]) -> Option<Vec<u8>> {
let values_len = values.len();
// check that the vector is non-empty and its length is a power of two
if values.is_empty() || !values_len.is_power_of_two() {
return None;
}
// vector to store hashes
// filled with 0 as placeholders
let mut o: Vec<Vec<u8>> = vec![vec![0]; values_len];
// append values to the end
o.append(&mut values.to_vec());
// traverse backwards as values are at the end
// then fill placeholders with a hash of two leaf nodes
for i in (0..values_len).rev() {
let mut current_value: Vec<u8> = o[i * 2].clone();
current_value.append(&mut o[i * 2 + 1].clone());
o[i] = hash(&current_value[..]);
}
// the root hash will be at index 1
Some(o[1].clone())
}
#[cfg(test)]
mod tests {
use super::*;
use rustc_hex::FromHex;
#[cfg(target_arch = "wasm32")]
use wasm_bindgen_test::*;
#[cfg_attr(not(target_arch = "wasm32"), test)]
#[cfg_attr(target_arch = "wasm32", wasm_bindgen_test)]
fn test_hashing() {
let input: Vec<u8> = b"hello world".as_ref().into();
let output = hash(input.as_ref());
let expected_hex = "b94d27b9934d3e08a52e52d7da7dabfac484efe37a5380ee9088f7ace2efcde9";
let expected: Vec<u8> = expected_hex.from_hex().unwrap();
assert_eq!(expected, output);
}
#[cfg_attr(not(target_arch = "wasm32"), test)]
#[cfg_attr(target_arch = "wasm32", wasm_bindgen_test)]
fn test_merkle_root() {
// hash the leaf nodes
let mut input = vec![
hash("a".as_bytes()),
hash("b".as_bytes()),
hash("c".as_bytes()),
hash("d".as_bytes()),
];
// generate a merkle tree and return the root
let output = merkle_root(&input[..]);
// create merkle root manually
let mut leaf_1_2: Vec<u8> = input[0].clone(); // a
leaf_1_2.append(&mut input[1].clone()); // b
let mut leaf_3_4: Vec<u8> = input[2].clone(); // c
leaf_3_4.append(&mut input[3].clone()); // d
let node_1 = hash(&leaf_1_2[..]);
let node_2 = hash(&leaf_3_4[..]);
let mut root: Vec<u8> = node_1.clone(); // ab
root.append(&mut node_2.clone()); // cd
let expected = hash(&root[..]);
assert_eq!(&expected[..], output.unwrap().as_slice());
}
#[cfg_attr(not(target_arch = "wasm32"), test)]
#[cfg_attr(target_arch = "wasm32", wasm_bindgen_test)]
fn test_empty_input_merkle_root() {
let input = vec![];
let output = merkle_root(&input[..]);
assert_eq!(None, output);
}
#[cfg_attr(not(target_arch = "wasm32"), test)]
#[cfg_attr(target_arch = "wasm32", wasm_bindgen_test)]
fn test_odd_leaf_merkle_root() {
let input = vec![
hash("a".as_bytes()),
hash("b".as_bytes()),
hash("a".as_bytes()),
];
let output = merkle_root(&input[..]);
assert_eq!(None, output);
}
}

View File

@ -6,4 +6,4 @@ edition = "2018"
[dependencies] [dependencies]
ethereum-types = "0.6" ethereum-types = "0.6"
hashing = { path = "../hashing" } eth2_hashing = { path = "../eth2_hashing" }

View File

@ -1,5 +1,5 @@
use eth2_hashing::hash;
use ethereum_types::H256; use ethereum_types::H256;
use hashing::hash;
/// Verify a proof that `leaf` exists at `index` in a Merkle tree rooted at `root`. /// Verify a proof that `leaf` exists at `index` in a Merkle tree rooted at `root`.
/// ///

View File

@ -1,6 +1,6 @@
[package] [package]
name = "eth2_ssz_types" name = "eth2_ssz_types"
version = "0.1.0" version = "0.2.0"
authors = ["Paul Hauner <paul@paulhauner.com>"] authors = ["Paul Hauner <paul@paulhauner.com>"]
edition = "2018" edition = "2018"
@ -8,14 +8,13 @@ edition = "2018"
name = "ssz_types" name = "ssz_types"
[dependencies] [dependencies]
cached_tree_hash = { path = "../cached_tree_hash" } tree_hash = "0.1"
tree_hash = { path = "../tree_hash" }
serde = "1.0" serde = "1.0"
serde_derive = "1.0" serde_derive = "1.0"
serde_hex = { path = "../serde_hex" } serde_hex = { path = "../serde_hex" }
eth2_ssz = { path = "../ssz" } eth2_ssz = "0.1"
typenum = "1.10" typenum = "1.10"
[dev-dependencies] [dev-dependencies]
serde_yaml = "0.8" serde_yaml = "0.8"
tree_hash_derive = { path = "../tree_hash_derive" } tree_hash_derive = "0.2"

View File

@ -606,50 +606,6 @@ impl<N: Unsigned + Clone> tree_hash::TreeHash for Bitfield<Fixed<N>> {
} }
} }
impl<N: Unsigned + Clone> cached_tree_hash::CachedTreeHash for Bitfield<Variable<N>> {
fn new_tree_hash_cache(
&self,
_depth: usize,
) -> Result<cached_tree_hash::TreeHashCache, cached_tree_hash::Error> {
unimplemented!("CachedTreeHash is not implemented for BitList")
}
fn num_tree_hash_cache_chunks(&self) -> usize {
unimplemented!("CachedTreeHash is not implemented for BitList")
}
fn tree_hash_cache_schema(&self, _depth: usize) -> cached_tree_hash::BTreeSchema {
unimplemented!("CachedTreeHash is not implemented for BitList")
}
fn update_tree_hash_cache(
&self,
_cache: &mut cached_tree_hash::TreeHashCache,
) -> Result<(), cached_tree_hash::Error> {
unimplemented!("CachedTreeHash is not implemented for BitList")
}
}
impl<N: Unsigned + Clone> cached_tree_hash::CachedTreeHash for Bitfield<Fixed<N>> {
fn new_tree_hash_cache(
&self,
_depth: usize,
) -> Result<cached_tree_hash::TreeHashCache, cached_tree_hash::Error> {
unimplemented!("CachedTreeHash is not implemented for BitVec")
}
fn tree_hash_cache_schema(&self, _depth: usize) -> cached_tree_hash::BTreeSchema {
unimplemented!("CachedTreeHash is not implemented for BitVec")
}
fn update_tree_hash_cache(
&self,
_cache: &mut cached_tree_hash::TreeHashCache,
) -> Result<(), cached_tree_hash::Error> {
unimplemented!("CachedTreeHash is not implemented for BitVec")
}
}
#[cfg(test)] #[cfg(test)]
mod bitvector { mod bitvector {
use super::*; use super::*;

View File

@ -167,29 +167,6 @@ where
} }
} }
impl<T, N: Unsigned> cached_tree_hash::CachedTreeHash for FixedVector<T, N>
where
T: cached_tree_hash::CachedTreeHash + tree_hash::TreeHash,
{
fn new_tree_hash_cache(
&self,
_depth: usize,
) -> Result<cached_tree_hash::TreeHashCache, cached_tree_hash::Error> {
unimplemented!("CachedTreeHash is not implemented for FixedVector")
}
fn tree_hash_cache_schema(&self, _depth: usize) -> cached_tree_hash::BTreeSchema {
unimplemented!("CachedTreeHash is not implemented for FixedVector")
}
fn update_tree_hash_cache(
&self,
_cache: &mut cached_tree_hash::TreeHashCache,
) -> Result<(), cached_tree_hash::Error> {
unimplemented!("CachedTreeHash is not implemented for FixedVector")
}
}
impl<T, N: Unsigned> ssz::Encode for FixedVector<T, N> impl<T, N: Unsigned> ssz::Encode for FixedVector<T, N>
where where
T: ssz::Encode, T: ssz::Encode,

View File

@ -196,29 +196,6 @@ where
} }
} }
impl<T, N: Unsigned> cached_tree_hash::CachedTreeHash for VariableList<T, N>
where
T: cached_tree_hash::CachedTreeHash + tree_hash::TreeHash,
{
fn new_tree_hash_cache(
&self,
_depth: usize,
) -> Result<cached_tree_hash::TreeHashCache, cached_tree_hash::Error> {
unimplemented!("CachedTreeHash is not implemented for VariableList")
}
fn tree_hash_cache_schema(&self, _depth: usize) -> cached_tree_hash::BTreeSchema {
unimplemented!("CachedTreeHash is not implemented for VariableList")
}
fn update_tree_hash_cache(
&self,
_cache: &mut cached_tree_hash::TreeHashCache,
) -> Result<(), cached_tree_hash::Error> {
unimplemented!("CachedTreeHash is not implemented for VariableList")
}
}
impl<T, N: Unsigned> ssz::Encode for VariableList<T, N> impl<T, N: Unsigned> ssz::Encode for VariableList<T, N>
where where
T: ssz::Encode, T: ssz::Encode,

View File

@ -15,5 +15,5 @@ hex = "0.3"
ethereum-types = "0.6" ethereum-types = "0.6"
[dependencies] [dependencies]
hashing = { path = "../hashing" } eth2_hashing = { path = "../eth2_hashing" }
int_to_bytes = { path = "../int_to_bytes" } int_to_bytes = { path = "../int_to_bytes" }

View File

@ -1,4 +1,4 @@
use hashing::hash; use eth2_hashing::hash;
use int_to_bytes::{int_to_bytes1, int_to_bytes4}; use int_to_bytes::{int_to_bytes1, int_to_bytes4};
use std::cmp::max; use std::cmp::max;

View File

@ -1,4 +1,4 @@
use hashing::hash; use eth2_hashing::hash;
use int_to_bytes::int_to_bytes4; use int_to_bytes::int_to_bytes4;
const SEED_SIZE: usize = 32; const SEED_SIZE: usize = 32;

View File

@ -1,8 +1,10 @@
[package] [package]
name = "tree_hash" name = "tree_hash"
version = "0.1.0" version = "0.1.1"
authors = ["Paul Hauner <paul@paulhauner.com>"] authors = ["Paul Hauner <paul@paulhauner.com>"]
edition = "2018" edition = "2018"
license = "Apache-2.0"
description = "Efficient Merkle-hashing as used in Ethereum 2.0"
[[bench]] [[bench]]
name = "benches" name = "benches"
@ -11,11 +13,10 @@ harness = false
[dev-dependencies] [dev-dependencies]
criterion = "0.2" criterion = "0.2"
rand = "0.7" rand = "0.7"
tree_hash_derive = { path = "../tree_hash_derive" } tree_hash_derive = "0.2"
types = { path = "../../types" } types = { path = "../../types" }
[dependencies] [dependencies]
ethereum-types = "0.6" ethereum-types = "0.6"
hashing = { path = "../hashing" } eth2_hashing = "0.1"
int_to_bytes = { path = "../int_to_bytes" }
lazy_static = "0.1" lazy_static = "0.1"

View File

@ -1,76 +0,0 @@
# Tree hashing
Provides both cached and non-cached tree hashing methods.
## Standard Tree Hash
```rust
use tree_hash_derive::TreeHash;
#[derive(TreeHash)]
struct Foo {
a: u64,
b: Vec<u64>,
}
fn main() {
let foo = Foo {
a: 42,
b: vec![1, 2, 3]
};
println!("root: {}", foo.tree_hash_root());
}
```
## Cached Tree Hash
```rust
use tree_hash_derive::{TreeHash, CachedTreeHash};
#[derive(TreeHash, CachedTreeHash)]
struct Foo {
a: u64,
b: Vec<u64>,
}
#[derive(TreeHash, CachedTreeHash)]
struct Bar {
a: Vec<Foo>,
b: u64,
}
fn main() {
let bar = Bar {
a: vec![
Foo {
a: 42,
b: vec![1, 2, 3]
}
],
b: 42
};
let modified_bar = Bar {
a: vec![
Foo {
a: 100,
b: vec![1, 2, 3, 4, 5, 6]
},
Foo {
a: 42,
b: vec![]
}
],
b: 99
};
let mut hasher = CachedTreeHasher::new(&bar).unwrap();
hasher.update(&modified_bar).unwrap();
// Assert that the cached tree hash matches a standard tree hash.
assert_eq!(hasher.tree_hash_root(), modified_bar.tree_hash_root());
}
```

View File

@ -3,7 +3,6 @@ extern crate lazy_static;
use criterion::Criterion; use criterion::Criterion;
use criterion::{black_box, criterion_group, criterion_main, Benchmark}; use criterion::{black_box, criterion_group, criterion_main, Benchmark};
use tree_hash::TreeHash;
use types::test_utils::{generate_deterministic_keypairs, TestingBeaconStateBuilder}; use types::test_utils::{generate_deterministic_keypairs, TestingBeaconStateBuilder};
use types::{BeaconState, EthSpec, Keypair, MainnetEthSpec, MinimalEthSpec}; use types::{BeaconState, EthSpec, Keypair, MainnetEthSpec, MinimalEthSpec};
@ -36,7 +35,12 @@ fn bench_suite<T: EthSpec>(c: &mut Criterion, spec_desc: &str, validator_count:
Benchmark::new("genesis_state", move |b| { Benchmark::new("genesis_state", move |b| {
b.iter_batched_ref( b.iter_batched_ref(
|| state.clone(), || state.clone(),
|state| black_box(state.tree_hash_root()), // Note: `state.canonical_root()` uses whichever `tree_hash` crate the `types`
// crate depends on, which is not necessarily this one. To guarantee that `types`
// uses this local version of `tree_hash`, add a workspace-level
// [dependency patch](https://doc.rust-lang.org/cargo/reference/manifest.html#the-patch-section).
|state| black_box(state.canonical_root()),
criterion::BatchSize::SmallInput, criterion::BatchSize::SmallInput,
) )
}) })
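For reference (not part of this diff): the note above suggests `canonical_root` is simply a thin wrapper over `tree_hash_root`, so whichever `tree_hash` crate `types` links against does the actual hashing. A hedged sketch of that assumption:

```rust
use tree_hash::TreeHash;
use types::{BeaconState, EthSpec, Hash256};

// Hypothetical sketch: delegate to `tree_hash_root`, so the `tree_hash` crate
// that `types` depends on determines which hashing code runs.
// (Assumption for illustration, not the confirmed implementation.)
fn canonical_root<T: EthSpec>(state: &BeaconState<T>) -> Hash256 {
    Hash256::from_slice(&state.tree_hash_root())
}
```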

View File

@ -1,4 +1,3 @@
use tree_hash::TreeHash;
use types::test_utils::TestingBeaconStateBuilder; use types::test_utils::TestingBeaconStateBuilder;
use types::{BeaconState, EthSpec, MainnetEthSpec}; use types::{BeaconState, EthSpec, MainnetEthSpec};
@ -29,7 +28,7 @@ fn main() {
let mut vec = Vec::with_capacity(TREE_HASH_LOOPS); let mut vec = Vec::with_capacity(TREE_HASH_LOOPS);
for _ in 0..TREE_HASH_LOOPS { for _ in 0..TREE_HASH_LOOPS {
let root = state.tree_hash_root(); let root = state.canonical_root();
vec.push(root[0]); vec.push(root[0]);
} }
} }

View File

@ -1,6 +1,5 @@
use super::*; use super::*;
use ethereum_types::H256; use ethereum_types::H256;
use int_to_bytes::int_to_bytes32;
macro_rules! impl_for_bitsize { macro_rules! impl_for_bitsize {
($type: ident, $bit_size: expr) => { ($type: ident, $bit_size: expr) => {
@ -122,6 +121,13 @@ macro_rules! impl_for_list {
impl_for_list!(Vec<T>); impl_for_list!(Vec<T>);
impl_for_list!(&[T]); impl_for_list!(&[T]);
/// Returns `int` as little-endian bytes with a length of 32.
fn int_to_bytes32(int: u64) -> Vec<u8> {
let mut vec = int.to_le_bytes().to_vec();
vec.resize(32, 0);
vec
}
#[cfg(test)] #[cfg(test)]
mod test { mod test {
use super::*; use super::*;
@ -137,4 +143,22 @@ mod test {
assert_eq!(false.tree_hash_root(), false_bytes); assert_eq!(false.tree_hash_root(), false_bytes);
} }
#[test]
fn int_to_bytes() {
assert_eq!(&int_to_bytes32(0), &[0; 32]);
assert_eq!(
&int_to_bytes32(1),
&[
1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0
]
);
assert_eq!(
&int_to_bytes32(u64::max_value()),
&[
255, 255, 255, 255, 255, 255, 255, 255, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0
]
);
}
} }

View File

@ -108,7 +108,7 @@ mod test {
let mut preimage = vec![42; BYTES_PER_CHUNK]; let mut preimage = vec![42; BYTES_PER_CHUNK];
preimage.append(&mut vec![42]); preimage.append(&mut vec![42]);
preimage.append(&mut vec![0; BYTES_PER_CHUNK - 1]); preimage.append(&mut vec![0; BYTES_PER_CHUNK - 1]);
hashing::hash(&preimage) eth2_hashing::hash(&preimage)
}; };
assert_eq!(mix_in_length(&[42; BYTES_PER_CHUNK], 42), hash); assert_eq!(mix_in_length(&[42; BYTES_PER_CHUNK], 42), hash);
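The assertion above pins down the behaviour of the length "mix-in": hash the root concatenated with the length, encoded little-endian and zero-padded to one 32-byte chunk. A minimal sketch consistent with that test (the crate's actual signature may differ):

```rust
use eth2_hashing::hash;

// Sketch of a length mix-in, matching the test above: root || le_bytes(length)
// padded to 32 bytes, then hashed. (Illustrative, not the crate's exact API.)
fn mix_in_length(root: &[u8], length: usize) -> Vec<u8> {
    let mut length_bytes = (length as u64).to_le_bytes().to_vec();
    length_bytes.resize(32, 0);
    hash(&[root, length_bytes.as_slice()].concat())
}
```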

View File

@ -1,5 +1,5 @@
use super::BYTES_PER_CHUNK; use super::BYTES_PER_CHUNK;
use hashing::hash; use eth2_hashing::hash;
/// The size of the cache that stores padding nodes for a given height. /// The size of the cache that stores padding nodes for a given height.
/// ///

View File

@ -1,5 +1,5 @@
use super::*; use super::*;
use hashing::hash; use eth2_hashing::hash;
/// Merkleizes bytes and returns the root, using a simple algorithm that does not optimize to avoid /// Merkleizes bytes and returns the root, using a simple algorithm that does not optimize to avoid
/// processing or storing padding bytes. /// processing or storing padding bytes.
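A minimal sketch of the non-optimizing merkleization the comment describes, assuming only `eth2_hashing::hash(&[u8]) -> Vec<u8>` (illustrative, not the crate's actual implementation):

```rust
use eth2_hashing::hash;

const BYTES_PER_CHUNK: usize = 32;

// Zero-pad the input to a power-of-two number of 32-byte chunks, then hash
// adjacent pairs until a single root remains.
fn merkleize_simple(bytes: &[u8]) -> Vec<u8> {
    // Split into 32-byte chunks, zero-padding the final partial chunk.
    let mut nodes: Vec<Vec<u8>> = bytes
        .chunks(BYTES_PER_CHUNK)
        .map(|chunk| {
            let mut padded = chunk.to_vec();
            padded.resize(BYTES_PER_CHUNK, 0);
            padded
        })
        .collect();

    // Pad with zero chunks up to the next power of two.
    let leaf_count = nodes.len().max(1).next_power_of_two();
    nodes.resize(leaf_count, vec![0u8; BYTES_PER_CHUNK]);

    // Hash adjacent pairs until only the root remains.
    while nodes.len() > 1 {
        nodes = nodes
            .chunks(2)
            .map(|pair| hash(&[pair[0].as_slice(), pair[1].as_slice()].concat()))
            .collect();
    }

    nodes.remove(0)
}
```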

View File

@ -1,17 +1,14 @@
[package] [package]
name = "tree_hash_derive" name = "tree_hash_derive"
version = "0.1.0" version = "0.2.0"
authors = ["Paul Hauner <paul@paulhauner.com>"] authors = ["Paul Hauner <paul@paulhauner.com>"]
edition = "2018" edition = "2018"
description = "Procedural derive macros for SSZ tree hashing." description = "Procedural derive macros to accompany the tree_hash crate."
license = "Apache-2.0"
[lib] [lib]
proc-macro = true proc-macro = true
[dev-dependencies]
tree_hash = { path = "../tree_hash" }
cached_tree_hash = { path = "../cached_tree_hash" }
[dependencies] [dependencies]
syn = "0.15" syn = "0.15"
quote = "0.6" quote = "0.6"

View File

@ -37,81 +37,6 @@ fn should_skip_hashing(field: &syn::Field) -> bool {
.any(|attr| attr.into_token_stream().to_string() == "# [ tree_hash ( skip_hashing ) ]") .any(|attr| attr.into_token_stream().to_string() == "# [ tree_hash ( skip_hashing ) ]")
} }
/// Implements `tree_hash::CachedTreeHash` for some `struct`.
///
/// Fields are hashed in the order they are defined.
#[proc_macro_derive(CachedTreeHash, attributes(tree_hash))]
pub fn subtree_derive(input: TokenStream) -> TokenStream {
let item = parse_macro_input!(input as DeriveInput);
let (impl_generics, ty_generics, where_clause) = &item.generics.split_for_impl();
let name = &item.ident;
let struct_data = match &item.data {
syn::Data::Struct(s) => s,
_ => panic!("tree_hash_derive only supports structs."),
};
let idents_a = get_hashable_named_field_idents(&struct_data);
let idents_b = idents_a.clone();
let idents_c = idents_a.clone();
let output = quote! {
impl #impl_generics cached_tree_hash::CachedTreeHash for #name #ty_generics #where_clause {
fn new_tree_hash_cache(&self, depth: usize) -> Result<cached_tree_hash::TreeHashCache, cached_tree_hash::Error> {
let tree = cached_tree_hash::TreeHashCache::from_subtrees(
self,
vec![
#(
self.#idents_a.new_tree_hash_cache(depth)?,
)*
],
depth
)?;
Ok(tree)
}
fn num_tree_hash_cache_chunks(&self) -> usize {
cached_tree_hash::BTreeOverlay::new(self, 0, 0).num_chunks()
}
fn tree_hash_cache_schema(&self, depth: usize) -> cached_tree_hash::BTreeSchema {
let mut lengths = vec![];
#(
lengths.push(self.#idents_b.num_tree_hash_cache_chunks());
)*
cached_tree_hash::BTreeSchema::from_lengths(depth, lengths)
}
fn update_tree_hash_cache(&self, cache: &mut cached_tree_hash::TreeHashCache) -> Result<(), cached_tree_hash::Error> {
let overlay = cached_tree_hash::BTreeOverlay::new(self, cache.chunk_index, 0);
// Skip the chunk index to the first leaf node of this struct.
cache.chunk_index = overlay.first_leaf_node();
// Skip the overlay index to the first leaf node of this struct.
// cache.overlay_index += 1;
// Recurse into the struct items, updating their caches.
#(
self.#idents_c.update_tree_hash_cache(cache)?;
)*
// Iterate through the internal nodes, updating them if their children have changed.
cache.update_internal_nodes(&overlay)?;
cache.chunk_index = overlay.next_node();
Ok(())
}
}
};
output.into()
}
/// Implements `tree_hash::TreeHash` for some `struct`. /// Implements `tree_hash::TreeHash` for some `struct`.
/// ///
/// Fields are hashed in the order they are defined. /// Fields are hashed in the order they are defined.

View File

@ -6,6 +6,6 @@ edition = "2018"
[dependencies] [dependencies]
bytes = "0.4.10" bytes = "0.4.10"
hashing = { path = "../utils/hashing" } eth2_hashing = { path = "../utils/eth2_hashing" }
eth2_ssz = { path = "../utils/ssz" } eth2_ssz = "0.1"
types = { path = "../types" } types = { path = "../types" }

View File

@ -3,7 +3,7 @@ extern crate hashing;
extern crate types; extern crate types;
use bytes::{BufMut, BytesMut}; use bytes::{BufMut, BytesMut};
use hashing::canonical_hash; use eth2_hashing::canonical_hash;
use ssz::ssz_encode; use ssz::ssz_encode;
use std::cmp::max; use std::cmp::max;
use types::{Hash256, ValidatorRecord, ValidatorStatus}; use types::{Hash256, ValidatorRecord, ValidatorStatus};

View File

@ -17,9 +17,8 @@ serde = "1.0"
serde_derive = "1.0" serde_derive = "1.0"
serde_repr = "0.1" serde_repr = "0.1"
serde_yaml = "0.8" serde_yaml = "0.8"
eth2_ssz = { path = "../../eth2/utils/ssz" } eth2_ssz = "0.1"
tree_hash = { path = "../../eth2/utils/tree_hash" } tree_hash = "0.1"
cached_tree_hash = { path = "../../eth2/utils/cached_tree_hash" }
state_processing = { path = "../../eth2/state_processing" } state_processing = { path = "../../eth2/state_processing" }
swap_or_not_shuffle = { path = "../../eth2/utils/swap_or_not_shuffle" } swap_or_not_shuffle = { path = "../../eth2/utils/swap_or_not_shuffle" }
types = { path = "../../eth2/types" } types = { path = "../../eth2/types" }

View File

@ -1,6 +1,5 @@
use super::*; use super::*;
use crate::case_result::compare_result; use crate::case_result::compare_result;
use cached_tree_hash::CachedTreeHash;
use serde_derive::Deserialize; use serde_derive::Deserialize;
use ssz::{Decode, Encode}; use ssz::{Decode, Encode};
use std::fmt::Debug; use std::fmt::Debug;
@ -103,7 +102,6 @@ where
+ PartialEq<T> + PartialEq<T>
+ serde::de::DeserializeOwned + serde::de::DeserializeOwned
+ TreeHash + TreeHash
+ CachedTreeHash
+ TestRandom, + TestRandom,
{ {
// Verify we can decode SSZ in the same way we can decode YAML. // Verify we can decode SSZ in the same way we can decode YAML.

View File

@ -14,9 +14,9 @@ path = "src/lib.rs"
[dependencies] [dependencies]
bls = { path = "../eth2/utils/bls" } bls = { path = "../eth2/utils/bls" }
eth2_ssz = { path = "../eth2/utils/ssz" } eth2_ssz = "0.1"
eth2_config = { path = "../eth2/utils/eth2_config" } eth2_config = { path = "../eth2/utils/eth2_config" }
tree_hash = { path = "../eth2/utils/tree_hash" } tree_hash = "0.1"
clap = "2.32.0" clap = "2.32.0"
grpcio = { version = "0.4", default-features = false, features = ["protobuf-codec"] } grpcio = { version = "0.4", default-features = false, features = ["protobuf-codec"] }
protos = { path = "../protos" } protos = { path = "../protos" }