From 69ad55af19d1b5c650327e387f4d5e3338c6f7a9 Mon Sep 17 00:00:00 2001 From: John Adler Date: Fri, 26 Jul 2019 13:31:31 -0400 Subject: [PATCH 01/25] Fix submodule init command to also update. --- docs/installation.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/installation.md b/docs/installation.md index 29048bc45..6d6482ba4 100644 --- a/docs/installation.md +++ b/docs/installation.md @@ -15,7 +15,7 @@ installed): - `cmake`: required for building protobuf - `git-lfs`: The Git extension for [Large File Support](https://git-lfs.github.com/) (required for EF tests submodule). 6. If you haven't already, clone the repository with submodules: `git clone --recursive https://github.com/sigp/lighthouse`. - Alternatively, run `git submodule init` in a repository which was cloned without submodules. + Alternatively, run `git submodule init; git submodule update` in a repository which was cloned without submodules. 7. Change directory to the root of the repository. 8. Run the test by using command `cargo test --all --release`. By running, it will pass all the required test cases. If you are doing it for the first time, then you can grab a coffee in the meantime. Usually, it takes time From fec71685129d433e6e1019ba688f2b9c84a4565f Mon Sep 17 00:00:00 2001 From: John Adler Date: Fri, 26 Jul 2019 15:26:06 -0400 Subject: [PATCH 02/25] Fix lots of typos. --- README.md | 4 ++-- beacon_node/beacon_chain/src/beacon_chain.rs | 8 ++++---- beacon_node/beacon_chain/src/fork_choice.rs | 2 +- beacon_node/eth2-libp2p/src/config.rs | 2 +- beacon_node/eth2-libp2p/src/rpc/methods.rs | 4 ++-- beacon_node/network/src/message_handler.rs | 2 +- beacon_node/network/src/sync/import_queue.rs | 2 +- beacon_node/network/src/sync/simple_sync.rs | 10 +++++----- beacon_node/rpc/src/attestation.rs | 2 +- beacon_node/src/run.rs | 2 +- beacon_node/store/src/block_at_slot.rs | 14 +++++++------- beacon_node/store/src/disk_db.rs | 4 ++-- beacon_node/store/src/lib.rs | 4 ++-- beacon_node/store/src/store.rs | 2 +- docs/onboarding.md | 2 +- eth2/lmd_ghost/src/reduced_tree.rs | 4 ++-- eth2/operation_pool/src/lib.rs | 2 +- .../benches/bench_epoch_processing.rs | 2 +- .../src/per_block_processing/errors.rs | 4 ++-- .../src/per_block_processing/verify_transfer.rs | 2 +- eth2/state_processing/src/per_epoch_processing.rs | 2 +- .../src/per_epoch_processing/process_slashings.rs | 4 ++-- .../src/per_epoch_processing/registry_updates.rs | 2 +- .../src/per_epoch_processing/validator_statuses.rs | 2 +- eth2/types/src/slot_epoch.rs | 2 +- .../test_utils/builders/testing_deposit_builder.rs | 2 +- .../types/src/test_utils/test_random/secret_key.rs | 2 +- eth2/utils/bls/src/aggregate_signature.rs | 8 ++++---- eth2/utils/cached_tree_hash/src/btree_overlay.rs | 2 +- eth2/utils/cached_tree_hash/src/impls/vec.rs | 4 ++-- eth2/utils/cached_tree_hash/src/tree_hash_cache.rs | 2 +- eth2/utils/honey-badger-split/src/lib.rs | 2 +- eth2/utils/ssz/README.md | 4 ++-- eth2/utils/ssz/src/decode/impls.rs | 2 +- eth2/utils/tree_hash/src/merkleize_standard.rs | 2 +- eth2/utils/tree_hash_derive/src/lib.rs | 2 +- eth2/validator_change/src/lib.rs | 2 +- protos/src/services.proto | 6 +++--- tests/ef_tests/tests/tests.rs | 2 +- validator_client/src/attestation_producer/mod.rs | 2 +- validator_client/src/block_producer/mod.rs | 2 +- 41 files changed, 68 insertions(+), 68 deletions(-) diff --git a/README.md b/README.md index 85290f6c8..e4f2e8ccb 100644 --- a/README.md +++ b/README.md @@ -16,12 +16,12 @@ An open-source Ethereum 2.0 client, written in Rust and maintained by Sigma Prim Lighthouse is: - Fully open-source, licensed under Apache 2.0. -- Security-focussed, fuzzing has begun and security reviews are planned +- Security-focused, fuzzing has begun and security reviews are planned for late-2019. - Built in [Rust](https://www.rust-lang.org/), a modern language providing unique safety guarantees and excellent performance (comparable to C++). - Funded by various organisations, including Sigma Prime, the - Ethereum Foundation, Consensys and private individuals. + Ethereum Foundation, ConsenSys and private individuals. - Actively working to promote an inter-operable, multi-client Ethereum 2.0. diff --git a/beacon_node/beacon_chain/src/beacon_chain.rs b/beacon_node/beacon_chain/src/beacon_chain.rs index 96ebe4b41..e5a163a16 100644 --- a/beacon_node/beacon_chain/src/beacon_chain.rs +++ b/beacon_node/beacon_chain/src/beacon_chain.rs @@ -70,10 +70,10 @@ pub struct BeaconChain { /// Stores all operations (e.g., `Attestation`, `Deposit`, etc) that are candidates for /// inclusion in a block. pub op_pool: OperationPool, - /// Stores a "snapshot" of the chain at the time the head-of-the-chain block was recieved. + /// Stores a "snapshot" of the chain at the time the head-of-the-chain block was received. canonical_head: RwLock>, /// The same state from `self.canonical_head`, but updated at the start of each slot with a - /// skip slot if no block is recieved. This is effectively a cache that avoids repeating calls + /// skip slot if no block is received. This is effectively a cache that avoids repeating calls /// to `per_slot_processing`. state: RwLock>, /// The root of the genesis block. @@ -391,12 +391,12 @@ impl BeaconChain { /// /// Information is read from the current state, so only information from the present and prior /// epoch is available. - pub fn validator_attestion_slot_and_shard( + pub fn validator_attestation_slot_and_shard( &self, validator_index: usize, ) -> Result, BeaconStateError> { trace!( - "BeaconChain::validator_attestion_slot_and_shard: validator_index: {}", + "BeaconChain::validator_attestation_slot_and_shard: validator_index: {}", validator_index ); if let Some(attestation_duty) = self diff --git a/beacon_node/beacon_chain/src/fork_choice.rs b/beacon_node/beacon_chain/src/fork_choice.rs index c693145ea..30b20ffe6 100644 --- a/beacon_node/beacon_chain/src/fork_choice.rs +++ b/beacon_node/beacon_chain/src/fork_choice.rs @@ -145,7 +145,7 @@ impl ForkChoice { // 2. Ignore all attestations to the zero hash. // // (1) becomes weird once we hit finality and fork choice drops the genesis block. (2) is - // fine becuase votes to the genesis block are not useful; all validators implicitly attest + // fine because votes to the genesis block are not useful; all validators implicitly attest // to genesis just by being present in the chain. if block_hash != Hash256::zero() { let block_slot = attestation diff --git a/beacon_node/eth2-libp2p/src/config.rs b/beacon_node/eth2-libp2p/src/config.rs index ea87075b7..4c6f0b6da 100644 --- a/beacon_node/eth2-libp2p/src/config.rs +++ b/beacon_node/eth2-libp2p/src/config.rs @@ -54,7 +54,7 @@ impl Default for Config { network_dir.push("network"); Config { network_dir, - listen_address: "127.0.0.1".parse().expect("vaild ip address"), + listen_address: "127.0.0.1".parse().expect("valid ip address"), libp2p_port: 9000, discovery_address: "127.0.0.1".parse().expect("valid ip address"), discovery_port: 9000, diff --git a/beacon_node/eth2-libp2p/src/rpc/methods.rs b/beacon_node/eth2-libp2p/src/rpc/methods.rs index 0d6311d9d..8cc336395 100644 --- a/beacon_node/eth2-libp2p/src/rpc/methods.rs +++ b/beacon_node/eth2-libp2p/src/rpc/methods.rs @@ -43,7 +43,7 @@ pub enum GoodbyeReason { ClientShutdown = 1, /// Incompatible networks. - IrreleventNetwork = 2, + IrrelevantNetwork = 2, /// Error/fault in the RPC. Fault = 3, @@ -56,7 +56,7 @@ impl From for GoodbyeReason { fn from(id: u64) -> GoodbyeReason { match id { 1 => GoodbyeReason::ClientShutdown, - 2 => GoodbyeReason::IrreleventNetwork, + 2 => GoodbyeReason::IrrelevantNetwork, 3 => GoodbyeReason::Fault, _ => GoodbyeReason::Unknown, } diff --git a/beacon_node/network/src/message_handler.rs b/beacon_node/network/src/message_handler.rs index b1d88415c..4e510094f 100644 --- a/beacon_node/network/src/message_handler.rs +++ b/beacon_node/network/src/message_handler.rs @@ -252,7 +252,7 @@ impl MessageHandler { fn handle_gossip(&mut self, peer_id: PeerId, gossip_message: PubsubMessage) { match gossip_message { PubsubMessage::Block(message) => { - let _should_foward_on = + let _should_forward_on = self.sync .on_block_gossip(peer_id, message, &mut self.network_context); } diff --git a/beacon_node/network/src/sync/import_queue.rs b/beacon_node/network/src/sync/import_queue.rs index fe640aaa0..504add4f8 100644 --- a/beacon_node/network/src/sync/import_queue.rs +++ b/beacon_node/network/src/sync/import_queue.rs @@ -270,7 +270,7 @@ pub struct PartialBeaconBlock { impl PartialBeaconBlock { /// Attempts to build a block. /// - /// Does not comsume the `PartialBeaconBlock`. + /// Does not consume the `PartialBeaconBlock`. pub fn attempt_complete(&self) -> PartialBeaconBlockCompletion { if self.header.is_none() { PartialBeaconBlockCompletion::MissingHeader(self.slot) diff --git a/beacon_node/network/src/sync/simple_sync.rs b/beacon_node/network/src/sync/simple_sync.rs index 91594b999..aeabd0507 100644 --- a/beacon_node/network/src/sync/simple_sync.rs +++ b/beacon_node/network/src/sync/simple_sync.rs @@ -186,7 +186,7 @@ impl SimpleSync { "reason" => "network_id" ); - network.disconnect(peer_id.clone(), GoodbyeReason::IrreleventNetwork); + network.disconnect(peer_id.clone(), GoodbyeReason::IrrelevantNetwork); } else if remote.latest_finalized_epoch <= local.latest_finalized_epoch && remote.latest_finalized_root != self.chain.spec.zero_hash && local.latest_finalized_root != self.chain.spec.zero_hash @@ -202,7 +202,7 @@ impl SimpleSync { "peer" => format!("{:?}", peer_id), "reason" => "different finalized chain" ); - network.disconnect(peer_id.clone(), GoodbyeReason::IrreleventNetwork); + network.disconnect(peer_id.clone(), GoodbyeReason::IrrelevantNetwork); } else if remote.latest_finalized_epoch < local.latest_finalized_epoch { // The node has a lower finalized epoch, their chain is not useful to us. There are two // cases where a node can have a lower finalized epoch: @@ -529,7 +529,7 @@ impl SimpleSync { .import_queue .enqueue_bodies(res.block_bodies, peer_id.clone()); - // Attempt to process all recieved bodies by recursively processing the latest block + // Attempt to process all received bodies by recursively processing the latest block if let Some(root) = last_root { match self.attempt_process_partial_block(peer_id, root, network, &"rpc") { Some(BlockProcessingOutcome::Processed { block_root: _ }) => { @@ -606,7 +606,7 @@ impl SimpleSync { } // Note: known blocks are forwarded on the gossip network. // - // We rely upon the lower layers (libp2p) to stop loops occuring from re-gossiped + // We rely upon the lower layers (libp2p) to stop loops occurring from re-gossiped // blocks. BlockProcessingOutcome::BlockIsAlreadyKnown => SHOULD_FORWARD_GOSSIP_BLOCK, _ => SHOULD_NOT_FORWARD_GOSSIP_BLOCK, @@ -837,7 +837,7 @@ impl SimpleSync { // If the parent is in the `import_queue` attempt to complete it then process it. match self.attempt_process_partial_block(peer_id, parent, network, source) { - // If processing parent is sucessful, re-process block and remove parent from queue + // If processing parent is successful, re-process block and remove parent from queue Some(BlockProcessingOutcome::Processed { block_root: _ }) => { self.import_queue.remove(parent); diff --git a/beacon_node/rpc/src/attestation.rs b/beacon_node/rpc/src/attestation.rs index b85d4e947..cedd184e3 100644 --- a/beacon_node/rpc/src/attestation.rs +++ b/beacon_node/rpc/src/attestation.rs @@ -43,7 +43,7 @@ impl AttestationService for AttestationServiceInstance { let state = &self.chain.current_state(); // Start by performing some checks - // Check that the AttestionData is for the current slot (otherwise it will not be valid) + // Check that the AttestationData is for the current slot (otherwise it will not be valid) if slot_requested > state.slot.as_u64() { let log_clone = self.log.clone(); let f = sink diff --git a/beacon_node/src/run.rs b/beacon_node/src/run.rs index 51fa16154..fc46a3f44 100644 --- a/beacon_node/src/run.rs +++ b/beacon_node/src/run.rs @@ -116,7 +116,7 @@ where ctrlc_send.send(()).expect("Error sending ctrl-c message"); } }) - .map_err(|e| format!("Could not set ctrlc hander: {:?}", e))?; + .map_err(|e| format!("Could not set ctrlc handler: {:?}", e))?; let (exit_signal, exit) = exit_future::signal(); diff --git a/beacon_node/store/src/block_at_slot.rs b/beacon_node/store/src/block_at_slot.rs index 5a0dd6861..12f1cccfe 100644 --- a/beacon_node/store/src/block_at_slot.rs +++ b/beacon_node/store/src/block_at_slot.rs @@ -20,18 +20,18 @@ fn read_previous_block_root_from_block_bytes(bytes: &[u8]) -> Result( +pub fn get_block_at_preceding_slot( store: &T, slot: Slot, start_root: Hash256, ) -> Result, Error> { - Ok(match get_at_preceeding_slot(store, slot, start_root)? { + Ok(match get_at_preceding_slot(store, slot, start_root)? { Some((hash, bytes)) => Some((hash, BeaconBlock::from_ssz_bytes(&bytes)?)), None => None, }) } -fn get_at_preceeding_slot( +fn get_at_preceding_slot( store: &T, slot: Slot, mut root: Hash256, @@ -141,7 +141,7 @@ mod tests { let (target_root, target_block) = &blocks_and_roots[target]; let (found_root, found_block) = store - .get_block_at_preceeding_slot(*source_root, target_block.slot) + .get_block_at_preceding_slot(*source_root, target_block.slot) .unwrap() .unwrap(); @@ -166,7 +166,7 @@ mod tests { let (target_root, target_block) = &blocks_and_roots[target]; let (found_root, found_block) = store - .get_block_at_preceeding_slot(*source_root, target_block.slot) + .get_block_at_preceding_slot(*source_root, target_block.slot) .unwrap() .unwrap(); @@ -177,14 +177,14 @@ mod tests { // Slot that doesn't exist let (source_root, _source_block) = &blocks_and_roots[3]; assert!(store - .get_block_at_preceeding_slot(*source_root, Slot::new(3)) + .get_block_at_preceding_slot(*source_root, Slot::new(3)) .unwrap() .is_none()); // Slot too high let (source_root, _source_block) = &blocks_and_roots[3]; assert!(store - .get_block_at_preceeding_slot(*source_root, Slot::new(3)) + .get_block_at_preceding_slot(*source_root, Slot::new(3)) .unwrap() .is_none()); } diff --git a/beacon_node/store/src/disk_db.rs b/beacon_node/store/src/disk_db.rs index 669547ab9..873c9df82 100644 --- a/beacon_node/store/src/disk_db.rs +++ b/beacon_node/store/src/disk_db.rs @@ -106,7 +106,7 @@ impl ClientDB for DiskStore { fn exists(&self, col: &str, key: &[u8]) -> Result { /* * I'm not sure if this is the correct way to read if some - * block exists. Naively I would expect this to unncessarily + * block exists. Naively I would expect this to unnecessarily * copy some data, but I could be wrong. */ match self.db.cf_handle(col) { @@ -164,7 +164,7 @@ mod tests { let thread_count = 10; let write_count = 10; - // We're execting the product of these numbers to fit in one byte. + // We're expecting the product of these numbers to fit in one byte. assert!(thread_count * write_count <= 255); let mut handles = vec![]; diff --git a/beacon_node/store/src/lib.rs b/beacon_node/store/src/lib.rs index 24f622fdc..f4e335ab7 100644 --- a/beacon_node/store/src/lib.rs +++ b/beacon_node/store/src/lib.rs @@ -52,12 +52,12 @@ pub trait Store: Sync + Send + Sized { /// /// Returns `None` if no parent block exists at that slot, or if `slot` is greater than the /// slot of `start_block_root`. - fn get_block_at_preceeding_slot( + fn get_block_at_preceding_slot( &self, start_block_root: Hash256, slot: Slot, ) -> Result, Error> { - block_at_slot::get_block_at_preceeding_slot(self, slot, start_block_root) + block_at_slot::get_block_at_preceding_slot(self, slot, start_block_root) } /// Retrieve some bytes in `column` with `key`. diff --git a/beacon_node/store/src/store.rs b/beacon_node/store/src/store.rs index 5d18c7ba5..84447b83c 100644 --- a/beacon_node/store/src/store.rs +++ b/beacon_node/store/src/store.rs @@ -19,7 +19,7 @@ pub trait Store: Sync + Send + Sized { I::db_delete(self, key) } - fn get_block_at_preceeding_slot( + fn get_block_at_preceding_slot( &self, start_block_root: Hash256, slot: Slot, diff --git a/docs/onboarding.md b/docs/onboarding.md index 275f95484..1937271a0 100644 --- a/docs/onboarding.md +++ b/docs/onboarding.md @@ -115,7 +115,7 @@ other programming languages you may have used. * **Trait**: A trait is a collection of methods defined for a type, they can be implemented for any data type. * **Struct**: A custom data type that lets us name and package together -multiple related values that make a meaninguful group. +multiple related values that make a meaningful group. * **Crate**: A crate is synonymous with a *library* or *package* in other languages. They can produce an executable or library depending on the project. diff --git a/eth2/lmd_ghost/src/reduced_tree.rs b/eth2/lmd_ghost/src/reduced_tree.rs index dace2bda6..7baf45b38 100644 --- a/eth2/lmd_ghost/src/reduced_tree.rs +++ b/eth2/lmd_ghost/src/reduced_tree.rs @@ -1,6 +1,6 @@ //! An implementation of "reduced tree" LMD GHOST fork choice. //! -//! This algorithm was concieved at IC3 Cornell, 2019. +//! This algorithm was conceived at IC3 Cornell, 2019. //! //! This implementation is incomplete and has known bugs. Do not use in production. use super::{LmdGhost, Result as SuperResult}; @@ -147,7 +147,7 @@ where Ok(()) } - /// Removes `current_hash` and all decendants, except `subtree_hash` and all nodes + /// Removes `current_hash` and all descendants, except `subtree_hash` and all nodes /// which have `subtree_hash` as an ancestor. /// /// In effect, prunes the tree so that only decendants of `subtree_hash` exist. diff --git a/eth2/operation_pool/src/lib.rs b/eth2/operation_pool/src/lib.rs index 6c6f1e752..a39fcce33 100644 --- a/eth2/operation_pool/src/lib.rs +++ b/eth2/operation_pool/src/lib.rs @@ -34,7 +34,7 @@ pub struct OperationPool { /// Map from deposit index to deposit data. // NOTE: We assume that there is only one deposit per index // because the Eth1 data is updated (at most) once per epoch, - // and the spec doesn't seem to accomodate for re-orgs on a time-frame + // and the spec doesn't seem to accommodate for re-orgs on a time-frame // longer than an epoch deposits: RwLock>, /// Map from two attestation IDs to a slashing for those IDs. diff --git a/eth2/state_processing/benches/bench_epoch_processing.rs b/eth2/state_processing/benches/bench_epoch_processing.rs index e89305ce4..977464513 100644 --- a/eth2/state_processing/benches/bench_epoch_processing.rs +++ b/eth2/state_processing/benches/bench_epoch_processing.rs @@ -48,7 +48,7 @@ pub fn bench_epoch_processing_n_validators(c: &mut Criterion, validator_count: u "The state should have an attestation for each committee." ); - // Assert that we will run the first arm of process_rewards_and_penalities + // Assert that we will run the first arm of process_rewards_and_penalties let epochs_since_finality = state.next_epoch(&spec) - state.finalized_epoch; assert_eq!( epochs_since_finality, 4, diff --git a/eth2/state_processing/src/per_block_processing/errors.rs b/eth2/state_processing/src/per_block_processing/errors.rs index 41e6410be..8c8c365cc 100644 --- a/eth2/state_processing/src/per_block_processing/errors.rs +++ b/eth2/state_processing/src/per_block_processing/errors.rs @@ -247,7 +247,7 @@ pub enum IndexedAttestationInvalid { MaxIndicesExceed(u64, usize), /// The validator indices were not in increasing order. /// - /// The error occured between the given `index` and `index + 1` + /// The error occurred between the given `index` and `index + 1` BadValidatorIndicesOrdering(usize), /// The validator index is unknown. One cannot slash one who does not exist. UnknownValidator(u64), @@ -413,7 +413,7 @@ pub enum TransferInvalid { /// The `transfer.from` validator has been activated and is not withdrawable. /// /// (from_validator) - FromValidatorIneligableForTransfer(u64), + FromValidatorIneligibleForTransfer(u64), /// The validators withdrawal credentials do not match `transfer.pubkey`. /// /// (state_credentials, transfer_pubkey_credentials) diff --git a/eth2/state_processing/src/per_block_processing/verify_transfer.rs b/eth2/state_processing/src/per_block_processing/verify_transfer.rs index e0b3d22e8..20a16959b 100644 --- a/eth2/state_processing/src/per_block_processing/verify_transfer.rs +++ b/eth2/state_processing/src/per_block_processing/verify_transfer.rs @@ -114,7 +114,7 @@ fn verify_transfer_parametric( || sender_validator.activation_eligibility_epoch == spec.far_future_epoch || sender_validator.is_withdrawable_at(epoch) || total_amount + spec.max_effective_balance <= sender_balance, - Invalid::FromValidatorIneligableForTransfer(transfer.sender) + Invalid::FromValidatorIneligibleForTransfer(transfer.sender) ); // Ensure the withdrawal credentials generated from the sender's pubkey match those stored in diff --git a/eth2/state_processing/src/per_epoch_processing.rs b/eth2/state_processing/src/per_epoch_processing.rs index be4213408..c1d601b47 100644 --- a/eth2/state_processing/src/per_epoch_processing.rs +++ b/eth2/state_processing/src/per_epoch_processing.rs @@ -47,7 +47,7 @@ pub fn per_epoch_processing( // Crosslinks. let winning_root_for_shards = process_crosslinks(state, spec)?; - // Rewards and Penalities. + // Rewards and Penalties. process_rewards_and_penalties( state, &mut validator_statuses, diff --git a/eth2/state_processing/src/per_epoch_processing/process_slashings.rs b/eth2/state_processing/src/per_epoch_processing/process_slashings.rs index d17dd1622..df743c553 100644 --- a/eth2/state_processing/src/per_epoch_processing/process_slashings.rs +++ b/eth2/state_processing/src/per_epoch_processing/process_slashings.rs @@ -12,7 +12,7 @@ pub fn process_slashings( let total_at_start = state.get_slashed_balance(current_epoch + 1)?; let total_at_end = state.get_slashed_balance(current_epoch)?; - let total_penalities = total_at_end - total_at_start; + let total_penalties = total_at_end - total_at_start; for (index, validator) in state.validator_registry.iter().enumerate() { let should_penalize = current_epoch.as_usize() + T::LatestSlashedExitLength::to_usize() / 2 @@ -22,7 +22,7 @@ pub fn process_slashings( let effective_balance = state.get_effective_balance(index, spec)?; let penalty = std::cmp::max( - effective_balance * std::cmp::min(total_penalities * 3, current_total_balance) + effective_balance * std::cmp::min(total_penalties * 3, current_total_balance) / current_total_balance, effective_balance / spec.min_slashing_penalty_quotient, ); diff --git a/eth2/state_processing/src/per_epoch_processing/registry_updates.rs b/eth2/state_processing/src/per_epoch_processing/registry_updates.rs index f97841d72..b18111faf 100644 --- a/eth2/state_processing/src/per_epoch_processing/registry_updates.rs +++ b/eth2/state_processing/src/per_epoch_processing/registry_updates.rs @@ -3,7 +3,7 @@ use super::Error; use itertools::{Either, Itertools}; use types::*; -/// Peforms a validator registry update, if required. +/// Performs a validator registry update, if required. /// /// Spec v0.6.3 pub fn process_registry_updates( diff --git a/eth2/state_processing/src/per_epoch_processing/validator_statuses.rs b/eth2/state_processing/src/per_epoch_processing/validator_statuses.rs index e20aa6cf2..9f05b8204 100644 --- a/eth2/state_processing/src/per_epoch_processing/validator_statuses.rs +++ b/eth2/state_processing/src/per_epoch_processing/validator_statuses.rs @@ -150,7 +150,7 @@ pub struct TotalBalances { /// some `BeaconState`. #[derive(Clone)] pub struct ValidatorStatuses { - /// Information about each individual validator from the state's validator registy. + /// Information about each individual validator from the state's validator registry. pub statuses: Vec, /// Summed balances for various sets of validators. pub total_balances: TotalBalances, diff --git a/eth2/types/src/slot_epoch.rs b/eth2/types/src/slot_epoch.rs index 9a7808da4..bd611aa0c 100644 --- a/eth2/types/src/slot_epoch.rs +++ b/eth2/types/src/slot_epoch.rs @@ -1,4 +1,4 @@ -//! The `Slot` and `Epoch` types are defined as newtypes over u64 to enforce type-safety between +//! The `Slot` and `Epoch` types are defined as new types over u64 to enforce type-safety between //! the two types. //! //! `Slot` and `Epoch` have implementations which permit conversion, comparison and math operations diff --git a/eth2/types/src/test_utils/builders/testing_deposit_builder.rs b/eth2/types/src/test_utils/builders/testing_deposit_builder.rs index dcb6a56ef..aec7ae48f 100644 --- a/eth2/types/src/test_utils/builders/testing_deposit_builder.rs +++ b/eth2/types/src/test_utils/builders/testing_deposit_builder.rs @@ -34,7 +34,7 @@ impl TestingDepositBuilder { /// /// - `pubkey` to the signing pubkey. /// - `withdrawal_credentials` to the signing pubkey. - /// - `proof_of_possesssion` + /// - `proof_of_possession` pub fn sign(&mut self, keypair: &Keypair, epoch: Epoch, fork: &Fork, spec: &ChainSpec) { let withdrawal_credentials = Hash256::from_slice( &get_withdrawal_credentials(&keypair.pk, spec.bls_withdrawal_prefix_byte)[..], diff --git a/eth2/types/src/test_utils/test_random/secret_key.rs b/eth2/types/src/test_utils/test_random/secret_key.rs index a833a4488..7953b7c53 100644 --- a/eth2/types/src/test_utils/test_random/secret_key.rs +++ b/eth2/types/src/test_utils/test_random/secret_key.rs @@ -6,7 +6,7 @@ impl TestRandom for SecretKey { let mut key_bytes = vec![0; 48]; rng.fill_bytes(&mut key_bytes); /* - * An `unreachable!` is used here as there's no reason why you cannot constuct a key from a + * An `unreachable!` is used here as there's no reason why you cannot construct a key from a * fixed-length byte slice. Also, this should only be used during testing so a panic is * acceptable. */ diff --git a/eth2/utils/bls/src/aggregate_signature.rs b/eth2/utils/bls/src/aggregate_signature.rs index f434feb0d..60f9ee993 100644 --- a/eth2/utils/bls/src/aggregate_signature.rs +++ b/eth2/utils/bls/src/aggregate_signature.rs @@ -23,7 +23,7 @@ impl AggregateSignature { /// Instantiate a new AggregateSignature. /// /// is_empty is false - /// AggregateSiganture is point at infinity + /// AggregateSignature is point at infinity pub fn new() -> Self { Self { aggregate_signature: RawAggregateSignature::new(), @@ -87,7 +87,7 @@ impl AggregateSignature { .verify_multiple(&msg[..], domain, &aggregate_public_keys[..]) } - /// Return AggregateSiganture as bytes + /// Return AggregateSignature as bytes pub fn as_bytes(&self) -> Vec { if self.is_empty { return vec![0; BLS_AGG_SIG_BYTE_SIZE]; @@ -95,7 +95,7 @@ impl AggregateSignature { self.aggregate_signature.as_bytes() } - /// Convert bytes to AggregateSiganture + /// Convert bytes to AggregateSignature pub fn from_bytes(bytes: &[u8]) -> Result { for byte in bytes { if *byte != 0 { @@ -114,7 +114,7 @@ impl AggregateSignature { Ok(Self::empty_signature()) } - /// Returns if the AggregateSiganture `is_empty` + /// Returns if the AggregateSignature `is_empty` pub fn is_empty(&self) -> bool { self.is_empty } diff --git a/eth2/utils/cached_tree_hash/src/btree_overlay.rs b/eth2/utils/cached_tree_hash/src/btree_overlay.rs index 5692a4391..7ab22bcef 100644 --- a/eth2/utils/cached_tree_hash/src/btree_overlay.rs +++ b/eth2/utils/cached_tree_hash/src/btree_overlay.rs @@ -64,7 +64,7 @@ pub struct BTreeOverlay { } impl BTreeOverlay { - /// Instantiates a new instance for `item`, where it's first chunk is `inital_offset` and has + /// Instantiates a new instance for `item`, where it's first chunk is `initial_offset` and has /// the specified `depth`. pub fn new(item: &T, initial_offset: usize, depth: usize) -> Self where diff --git a/eth2/utils/cached_tree_hash/src/impls/vec.rs b/eth2/utils/cached_tree_hash/src/impls/vec.rs index bdb7eb134..0196e6a71 100644 --- a/eth2/utils/cached_tree_hash/src/impls/vec.rs +++ b/eth2/utils/cached_tree_hash/src/impls/vec.rs @@ -201,7 +201,7 @@ pub fn update_tree_hash_cache( cache.chunk_index = new.end; } - // The list has been lengthened and this is a new item that was prevously a + // The list has been lengthened and this is a new item that was previously a // padding item. // // Splice the tree for the new item over the padding chunk. @@ -268,7 +268,7 @@ pub fn update_tree_hash_cache( // This leaf was padding in both lists, there's nothing to do. (LeafNode::Padding, LeafNode::Padding) => (), // As we are looping through the larger of the lists of leaf nodes, it should - // be impossible for either leaf to be non-existant. + // be impossible for either leaf to be non-existent. (LeafNode::DoesNotExist, LeafNode::DoesNotExist) => unreachable!(), } } diff --git a/eth2/utils/cached_tree_hash/src/tree_hash_cache.rs b/eth2/utils/cached_tree_hash/src/tree_hash_cache.rs index 8f7b9de86..30b63b70d 100644 --- a/eth2/utils/cached_tree_hash/src/tree_hash_cache.rs +++ b/eth2/utils/cached_tree_hash/src/tree_hash_cache.rs @@ -139,7 +139,7 @@ impl TreeHashCache { } /// Instantiate a new cache from the pre-built `bytes` where each `self.chunk_modified` will be - /// set to `intitial_modified_state`. + /// set to `initial_modified_state`. /// /// Note: `bytes.len()` must be a multiple of 32 pub fn from_bytes( diff --git a/eth2/utils/honey-badger-split/src/lib.rs b/eth2/utils/honey-badger-split/src/lib.rs index ca02b5c01..6b5b325c9 100644 --- a/eth2/utils/honey-badger-split/src/lib.rs +++ b/eth2/utils/honey-badger-split/src/lib.rs @@ -29,7 +29,7 @@ impl<'a, T> Iterator for Split<'a, T> { } } -/// Splits a slice into chunks of size n. All postive n values are applicable, +/// Splits a slice into chunks of size n. All positive n values are applicable, /// hence the honey_badger prefix. /// /// Returns an iterator over the original list. diff --git a/eth2/utils/ssz/README.md b/eth2/utils/ssz/README.md index 0a9bbff25..7db85b10b 100644 --- a/eth2/utils/ssz/README.md +++ b/eth2/utils/ssz/README.md @@ -213,7 +213,7 @@ return rawbytes[current_index+4:current_index+4+bytes_length], new_index #### List -Deserailize each object in the list. +Deserialize each object in the list. 1. Get the length of the serialized list. 2. Loop through deseralizing each item in the list until you reach the entire length of the list. @@ -437,7 +437,7 @@ let decoded: Result<(Vec, usize), DecodeError> = decode_ssz_list( &encode Deserializes the "length" value in the serialized bytes from the index. The length of bytes is given (usually 4 stated in the reference implementation) and -is often the value appended to the list infront of the actual serialized +is often the value appended to the list in front of the actual serialized object. | Parameter | Description | diff --git a/eth2/utils/ssz/src/decode/impls.rs b/eth2/utils/ssz/src/decode/impls.rs index 6f7986945..1a12e7e19 100644 --- a/eth2/utils/ssz/src/decode/impls.rs +++ b/eth2/utils/ssz/src/decode/impls.rs @@ -535,7 +535,7 @@ mod tests { } #[test] - fn awkward_fixed_lenth_portion() { + fn awkward_fixed_length_portion() { assert_eq!( >>::from_ssz_bytes(&[10, 0, 0, 0, 10, 0, 0, 0, 0, 0]), Err(DecodeError::InvalidByteLength { diff --git a/eth2/utils/tree_hash/src/merkleize_standard.rs b/eth2/utils/tree_hash/src/merkleize_standard.rs index c47d342dd..88ea6d00e 100644 --- a/eth2/utils/tree_hash/src/merkleize_standard.rs +++ b/eth2/utils/tree_hash/src/merkleize_standard.rs @@ -47,7 +47,7 @@ pub fn merkleize_standard(bytes: &[u8]) -> Vec { j -= HASHSIZE; let hash = match o.get(i..i + MERKLE_HASH_CHUNK) { - // All bytes are available, hash as ususal. + // All bytes are available, hash as usual. Some(slice) => hash(slice), // Unable to get all the bytes. None => { diff --git a/eth2/utils/tree_hash_derive/src/lib.rs b/eth2/utils/tree_hash_derive/src/lib.rs index f815dd529..5a7b304b5 100644 --- a/eth2/utils/tree_hash_derive/src/lib.rs +++ b/eth2/utils/tree_hash_derive/src/lib.rs @@ -27,7 +27,7 @@ fn get_hashable_named_field_idents<'a>(struct_data: &'a syn::DataStruct) -> Vec< .collect() } -/// Returns true if some field has an attribute declaring it should not be hashedd. +/// Returns true if some field has an attribute declaring it should not be hashed. /// /// The field attribute is: `#[tree_hash(skip_hashing)]` fn should_skip_hashing(field: &syn::Field) -> bool { diff --git a/eth2/validator_change/src/lib.rs b/eth2/validator_change/src/lib.rs index 7c13b168a..3b119519c 100644 --- a/eth2/validator_change/src/lib.rs +++ b/eth2/validator_change/src/lib.rs @@ -59,7 +59,7 @@ pub fn update_validator_set( for (i, v) in validators.iter_mut().enumerate() { match v.status { /* - * Validator is pending activiation. + * Validator is pending activation. */ ValidatorStatus::PendingActivation => { let new_total_changed = total_changed diff --git a/protos/src/services.proto b/protos/src/services.proto index ecc75ee26..bf23ff391 100644 --- a/protos/src/services.proto +++ b/protos/src/services.proto @@ -6,7 +6,7 @@ // the block will be lost. // // This "stateful" method is being used presently because it's easier and -// requires less maintainence as the `BeaconBlock` definition changes. +// requires less maintenance as the `BeaconBlock` definition changes. syntax = "proto3"; @@ -28,7 +28,7 @@ service BeaconBlockService { /// Service that provides the validator client with requisite knowledge about //its public keys service ValidatorService { - // Gets the block proposer slot and comittee slot that a validator needs to + // Gets the block proposer slot and committee slot that a validator needs to // perform work on. rpc GetValidatorDuties(GetDutiesRequest) returns (GetDutiesResponse); } @@ -79,7 +79,7 @@ message PublishBeaconBlockRequest { BeaconBlock block = 1; } -// Beacon node indicates a sucessfully submitted proposal. +// Beacon node indicates a successfully submitted proposal. message PublishBeaconBlockResponse { bool success = 1; bytes msg = 2; diff --git a/tests/ef_tests/tests/tests.rs b/tests/ef_tests/tests/tests.rs index 06e8f223d..f6e14c927 100644 --- a/tests/ef_tests/tests/tests.rs +++ b/tests/ef_tests/tests/tests.rs @@ -12,7 +12,7 @@ fn yaml_files_in_test_dir(dir: &Path) -> Vec { assert!( base_path.exists(), format!( - "Unable to locate {:?}. Did you init git submoules?", + "Unable to locate {:?}. Did you init git submodules?", base_path ) ); diff --git a/validator_client/src/attestation_producer/mod.rs b/validator_client/src/attestation_producer/mod.rs index d59f383ef..900b0de24 100644 --- a/validator_client/src/attestation_producer/mod.rs +++ b/validator_client/src/attestation_producer/mod.rs @@ -39,7 +39,7 @@ pub struct AttestationProducer<'a, B: BeaconNodeAttestation, S: Signer> { pub beacon_node: Arc, /// The signer to sign the block. pub signer: &'a S, - /// Used for caclulating epoch. + /// Used for calculating epoch. pub slots_per_epoch: u64, } diff --git a/validator_client/src/block_producer/mod.rs b/validator_client/src/block_producer/mod.rs index 212db1f8e..48173b835 100644 --- a/validator_client/src/block_producer/mod.rs +++ b/validator_client/src/block_producer/mod.rs @@ -48,7 +48,7 @@ pub struct BlockProducer<'a, B: BeaconNodeBlock, S: Signer> { pub beacon_node: Arc, /// The signer to sign the block. pub signer: &'a S, - /// Used for caclulating epoch. + /// Used for calculating epoch. pub slots_per_epoch: u64, } From db094022b9bde71f9dc23d3140d1e7814be810b3 Mon Sep 17 00:00:00 2001 From: Pawan Dhananjay Date: Mon, 29 Jul 2019 05:25:57 +0530 Subject: [PATCH 03/25] Remove unused dependencies (#456) --- account_manager/Cargo.toml | 1 - beacon_node/Cargo.toml | 3 --- beacon_node/beacon_chain/Cargo.toml | 7 ------- beacon_node/client/Cargo.toml | 4 ---- beacon_node/eth2-libp2p/Cargo.toml | 1 - beacon_node/http_server/Cargo.toml | 11 ----------- beacon_node/network/Cargo.toml | 1 - beacon_node/rpc/Cargo.toml | 6 ------ beacon_node/store/Cargo.toml | 3 --- eth2/lmd_ghost/Cargo.toml | 4 ---- eth2/state_processing/Cargo.toml | 6 ------ eth2/types/Cargo.toml | 3 --- eth2/utils/boolean-bitfield/Cargo.toml | 1 - eth2/utils/eth2_config/Cargo.toml | 1 - eth2/utils/hashing/Cargo.toml | 2 -- eth2/utils/ssz/Cargo.toml | 3 --- tests/ef_tests/Cargo.toml | 1 - validator_client/Cargo.toml | 2 -- 18 files changed, 60 deletions(-) diff --git a/account_manager/Cargo.toml b/account_manager/Cargo.toml index b3c687eef..32d2286c8 100644 --- a/account_manager/Cargo.toml +++ b/account_manager/Cargo.toml @@ -12,5 +12,4 @@ slog-term = "^2.4.0" slog-async = "^2.3.0" validator_client = { path = "../validator_client" } types = { path = "../eth2/types" } -eth2_config = { path = "../eth2/utils/eth2_config" } dirs = "2.0.1" diff --git a/beacon_node/Cargo.toml b/beacon_node/Cargo.toml index 5c8786c70..24e148dd0 100644 --- a/beacon_node/Cargo.toml +++ b/beacon_node/Cargo.toml @@ -7,12 +7,10 @@ edition = "2018" [dependencies] eth2_config = { path = "../eth2/utils/eth2_config" } types = { path = "../eth2/types" } -toml = "^0.5" store = { path = "./store" } client = { path = "client" } version = { path = "version" } clap = "2.32.0" -serde = "1.0" slog = { version = "^2.2.3" , features = ["max_level_trace"] } slog-term = "^2.4.0" slog-async = "^2.3.0" @@ -21,6 +19,5 @@ tokio = "0.1.15" tokio-timer = "0.2.10" futures = "0.1.25" exit-future = "0.1.3" -state_processing = { path = "../eth2/state_processing" } env_logger = "0.6.1" dirs = "2.0.1" diff --git a/beacon_node/beacon_chain/Cargo.toml b/beacon_node/beacon_chain/Cargo.toml index 793ce79cd..5b5ae3780 100644 --- a/beacon_node/beacon_chain/Cargo.toml +++ b/beacon_node/beacon_chain/Cargo.toml @@ -5,20 +5,13 @@ authors = ["Paul Hauner ", "Age Manning "] edition = "2018" [dependencies] -beacon_chain = { path = "../beacon_chain" } clap = "2.32.0" #SigP repository libp2p = { git = "https://github.com/SigP/rust-libp2p", rev = "be5710bbde69d8c5be732c13ba64239e2f370a7b" } diff --git a/beacon_node/http_server/Cargo.toml b/beacon_node/http_server/Cargo.toml index 3e428357d..e87ff2997 100644 --- a/beacon_node/http_server/Cargo.toml +++ b/beacon_node/http_server/Cargo.toml @@ -5,30 +5,19 @@ authors = ["Paul Hauner "] edition = "2018" [dependencies] -bls = { path = "../../eth2/utils/bls" } beacon_chain = { path = "../beacon_chain" } iron = "^0.6" router = "^0.6" network = { path = "../network" } -eth2-libp2p = { path = "../eth2-libp2p" } -version = { path = "../version" } types = { path = "../../eth2/types" } -eth2_ssz = { path = "../../eth2/utils/ssz" } slot_clock = { path = "../../eth2/utils/slot_clock" } -protos = { path = "../../protos" } -grpcio = { version = "0.4", default-features = false, features = ["protobuf-codec"] } persistent = "^0.4" -protobuf = "2.0.2" prometheus = { version = "^0.6", features = ["process"] } clap = "2.32.0" -store = { path = "../store" } -dirs = "1.0.3" futures = "0.1.23" serde = "1.0" serde_derive = "1.0" serde_json = "1.0" slog = { version = "^2.2.3" , features = ["max_level_trace"] } -slog-term = "^2.4.0" -slog-async = "^2.3.0" tokio = "0.1.17" exit-future = "0.1.4" diff --git a/beacon_node/network/Cargo.toml b/beacon_node/network/Cargo.toml index 9eadede76..f6b1a7ee7 100644 --- a/beacon_node/network/Cargo.toml +++ b/beacon_node/network/Cargo.toml @@ -11,7 +11,6 @@ sloggers = "0.3.2" beacon_chain = { path = "../beacon_chain" } store = { path = "../store" } eth2-libp2p = { path = "../eth2-libp2p" } -version = { path = "../version" } types = { path = "../../eth2/types" } slog = { version = "^2.2.3" , features = ["max_level_trace"] } eth2_ssz = { path = "../../eth2/utils/ssz" } diff --git a/beacon_node/rpc/Cargo.toml b/beacon_node/rpc/Cargo.toml index 99cd78e6a..80c5c8666 100644 --- a/beacon_node/rpc/Cargo.toml +++ b/beacon_node/rpc/Cargo.toml @@ -12,18 +12,12 @@ eth2-libp2p = { path = "../eth2-libp2p" } version = { path = "../version" } types = { path = "../../eth2/types" } eth2_ssz = { path = "../../eth2/utils/ssz" } -slot_clock = { path = "../../eth2/utils/slot_clock" } protos = { path = "../../protos" } grpcio = { version = "0.4", default-features = false, features = ["protobuf-codec"] } -protobuf = "2.0.2" clap = "2.32.0" -store = { path = "../store" } -dirs = "1.0.3" futures = "0.1.23" serde = "1.0" serde_derive = "1.0" slog = { version = "^2.2.3" , features = ["max_level_trace"] } -slog-term = "^2.4.0" -slog-async = "^2.3.0" tokio = "0.1.17" exit-future = "0.1.4" diff --git a/beacon_node/store/Cargo.toml b/beacon_node/store/Cargo.toml index 6dcb771d2..94f644272 100644 --- a/beacon_node/store/Cargo.toml +++ b/beacon_node/store/Cargo.toml @@ -8,9 +8,6 @@ edition = "2018" tempfile = "3" [dependencies] -blake2-rfc = "0.2.18" -bls = { path = "../../eth2/utils/bls" } -bytes = "0.4.10" db-key = "0.0.5" leveldb = "0.8.4" parking_lot = "0.7" diff --git a/eth2/lmd_ghost/Cargo.toml b/eth2/lmd_ghost/Cargo.toml index c21af693e..eaf41730e 100644 --- a/eth2/lmd_ghost/Cargo.toml +++ b/eth2/lmd_ghost/Cargo.toml @@ -7,11 +7,7 @@ edition = "2018" [dependencies] parking_lot = "0.7" store = { path = "../../beacon_node/store" } -eth2_ssz = { path = "../utils/ssz" } -state_processing = { path = "../state_processing" } types = { path = "../types" } -log = "0.4.6" -bit-vec = "0.5.0" [dev-dependencies] criterion = "0.2" diff --git a/eth2/state_processing/Cargo.toml b/eth2/state_processing/Cargo.toml index e1f98260b..cf51ee564 100644 --- a/eth2/state_processing/Cargo.toml +++ b/eth2/state_processing/Cargo.toml @@ -17,15 +17,9 @@ serde_yaml = "0.8" [dependencies] bls = { path = "../utils/bls" } -fnv = "1.0" -hashing = { path = "../utils/hashing" } -int_to_bytes = { path = "../utils/int_to_bytes" } integer-sqrt = "0.1" itertools = "0.8" -log = "0.4" merkle_proof = { path = "../utils/merkle_proof" } -eth2_ssz = { path = "../utils/ssz" } -eth2_ssz_derive = { path = "../utils/ssz_derive" } tree_hash = { path = "../utils/tree_hash" } tree_hash_derive = { path = "../utils/tree_hash_derive" } types = { path = "../types" } diff --git a/eth2/types/Cargo.toml b/eth2/types/Cargo.toml index fd6578340..ed71598d7 100644 --- a/eth2/types/Cargo.toml +++ b/eth2/types/Cargo.toml @@ -16,15 +16,12 @@ ethereum-types = "0.5" fixed_len_vec = { path = "../utils/fixed_len_vec" } hashing = { path = "../utils/hashing" } hex = "0.3" -honey-badger-split = { path = "../utils/honey-badger-split" } int_to_bytes = { path = "../utils/int_to_bytes" } log = "0.4" rayon = "1.0" rand = "0.5.5" serde = "1.0" serde_derive = "1.0" -serde_json = "1.0" -serde_yaml = "0.8" slog = "^2.2.3" eth2_ssz = { path = "../utils/ssz" } eth2_ssz_derive = { path = "../utils/ssz_derive" } diff --git a/eth2/utils/boolean-bitfield/Cargo.toml b/eth2/utils/boolean-bitfield/Cargo.toml index ceb04a55a..e892fa5ba 100644 --- a/eth2/utils/boolean-bitfield/Cargo.toml +++ b/eth2/utils/boolean-bitfield/Cargo.toml @@ -11,7 +11,6 @@ eth2_ssz = { path = "../ssz" } bit-vec = "0.5.0" bit_reverse = "0.1" serde = "1.0" -serde_derive = "1.0" tree_hash = { path = "../tree_hash" } [dev-dependencies] diff --git a/eth2/utils/eth2_config/Cargo.toml b/eth2/utils/eth2_config/Cargo.toml index 5af385e2d..a12588748 100644 --- a/eth2/utils/eth2_config/Cargo.toml +++ b/eth2/utils/eth2_config/Cargo.toml @@ -6,7 +6,6 @@ edition = "2018" [dependencies] clap = "2.32.0" -dirs = "1.0.3" serde = "1.0" serde_derive = "1.0" toml = "^0.5" diff --git a/eth2/utils/hashing/Cargo.toml b/eth2/utils/hashing/Cargo.toml index 506b84a6b..10b457523 100644 --- a/eth2/utils/hashing/Cargo.toml +++ b/eth2/utils/hashing/Cargo.toml @@ -7,8 +7,6 @@ edition = "2018" [target.'cfg(not(target_arch = "wasm32"))'.dependencies] ring = "0.14.6" -[target.'cfg(target_arch = "wasm32")'.dependencies] -sha2 = "0.8.0" [dev-dependencies] rustc-hex = "2.0.1" diff --git a/eth2/utils/ssz/Cargo.toml b/eth2/utils/ssz/Cargo.toml index 9002dd6e7..928a0e6e9 100644 --- a/eth2/utils/ssz/Cargo.toml +++ b/eth2/utils/ssz/Cargo.toml @@ -18,7 +18,4 @@ criterion = "0.2" eth2_ssz_derive = "0.1.0" [dependencies] -bytes = "0.4.9" ethereum-types = "0.5" -hex = "0.3" -yaml-rust = "0.4" diff --git a/tests/ef_tests/Cargo.toml b/tests/ef_tests/Cargo.toml index e8d6b0f2f..90f66f355 100644 --- a/tests/ef_tests/Cargo.toml +++ b/tests/ef_tests/Cargo.toml @@ -24,4 +24,3 @@ state_processing = { path = "../../eth2/state_processing" } swap_or_not_shuffle = { path = "../../eth2/utils/swap_or_not_shuffle" } types = { path = "../../eth2/types" } walkdir = "2" -yaml-rust = { git = "https://github.com/sigp/yaml-rust", branch = "escape_all_str"} diff --git a/validator_client/Cargo.toml b/validator_client/Cargo.toml index d2824bc2f..19bd10a1e 100644 --- a/validator_client/Cargo.toml +++ b/validator_client/Cargo.toml @@ -19,7 +19,6 @@ eth2_config = { path = "../eth2/utils/eth2_config" } tree_hash = { path = "../eth2/utils/tree_hash" } clap = "2.32.0" grpcio = { version = "0.4", default-features = false, features = ["protobuf-codec"] } -protobuf = "2.0.2" protos = { path = "../protos" } slot_clock = { path = "../eth2/utils/slot_clock" } types = { path = "../eth2/types" } @@ -31,7 +30,6 @@ slog-json = "^2.3" slog-term = "^2.4.0" tokio = "0.1.18" tokio-timer = "0.2.10" -toml = "^0.5" error-chain = "0.12.0" bincode = "^1.1.2" futures = "0.1.25" From 6de9e5bd6f7491ab0a546e1936d98f0f96854756 Mon Sep 17 00:00:00 2001 From: Luke Anderson Date: Mon, 29 Jul 2019 10:01:56 +1000 Subject: [PATCH 04/25] Spec. for REST API (#455) * A first run at fleshing full REST API spec. - Added a new REST OpenAPI YAML specification to the docs folder, starting from the minimal validator spec. - Added a bunch of additional endpoints, including network information and beacon chain information. - Current yaml file has not been checked for syntax or any correctness. * Fixed REST OpenAPI Spec. - Updated spelling mistakes, indentation, and incorrect fields. * Added block_discovery endpoint to REST API spec. * Added /node/stats endpoint - /node/stats endpoint provides information about the running process - Added some extra TODOs as reminders. * Added missing Attestations to REST spec. - Added ability to get attestations and pending attestations from chain data. - Moved the Attestaion object into its own schema, with reference. --- docs/rest_oapi.yaml | 1379 +++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 1379 insertions(+) create mode 100644 docs/rest_oapi.yaml diff --git a/docs/rest_oapi.yaml b/docs/rest_oapi.yaml new file mode 100644 index 000000000..dea892c18 --- /dev/null +++ b/docs/rest_oapi.yaml @@ -0,0 +1,1379 @@ +openapi: "3.0.2" +info: + title: "Lighthouse REST API" + description: "" + version: "0.1.0" + license: + name: "Apache 2.0" + url: "https://www.apache.org/licenses/LICENSE-2.0.html" +tags: + - name: Phase0 + description: Endpoints which will be implemented for phase 0 of Ethereum Serenity + - name: Phase1 + description: Endpoints which will be implemented for phase 1 of Ethereum Serenity + - name: Future + description: Potential future endpoints or optional nice-to-haves + +paths: + /node/version: + get: + tags: + - Phase0 + summary: "Get version string of the running beacon node." + description: "Requests that the beacon node identify information about its implementation in a format similar to a [HTTP User-Agent](https://tools.ietf.org/html/rfc7231#section-5.5.3) field." + responses: + 200: + description: Request successful + content: + application/json: + schema: + $ref: '#/components/schemas/version' + 500: + $ref: '#/components/responses/InternalError' + + /node/genesis_time: + get: + tags: + - Phase0 + summary: "Get the genesis_time parameter from beacon node configuration." + description: "Requests the genesis_time parameter from the beacon node, which should be consistent across all beacon nodes that follow the same beacon chain." + responses: + 200: + description: Request successful + content: + application/json: + schema: + $ref: '#/components/schemas/genesis_time' + 500: + $ref: '#/components/responses/InternalError' + + /node/deposit_contract: + get: + tags: + - Phase0 + summary: "Get the address of the Ethereum 1 deposit contract." + description: "Requests the address of the deposit contract on the Ethereum 1 chain, which was used to start the current beacon chain." + responses: + 200: + description: Request successful + content: + application/json: + schema: + $ref: '#/components/schemas/ethereum_address' + 500: + $ref: '#/components/responses/InternalError' + + /node/syncing: + get: + tags: + - Phase0 + summary: "Poll to see if the the beacon node is syncing." + description: "Requests the beacon node to describe if it's currently syncing or not, and if it is, what block it is up to. This is modelled after the Eth1.0 JSON-RPC eth_syncing call.." + responses: + 200: + description: Request successful + content: + application/json: + schema: + type: object + properties: + is_syncing: + type: boolean + description: "A boolean of whether the node is currently syncing or not." + sync_status: + $ref: '#/components/schemas/SyncingStatus' + 500: + $ref: '#/components/responses/InternalError' + + /node/fork: + get: + tags: + - Phase0 + summary: "Get fork information from running beacon node." + description: "Requests the beacon node to provide which fork version it is currently on." + responses: + 200: + description: Request successful + content: + application/json: + schema: + type: object + properties: + fork: + $ref: '#/components/schemas/Fork' + chain_id: + type: integer + format: uint64 + description: "Sometimes called the network id, this number discerns the active chain for the beacon node. Analogous to Eth1.0 JSON-RPC net_version." + 500: + $ref: '#/components/responses/InternalError' + + /node/stats: + get: + tags: + - Future + summary: "Get operational information about the node." + description: "Fetches some operational information about the node's process, such as memory usage, database size, etc." + responses: + 200: + description: Request successful + content: + application/json: + schema: + type: object + properties: + memory_usage: + type: integer + format: uint64 + description: "The amount of memory used by the currently running beacon node process, expressed in bytes." + uptime: + type: integer + format: uint64 + description: "The number of seconds that have elapsed since beacon node process was started." + #TODO: what other useful process information could be expressed here? + + + /node/network/peer_count: + get: + tags: + - Phase0 + summary: "The number of established peers" + description: "Requests the beacon node to identify the number of peers with which an established connection is currently maintained." + responses: + 200: + description: Request successful + content: + application/json: + schema: + type: integer + format: uint64 + example: 25 + + /node/network/peers: + get: + tags: + - Phase0 + summary: "List the networking peers with which the node is communicating." + description: "Requests that the beacon node identify all of the peers with which it is communicating, including established connections and connections which it is attempting to establish." + responses: + 200: + description: Request successful + content: + application/json: + schema: + type: array + items: + $ref: '#/components/schemas/Peer' + + /node/network/listening: + get: + tags: + - Phase0 + summary: "Identify if the beacon node is listening for networking connections, and on what address." + description: "Requests that the beacon node identify whether it is listening for incoming networking connections, and if so, what network address(es) are being used." + responses: + 200: + description: Request successful + content: + application/json: + schema: + type: object + properties: + listening: + type: boolean + nullable: false + description: "True if the node is listening for incoming network connections. False if networking has been disabled or if the node has been configured to only connect with a static set of peers." + listen_address: + $ref: '#/components/schemas/multiaddr' + + /node/network/stats: + get: + tags: + - Future + summary: "Get some simple network statistics from the node." + description: "Request that the beacon node provide some historical summary information about its networking interface." + #TODO: Do we actually collect these stats? Should we? + responses: + 200: + description: Request successful + content: + application/json: + schema: + type: object + properties: + bytes_sent: + type: integer + format: uint64 + description: "The total number of bytes sent by the beacon node since it was started." + bytes_received: + type: integer + format: uint64 + description: "The total number of bytes sent by the beacon node since it was started." + peers_seen: + type: integer + format: uint64 + description: "The total number of unique peers (by multiaddr) that have been discovered since the beacon node instance was started." + #TODO: This might be too difficult to collect + + /node/network/block_discovery: + get: + tags: + - Future + summary: "Identify the time at which particular blocks were first seen." + description: "Request the node to provide the time at which particular blocks were first seen on the network." + parameters: + - name: block_roots + description: "Provide an array of block roots for which the discovered time is to be returned." + in: query + required: false + schema: + type: array + items: + type: string + format: bytes + pattern: "^0x[a-fA-F0-9]{64}$" + responses: + 200: + description: Success response + content: + application/json: + schema: + type: array + items: + type: object + properties: + root: + type: string + format: bytes + description: "The merkle root of the block." + pattern: "^0x[a-fA-F0-9]{64}$" + discovered_time: + type: integer + format: uint64 + description: "UNIX time in milliseconds that the block was first discovered, either from a network peer or the validator client." + + + + #TODO: Add the endpoints that enable a validator to join, exit, withdraw, etc. + /validator/duties: + get: + tags: + - Phase0 + summary: "Get validator duties for the requested validators." + description: "Requests the beacon node to provide a set of _duties_, which are actions that should be performed by validators, for a particular epoch. Duties should only need to be checked once per epoch, however a chain reorganization (of > MIN_SEED_LOOKAHEAD epochs) could occur, resulting in a change of duties. For full safety, this API call should be polled at every slot to ensure that chain reorganizations are recognized, and to ensure that the beacon node is properly synchronized. If no epoch parameter is provided, then the current epoch is assumed." + parameters: + - name: validator_pubkeys + in: query + required: true + description: "An array of hex-encoded BLS public keys" + schema: + type: array + items: + $ref: '#/components/schemas/pubkey' + minItems: 1 + - name: epoch + in: query + required: false + schema: + type: integer + format: uint64 + responses: + 200: + description: Success response + content: + application/json: + schema: + type: array + items: + $ref: '#/components/schemas/ValidatorDuty' + 400: + $ref: '#/components/responses/InvalidRequest' + 406: + description: "Duties cannot be provided for the requested epoch." + 500: + $ref: '#/components/responses/InternalError' + 503: + $ref: '#/components/responses/CurrentlySyncing' + + /validator/block: + get: + tags: + - Phase0 + summary: "Produce a new block, without signature." + description: "Requests a beacon node to produce a valid block, which can then be signed by a validator." + parameters: + - name: slot + in: query + required: true + description: "The slot for which the block should be proposed." + schema: + type: integer + format: uint64 + - name: randao_reveal + in: query + required: true + description: "The validator's randao reveal value." + schema: + type: string + format: byte + responses: + 200: + description: Success response + content: + application/json: + schema: + $ref: '#/components/schemas/BeaconBlock' + 400: + $ref: '#/components/responses/InvalidRequest' + 500: + $ref: '#/components/responses/InternalError' + 503: + $ref: '#/components/responses/CurrentlySyncing' + post: + tags: + - Phase0 + summary: "Publish a signed block." + description: "Instructs the beacon node to broadcast a newly signed beacon block to the beacon network, to be included in the beacon chain. The beacon node is not required to validate the signed `BeaconBlock`, and a successful response (20X) only indicates that the broadcast has been successful. The beacon node is expected to integrate the new block into its state, and therefore validate the block internally, however blocks which fail the validation are still broadcast but a different status code is returned (202)" + parameters: + - name: beacon_block + in: query + required: true + description: "The `BeaconBlock` object, as sent from the beacon node originally, but now with the signature field completed." + schema: + $ref: '#/components/schemas/BeaconBlock' + responses: + 200: + description: "The block was validated successfully and has been broadcast. It has also been integrated into the beacon node's database." + 202: + description: "The block failed validation, but was successfully broadcast anyway. It was not integrated into the beacon node's database." + 400: + $ref: '#/components/responses/InvalidRequest' + 500: + $ref: '#/components/responses/InternalError' + 503: + $ref: '#/components/responses/CurrentlySyncing' + + /validator/attestation: + get: + tags: + - Phase0 + summary: "Produce an attestation, without signature." + description: "Requests that the beacon node produce an IndexedAttestation, with a blank signature field, which the validator will then sign." + parameters: + - name: validator_pubkey + in: query + required: true + description: "Uniquely identifying which validator this attestation is to be produced for." + schema: + $ref: '#/components/schemas/pubkey' + - name: poc_bit + in: query + required: true + description: "The proof-of-custody bit that is to be reported by the requesting validator. This bit will be inserted into the appropriate location in the returned `IndexedAttestation`." + schema: + type: integer + format: uint32 + minimum: 0 + maximum: 1 + - name: slot + in: query + required: true + description: "The slot for which the attestation should be proposed." + schema: + type: integer + - name: shard + in: query + required: true + description: "The shard number for which the attestation is to be proposed." + schema: + type: integer + responses: + 200: + description: Success response + content: + application/json: + schema: + $ref: '#/components/schemas/IndexedAttestation' + 400: + $ref: '#/components/responses/InvalidRequest' + 500: + $ref: '#/components/responses/InternalError' + 503: + $ref: '#/components/responses/CurrentlySyncing' + post: + tags: + - Phase0 + summary: "Publish a signed attestation." + description: "Instructs the beacon node to broadcast a newly signed IndexedAttestation object to the intended shard subnet. The beacon node is not required to validate the signed IndexedAttestation, and a successful response (20X) only indicates that the broadcast has been successful. The beacon node is expected to integrate the new attestation into its state, and therefore validate the attestation internally, however attestations which fail the validation are still broadcast but a different status code is returned (202)" + parameters: + - name: attestation + in: query + required: true + description: "An `IndexedAttestation` structure, as originally provided by the beacon node, but now with the signature field completed." + schema: + $ref: '#/components/schemas/IndexedAttestation' + responses: + 200: + description: "The attestation was validated successfully and has been broadcast. It has also been integrated into the beacon node's database." + 202: + description: "The attestation failed validation, but was successfully broadcast anyway. It was not integrated into the beacon node's database." + 400: + $ref: '#/components/responses/InvalidRequest' + 500: + $ref: '#/components/responses/InternalError' + 503: + $ref: '#/components/responses/CurrentlySyncing' + + /chain/beacon/blocks: + get: + tags: + - Phase0 + summary: 'Retrieve blocks by root, slot, or epoch.' + description: "Request that the node return beacon chain blocks that match the provided criteria (a block root, beacon chain slot, or epoch). Only one of the parameters should be provided as a criteria." + parameters: + - name: root + description: "Filter by block root, returning a single block." + in: query + required: false + schema: + type: string + format: bytes + pattern: "^0x[a-fA-F0-9]{64}$" + - name: slot + description: "Filter blocks by slot number. It is possible that multiple blocks will be returned if the slot has not yet been finalized, or if the node has seen blocks from multiple forks." + #TODO: Is this description accurate? + in: query + required: false + schema: + type: integer + format: uint64 + - name: epoch + description: "Filter blocks by epoch, returning all blocks found for the provided epoch. It is possible that multiple blocks will be returned with the same slot number if the slot has not yet been finalized, or if the node has seen blocks from multiple forks." + #TODO: Should this actually return no more than one block per slot, if it has been finalized? i.e. not blocks on multiple forks? + in: query + required: false + schema: + type: integer + format: uint64 + responses: + 200: + description: Success response. + content: + application/json: + schema: + type: array + items: + $ref: '#/components/schemas/BeaconBlock' + 400: + $ref: '#/components/responses/InvalidRequest' + #TODO: Make this request error more specific if one of the parameters is not provided correctly. + + /chain/beacon/chainhead: + get: + tags: + - Phase0 + summary: "Detail the current perspective of the beacon node." + description: "Request the beacon node to identify the most up-to-date information about the beacon chain from its perspective. This includes the latest block, which slots have been finalized, etc." + responses: + 200: + description: Success response + content: + application/json: + schema: + type: object + description: "The latest information about the head of the beacon chain." + properties: + block_root: + type: string + format: bytes + pattern: "^0x[a-fA-F0-9]{64}$" + description: "The merkle tree root of the canonical head block in the beacon node." + block_slot: + type: integer + format: uint64 + description: "The slot of the head block." + finalized_slot: + type: integer + format: uint64 + description: "The slot number of the most recent finalized slot." + finalized_block_root: + type: string + format: bytes + pattern: "^0x[a-fA-F0-9]{64}$" + description: "The block root for the most recent finalized block." + justified_slot: + type: integer + format: uint64 + description: "The slot number of the most recent justified slot." + justified_block_root: + type: string + format: bytes + pattern: "^0x[a-fA-F0-9]{64}$" + description: "The block root of the most recent justified block." + previous_justified_slot: + type: integer + format: uint64 + description: "The slot number of the second most recent justified slot." + previous_justified_block_root: + type: integer + format: bytes + pattern: "^0x[a-fA-F0-9]{64}$" + description: "The block root of the second most recent justified block." + + /chain/beacon/attestations: + get: + tags: + - Phase0 + summary: 'Retrieve attestations by root, slot, or epoch.' + description: "Request that the node return all attestations which it has seen, that match the provided criteria (a block root, beacon chain slot, or epoch). Only one of the parameters should be provided as a criteria." + parameters: + - name: root + description: "Filter by block root, returning attestations associated with that block." + in: query + required: false + schema: + type: string + format: bytes + pattern: "^0x[a-fA-F0-9]{64}$" + - name: slot + description: "Filter attestations by slot number." + #TODO: Is this description accurate? + in: query + required: false + schema: + type: integer + format: uint64 + - name: epoch + description: "Filter attestations by epoch number." + #TODO: Should this actually return no more than one block per slot, if it has been finalized? i.e. not blocks on multiple forks? + in: query + required: false + schema: + type: integer + format: uint64 + responses: + 200: + description: Success response. + content: + application/json: + schema: + type: array + items: + $ref: '#/components/schemas/Attestation' + 400: + $ref: '#/components/responses/InvalidRequest' + #TODO: Make this request error more specific if one of the parameters is not provided correctly. + + /chain/beacon/attestations/pending: + get: + tags: + - Phase0 + summary: 'Retrieve all pending attestations.' + description: "Request that the node return all attestations which is currently holding in a pending state; i.e. is not associated with a finalized slot." + responses: + 200: + description: Success response. + content: + application/json: + schema: + type: array + items: + $ref: '#/components/schemas/Attestation' + 400: + $ref: '#/components/responses/InvalidRequest' + #TODO: Make this request error more specific if one of the parameters is not provided correctly. + + /chain/beacon/validators: + get: + tags: + - Phase0 + summary: "List the set of active validators for an epoch." + description: "Request the beacon node to list the active validators for the specified epoch, or the current epoch if none is specified." + parameters: + - name: epoch + description: "The epoch for which the list of validators should be returned." + in: query + required: false + schema: + type: integer + format: uint64 + responses: + 200: + description: Success response + content: + application/json: + schema: + type: object + properties: + epoch: + type: integer + format: uint64 + description: "The epoch in which the list of validators are active." + validators: + type: array + items: + $ref: '#/components/schemas/ValidatorInfo' + + /chain/beacon/validators/activesetchanges: + get: + tags: + - Phase0 + summary: "Retrieve the changes in active validator set." + description: "Request that the beacon node describe the changes that occurred at the specified epoch, as compared with the prior epoch." + parameters: + - name: epoch + description: "The epoch for which the validator change comparison should be made. The current epoch is used if this value is omitted." + in: query + required: false + schema: + type: integer + format: uint64 + responses: + 200: + description: Success response + content: + application/json: + schema: + type: object + properties: + epoch: + type: integer + format: uint64 + description: "The epoch for which the returned active validator changes are provided." + activated_public_keys: + type: array + description: "The list of validator public keys which were activated in the epoch." + items: + $ref: '#/components/schemas/pubkey' + exited_public_keys: + type: array + description: "The list of validator public keys which exited in the epoch." + items: + $ref: '#/components/schemas/pubkey' + ejected_public_keys: + type: array + description: "The list of validator public keys which were ejected in the epoch." + items: + $ref: '#/components/schemas/pubkey' + + /chain/beacon/validators/assignments: + get: + tags: + - Phase0 + summary: "Retrieve the assigned responsibilities for validators in a particular epoch." + description: "Request that the beacon node list the duties which have been assigned to the active validator set in a particular epoch." + parameters: + - name: epoch + description: "The epoch for which the validator assignments should be made. The current epoch is used if this value is omitted." + in: query + required: false + schema: + type: integer + format: uint64 + responses: + 200: + description: Success response + content: + application/json: + schema: + type: object + properties: + epoch: + type: integer + format: uint64 + description: "The epoch for which the returned active validator changes are provided." + duties: + type: array + items: + $ref: '#/components/schemas/ValidatorDuty' + #TODO: This does not include the crosslink committee value, which must be included for Phase1? + + /chain/beacon/validators/indices: + get: + tags: + - Phase0 + summary: "Resolve a set of validator public keys to their validator indices." + description: "Attempt to resolve the public key of a set of validators to their corresponding ValidatorIndex values. Generally the mapping from validator public key to index should never change, however it is possible in some scenarios." + parameters: + - name: pubkeys + in: query + required: true + description: "An array of hex-encoded BLS public keys, for which the ValidatorIndex values should be returned." + schema: + type: array + items: + $ref: '#/components/schemas/pubkey' + minItems: 1 + responses: + 200: + description: Success response + content: + application/json: + schema: + type: array + items: + $ref: '#/components/schemas/ValidatorIndexMapping' + + /chain/beacon/validators/pubkeys: + get: + tags: + - Phase0 + summary: "Resolve a set of validator indicies to their public keys." + description: "Attempt to resolve the ValidatorIndex of a set of validators to their corresponding public keys. Generally the mapping from ValidatorIndex to public key should never change, however it is possible in some scenarios." + parameters: + - name: indices + in: query + required: true + description: "An array of ValidatorIndex values, for which the public keys should be returned." + schema: + type: array + items: + type: integer + format: uint64 + description: "The ValidatorIndex values to be resolved." + minItems: 1 + responses: + 200: + description: Success response + content: + application/json: + schema: + type: array + items: + $ref: '#/components/schemas/ValidatorIndexMapping' + + /chain/beacon/validators/balances: + get: + tags: + - Phase0 + summary: "Retrieve the balances of validators at a specified epoch." + description: "Retrieve the balances of validators at a specified epoch (or the current epoch if none specified). The list of balances can be filtered by providing a list of validator public keys or indices." + parameters: + - name: epoch + in: query + required: false + description: "The epoch at which the balances are to be measured." + schema: + type: integer + format: uint64 + - name: validator_pubkeys + in: query + required: false + description: "An array of hex-encoded BLS public keys, for which the balances should be returned." + schema: + type: array + items: + $ref: '#/components/schemas/pubkey' + minItems: 1 + - name: validator_indices + in: query + required: false + description: "An array of ValidatorIndex values, for which the balances should be returned." + schema: + type: array + items: + $ref: '#/components/schemas/pubkey' + minItems: 1 + responses: + 200: + description: Success response + content: + application/json: + schema: + type: object + properties: + epoch: + type: integer + format: uint64 + description: "The epoch for which the returned active validator changes are provided." + balances: + type: array + items: + title: ValidatorBalances + type: object + properties: + pubkey: + $ref: '#/components/schemas/pubkey' + index: + type: integer + format: uint64 + description: "The global ValidatorIndex of the validator." + balance: + type: integer + format: uint64 + description: "The balance of the validator at the specified epoch, expressed in Gwei" + + /chain/beacon/validators/participation: + get: + tags: + - Phase0 + summary: "Retrieve aggregate information about validator participation in an epoch." + description: "Retrieve some aggregate information about the participation of validators in a specified epoch (or the current epoch if none specified)." + parameters: + - name: epoch + in: query + required: false + description: "The epoch at which the participation is to be measured." + schema: + type: integer + format: uint64 + responses: + 200: + description: Success response + content: + application/json: + schema: + type: object + properties: + epoch: + type: string + format: uint64 + description: "The epoch for which the participation information is provided." + finalized: + type: boolean + format: boolean + description: "Whether the epoch has been finalized or not." + global_participation_rate: + type: number + format: float + description: "The percentage of validator participation in the given epoch." + minimum: 0.0 + maximum: 1.0 + voted_ether: + type: integer + format: uint64 + description: "The total amount of ether, expressed in Gwei, that has been used in voting for the specified epoch." + eligible_ether: + type: integer + format: uint64 + description: "The total amount of ether, expressed in Gwei, that is eligible for voting in the specified epoch." + + /chain/beacon/validators/queue: + get: + tags: + - Phase0 + summary: "Retrieve information about the validator queue at the specified epoch." + description: "Retrieve information about the queue of validators for the specified epoch (or the current epoch if none specified)." + parameters: + - name: epoch + in: query + required: false + description: "The epoch at which the validator queue is to be measured." + schema: + type: integer + format: uint64 + responses: + 200: + description: Success response + content: + application/json: + schema: + type: object + properties: + epoch: + type: string + format: uint64 + description: "The epoch for which the validator queue information is provided." + churn_limit: + type: integer + format: uint64 + description: "The validator churn limit for the specified epoch." + activation_public_keys: + type: array + description: "The public keys of validators which are queued for activation." + items: + $ref: "#/components/schemas/pubkey" + exit_public_keys: + type: array + description: "The public keys of validators which are queued for exiting." + items: + $ref: '#/components/schemas/pubkey' + +components: + schemas: + pubkey: + type: string + format: byte + pattern: "^0x[a-fA-F0-9]{96}$" + description: "The validator's BLS public key, uniquely identifying them. _48-bytes, hex encoded with 0x prefix, case insensitive._" + example: "0x1b66ac1fb663c9bc59509846d6ec05345bd908eda73e670af888da41af171505cc411d61252fb6cb3fa0017b679f8bb2305b26a285fa2737f175668d0dff91cc" + + version: + type: string + description: "A string which uniquely identifies the client implementation and its version; similar to [HTTP User-Agent](https://tools.ietf.org/html/rfc7231#section-5.5.3)." + example: "Lighthouse / v0.1.5 (Linux x86_64)" + + genesis_time: + type: integer + format: uint64 + description: "The genesis_time configured for the beacon node, which is the unix time at which the Eth2.0 chain began." + example: 1557716289 + + connection_duration: + type: integer + format: uint64 + description: "The number of seconds that an established network connection has persisted." + #TODO: Is it reasonable to store the connection duration? Do we have this information? Need to ask Age. + example: 3600 + + multiaddr: + type: string + description: "The multiaddr address of a network peer." + nullable: true + #TODO Define an example and pattern for a multiaddr string. + + ethereum_address: + type: string + format: byte + pattern: "^0x[a-fA-F0-9]{64}$" + description: "A hex encoded ethereum address." + + Peer: + type: object + properties: + connection_status: + $ref: '#/components/schemas/ConnectionStatus' + connection_duration: + $ref: '#/components/schemas/connection_duration' + multiaddr: + $ref: '#/components/schemas/multiaddr' + measured_delay: + type: integer + format: uint64 + description: "The round trip network delay to the peer, measured in milliseconds" + #TODO: Do we have the RTT information? + + ConnectionStatus: + type: string + #TODO: Define the ENUM and possible connection states + + ValidatorIndexMapping: + type: object + properties: + pubkey: + $ref: '#/components/schemas/pubkey' + index: + type: integer + format: uint64 + description: "The global ValidatorIndex value." + + ValidatorInfo: + type: object + properties: + public_key: + $ref: '#/components/schemas/pubkey' + withdrawal_credentials: + type: string + format: bytes + pattern: "^0x[a-fA-F0-9]{64}$" + description: "The 32 byte hash of the public key which the validator uses for withdrawing their rewards." + activation_eligiblity_epoch: + type: integer + format: uint64 + description: "The epoch when the validator became or will become eligible for activation. This field may be zero if the validator was present in the Ethereum 2.0 genesis." + activation_epoch: + type: integer + format: uint64 + description: "Epoch when the validator was or will be activated. This field may be zero if the validator was present in the Ethereum 2.0 genesis." + exit_epoch: + type: integer + format: uint64 + nullable: true + description: "Epoch when the validator was exited, or null if the validator has not exited." + withdrawable_epoch: + type: integer + format: uint64 + nullable: true + description: "Epoch when the validator is eligible to withdraw their funds, or null if the validator has not exited." + slashed: + type: boolean + description: "Whether the validator has or has not been slashed." + effective_balance: + type: integer + format: uint64 + description: "The effective balance of the validator, measured in Gwei." + + ValidatorDuty: + type: object + properties: + validator_pubkey: + $ref: '#/components/schemas/pubkey' + attestation_slot: + type: integer + format: uint64 + description: "The slot at which the validator must attest." + attestation_shard: + type: integer + format: uint64 + description: "The shard in which the validator must attest." + block_proposal_slot: + type: integer + format: uint64 + nullable: true + description: "The slot in which a validator must propose a block, or `null` if block production is not required." + + SyncingStatus: + type: object + nullable: true + properties: + starting_slot: + type: integer + format: uint64 + description: "The slot at which syncing started (will only be reset after the sync reached its head)" + current_slot: + type: integer + format: uint64 + description: "The most recent slot synchronised by the beacon node." + highest_slot: + type: integer + format: uint64 + description: "Globally, the estimated most recent slot number, or current target slot number." + + BeaconBlock: + description: "The [`BeaconBlock`](https://github.com/ethereum/eth2.0-specs/blob/master/specs/core/0_beacon-chain.md#beaconblock) object from the Eth2.0 spec." + allOf: + - $ref: '#/components/schemas/BeaconBlockCommon' + - type: object + properties: + body: + $ref: '#/components/schemas/BeaconBlockBody' + + BeaconBlockHeader: + description: "The [`BeaconBlockHeader`](https://github.com/ethereum/eth2.0-specs/blob/master/specs/core/0_beacon-chain.md#beaconblockheader) object from the Eth2.0 spec." + allOf: + - $ref: '#/components/schemas/BeaconBlockCommon' + - type: object + properties: + body_root: + type: string + format: bytes + pattern: "^0x[a-fA-F0-9]{64}$" + description: "The tree hash merkle root of the `BeaconBlockBody` for the `BeaconBlock`" + + BeaconBlockCommon: + # An abstract object to collect the common fields between the BeaconBlockHeader and the BeaconBlock objects + type: object + properties: + slot: + type: integer + format: uint64 + description: "The slot to which this block corresponds." + parent_root: + type: string + format: bytes + pattern: "^0x[a-fA-F0-9]{64}$" + description: "The signing merkle root of the parent `BeaconBlock`." + state_root: + type: string + format: bytes + pattern: "^0x[a-fA-F0-9]{64}$" + description: "The tree hash merkle root of the `BeaconState` for the `BeaconBlock`." + signature: + type: string + format: bytes + pattern: "^0x[a-fA-F0-9]{192}$" + example: "0x1b66ac1fb663c9bc59509846d6ec05345bd908eda73e670af888da41af171505cc411d61252fb6cb3fa0017b679f8bb2305b26a285fa2737f175668d0dff91cc1b66ac1fb663c9bc59509846d6ec05345bd908eda73e670af888da41af171505" + description: "The BLS signature of the `BeaconBlock` made by the validator of the block." + + BeaconBlockBody: + type: object + description: "The [`BeaconBlockBody`](https://github.com/ethereum/eth2.0-specs/blob/master/specs/core/0_beacon-chain.md#beaconblockbody) object from the Eth2.0 spec." + properties: + randao_reveal: + type: string + format: byte + pattern: "^0x[a-fA-F0-9]{192}$" + description: "The RanDAO reveal value provided by the validator." + eth1_data: + title: Eth1Data + type: object + description: "The [`Eth1Data`](https://github.com/ethereum/eth2.0-specs/blob/master/specs/core/0_beacon-chain.md#eth1data) object from the Eth2.0 spec." + properties: + deposit_root: + type: string + format: byte + pattern: "^0x[a-fA-F0-9]{64}$" + description: "Root of the deposit tree." + deposit_count: + type: integer + format: uint64 + description: "Total number of deposits." + block_hash: + type: string + format: byte + pattern: "^0x[a-fA-F0-9]{64}$" + description: "Ethereum 1.x block hash." + graffiti: + type: string + format: byte + pattern: "^0x[a-fA-F0-9]{64}$" + proposer_slashings: + type: array + items: + title: ProposerSlashings + type: object + description: "The [`ProposerSlashing`](https://github.com/ethereum/eth2.0-specs/blob/master/specs/core/0_beacon-chain.md#proposerslashing) object from the Eth2.0 spec." + properties: + proposer_index: + type: integer + format: uint64 + description: "The index of the proposer to be slashed." + header_1: + $ref: '#/components/schemas/BeaconBlockHeader' + header_2: + $ref: '#/components/schemas/BeaconBlockHeader' + attester_slashings: + type: array + items: + title: AttesterSlashings + type: object + description: "The [`AttesterSlashing`](https://github.com/ethereum/eth2.0-specs/blob/master/specs/core/0_beacon-chain.md#attesterslashing) object from the Eth2.0 spec." + properties: + attestation_1: + $ref: '#/components/schemas/IndexedAttestation' + attestation_2: + $ref: '#/components/schemas/IndexedAttestation' + attestations: + type: array + items: + $ref: '#/components/schemas/Attestation' + deposits: + type: array + items: + title: Deposit + type: object + description: "The [`Deposit`](https://github.com/ethereum/eth2.0-specs/blob/master/specs/core/0_beacon-chain.md#deposit) object from the Eth2.0 spec." + properties: + proof: + type: array + description: "Branch in the deposit tree." + items: + type: string + format: byte + pattern: "^0x[a-fA-F0-9]{64}$" + minItems: 32 + maxItems: 32 + index: + type: integer + format: uint64 + description: "Index in the deposit tree." + data: + title: DepositData + type: object + description: "The [`DepositData`](https://github.com/ethereum/eth2.0-specs/blob/master/specs/core/0_beacon-chain.md#depositdata) object from the Eth2.0 spec." + properties: + pubkey: + $ref: '#/components/schemas/pubkey' + withdrawal_credentials: + type: string + format: byte + pattern: "^0x[a-fA-F0-9]{64}$" + description: "The withdrawal credentials." + amount: + type: integer + format: uint64 + description: "Amount in Gwei." + signature: + type: string + format: byte + pattern: "^0x[a-fA-F0-9]{192}$" + description: "Container self-signature." + voluntary_exits: + type: array + items: + title: VoluntaryExit + type: object + description: "The [`VoluntaryExit`](https://github.com/ethereum/eth2.0-specs/blob/master/specs/core/0_beacon-chain.md#voluntaryexit) object from the Eth2.0 spec." + properties: + epoch: + type: integer + format: uint64 + description: "Minimum epoch for processing exit." + validator_index: + type: integer + format: uint64 + description: "Index of the exiting validator." + signature: + type: string + format: byte + pattern: "^0x[a-fA-F0-9]{192}$" + description: "Validator signature." + transfers: + type: array + items: + title: Transfer + type: object + description: "The [`Transfer`](https://github.com/ethereum/eth2.0-specs/blob/master/specs/core/0_beacon-chain.md#transfer) object from the Eth2.0 spec." + properties: + sender: + type: integer + format: uint64 + description: "Sender index." + recipient: + type: integer + format: uint64 + description: "Recipient index." + amount: + type: integer + format: uint64 + description: "Amount in Gwei." + fee: + type: integer + format: uint64 + description: "Fee in Gwei for block producer." + slot: + type: integer + format: uint64 + description: "Inclusion slot." + pubkey: + type: string + format: byte + pattern: "^0x[a-fA-F0-9]{96}$" + description: "Sender withdrawal public key." + signature: + type: string + format: byte + pattern: "^0x[a-fA-F0-9]{192}$" + description: "Sender signature." + + Fork: + type: object + description: "The [`Fork`](https://github.com/ethereum/eth2.0-specs/blob/master/specs/core/0_beacon-chain.md#Fork) object from the Eth2.0 spec." + properties: + previous_version: + type: string + format: byte + pattern: "^0x[a-fA-F0-9]{8}$" + description: "Previous fork version." + current_version: + type: string + format: byte + pattern: "^0x[a-fA-F0-9]{8}$" + description: "Current fork version." + epoch: + type: integer + format: uint64 + description: "Fork epoch number." + + Attestation: + type: object + description: "The [`Attestation`](https://github.com/ethereum/eth2.0-specs/blob/master/specs/core/0_beacon-chain.md#attestation) object from the Eth2.0 spec." + properties: + aggregation_bitfield: + type: string + format: byte + pattern: "^0x[a-fA-F0-9]+$" + description: "Attester aggregation bitfield." + custody_bitfield: + type: string + format: byte + pattern: "^0x[a-fA-F0-9]+$" + description: "Custody bitfield." + signature: + type: string + format: byte + pattern: "^0x[a-fA-F0-9]{192}$" + description: "BLS aggregate signature." + data: + $ref: '#/components/schemas/AttestationData' + + IndexedAttestation: + type: object + description: "The [`IndexedAttestation`](https://github.com/ethereum/eth2.0-specs/blob/master/specs/core/0_beacon-chain.md#indexedattestation) object from the Eth2.0 spec." + properties: + custody_bit_0_indices: + type: array + description: "Validator indices for 0 bits." + items: + type: integer + format: uint64 + custody_bit_1_indices: + type: array + description: "Validator indices for 1 bits." + items: + type: integer + format: uint64 + signature: + type: string + format: bytes + pattern: "^0x[a-fA-F0-9]{192}$" + example: "0x1b66ac1fb663c9bc59509846d6ec05345bd908eda73e670af888da41af171505cc411d61252fb6cb3fa0017b679f8bb2305b26a285fa2737f175668d0dff91cc1b66ac1fb663c9bc59509846d6ec05345bd908eda73e670af888da41af171505" + description: "The BLS signature of the `IndexedAttestation`, created by the validator of the attestation." + data: + $ref: '#/components/schemas/AttestationData' + + AttestationData: + type: object + description: "The [`AttestationData`](https://github.com/ethereum/eth2.0-specs/blob/master/specs/core/0_beacon-chain.md#attestationdata) object from the Eth2.0 spec." + properties: + beacon_block_root: + type: string + format: byte + pattern: "^0x[a-fA-F0-9]{64}$" + description: "LMD GHOST vote." + source_epoch: + type: integer + format: uint64 + description: "Source epoch from FFG vote." + source_root: + type: string + format: byte + pattern: "^0x[a-fA-F0-9]{64}$" + description: "Source root from FFG vote." + target_epoch: + type: integer + format: uint64 + description: "Target epoch from FFG vote." + target_root: + type: string + format: byte + pattern: "^0x[a-fA-F0-9]{64}$" + description: "Target root from FFG vote." + crosslink: + title: CrossLink + type: object + description: "The [`Crosslink`](https://github.com/ethereum/eth2.0-specs/blob/master/specs/core/0_beacon-chain.md#crosslink) object from the Eth2.0 spec, contains data from epochs [`start_epoch`, `end_epoch`)." + properties: + shard: + type: integer + format: uint64 + description: "The shard number." + start_epoch: + type: integer + format: uint64 + description: "The first epoch which the crosslinking data references." + end_epoch: + type: integer + format: uint64 + description: "The 'end' epoch referred to by the crosslinking data; no data in this Crosslink should refer to the `end_epoch` since it is not included in the crosslinking data interval." + parent_root: + type: string + format: byte + pattern: "^0x[a-fA-F0-9]{64}$" + description: "Root of the previous crosslink." + data_root: + type: string + format: byte + pattern: "^0x[a-fA-F0-9]{64}$" + description: "Root of the crosslinked shard data since the previous crosslink." + + + responses: + Success: + description: "Request successful." + #TODO: Make response descriptions consistent + InvalidRequest: + description: "Invalid request syntax." + InternalError: + description: "Beacon node internal error." + CurrentlySyncing: + description: "Beacon node is currently syncing, try again later." + NotFound: + description: "The requested API endpoint does not exist." + +externalDocs: + description: Ethereum 2.0 Specification on Github + url: 'https://github.com/ethereum/eth2.0-specs' + + +#TODO: Define the components of the WebSockets API From 7458022fcf1a1efb22fcd89a402a4a39d026dfe1 Mon Sep 17 00:00:00 2001 From: Paul Hauner Date: Mon, 29 Jul 2019 12:08:52 +1000 Subject: [PATCH 05/25] Fork choice bug fixes (#449) * Change reduced tree for adding weightless node * Add more comments for reduced tree fork choice * Small refactor on reduced tree for readability * Move test_harness forking logic into itself * Add new `AncestorIter` trait to store * Add unfinished tests to fork choice * Make `beacon_state.genesis_block_root` public * Add failing lmd_ghost fork choice tests * Extend fork_choice tests, create failing test * Implement Debug for generic ReducedTree * Add lazy_static to fork choice tests * Add verify_integrity fn to reduced tree * Fix bugs in reduced tree * Ensure all reduced tree tests verify integrity * Slightly alter reduce tree test params * Add (failing) reduced tree test * Fix bug in fork choice Iter ancestors was not working well with skip slots * Put maximum depth for common ancestor search Ensures that we don't search back past the finalized root. * Add basic finalization tests for reduced tree * Change fork choice to use beacon_block_root Previously it was using target_root, which was wrong * Make ancestor iter return option * Disable fork choice test when !debug_assertions * Fix type, removed code fragment * Tidy some borrow-checker evading * Lower reduced tree random test iterations --- beacon_node/beacon_chain/src/beacon_chain.rs | 2 +- beacon_node/beacon_chain/src/fork_choice.rs | 9 +- beacon_node/beacon_chain/src/test_utils.rs | 46 +++ beacon_node/beacon_chain/tests/tests.rs | 22 +- beacon_node/store/src/iter.rs | 16 + eth2/lmd_ghost/Cargo.toml | 2 + eth2/lmd_ghost/src/reduced_tree.rs | 358 ++++++++++++------ eth2/lmd_ghost/tests/test.rs | 359 +++++++++++++++++++ 8 files changed, 673 insertions(+), 141 deletions(-) create mode 100644 eth2/lmd_ghost/tests/test.rs diff --git a/beacon_node/beacon_chain/src/beacon_chain.rs b/beacon_node/beacon_chain/src/beacon_chain.rs index e5a163a16..90dc82966 100644 --- a/beacon_node/beacon_chain/src/beacon_chain.rs +++ b/beacon_node/beacon_chain/src/beacon_chain.rs @@ -77,7 +77,7 @@ pub struct BeaconChain { /// to `per_slot_processing`. state: RwLock>, /// The root of the genesis block. - genesis_block_root: Hash256, + pub genesis_block_root: Hash256, /// A state-machine that is updated with information from the network and chooses a canonical /// head block. pub fork_choice: ForkChoice, diff --git a/beacon_node/beacon_chain/src/fork_choice.rs b/beacon_node/beacon_chain/src/fork_choice.rs index 30b20ffe6..b1cacd763 100644 --- a/beacon_node/beacon_chain/src/fork_choice.rs +++ b/beacon_node/beacon_chain/src/fork_choice.rs @@ -125,14 +125,14 @@ impl ForkChoice { state: &BeaconState, attestation: &Attestation, ) -> Result<()> { - // Note: `get_attesting_indices_unsorted` requires that the beacon state caches be built. let validator_indices = get_attesting_indices_unsorted( state, &attestation.data, &attestation.aggregation_bitfield, )?; + let block_slot = state.get_attestation_slot(&attestation.data)?; - let block_hash = attestation.data.target_root; + let block_hash = attestation.data.beacon_block_root; // Ignore any attestations to the zero hash. // @@ -148,11 +148,6 @@ impl ForkChoice { // fine because votes to the genesis block are not useful; all validators implicitly attest // to genesis just by being present in the chain. if block_hash != Hash256::zero() { - let block_slot = attestation - .data - .target_epoch - .start_slot(T::EthSpec::slots_per_epoch()); - for validator_index in validator_indices { self.backend .process_attestation(validator_index, block_hash, block_slot)?; diff --git a/beacon_node/beacon_chain/src/test_utils.rs b/beacon_node/beacon_chain/src/test_utils.rs index 991d29418..7071c861f 100644 --- a/beacon_node/beacon_chain/src/test_utils.rs +++ b/beacon_node/beacon_chain/src/test_utils.rs @@ -64,6 +64,8 @@ where /// A testing harness which can instantiate a `BeaconChain` and populate it with blocks and /// attestations. +/// +/// Used for testing. pub struct BeaconChainHarness where L: LmdGhost, @@ -337,6 +339,50 @@ where }); } + /// Creates two forks: + /// + /// - The "honest" fork: created by the `honest_validators` who have built `honest_fork_blocks` + /// on the head + /// - The "faulty" fork: created by the `faulty_validators` who skipped a slot and + /// then built `faulty_fork_blocks`. + /// + /// Returns `(honest_head, faulty_head)`, the roots of the blocks at the top of each chain. + pub fn generate_two_forks_by_skipping_a_block( + &self, + honest_validators: &[usize], + faulty_validators: &[usize], + honest_fork_blocks: usize, + faulty_fork_blocks: usize, + ) -> (Hash256, Hash256) { + let initial_head_slot = self.chain.head().beacon_block.slot; + + // Move to the next slot so we may produce some more blocks on the head. + self.advance_slot(); + + // Extend the chain with blocks where only honest validators agree. + let honest_head = self.extend_chain( + honest_fork_blocks, + BlockStrategy::OnCanonicalHead, + AttestationStrategy::SomeValidators(honest_validators.to_vec()), + ); + + // Go back to the last block where all agreed, and build blocks upon it where only faulty nodes + // agree. + let faulty_head = self.extend_chain( + faulty_fork_blocks, + BlockStrategy::ForkCanonicalChainAt { + previous_slot: Slot::from(initial_head_slot), + // `initial_head_slot + 2` means one slot is skipped. + first_slot: Slot::from(initial_head_slot + 2), + }, + AttestationStrategy::SomeValidators(faulty_validators.to_vec()), + ); + + assert!(honest_head != faulty_head, "forks should be distinct"); + + (honest_head, faulty_head) + } + /// Returns the secret key for the given validator index. fn get_sk(&self, validator_index: usize) -> &SecretKey { &self.keypairs[validator_index].sk diff --git a/beacon_node/beacon_chain/tests/tests.rs b/beacon_node/beacon_chain/tests/tests.rs index 882d9f235..9a560a15a 100644 --- a/beacon_node/beacon_chain/tests/tests.rs +++ b/beacon_node/beacon_chain/tests/tests.rs @@ -25,7 +25,7 @@ fn get_harness(validator_count: usize) -> BeaconChainHarness { + /// Returns an iterator over the roots of the ancestors of `self`. + fn try_iter_ancestor_roots(&self, store: Arc) -> Option; +} + +impl<'a, U: Store, E: EthSpec> AncestorIter> for BeaconBlock { + /// Iterates across all the prior block roots of `self`, starting at the most recent and ending + /// at genesis. + fn try_iter_ancestor_roots(&self, store: Arc) -> Option> { + let state = store.get::>(&self.state_root).ok()??; + + Some(BestBlockRootsIterator::owned(store, state, self.slot)) + } +} + #[derive(Clone)] pub struct StateRootsIterator<'a, T: EthSpec, U> { store: Arc, diff --git a/eth2/lmd_ghost/Cargo.toml b/eth2/lmd_ghost/Cargo.toml index eaf41730e..636076c46 100644 --- a/eth2/lmd_ghost/Cargo.toml +++ b/eth2/lmd_ghost/Cargo.toml @@ -17,3 +17,5 @@ bls = { path = "../utils/bls" } slot_clock = { path = "../utils/slot_clock" } beacon_chain = { path = "../../beacon_node/beacon_chain" } env_logger = "0.6.0" +lazy_static = "1.3.0" +rand = "0.7" diff --git a/eth2/lmd_ghost/src/reduced_tree.rs b/eth2/lmd_ghost/src/reduced_tree.rs index 7baf45b38..bdf9680a3 100644 --- a/eth2/lmd_ghost/src/reduced_tree.rs +++ b/eth2/lmd_ghost/src/reduced_tree.rs @@ -6,9 +6,10 @@ use super::{LmdGhost, Result as SuperResult}; use parking_lot::RwLock; use std::collections::HashMap; +use std::fmt; use std::marker::PhantomData; use std::sync::Arc; -use store::{iter::BestBlockRootsIterator, Error as StoreError, Store}; +use store::{iter::BlockRootsIterator, Error as StoreError, Store}; use types::{BeaconBlock, BeaconState, EthSpec, Hash256, Slot}; type Result = std::result::Result; @@ -35,6 +36,23 @@ pub struct ThreadSafeReducedTree { core: RwLock>, } +impl fmt::Debug for ThreadSafeReducedTree { + /// `Debug` just defers to the implementation of `self.core`. + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + self.core.fmt(f) + } +} + +impl ThreadSafeReducedTree +where + T: Store, + E: EthSpec, +{ + pub fn verify_integrity(&self) -> std::result::Result<(), String> { + self.core.read().verify_integrity() + } +} + impl LmdGhost for ThreadSafeReducedTree where T: Store, @@ -100,6 +118,12 @@ struct ReducedTree { _phantom: PhantomData, } +impl fmt::Debug for ReducedTree { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + self.nodes.fmt(f) + } +} + impl ReducedTree where T: Store, @@ -126,6 +150,10 @@ where } } + /// Set the root node (the node without any parents) to the given `new_slot` and `new_root`. + /// + /// The given `new_root` must be in the block tree (but not necessarily in the reduced tree). + /// Any nodes which are not a descendant of `new_root` will be removed from the store. pub fn update_root(&mut self, new_slot: Slot, new_root: Hash256) -> Result<()> { if !self.nodes.contains_key(&new_root) { let node = Node { @@ -276,55 +304,54 @@ where Ok(weight) } + /// Removes the vote from `validator_index` from the reduced tree. + /// + /// If the validator had a vote in the tree, the removal of that vote may cause a node to + /// become redundant and removed from the reduced tree. fn remove_latest_message(&mut self, validator_index: usize) -> Result<()> { - if self.latest_votes.get(validator_index).is_some() { - // Unwrap is safe as prior `if` statements ensures the result is `Some`. - let vote = self.latest_votes.get(validator_index).unwrap(); + if let Some(vote) = self.latest_votes.get(validator_index).clone() { + self.get_mut_node(vote.hash)?.remove_voter(validator_index); + let node = self.get_node(vote.hash)?.clone(); - let should_delete = { - self.get_mut_node(vote.hash)?.remove_voter(validator_index); - let node = self.get_node(vote.hash)?.clone(); + if let Some(parent_hash) = node.parent_hash { + if node.has_votes() || node.children.len() > 1 { + // A node with votes or more than one child is never removed. + } else if node.children.len() == 1 { + // A node which has only one child may be removed. + // + // Load the child of the node and set it's parent to be the parent of this + // node (viz., graft the node's child to the node's parent) + let child = self.get_mut_node(node.children[0])?; + child.parent_hash = node.parent_hash; - if let Some(parent_hash) = node.parent_hash { - if node.has_votes() || node.children.len() > 1 { - // A node with votes or more than one child is never removed. - false - } else if node.children.len() == 1 { - // A node which has only one child may be removed. - // - // Load the child of the node and set it's parent to be the parent of this - // node (viz., graft the node's child to the node's parent) - let child = self.get_mut_node(node.children[0])?; - child.parent_hash = node.parent_hash; - - // Graft the parent of this node to it's child. - if let Some(parent_hash) = node.parent_hash { - let parent = self.get_mut_node(parent_hash)?; - parent.replace_child(node.block_hash, node.children[0])?; - } - - true - } else if node.children.is_empty() { - // A node which has no children may be deleted and potentially it's parent - // too. - self.maybe_delete_node(parent_hash)?; - - true - } else { - // It is impossible for a node to have a number of children that is not 0, 1 or - // greater than one. - // - // This code is strictly unnecessary, however we keep it for readability. - unreachable!(); + // Graft the parent of this node to it's child. + if let Some(parent_hash) = node.parent_hash { + let parent = self.get_mut_node(parent_hash)?; + parent.replace_child(node.block_hash, node.children[0])?; } - } else { - // A node without a parent is the genesis/finalized node and should never be removed. - false - } - }; - if should_delete { - self.nodes.remove(&vote.hash); + self.nodes.remove(&vote.hash); + } else if node.children.is_empty() { + // Remove the to-be-deleted node from it's parent. + if let Some(parent_hash) = node.parent_hash { + self.get_mut_node(parent_hash)? + .remove_child(node.block_hash)?; + } + + self.nodes.remove(&vote.hash); + + // A node which has no children may be deleted and potentially it's parent + // too. + self.maybe_delete_node(parent_hash)?; + } else { + // It is impossible for a node to have a number of children that is not 0, 1 or + // greater than one. + // + // This code is strictly unnecessary, however we keep it for readability. + unreachable!(); + } + } else { + // A node without a parent is the genesis/finalized node and should never be removed. } self.latest_votes.insert(validator_index, Some(vote)); @@ -333,23 +360,27 @@ where Ok(()) } + /// Deletes a node if it is unnecessary. + /// + /// Any node is unnecessary if all of the following are true: + /// + /// - it is not the root node. + /// - it only has one child. + /// - it does not have any votes. fn maybe_delete_node(&mut self, hash: Hash256) -> Result<()> { let should_delete = { let node = self.get_node(hash)?.clone(); if let Some(parent_hash) = node.parent_hash { if (node.children.len() == 1) && !node.has_votes() { - // Graft the child to it's grandparent. - let child_hash = { - let child_node = self.get_mut_node(node.children[0])?; - child_node.parent_hash = node.parent_hash; + let child_hash = node.children[0]; - child_node.block_hash - }; + // Graft the single descendant `node` to the `parent` of node. + self.get_mut_node(child_hash)?.parent_hash = Some(parent_hash); - // Graft the grandparent to it's grandchild. - let parent_node = self.get_mut_node(parent_hash)?; - parent_node.replace_child(node.block_hash, child_hash)?; + // Detach `node` from `parent`, replacing it with `child`. + self.get_mut_node(parent_hash)? + .replace_child(hash, child_hash)?; true } else { @@ -385,7 +416,7 @@ where } fn add_weightless_node(&mut self, slot: Slot, hash: Hash256) -> Result<()> { - if slot >= self.root_slot() && !self.nodes.contains_key(&hash) { + if slot > self.root_slot() && !self.nodes.contains_key(&hash) { let node = Node { block_hash: hash, ..Node::default() @@ -393,6 +424,8 @@ where self.add_node(node)?; + // Read the `parent_hash` from the newly created node. If it has a parent (i.e., it's + // not the root), see if it is superfluous. if let Some(parent_hash) = self.get_node(hash)?.parent_hash { self.maybe_delete_node(parent_hash)?; } @@ -401,75 +434,108 @@ where Ok(()) } + /// Add `node` to the reduced tree, returning an error if `node` is not rooted in the tree. fn add_node(&mut self, mut node: Node) -> Result<()> { - // Find the highest (by slot) ancestor of the given hash/block that is in the reduced tree. - let mut prev_in_tree = { - let hash = self - .find_prev_in_tree(node.block_hash) - .ok_or_else(|| Error::NotInTree(node.block_hash))?; - self.get_mut_node(hash)?.clone() - }; - - let mut added = false; + // Find the highest (by slot) ancestor of the given node in the reduced tree. + // + // If this node has no ancestor in the tree, exit early. + let mut prev_in_tree = self + .find_prev_in_tree(node.block_hash) + .ok_or_else(|| Error::NotInTree(node.block_hash)) + .and_then(|hash| self.get_node(hash))? + .clone(); + // If the ancestor of `node` has children, there are three possible operations: + // + // 1. Graft the `node` between two existing nodes. + // 2. Create another node that will be grafted between two existing nodes, then graft + // `node` to it. + // 3. Graft `node` to an existing node. if !prev_in_tree.children.is_empty() { for &child_hash in &prev_in_tree.children { + // 1. Graft the new node between two existing nodes. + // + // If `node` is a descendant of `prev_in_tree` but an ancestor of a child connected to + // `prev_in_tree`. + // + // This means that `node` can be grafted between `prev_in_tree` and the child that is a + // descendant of both `node` and `prev_in_tree`. if self .iter_ancestors(child_hash)? .any(|(ancestor, _slot)| ancestor == node.block_hash) { let child = self.get_mut_node(child_hash)?; + // Graft `child` to `node`. child.parent_hash = Some(node.block_hash); + // Graft `node` to `child`. node.children.push(child_hash); + // Detach `child` from `prev_in_tree`, replacing it with `node`. prev_in_tree.replace_child(child_hash, node.block_hash)?; + // Graft `node` to `prev_in_tree`. node.parent_hash = Some(prev_in_tree.block_hash); - added = true; - break; } } - if !added { + // 2. Create another node that will be grafted between two existing nodes, then graft + // `node` to it. + // + // Note: given that `prev_in_tree` has children and that `node` is not an ancestor of + // any of the children of `prev_in_tree`, we know that `node` is on a different fork to + // all of the children of `prev_in_tree`. + if node.parent_hash.is_none() { for &child_hash in &prev_in_tree.children { + // Find the highest (by slot) common ancestor between `node` and `child`. + // + // The common ancestor is the last block before `node` and `child` forked. let ancestor_hash = - self.find_least_common_ancestor(node.block_hash, child_hash)?; + self.find_highest_common_ancestor(node.block_hash, child_hash)?; + // If the block before `node` and `child` forked is _not_ `prev_in_tree` we + // must add this new block into the tree (because it is a decision node + // between two forks). if ancestor_hash != prev_in_tree.block_hash { let child = self.get_mut_node(child_hash)?; + + // Create a new `common_ancestor` node which represents the `ancestor_hash` + // block, has `prev_in_tree` as the parent and has both `node` and `child` + // as children. let common_ancestor = Node { block_hash: ancestor_hash, parent_hash: Some(prev_in_tree.block_hash), children: vec![node.block_hash, child_hash], ..Node::default() }; + + // Graft `child` and `node` to `common_ancestor`. child.parent_hash = Some(common_ancestor.block_hash); node.parent_hash = Some(common_ancestor.block_hash); - prev_in_tree.replace_child(child_hash, ancestor_hash)?; + // Detach `child` from `prev_in_tree`, replacing it with `common_ancestor`. + prev_in_tree.replace_child(child_hash, common_ancestor.block_hash)?; + // Store the new `common_ancestor` node. self.nodes .insert(common_ancestor.block_hash, common_ancestor); - added = true; - break; } } } } - if !added { + if node.parent_hash.is_none() { + // 3. Graft `node` to an existing node. + // + // Graft `node` to `prev_in_tree` and `prev_in_tree` to `node` node.parent_hash = Some(prev_in_tree.block_hash); prev_in_tree.children.push(node.block_hash); } // Update `prev_in_tree`. A mutable reference was not maintained to satisfy the borrow - // checker. - // - // This is not an ideal solution and results in unnecessary memory copies -- a better - // solution is certainly possible. + // checker. Perhaps there's a better way? self.nodes.insert(prev_in_tree.block_hash, prev_in_tree); self.nodes.insert(node.block_hash, node); @@ -485,62 +551,112 @@ where .and_then(|(root, _slot)| Some(root)) } - /// For the given `child` block hash, return the block's ancestor at the given `target` slot. - fn find_ancestor_at_slot(&self, child: Hash256, target: Slot) -> Result { - let (root, slot) = self - .iter_ancestors(child)? - .find(|(_block, slot)| *slot <= target) - .ok_or_else(|| Error::NotInTree(child))?; - - // Explicitly check that the slot is the target in the case that the given child has a slot - // above target. - if slot == target { - Ok(root) - } else { - Err(Error::NotInTree(child)) - } - } - /// For the two given block roots (`a_root` and `b_root`), find the first block they share in /// the tree. Viz, find the block that these two distinct blocks forked from. - fn find_least_common_ancestor(&self, a_root: Hash256, b_root: Hash256) -> Result { - // If the blocks behind `a_root` and `b_root` are not at the same slot, take the highest - // block (by slot) down to be equal with the lower slot. - // - // The result is two roots which identify two blocks at the same height. - let (a_root, b_root) = { - let a = self.get_block(a_root)?; - let b = self.get_block(b_root)?; + fn find_highest_common_ancestor(&self, a_root: Hash256, b_root: Hash256) -> Result { + let mut a_iter = self.iter_ancestors(a_root)?; + let mut b_iter = self.iter_ancestors(b_root)?; - if a.slot > b.slot { - (self.find_ancestor_at_slot(a_root, b.slot)?, b_root) - } else if b.slot > a.slot { - (a_root, self.find_ancestor_at_slot(b_root, a.slot)?) - } else { - (a_root, b_root) + // Combines the `next()` fns on the `a_iter` and `b_iter` and returns the roots of two + // blocks at the same slot, or `None` if we have gone past genesis or the root of this tree. + let mut iter_blocks_at_same_height = || -> Option<(Hash256, Hash256)> { + match (a_iter.next(), b_iter.next()) { + (Some((mut a_root, a_slot)), Some((mut b_root, b_slot))) => { + // If either of the slots are lower than the root of this tree, exit early. + if a_slot < self.root.1 || b_slot < self.root.1 { + None + } else { + if a_slot < b_slot { + for _ in a_slot.as_u64()..b_slot.as_u64() { + b_root = b_iter.next()?.0; + } + } else if a_slot > b_slot { + for _ in b_slot.as_u64()..a_slot.as_u64() { + a_root = a_iter.next()?.0; + } + } + + Some((a_root, b_root)) + } + } + _ => None, } }; - let ((a_root, _a_slot), (_b_root, _b_slot)) = self - .iter_ancestors(a_root)? - .zip(self.iter_ancestors(b_root)?) - .find(|((a_root, _), (b_root, _))| a_root == b_root) - .ok_or_else(|| Error::NoCommonAncestor((a_root, b_root)))?; - - Ok(a_root) + loop { + match iter_blocks_at_same_height() { + Some((a_root, b_root)) if a_root == b_root => break Ok(a_root), + Some(_) => (), + None => break Err(Error::NoCommonAncestor((a_root, b_root))), + } + } } - fn iter_ancestors(&self, child: Hash256) -> Result> { + fn iter_ancestors(&self, child: Hash256) -> Result> { let block = self.get_block(child)?; let state = self.get_state(block.state_root)?; - Ok(BestBlockRootsIterator::owned( + Ok(BlockRootsIterator::owned( self.store.clone(), state, block.slot - 1, )) } + /// Verify the integrity of `self`. Returns `Ok(())` if the tree has integrity, otherwise returns `Err(description)`. + /// + /// Tries to detect the following erroneous conditions: + /// + /// - Dangling references inside the tree. + /// - Any scenario where there's not exactly one root node. + /// + /// ## Notes + /// + /// Computationally intensive, likely only useful during testing. + pub fn verify_integrity(&self) -> std::result::Result<(), String> { + let num_root_nodes = self + .nodes + .iter() + .filter(|(_key, node)| node.parent_hash.is_none()) + .count(); + + if num_root_nodes != 1 { + return Err(format!( + "Tree has {} roots, should have exactly one.", + num_root_nodes + )); + } + + let verify_node_exists = |key: Hash256, msg: String| -> std::result::Result<(), String> { + if self.nodes.contains_key(&key) { + Ok(()) + } else { + Err(msg) + } + }; + + // Iterate through all the nodes and ensure all references they store are valid. + self.nodes + .iter() + .map(|(_key, node)| { + if let Some(parent_hash) = node.parent_hash { + verify_node_exists(parent_hash, "parent must exist".to_string())?; + } + + node.children + .iter() + .map(|child| verify_node_exists(*child, "child_must_exist".to_string())) + .collect::>()?; + + verify_node_exists(node.block_hash, "block hash must exist".to_string())?; + + Ok(()) + }) + .collect::>()?; + + Ok(()) + } + fn get_node(&self, hash: Hash256) -> Result<&Node> { self.nodes .get(&hash) @@ -595,6 +711,18 @@ impl Node { Ok(()) } + pub fn remove_child(&mut self, child: Hash256) -> Result<()> { + let i = self + .children + .iter() + .position(|&c| c == child) + .ok_or_else(|| Error::MissingChild(child))?; + + self.children.remove(i); + + Ok(()) + } + pub fn remove_voter(&mut self, voter: usize) -> Option { let i = self.voters.iter().position(|&v| v == voter)?; Some(self.voters.remove(i)) diff --git a/eth2/lmd_ghost/tests/test.rs b/eth2/lmd_ghost/tests/test.rs new file mode 100644 index 000000000..5c6f01155 --- /dev/null +++ b/eth2/lmd_ghost/tests/test.rs @@ -0,0 +1,359 @@ +#![cfg(not(debug_assertions))] + +#[macro_use] +extern crate lazy_static; + +use beacon_chain::test_utils::{ + AttestationStrategy, BeaconChainHarness as BaseBeaconChainHarness, BlockStrategy, +}; +use lmd_ghost::{LmdGhost, ThreadSafeReducedTree as BaseThreadSafeReducedTree}; +use rand::{prelude::*, rngs::StdRng}; +use std::sync::Arc; +use store::{ + iter::{AncestorIter, BestBlockRootsIterator}, + MemoryStore, Store, +}; +use types::{BeaconBlock, EthSpec, Hash256, MinimalEthSpec, Slot}; + +// Should ideally be divisible by 3. +pub const VALIDATOR_COUNT: usize = 3 * 8; + +type TestEthSpec = MinimalEthSpec; +type ThreadSafeReducedTree = BaseThreadSafeReducedTree; +type BeaconChainHarness = BaseBeaconChainHarness; +type RootAndSlot = (Hash256, Slot); + +lazy_static! { + /// A lazy-static instance of a `BeaconChainHarness` that contains two forks. + /// + /// Reduces test setup time by providing a common harness. + static ref FORKED_HARNESS: ForkedHarness = ForkedHarness::new(); +} + +/// Contains a `BeaconChainHarness` that has two forks, caused by a validator skipping a slot and +/// then some validators building on one head and some on the other. +/// +/// Care should be taken to ensure that the `ForkedHarness` does not expose any interior mutability +/// from it's fields. This would cause cross-contamination between tests when used with +/// `lazy_static`. +struct ForkedHarness { + /// Private (not `pub`) because the `BeaconChainHarness` has interior mutability. We + /// don't expose it to avoid contamination between tests. + harness: BeaconChainHarness, + pub genesis_block_root: Hash256, + pub genesis_block: BeaconBlock, + pub honest_head: RootAndSlot, + pub faulty_head: RootAndSlot, + pub honest_roots: Vec, + pub faulty_roots: Vec, +} + +impl ForkedHarness { + /// A new standard instance of with constant parameters. + pub fn new() -> Self { + // let (harness, honest_roots, faulty_roots) = get_harness_containing_two_forks(); + let harness = BeaconChainHarness::new(VALIDATOR_COUNT); + + // Move past the zero slot. + harness.advance_slot(); + + let delay = TestEthSpec::default_spec().min_attestation_inclusion_delay as usize; + + let initial_blocks = delay + 5; + + // Build an initial chain where all validators agree. + harness.extend_chain( + initial_blocks, + BlockStrategy::OnCanonicalHead, + AttestationStrategy::AllValidators, + ); + + let two_thirds = (VALIDATOR_COUNT / 3) * 2; + let honest_validators: Vec = (0..two_thirds).collect(); + let faulty_validators: Vec = (two_thirds..VALIDATOR_COUNT).collect(); + let honest_fork_blocks = delay + 5; + let faulty_fork_blocks = delay + 5; + + let (honest_head, faulty_head) = harness.generate_two_forks_by_skipping_a_block( + &honest_validators, + &faulty_validators, + honest_fork_blocks, + faulty_fork_blocks, + ); + + let mut honest_roots = + get_ancestor_roots::(harness.chain.store.clone(), honest_head); + + honest_roots.insert( + 0, + (honest_head, get_slot_for_block_root(&harness, honest_head)), + ); + + let mut faulty_roots = + get_ancestor_roots::(harness.chain.store.clone(), faulty_head); + + faulty_roots.insert( + 0, + (faulty_head, get_slot_for_block_root(&harness, faulty_head)), + ); + + let genesis_block_root = harness.chain.genesis_block_root; + let genesis_block = harness + .chain + .store + .get::(&genesis_block_root) + .expect("Genesis block should exist") + .expect("DB should not error"); + + Self { + harness, + genesis_block_root, + genesis_block, + honest_head: *honest_roots.last().expect("Chain cannot be empty"), + faulty_head: *faulty_roots.last().expect("Chain cannot be empty"), + honest_roots, + faulty_roots, + } + } + + pub fn store_clone(&self) -> MemoryStore { + (*self.harness.chain.store).clone() + } + + /// Return a brand-new, empty fork choice with a reference to `harness.store`. + pub fn new_fork_choice(&self) -> ThreadSafeReducedTree { + // Take a full clone of the store built by the harness. + // + // Taking a clone here ensures that each fork choice gets it's own store so there is no + // cross-contamination between tests. + let store: MemoryStore = self.store_clone(); + + ThreadSafeReducedTree::new( + Arc::new(store), + &self.genesis_block, + self.genesis_block_root, + ) + } + + pub fn all_block_roots(&self) -> Vec { + let mut all_roots = self.honest_roots.clone(); + all_roots.append(&mut self.faulty_roots.clone()); + + all_roots.dedup(); + + all_roots + } + + pub fn weight_function(_validator_index: usize) -> Option { + Some(1) + } +} + +/// Helper: returns all the ancestor roots and slots for a given block_root. +fn get_ancestor_roots( + store: Arc, + block_root: Hash256, +) -> Vec<(Hash256, Slot)> { + let block = store + .get::(&block_root) + .expect("block should exist") + .expect("store should not error"); + + >>::try_iter_ancestor_roots( + &block, store, + ) + .expect("should be able to create ancestor iter") + .collect() +} + +/// Helper: returns the slot for some block_root. +fn get_slot_for_block_root(harness: &BeaconChainHarness, block_root: Hash256) -> Slot { + harness + .chain + .store + .get::(&block_root) + .expect("head block should exist") + .expect("DB should not error") + .slot +} + +const RANDOM_ITERATIONS: usize = 50; +const RANDOM_ACTIONS_PER_ITERATION: usize = 100; + +/// Create a single LMD instance and have one validator vote in reverse (highest to lowest slot) +/// down the chain. +#[test] +fn random_scenario() { + let harness = &FORKED_HARNESS; + let block_roots = harness.all_block_roots(); + let validators: Vec = (0..VALIDATOR_COUNT).collect(); + let mut rng = StdRng::seed_from_u64(9375205782030385); // Keyboard mash. + + for _ in 0..RANDOM_ITERATIONS { + let lmd = harness.new_fork_choice(); + + for _ in 0..RANDOM_ACTIONS_PER_ITERATION { + let (root, slot) = block_roots[rng.next_u64() as usize % block_roots.len()]; + let validator_index = validators[rng.next_u64() as usize % validators.len()]; + + lmd.process_attestation(validator_index, root, slot) + .expect("fork choice should accept randomly-placed attestations"); + + assert_eq!( + lmd.verify_integrity(), + Ok(()), + "New tree should have integrity" + ); + } + } +} + +/// Create a single LMD instance and have one validator vote in reverse (highest to lowest slot) +/// down the chain. +#[test] +fn single_voter_persistent_instance_reverse_order() { + let harness = &FORKED_HARNESS; + + let lmd = harness.new_fork_choice(); + + assert_eq!( + lmd.verify_integrity(), + Ok(()), + "New tree should have integrity" + ); + + for (root, slot) in harness.honest_roots.iter().rev() { + lmd.process_attestation(0, *root, *slot) + .expect("fork choice should accept attestations to honest roots in reverse"); + + assert_eq!( + lmd.verify_integrity(), + Ok(()), + "Tree integrity should be maintained whilst processing attestations" + ); + } + + // The honest head should be selected. + let (head_root, head_slot) = harness.honest_roots.first().unwrap(); + let (finalized_root, _) = harness.honest_roots.last().unwrap(); + + assert_eq!( + lmd.find_head(*head_slot, *finalized_root, ForkedHarness::weight_function), + Ok(*head_root), + "Honest head should be selected" + ); +} + +/// A single validator applies a single vote to each block in the honest fork, using a new tree +/// each time. +#[test] +fn single_voter_many_instance_honest_blocks_voting_forwards() { + let harness = &FORKED_HARNESS; + + for (root, slot) in &harness.honest_roots { + let lmd = harness.new_fork_choice(); + lmd.process_attestation(0, *root, *slot) + .expect("fork choice should accept attestations to honest roots"); + + assert_eq!( + lmd.verify_integrity(), + Ok(()), + "Tree integrity should be maintained whilst processing attestations" + ); + } +} + +/// Same as above, but in reverse order (votes on the highest honest block first). +#[test] +fn single_voter_many_instance_honest_blocks_voting_in_reverse() { + let harness = &FORKED_HARNESS; + + // Same as above, but in reverse order (votes on the highest honest block first). + for (root, slot) in harness.honest_roots.iter().rev() { + let lmd = harness.new_fork_choice(); + lmd.process_attestation(0, *root, *slot) + .expect("fork choice should accept attestations to honest roots in reverse"); + + assert_eq!( + lmd.verify_integrity(), + Ok(()), + "Tree integrity should be maintained whilst processing attestations" + ); + } +} + +/// A single validator applies a single vote to each block in the faulty fork, using a new tree +/// each time. +#[test] +fn single_voter_many_instance_faulty_blocks_voting_forwards() { + let harness = &FORKED_HARNESS; + + for (root, slot) in &harness.faulty_roots { + let lmd = harness.new_fork_choice(); + lmd.process_attestation(0, *root, *slot) + .expect("fork choice should accept attestations to faulty roots"); + + assert_eq!( + lmd.verify_integrity(), + Ok(()), + "Tree integrity should be maintained whilst processing attestations" + ); + } +} + +/// Same as above, but in reverse order (votes on the highest faulty block first). +#[test] +fn single_voter_many_instance_faulty_blocks_voting_in_reverse() { + let harness = &FORKED_HARNESS; + + for (root, slot) in harness.faulty_roots.iter().rev() { + let lmd = harness.new_fork_choice(); + lmd.process_attestation(0, *root, *slot) + .expect("fork choice should accept attestations to faulty roots in reverse"); + + assert_eq!( + lmd.verify_integrity(), + Ok(()), + "Tree integrity should be maintained whilst processing attestations" + ); + } +} + +/// Ensures that the finalized root can be set to all values in `roots`. +fn test_update_finalized_root(roots: &[(Hash256, Slot)]) { + let harness = &FORKED_HARNESS; + + let lmd = harness.new_fork_choice(); + + for (root, _slot) in roots.iter().rev() { + let block = harness + .store_clone() + .get::(root) + .expect("block should exist") + .expect("db should not error"); + lmd.update_finalized_root(&block, *root) + .expect("finalized root should update for faulty fork"); + + assert_eq!( + lmd.verify_integrity(), + Ok(()), + "Tree integrity should be maintained after updating the finalized root" + ); + } +} + +/// Iterates from low-to-high slot through the faulty roots, updating the finalized root. +#[test] +fn update_finalized_root_faulty() { + let harness = &FORKED_HARNESS; + + test_update_finalized_root(&harness.faulty_roots) +} + +/// Iterates from low-to-high slot through the honest roots, updating the finalized root. +#[test] +fn update_finalized_root_honest() { + let harness = &FORKED_HARNESS; + + test_update_finalized_root(&harness.honest_roots) +} From 1b26a36ebc2117459c9605b6f58b874161af3d7d Mon Sep 17 00:00:00 2001 From: John Adler Date: Sun, 28 Jul 2019 22:17:04 -0400 Subject: [PATCH 06/25] Verify transfer now checks for amount + fees. (#457) --- .../src/per_block_processing/verify_transfer.rs | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/eth2/state_processing/src/per_block_processing/verify_transfer.rs b/eth2/state_processing/src/per_block_processing/verify_transfer.rs index 20a16959b..d42b7d1f2 100644 --- a/eth2/state_processing/src/per_block_processing/verify_transfer.rs +++ b/eth2/state_processing/src/per_block_processing/verify_transfer.rs @@ -62,8 +62,8 @@ fn verify_transfer_parametric( // Verify the sender has adequate balance. verify!( - time_independent_only || sender_balance >= transfer.amount, - Invalid::FromBalanceInsufficient(transfer.amount, sender_balance) + time_independent_only || sender_balance >= total_amount, + Invalid::FromBalanceInsufficient(total_amount, sender_balance) ); // Verify sender balance will not be "dust" (i.e., greater than zero but less than the minimum deposit From 177df12149f2719c305958a76bcdca71ee7e83e4 Mon Sep 17 00:00:00 2001 From: Paul Hauner Date: Mon, 29 Jul 2019 13:45:45 +1000 Subject: [PATCH 07/25] Testnet stability (#451) * Change reduced tree for adding weightless node * Add more comments for reduced tree fork choice * Small refactor on reduced tree for readability * Move test_harness forking logic into itself * Add new `AncestorIter` trait to store * Add unfinished tests to fork choice * Make `beacon_state.genesis_block_root` public * Add failing lmd_ghost fork choice tests * Extend fork_choice tests, create failing test * Implement Debug for generic ReducedTree * Add lazy_static to fork choice tests * Add verify_integrity fn to reduced tree * Fix bugs in reduced tree * Ensure all reduced tree tests verify integrity * Slightly alter reduce tree test params * Add (failing) reduced tree test * Fix bug in fork choice Iter ancestors was not working well with skip slots * Put maximum depth for common ancestor search Ensures that we don't search back past the finalized root. * Add basic finalization tests for reduced tree * Change fork choice to use beacon_block_root Previously it was using target_root, which was wrong * Change reduced tree for adding weightless node * Add more comments for reduced tree fork choice * Small refactor on reduced tree for readability * Move test_harness forking logic into itself * Add new `AncestorIter` trait to store * Add unfinished tests to fork choice * Make `beacon_state.genesis_block_root` public * Add failing lmd_ghost fork choice tests * Extend fork_choice tests, create failing test * Implement Debug for generic ReducedTree * Add lazy_static to fork choice tests * Add verify_integrity fn to reduced tree * Fix bugs in reduced tree * Ensure all reduced tree tests verify integrity * Slightly alter reduce tree test params * Add (failing) reduced tree test * Fix bug in fork choice Iter ancestors was not working well with skip slots * Put maximum depth for common ancestor search Ensures that we don't search back past the finalized root. * Add basic finalization tests for reduced tree * Add network dir CLI flag * Simplify "NewSlot" log message * Rename network-dir CLI flag * Change fork choice to use beacon_block_root Previously it was using target_root, which was wrong * Update db dir size for metrics * Change slog to use `FullFormat` logging * Update some comments and log formatting * Add prom gauge for best block root * Only add known target blocks to fork choice * Add finalized and justified root prom metrics * Add CLI flag for setting log level * Add logger to beacon chain * Add debug-level CLI flag to validator * Allow block processing if fork choice fails * Create warn log when there's low libp2p peer count * Minor change to logging * Make ancestor iter return option * Disable fork choice test when !debug_assertions * Fix type, removed code fragment * Tidy some borrow-checker evading * Lower reduced tree random test iterations --- beacon_node/beacon_chain/Cargo.toml | 2 + beacon_node/beacon_chain/src/beacon_chain.rs | 43 +++++++++++++- beacon_node/beacon_chain/src/fork_choice.rs | 26 ++++++--- beacon_node/beacon_chain/src/test_utils.rs | 5 ++ beacon_node/client/src/beacon_chain_types.rs | 6 +- beacon_node/client/src/lib.rs | 45 +++++++++------ beacon_node/client/src/notifier.rs | 18 ++++-- beacon_node/eth2-libp2p/src/config.rs | 6 ++ .../http_server/src/metrics/local_metrics.rs | 57 +++++++++++++++++-- beacon_node/network/src/sync/simple_sync.rs | 2 +- beacon_node/src/main.rs | 30 +++++++++- beacon_node/src/run.rs | 12 +++- validator_client/src/main.rs | 24 +++++++- 13 files changed, 231 insertions(+), 45 deletions(-) diff --git a/beacon_node/beacon_chain/Cargo.toml b/beacon_node/beacon_chain/Cargo.toml index 5b5ae3780..af6736ede 100644 --- a/beacon_node/beacon_chain/Cargo.toml +++ b/beacon_node/beacon_chain/Cargo.toml @@ -12,6 +12,8 @@ log = "0.4" operation_pool = { path = "../../eth2/operation_pool" } serde = "1.0" serde_derive = "1.0" +slog = { version = "^2.2.3" , features = ["max_level_trace"] } +sloggers = { version = "^0.3" } slot_clock = { path = "../../eth2/utils/slot_clock" } eth2_ssz = { path = "../../eth2/utils/ssz" } eth2_ssz_derive = { path = "../../eth2/utils/ssz_derive" } diff --git a/beacon_node/beacon_chain/src/beacon_chain.rs b/beacon_node/beacon_chain/src/beacon_chain.rs index 90dc82966..561832033 100644 --- a/beacon_node/beacon_chain/src/beacon_chain.rs +++ b/beacon_node/beacon_chain/src/beacon_chain.rs @@ -8,6 +8,7 @@ use log::trace; use operation_pool::DepositInsertStatus; use operation_pool::{OperationPool, PersistedOperationPool}; use parking_lot::{RwLock, RwLockReadGuard}; +use slog::{error, info, warn, Logger}; use slot_clock::SlotClock; use state_processing::per_block_processing::errors::{ AttestationValidationError, AttesterSlashingValidationError, DepositValidationError, @@ -83,6 +84,8 @@ pub struct BeaconChain { pub fork_choice: ForkChoice, /// Stores metrics about this `BeaconChain`. pub metrics: Metrics, + /// Logging to CLI, etc. + log: Logger, } impl BeaconChain { @@ -93,6 +96,7 @@ impl BeaconChain { mut genesis_state: BeaconState, genesis_block: BeaconBlock, spec: ChainSpec, + log: Logger, ) -> Result { genesis_state.build_all_caches(&spec)?; @@ -123,6 +127,7 @@ impl BeaconChain { fork_choice: ForkChoice::new(store.clone(), &genesis_block, genesis_block_root), metrics: Metrics::new()?, store, + log, }) } @@ -130,6 +135,7 @@ impl BeaconChain { pub fn from_store( store: Arc, spec: ChainSpec, + log: Logger, ) -> Result>, Error> { let key = Hash256::from_slice(&BEACON_CHAIN_DB_KEY.as_bytes()); let p: PersistedBeaconChain = match store.get(&key) { @@ -159,6 +165,7 @@ impl BeaconChain { genesis_block_root: p.genesis_block_root, metrics: Metrics::new()?, store, + log, })) } @@ -646,13 +653,27 @@ impl BeaconChain { self.store.put(&state_root, &state)?; // Register the new block with the fork choice service. - self.fork_choice.process_block(&state, &block, block_root)?; + if let Err(e) = self.fork_choice.process_block(&state, &block, block_root) { + error!( + self.log, + "fork choice failed to process_block"; + "error" => format!("{:?}", e), + "block_root" => format!("{}", block_root), + "block_slot" => format!("{}", block.slot) + ) + } // Execute the fork choice algorithm, enthroning a new head if discovered. // // Note: in the future we may choose to run fork-choice less often, potentially based upon // some heuristic around number of attestations seen for the block. - self.fork_choice()?; + if let Err(e) = self.fork_choice() { + error!( + self.log, + "fork choice failed to find head"; + "error" => format!("{:?}", e) + ) + }; self.metrics.block_processing_successes.inc(); self.metrics @@ -780,9 +801,27 @@ impl BeaconChain { .get(&beacon_state_root)? .ok_or_else(|| Error::MissingBeaconState(beacon_state_root))?; + let previous_slot = self.head().beacon_block.slot; + let new_slot = beacon_block.slot; + // If we switched to a new chain (instead of building atop the present chain). if self.head().beacon_block_root != beacon_block.previous_block_root { self.metrics.fork_choice_reorg_count.inc(); + warn!( + self.log, + "Beacon chain re-org"; + "previous_slot" => previous_slot, + "new_slot" => new_slot + ); + } else { + info!( + self.log, + "new head block"; + "justified_root" => format!("{}", beacon_state.current_justified_root), + "finalized_root" => format!("{}", beacon_state.finalized_root), + "root" => format!("{}", beacon_block_root), + "slot" => new_slot, + ); }; let old_finalized_epoch = self.head().beacon_state.finalized_epoch; diff --git a/beacon_node/beacon_chain/src/fork_choice.rs b/beacon_node/beacon_chain/src/fork_choice.rs index b1cacd763..f72fe65fe 100644 --- a/beacon_node/beacon_chain/src/fork_choice.rs +++ b/beacon_node/beacon_chain/src/fork_choice.rs @@ -18,6 +18,7 @@ pub enum Error { pub struct ForkChoice { backend: T::LmdGhost, + store: Arc, /// Used for resolving the `0x00..00` alias back to genesis. /// /// Does not necessarily need to be the _actual_ genesis, it suffices to be the finalized root @@ -36,6 +37,7 @@ impl ForkChoice { genesis_block_root: Hash256, ) -> Self { Self { + store: store.clone(), backend: T::LmdGhost::new(store, genesis_block, genesis_block_root), genesis_block_root, } @@ -125,13 +127,6 @@ impl ForkChoice { state: &BeaconState, attestation: &Attestation, ) -> Result<()> { - let validator_indices = get_attesting_indices_unsorted( - state, - &attestation.data, - &attestation.aggregation_bitfield, - )?; - let block_slot = state.get_attestation_slot(&attestation.data)?; - let block_hash = attestation.data.beacon_block_root; // Ignore any attestations to the zero hash. @@ -147,7 +142,22 @@ impl ForkChoice { // (1) becomes weird once we hit finality and fork choice drops the genesis block. (2) is // fine because votes to the genesis block are not useful; all validators implicitly attest // to genesis just by being present in the chain. - if block_hash != Hash256::zero() { + // + // Additionally, don't add any block hash to fork choice unless we have imported the block. + if block_hash != Hash256::zero() + && self + .store + .exists::(&block_hash) + .unwrap_or(false) + { + let validator_indices = get_attesting_indices_unsorted( + state, + &attestation.data, + &attestation.aggregation_bitfield, + )?; + + let block_slot = state.get_attestation_slot(&attestation.data)?; + for validator_index in validator_indices { self.backend .process_attestation(validator_index, block_hash, block_slot)?; diff --git a/beacon_node/beacon_chain/src/test_utils.rs b/beacon_node/beacon_chain/src/test_utils.rs index 7071c861f..19c1d9d15 100644 --- a/beacon_node/beacon_chain/src/test_utils.rs +++ b/beacon_node/beacon_chain/src/test_utils.rs @@ -1,5 +1,6 @@ use crate::{BeaconChain, BeaconChainTypes, BlockProcessingOutcome}; use lmd_ghost::LmdGhost; +use sloggers::{null::NullLoggerBuilder, Build}; use slot_clock::SlotClock; use slot_clock::TestingSlotClock; use state_processing::per_slot_processing; @@ -94,6 +95,9 @@ where let mut genesis_block = BeaconBlock::empty(&spec); genesis_block.state_root = Hash256::from_slice(&genesis_state.tree_hash_root()); + let builder = NullLoggerBuilder; + let log = builder.build().expect("logger should build"); + // Slot clock let slot_clock = TestingSlotClock::new( spec.genesis_slot, @@ -107,6 +111,7 @@ where genesis_state, genesis_block, spec.clone(), + log, ) .expect("Terminate if beacon chain generation fails"); diff --git a/beacon_node/client/src/beacon_chain_types.rs b/beacon_node/client/src/beacon_chain_types.rs index c923f724c..f332092ca 100644 --- a/beacon_node/client/src/beacon_chain_types.rs +++ b/beacon_node/client/src/beacon_chain_types.rs @@ -49,7 +49,9 @@ where T: BeaconChainTypes, T::LmdGhost: LmdGhost, { - if let Ok(Some(beacon_chain)) = BeaconChain::from_store(store.clone(), spec.clone()) { + if let Ok(Some(beacon_chain)) = + BeaconChain::from_store(store.clone(), spec.clone(), log.clone()) + { info!( log, "Loaded BeaconChain from store"; @@ -78,7 +80,7 @@ where // Genesis chain //TODO: Handle error correctly - BeaconChain::from_genesis(store, slot_clock, genesis_state, genesis_block, spec) + BeaconChain::from_genesis(store, slot_clock, genesis_state, genesis_block, spec, log) .expect("Terminate if beacon chain generation fails") } } diff --git a/beacon_node/client/src/lib.rs b/beacon_node/client/src/lib.rs index 7eee8ac0a..1b9f320be 100644 --- a/beacon_node/client/src/lib.rs +++ b/beacon_node/client/src/lib.rs @@ -190,29 +190,38 @@ impl Drop for Client { } fn do_state_catchup(chain: &Arc>, log: &slog::Logger) { - if let Some(genesis_height) = chain.slots_since_genesis() { - let result = chain.catchup_state(); + // Only attempt to `catchup_state` if we can read the slot clock. + if let Some(current_slot) = chain.read_slot_clock() { + let state_catchup_result = chain.catchup_state(); + + let best_slot = chain.head().beacon_block.slot; + let latest_block_root = chain.head().beacon_block_root; let common = o!( - "best_slot" => chain.head().beacon_block.slot, - "latest_block_root" => format!("{}", chain.head().beacon_block_root), - "wall_clock_slot" => chain.read_slot_clock().unwrap(), - "state_slot" => chain.head().beacon_state.slot, - "slots_since_genesis" => genesis_height, + "skip_slots" => current_slot.saturating_sub(best_slot), + "best_block_root" => format!("{}", latest_block_root), + "best_block_slot" => best_slot, + "slot" => current_slot, ); - match result { - Ok(_) => info!( + if let Err(e) = state_catchup_result { + error!( log, - "NewSlot"; - common - ), - Err(e) => error!( - log, - "StateCatchupFailed"; + "State catchup failed"; "error" => format!("{:?}", e), common - ), - }; - } + ) + } else { + info!( + log, + "Slot start"; + common + ) + } + } else { + error!( + log, + "Beacon chain running whilst slot clock is unavailable." + ); + }; } diff --git a/beacon_node/client/src/notifier.rs b/beacon_node/client/src/notifier.rs index ff6c1b230..1c7cf3867 100644 --- a/beacon_node/client/src/notifier.rs +++ b/beacon_node/client/src/notifier.rs @@ -2,7 +2,7 @@ use crate::Client; use beacon_chain::BeaconChainTypes; use exit_future::Exit; use futures::{Future, Stream}; -use slog::{debug, o}; +use slog::{debug, o, warn}; use std::time::{Duration, Instant}; use tokio::runtime::TaskExecutor; use tokio::timer::Interval; @@ -10,6 +10,9 @@ use tokio::timer::Interval; /// The interval between heartbeat events. pub const HEARTBEAT_INTERVAL_SECONDS: u64 = 15; +/// Create a warning log whenever the peer count is at or below this value. +pub const WARN_PEER_COUNT: usize = 1; + /// Spawns a thread that can be used to run code periodically, on `HEARTBEAT_INTERVAL_SECONDS` /// durations. /// @@ -30,9 +33,16 @@ pub fn run( let libp2p = client.network.libp2p_service(); let heartbeat = move |_| { - // Notify the number of connected nodes - // Panic if libp2p is poisoned - debug!(log, ""; "Connected Peers" => libp2p.lock().swarm.connected_peers()); + // Number of libp2p (not discv5) peers connected. + // + // Panics if libp2p is poisoned. + let connected_peer_count = libp2p.lock().swarm.connected_peers(); + + debug!(log, "libp2p"; "peer_count" => connected_peer_count); + + if connected_peer_count <= WARN_PEER_COUNT { + warn!(log, "Low libp2p peer count"; "peer_count" => connected_peer_count); + } Ok(()) }; diff --git a/beacon_node/eth2-libp2p/src/config.rs b/beacon_node/eth2-libp2p/src/config.rs index 4c6f0b6da..7391dba8a 100644 --- a/beacon_node/eth2-libp2p/src/config.rs +++ b/beacon_node/eth2-libp2p/src/config.rs @@ -79,10 +79,16 @@ impl Config { } pub fn apply_cli_args(&mut self, args: &ArgMatches) -> Result<(), String> { + // If a `datadir` has been specified, set the network dir to be inside it. if let Some(dir) = args.value_of("datadir") { self.network_dir = PathBuf::from(dir).join("network"); }; + // If a network dir has been specified, override the `datadir` definition. + if let Some(dir) = args.value_of("network-dir") { + self.network_dir = PathBuf::from(dir); + }; + if let Some(listen_address_str) = args.value_of("listen-address") { let listen_address = listen_address_str .parse() diff --git a/beacon_node/http_server/src/metrics/local_metrics.rs b/beacon_node/http_server/src/metrics/local_metrics.rs index fa69ee0c4..7a52d7e45 100644 --- a/beacon_node/http_server/src/metrics/local_metrics.rs +++ b/beacon_node/http_server/src/metrics/local_metrics.rs @@ -1,7 +1,7 @@ use beacon_chain::{BeaconChain, BeaconChainTypes}; use prometheus::{IntGauge, Opts, Registry}; use slot_clock::SlotClock; -use std::fs::File; +use std::fs; use std::path::PathBuf; use types::{EthSpec, Slot}; @@ -13,6 +13,9 @@ pub struct LocalMetrics { present_slot: IntGauge, present_epoch: IntGauge, best_slot: IntGauge, + best_beacon_block_root: IntGauge, + justified_beacon_block_root: IntGauge, + finalized_beacon_block_root: IntGauge, validator_count: IntGauge, justified_epoch: IntGauge, finalized_epoch: IntGauge, @@ -36,6 +39,24 @@ impl LocalMetrics { let opts = Opts::new("best_slot", "slot_of_block_at_chain_head"); IntGauge::with_opts(opts)? }, + best_beacon_block_root: { + let opts = Opts::new("best_beacon_block_root", "root_of_block_at_chain_head"); + IntGauge::with_opts(opts)? + }, + justified_beacon_block_root: { + let opts = Opts::new( + "justified_beacon_block_root", + "root_of_block_at_justified_head", + ); + IntGauge::with_opts(opts)? + }, + finalized_beacon_block_root: { + let opts = Opts::new( + "finalized_beacon_block_root", + "root_of_block_at_finalized_head", + ); + IntGauge::with_opts(opts)? + }, validator_count: { let opts = Opts::new("validator_count", "number_of_validators"); IntGauge::with_opts(opts)? @@ -64,6 +85,9 @@ impl LocalMetrics { registry.register(Box::new(self.present_slot.clone()))?; registry.register(Box::new(self.present_epoch.clone()))?; registry.register(Box::new(self.best_slot.clone()))?; + registry.register(Box::new(self.best_beacon_block_root.clone()))?; + registry.register(Box::new(self.justified_beacon_block_root.clone()))?; + registry.register(Box::new(self.finalized_beacon_block_root.clone()))?; registry.register(Box::new(self.validator_count.clone()))?; registry.register(Box::new(self.finalized_epoch.clone()))?; registry.register(Box::new(self.justified_epoch.clone()))?; @@ -87,6 +111,22 @@ impl LocalMetrics { .set(present_slot.epoch(T::EthSpec::slots_per_epoch()).as_u64() as i64); self.best_slot.set(state.slot.as_u64() as i64); + self.best_beacon_block_root + .set(beacon_chain.head().beacon_block_root.to_low_u64_le() as i64); + self.justified_beacon_block_root.set( + beacon_chain + .head() + .beacon_state + .current_justified_root + .to_low_u64_le() as i64, + ); + self.finalized_beacon_block_root.set( + beacon_chain + .head() + .beacon_state + .finalized_root + .to_low_u64_le() as i64, + ); self.validator_count .set(state.validator_registry.len() as i64); self.justified_epoch @@ -97,10 +137,17 @@ impl LocalMetrics { self.validator_balances_sum .set(state.balances.iter().sum::() as i64); } - let db_size = File::open(db_path) - .and_then(|f| f.metadata()) - .and_then(|m| Ok(m.len())) - .unwrap_or(0); + let db_size = if let Ok(iter) = fs::read_dir(db_path) { + iter.filter_map(Result::ok) + .map(size_of_dir_entry) + .fold(0_u64, |sum, val| sum + val) + } else { + 0 + }; self.database_size.set(db_size as i64); } } + +fn size_of_dir_entry(dir: fs::DirEntry) -> u64 { + dir.metadata().map(|m| m.len()).unwrap_or(0) +} diff --git a/beacon_node/network/src/sync/simple_sync.rs b/beacon_node/network/src/sync/simple_sync.rs index aeabd0507..b981d2040 100644 --- a/beacon_node/network/src/sync/simple_sync.rs +++ b/beacon_node/network/src/sync/simple_sync.rs @@ -296,7 +296,7 @@ impl SimpleSync { .collect(); if roots.len() as u64 != req.count { - warn!( + debug!( self.log, "BlockRootsRequest"; "peer" => format!("{:?}", peer_id), diff --git a/beacon_node/src/main.rs b/beacon_node/src/main.rs index 55c86672a..1004ba19b 100644 --- a/beacon_node/src/main.rs +++ b/beacon_node/src/main.rs @@ -36,6 +36,13 @@ fn main() { .help("File path where output will be written.") .takes_value(true), ) + .arg( + Arg::with_name("network-dir") + .long("network-dir") + .value_name("NETWORK-DIR") + .help("Data directory for network keys.") + .takes_value(true) + ) // network related arguments .arg( Arg::with_name("listen-address") @@ -145,6 +152,16 @@ fn main() { .short("r") .help("When present, genesis will be within 30 minutes prior. Only for testing"), ) + .arg( + Arg::with_name("debug-level") + .long("debug-level") + .value_name("LEVEL") + .short("s") + .help("The title of the spec constants for chain config.") + .takes_value(true) + .possible_values(&["info", "debug", "trace", "warn", "error", "crit"]) + .default_value("info"), + ) .arg( Arg::with_name("verbosity") .short("v") @@ -156,9 +173,19 @@ fn main() { // build the initial logger let decorator = slog_term::TermDecorator::new().build(); - let drain = slog_term::CompactFormat::new(decorator).build().fuse(); + let drain = slog_term::FullFormat::new(decorator).build().fuse(); let drain = slog_async::Async::new(drain).build(); + let drain = match matches.value_of("debug-level") { + Some("info") => drain.filter_level(Level::Info), + Some("debug") => drain.filter_level(Level::Debug), + Some("trace") => drain.filter_level(Level::Trace), + Some("warn") => drain.filter_level(Level::Warning), + Some("error") => drain.filter_level(Level::Error), + Some("crit") => drain.filter_level(Level::Critical), + _ => unreachable!("guarded by clap"), + }; + let drain = match matches.occurrences_of("verbosity") { 0 => drain.filter_level(Level::Info), 1 => drain.filter_level(Level::Debug), @@ -263,6 +290,7 @@ fn main() { } }; + // Start the node using a `tokio` executor. match run::run_beacon_node(client_config, eth2_config, &log) { Ok(_) => {} Err(e) => crit!(log, "Beacon node failed to start"; "reason" => format!("{:}", e)), diff --git a/beacon_node/src/run.rs b/beacon_node/src/run.rs index fc46a3f44..24c6d09d1 100644 --- a/beacon_node/src/run.rs +++ b/beacon_node/src/run.rs @@ -15,6 +15,12 @@ use tokio::runtime::TaskExecutor; use tokio_timer::clock::Clock; use types::{MainnetEthSpec, MinimalEthSpec}; +/// Reads the configuration and initializes a `BeaconChain` with the required types and parameters. +/// +/// Spawns an executor which performs syncing, networking, block production, etc. +/// +/// Blocks the current thread, returning after the `BeaconChain` has exited or a `Ctrl+C` +/// signal. pub fn run_beacon_node( client_config: ClientConfig, eth2_config: Eth2Config, @@ -38,7 +44,7 @@ pub fn run_beacon_node( warn!( log, - "This software is EXPERIMENTAL and provides no guarantees or warranties." + "Ethereum 2.0 is pre-release. This software is experimental." ); info!( @@ -46,6 +52,7 @@ pub fn run_beacon_node( "Starting beacon node"; "p2p_listen_address" => format!("{:?}", &other_client_config.network.listen_address), "data_dir" => format!("{:?}", other_client_config.data_dir()), + "network_dir" => format!("{:?}", other_client_config.network.network_dir), "spec_constants" => &spec_constants, "db_type" => &other_client_config.db_type, ); @@ -92,7 +99,8 @@ pub fn run_beacon_node( result } -pub fn run( +/// Performs the type-generic parts of launching a `BeaconChain`. +fn run( db_path: &Path, client_config: ClientConfig, eth2_config: Eth2Config, diff --git a/validator_client/src/main.rs b/validator_client/src/main.rs index c12cae6a2..a4377e708 100644 --- a/validator_client/src/main.rs +++ b/validator_client/src/main.rs @@ -11,7 +11,7 @@ use crate::service::Service as ValidatorService; use clap::{App, Arg}; use eth2_config::{read_from_file, write_to_file, Eth2Config}; use protos::services_grpc::ValidatorServiceClient; -use slog::{crit, error, info, o, Drain}; +use slog::{crit, error, info, o, Drain, Level}; use std::fs; use std::path::PathBuf; use types::{Keypair, MainnetEthSpec, MinimalEthSpec}; @@ -26,7 +26,6 @@ fn main() { let decorator = slog_term::TermDecorator::new().build(); let drain = slog_term::CompactFormat::new(decorator).build().fuse(); let drain = slog_async::Async::new(drain).build().fuse(); - let mut log = slog::Logger::root(drain, o!()); // CLI let matches = App::new("Lighthouse Validator Client") @@ -73,8 +72,29 @@ fn main() { .possible_values(&["mainnet", "minimal"]) .default_value("minimal"), ) + .arg( + Arg::with_name("debug-level") + .long("debug-level") + .value_name("LEVEL") + .short("s") + .help("The title of the spec constants for chain config.") + .takes_value(true) + .possible_values(&["info", "debug", "trace", "warn", "error", "crit"]) + .default_value("info"), + ) .get_matches(); + let drain = match matches.value_of("debug-level") { + Some("info") => drain.filter_level(Level::Info), + Some("debug") => drain.filter_level(Level::Debug), + Some("trace") => drain.filter_level(Level::Trace), + Some("warn") => drain.filter_level(Level::Warning), + Some("error") => drain.filter_level(Level::Error), + Some("crit") => drain.filter_level(Level::Critical), + _ => unreachable!("guarded by clap"), + }; + let mut log = slog::Logger::root(drain.fuse(), o!()); + let data_dir = match matches .value_of("datadir") .and_then(|v| Some(PathBuf::from(v))) From a236003a7b8e2293432ceff7741a61471ee47089 Mon Sep 17 00:00:00 2001 From: Michael Sproul Date: Tue, 30 Jul 2019 12:44:51 +1000 Subject: [PATCH 08/25] =?UTF-8?q?Update=20to=20frozen=20spec=20=E2=9D=84?= =?UTF-8?q?=EF=B8=8F=20(v0.8.1)=20(#444)?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * types: first updates for v0.8 * state_processing: epoch processing v0.8.0 * state_processing: block processing v0.8.0 * tree_hash_derive: support generics in SignedRoot * types v0.8: update to use ssz_types * state_processing v0.8: use ssz_types * ssz_types: add bitwise methods and from_elem * types: fix v0.8 FIXMEs * ssz_types: add bitfield shift_up * ssz_types: iterators and DerefMut for VariableList * types,state_processing: use VariableList * ssz_types: fix BitVector Decode impl Fixed a typo in the implementation of ssz::Decode for BitVector, which caused it to be considered variable length! * types: fix test modules for v0.8 update * types: remove slow type-level arithmetic * state_processing: fix tests for v0.8 * op_pool: update for v0.8 * ssz_types: Bitfield difference length-independent Allow computing the difference of two bitfields of different lengths. * Implement compact committee support * epoch_processing: committee & active index roots * state_processing: genesis state builder v0.8 * state_processing: implement v0.8.1 * Further improve tree_hash * Strip examples, tests from cached_tree_hash * Update TreeHash, un-impl CachedTreeHash * Update bitfield TreeHash, un-impl CachedTreeHash * Update FixedLenVec TreeHash, unimpl CachedTreeHash * Update update tree_hash_derive for new TreeHash * Fix TreeHash, un-impl CachedTreeHash for ssz_types * Remove fixed_len_vec, ssz benches SSZ benches relied upon fixed_len_vec -- it is easier to just delete them and rebuild them later (when necessary) * Remove boolean_bitfield crate * Fix fake_crypto BLS compile errors * Update ef_tests for new v.8 type params * Update ef_tests submodule to v0.8.1 tag * Make fixes to support parsing ssz ef_tests * `compact_committee...` to `compact_committees...` * Derive more traits for `CompactCommittee` * Flip bitfield byte-endianness * Fix tree_hash for bitfields * Modify CLI output for ef_tests * Bump ssz crate version * Update ssz_types doc comment * Del cached tree hash tests from ssz_static tests * Tidy SSZ dependencies * Rename ssz_types crate to eth2_ssz_types * validator_client: update for v0.8 * ssz_types: update union/difference for bit order swap * beacon_node: update for v0.8, EthSpec * types: disable cached tree hash, update min spec * state_processing: fix slot bug in committee update * tests: temporarily disable fork choice harness test See #447 * committee cache: prevent out-of-bounds access In the case where we tried to access the committee of a shard that didn't have a committee in the current epoch, we were accessing elements beyond the end of the shuffling vector and panicking! This commit adds a check to make the failure safe and explicit. * fix bug in get_indexed_attestation and simplify There was a bug in our implementation of get_indexed_attestation whereby incorrect "committee indices" were used to index into the custody bitfield. The bug was only observable in the case where some bits of the custody bitfield were set to 1. The implementation has been simplified to remove the bug, and a test added. * state_proc: workaround for compact committees bug https://github.com/ethereum/eth2.0-specs/issues/1315 * v0.8: updates to make the EF tests pass * Remove redundant max operation checks. * Always supply both messages when checking attestation signatures -- allowing verification of an attestation with no signatures. * Swap the order of the fork and domain constant in `get_domain`, to match the spec. * rustfmt * ef_tests: add new epoch processing tests * Integrate v0.8 into master (compiles) * Remove unused crates, fix clippy lints * Replace v0.6.3 tags w/ v0.8.1 * Remove old comment * Ensure lmd ghost tests only run in release * Update readme --- Cargo.toml | 3 - README.md | 11 +- account_manager/src/main.rs | 2 +- beacon_node/beacon_chain/src/beacon_chain.rs | 127 ++-- beacon_node/beacon_chain/src/checkpoint.rs | 6 +- beacon_node/beacon_chain/src/fork_choice.rs | 36 +- .../src/persisted_beacon_chain.rs | 2 +- beacon_node/beacon_chain/src/test_utils.rs | 23 +- beacon_node/beacon_chain/tests/tests.rs | 20 +- beacon_node/eth2-libp2p/src/behaviour.rs | 54 +- beacon_node/eth2-libp2p/src/discovery.rs | 4 +- beacon_node/eth2-libp2p/src/rpc/codec/base.rs | 12 +- beacon_node/eth2-libp2p/src/rpc/handler.rs | 29 +- beacon_node/eth2-libp2p/src/rpc/methods.rs | 6 +- beacon_node/eth2-libp2p/src/rpc/mod.rs | 12 +- beacon_node/eth2-libp2p/src/service.rs | 17 +- beacon_node/http_server/src/lib.rs | 2 +- .../http_server/src/metrics/local_metrics.rs | 13 +- beacon_node/network/src/message_handler.rs | 26 +- beacon_node/network/src/service.rs | 33 +- beacon_node/network/src/sync/import_queue.rs | 49 +- beacon_node/network/src/sync/simple_sync.rs | 97 +-- beacon_node/rpc/src/attestation.rs | 2 +- beacon_node/rpc/src/beacon_block.rs | 2 +- beacon_node/rpc/src/lib.rs | 2 +- beacon_node/src/main.rs | 2 +- beacon_node/src/run.rs | 6 +- beacon_node/store/src/block_at_slot.rs | 50 +- beacon_node/store/src/impls.rs | 2 +- beacon_node/store/src/iter.rs | 26 +- beacon_node/store/src/lib.rs | 6 +- eth2/README.md | 2 - eth2/lmd_ghost/src/lib.rs | 6 +- eth2/lmd_ghost/src/reduced_tree.rs | 18 +- eth2/lmd_ghost/tests/test.rs | 12 +- eth2/operation_pool/Cargo.toml | 4 +- eth2/operation_pool/src/attestation.rs | 54 +- eth2/operation_pool/src/attestation_id.rs | 2 +- eth2/operation_pool/src/lib.rs | 147 ++-- eth2/operation_pool/src/max_cover.rs | 2 +- eth2/operation_pool/src/persistence.rs | 22 +- eth2/state_processing/Cargo.toml | 1 + .../benches/bench_epoch_processing.rs | 2 +- .../src/common/convert_to_indexed.rs | 33 - .../src/common/get_attesting_indices.rs | 37 +- .../src/common/get_compact_committees_root.rs | 49 ++ .../src/common/get_indexed_attestation.rs | 122 ++++ .../src/common/initiate_validator_exit.rs | 12 +- eth2/state_processing/src/common/mod.rs | 10 +- .../src/common/slash_validator.rs | 36 +- .../src/common/verify_bitfield.rs | 79 -- eth2/state_processing/src/genesis.rs | 61 ++ .../state_processing/src/get_genesis_state.rs | 56 -- eth2/state_processing/src/lib.rs | 4 +- .../src/per_block_processing.rs | 149 ++-- .../block_processing_builder.rs | 17 +- .../src/per_block_processing/errors.rs | 67 +- ...ion.rs => is_valid_indexed_attestation.rs} | 83 +-- .../src/per_block_processing/tests.rs | 2 +- .../validate_attestation.rs | 156 ---- .../verify_attestation.rs | 156 ++++ .../verify_attester_slashing.rs | 18 +- .../per_block_processing/verify_deposit.rs | 46 +- .../src/per_block_processing/verify_exit.rs | 17 +- .../verify_proposer_slashing.rs | 9 +- .../per_block_processing/verify_transfer.rs | 16 +- .../src/per_epoch_processing.rs | 116 +-- .../src/per_epoch_processing/apply_rewards.rs | 42 +- .../src/per_epoch_processing/errors.rs | 7 + .../per_epoch_processing/process_slashings.rs | 31 +- .../per_epoch_processing/registry_updates.rs | 16 +- .../validator_statuses.rs | 40 +- .../src/per_epoch_processing/winning_root.rs | 86 +-- .../src/per_slot_processing.rs | 23 +- eth2/types/Cargo.toml | 3 +- eth2/types/src/attestation.rs | 31 +- eth2/types/src/attestation_data.rs | 15 +- .../src/attestation_data_and_custody_bit.rs | 3 +- eth2/types/src/attester_slashing.rs | 16 +- eth2/types/src/beacon_block.rs | 55 +- eth2/types/src/beacon_block_body.rs | 22 +- eth2/types/src/beacon_block_header.rs | 14 +- eth2/types/src/beacon_state.rs | 489 +++++++------ .../src/beacon_state/beacon_state_types.rs | 150 ++-- .../types/src/beacon_state/committee_cache.rs | 60 +- .../src/beacon_state/committee_cache/tests.rs | 64 +- eth2/types/src/beacon_state/exit_cache.rs | 4 +- eth2/types/src/beacon_state/tests.rs | 16 +- eth2/types/src/chain_spec.rs | 108 ++- eth2/types/src/checkpoint.rs | 39 + eth2/types/src/compact_committee.rs | 35 + eth2/types/src/crosslink.rs | 11 +- eth2/types/src/deposit.rs | 7 +- eth2/types/src/deposit_data.rs | 4 +- eth2/types/src/eth1_data.rs | 2 +- eth2/types/src/fork.rs | 6 +- eth2/types/src/historical_batch.rs | 8 +- eth2/types/src/indexed_attestation.rs | 41 +- eth2/types/src/lib.rs | 11 +- eth2/types/src/pending_attestation.rs | 13 +- eth2/types/src/proposer_slashing.rs | 2 +- eth2/types/src/relative_epoch.rs | 6 +- .../builders/testing_attestation_builder.rs | 43 +- .../testing_attestation_data_builder.rs | 67 +- .../testing_attester_slashing_builder.rs | 39 +- .../builders/testing_beacon_block_builder.rs | 79 +- .../builders/testing_beacon_state_builder.rs | 25 +- .../builders/testing_deposit_builder.rs | 6 - .../testing_pending_attestation_builder.rs | 24 +- .../testing_proposer_slashing_builder.rs | 6 +- eth2/types/src/test_utils/macros.rs | 17 +- eth2/types/src/test_utils/test_random.rs | 21 +- .../src/test_utils/test_random/bitfield.rs | 16 +- eth2/types/src/transfer.rs | 2 +- eth2/types/src/validator.rs | 6 +- eth2/types/src/voluntary_exit.rs | 3 +- eth2/utils/bls/Cargo.toml | 1 + eth2/utils/bls/src/aggregate_signature.rs | 11 +- .../utils/bls/src/fake_aggregate_signature.rs | 11 +- eth2/utils/bls/src/fake_public_key.rs | 11 +- eth2/utils/bls/src/fake_signature.rs | 9 +- eth2/utils/bls/src/macros.rs | 48 ++ eth2/utils/bls/src/public_key.rs | 11 +- eth2/utils/bls/src/secret_key.rs | 9 +- eth2/utils/bls/src/signature.rs | 11 +- eth2/utils/boolean-bitfield/Cargo.toml | 17 - eth2/utils/boolean-bitfield/README.md | 3 - eth2/utils/boolean-bitfield/fuzz/.gitignore | 4 - eth2/utils/boolean-bitfield/fuzz/Cargo.toml | 33 - .../fuzz_targets/fuzz_target_from_bytes.rs | 9 - .../fuzz_targets/fuzz_target_ssz_decode.rs | 11 - .../fuzz_targets/fuzz_target_ssz_encode.rs | 13 - eth2/utils/boolean-bitfield/src/lib.rs | 572 --------------- .../examples/8k_hashes_cached.rs | 21 - .../examples/8k_hashes_standard.rs | 10 - eth2/utils/cached_tree_hash/tests/tests.rs | 677 ------------------ eth2/utils/eth2_config/src/lib.rs | 2 +- eth2/utils/fixed_len_vec/Cargo.toml | 13 - eth2/utils/fixed_len_vec/src/impls.rs | 140 ---- eth2/utils/fixed_len_vec/src/lib.rs | 134 ---- eth2/utils/honey-badger-split/Cargo.toml | 7 - eth2/utils/honey-badger-split/src/lib.rs | 117 --- eth2/utils/ssz/Cargo.toml | 7 +- eth2/utils/ssz/benches/benches.rs | 80 --- eth2/utils/ssz/src/lib.rs | 6 +- eth2/utils/ssz_types/Cargo.toml | 6 +- eth2/utils/ssz_types/src/bitfield.rs | 453 ++++++------ eth2/utils/ssz_types/src/fixed_vector.rs | 224 ++++-- eth2/utils/ssz_types/src/lib.rs | 5 + eth2/utils/ssz_types/src/tree_hash.rs | 48 ++ eth2/utils/ssz_types/src/variable_list.rs | 311 +++++--- eth2/utils/tree_hash/src/impls.rs | 49 +- eth2/utils/tree_hash/src/lib.rs | 40 +- eth2/utils/tree_hash/src/merkleize_padded.rs | 2 +- eth2/utils/tree_hash_derive/src/lib.rs | 7 +- eth2/utils/tree_hash_derive/tests/tests.rs | 179 ----- tests/ef_tests/eth2.0-spec-tests | 2 +- tests/ef_tests/src/cases.rs | 6 + tests/ef_tests/src/cases/bls_g2_compressed.rs | 6 - .../src/cases/epoch_processing_crosslinks.rs | 3 +- .../cases/epoch_processing_final_updates.rs | 41 ++ ...ocessing_justification_and_finalization.rs | 46 ++ .../epoch_processing_registry_updates.rs | 3 +- .../src/cases/epoch_processing_slashings.rs | 50 ++ .../src/cases/operations_attestation.rs | 5 +- .../src/cases/operations_attester_slashing.rs | 3 +- .../src/cases/operations_block_header.rs | 5 +- .../ef_tests/src/cases/operations_deposit.rs | 3 +- tests/ef_tests/src/cases/operations_exit.rs | 3 +- .../src/cases/operations_proposer_slashing.rs | 3 +- .../ef_tests/src/cases/operations_transfer.rs | 6 +- tests/ef_tests/src/cases/sanity_blocks.rs | 31 +- tests/ef_tests/src/cases/sanity_slots.rs | 3 +- tests/ef_tests/src/cases/ssz_static.rs | 41 +- tests/ef_tests/src/doc.rs | 29 +- tests/ef_tests/tests/tests.rs | 27 + .../beacon_node_attestation.rs | 6 +- .../src/attestation_producer/grpc.rs | 6 +- .../src/attestation_producer/mod.rs | 23 +- .../src/block_producer/beacon_node_block.rs | 11 +- validator_client/src/block_producer/grpc.rs | 11 +- validator_client/src/block_producer/mod.rs | 15 +- validator_client/src/main.rs | 6 +- validator_client/src/service.rs | 19 +- 184 files changed, 3332 insertions(+), 4542 deletions(-) delete mode 100644 eth2/state_processing/src/common/convert_to_indexed.rs create mode 100644 eth2/state_processing/src/common/get_compact_committees_root.rs create mode 100644 eth2/state_processing/src/common/get_indexed_attestation.rs delete mode 100644 eth2/state_processing/src/common/verify_bitfield.rs create mode 100644 eth2/state_processing/src/genesis.rs delete mode 100644 eth2/state_processing/src/get_genesis_state.rs rename eth2/state_processing/src/per_block_processing/{verify_indexed_attestation.rs => is_valid_indexed_attestation.rs} (57%) delete mode 100644 eth2/state_processing/src/per_block_processing/validate_attestation.rs create mode 100644 eth2/state_processing/src/per_block_processing/verify_attestation.rs create mode 100644 eth2/types/src/checkpoint.rs create mode 100644 eth2/types/src/compact_committee.rs delete mode 100644 eth2/utils/boolean-bitfield/Cargo.toml delete mode 100644 eth2/utils/boolean-bitfield/README.md delete mode 100644 eth2/utils/boolean-bitfield/fuzz/.gitignore delete mode 100644 eth2/utils/boolean-bitfield/fuzz/Cargo.toml delete mode 100644 eth2/utils/boolean-bitfield/fuzz/fuzz_targets/fuzz_target_from_bytes.rs delete mode 100644 eth2/utils/boolean-bitfield/fuzz/fuzz_targets/fuzz_target_ssz_decode.rs delete mode 100644 eth2/utils/boolean-bitfield/fuzz/fuzz_targets/fuzz_target_ssz_encode.rs delete mode 100644 eth2/utils/boolean-bitfield/src/lib.rs delete mode 100644 eth2/utils/cached_tree_hash/examples/8k_hashes_cached.rs delete mode 100644 eth2/utils/cached_tree_hash/examples/8k_hashes_standard.rs delete mode 100644 eth2/utils/cached_tree_hash/tests/tests.rs delete mode 100644 eth2/utils/fixed_len_vec/Cargo.toml delete mode 100644 eth2/utils/fixed_len_vec/src/impls.rs delete mode 100644 eth2/utils/fixed_len_vec/src/lib.rs delete mode 100644 eth2/utils/honey-badger-split/Cargo.toml delete mode 100644 eth2/utils/honey-badger-split/src/lib.rs delete mode 100644 eth2/utils/ssz/benches/benches.rs create mode 100644 eth2/utils/ssz_types/src/tree_hash.rs delete mode 100644 eth2/utils/tree_hash_derive/tests/tests.rs create mode 100644 tests/ef_tests/src/cases/epoch_processing_final_updates.rs create mode 100644 tests/ef_tests/src/cases/epoch_processing_justification_and_finalization.rs create mode 100644 tests/ef_tests/src/cases/epoch_processing_slashings.rs diff --git a/Cargo.toml b/Cargo.toml index 22ec6fd98..66028ecd5 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -5,14 +5,11 @@ members = [ "eth2/state_processing", "eth2/types", "eth2/utils/bls", - "eth2/utils/boolean-bitfield", "eth2/utils/cached_tree_hash", "eth2/utils/compare_fields", "eth2/utils/compare_fields_derive", "eth2/utils/eth2_config", - "eth2/utils/fixed_len_vec", "eth2/utils/hashing", - "eth2/utils/honey-badger-split", "eth2/utils/merkle_proof", "eth2/utils/int_to_bytes", "eth2/utils/serde_hex", diff --git a/README.md b/README.md index e4f2e8ccb..9f0e353c5 100644 --- a/README.md +++ b/README.md @@ -34,16 +34,15 @@ user-facing functionality. Current development overview: -- Specification `v0.6.3` implemented, optimized and passing test vectors. -- Rust-native libp2p integrated, with Gossipsub. -- Discv5 (P2P discovery mechanism) integration started. +- Specification `v0.8.1` implemented, optimized and passing test vectors. +- Rust-native libp2p with Gossipsub and Discv5. - Metrics via Prometheus. - Basic gRPC API, soon to be replaced with RESTful HTTP/JSON. ### Roadmap -- **July 2019**: `lighthouse-0.0.1` release: A stable testnet for developers with a useful - HTTP API. +- **Early-September 2019**: `lighthouse-0.0.1` release: A stable testnet for + developers with a useful HTTP API. - **September 2019**: Inter-operability with other Ethereum 2.0 clients. - **October 2019**: Public, multi-client testnet with user-facing functionality. - **January 2020**: Production Beacon Chain testnet. @@ -153,6 +152,8 @@ If you'd like some background on Sigma Prime, please see the [Lighthouse Update - [`protos/`](protos/): protobuf/gRPC definitions that are common across the Lighthouse project. - [`validator_client/`](validator_client/): the "Validator Client" binary and crates exclusively associated with it. +- [`tests/`](tests/): code specific to testing, most notably contains the + Ethereum Foundation test vectors. ## Contributing diff --git a/account_manager/src/main.rs b/account_manager/src/main.rs index 3c55c39e2..b7448ddf2 100644 --- a/account_manager/src/main.rs +++ b/account_manager/src/main.rs @@ -83,7 +83,7 @@ fn main() { } }; default_dir.push(DEFAULT_DATA_DIR); - PathBuf::from(default_dir) + default_dir } }; diff --git a/beacon_node/beacon_chain/src/beacon_chain.rs b/beacon_node/beacon_chain/src/beacon_chain.rs index 561832033..d0c50af70 100644 --- a/beacon_node/beacon_chain/src/beacon_chain.rs +++ b/beacon_node/beacon_chain/src/beacon_chain.rs @@ -94,7 +94,7 @@ impl BeaconChain { store: Arc, slot_clock: T::SlotClock, mut genesis_state: BeaconState, - genesis_block: BeaconBlock, + genesis_block: BeaconBlock, spec: ChainSpec, log: Logger, ) -> Result { @@ -108,7 +108,7 @@ impl BeaconChain { // Also store the genesis block under the `ZERO_HASH` key. let genesis_block_root = genesis_block.block_header().canonical_root(); - store.put(&spec.zero_hash, &genesis_block)?; + store.put(&Hash256::zero(), &genesis_block)?; let canonical_head = RwLock::new(CheckPoint::new( genesis_block.clone(), @@ -150,7 +150,7 @@ impl BeaconChain { spec.seconds_per_slot, ); - let last_finalized_root = p.canonical_head.beacon_state.finalized_root; + let last_finalized_root = p.canonical_head.beacon_state.finalized_checkpoint.root; let last_finalized_block = &p.canonical_head.beacon_block; let op_pool = p.op_pool.into_operation_pool(&p.state, &spec); @@ -187,8 +187,11 @@ impl BeaconChain { /// Returns the beacon block body for each beacon block root in `roots`. /// /// Fails if any root in `roots` does not have a corresponding block. - pub fn get_block_bodies(&self, roots: &[Hash256]) -> Result, Error> { - let bodies: Result, _> = roots + pub fn get_block_bodies( + &self, + roots: &[Hash256], + ) -> Result>, Error> { + let bodies: Result, _> = roots .iter() .map(|root| match self.get_block(root)? { Some(block) => Ok(block.body), @@ -259,7 +262,10 @@ impl BeaconChain { /// ## Errors /// /// May return a database error. - pub fn get_block(&self, block_root: &Hash256) -> Result, Error> { + pub fn get_block( + &self, + block_root: &Hash256, + ) -> Result>, Error> { Ok(self.store.get(block_root)?) } @@ -321,15 +327,9 @@ impl BeaconChain { /// Returns the validator index (if any) for the given public key. /// - /// Information is retrieved from the present `beacon_state.validator_registry`. + /// Information is retrieved from the present `beacon_state.validators`. pub fn validator_index(&self, pubkey: &PublicKey) -> Option { - for (i, validator) in self - .head() - .beacon_state - .validator_registry - .iter() - .enumerate() - { + for (i, validator) in self.head().beacon_state.validators.iter().enumerate() { if validator.pubkey == *pubkey { return Some(i); } @@ -469,9 +469,22 @@ impl BeaconChain { } else { *state.get_block_root(current_epoch_start_slot)? }; + let target = Checkpoint { + epoch: state.current_epoch(), + root: target_root, + }; - let previous_crosslink_root = - Hash256::from_slice(&state.get_current_crosslink(shard)?.tree_hash_root()); + let parent_crosslink = state.get_current_crosslink(shard)?; + let crosslink = Crosslink { + shard, + parent_root: Hash256::from_slice(&parent_crosslink.tree_hash_root()), + start_epoch: parent_crosslink.end_epoch, + end_epoch: std::cmp::min( + target.epoch, + parent_crosslink.end_epoch + self.spec.max_epochs_per_crosslink, + ), + data_root: Hash256::zero(), + }; // Collect some metrics. self.metrics.attestation_production_successes.inc(); @@ -479,13 +492,9 @@ impl BeaconChain { Ok(AttestationData { beacon_block_root: head_block_root, - source_epoch: state.current_justified_epoch, - source_root: state.current_justified_root, - target_epoch: state.current_epoch(), - target_root, - shard, - previous_crosslink_root, - crosslink_data_root: Hash256::zero(), + source: state.current_justified_checkpoint.clone(), + target, + crosslink, }) } @@ -495,7 +504,7 @@ impl BeaconChain { /// if possible. pub fn process_attestation( &self, - attestation: Attestation, + attestation: Attestation, ) -> Result<(), AttestationValidationError> { self.metrics.attestation_processing_requests.inc(); let timer = self.metrics.attestation_processing_times.start_timer(); @@ -527,9 +536,10 @@ impl BeaconChain { /// Accept some deposit and queue it for inclusion in an appropriate block. pub fn process_deposit( &self, + index: u64, deposit: Deposit, ) -> Result { - self.op_pool.insert_deposit(deposit) + self.op_pool.insert_deposit(index, deposit) } /// Accept some exit and queue it for inclusion in an appropriate block. @@ -556,7 +566,7 @@ impl BeaconChain { /// Accept some attester slashing and queue it for inclusion in an appropriate block. pub fn process_attester_slashing( &self, - attester_slashing: AttesterSlashing, + attester_slashing: AttesterSlashing, ) -> Result<(), AttesterSlashingValidationError> { self.op_pool .insert_attester_slashing(attester_slashing, &*self.state.read(), &self.spec) @@ -565,14 +575,18 @@ impl BeaconChain { /// Accept some block and attempt to add it to block DAG. /// /// Will accept blocks from prior slots, however it will reject any block from a future slot. - pub fn process_block(&self, block: BeaconBlock) -> Result { + pub fn process_block( + &self, + block: BeaconBlock, + ) -> Result { self.metrics.block_processing_requests.inc(); let timer = self.metrics.block_processing_times.start_timer(); let finalized_slot = self .state .read() - .finalized_epoch + .finalized_checkpoint + .epoch .start_slot(T::EthSpec::slots_per_epoch()); if block.slot <= finalized_slot { @@ -600,18 +614,17 @@ impl BeaconChain { }); } - if self.store.exists::(&block_root)? { + if self.store.exists::>(&block_root)? { return Ok(BlockProcessingOutcome::BlockIsAlreadyKnown); } // Load the blocks parent block from the database, returning invalid if that block is not // found. - let parent_block_root = block.previous_block_root; - let parent_block: BeaconBlock = match self.store.get(&parent_block_root)? { - Some(previous_block_root) => previous_block_root, + let parent_block: BeaconBlock = match self.store.get(&block.parent_root)? { + Some(block) => block, None => { return Ok(BlockProcessingOutcome::ParentUnknown { - parent: parent_block_root, + parent: block.parent_root, }); } }; @@ -691,7 +704,7 @@ impl BeaconChain { pub fn produce_block( &self, randao_reveal: Signature, - ) -> Result<(BeaconBlock, BeaconState), BlockProductionError> { + ) -> Result<(BeaconBlock, BeaconState), BlockProductionError> { let state = self.state.read().clone(); let slot = self .read_slot_clock() @@ -713,7 +726,7 @@ impl BeaconChain { mut state: BeaconState, produce_at_slot: Slot, randao_reveal: Signature, - ) -> Result<(BeaconBlock, BeaconState), BlockProductionError> { + ) -> Result<(BeaconBlock, BeaconState), BlockProductionError> { self.metrics.block_production_requests.inc(); let timer = self.metrics.block_production_times.start_timer(); @@ -724,7 +737,7 @@ impl BeaconChain { state.build_committee_cache(RelativeEpoch::Current, &self.spec)?; - let previous_block_root = if state.slot > 0 { + let parent_root = if state.slot > 0 { *state .get_block_root(state.slot - 1) .map_err(|_| BlockProductionError::UnableToGetBlockRootFromState)? @@ -740,7 +753,7 @@ impl BeaconChain { let mut block = BeaconBlock { slot: state.slot, - previous_block_root, + parent_root, state_root: Hash256::zero(), // Updated after the state is calculated. signature: Signature::empty_signature(), // To be completed by a validator. body: BeaconBlockBody { @@ -752,12 +765,12 @@ impl BeaconChain { block_hash: Hash256::zero(), }, graffiti, - proposer_slashings, - attester_slashings, - attestations: self.op_pool.get_attestations(&state, &self.spec), - deposits: self.op_pool.get_deposits(&state, &self.spec), - voluntary_exits: self.op_pool.get_voluntary_exits(&state, &self.spec), - transfers: self.op_pool.get_transfers(&state, &self.spec), + proposer_slashings: proposer_slashings.into(), + attester_slashings: attester_slashings.into(), + attestations: self.op_pool.get_attestations(&state, &self.spec).into(), + deposits: self.op_pool.get_deposits(&state).into(), + voluntary_exits: self.op_pool.get_voluntary_exits(&state, &self.spec).into(), + transfers: self.op_pool.get_transfers(&state, &self.spec).into(), }, }; @@ -790,7 +803,7 @@ impl BeaconChain { if beacon_block_root != self.head().beacon_block_root { self.metrics.fork_choice_changed_head.inc(); - let beacon_block: BeaconBlock = self + let beacon_block: BeaconBlock = self .store .get(&beacon_block_root)? .ok_or_else(|| Error::MissingBeaconBlock(beacon_block_root))?; @@ -805,7 +818,7 @@ impl BeaconChain { let new_slot = beacon_block.slot; // If we switched to a new chain (instead of building atop the present chain). - if self.head().beacon_block_root != beacon_block.previous_block_root { + if self.head().beacon_block_root != beacon_block.parent_root { self.metrics.fork_choice_reorg_count.inc(); warn!( self.log, @@ -817,16 +830,16 @@ impl BeaconChain { info!( self.log, "new head block"; - "justified_root" => format!("{}", beacon_state.current_justified_root), - "finalized_root" => format!("{}", beacon_state.finalized_root), + "justified_root" => format!("{}", beacon_state.current_justified_checkpoint.root), + "finalized_root" => format!("{}", beacon_state.finalized_checkpoint.root), "root" => format!("{}", beacon_block_root), "slot" => new_slot, ); }; - let old_finalized_epoch = self.head().beacon_state.finalized_epoch; - let new_finalized_epoch = beacon_state.finalized_epoch; - let finalized_root = beacon_state.finalized_root; + let old_finalized_epoch = self.head().beacon_state.finalized_checkpoint.epoch; + let new_finalized_epoch = beacon_state.finalized_checkpoint.epoch; + let finalized_root = beacon_state.finalized_checkpoint.root; // Never revert back past a finalized epoch. if new_finalized_epoch < old_finalized_epoch { @@ -836,7 +849,7 @@ impl BeaconChain { }) } else { self.update_canonical_head(CheckPoint { - beacon_block: beacon_block, + beacon_block, beacon_block_root, beacon_state, beacon_state_root, @@ -894,7 +907,7 @@ impl BeaconChain { ) -> Result<(), Error> { let finalized_block = self .store - .get::(&finalized_block_root)? + .get::>(&finalized_block_root)? .ok_or_else(|| Error::MissingBeaconBlock(finalized_block_root))?; let new_finalized_epoch = finalized_block.slot.epoch(T::EthSpec::slots_per_epoch()); @@ -914,7 +927,9 @@ impl BeaconChain { /// Returns `true` if the given block root has not been processed. pub fn is_new_block_root(&self, beacon_block_root: &Hash256) -> Result { - Ok(!self.store.exists::(beacon_block_root)?) + Ok(!self + .store + .exists::>(beacon_block_root)?) } /// Dumps the entire canonical chain, from the head to genesis to a vector for analysis. @@ -934,13 +949,13 @@ impl BeaconChain { dump.push(last_slot.clone()); loop { - let beacon_block_root = last_slot.beacon_block.previous_block_root; + let beacon_block_root = last_slot.beacon_block.parent_root; - if beacon_block_root == self.spec.zero_hash { + if beacon_block_root == Hash256::zero() { break; // Genesis has been reached. } - let beacon_block: BeaconBlock = + let beacon_block: BeaconBlock = self.store.get(&beacon_block_root)?.ok_or_else(|| { Error::DBInconsistent(format!("Missing block {}", beacon_block_root)) })?; diff --git a/beacon_node/beacon_chain/src/checkpoint.rs b/beacon_node/beacon_chain/src/checkpoint.rs index c25e75a85..a043a4813 100644 --- a/beacon_node/beacon_chain/src/checkpoint.rs +++ b/beacon_node/beacon_chain/src/checkpoint.rs @@ -6,7 +6,7 @@ use types::{BeaconBlock, BeaconState, EthSpec, Hash256}; /// head, justified head and finalized head. #[derive(Clone, Serialize, PartialEq, Debug, Encode, Decode)] pub struct CheckPoint { - pub beacon_block: BeaconBlock, + pub beacon_block: BeaconBlock, pub beacon_block_root: Hash256, pub beacon_state: BeaconState, pub beacon_state_root: Hash256, @@ -15,7 +15,7 @@ pub struct CheckPoint { impl CheckPoint { /// Create a new checkpoint. pub fn new( - beacon_block: BeaconBlock, + beacon_block: BeaconBlock, beacon_block_root: Hash256, beacon_state: BeaconState, beacon_state_root: Hash256, @@ -31,7 +31,7 @@ impl CheckPoint { /// Update all fields of the checkpoint. pub fn update( &mut self, - beacon_block: BeaconBlock, + beacon_block: BeaconBlock, beacon_block_root: Hash256, beacon_state: BeaconState, beacon_state_root: Hash256, diff --git a/beacon_node/beacon_chain/src/fork_choice.rs b/beacon_node/beacon_chain/src/fork_choice.rs index f72fe65fe..b77979b74 100644 --- a/beacon_node/beacon_chain/src/fork_choice.rs +++ b/beacon_node/beacon_chain/src/fork_choice.rs @@ -1,6 +1,6 @@ use crate::{BeaconChain, BeaconChainTypes}; use lmd_ghost::LmdGhost; -use state_processing::common::get_attesting_indices_unsorted; +use state_processing::common::get_attesting_indices; use std::sync::Arc; use store::{Error as StoreError, Store}; use types::{Attestation, BeaconBlock, BeaconState, BeaconStateError, Epoch, EthSpec, Hash256}; @@ -33,7 +33,7 @@ impl ForkChoice { /// block. pub fn new( store: Arc, - genesis_block: &BeaconBlock, + genesis_block: &BeaconBlock, genesis_block_root: Hash256, ) -> Self { Self { @@ -55,18 +55,21 @@ impl ForkChoice { let state = chain.current_state(); let (block_root, block_slot) = - if state.current_epoch() + 1 > state.current_justified_epoch { + if state.current_epoch() + 1 > state.current_justified_checkpoint.epoch { ( - state.current_justified_root, - start_slot(state.current_justified_epoch), + state.current_justified_checkpoint.root, + start_slot(state.current_justified_checkpoint.epoch), ) } else { - (state.finalized_root, start_slot(state.finalized_epoch)) + ( + state.finalized_checkpoint.root, + start_slot(state.finalized_checkpoint.epoch), + ) }; let block = chain .store - .get::(&block_root)? + .get::>(&block_root)? .ok_or_else(|| Error::MissingBlock(block_root))?; // Resolve the `0x00.. 00` alias back to genesis @@ -87,7 +90,7 @@ impl ForkChoice { // A function that returns the weight for some validator index. let weight = |validator_index: usize| -> Option { start_state - .validator_registry + .validators .get(validator_index) .map(|v| v.effective_balance) }; @@ -104,7 +107,7 @@ impl ForkChoice { pub fn process_block( &self, state: &BeaconState, - block: &BeaconBlock, + block: &BeaconBlock, block_root: Hash256, ) -> Result<()> { // Note: we never count the block as a latest message, only attestations. @@ -125,7 +128,7 @@ impl ForkChoice { fn process_attestation_from_block( &self, state: &BeaconState, - attestation: &Attestation, + attestation: &Attestation, ) -> Result<()> { let block_hash = attestation.data.beacon_block_root; @@ -147,16 +150,13 @@ impl ForkChoice { if block_hash != Hash256::zero() && self .store - .exists::(&block_hash) + .exists::>(&block_hash) .unwrap_or(false) { - let validator_indices = get_attesting_indices_unsorted( - state, - &attestation.data, - &attestation.aggregation_bitfield, - )?; + let validator_indices = + get_attesting_indices(state, &attestation.data, &attestation.aggregation_bits)?; - let block_slot = state.get_attestation_slot(&attestation.data)?; + let block_slot = state.get_attestation_data_slot(&attestation.data)?; for validator_index in validator_indices { self.backend @@ -173,7 +173,7 @@ impl ForkChoice { /// `finalized_block_root` must be the root of `finalized_block`. pub fn process_finalization( &self, - finalized_block: &BeaconBlock, + finalized_block: &BeaconBlock, finalized_block_root: Hash256, ) -> Result<()> { self.backend diff --git a/beacon_node/beacon_chain/src/persisted_beacon_chain.rs b/beacon_node/beacon_chain/src/persisted_beacon_chain.rs index 479e1cd8e..8b9f78dc5 100644 --- a/beacon_node/beacon_chain/src/persisted_beacon_chain.rs +++ b/beacon_node/beacon_chain/src/persisted_beacon_chain.rs @@ -11,7 +11,7 @@ pub const BEACON_CHAIN_DB_KEY: &str = "PERSISTEDBEACONCHAINPERSISTEDBEA"; #[derive(Encode, Decode)] pub struct PersistedBeaconChain { pub canonical_head: CheckPoint, - pub op_pool: PersistedOperationPool, + pub op_pool: PersistedOperationPool, pub genesis_block_root: Hash256, pub state: BeaconState, } diff --git a/beacon_node/beacon_chain/src/test_utils.rs b/beacon_node/beacon_chain/src/test_utils.rs index 19c1d9d15..6242b8a0a 100644 --- a/beacon_node/beacon_chain/src/test_utils.rs +++ b/beacon_node/beacon_chain/src/test_utils.rs @@ -11,7 +11,7 @@ use store::Store; use tree_hash::{SignedRoot, TreeHash}; use types::{ test_utils::TestingBeaconStateBuilder, AggregateSignature, Attestation, - AttestationDataAndCustodyBit, BeaconBlock, BeaconState, Bitfield, ChainSpec, Domain, EthSpec, + AttestationDataAndCustodyBit, BeaconBlock, BeaconState, BitList, ChainSpec, Domain, EthSpec, Hash256, Keypair, RelativeEpoch, SecretKey, Signature, Slot, }; @@ -216,7 +216,7 @@ where mut state: BeaconState, slot: Slot, block_strategy: BlockStrategy, - ) -> (BeaconBlock, BeaconState) { + ) -> (BeaconBlock, BeaconState) { if slot < state.slot { panic!("produce slot cannot be prior to the state slot"); } @@ -302,12 +302,9 @@ where ) .expect("should produce attestation data"); - let mut aggregation_bitfield = Bitfield::new(); - aggregation_bitfield.set(i, true); - aggregation_bitfield.set(committee_size, false); - - let mut custody_bitfield = Bitfield::new(); - custody_bitfield.set(committee_size, false); + let mut aggregation_bits = BitList::with_capacity(committee_size).unwrap(); + aggregation_bits.set(i, true).unwrap(); + let custody_bits = BitList::with_capacity(committee_size).unwrap(); let signature = { let message = AttestationDataAndCustodyBit { @@ -317,7 +314,7 @@ where .tree_hash_root(); let domain = - spec.get_domain(data.target_epoch, Domain::Attestation, fork); + spec.get_domain(data.target.epoch, Domain::Attestation, fork); let mut agg_sig = AggregateSignature::new(); agg_sig.add(&Signature::new( @@ -330,9 +327,9 @@ where }; let attestation = Attestation { - aggregation_bitfield, + aggregation_bits, data, - custody_bitfield, + custody_bits, signature, }; @@ -376,9 +373,9 @@ where let faulty_head = self.extend_chain( faulty_fork_blocks, BlockStrategy::ForkCanonicalChainAt { - previous_slot: Slot::from(initial_head_slot), + previous_slot: initial_head_slot, // `initial_head_slot + 2` means one slot is skipped. - first_slot: Slot::from(initial_head_slot + 2), + first_slot: initial_head_slot + 2, }, AttestationStrategy::SomeValidators(faulty_validators.to_vec()), ); diff --git a/beacon_node/beacon_chain/tests/tests.rs b/beacon_node/beacon_chain/tests/tests.rs index 9a560a15a..babdbe5e1 100644 --- a/beacon_node/beacon_chain/tests/tests.rs +++ b/beacon_node/beacon_chain/tests/tests.rs @@ -93,12 +93,12 @@ fn finalizes_with_full_participation() { "head should be at the expected epoch" ); assert_eq!( - state.current_justified_epoch, + state.current_justified_checkpoint.epoch, state.current_epoch() - 1, "the head should be justified one behind the current epoch" ); assert_eq!( - state.finalized_epoch, + state.finalized_checkpoint.epoch, state.current_epoch() - 2, "the head should be finalized two behind the current epoch" ); @@ -136,12 +136,12 @@ fn finalizes_with_two_thirds_participation() { // included in blocks during that epoch. assert_eq!( - state.current_justified_epoch, + state.current_justified_checkpoint.epoch, state.current_epoch() - 2, "the head should be justified two behind the current epoch" ); assert_eq!( - state.finalized_epoch, + state.finalized_checkpoint.epoch, state.current_epoch() - 4, "the head should be finalized three behind the current epoch" ); @@ -175,11 +175,11 @@ fn does_not_finalize_with_less_than_two_thirds_participation() { "head should be at the expected epoch" ); assert_eq!( - state.current_justified_epoch, 0, + state.current_justified_checkpoint.epoch, 0, "no epoch should have been justified" ); assert_eq!( - state.finalized_epoch, 0, + state.finalized_checkpoint.epoch, 0, "no epoch should have been finalized" ); } @@ -208,11 +208,11 @@ fn does_not_finalize_without_attestation() { "head should be at the expected epoch" ); assert_eq!( - state.current_justified_epoch, 0, + state.current_justified_checkpoint.epoch, 0, "no epoch should have been justified" ); assert_eq!( - state.finalized_epoch, 0, + state.finalized_checkpoint.epoch, 0, "no epoch should have been finalized" ); } @@ -233,10 +233,10 @@ fn roundtrip_operation_pool() { // Add some deposits let rng = &mut XorShiftRng::from_seed([66; 16]); - for _ in 0..rng.gen_range(1, VALIDATOR_COUNT) { + for i in 0..rng.gen_range(1, VALIDATOR_COUNT) { harness .chain - .process_deposit(Deposit::random_for_test(rng)) + .process_deposit(i as u64, Deposit::random_for_test(rng)) .unwrap(); } diff --git a/beacon_node/eth2-libp2p/src/behaviour.rs b/beacon_node/eth2-libp2p/src/behaviour.rs index 9a30a60b9..37e3419a3 100644 --- a/beacon_node/eth2-libp2p/src/behaviour.rs +++ b/beacon_node/eth2-libp2p/src/behaviour.rs @@ -18,31 +18,31 @@ use slog::{o, trace, warn}; use ssz::{ssz_encode, Decode, DecodeError, Encode}; use std::num::NonZeroU32; use std::time::Duration; -use types::{Attestation, BeaconBlock}; +use types::{Attestation, BeaconBlock, EthSpec}; /// Builds the network behaviour that manages the core protocols of eth2. /// This core behaviour is managed by `Behaviour` which adds peer management to all core /// behaviours. #[derive(NetworkBehaviour)] -#[behaviour(out_event = "BehaviourEvent", poll_method = "poll")] -pub struct Behaviour { +#[behaviour(out_event = "BehaviourEvent", poll_method = "poll")] +pub struct Behaviour { /// The routing pub-sub mechanism for eth2. gossipsub: Gossipsub, /// The serenity RPC specified in the wire-0 protocol. - serenity_rpc: RPC, + serenity_rpc: RPC, /// Keep regular connection to peers and disconnect if absent. ping: Ping, /// Kademlia for peer discovery. discovery: Discovery, #[behaviour(ignore)] /// The events generated by this behaviour to be consumed in the swarm poll. - events: Vec, + events: Vec>, /// Logger for behaviour actions. #[behaviour(ignore)] log: slog::Logger, } -impl Behaviour { +impl Behaviour { pub fn new( local_key: &Keypair, net_conf: &NetworkConfig, @@ -68,8 +68,8 @@ impl Behaviour { } // Implement the NetworkBehaviourEventProcess trait so that we can derive NetworkBehaviour for Behaviour -impl NetworkBehaviourEventProcess - for Behaviour +impl NetworkBehaviourEventProcess + for Behaviour { fn inject_event(&mut self, event: GossipsubEvent) { match event { @@ -101,8 +101,8 @@ impl NetworkBehaviourEventProcess NetworkBehaviourEventProcess - for Behaviour +impl NetworkBehaviourEventProcess + for Behaviour { fn inject_event(&mut self, event: RPCMessage) { match event { @@ -119,19 +119,19 @@ impl NetworkBehaviourEventProcess NetworkBehaviourEventProcess - for Behaviour +impl NetworkBehaviourEventProcess + for Behaviour { fn inject_event(&mut self, _event: PingEvent) { // not interested in ping responses at the moment. } } -impl Behaviour { +impl Behaviour { /// Consumes the events list when polled. fn poll( &mut self, - ) -> Async> { + ) -> Async>> { if !self.events.is_empty() { return Async::Ready(NetworkBehaviourAction::GenerateEvent(self.events.remove(0))); } @@ -140,8 +140,8 @@ impl Behaviour { } } -impl NetworkBehaviourEventProcess - for Behaviour +impl NetworkBehaviourEventProcess + for Behaviour { fn inject_event(&mut self, _event: Discv5Event) { // discv5 has no events to inject @@ -149,7 +149,7 @@ impl NetworkBehaviourEventProcess Behaviour { +impl Behaviour { /* Pubsub behaviour functions */ /// Subscribes to a gossipsub topic. @@ -158,7 +158,7 @@ impl Behaviour { } /// Publishes a message on the pubsub (gossipsub) behaviour. - pub fn publish(&mut self, topics: Vec, message: PubsubMessage) { + pub fn publish(&mut self, topics: Vec, message: PubsubMessage) { let message_bytes = ssz_encode(&message); for topic in topics { self.gossipsub.publish(topic, message_bytes.clone()); @@ -179,28 +179,28 @@ impl Behaviour { } /// The types of events than can be obtained from polling the behaviour. -pub enum BehaviourEvent { +pub enum BehaviourEvent { RPC(PeerId, RPCEvent), PeerDialed(PeerId), PeerDisconnected(PeerId), GossipMessage { source: PeerId, topics: Vec, - message: Box, + message: Box>, }, } /// Messages that are passed to and from the pubsub (Gossipsub) behaviour. #[derive(Debug, Clone, PartialEq)] -pub enum PubsubMessage { +pub enum PubsubMessage { /// Gossipsub message providing notification of a new block. - Block(BeaconBlock), + Block(BeaconBlock), /// Gossipsub message providing notification of a new attestation. - Attestation(Attestation), + Attestation(Attestation), } //TODO: Correctly encode/decode enums. Prefixing with integer for now. -impl Encode for PubsubMessage { +impl Encode for PubsubMessage { fn is_ssz_fixed_len() -> bool { false } @@ -229,7 +229,7 @@ impl Encode for PubsubMessage { } } -impl Decode for PubsubMessage { +impl Decode for PubsubMessage { fn is_ssz_fixed_len() -> bool { false } @@ -264,7 +264,9 @@ mod test { #[test] fn ssz_encoding() { - let original = PubsubMessage::Block(BeaconBlock::empty(&MainnetEthSpec::default_spec())); + let original = PubsubMessage::Block(BeaconBlock::::empty( + &MainnetEthSpec::default_spec(), + )); let encoded = ssz_encode(&original); diff --git a/beacon_node/eth2-libp2p/src/discovery.rs b/beacon_node/eth2-libp2p/src/discovery.rs index 8523d694a..c2f008756 100644 --- a/beacon_node/eth2-libp2p/src/discovery.rs +++ b/beacon_node/eth2-libp2p/src/discovery.rs @@ -271,7 +271,7 @@ fn load_enr( // Note: Discovery should update the ENR record's IP to the external IP as seen by the // majority of our peers. let mut local_enr = EnrBuilder::new() - .ip(config.discovery_address.into()) + .ip(config.discovery_address) .tcp(config.libp2p_port) .udp(config.discovery_port) .build(&local_key) @@ -318,7 +318,7 @@ fn load_enr( Ok(local_enr) } -fn save_enr_to_disc(dir: &Path, enr: &Enr, log: &slog::Logger) -> () { +fn save_enr_to_disc(dir: &Path, enr: &Enr, log: &slog::Logger) { let _ = std::fs::create_dir_all(dir); match File::create(dir.join(Path::new(ENR_FILENAME))) .and_then(|mut f| f.write_all(&enr.to_base64().as_bytes())) diff --git a/beacon_node/eth2-libp2p/src/rpc/codec/base.rs b/beacon_node/eth2-libp2p/src/rpc/codec/base.rs index 639a8a730..a8a239867 100644 --- a/beacon_node/eth2-libp2p/src/rpc/codec/base.rs +++ b/beacon_node/eth2-libp2p/src/rpc/codec/base.rs @@ -65,7 +65,7 @@ where dst.clear(); dst.reserve(1); dst.put_u8(item.as_u8()); - return self.inner.encode(item, dst); + self.inner.encode(item, dst) } } @@ -120,16 +120,14 @@ where if RPCErrorResponse::is_response(response_code) { // decode an actual response - return self - .inner + self.inner .decode(src) - .map(|r| r.map(|resp| RPCErrorResponse::Success(resp))); + .map(|r| r.map(RPCErrorResponse::Success)) } else { // decode an error - return self - .inner + self.inner .decode_error(src) - .map(|r| r.map(|resp| RPCErrorResponse::from_error(response_code, resp))); + .map(|r| r.map(|resp| RPCErrorResponse::from_error(response_code, resp))) } } } diff --git a/beacon_node/eth2-libp2p/src/rpc/handler.rs b/beacon_node/eth2-libp2p/src/rpc/handler.rs index df8769122..4e796f6fb 100644 --- a/beacon_node/eth2-libp2p/src/rpc/handler.rs +++ b/beacon_node/eth2-libp2p/src/rpc/handler.rs @@ -2,6 +2,7 @@ use super::methods::{RPCErrorResponse, RPCResponse, RequestId}; use super::protocol::{RPCError, RPCProtocol, RPCRequest}; use super::RPCEvent; use crate::rpc::protocol::{InboundFramed, OutboundFramed}; +use core::marker::PhantomData; use fnv::FnvHashMap; use futures::prelude::*; use libp2p::core::protocols_handler::{ @@ -11,14 +12,16 @@ use libp2p::core::upgrade::{InboundUpgrade, OutboundUpgrade}; use smallvec::SmallVec; use std::time::{Duration, Instant}; use tokio_io::{AsyncRead, AsyncWrite}; +use types::EthSpec; /// The time (in seconds) before a substream that is awaiting a response times out. pub const RESPONSE_TIMEOUT: u64 = 9; /// Implementation of `ProtocolsHandler` for the RPC protocol. -pub struct RPCHandler +pub struct RPCHandler where TSubstream: AsyncRead + AsyncWrite, + E: EthSpec, { /// The upgrade for inbound substreams. listen_protocol: SubstreamProtocol, @@ -52,6 +55,9 @@ where /// After the given duration has elapsed, an inactive connection will shutdown. inactive_timeout: Duration, + + /// Phantom EthSpec. + _phantom: PhantomData, } /// An outbound substream is waiting a response from the user. @@ -84,9 +90,10 @@ where }, } -impl RPCHandler +impl RPCHandler where TSubstream: AsyncRead + AsyncWrite, + E: EthSpec, { pub fn new( listen_protocol: SubstreamProtocol, @@ -104,6 +111,7 @@ where max_dial_negotiated: 8, keep_alive: KeepAlive::Yes, inactive_timeout, + _phantom: PhantomData, } } @@ -137,18 +145,20 @@ where } } -impl Default for RPCHandler +impl Default for RPCHandler where TSubstream: AsyncRead + AsyncWrite, + E: EthSpec, { fn default() -> Self { RPCHandler::new(SubstreamProtocol::new(RPCProtocol), Duration::from_secs(30)) } } -impl ProtocolsHandler for RPCHandler +impl ProtocolsHandler for RPCHandler where TSubstream: AsyncRead + AsyncWrite, + E: EthSpec, { type InEvent = RPCEvent; type OutEvent = RPCEvent; @@ -276,13 +286,8 @@ where } // remove any streams that have expired - self.waiting_substreams.retain(|_k, waiting_stream| { - if Instant::now() > waiting_stream.timeout { - false - } else { - true - } - }); + self.waiting_substreams + .retain(|_k, waiting_stream| Instant::now() <= waiting_stream.timeout); // drive streams that need to be processed for n in (0..self.substreams.len()).rev() { @@ -334,7 +339,7 @@ where } Err(e) => { return Ok(Async::Ready(ProtocolsHandlerEvent::Custom( - RPCEvent::Error(rpc_event.id(), e.into()), + RPCEvent::Error(rpc_event.id(), e), ))) } }, diff --git a/beacon_node/eth2-libp2p/src/rpc/methods.rs b/beacon_node/eth2-libp2p/src/rpc/methods.rs index 8cc336395..2e5a9a7ff 100644 --- a/beacon_node/eth2-libp2p/src/rpc/methods.rs +++ b/beacon_node/eth2-libp2p/src/rpc/methods.rs @@ -2,7 +2,7 @@ use ssz::{impl_decode_via_from, impl_encode_via_from}; use ssz_derive::{Decode, Encode}; -use types::{BeaconBlockBody, Epoch, Hash256, Slot}; +use types::{BeaconBlockBody, Epoch, EthSpec, Hash256, Slot}; /* Request/Response data structures for RPC methods */ @@ -154,11 +154,11 @@ pub struct BeaconBlockBodiesResponse { } /// The decoded version of `BeaconBlockBodiesResponse` which is expected in `SimpleSync`. -pub struct DecodedBeaconBlockBodiesResponse { +pub struct DecodedBeaconBlockBodiesResponse { /// The list of hashes sent in the request to get this response. pub block_roots: Vec, /// The valid decoded block bodies. - pub block_bodies: Vec, + pub block_bodies: Vec>, } /// Request values for tree hashes which yield a blocks `state_root`. diff --git a/beacon_node/eth2-libp2p/src/rpc/mod.rs b/beacon_node/eth2-libp2p/src/rpc/mod.rs index f1f341908..88060e602 100644 --- a/beacon_node/eth2-libp2p/src/rpc/mod.rs +++ b/beacon_node/eth2-libp2p/src/rpc/mod.rs @@ -16,6 +16,7 @@ pub use protocol::{RPCError, RPCProtocol, RPCRequest}; use slog::o; use std::marker::PhantomData; use tokio::io::{AsyncRead, AsyncWrite}; +use types::EthSpec; pub(crate) mod codec; mod handler; @@ -49,16 +50,16 @@ impl RPCEvent { /// Implements the libp2p `NetworkBehaviour` trait and therefore manages network-level /// logic. -pub struct RPC { +pub struct RPC { /// Queue of events to processed. events: Vec>, /// Pins the generic substream. - marker: PhantomData, + marker: PhantomData<(TSubstream, E)>, /// Slog logger for RPC behaviour. _log: slog::Logger, } -impl RPC { +impl RPC { pub fn new(log: &slog::Logger) -> Self { let log = log.new(o!("Service" => "Libp2p-RPC")); RPC { @@ -79,11 +80,12 @@ impl RPC { } } -impl NetworkBehaviour for RPC +impl NetworkBehaviour for RPC where TSubstream: AsyncRead + AsyncWrite, + E: EthSpec, { - type ProtocolsHandler = RPCHandler; + type ProtocolsHandler = RPCHandler; type OutEvent = RPCMessage; fn new_handler(&mut self) -> Self::ProtocolsHandler { diff --git a/beacon_node/eth2-libp2p/src/service.rs b/beacon_node/eth2-libp2p/src/service.rs index a8c70a3da..05ae9e473 100644 --- a/beacon_node/eth2-libp2p/src/service.rs +++ b/beacon_node/eth2-libp2p/src/service.rs @@ -21,24 +21,25 @@ use std::fs::File; use std::io::prelude::*; use std::io::{Error, ErrorKind}; use std::time::Duration; +use types::EthSpec; type Libp2pStream = Boxed<(PeerId, StreamMuxerBox), Error>; -type Libp2pBehaviour = Behaviour>; +type Libp2pBehaviour = Behaviour, E>; const NETWORK_KEY_FILENAME: &str = "key"; /// The configuration and state of the libp2p components for the beacon node. -pub struct Service { +pub struct Service { /// The libp2p Swarm handler. //TODO: Make this private - pub swarm: Swarm, + pub swarm: Swarm>, /// This node's PeerId. _local_peer_id: PeerId, /// The libp2p logger handle. pub log: slog::Logger, } -impl Service { +impl Service { pub fn new(config: NetworkConfig, log: slog::Logger) -> error::Result { debug!(log, "Network-libp2p Service starting"); @@ -103,8 +104,8 @@ impl Service { } } -impl Stream for Service { - type Item = Libp2pEvent; +impl Stream for Service { + type Item = Libp2pEvent; type Error = crate::error::Error; fn poll(&mut self) -> Poll, Self::Error> { @@ -178,7 +179,7 @@ fn build_transport(local_private_key: Keypair) -> Boxed<(PeerId, StreamMuxerBox) } /// Events that can be obtained from polling the Libp2p Service. -pub enum Libp2pEvent { +pub enum Libp2pEvent { /// An RPC response request has been received on the swarm. RPC(PeerId, RPCEvent), /// Initiated the connection to a new peer. @@ -189,7 +190,7 @@ pub enum Libp2pEvent { PubsubMessage { source: PeerId, topics: Vec, - message: Box, + message: Box>, }, } diff --git a/beacon_node/http_server/src/lib.rs b/beacon_node/http_server/src/lib.rs index f1d006a5b..b20e43de8 100644 --- a/beacon_node/http_server/src/lib.rs +++ b/beacon_node/http_server/src/lib.rs @@ -76,7 +76,7 @@ pub fn create_iron_http_server( pub fn start_service( config: &HttpServerConfig, executor: &TaskExecutor, - _network_chan: mpsc::UnboundedSender, + _network_chan: mpsc::UnboundedSender>, beacon_chain: Arc>, db_path: PathBuf, metrics_registry: Registry, diff --git a/beacon_node/http_server/src/metrics/local_metrics.rs b/beacon_node/http_server/src/metrics/local_metrics.rs index 7a52d7e45..b342cca81 100644 --- a/beacon_node/http_server/src/metrics/local_metrics.rs +++ b/beacon_node/http_server/src/metrics/local_metrics.rs @@ -117,22 +117,23 @@ impl LocalMetrics { beacon_chain .head() .beacon_state - .current_justified_root + .current_justified_checkpoint + .root .to_low_u64_le() as i64, ); self.finalized_beacon_block_root.set( beacon_chain .head() .beacon_state - .finalized_root + .finalized_checkpoint + .root .to_low_u64_le() as i64, ); - self.validator_count - .set(state.validator_registry.len() as i64); + self.validator_count.set(state.validators.len() as i64); self.justified_epoch - .set(state.current_justified_epoch.as_u64() as i64); + .set(state.current_justified_checkpoint.epoch.as_u64() as i64); self.finalized_epoch - .set(state.finalized_epoch.as_u64() as i64); + .set(state.finalized_checkpoint.epoch.as_u64() as i64); if SHOULD_SUM_VALIDATOR_BALANCES { self.validator_balances_sum .set(state.balances.iter().sum::() as i64); diff --git a/beacon_node/network/src/message_handler.rs b/beacon_node/network/src/message_handler.rs index 4e510094f..eaddce533 100644 --- a/beacon_node/network/src/message_handler.rs +++ b/beacon_node/network/src/message_handler.rs @@ -14,7 +14,7 @@ use slog::{debug, warn}; use ssz::{Decode, DecodeError}; use std::sync::Arc; use tokio::sync::mpsc; -use types::BeaconBlockHeader; +use types::{BeaconBlockHeader, EthSpec}; /// Handles messages received from the network and client and organises syncing. pub struct MessageHandler { @@ -23,14 +23,14 @@ pub struct MessageHandler { /// The syncing framework. sync: SimpleSync, /// The context required to send messages to, and process messages from peers. - network_context: NetworkContext, + network_context: NetworkContext, /// The `MessageHandler` logger. log: slog::Logger, } /// Types of messages the handler can receive. #[derive(Debug)] -pub enum HandlerMessage { +pub enum HandlerMessage { /// We have initiated a connection to a new peer. PeerDialed(PeerId), /// Peer has disconnected, @@ -38,17 +38,17 @@ pub enum HandlerMessage { /// An RPC response/request has been received. RPC(PeerId, RPCEvent), /// A gossip message has been received. - PubsubMessage(PeerId, Box), + PubsubMessage(PeerId, Box>), } impl MessageHandler { /// Initializes and runs the MessageHandler. pub fn spawn( beacon_chain: Arc>, - network_send: mpsc::UnboundedSender, + network_send: mpsc::UnboundedSender>, executor: &tokio::runtime::TaskExecutor, log: slog::Logger, - ) -> error::Result> { + ) -> error::Result>> { debug!(log, "Service starting"); let (handler_send, handler_recv) = mpsc::unbounded_channel(); @@ -78,7 +78,7 @@ impl MessageHandler { } /// Handle all messages incoming from the network service. - fn handle_message(&mut self, message: HandlerMessage) { + fn handle_message(&mut self, message: HandlerMessage) { match message { // we have initiated a connection to a peer HandlerMessage::PeerDialed(peer_id) => { @@ -222,7 +222,7 @@ impl MessageHandler { fn decode_block_bodies( &self, bodies_response: BeaconBlockBodiesResponse, - ) -> Result { + ) -> Result, DecodeError> { //TODO: Implement faster block verification before decoding entirely let block_bodies = Vec::from_ssz_bytes(&bodies_response.block_bodies)?; Ok(DecodedBeaconBlockBodiesResponse { @@ -249,7 +249,7 @@ impl MessageHandler { } /// Handle RPC messages - fn handle_gossip(&mut self, peer_id: PeerId, gossip_message: PubsubMessage) { + fn handle_gossip(&mut self, peer_id: PeerId, gossip_message: PubsubMessage) { match gossip_message { PubsubMessage::Block(message) => { let _should_forward_on = @@ -265,15 +265,15 @@ impl MessageHandler { } // TODO: RPC Rewrite makes this struct fairly pointless -pub struct NetworkContext { +pub struct NetworkContext { /// The network channel to relay messages to the Network service. - network_send: mpsc::UnboundedSender, + network_send: mpsc::UnboundedSender>, /// The `MessageHandler` logger. log: slog::Logger, } -impl NetworkContext { - pub fn new(network_send: mpsc::UnboundedSender, log: slog::Logger) -> Self { +impl NetworkContext { + pub fn new(network_send: mpsc::UnboundedSender>, log: slog::Logger) -> Self { Self { network_send, log } } diff --git a/beacon_node/network/src/service.rs b/beacon_node/network/src/service.rs index a771f8add..e78714409 100644 --- a/beacon_node/network/src/service.rs +++ b/beacon_node/network/src/service.rs @@ -2,6 +2,7 @@ use crate::error; use crate::message_handler::{HandlerMessage, MessageHandler}; use crate::NetworkConfig; use beacon_chain::{BeaconChain, BeaconChainTypes}; +use core::marker::PhantomData; use eth2_libp2p::Service as LibP2PService; use eth2_libp2p::Topic; use eth2_libp2p::{Libp2pEvent, PeerId}; @@ -10,16 +11,16 @@ use futures::prelude::*; use futures::Stream; use parking_lot::Mutex; use slog::{debug, info, o, trace}; -use std::marker::PhantomData; use std::sync::Arc; use tokio::runtime::TaskExecutor; use tokio::sync::{mpsc, oneshot}; +use types::EthSpec; /// Service that handles communication between internal services and the eth2_libp2p network service. pub struct Service { - libp2p_service: Arc>, + libp2p_service: Arc>>, _libp2p_exit: oneshot::Sender<()>, - _network_send: mpsc::UnboundedSender, + _network_send: mpsc::UnboundedSender>, _phantom: PhantomData, //message_handler: MessageHandler, //message_handler_send: Sender } @@ -30,9 +31,9 @@ impl Service { config: &NetworkConfig, executor: &TaskExecutor, log: slog::Logger, - ) -> error::Result<(Arc, mpsc::UnboundedSender)> { + ) -> error::Result<(Arc, mpsc::UnboundedSender>)> { // build the network channel - let (network_send, network_recv) = mpsc::unbounded_channel::(); + let (network_send, network_recv) = mpsc::unbounded_channel::>(); // launch message handler thread let message_handler_log = log.new(o!("Service" => "MessageHandler")); let message_handler_send = MessageHandler::spawn( @@ -64,15 +65,15 @@ impl Service { Ok((Arc::new(network_service), network_send)) } - pub fn libp2p_service(&self) -> Arc> { + pub fn libp2p_service(&self) -> Arc>> { self.libp2p_service.clone() } } -fn spawn_service( - libp2p_service: Arc>, - network_recv: mpsc::UnboundedReceiver, - message_handler_send: mpsc::UnboundedSender, +fn spawn_service( + libp2p_service: Arc>>, + network_recv: mpsc::UnboundedReceiver>, + message_handler_send: mpsc::UnboundedSender>, executor: &TaskExecutor, log: slog::Logger, ) -> error::Result> { @@ -98,10 +99,10 @@ fn spawn_service( } //TODO: Potentially handle channel errors -fn network_service( - libp2p_service: Arc>, - mut network_recv: mpsc::UnboundedReceiver, - mut message_handler_send: mpsc::UnboundedSender, +fn network_service( + libp2p_service: Arc>>, + mut network_recv: mpsc::UnboundedReceiver>, + mut message_handler_send: mpsc::UnboundedSender>, log: slog::Logger, ) -> impl futures::Future { futures::future::poll_fn(move || -> Result<_, eth2_libp2p::error::Error> { @@ -175,14 +176,14 @@ fn network_service( /// Types of messages that the network service can receive. #[derive(Debug)] -pub enum NetworkMessage { +pub enum NetworkMessage { /// Send a message to libp2p service. //TODO: Define typing for messages across the wire Send(PeerId, OutgoingMessage), /// Publish a message to pubsub mechanism. Publish { topics: Vec, - message: Box, + message: Box>, }, } diff --git a/beacon_node/network/src/sync/import_queue.rs b/beacon_node/network/src/sync/import_queue.rs index 504add4f8..5503ed64f 100644 --- a/beacon_node/network/src/sync/import_queue.rs +++ b/beacon_node/network/src/sync/import_queue.rs @@ -6,7 +6,7 @@ use std::collections::HashMap; use std::sync::Arc; use std::time::{Duration, Instant}; use tree_hash::TreeHash; -use types::{BeaconBlock, BeaconBlockBody, BeaconBlockHeader, Hash256, Slot}; +use types::{BeaconBlock, BeaconBlockBody, BeaconBlockHeader, EthSpec, Hash256, Slot}; /// Provides a queue for fully and partially built `BeaconBlock`s. /// @@ -23,7 +23,7 @@ use types::{BeaconBlock, BeaconBlockBody, BeaconBlockHeader, Hash256, Slot}; pub struct ImportQueue { pub chain: Arc>, /// Partially imported blocks, keyed by the root of `BeaconBlockBody`. - partials: HashMap, + partials: HashMap>, /// Time before a queue entry is considered state. pub stale_time: Duration, /// Logging @@ -50,7 +50,10 @@ impl ImportQueue { /// /// Returns an Enum with a `PartialBeaconBlockCompletion`. /// Does not remove the `block_root` from the `import_queue`. - pub fn attempt_complete_block(&self, block_root: Hash256) -> PartialBeaconBlockCompletion { + pub fn attempt_complete_block( + &self, + block_root: Hash256, + ) -> PartialBeaconBlockCompletion { if let Some(partial) = self.partials.get(&block_root) { partial.attempt_complete() } else { @@ -60,7 +63,7 @@ impl ImportQueue { /// Removes the first `PartialBeaconBlock` with a matching `block_root`, returning the partial /// if it exists. - pub fn remove(&mut self, block_root: Hash256) -> Option { + pub fn remove(&mut self, block_root: Hash256) -> Option> { self.partials.remove(&block_root) } @@ -141,11 +144,11 @@ impl ImportQueue { for header in headers { let block_root = Hash256::from_slice(&header.canonical_root()[..]); - if self.chain_has_not_seen_block(&block_root) { - if !self.insert_header(block_root, header, sender.clone()) { - // If a body is empty - required_bodies.push(block_root); - } + if self.chain_has_not_seen_block(&block_root) + && !self.insert_header(block_root, header, sender.clone()) + { + // If a body is empty + required_bodies.push(block_root); } } @@ -157,7 +160,7 @@ impl ImportQueue { /// If there is no `header` for the `body`, the body is simply discarded. pub fn enqueue_bodies( &mut self, - bodies: Vec, + bodies: Vec>, sender: PeerId, ) -> Option { let mut last_block_hash = None; @@ -168,7 +171,7 @@ impl ImportQueue { last_block_hash } - pub fn enqueue_full_blocks(&mut self, blocks: Vec, sender: PeerId) { + pub fn enqueue_full_blocks(&mut self, blocks: Vec>, sender: PeerId) { for block in blocks { self.insert_full_block(block, sender.clone()); } @@ -211,13 +214,17 @@ impl ImportQueue { /// If the body already existed, the `inserted` time is set to `now`. /// /// Returns the block hash of the inserted body - fn insert_body(&mut self, body: BeaconBlockBody, sender: PeerId) -> Option { + fn insert_body( + &mut self, + body: BeaconBlockBody, + sender: PeerId, + ) -> Option { let body_root = Hash256::from_slice(&body.tree_hash_root()[..]); let mut last_root = None; self.partials.iter_mut().for_each(|(root, mut p)| { if let Some(header) = &mut p.header { - if body_root == header.block_body_root { + if body_root == header.body_root { p.inserted = Instant::now(); p.body = Some(body.clone()); p.sender = sender.clone(); @@ -232,7 +239,7 @@ impl ImportQueue { /// Updates an existing `partial` with the completed block, or adds a new (complete) partial. /// /// If the partial already existed, the `inserted` time is set to `now`. - fn insert_full_block(&mut self, block: BeaconBlock, sender: PeerId) { + fn insert_full_block(&mut self, block: BeaconBlock, sender: PeerId) { let block_root = Hash256::from_slice(&block.canonical_root()[..]); let partial = PartialBeaconBlock { @@ -254,12 +261,12 @@ impl ImportQueue { /// Individual components of a `BeaconBlock`, potentially all that are required to form a full /// `BeaconBlock`. #[derive(Clone, Debug)] -pub struct PartialBeaconBlock { +pub struct PartialBeaconBlock { pub slot: Slot, /// `BeaconBlock` root. pub block_root: Hash256, pub header: Option, - pub body: Option, + pub body: Option>, /// The instant at which this record was created or last meaningfully modified. Used to /// determine if an entry is stale and should be removed. pub inserted: Instant, @@ -267,11 +274,11 @@ pub struct PartialBeaconBlock { pub sender: PeerId, } -impl PartialBeaconBlock { +impl PartialBeaconBlock { /// Attempts to build a block. /// - /// Does not consume the `PartialBeaconBlock`. - pub fn attempt_complete(&self) -> PartialBeaconBlockCompletion { + /// Does not comsume the `PartialBeaconBlock`. + pub fn attempt_complete(&self) -> PartialBeaconBlockCompletion { if self.header.is_none() { PartialBeaconBlockCompletion::MissingHeader(self.slot) } else if self.body.is_none() { @@ -288,9 +295,9 @@ impl PartialBeaconBlock { } /// The result of trying to convert a `BeaconBlock` into a `PartialBeaconBlock`. -pub enum PartialBeaconBlockCompletion { +pub enum PartialBeaconBlockCompletion { /// The partial contains a valid BeaconBlock. - Complete(BeaconBlock), + Complete(BeaconBlock), /// The partial does not exist. MissingRoot, /// The partial contains a `BeaconBlockRoot` but no `BeaconBlockHeader`. diff --git a/beacon_node/network/src/sync/simple_sync.rs b/beacon_node/network/src/sync/simple_sync.rs index b981d2040..ac001415c 100644 --- a/beacon_node/network/src/sync/simple_sync.rs +++ b/beacon_node/network/src/sync/simple_sync.rs @@ -123,7 +123,7 @@ impl SimpleSync { /// Handle the connection of a new peer. /// /// Sends a `Hello` message to the peer. - pub fn on_connect(&self, peer_id: PeerId, network: &mut NetworkContext) { + pub fn on_connect(&self, peer_id: PeerId, network: &mut NetworkContext) { info!(self.log, "PeerConnected"; "peer" => format!("{:?}", peer_id)); network.send_rpc_request(peer_id, RPCRequest::Hello(hello_message(&self.chain))); @@ -137,7 +137,7 @@ impl SimpleSync { peer_id: PeerId, request_id: RequestId, hello: HelloMessage, - network: &mut NetworkContext, + network: &mut NetworkContext, ) { debug!(self.log, "HelloRequest"; "peer" => format!("{:?}", peer_id)); @@ -156,7 +156,7 @@ impl SimpleSync { &mut self, peer_id: PeerId, hello: HelloMessage, - network: &mut NetworkContext, + network: &mut NetworkContext, ) { debug!(self.log, "HelloResponse"; "peer" => format!("{:?}", peer_id)); @@ -171,7 +171,7 @@ impl SimpleSync { &mut self, peer_id: PeerId, hello: HelloMessage, - network: &mut NetworkContext, + network: &mut NetworkContext, ) { let remote = PeerSyncInfo::from(hello); let local = PeerSyncInfo::from(&self.chain); @@ -188,8 +188,8 @@ impl SimpleSync { network.disconnect(peer_id.clone(), GoodbyeReason::IrrelevantNetwork); } else if remote.latest_finalized_epoch <= local.latest_finalized_epoch - && remote.latest_finalized_root != self.chain.spec.zero_hash - && local.latest_finalized_root != self.chain.spec.zero_hash + && remote.latest_finalized_root != Hash256::zero() + && local.latest_finalized_root != Hash256::zero() && (self.root_at_slot(start_slot(remote.latest_finalized_epoch)) != Some(remote.latest_finalized_root)) { @@ -226,7 +226,7 @@ impl SimpleSync { } else if self .chain .store - .exists::(&remote.best_root) + .exists::>(&remote.best_root) .unwrap_or_else(|_| false) { // If the node's best-block is already known to us, we have nothing to request. @@ -278,7 +278,7 @@ impl SimpleSync { peer_id: PeerId, request_id: RequestId, req: BeaconBlockRootsRequest, - network: &mut NetworkContext, + network: &mut NetworkContext, ) { debug!( self.log, @@ -323,7 +323,7 @@ impl SimpleSync { &mut self, peer_id: PeerId, res: BeaconBlockRootsResponse, - network: &mut NetworkContext, + network: &mut NetworkContext, ) { debug!( self.log, @@ -387,7 +387,7 @@ impl SimpleSync { peer_id: PeerId, request_id: RequestId, req: BeaconBlockHeadersRequest, - network: &mut NetworkContext, + network: &mut NetworkContext, ) { debug!( self.log, @@ -416,7 +416,11 @@ impl SimpleSync { .into_iter() .step_by(req.skip_slots as usize + 1) .filter_map(|root| { - let block = self.chain.store.get::(&root).ok()?; + let block = self + .chain + .store + .get::>(&root) + .ok()?; Some(block?.block_header()) }) .collect(); @@ -436,7 +440,7 @@ impl SimpleSync { &mut self, peer_id: PeerId, headers: Vec, - network: &mut NetworkContext, + network: &mut NetworkContext, ) { debug!( self.log, @@ -468,13 +472,13 @@ impl SimpleSync { peer_id: PeerId, request_id: RequestId, req: BeaconBlockBodiesRequest, - network: &mut NetworkContext, + network: &mut NetworkContext, ) { - let block_bodies: Vec = req + let block_bodies: Vec> = req .block_roots .iter() .filter_map(|root| { - if let Ok(Some(block)) = self.chain.store.get::(root) { + if let Ok(Some(block)) = self.chain.store.get::>(root) { Some(block.body) } else { debug!( @@ -513,8 +517,8 @@ impl SimpleSync { pub fn on_beacon_block_bodies_response( &mut self, peer_id: PeerId, - res: DecodedBeaconBlockBodiesResponse, - network: &mut NetworkContext, + res: DecodedBeaconBlockBodiesResponse, + network: &mut NetworkContext, ) { debug!( self.log, @@ -531,12 +535,11 @@ impl SimpleSync { // Attempt to process all received bodies by recursively processing the latest block if let Some(root) = last_root { - match self.attempt_process_partial_block(peer_id, root, network, &"rpc") { - Some(BlockProcessingOutcome::Processed { block_root: _ }) => { - // If processing is successful remove from `import_queue` - self.import_queue.remove(root); - } - _ => {} + if let Some(BlockProcessingOutcome::Processed { .. }) = + self.attempt_process_partial_block(peer_id, root, network, &"rpc") + { + // If processing is successful remove from `import_queue` + self.import_queue.remove(root); } } } @@ -553,8 +556,8 @@ impl SimpleSync { pub fn on_block_gossip( &mut self, peer_id: PeerId, - block: BeaconBlock, - network: &mut NetworkContext, + block: BeaconBlock, + network: &mut NetworkContext, ) -> bool { if let Some(outcome) = self.process_block(peer_id.clone(), block.clone(), network, &"gossip") @@ -577,7 +580,8 @@ impl SimpleSync { .chain .head() .beacon_state - .finalized_epoch + .finalized_checkpoint + .epoch .start_slot(T::EthSpec::slots_per_epoch()); self.request_block_roots( peer_id, @@ -622,8 +626,8 @@ impl SimpleSync { pub fn on_attestation_gossip( &mut self, _peer_id: PeerId, - msg: Attestation, - _network: &mut NetworkContext, + msg: Attestation, + _network: &mut NetworkContext, ) { match self.chain.process_attestation(msg) { Ok(()) => info!(self.log, "ImportedAttestation"; "source" => "gossip"), @@ -638,7 +642,7 @@ impl SimpleSync { &mut self, peer_id: PeerId, req: BeaconBlockRootsRequest, - network: &mut NetworkContext, + network: &mut NetworkContext, ) { // Potentially set state to sync. if self.state == SyncState::Idle && req.count > SLOT_IMPORT_TOLERANCE { @@ -662,7 +666,7 @@ impl SimpleSync { &mut self, peer_id: PeerId, req: BeaconBlockHeadersRequest, - network: &mut NetworkContext, + network: &mut NetworkContext, ) { debug!( self.log, @@ -679,7 +683,7 @@ impl SimpleSync { &mut self, peer_id: PeerId, req: BeaconBlockBodiesRequest, - network: &mut NetworkContext, + network: &mut NetworkContext, ) { debug!( self.log, @@ -715,7 +719,7 @@ impl SimpleSync { &mut self, peer_id: PeerId, block_root: Hash256, - network: &mut NetworkContext, + network: &mut NetworkContext, source: &str, ) -> Option { match self.import_queue.attempt_complete_block(block_root) { @@ -807,8 +811,8 @@ impl SimpleSync { fn process_block( &mut self, peer_id: PeerId, - block: BeaconBlock, - network: &mut NetworkContext, + block: BeaconBlock, + network: &mut NetworkContext, source: &str, ) -> Option { let processing_result = self.chain.process_block(block.clone()); @@ -836,19 +840,18 @@ impl SimpleSync { ); // If the parent is in the `import_queue` attempt to complete it then process it. - match self.attempt_process_partial_block(peer_id, parent, network, source) { + // All other cases leave `parent` in `import_queue` and return original outcome. + if let Some(BlockProcessingOutcome::Processed { .. }) = + self.attempt_process_partial_block(peer_id, parent, network, source) + { // If processing parent is successful, re-process block and remove parent from queue - Some(BlockProcessingOutcome::Processed { block_root: _ }) => { - self.import_queue.remove(parent); + self.import_queue.remove(parent); - // Attempt to process `block` again - match self.chain.process_block(block) { - Ok(outcome) => return Some(outcome), - Err(_) => return None, - } + // Attempt to process `block` again + match self.chain.process_block(block) { + Ok(outcome) => return Some(outcome), + Err(_) => return None, } - // All other cases leave `parent` in `import_queue` and return original outcome. - _ => {} } } BlockProcessingOutcome::FutureSlot { @@ -913,9 +916,9 @@ fn hello_message(beacon_chain: &BeaconChain) -> HelloMes HelloMessage { //TODO: Correctly define the chain/network id network_id: spec.chain_id, - chain_id: spec.chain_id as u64, - latest_finalized_root: state.finalized_root, - latest_finalized_epoch: state.finalized_epoch, + chain_id: u64::from(spec.chain_id), + latest_finalized_root: state.finalized_checkpoint.root, + latest_finalized_epoch: state.finalized_checkpoint.epoch, best_root: beacon_chain.head().beacon_block_root, best_slot: state.slot, } diff --git a/beacon_node/rpc/src/attestation.rs b/beacon_node/rpc/src/attestation.rs index cedd184e3..5ea8368fd 100644 --- a/beacon_node/rpc/src/attestation.rs +++ b/beacon_node/rpc/src/attestation.rs @@ -19,7 +19,7 @@ use types::Attestation; #[derive(Clone)] pub struct AttestationServiceInstance { pub chain: Arc>, - pub network_chan: mpsc::UnboundedSender, + pub network_chan: mpsc::UnboundedSender>, pub log: slog::Logger, } diff --git a/beacon_node/rpc/src/beacon_block.rs b/beacon_node/rpc/src/beacon_block.rs index faaf2232a..b42bbb208 100644 --- a/beacon_node/rpc/src/beacon_block.rs +++ b/beacon_node/rpc/src/beacon_block.rs @@ -19,7 +19,7 @@ use types::{BeaconBlock, Signature, Slot}; #[derive(Clone)] pub struct BeaconBlockServiceInstance { pub chain: Arc>, - pub network_chan: mpsc::UnboundedSender, + pub network_chan: mpsc::UnboundedSender>, pub log: Logger, } diff --git a/beacon_node/rpc/src/lib.rs b/beacon_node/rpc/src/lib.rs index eef009292..de9039505 100644 --- a/beacon_node/rpc/src/lib.rs +++ b/beacon_node/rpc/src/lib.rs @@ -25,7 +25,7 @@ use tokio::sync::mpsc; pub fn start_server( config: &RPCConfig, executor: &TaskExecutor, - network_chan: mpsc::UnboundedSender, + network_chan: mpsc::UnboundedSender>, beacon_chain: Arc>, log: &slog::Logger, ) -> exit_future::Signal { diff --git a/beacon_node/src/main.rs b/beacon_node/src/main.rs index 1004ba19b..5d967fc1c 100644 --- a/beacon_node/src/main.rs +++ b/beacon_node/src/main.rs @@ -210,7 +210,7 @@ fn main() { } }; default_dir.push(DEFAULT_DATA_DIR); - PathBuf::from(default_dir) + default_dir } }; diff --git a/beacon_node/src/run.rs b/beacon_node/src/run.rs index 24c6d09d1..9e0b898aa 100644 --- a/beacon_node/src/run.rs +++ b/beacon_node/src/run.rs @@ -57,7 +57,7 @@ pub fn run_beacon_node( "db_type" => &other_client_config.db_type, ); - let result = match (db_type.as_str(), spec_constants.as_str()) { + match (db_type.as_str(), spec_constants.as_str()) { ("disk", "minimal") => run::>( &db_path, client_config, @@ -94,9 +94,7 @@ pub fn run_beacon_node( error!(log, "Unknown runtime configuration"; "spec_constants" => spec, "db_type" => db_type); Err("Unknown specification and/or db_type.".into()) } - }; - - result + } } /// Performs the type-generic parts of launching a `BeaconChain`. diff --git a/beacon_node/store/src/block_at_slot.rs b/beacon_node/store/src/block_at_slot.rs index 12f1cccfe..84c5e4830 100644 --- a/beacon_node/store/src/block_at_slot.rs +++ b/beacon_node/store/src/block_at_slot.rs @@ -1,8 +1,11 @@ use super::*; use ssz::{Decode, DecodeError}; -fn get_block_bytes(store: &T, root: Hash256) -> Result>, Error> { - store.get_bytes(BeaconBlock::db_column().into(), &root[..]) +fn get_block_bytes( + store: &T, + root: Hash256, +) -> Result>, Error> { + store.get_bytes(BeaconBlock::::db_column().into(), &root[..]) } fn read_slot_from_block_bytes(bytes: &[u8]) -> Result { @@ -11,7 +14,7 @@ fn read_slot_from_block_bytes(bytes: &[u8]) -> Result { Slot::from_ssz_bytes(&bytes[0..end]) } -fn read_previous_block_root_from_block_bytes(bytes: &[u8]) -> Result { +fn read_parent_root_from_block_bytes(bytes: &[u8]) -> Result { let previous_bytes = Slot::ssz_fixed_len(); let slice = bytes .get(previous_bytes..previous_bytes + Hash256::ssz_fixed_len()) @@ -20,24 +23,26 @@ fn read_previous_block_root_from_block_bytes(bytes: &[u8]) -> Result( +pub fn get_block_at_preceeding_slot( store: &T, slot: Slot, start_root: Hash256, -) -> Result, Error> { - Ok(match get_at_preceding_slot(store, slot, start_root)? { - Some((hash, bytes)) => Some((hash, BeaconBlock::from_ssz_bytes(&bytes)?)), - None => None, - }) +) -> Result)>, Error> { + Ok( + match get_at_preceeding_slot::<_, E>(store, slot, start_root)? { + Some((hash, bytes)) => Some((hash, BeaconBlock::::from_ssz_bytes(&bytes)?)), + None => None, + }, + ) } -fn get_at_preceding_slot( +fn get_at_preceeding_slot( store: &T, slot: Slot, mut root: Hash256, ) -> Result)>, Error> { loop { - if let Some(bytes) = get_block_bytes(store, root)? { + if let Some(bytes) = get_block_bytes::<_, E>(store, root)? { let this_slot = read_slot_from_block_bytes(&bytes)?; if this_slot == slot { @@ -45,7 +50,7 @@ fn get_at_preceding_slot( } else if this_slot < slot { break Ok(None); } else { - root = read_previous_block_root_from_block_bytes(&bytes)?; + root = read_parent_root_from_block_bytes(&bytes)?; } } else { break Ok(None); @@ -59,6 +64,8 @@ mod tests { use ssz::Encode; use tree_hash::TreeHash; + type BeaconBlock = types::BeaconBlock; + #[test] fn read_slot() { let spec = MinimalEthSpec::default_spec(); @@ -84,17 +91,14 @@ mod tests { } #[test] - fn read_previous_block_root() { + fn read_parent_root() { let spec = MinimalEthSpec::default_spec(); let test_root = |root: Hash256| { let mut block = BeaconBlock::empty(&spec); - block.previous_block_root = root; + block.parent_root = root; let bytes = block.as_ssz_bytes(); - assert_eq!( - read_previous_block_root_from_block_bytes(&bytes).unwrap(), - root - ); + assert_eq!(read_parent_root_from_block_bytes(&bytes).unwrap(), root); }; test_root(Hash256::random()); @@ -114,7 +118,7 @@ mod tests { block.slot = Slot::from(*slot); if i > 0 { - block.previous_block_root = blocks_and_roots[i - 1].0; + block.parent_root = blocks_and_roots[i - 1].0; } let root = Hash256::from_slice(&block.tree_hash_root()); @@ -141,7 +145,7 @@ mod tests { let (target_root, target_block) = &blocks_and_roots[target]; let (found_root, found_block) = store - .get_block_at_preceding_slot(*source_root, target_block.slot) + .get_block_at_preceeding_slot(*source_root, target_block.slot) .unwrap() .unwrap(); @@ -166,7 +170,7 @@ mod tests { let (target_root, target_block) = &blocks_and_roots[target]; let (found_root, found_block) = store - .get_block_at_preceding_slot(*source_root, target_block.slot) + .get_block_at_preceeding_slot(*source_root, target_block.slot) .unwrap() .unwrap(); @@ -177,14 +181,14 @@ mod tests { // Slot that doesn't exist let (source_root, _source_block) = &blocks_and_roots[3]; assert!(store - .get_block_at_preceding_slot(*source_root, Slot::new(3)) + .get_block_at_preceeding_slot::(*source_root, Slot::new(3)) .unwrap() .is_none()); // Slot too high let (source_root, _source_block) = &blocks_and_roots[3]; assert!(store - .get_block_at_preceding_slot(*source_root, Slot::new(3)) + .get_block_at_preceeding_slot::(*source_root, Slot::new(3)) .unwrap() .is_none()); } diff --git a/beacon_node/store/src/impls.rs b/beacon_node/store/src/impls.rs index 418fcade1..e88b70f39 100644 --- a/beacon_node/store/src/impls.rs +++ b/beacon_node/store/src/impls.rs @@ -3,7 +3,7 @@ use ssz::{Decode, Encode}; mod beacon_state; -impl StoreItem for BeaconBlock { +impl StoreItem for BeaconBlock { fn db_column() -> DBColumn { DBColumn::BeaconBlock } diff --git a/beacon_node/store/src/iter.rs b/beacon_node/store/src/iter.rs index 863511620..55c525b11 100644 --- a/beacon_node/store/src/iter.rs +++ b/beacon_node/store/src/iter.rs @@ -9,7 +9,9 @@ pub trait AncestorIter { fn try_iter_ancestor_roots(&self, store: Arc) -> Option; } -impl<'a, U: Store, E: EthSpec> AncestorIter> for BeaconBlock { +impl<'a, U: Store, E: EthSpec> AncestorIter> + for BeaconBlock +{ /// Iterates across all the prior block roots of `self`, starting at the most recent and ending /// at genesis. fn try_iter_ancestor_roots(&self, store: Arc) -> Option> { @@ -98,7 +100,7 @@ impl<'a, T: EthSpec, U: Store> BlockIterator<'a, T, U> { } impl<'a, T: EthSpec, U: Store> Iterator for BlockIterator<'a, T, U> { - type Item = BeaconBlock; + type Item = BeaconBlock; fn next(&mut self) -> Option { let (root, _slot) = self.roots.next()?; @@ -109,8 +111,8 @@ impl<'a, T: EthSpec, U: Store> Iterator for BlockIterator<'a, T, U> { /// Iterates backwards through block roots. If any specified slot is unable to be retrieved, the /// iterator returns `None` indefinitely. /// -/// Uses the `latest_block_roots` field of `BeaconState` to as the source of block roots and will -/// perform a lookup on the `Store` for a prior `BeaconState` if `latest_block_roots` has been +/// Uses the `block_roots` field of `BeaconState` to as the source of block roots and will +/// perform a lookup on the `Store` for a prior `BeaconState` if `block_roots` has been /// exhausted. /// /// Returns `None` for roots prior to genesis or when there is an error reading from `Store`. @@ -191,8 +193,8 @@ impl<'a, T: EthSpec, U: Store> Iterator for BlockRootsIterator<'a, T, U> { /// /// This is distinct from `BestBlockRootsIterator`. /// -/// Uses the `latest_block_roots` field of `BeaconState` to as the source of block roots and will -/// perform a lookup on the `Store` for a prior `BeaconState` if `latest_block_roots` has been +/// Uses the `block_roots` field of `BeaconState` to as the source of block roots and will +/// perform a lookup on the `Store` for a prior `BeaconState` if `block_roots` has been /// exhausted. /// /// Returns `None` for roots prior to genesis or when there is an error reading from `Store`. @@ -305,15 +307,15 @@ mod test { let mut hashes = (0..).into_iter().map(|i| Hash256::from(i)); - for root in &mut state_a.latest_block_roots[..] { + for root in &mut state_a.block_roots[..] { *root = hashes.next().unwrap() } - for root in &mut state_b.latest_block_roots[..] { + for root in &mut state_b.block_roots[..] { *root = hashes.next().unwrap() } let state_a_root = hashes.next().unwrap(); - state_b.latest_state_roots[0] = state_a_root; + state_b.state_roots[0] = state_a_root; store.put(&state_a_root, &state_a).unwrap(); let iter = BlockRootsIterator::new(store.clone(), &state_b, state_b.slot - 1); @@ -348,15 +350,15 @@ mod test { let mut hashes = (0..).into_iter().map(|i| Hash256::from(i)); - for root in &mut state_a.latest_block_roots[..] { + for root in &mut state_a.block_roots[..] { *root = hashes.next().unwrap() } - for root in &mut state_b.latest_block_roots[..] { + for root in &mut state_b.block_roots[..] { *root = hashes.next().unwrap() } let state_a_root = hashes.next().unwrap(); - state_b.latest_state_roots[0] = state_a_root; + state_b.state_roots[0] = state_a_root; store.put(&state_a_root, &state_a).unwrap(); let iter = BestBlockRootsIterator::new(store.clone(), &state_b, state_b.slot); diff --git a/beacon_node/store/src/lib.rs b/beacon_node/store/src/lib.rs index f4e335ab7..5b8d58320 100644 --- a/beacon_node/store/src/lib.rs +++ b/beacon_node/store/src/lib.rs @@ -52,12 +52,12 @@ pub trait Store: Sync + Send + Sized { /// /// Returns `None` if no parent block exists at that slot, or if `slot` is greater than the /// slot of `start_block_root`. - fn get_block_at_preceding_slot( + fn get_block_at_preceeding_slot( &self, start_block_root: Hash256, slot: Slot, - ) -> Result, Error> { - block_at_slot::get_block_at_preceding_slot(self, slot, start_block_root) + ) -> Result)>, Error> { + block_at_slot::get_block_at_preceeding_slot::<_, E>(self, slot, start_block_root) } /// Retrieve some bytes in `column` with `key`. diff --git a/eth2/README.md b/eth2/README.md index 2159e2fd3..5f1264372 100644 --- a/eth2/README.md +++ b/eth2/README.md @@ -14,8 +14,6 @@ Rust crates containing logic common across the Lighthouse project. `BeaconState`, etc). - [`utils/`](utils/): - [`bls`](utils/bls/): A wrapper for an external BLS encryption library. - - [`boolean-bitfield`](utils/boolean-bitfield/): Provides an expandable vector - of bools, specifically for use in Eth2. - [`fisher-yates-shuffle`](utils/fisher-yates-shuffle/): shuffles a list pseudo-randomly. - [`hashing`](utils/hashing/): A wrapper for external hashing libraries. diff --git a/eth2/lmd_ghost/src/lib.rs b/eth2/lmd_ghost/src/lib.rs index dd413e2eb..de9bdd860 100644 --- a/eth2/lmd_ghost/src/lib.rs +++ b/eth2/lmd_ghost/src/lib.rs @@ -10,7 +10,7 @@ pub type Result = std::result::Result; pub trait LmdGhost: Send + Sync { /// Create a new instance, with the given `store` and `finalized_root`. - fn new(store: Arc, finalized_block: &BeaconBlock, finalized_root: Hash256) -> Self; + fn new(store: Arc, finalized_block: &BeaconBlock, finalized_root: Hash256) -> Self; /// Process an attestation message from some validator that attests to some `block_hash` /// representing a block at some `block_slot`. @@ -22,7 +22,7 @@ pub trait LmdGhost: Send + Sync { ) -> Result<()>; /// Process a block that was seen on the network. - fn process_block(&self, block: &BeaconBlock, block_hash: Hash256) -> Result<()>; + fn process_block(&self, block: &BeaconBlock, block_hash: Hash256) -> Result<()>; /// Returns the head of the chain, starting the search at `start_block_root` and moving upwards /// (in block height). @@ -40,7 +40,7 @@ pub trait LmdGhost: Send + Sync { /// `finalized_block_root` must be the root of `finalized_block`. fn update_finalized_root( &self, - finalized_block: &BeaconBlock, + finalized_block: &BeaconBlock, finalized_block_root: Hash256, ) -> Result<()>; } diff --git a/eth2/lmd_ghost/src/reduced_tree.rs b/eth2/lmd_ghost/src/reduced_tree.rs index bdf9680a3..a3cf4e105 100644 --- a/eth2/lmd_ghost/src/reduced_tree.rs +++ b/eth2/lmd_ghost/src/reduced_tree.rs @@ -58,7 +58,7 @@ where T: Store, E: EthSpec, { - fn new(store: Arc, genesis_block: &BeaconBlock, genesis_root: Hash256) -> Self { + fn new(store: Arc, genesis_block: &BeaconBlock, genesis_root: Hash256) -> Self { ThreadSafeReducedTree { core: RwLock::new(ReducedTree::new(store, genesis_block, genesis_root)), } @@ -77,7 +77,7 @@ where } /// Process a block that was seen on the network. - fn process_block(&self, block: &BeaconBlock, block_hash: Hash256) -> SuperResult<()> { + fn process_block(&self, block: &BeaconBlock, block_hash: Hash256) -> SuperResult<()> { self.core .write() .add_weightless_node(block.slot, block_hash) @@ -99,7 +99,11 @@ where .map_err(|e| format!("find_head failed: {:?}", e)) } - fn update_finalized_root(&self, new_block: &BeaconBlock, new_root: Hash256) -> SuperResult<()> { + fn update_finalized_root( + &self, + new_block: &BeaconBlock, + new_root: Hash256, + ) -> SuperResult<()> { self.core .write() .update_root(new_block.slot, new_root) @@ -129,7 +133,7 @@ where T: Store, E: EthSpec, { - pub fn new(store: Arc, genesis_block: &BeaconBlock, genesis_root: Hash256) -> Self { + pub fn new(store: Arc, genesis_block: &BeaconBlock, genesis_root: Hash256) -> Self { let mut nodes = HashMap::new(); // Insert the genesis node. @@ -309,7 +313,7 @@ where /// If the validator had a vote in the tree, the removal of that vote may cause a node to /// become redundant and removed from the reduced tree. fn remove_latest_message(&mut self, validator_index: usize) -> Result<()> { - if let Some(vote) = self.latest_votes.get(validator_index).clone() { + if let Some(vote) = *self.latest_votes.get(validator_index) { self.get_mut_node(vote.hash)?.remove_voter(validator_index); let node = self.get_node(vote.hash)?.clone(); @@ -669,9 +673,9 @@ where .ok_or_else(|| Error::MissingNode(hash)) } - fn get_block(&self, block_root: Hash256) -> Result { + fn get_block(&self, block_root: Hash256) -> Result> { self.store - .get::(&block_root)? + .get::>(&block_root)? .ok_or_else(|| Error::MissingBlock(block_root)) } diff --git a/eth2/lmd_ghost/tests/test.rs b/eth2/lmd_ghost/tests/test.rs index 5c6f01155..fbe385560 100644 --- a/eth2/lmd_ghost/tests/test.rs +++ b/eth2/lmd_ghost/tests/test.rs @@ -41,7 +41,7 @@ struct ForkedHarness { /// don't expose it to avoid contamination between tests. harness: BeaconChainHarness, pub genesis_block_root: Hash256, - pub genesis_block: BeaconBlock, + pub genesis_block: BeaconBlock, pub honest_head: RootAndSlot, pub faulty_head: RootAndSlot, pub honest_roots: Vec, @@ -101,7 +101,7 @@ impl ForkedHarness { let genesis_block = harness .chain .store - .get::(&genesis_block_root) + .get::>(&genesis_block_root) .expect("Genesis block should exist") .expect("DB should not error"); @@ -155,11 +155,11 @@ fn get_ancestor_roots( block_root: Hash256, ) -> Vec<(Hash256, Slot)> { let block = store - .get::(&block_root) + .get::>(&block_root) .expect("block should exist") .expect("store should not error"); - >>::try_iter_ancestor_roots( + as AncestorIter<_, BestBlockRootsIterator>>::try_iter_ancestor_roots( &block, store, ) .expect("should be able to create ancestor iter") @@ -171,7 +171,7 @@ fn get_slot_for_block_root(harness: &BeaconChainHarness, block_root: Hash256) -> harness .chain .store - .get::(&block_root) + .get::>(&block_root) .expect("head block should exist") .expect("DB should not error") .slot @@ -328,7 +328,7 @@ fn test_update_finalized_root(roots: &[(Hash256, Slot)]) { for (root, _slot) in roots.iter().rev() { let block = harness .store_clone() - .get::(root) + .get::>(root) .expect("block should exist") .expect("db should not error"); lmd.update_finalized_root(&block, *root) diff --git a/eth2/operation_pool/Cargo.toml b/eth2/operation_pool/Cargo.toml index d1fd18191..02bed11de 100644 --- a/eth2/operation_pool/Cargo.toml +++ b/eth2/operation_pool/Cargo.toml @@ -5,7 +5,6 @@ authors = ["Michael Sproul "] edition = "2018" [dependencies] -boolean-bitfield = { path = "../utils/boolean-bitfield" } int_to_bytes = { path = "../utils/int_to_bytes" } itertools = "0.8" parking_lot = "0.7" @@ -13,3 +12,6 @@ types = { path = "../types" } state_processing = { path = "../state_processing" } eth2_ssz = { path = "../utils/ssz" } eth2_ssz_derive = { path = "../utils/ssz_derive" } + +[dev-dependencies] +rand = "0.5.5" diff --git a/eth2/operation_pool/src/attestation.rs b/eth2/operation_pool/src/attestation.rs index a2f71c3a4..de07b2f7b 100644 --- a/eth2/operation_pool/src/attestation.rs +++ b/eth2/operation_pool/src/attestation.rs @@ -1,16 +1,18 @@ use crate::max_cover::MaxCover; -use boolean_bitfield::BooleanBitfield; -use types::{Attestation, BeaconState, EthSpec}; +use types::{Attestation, BeaconState, BitList, EthSpec}; -pub struct AttMaxCover<'a> { +pub struct AttMaxCover<'a, T: EthSpec> { /// Underlying attestation. - att: &'a Attestation, + att: &'a Attestation, /// Bitfield of validators that are covered by this attestation. - fresh_validators: BooleanBitfield, + fresh_validators: BitList, } -impl<'a> AttMaxCover<'a> { - pub fn new(att: &'a Attestation, fresh_validators: BooleanBitfield) -> Self { +impl<'a, T: EthSpec> AttMaxCover<'a, T> { + pub fn new( + att: &'a Attestation, + fresh_validators: BitList, + ) -> Self { Self { att, fresh_validators, @@ -18,15 +20,15 @@ impl<'a> AttMaxCover<'a> { } } -impl<'a> MaxCover for AttMaxCover<'a> { - type Object = Attestation; - type Set = BooleanBitfield; +impl<'a, T: EthSpec> MaxCover for AttMaxCover<'a, T> { + type Object = Attestation; + type Set = BitList; - fn object(&self) -> Attestation { + fn object(&self) -> Attestation { self.att.clone() } - fn covering_set(&self) -> &BooleanBitfield { + fn covering_set(&self) -> &BitList { &self.fresh_validators } @@ -37,11 +39,11 @@ impl<'a> MaxCover for AttMaxCover<'a> { /// that a shard and epoch uniquely identify a committee. fn update_covering_set( &mut self, - best_att: &Attestation, - covered_validators: &BooleanBitfield, + best_att: &Attestation, + covered_validators: &BitList, ) { - if self.att.data.shard == best_att.data.shard - && self.att.data.target_epoch == best_att.data.target_epoch + if self.att.data.crosslink.shard == best_att.data.crosslink.shard + && self.att.data.target.epoch == best_att.data.target.epoch { self.fresh_validators.difference_inplace(covered_validators); } @@ -58,22 +60,22 @@ impl<'a> MaxCover for AttMaxCover<'a> { /// of validators for which the included attestation is their first in the epoch. The attestation /// is judged against the state's `current_epoch_attestations` or `previous_epoch_attestations` /// depending on when it was created, and all those validators who have already attested are -/// removed from the `aggregation_bitfield` before returning it. +/// removed from the `aggregation_bits` before returning it. // TODO: This could be optimised with a map from validator index to whether that validator has // attested in each of the current and previous epochs. Currently quadratic in number of validators. pub fn earliest_attestation_validators( - attestation: &Attestation, + attestation: &Attestation, state: &BeaconState, -) -> BooleanBitfield { +) -> BitList { // Bitfield of validators whose attestations are new/fresh. - let mut new_validators = attestation.aggregation_bitfield.clone(); + let mut new_validators = attestation.aggregation_bits.clone(); - let state_attestations = if attestation.data.target_epoch == state.current_epoch() { + let state_attestations = if attestation.data.target.epoch == state.current_epoch() { &state.current_epoch_attestations - } else if attestation.data.target_epoch == state.previous_epoch() { + } else if attestation.data.target.epoch == state.previous_epoch() { &state.previous_epoch_attestations } else { - return BooleanBitfield::from_elem(attestation.aggregation_bitfield.len(), false); + return BitList::with_capacity(0).unwrap(); }; state_attestations @@ -81,10 +83,12 @@ pub fn earliest_attestation_validators( // In a single epoch, an attester should only be attesting for one shard. // TODO: we avoid including slashable attestations in the state here, // but maybe we should do something else with them (like construct slashings). - .filter(|existing_attestation| existing_attestation.data.shard == attestation.data.shard) + .filter(|existing_attestation| { + existing_attestation.data.crosslink.shard == attestation.data.crosslink.shard + }) .for_each(|existing_attestation| { // Remove the validators who have signed the existing attestation (they are not new) - new_validators.difference_inplace(&existing_attestation.aggregation_bitfield); + new_validators.difference_inplace(&existing_attestation.aggregation_bits); }); new_validators diff --git a/eth2/operation_pool/src/attestation_id.rs b/eth2/operation_pool/src/attestation_id.rs index a79023a69..e435bae7f 100644 --- a/eth2/operation_pool/src/attestation_id.rs +++ b/eth2/operation_pool/src/attestation_id.rs @@ -19,7 +19,7 @@ impl AttestationId { spec: &ChainSpec, ) -> Self { let mut bytes = ssz_encode(attestation); - let epoch = attestation.target_epoch; + let epoch = attestation.target.epoch; bytes.extend_from_slice(&AttestationId::compute_domain_bytes(epoch, state, spec)); AttestationId { v: bytes } } diff --git a/eth2/operation_pool/src/lib.rs b/eth2/operation_pool/src/lib.rs index a39fcce33..92d5fb168 100644 --- a/eth2/operation_pool/src/lib.rs +++ b/eth2/operation_pool/src/lib.rs @@ -15,22 +15,21 @@ use state_processing::per_block_processing::errors::{ ExitValidationError, ProposerSlashingValidationError, TransferValidationError, }; use state_processing::per_block_processing::{ - get_slashable_indices_modular, validate_attestation, - validate_attestation_time_independent_only, verify_attester_slashing, verify_exit, - verify_exit_time_independent_only, verify_proposer_slashing, verify_transfer, - verify_transfer_time_independent_only, + get_slashable_indices_modular, verify_attestation, verify_attestation_time_independent_only, + verify_attester_slashing, verify_exit, verify_exit_time_independent_only, + verify_proposer_slashing, verify_transfer, verify_transfer_time_independent_only, }; use std::collections::{btree_map::Entry, hash_map, BTreeMap, HashMap, HashSet}; use std::marker::PhantomData; use types::{ - Attestation, AttesterSlashing, BeaconState, ChainSpec, Deposit, EthSpec, ProposerSlashing, - Transfer, Validator, VoluntaryExit, + typenum::Unsigned, Attestation, AttesterSlashing, BeaconState, ChainSpec, Deposit, EthSpec, + ProposerSlashing, Transfer, Validator, VoluntaryExit, }; #[derive(Default, Debug)] pub struct OperationPool { /// Map from attestation ID (see below) to vectors of attestations. - attestations: RwLock>>, + attestations: RwLock>>>, /// Map from deposit index to deposit data. // NOTE: We assume that there is only one deposit per index // because the Eth1 data is updated (at most) once per epoch, @@ -38,7 +37,7 @@ pub struct OperationPool { // longer than an epoch deposits: RwLock>, /// Map from two attestation IDs to a slashing for those IDs. - attester_slashings: RwLock>, + attester_slashings: RwLock>>, /// Map from proposer index to slashing. proposer_slashings: RwLock>, /// Map from exiting validator to their exit data. @@ -67,12 +66,12 @@ impl OperationPool { /// Insert an attestation into the pool, aggregating it with existing attestations if possible. pub fn insert_attestation( &self, - attestation: Attestation, + attestation: Attestation, state: &BeaconState, spec: &ChainSpec, ) -> Result<(), AttestationValidationError> { // Check that attestation signatures are valid. - validate_attestation_time_independent_only(state, &attestation, spec)?; + verify_attestation_time_independent_only(state, &attestation, spec)?; let id = AttestationId::from_data(&attestation.data, state, spec); @@ -110,7 +109,11 @@ impl OperationPool { } /// Get a list of attestations for inclusion in a block. - pub fn get_attestations(&self, state: &BeaconState, spec: &ChainSpec) -> Vec { + pub fn get_attestations( + &self, + state: &BeaconState, + spec: &ChainSpec, + ) -> Vec> { // Attestations for the current fork, which may be from the current or previous epoch. let prev_epoch = state.previous_epoch(); let current_epoch = state.current_epoch(); @@ -125,10 +128,10 @@ impl OperationPool { }) .flat_map(|(_, attestations)| attestations) // That are valid... - .filter(|attestation| validate_attestation(state, attestation, spec).is_ok()) + .filter(|attestation| verify_attestation(state, attestation, spec).is_ok()) .map(|att| AttMaxCover::new(att, earliest_attestation_validators(att, state))); - maximum_cover(valid_attestations, spec.max_attestations as usize) + maximum_cover(valid_attestations, T::MaxAttestations::to_usize()) } /// Remove attestations which are too old to be included in a block. @@ -141,7 +144,7 @@ impl OperationPool { // All the attestations in this bucket have the same data, so we only need to // check the first one. attestations.first().map_or(false, |att| { - finalized_state.current_epoch() <= att.data.target_epoch + 1 + finalized_state.current_epoch() <= att.data.target.epoch + 1 }) }); } @@ -149,13 +152,15 @@ impl OperationPool { /// Add a deposit to the pool. /// /// No two distinct deposits should be added with the same index. + // TODO: we need to rethink this entirely pub fn insert_deposit( &self, + index: u64, deposit: Deposit, ) -> Result { use DepositInsertStatus::*; - match self.deposits.write().entry(deposit.index) { + match self.deposits.write().entry(index) { Entry::Vacant(entry) => { entry.insert(deposit); Ok(Fresh) @@ -173,12 +178,12 @@ impl OperationPool { /// Get an ordered list of deposits for inclusion in a block. /// /// Take at most the maximum number of deposits, beginning from the current deposit index. - pub fn get_deposits(&self, state: &BeaconState, spec: &ChainSpec) -> Vec { + pub fn get_deposits(&self, state: &BeaconState) -> Vec { // TODO: We need to update the Merkle proofs for existing deposits as more deposits // are added. It probably makes sense to construct the proofs from scratch when forming // a block, using fresh info from the ETH1 chain for the current deposit root. - let start_idx = state.deposit_index; - (start_idx..start_idx + spec.max_deposits) + let start_idx = state.eth1_deposit_index; + (start_idx..start_idx + T::MaxDeposits::to_u64()) .map(|idx| self.deposits.read().get(&idx).cloned()) .take_while(Option::is_some) .flatten() @@ -187,7 +192,7 @@ impl OperationPool { /// Remove all deposits with index less than the deposit index of the latest finalised block. pub fn prune_deposits(&self, state: &BeaconState) -> BTreeMap { - let deposits_keep = self.deposits.write().split_off(&state.deposit_index); + let deposits_keep = self.deposits.write().split_off(&state.eth1_deposit_index); std::mem::replace(&mut self.deposits.write(), deposits_keep) } @@ -216,7 +221,7 @@ impl OperationPool { /// /// Depends on the fork field of the state, but not on the state's epoch. fn attester_slashing_id( - slashing: &AttesterSlashing, + slashing: &AttesterSlashing, state: &BeaconState, spec: &ChainSpec, ) -> (AttestationId, AttestationId) { @@ -229,7 +234,7 @@ impl OperationPool { /// Insert an attester slashing into the pool. pub fn insert_attester_slashing( &self, - slashing: AttesterSlashing, + slashing: AttesterSlashing, state: &BeaconState, spec: &ChainSpec, ) -> Result<(), AttesterSlashingValidationError> { @@ -248,16 +253,16 @@ impl OperationPool { &self, state: &BeaconState, spec: &ChainSpec, - ) -> (Vec, Vec) { + ) -> (Vec, Vec>) { let proposer_slashings = filter_limit_operations( self.proposer_slashings.read().values(), |slashing| { state - .validator_registry + .validators .get(slashing.proposer_index as usize) .map_or(false, |validator| !validator.slashed) }, - spec.max_proposer_slashings, + T::MaxProposerSlashings::to_usize(), ); // Set of validators to be slashed, so we don't attempt to construct invalid attester @@ -291,7 +296,7 @@ impl OperationPool { false } }) - .take(spec.max_attester_slashings as usize) + .take(T::MaxAttesterSlashings::to_usize()) .map(|(_, slashing)| slashing.clone()) .collect(); @@ -347,7 +352,7 @@ impl OperationPool { filter_limit_operations( self.voluntary_exits.read().values(), |exit| verify_exit(state, exit, spec).is_ok(), - spec.max_voluntary_exits, + T::MaxVoluntaryExits::to_usize(), ) } @@ -384,7 +389,7 @@ impl OperationPool { .iter() .filter(|transfer| verify_transfer(state, transfer, spec).is_ok()) .sorted_by_key(|transfer| std::cmp::Reverse(transfer.fee)) - .take(spec.max_transfers as usize) + .take(T::MaxTransfers::to_usize()) .cloned() .collect() } @@ -408,7 +413,7 @@ impl OperationPool { } /// Filter up to a maximum number of operations out of an iterator. -fn filter_limit_operations<'a, T: 'a, I, F>(operations: I, filter: F, limit: u64) -> Vec +fn filter_limit_operations<'a, T: 'a, I, F>(operations: I, filter: F, limit: usize) -> Vec where I: IntoIterator, F: Fn(&T) -> bool, @@ -417,7 +422,7 @@ where operations .into_iter() .filter(|x| filter(*x)) - .take(limit as usize) + .take(limit) .cloned() .collect() } @@ -436,7 +441,7 @@ fn prune_validator_hash_map( { map.retain(|&validator_index, _| { finalized_state - .validator_registry + .validators .get(validator_index as usize) .map_or(true, |validator| !prune_if(validator)) }); @@ -458,6 +463,7 @@ impl PartialEq for OperationPool { mod tests { use super::DepositInsertStatus::*; use super::*; + use rand::Rng; use types::test_utils::*; use types::*; @@ -466,13 +472,16 @@ mod tests { let rng = &mut XorShiftRng::from_seed([42; 16]); let op_pool = OperationPool::::new(); let deposit1 = make_deposit(rng); - let mut deposit2 = make_deposit(rng); - deposit2.index = deposit1.index; + let deposit2 = make_deposit(rng); + let index = rng.gen(); - assert_eq!(op_pool.insert_deposit(deposit1.clone()), Ok(Fresh)); - assert_eq!(op_pool.insert_deposit(deposit1.clone()), Ok(Duplicate)); + assert_eq!(op_pool.insert_deposit(index, deposit1.clone()), Ok(Fresh)); assert_eq!( - op_pool.insert_deposit(deposit2), + op_pool.insert_deposit(index, deposit1.clone()), + Ok(Duplicate) + ); + assert_eq!( + op_pool.insert_deposit(index, deposit2), Ok(Replaced(Box::new(deposit1))) ); } @@ -480,28 +489,29 @@ mod tests { #[test] fn get_deposits_max() { let rng = &mut XorShiftRng::from_seed([42; 16]); - let (spec, mut state) = test_state(rng); + let (_, mut state) = test_state(rng); let op_pool = OperationPool::new(); let start = 10000; - let max_deposits = spec.max_deposits; + let max_deposits = ::MaxDeposits::to_u64(); let extra = 5; let offset = 1; assert!(offset <= extra); let deposits = dummy_deposits(rng, start, max_deposits + extra); - for deposit in &deposits { - assert_eq!(op_pool.insert_deposit(deposit.clone()), Ok(Fresh)); + for (i, deposit) in &deposits { + assert_eq!(op_pool.insert_deposit(*i, deposit.clone()), Ok(Fresh)); } - state.deposit_index = start + offset; - let deposits_for_block = op_pool.get_deposits(&state, &spec); + state.eth1_deposit_index = start + offset; + let deposits_for_block = op_pool.get_deposits(&state); assert_eq!(deposits_for_block.len() as u64, max_deposits); - assert_eq!( - deposits_for_block[..], - deposits[offset as usize..(offset + max_deposits) as usize] - ); + let expected = deposits[offset as usize..(offset + max_deposits) as usize] + .iter() + .map(|(_, d)| d.clone()) + .collect::>(); + assert_eq!(deposits_for_block[..], expected[..]); } #[test] @@ -518,20 +528,20 @@ mod tests { let deposits1 = dummy_deposits(rng, start1, count); let deposits2 = dummy_deposits(rng, start2, count); - for d in deposits1.into_iter().chain(deposits2) { - assert!(op_pool.insert_deposit(d).is_ok()); + for (i, d) in deposits1.into_iter().chain(deposits2) { + assert!(op_pool.insert_deposit(i, d).is_ok()); } assert_eq!(op_pool.num_deposits(), 2 * count as usize); let mut state = BeaconState::random_for_test(rng); - state.deposit_index = start1; + state.eth1_deposit_index = start1; // Pruning the first bunch of deposits in batches of 5 should work. let step = 5; let mut pool_size = step + 2 * count as usize; for i in (start1..=(start1 + count)).step_by(step) { - state.deposit_index = i; + state.eth1_deposit_index = i; op_pool.prune_deposits(&state); pool_size -= step; assert_eq!(op_pool.num_deposits(), pool_size); @@ -539,14 +549,14 @@ mod tests { assert_eq!(pool_size, count as usize); // Pruning in the gap should do nothing. for i in (start1 + count..start2).step_by(step) { - state.deposit_index = i; + state.eth1_deposit_index = i; op_pool.prune_deposits(&state); assert_eq!(op_pool.num_deposits(), count as usize); } // Same again for the later deposits. pool_size += step; for i in (start2..=(start2 + count)).step_by(step) { - state.deposit_index = i; + state.eth1_deposit_index = i; op_pool.prune_deposits(&state); pool_size -= step; assert_eq!(op_pool.num_deposits(), pool_size); @@ -560,13 +570,13 @@ mod tests { } // Create `count` dummy deposits with sequential deposit IDs beginning from `start`. - fn dummy_deposits(rng: &mut XorShiftRng, start: u64, count: u64) -> Vec { + fn dummy_deposits(rng: &mut XorShiftRng, start: u64, count: u64) -> Vec<(u64, Deposit)> { let proto_deposit = make_deposit(rng); (start..start + count) .map(|index| { let mut deposit = proto_deposit.clone(); - deposit.index = index; - deposit + deposit.data.amount = index * 1000; + (index, deposit) }) .collect() } @@ -596,11 +606,11 @@ mod tests { state: &BeaconState, spec: &ChainSpec, extra_signer: Option, - ) -> Attestation { + ) -> Attestation { let mut builder = TestingAttestationBuilder::new(state, committee, slot, shard, spec); let signers = &committee[signing_range]; let committee_keys = signers.iter().map(|&i| &keypairs[i].sk).collect::>(); - builder.sign(signers, &committee_keys, &state.fork, spec); + builder.sign(signers, &committee_keys, &state.fork, spec, false); extra_signer.map(|c_idx| { let validator_index = committee[c_idx]; builder.sign( @@ -608,6 +618,7 @@ mod tests { &[&keypairs[validator_index].sk], &state.fork, spec, + false, ) }); builder.build() @@ -668,15 +679,18 @@ mod tests { ); assert_eq!( - att1.aggregation_bitfield.num_set_bits(), + att1.aggregation_bits.num_set_bits(), earliest_attestation_validators(&att1, state).num_set_bits() ); - state.current_epoch_attestations.push(PendingAttestation { - aggregation_bitfield: att1.aggregation_bitfield.clone(), - data: att1.data.clone(), - inclusion_delay: 0, - proposer_index: 0, - }); + state + .current_epoch_attestations + .push(PendingAttestation { + aggregation_bits: att1.aggregation_bits.clone(), + data: att1.data.clone(), + inclusion_delay: 0, + proposer_index: 0, + }) + .unwrap(); assert_eq!( cc.committee.len() - 2, @@ -728,6 +742,7 @@ mod tests { assert_eq!(op_pool.num_attestations(), committees.len()); // Before the min attestation inclusion delay, get_attestations shouldn't return anything. + state.slot -= 1; assert_eq!(op_pool.get_attestations(state, spec).len(), 0); // Then once the delay has elapsed, we should get a single aggregated attestation. @@ -738,7 +753,7 @@ mod tests { let agg_att = &block_attestations[0]; assert_eq!( - agg_att.aggregation_bitfield.num_set_bits(), + agg_att.aggregation_bits.num_set_bits(), spec.target_committee_size as usize ); @@ -854,7 +869,7 @@ mod tests { .map(CrosslinkCommittee::into_owned) .collect::>(); - let max_attestations = spec.max_attestations as usize; + let max_attestations = ::MaxAttestations::to_usize(); let target_committee_size = spec.target_committee_size as usize; let insert_attestations = |cc: &OwnedCrosslinkCommittee, step_size| { @@ -897,7 +912,7 @@ mod tests { // All the best attestations should be signed by at least `big_step_size` (4) validators. for att in &best_attestations { - assert!(att.aggregation_bitfield.num_set_bits() >= big_step_size); + assert!(att.aggregation_bits.num_set_bits() >= big_step_size); } } } diff --git a/eth2/operation_pool/src/max_cover.rs b/eth2/operation_pool/src/max_cover.rs index 75ac14054..15d528e45 100644 --- a/eth2/operation_pool/src/max_cover.rs +++ b/eth2/operation_pool/src/max_cover.rs @@ -42,7 +42,7 @@ impl MaxCoverItem { /// /// * Time complexity: `O(limit * items_iter.len())` /// * Space complexity: `O(item_iter.len())` -pub fn maximum_cover<'a, I, T>(items_iter: I, limit: usize) -> Vec +pub fn maximum_cover(items_iter: I, limit: usize) -> Vec where I: IntoIterator, T: MaxCover, diff --git a/eth2/operation_pool/src/persistence.rs b/eth2/operation_pool/src/persistence.rs index aa6df597c..00d1cd2f1 100644 --- a/eth2/operation_pool/src/persistence.rs +++ b/eth2/operation_pool/src/persistence.rs @@ -9,14 +9,14 @@ use types::*; /// Operations are stored in arbitrary order, so it's not a good idea to compare instances /// of this type (or its encoded form) for equality. Convert back to an `OperationPool` first. #[derive(Encode, Decode)] -pub struct PersistedOperationPool { +pub struct PersistedOperationPool { /// Mapping from attestation ID to attestation mappings. // We could save space by not storing the attestation ID, but it might // be difficult to make that roundtrip due to eager aggregation. - attestations: Vec<(AttestationId, Vec)>, - deposits: Vec, + attestations: Vec<(AttestationId, Vec>)>, + deposits: Vec<(u64, Deposit)>, /// Attester slashings. - attester_slashings: Vec, + attester_slashings: Vec>, /// Proposer slashings. proposer_slashings: Vec, /// Voluntary exits. @@ -25,9 +25,9 @@ pub struct PersistedOperationPool { transfers: Vec, } -impl PersistedOperationPool { +impl PersistedOperationPool { /// Convert an `OperationPool` into serializable form. - pub fn from_operation_pool(operation_pool: &OperationPool) -> Self { + pub fn from_operation_pool(operation_pool: &OperationPool) -> Self { let attestations = operation_pool .attestations .read() @@ -39,7 +39,7 @@ impl PersistedOperationPool { .deposits .read() .iter() - .map(|(_, d)| d.clone()) + .map(|(index, d)| (*index, d.clone())) .collect(); let attester_slashings = operation_pool @@ -76,13 +76,9 @@ impl PersistedOperationPool { } /// Reconstruct an `OperationPool`. - pub fn into_operation_pool( - self, - state: &BeaconState, - spec: &ChainSpec, - ) -> OperationPool { + pub fn into_operation_pool(self, state: &BeaconState, spec: &ChainSpec) -> OperationPool { let attestations = RwLock::new(self.attestations.into_iter().collect()); - let deposits = RwLock::new(self.deposits.into_iter().map(|d| (d.index, d)).collect()); + let deposits = RwLock::new(self.deposits.into_iter().collect()); let attester_slashings = RwLock::new( self.attester_slashings .into_iter() diff --git a/eth2/state_processing/Cargo.toml b/eth2/state_processing/Cargo.toml index cf51ee564..b6941d739 100644 --- a/eth2/state_processing/Cargo.toml +++ b/eth2/state_processing/Cargo.toml @@ -19,6 +19,7 @@ serde_yaml = "0.8" bls = { path = "../utils/bls" } integer-sqrt = "0.1" itertools = "0.8" +eth2_ssz_types = { path = "../utils/ssz_types" } merkle_proof = { path = "../utils/merkle_proof" } tree_hash = { path = "../utils/tree_hash" } tree_hash_derive = { path = "../utils/tree_hash_derive" } diff --git a/eth2/state_processing/benches/bench_epoch_processing.rs b/eth2/state_processing/benches/bench_epoch_processing.rs index 977464513..ee9e39a7d 100644 --- a/eth2/state_processing/benches/bench_epoch_processing.rs +++ b/eth2/state_processing/benches/bench_epoch_processing.rs @@ -37,7 +37,7 @@ pub fn bench_epoch_processing_n_validators(c: &mut Criterion, validator_count: u // Assert that the state has an attestations for each committee that is able to include an // attestation in the state. - let committees_per_epoch = spec.get_epoch_committee_count(validator_count); + let committees_per_epoch = spec.get_committee_count(validator_count); let committees_per_slot = committees_per_epoch / T::slots_per_epoch(); let previous_epoch_attestations = committees_per_epoch; let current_epoch_attestations = diff --git a/eth2/state_processing/src/common/convert_to_indexed.rs b/eth2/state_processing/src/common/convert_to_indexed.rs deleted file mode 100644 index 1854d32d1..000000000 --- a/eth2/state_processing/src/common/convert_to_indexed.rs +++ /dev/null @@ -1,33 +0,0 @@ -use super::{get_attesting_indices, get_attesting_indices_unsorted}; -use itertools::{Either, Itertools}; -use types::*; - -/// Convert `attestation` to (almost) indexed-verifiable form. -/// -/// Spec v0.6.3 -pub fn convert_to_indexed( - state: &BeaconState, - attestation: &Attestation, -) -> Result { - let attesting_indices = - get_attesting_indices(state, &attestation.data, &attestation.aggregation_bitfield)?; - - // We verify the custody bitfield by calling `get_attesting_indices_unsorted` and throwing - // away the result. This avoids double-sorting - the partition below takes care of the ordering. - get_attesting_indices_unsorted(state, &attestation.data, &attestation.custody_bitfield)?; - - let (custody_bit_0_indices, custody_bit_1_indices) = - attesting_indices.into_iter().enumerate().partition_map( - |(committee_idx, validator_idx)| match attestation.custody_bitfield.get(committee_idx) { - Ok(true) => Either::Right(validator_idx as u64), - _ => Either::Left(validator_idx as u64), - }, - ); - - Ok(IndexedAttestation { - custody_bit_0_indices, - custody_bit_1_indices, - data: attestation.data.clone(), - signature: attestation.signature.clone(), - }) -} diff --git a/eth2/state_processing/src/common/get_attesting_indices.rs b/eth2/state_processing/src/common/get_attesting_indices.rs index c627c366b..f558909f6 100644 --- a/eth2/state_processing/src/common/get_attesting_indices.rs +++ b/eth2/state_processing/src/common/get_attesting_indices.rs @@ -1,44 +1,33 @@ -use crate::common::verify_bitfield_length; +use std::collections::BTreeSet; use types::*; /// Returns validator indices which participated in the attestation, sorted by increasing index. /// -/// Spec v0.6.3 +/// Spec v0.8.1 pub fn get_attesting_indices( state: &BeaconState, attestation_data: &AttestationData, - bitfield: &Bitfield, -) -> Result, BeaconStateError> { - get_attesting_indices_unsorted(state, attestation_data, bitfield).map(|mut indices| { - // Fast unstable sort is safe because validator indices are unique - indices.sort_unstable(); - indices - }) -} - -/// Returns validator indices which participated in the attestation, unsorted. -/// -/// Spec v0.6.3 -pub fn get_attesting_indices_unsorted( - state: &BeaconState, - attestation_data: &AttestationData, - bitfield: &Bitfield, -) -> Result, BeaconStateError> { + bitlist: &BitList, +) -> Result, BeaconStateError> { let target_relative_epoch = - RelativeEpoch::from_epoch(state.current_epoch(), attestation_data.target_epoch)?; + RelativeEpoch::from_epoch(state.current_epoch(), attestation_data.target.epoch)?; - let committee = - state.get_crosslink_committee_for_shard(attestation_data.shard, target_relative_epoch)?; + let committee = state.get_crosslink_committee_for_shard( + attestation_data.crosslink.shard, + target_relative_epoch, + )?; - if !verify_bitfield_length(&bitfield, committee.committee.len()) { + /* TODO(freeze): re-enable this? + if bitlist.len() > committee.committee.len() { return Err(BeaconStateError::InvalidBitfield); } + */ Ok(committee .committee .iter() .enumerate() - .filter_map(|(i, validator_index)| match bitfield.get(i) { + .filter_map(|(i, validator_index)| match bitlist.get(i) { Ok(true) => Some(*validator_index), _ => None, }) diff --git a/eth2/state_processing/src/common/get_compact_committees_root.rs b/eth2/state_processing/src/common/get_compact_committees_root.rs new file mode 100644 index 000000000..3a1f3998b --- /dev/null +++ b/eth2/state_processing/src/common/get_compact_committees_root.rs @@ -0,0 +1,49 @@ +use tree_hash::TreeHash; +use types::*; + +/// Return the compact committee root at `relative_epoch`. +/// +/// Spec v0.8.0 +pub fn get_compact_committees_root( + state: &BeaconState, + relative_epoch: RelativeEpoch, + spec: &ChainSpec, +) -> Result { + let mut committees = + FixedVector::<_, T::ShardCount>::from_elem(CompactCommittee::::default()); + // FIXME: this is a spec bug, whereby the start shard for the epoch after the next epoch + // is mistakenly used. The start shard from the cache SHOULD work. + // Waiting on a release to fix https://github.com/ethereum/eth2.0-specs/issues/1315 + // let start_shard = state.get_epoch_start_shard(relative_epoch)?; + let start_shard = state.next_epoch_start_shard(spec)?; + + for committee_number in 0..state.get_committee_count(relative_epoch)? { + let shard = (start_shard + committee_number) % T::ShardCount::to_u64(); + // FIXME: this is a partial workaround for the above, but it only works in the case + // where there's a committee for every shard in every epoch. It works for the minimal + // tests but not the mainnet ones. + let fake_shard = (shard + 1) % T::ShardCount::to_u64(); + + for &index in state + .get_crosslink_committee_for_shard(fake_shard, relative_epoch)? + .committee + { + let validator = state + .validators + .get(index) + .ok_or(BeaconStateError::UnknownValidator)?; + committees[shard as usize] + .pubkeys + .push(validator.pubkey.clone())?; + let compact_balance = validator.effective_balance / spec.effective_balance_increment; + // `index` (top 6 bytes) + `slashed` (16th bit) + `compact_balance` (bottom 15 bits) + let compact_validator: u64 = + ((index as u64) << 16) + (u64::from(validator.slashed) << 15) + compact_balance; + committees[shard as usize] + .compact_validators + .push(compact_validator)?; + } + } + + Ok(Hash256::from_slice(&committees.tree_hash_root())) +} diff --git a/eth2/state_processing/src/common/get_indexed_attestation.rs b/eth2/state_processing/src/common/get_indexed_attestation.rs new file mode 100644 index 000000000..7c08c8708 --- /dev/null +++ b/eth2/state_processing/src/common/get_indexed_attestation.rs @@ -0,0 +1,122 @@ +use super::get_attesting_indices; +use crate::per_block_processing::errors::{ + AttestationInvalid as Invalid, AttestationValidationError as Error, +}; +use types::*; + +/// Convert `attestation` to (almost) indexed-verifiable form. +/// +/// Spec v0.8.0 +pub fn get_indexed_attestation( + state: &BeaconState, + attestation: &Attestation, +) -> Result, Error> { + let attesting_indices = + get_attesting_indices(state, &attestation.data, &attestation.aggregation_bits)?; + + let custody_bit_1_indices = + get_attesting_indices(state, &attestation.data, &attestation.custody_bits)?; + + verify!( + custody_bit_1_indices.is_subset(&attesting_indices), + Invalid::CustodyBitfieldNotSubset + ); + + let custody_bit_0_indices = &attesting_indices - &custody_bit_1_indices; + + Ok(IndexedAttestation { + custody_bit_0_indices: VariableList::new( + custody_bit_0_indices + .into_iter() + .map(|x| x as u64) + .collect(), + )?, + custody_bit_1_indices: VariableList::new( + custody_bit_1_indices + .into_iter() + .map(|x| x as u64) + .collect(), + )?, + data: attestation.data.clone(), + signature: attestation.signature.clone(), + }) +} + +#[cfg(test)] +mod test { + use super::*; + use itertools::{Either, Itertools}; + use types::test_utils::*; + + #[test] + fn custody_bitfield_indexing() { + let validator_count = 128; + let spec = MinimalEthSpec::default_spec(); + let state_builder = + TestingBeaconStateBuilder::::from_default_keypairs_file_if_exists( + validator_count, + &spec, + ); + let (mut state, keypairs) = state_builder.build(); + state.build_all_caches(&spec).unwrap(); + state.slot += 1; + + let shard = 0; + let cc = state + .get_crosslink_committee_for_shard(shard, RelativeEpoch::Current) + .unwrap(); + + // Make a third of the validators sign with custody bit 0, a third with custody bit 1 + // and a third not sign at all. + assert!( + cc.committee.len() >= 4, + "need at least 4 validators per committee for this test to work" + ); + let (mut bit_0_indices, mut bit_1_indices): (Vec<_>, Vec<_>) = cc + .committee + .iter() + .enumerate() + .filter(|(i, _)| i % 3 != 0) + .partition_map(|(i, index)| { + if i % 3 == 1 { + Either::Left(*index) + } else { + Either::Right(*index) + } + }); + assert!(!bit_0_indices.is_empty()); + assert!(!bit_1_indices.is_empty()); + + let bit_0_keys = bit_0_indices + .iter() + .map(|validator_index| &keypairs[*validator_index].sk) + .collect::>(); + let bit_1_keys = bit_1_indices + .iter() + .map(|validator_index| &keypairs[*validator_index].sk) + .collect::>(); + + let mut attestation_builder = + TestingAttestationBuilder::new(&state, &cc.committee, cc.slot, shard, &spec); + attestation_builder + .sign(&bit_0_indices, &bit_0_keys, &state.fork, &spec, false) + .sign(&bit_1_indices, &bit_1_keys, &state.fork, &spec, true); + let attestation = attestation_builder.build(); + + let indexed_attestation = get_indexed_attestation(&state, &attestation).unwrap(); + + bit_0_indices.sort(); + bit_1_indices.sort(); + + assert!(indexed_attestation + .custody_bit_0_indices + .iter() + .copied() + .eq(bit_0_indices.iter().map(|idx| *idx as u64))); + assert!(indexed_attestation + .custody_bit_1_indices + .iter() + .copied() + .eq(bit_1_indices.iter().map(|idx| *idx as u64))); + } +} diff --git a/eth2/state_processing/src/common/initiate_validator_exit.rs b/eth2/state_processing/src/common/initiate_validator_exit.rs index 40b3d80fa..092906971 100644 --- a/eth2/state_processing/src/common/initiate_validator_exit.rs +++ b/eth2/state_processing/src/common/initiate_validator_exit.rs @@ -3,23 +3,23 @@ use types::{BeaconStateError as Error, *}; /// Initiate the exit of the validator of the given `index`. /// -/// Spec v0.6.3 +/// Spec v0.8.1 pub fn initiate_validator_exit( state: &mut BeaconState, index: usize, spec: &ChainSpec, ) -> Result<(), Error> { - if index >= state.validator_registry.len() { + if index >= state.validators.len() { return Err(Error::UnknownValidator); } // Return if the validator already initiated exit - if state.validator_registry[index].exit_epoch != spec.far_future_epoch { + if state.validators[index].exit_epoch != spec.far_future_epoch { return Ok(()); } // Compute exit queue epoch - let delayed_epoch = state.get_delayed_activation_exit_epoch(state.current_epoch(), spec); + let delayed_epoch = state.compute_activation_exit_epoch(state.current_epoch(), spec); let mut exit_queue_epoch = state .exit_cache .max_epoch() @@ -31,8 +31,8 @@ pub fn initiate_validator_exit( } state.exit_cache.record_validator_exit(exit_queue_epoch); - state.validator_registry[index].exit_epoch = exit_queue_epoch; - state.validator_registry[index].withdrawable_epoch = + state.validators[index].exit_epoch = exit_queue_epoch; + state.validators[index].withdrawable_epoch = exit_queue_epoch + spec.min_validator_withdrawability_delay; Ok(()) diff --git a/eth2/state_processing/src/common/mod.rs b/eth2/state_processing/src/common/mod.rs index 26302fed0..8ce7b7107 100644 --- a/eth2/state_processing/src/common/mod.rs +++ b/eth2/state_processing/src/common/mod.rs @@ -1,11 +1,11 @@ -mod convert_to_indexed; mod get_attesting_indices; +mod get_compact_committees_root; +mod get_indexed_attestation; mod initiate_validator_exit; mod slash_validator; -mod verify_bitfield; -pub use convert_to_indexed::convert_to_indexed; -pub use get_attesting_indices::{get_attesting_indices, get_attesting_indices_unsorted}; +pub use get_attesting_indices::get_attesting_indices; +pub use get_compact_committees_root::get_compact_committees_root; +pub use get_indexed_attestation::get_indexed_attestation; pub use initiate_validator_exit::initiate_validator_exit; pub use slash_validator::slash_validator; -pub use verify_bitfield::verify_bitfield_length; diff --git a/eth2/state_processing/src/common/slash_validator.rs b/eth2/state_processing/src/common/slash_validator.rs index 0908f4a39..5b91c4a07 100644 --- a/eth2/state_processing/src/common/slash_validator.rs +++ b/eth2/state_processing/src/common/slash_validator.rs @@ -1,45 +1,51 @@ use crate::common::initiate_validator_exit; +use std::cmp; use types::{BeaconStateError as Error, *}; /// Slash the validator with index ``index``. /// -/// Spec v0.6.3 +/// Spec v0.8.0 pub fn slash_validator( state: &mut BeaconState, slashed_index: usize, opt_whistleblower_index: Option, spec: &ChainSpec, ) -> Result<(), Error> { - if slashed_index >= state.validator_registry.len() || slashed_index >= state.balances.len() { + if slashed_index >= state.validators.len() || slashed_index >= state.balances.len() { return Err(BeaconStateError::UnknownValidator); } - let current_epoch = state.current_epoch(); + let epoch = state.current_epoch(); initiate_validator_exit(state, slashed_index, spec)?; - state.validator_registry[slashed_index].slashed = true; - state.validator_registry[slashed_index].withdrawable_epoch = - current_epoch + Epoch::from(T::latest_slashed_exit_length()); - let slashed_balance = state.get_effective_balance(slashed_index, spec)?; - - state.set_slashed_balance( - current_epoch, - state.get_slashed_balance(current_epoch)? + slashed_balance, + state.validators[slashed_index].slashed = true; + state.validators[slashed_index].withdrawable_epoch = cmp::max( + state.validators[slashed_index].withdrawable_epoch, + epoch + Epoch::from(T::EpochsPerSlashingsVector::to_u64()), + ); + let validator_effective_balance = state.get_effective_balance(slashed_index, spec)?; + state.set_slashings( + epoch, + state.get_slashings(epoch)? + validator_effective_balance, )?; + safe_sub_assign!( + state.balances[slashed_index], + validator_effective_balance / spec.min_slashing_penalty_quotient + ); + // Apply proposer and whistleblower rewards let proposer_index = state.get_beacon_proposer_index(state.slot, RelativeEpoch::Current, spec)?; let whistleblower_index = opt_whistleblower_index.unwrap_or(proposer_index); - let whistleblowing_reward = slashed_balance / spec.whistleblowing_reward_quotient; - let proposer_reward = whistleblowing_reward / spec.proposer_reward_quotient; + let whistleblower_reward = validator_effective_balance / spec.whistleblower_reward_quotient; + let proposer_reward = whistleblower_reward / spec.proposer_reward_quotient; safe_add_assign!(state.balances[proposer_index], proposer_reward); safe_add_assign!( state.balances[whistleblower_index], - whistleblowing_reward.saturating_sub(proposer_reward) + whistleblower_reward.saturating_sub(proposer_reward) ); - safe_sub_assign!(state.balances[slashed_index], whistleblowing_reward); Ok(()) } diff --git a/eth2/state_processing/src/common/verify_bitfield.rs b/eth2/state_processing/src/common/verify_bitfield.rs deleted file mode 100644 index 0d4045c2e..000000000 --- a/eth2/state_processing/src/common/verify_bitfield.rs +++ /dev/null @@ -1,79 +0,0 @@ -use types::*; - -/// Verify ``bitfield`` against the ``committee_size``. -/// -/// Is title `verify_bitfield` in spec. -/// -/// Spec v0.6.3 -pub fn verify_bitfield_length(bitfield: &Bitfield, committee_size: usize) -> bool { - if bitfield.num_bytes() != ((committee_size + 7) / 8) { - return false; - } - - for i in committee_size..(bitfield.num_bytes() * 8) { - if bitfield.get(i).unwrap_or(false) { - return false; - } - } - - true -} - -#[cfg(test)] -mod test { - use super::*; - - #[test] - fn bitfield_length() { - assert_eq!( - verify_bitfield_length(&Bitfield::from_bytes(&[0b0000_0001]), 4), - true - ); - - assert_eq!( - verify_bitfield_length(&Bitfield::from_bytes(&[0b0001_0001]), 4), - false - ); - - assert_eq!( - verify_bitfield_length(&Bitfield::from_bytes(&[0b0000_0000]), 4), - true - ); - - assert_eq!( - verify_bitfield_length(&Bitfield::from_bytes(&[0b1000_0000]), 8), - true - ); - - assert_eq!( - verify_bitfield_length(&Bitfield::from_bytes(&[0b1000_0000, 0b0000_0000]), 16), - true - ); - - assert_eq!( - verify_bitfield_length(&Bitfield::from_bytes(&[0b1000_0000, 0b0000_0000]), 15), - false - ); - - assert_eq!( - verify_bitfield_length(&Bitfield::from_bytes(&[0b0000_0000, 0b0000_0000]), 8), - false - ); - - assert_eq!( - verify_bitfield_length( - &Bitfield::from_bytes(&[0b0000_0000, 0b0000_0000, 0b0000_0000]), - 8 - ), - false - ); - - assert_eq!( - verify_bitfield_length( - &Bitfield::from_bytes(&[0b0000_0000, 0b0000_0000, 0b0000_0000]), - 24 - ), - true - ); - } -} diff --git a/eth2/state_processing/src/genesis.rs b/eth2/state_processing/src/genesis.rs new file mode 100644 index 000000000..6f1f2819e --- /dev/null +++ b/eth2/state_processing/src/genesis.rs @@ -0,0 +1,61 @@ +use super::per_block_processing::{errors::BlockProcessingError, process_deposits}; +use crate::common::get_compact_committees_root; +use tree_hash::TreeHash; +use types::typenum::U4294967296; +use types::*; + +/// Initialize a `BeaconState` from genesis data. +/// +/// Spec v0.8.0 +// TODO: this is quite inefficient and we probably want to rethink how we do this +pub fn initialize_beacon_state_from_eth1( + eth1_block_hash: Hash256, + eth1_timestamp: u64, + deposits: Vec, + spec: &ChainSpec, +) -> Result, BlockProcessingError> { + let genesis_time = + eth1_timestamp - eth1_timestamp % spec.seconds_per_day + 2 * spec.seconds_per_day; + let eth1_data = Eth1Data { + // Temporary deposit root + deposit_root: Hash256::zero(), + deposit_count: deposits.len() as u64, + block_hash: eth1_block_hash, + }; + let mut state = BeaconState::new(genesis_time, eth1_data, spec); + + // Process deposits + let leaves: Vec<_> = deposits + .iter() + .map(|deposit| deposit.data.clone()) + .collect(); + for (index, deposit) in deposits.into_iter().enumerate() { + let deposit_data_list = VariableList::<_, U4294967296>::from(leaves[..=index].to_vec()); + state.eth1_data.deposit_root = Hash256::from_slice(&deposit_data_list.tree_hash_root()); + process_deposits(&mut state, &[deposit], spec)?; + } + + // Process activations + for (index, validator) in state.validators.iter_mut().enumerate() { + let balance = state.balances[index]; + validator.effective_balance = std::cmp::min( + balance - balance % spec.effective_balance_increment, + spec.max_effective_balance, + ); + if validator.effective_balance == spec.max_effective_balance { + validator.activation_eligibility_epoch = T::genesis_epoch(); + validator.activation_epoch = T::genesis_epoch(); + } + } + + // Populate active_index_roots and compact_committees_roots + let indices_list = VariableList::::from( + state.get_active_validator_indices(T::genesis_epoch()), + ); + let active_index_root = Hash256::from_slice(&indices_list.tree_hash_root()); + let committee_root = get_compact_committees_root(&state, RelativeEpoch::Current, spec)?; + state.fill_active_index_roots_with(active_index_root); + state.fill_compact_committees_roots_with(committee_root); + + Ok(state) +} diff --git a/eth2/state_processing/src/get_genesis_state.rs b/eth2/state_processing/src/get_genesis_state.rs deleted file mode 100644 index 5cb8648ee..000000000 --- a/eth2/state_processing/src/get_genesis_state.rs +++ /dev/null @@ -1,56 +0,0 @@ -use super::per_block_processing::{errors::BlockProcessingError, process_deposits}; -use tree_hash::TreeHash; -use types::*; - -pub enum GenesisError { - BlockProcessingError(BlockProcessingError), - BeaconStateError(BeaconStateError), -} - -/// Returns the genesis `BeaconState` -/// -/// Spec v0.6.3 -pub fn get_genesis_beacon_state( - genesis_validator_deposits: &[Deposit], - genesis_time: u64, - genesis_eth1_data: Eth1Data, - spec: &ChainSpec, -) -> Result, BlockProcessingError> { - // Get the genesis `BeaconState` - let mut state = BeaconState::genesis(genesis_time, genesis_eth1_data, spec); - - // Process genesis deposits. - process_deposits(&mut state, genesis_validator_deposits, spec)?; - - // Process genesis activations. - for validator in &mut state.validator_registry { - if validator.effective_balance >= spec.max_effective_balance { - validator.activation_eligibility_epoch = T::genesis_epoch(); - validator.activation_epoch = T::genesis_epoch(); - } - } - - // Ensure the current epoch cache is built. - state.build_committee_cache(RelativeEpoch::Current, spec)?; - - // Set all the active index roots to be the genesis active index root. - let active_validator_indices = state - .get_cached_active_validator_indices(RelativeEpoch::Current)? - .to_vec(); - let genesis_active_index_root = Hash256::from_slice(&active_validator_indices.tree_hash_root()); - state.fill_active_index_roots_with(genesis_active_index_root); - - Ok(state) -} - -impl From for GenesisError { - fn from(e: BlockProcessingError) -> GenesisError { - GenesisError::BlockProcessingError(e) - } -} - -impl From for GenesisError { - fn from(e: BeaconStateError) -> GenesisError { - GenesisError::BeaconStateError(e) - } -} diff --git a/eth2/state_processing/src/lib.rs b/eth2/state_processing/src/lib.rs index e040c1525..90f89b599 100644 --- a/eth2/state_processing/src/lib.rs +++ b/eth2/state_processing/src/lib.rs @@ -2,12 +2,12 @@ mod macros; pub mod common; -pub mod get_genesis_state; +pub mod genesis; pub mod per_block_processing; pub mod per_epoch_processing; pub mod per_slot_processing; -pub use get_genesis_state::get_genesis_beacon_state; +pub use genesis::initialize_beacon_state_from_eth1; pub use per_block_processing::{ errors::{BlockInvalid, BlockProcessingError}, per_block_processing, per_block_processing_without_verifying_block_signature, diff --git a/eth2/state_processing/src/per_block_processing.rs b/eth2/state_processing/src/per_block_processing.rs index ab7e5a320..4d58b6b18 100644 --- a/eth2/state_processing/src/per_block_processing.rs +++ b/eth2/state_processing/src/per_block_processing.rs @@ -1,6 +1,8 @@ use crate::common::{initiate_validator_exit, slash_validator}; use errors::{BlockInvalid as Invalid, BlockProcessingError as Error, IntoWithIndex}; use rayon::prelude::*; +use std::collections::HashSet; +use std::iter::FromIterator; use tree_hash::{SignedRoot, TreeHash}; use types::*; @@ -8,30 +10,29 @@ pub use self::verify_attester_slashing::{ get_slashable_indices, get_slashable_indices_modular, verify_attester_slashing, }; pub use self::verify_proposer_slashing::verify_proposer_slashing; -pub use validate_attestation::{ - validate_attestation, validate_attestation_time_independent_only, - validate_attestation_without_signature, +pub use is_valid_indexed_attestation::{ + is_valid_indexed_attestation, is_valid_indexed_attestation_without_signature, +}; +pub use verify_attestation::{ + verify_attestation, verify_attestation_time_independent_only, + verify_attestation_without_signature, }; pub use verify_deposit::{ - get_existing_validator_index, verify_deposit_index, verify_deposit_merkle_proof, - verify_deposit_signature, + get_existing_validator_index, verify_deposit_merkle_proof, verify_deposit_signature, }; pub use verify_exit::{verify_exit, verify_exit_time_independent_only}; -pub use verify_indexed_attestation::{ - verify_indexed_attestation, verify_indexed_attestation_without_signature, -}; pub use verify_transfer::{ execute_transfer, verify_transfer, verify_transfer_time_independent_only, }; pub mod block_processing_builder; pub mod errors; +mod is_valid_indexed_attestation; pub mod tests; -mod validate_attestation; +mod verify_attestation; mod verify_attester_slashing; mod verify_deposit; mod verify_exit; -mod verify_indexed_attestation; mod verify_proposer_slashing; mod verify_transfer; @@ -40,10 +41,10 @@ mod verify_transfer; /// Returns `Ok(())` if the block is valid and the state was successfully updated. Otherwise /// returns an error describing why the block was invalid or how the function failed to execute. /// -/// Spec v0.6.3 +/// Spec v0.8.0 pub fn per_block_processing( state: &mut BeaconState, - block: &BeaconBlock, + block: &BeaconBlock, spec: &ChainSpec, ) -> Result<(), Error> { per_block_processing_signature_optional(state, block, true, spec) @@ -55,10 +56,10 @@ pub fn per_block_processing( /// Returns `Ok(())` if the block is valid and the state was successfully updated. Otherwise /// returns an error describing why the block was invalid or how the function failed to execute. /// -/// Spec v0.6.3 +/// Spec v0.8.0 pub fn per_block_processing_without_verifying_block_signature( state: &mut BeaconState, - block: &BeaconBlock, + block: &BeaconBlock, spec: &ChainSpec, ) -> Result<(), Error> { per_block_processing_signature_optional(state, block, false, spec) @@ -70,10 +71,10 @@ pub fn per_block_processing_without_verifying_block_signature( /// Returns `Ok(())` if the block is valid and the state was successfully updated. Otherwise /// returns an error describing why the block was invalid or how the function failed to execute. /// -/// Spec v0.6.3 +/// Spec v0.8.0 fn per_block_processing_signature_optional( mut state: &mut BeaconState, - block: &BeaconBlock, + block: &BeaconBlock, should_verify_block_signature: bool, spec: &ChainSpec, ) -> Result<(), Error> { @@ -84,7 +85,7 @@ fn per_block_processing_signature_optional( state.build_committee_cache(RelativeEpoch::Current, spec)?; process_randao(&mut state, &block, &spec)?; - process_eth1_data(&mut state, &block.body.eth1_data, spec)?; + process_eth1_data(&mut state, &block.body.eth1_data)?; process_proposer_slashings(&mut state, &block.body.proposer_slashings, spec)?; process_attester_slashings(&mut state, &block.body.attester_slashings, spec)?; process_attestations(&mut state, &block.body.attestations, spec)?; @@ -97,10 +98,10 @@ fn per_block_processing_signature_optional( /// Processes the block header. /// -/// Spec v0.6.3 +/// Spec v0.8.0 pub fn process_block_header( state: &mut BeaconState, - block: &BeaconBlock, + block: &BeaconBlock, spec: &ChainSpec, should_verify_block_signature: bool, ) -> Result<(), Error> { @@ -109,18 +110,18 @@ pub fn process_block_header( let expected_previous_block_root = Hash256::from_slice(&state.latest_block_header.signed_root()); verify!( - block.previous_block_root == expected_previous_block_root, + block.parent_root == expected_previous_block_root, Invalid::ParentBlockRootMismatch { state: expected_previous_block_root, - block: block.previous_block_root, + block: block.parent_root, } ); - state.latest_block_header = block.temporary_block_header(spec); + state.latest_block_header = block.temporary_block_header(); // Verify proposer is not slashed let proposer_idx = state.get_beacon_proposer_index(block.slot, RelativeEpoch::Current, spec)?; - let proposer = &state.validator_registry[proposer_idx]; + let proposer = &state.validators[proposer_idx]; verify!(!proposer.slashed, Invalid::ProposerSlashed(proposer_idx)); if should_verify_block_signature { @@ -132,13 +133,13 @@ pub fn process_block_header( /// Verifies the signature of a block. /// -/// Spec v0.6.3 +/// Spec v0.8.0 pub fn verify_block_signature( state: &BeaconState, - block: &BeaconBlock, + block: &BeaconBlock, spec: &ChainSpec, ) -> Result<(), Error> { - let block_proposer = &state.validator_registry + let block_proposer = &state.validators [state.get_beacon_proposer_index(block.slot, RelativeEpoch::Current, spec)?]; let domain = spec.get_domain( @@ -160,16 +161,16 @@ pub fn verify_block_signature( /// Verifies the `randao_reveal` against the block's proposer pubkey and updates /// `state.latest_randao_mixes`. /// -/// Spec v0.6.3 +/// Spec v0.8.0 pub fn process_randao( state: &mut BeaconState, - block: &BeaconBlock, + block: &BeaconBlock, spec: &ChainSpec, ) -> Result<(), Error> { - let block_proposer = &state.validator_registry + let block_proposer = &state.validators [state.get_beacon_proposer_index(block.slot, RelativeEpoch::Current, spec)?]; - // Verify the RANDAO is a valid signature of the proposer. + // Verify RANDAO reveal. verify!( block.body.randao_reveal.verify( &state.current_epoch().tree_hash_root()[..], @@ -191,22 +192,21 @@ pub fn process_randao( /// Update the `state.eth1_data_votes` based upon the `eth1_data` provided. /// -/// Spec v0.6.3 +/// Spec v0.8.0 pub fn process_eth1_data( state: &mut BeaconState, eth1_data: &Eth1Data, - spec: &ChainSpec, ) -> Result<(), Error> { - state.eth1_data_votes.push(eth1_data.clone()); + state.eth1_data_votes.push(eth1_data.clone())?; let num_votes = state .eth1_data_votes .iter() .filter(|vote| *vote == eth1_data) - .count() as u64; + .count(); - if num_votes * 2 > spec.slots_per_eth1_voting_period { - state.latest_eth1_data = eth1_data.clone(); + if num_votes * 2 > T::SlotsPerEth1VotingPeriod::to_usize() { + state.eth1_data = eth1_data.clone(); } Ok(()) @@ -217,17 +217,12 @@ pub fn process_eth1_data( /// Returns `Ok(())` if the validation and state updates completed successfully, otherwise returns /// an `Err` describing the invalid object or cause of failure. /// -/// Spec v0.6.3 +/// Spec v0.8.0 pub fn process_proposer_slashings( state: &mut BeaconState, proposer_slashings: &[ProposerSlashing], spec: &ChainSpec, ) -> Result<(), Error> { - verify!( - proposer_slashings.len() as u64 <= spec.max_proposer_slashings, - Invalid::MaxProposerSlashingsExceeded - ); - // Verify proposer slashings in parallel. proposer_slashings .par_iter() @@ -250,21 +245,15 @@ pub fn process_proposer_slashings( /// Returns `Ok(())` if the validation and state updates completed successfully, otherwise returns /// an `Err` describing the invalid object or cause of failure. /// -/// Spec v0.6.3 +/// Spec v0.8.0 pub fn process_attester_slashings( state: &mut BeaconState, - attester_slashings: &[AttesterSlashing], + attester_slashings: &[AttesterSlashing], spec: &ChainSpec, ) -> Result<(), Error> { - verify!( - attester_slashings.len() as u64 <= spec.max_attester_slashings, - Invalid::MaxAttesterSlashingsExceed - ); - // Verify the `IndexedAttestation`s in parallel (these are the resource-consuming objects, not // the `AttesterSlashing`s themselves). - let mut indexed_attestations: Vec<&IndexedAttestation> = - Vec::with_capacity(attester_slashings.len() * 2); + let mut indexed_attestations: Vec<&_> = Vec::with_capacity(attester_slashings.len() * 2); for attester_slashing in attester_slashings { indexed_attestations.push(&attester_slashing.attestation_1); indexed_attestations.push(&attester_slashing.attestation_2); @@ -275,7 +264,7 @@ pub fn process_attester_slashings( .par_iter() .enumerate() .try_for_each(|(i, indexed_attestation)| { - verify_indexed_attestation(&state, indexed_attestation, spec) + is_valid_indexed_attestation(&state, indexed_attestation, spec) .map_err(|e| e.into_with_index(i)) })?; let all_indexed_attestations_have_been_checked = true; @@ -308,17 +297,12 @@ pub fn process_attester_slashings( /// Returns `Ok(())` if the validation and state updates completed successfully, otherwise returns /// an `Err` describing the invalid object or cause of failure. /// -/// Spec v0.6.3 +/// Spec v0.8.0 pub fn process_attestations( state: &mut BeaconState, - attestations: &[Attestation], + attestations: &[Attestation], spec: &ChainSpec, ) -> Result<(), Error> { - verify!( - attestations.len() as u64 <= spec.max_attestations, - Invalid::MaxAttestationsExceeded - ); - // Ensure the previous epoch cache exists. state.build_committee_cache(RelativeEpoch::Previous, spec)?; @@ -327,25 +311,27 @@ pub fn process_attestations( .par_iter() .enumerate() .try_for_each(|(i, attestation)| { - validate_attestation(state, attestation, spec).map_err(|e| e.into_with_index(i)) + verify_attestation(state, attestation, spec).map_err(|e| e.into_with_index(i)) })?; // Update the state in series. let proposer_index = state.get_beacon_proposer_index(state.slot, RelativeEpoch::Current, spec)? as u64; for attestation in attestations { - let attestation_slot = state.get_attestation_slot(&attestation.data)?; + let attestation_slot = state.get_attestation_data_slot(&attestation.data)?; let pending_attestation = PendingAttestation { - aggregation_bitfield: attestation.aggregation_bitfield.clone(), + aggregation_bits: attestation.aggregation_bits.clone(), data: attestation.data.clone(), inclusion_delay: (state.slot - attestation_slot).as_u64(), proposer_index, }; - if attestation.data.target_epoch == state.current_epoch() { - state.current_epoch_attestations.push(pending_attestation) + if attestation.data.target.epoch == state.current_epoch() { + state.current_epoch_attestations.push(pending_attestation)?; } else { - state.previous_epoch_attestations.push(pending_attestation) + state + .previous_epoch_attestations + .push(pending_attestation)?; } } @@ -357,7 +343,7 @@ pub fn process_attestations( /// Returns `Ok(())` if the validation and state updates completed successfully, otherwise returns /// an `Err` describing the invalid object or cause of failure. /// -/// Spec v0.6.3 +/// Spec v0.8.0 pub fn process_deposits( state: &mut BeaconState, deposits: &[Deposit], @@ -366,8 +352,8 @@ pub fn process_deposits( verify!( deposits.len() as u64 == std::cmp::min( - spec.max_deposits, - state.latest_eth1_data.deposit_count - state.deposit_index + T::MaxDeposits::to_u64(), + state.eth1_data.deposit_count - state.eth1_deposit_index ), Invalid::DepositCountInvalid ); @@ -377,14 +363,13 @@ pub fn process_deposits( .par_iter() .enumerate() .try_for_each(|(i, deposit)| { - verify_deposit_merkle_proof(state, deposit, spec).map_err(|e| e.into_with_index(i)) + verify_deposit_merkle_proof(state, deposit, state.eth1_deposit_index + i as u64, spec) + .map_err(|e| e.into_with_index(i)) })?; - // Check `state.deposit_index` and update the state in series. + // Update the state in series. for (i, deposit) in deposits.iter().enumerate() { - verify_deposit_index(state, deposit).map_err(|e| e.into_with_index(i))?; - - state.deposit_index += 1; + state.eth1_deposit_index += 1; // Ensure the state's pubkey cache is fully up-to-date, it will be used to check to see if the // depositing validator already exists in the registry. @@ -421,8 +406,8 @@ pub fn process_deposits( ), slashed: false, }; - state.validator_registry.push(validator); - state.balances.push(deposit.data.amount); + state.validators.push(validator)?; + state.balances.push(deposit.data.amount)?; } } @@ -434,17 +419,12 @@ pub fn process_deposits( /// Returns `Ok(())` if the validation and state updates completed successfully, otherwise returns /// an `Err` describing the invalid object or cause of failure. /// -/// Spec v0.6.3 +/// Spec v0.8.0 pub fn process_exits( state: &mut BeaconState, voluntary_exits: &[VoluntaryExit], spec: &ChainSpec, ) -> Result<(), Error> { - verify!( - voluntary_exits.len() as u64 <= spec.max_voluntary_exits, - Invalid::MaxExitsExceeded - ); - // Verify exits in parallel. voluntary_exits .par_iter() @@ -466,15 +446,16 @@ pub fn process_exits( /// Returns `Ok(())` if the validation and state updates completed successfully, otherwise returns /// an `Err` describing the invalid object or cause of failure. /// -/// Spec v0.6.3 +/// Spec v0.8.0 pub fn process_transfers( state: &mut BeaconState, transfers: &[Transfer], spec: &ChainSpec, ) -> Result<(), Error> { + // Verify that there are no duplicate transfers verify!( - transfers.len() as u64 <= spec.max_transfers, - Invalid::MaxTransfersExceed + transfers.len() == HashSet::<_>::from_iter(transfers).len(), + Invalid::DuplicateTransfers ); transfers diff --git a/eth2/state_processing/src/per_block_processing/block_processing_builder.rs b/eth2/state_processing/src/per_block_processing/block_processing_builder.rs index 05a5a2de2..329583759 100644 --- a/eth2/state_processing/src/per_block_processing/block_processing_builder.rs +++ b/eth2/state_processing/src/per_block_processing/block_processing_builder.rs @@ -4,8 +4,7 @@ use types::*; pub struct BlockProcessingBuilder { pub state_builder: TestingBeaconStateBuilder, - pub block_builder: TestingBeaconBlockBuilder, - + pub block_builder: TestingBeaconBlockBuilder, pub num_validators: usize, } @@ -36,15 +35,15 @@ impl BlockProcessingBuilder { randao_sk: Option, previous_block_root: Option, spec: &ChainSpec, - ) -> (BeaconBlock, BeaconState) { + ) -> (BeaconBlock, BeaconState) { let (state, keypairs) = self.state_builder.build(); let builder = &mut self.block_builder; builder.set_slot(state.slot); match previous_block_root { - Some(root) => builder.set_previous_block_root(root), - None => builder.set_previous_block_root(Hash256::from_slice( + Some(root) => builder.set_parent_root(root), + None => builder.set_parent_root(Hash256::from_slice( &state.latest_block_header.signed_root(), )), } @@ -55,13 +54,11 @@ impl BlockProcessingBuilder { let keypair = &keypairs[proposer_index]; match randao_sk { - Some(sk) => builder.set_randao_reveal::(&sk, &state.fork, spec), - None => builder.set_randao_reveal::(&keypair.sk, &state.fork, spec), + Some(sk) => builder.set_randao_reveal(&sk, &state.fork, spec), + None => builder.set_randao_reveal(&keypair.sk, &state.fork, spec), } - let block = self - .block_builder - .build::(&keypair.sk, &state.fork, spec); + let block = self.block_builder.build(&keypair.sk, &state.fork, spec); (block, state) } diff --git a/eth2/state_processing/src/per_block_processing/errors.rs b/eth2/state_processing/src/per_block_processing/errors.rs index 8c8c365cc..e2b908c73 100644 --- a/eth2/state_processing/src/per_block_processing/errors.rs +++ b/eth2/state_processing/src/per_block_processing/errors.rs @@ -59,6 +59,8 @@ pub enum BlockProcessingError { Invalid(BlockInvalid), /// Encountered a `BeaconStateError` whilst attempting to determine validity. BeaconStateError(BeaconStateError), + /// Encountered an `ssz_types::Error` whilst attempting to determine validity. + SszTypesError(ssz_types::Error), } impl_from_beacon_state_error!(BlockProcessingError); @@ -78,6 +80,7 @@ pub enum BlockInvalid { MaxAttesterSlashingsExceed, MaxProposerSlashingsExceeded, DepositCountInvalid, + DuplicateTransfers, MaxExitsExceeded, MaxTransfersExceed, AttestationInvalid(usize, AttestationInvalid), @@ -92,6 +95,15 @@ pub enum BlockInvalid { DepositProcessingFailed(usize), ExitInvalid(usize, ExitInvalid), TransferInvalid(usize, TransferInvalid), + // NOTE: this is only used in tests, normally a state root mismatch is handled + // in the beacon_chain rather than in state_processing + StateRootMismatch, +} + +impl From for BlockProcessingError { + fn from(error: ssz_types::Error) -> Self { + BlockProcessingError::SszTypesError(error) + } } impl Into for BlockInvalid { @@ -116,8 +128,8 @@ pub enum AttestationValidationError { /// Describes why an object is invalid. #[derive(Debug, PartialEq)] pub enum AttestationInvalid { - /// Attestation references a pre-genesis slot. - PreGenesis { genesis: Slot, attestation: Slot }, + /// Shard exceeds SHARD_COUNT. + BadShard, /// Attestation included before the inclusion delay. IncludedTooEarly { state: Slot, @@ -128,27 +140,23 @@ pub enum AttestationInvalid { IncludedTooLate { state: Slot, attestation: Slot }, /// Attestation target epoch does not match the current or previous epoch. BadTargetEpoch, - /// Attestation justified epoch does not match the states current or previous justified epoch. + /// Attestation justified checkpoint doesn't match the state's current or previous justified + /// checkpoint. /// /// `is_current` is `true` if the attestation was compared to the - /// `state.current_justified_epoch`, `false` if compared to `state.previous_justified_epoch`. - WrongJustifiedEpoch { - state: Epoch, - attestation: Epoch, - is_current: bool, - }, - /// Attestation justified epoch root does not match root known to the state. - /// - /// `is_current` is `true` if the attestation was compared to the - /// `state.current_justified_epoch`, `false` if compared to `state.previous_justified_epoch`. - WrongJustifiedRoot { - state: Hash256, - attestation: Hash256, + /// `state.current_justified_checkpoint`, `false` if compared to `state.previous_justified_checkpoint`. + WrongJustifiedCheckpoint { + state: Checkpoint, + attestation: Checkpoint, is_current: bool, }, /// Attestation crosslink root does not match the state crosslink root for the attestations /// slot. - BadPreviousCrosslink, + BadParentCrosslinkHash, + /// Attestation crosslink start epoch does not match the end epoch of the state crosslink. + BadParentCrosslinkStartEpoch, + /// Attestation crosslink end epoch does not match the expected value. + BadParentCrosslinkEndEpoch, /// The custody bitfield has some bits set `true`. This is not allowed in phase 0. CustodyBitfieldHasSetBits, /// There are no set bits on the attestation -- an attestation must be signed by at least one @@ -164,6 +172,8 @@ pub enum AttestationInvalid { committee_len: usize, bitfield_len: usize, }, + /// The bits set in the custody bitfield are not a subset of those set in the aggregation bits. + CustodyBitfieldNotSubset, /// There was no known committee in this `epoch` for the given shard and slot. NoCommitteeForShard { shard: u64, slot: Slot }, /// The validator index was unknown. @@ -186,6 +196,12 @@ impl From for AttestationValidationError { } } +impl From for AttestationValidationError { + fn from(error: ssz_types::Error) -> Self { + Self::from(IndexedAttestationValidationError::from(error)) + } +} + /* * `AttesterSlashing` Validation */ @@ -239,12 +255,14 @@ pub enum IndexedAttestationInvalid { CustodyBitValidatorsIntersect, /// The custody bitfield has some bits set `true`. This is not allowed in phase 0. CustodyBitfieldHasSetBits, + /// The custody bitfield violated a type-level bound. + CustodyBitfieldBoundsError(ssz_types::Error), /// No validator indices were specified. NoValidatorIndices, /// The number of indices exceeds the global maximum. /// /// (max_indices, indices_given) - MaxIndicesExceed(u64, usize), + MaxIndicesExceed(usize, usize), /// The validator indices were not in increasing order. /// /// The error occurred between the given `index` and `index + 1` @@ -263,6 +281,14 @@ impl Into for IndexedAttestationValidationError { } } +impl From for IndexedAttestationValidationError { + fn from(error: ssz_types::Error) -> Self { + IndexedAttestationValidationError::Invalid( + IndexedAttestationInvalid::CustodyBitfieldBoundsError(error), + ) + } +} + impl_into_with_index_without_beacon_error!( IndexedAttestationValidationError, IndexedAttestationInvalid @@ -356,7 +382,10 @@ pub enum ExitInvalid { /// The exit is for a future epoch. FutureEpoch { state: Epoch, exit: Epoch }, /// The validator has not been active for long enough. - TooYoungToLeave { lifespan: Epoch, expected: u64 }, + TooYoungToExit { + current_epoch: Epoch, + earliest_exit_epoch: Epoch, + }, /// The exit signature was not signed by the validator. BadSignature, } diff --git a/eth2/state_processing/src/per_block_processing/verify_indexed_attestation.rs b/eth2/state_processing/src/per_block_processing/is_valid_indexed_attestation.rs similarity index 57% rename from eth2/state_processing/src/per_block_processing/verify_indexed_attestation.rs rename to eth2/state_processing/src/per_block_processing/is_valid_indexed_attestation.rs index 4597082df..3f8097ae0 100644 --- a/eth2/state_processing/src/per_block_processing/verify_indexed_attestation.rs +++ b/eth2/state_processing/src/per_block_processing/is_valid_indexed_attestation.rs @@ -8,60 +8,58 @@ use types::*; /// Verify an `IndexedAttestation`. /// -/// Spec v0.6.3 -pub fn verify_indexed_attestation( +/// Spec v0.8.0 +pub fn is_valid_indexed_attestation( state: &BeaconState, - indexed_attestation: &IndexedAttestation, + indexed_attestation: &IndexedAttestation, spec: &ChainSpec, ) -> Result<(), Error> { - verify_indexed_attestation_parametric(state, indexed_attestation, spec, true) + is_valid_indexed_attestation_parametric(state, indexed_attestation, spec, true) } /// Verify but don't check the signature. /// -/// Spec v0.6.3 -pub fn verify_indexed_attestation_without_signature( +/// Spec v0.8.0 +pub fn is_valid_indexed_attestation_without_signature( state: &BeaconState, - indexed_attestation: &IndexedAttestation, + indexed_attestation: &IndexedAttestation, spec: &ChainSpec, ) -> Result<(), Error> { - verify_indexed_attestation_parametric(state, indexed_attestation, spec, false) + is_valid_indexed_attestation_parametric(state, indexed_attestation, spec, false) } /// Optionally check the signature. /// -/// Spec v0.6.3 -fn verify_indexed_attestation_parametric( +/// Spec v0.8.0 +fn is_valid_indexed_attestation_parametric( state: &BeaconState, - indexed_attestation: &IndexedAttestation, + indexed_attestation: &IndexedAttestation, spec: &ChainSpec, verify_signature: bool, ) -> Result<(), Error> { - let custody_bit_0_indices = &indexed_attestation.custody_bit_0_indices; - let custody_bit_1_indices = &indexed_attestation.custody_bit_1_indices; + let bit_0_indices = &indexed_attestation.custody_bit_0_indices; + let bit_1_indices = &indexed_attestation.custody_bit_1_indices; - // Ensure no duplicate indices across custody bits + // Verify no index has custody bit equal to 1 [to be removed in phase 1] + verify!(bit_1_indices.is_empty(), Invalid::CustodyBitfieldHasSetBits); + + // Verify max number of indices + let total_indices = bit_0_indices.len() + bit_1_indices.len(); + verify!( + total_indices <= T::MaxValidatorsPerCommittee::to_usize(), + Invalid::MaxIndicesExceed(T::MaxValidatorsPerCommittee::to_usize(), total_indices) + ); + + // Verify index sets are disjoint let custody_bit_intersection: HashSet<&u64> = - &HashSet::from_iter(custody_bit_0_indices) & &HashSet::from_iter(custody_bit_1_indices); + &HashSet::from_iter(bit_0_indices.iter()) & &HashSet::from_iter(bit_1_indices.iter()); verify!( custody_bit_intersection.is_empty(), Invalid::CustodyBitValidatorsIntersect ); - // Check that nobody signed with custody bit 1 (to be removed in phase 1) - if !custody_bit_1_indices.is_empty() { - invalid!(Invalid::CustodyBitfieldHasSetBits); - } - - let total_indices = custody_bit_0_indices.len() + custody_bit_1_indices.len(); - verify!(1 <= total_indices, Invalid::NoValidatorIndices); - verify!( - total_indices as u64 <= spec.max_indices_per_attestation, - Invalid::MaxIndicesExceed(spec.max_indices_per_attestation, total_indices) - ); - // Check that both vectors of indices are sorted - let check_sorted = |list: &Vec| { + let check_sorted = |list: &[u64]| -> Result<(), Error> { list.windows(2).enumerate().try_for_each(|(i, pair)| { if pair[0] >= pair[1] { invalid!(Invalid::BadValidatorIndicesOrdering(i)); @@ -71,11 +69,11 @@ fn verify_indexed_attestation_parametric( })?; Ok(()) }; - check_sorted(custody_bit_0_indices)?; - check_sorted(custody_bit_1_indices)?; + check_sorted(&bit_0_indices)?; + check_sorted(&bit_1_indices)?; if verify_signature { - verify_indexed_attestation_signature(state, indexed_attestation, spec)?; + is_valid_indexed_attestation_signature(state, indexed_attestation, spec)?; } Ok(()) @@ -94,7 +92,7 @@ where AggregatePublicKey::new(), |mut aggregate_pubkey, &validator_idx| { state - .validator_registry + .validators .get(validator_idx as usize) .ok_or_else(|| Error::Invalid(Invalid::UnknownValidator(validator_idx))) .map(|validator| { @@ -107,10 +105,10 @@ where /// Verify the signature of an IndexedAttestation. /// -/// Spec v0.6.3 -fn verify_indexed_attestation_signature( +/// Spec v0.8.0 +fn is_valid_indexed_attestation_signature( state: &BeaconState, - indexed_attestation: &IndexedAttestation, + indexed_attestation: &IndexedAttestation, spec: &ChainSpec, ) -> Result<(), Error> { let bit_0_pubkey = create_aggregate_pubkey(state, &indexed_attestation.custody_bit_0_indices)?; @@ -127,20 +125,11 @@ fn verify_indexed_attestation_signature( } .tree_hash_root(); - let mut messages = vec![]; - let mut keys = vec![]; - - if !indexed_attestation.custody_bit_0_indices.is_empty() { - messages.push(&message_0[..]); - keys.push(&bit_0_pubkey); - } - if !indexed_attestation.custody_bit_1_indices.is_empty() { - messages.push(&message_1[..]); - keys.push(&bit_1_pubkey); - } + let messages = vec![&message_0[..], &message_1[..]]; + let keys = vec![&bit_0_pubkey, &bit_1_pubkey]; let domain = spec.get_domain( - indexed_attestation.data.target_epoch, + indexed_attestation.data.target.epoch, Domain::Attestation, &state.fork, ); diff --git a/eth2/state_processing/src/per_block_processing/tests.rs b/eth2/state_processing/src/per_block_processing/tests.rs index 6c9593c49..4c73a4212 100644 --- a/eth2/state_processing/src/per_block_processing/tests.rs +++ b/eth2/state_processing/src/per_block_processing/tests.rs @@ -51,7 +51,7 @@ fn invalid_parent_block_root() { Err(BlockProcessingError::Invalid( BlockInvalid::ParentBlockRootMismatch { state: Hash256::from_slice(&state.latest_block_header.signed_root()), - block: block.previous_block_root + block: block.parent_root } )) ); diff --git a/eth2/state_processing/src/per_block_processing/validate_attestation.rs b/eth2/state_processing/src/per_block_processing/validate_attestation.rs deleted file mode 100644 index a2ee268bb..000000000 --- a/eth2/state_processing/src/per_block_processing/validate_attestation.rs +++ /dev/null @@ -1,156 +0,0 @@ -use super::errors::{AttestationInvalid as Invalid, AttestationValidationError as Error}; -use crate::common::convert_to_indexed; -use crate::per_block_processing::{ - verify_indexed_attestation, verify_indexed_attestation_without_signature, -}; -use tree_hash::TreeHash; -use types::*; - -/// Indicates if an `Attestation` is valid to be included in a block in the current epoch of the -/// given state. -/// -/// Returns `Ok(())` if the `Attestation` is valid, otherwise indicates the reason for invalidity. -/// -/// Spec v0.6.3 -pub fn validate_attestation( - state: &BeaconState, - attestation: &Attestation, - spec: &ChainSpec, -) -> Result<(), Error> { - validate_attestation_parametric(state, attestation, spec, true, false) -} - -/// Like `validate_attestation` but doesn't run checks which may become true in future states. -pub fn validate_attestation_time_independent_only( - state: &BeaconState, - attestation: &Attestation, - spec: &ChainSpec, -) -> Result<(), Error> { - validate_attestation_parametric(state, attestation, spec, true, true) -} - -/// Indicates if an `Attestation` is valid to be included in a block in the current epoch of the -/// given state, without validating the aggregate signature. -/// -/// Returns `Ok(())` if the `Attestation` is valid, otherwise indicates the reason for invalidity. -/// -/// Spec v0.6.3 -pub fn validate_attestation_without_signature( - state: &BeaconState, - attestation: &Attestation, - spec: &ChainSpec, -) -> Result<(), Error> { - validate_attestation_parametric(state, attestation, spec, false, false) -} - -/// Indicates if an `Attestation` is valid to be included in a block in the current epoch of the -/// given state, optionally validating the aggregate signature. -/// -/// -/// Spec v0.6.3 -fn validate_attestation_parametric( - state: &BeaconState, - attestation: &Attestation, - spec: &ChainSpec, - verify_signature: bool, - time_independent_only: bool, -) -> Result<(), Error> { - let attestation_slot = state.get_attestation_slot(&attestation.data)?; - - // Check attestation slot. - verify!( - time_independent_only - || attestation_slot + spec.min_attestation_inclusion_delay <= state.slot, - Invalid::IncludedTooEarly { - state: state.slot, - delay: spec.min_attestation_inclusion_delay, - attestation: attestation_slot - } - ); - verify!( - state.slot <= attestation_slot + T::slots_per_epoch(), - Invalid::IncludedTooLate { - state: state.slot, - attestation: attestation_slot - } - ); - - // Verify the Casper FFG vote. - if !time_independent_only { - verify_casper_ffg_vote(attestation, state)?; - } - - // Crosslink data root is zero (to be removed in phase 1). - verify!( - attestation.data.crosslink_data_root == spec.zero_hash, - Invalid::ShardBlockRootNotZero - ); - - // Check signature and bitfields - let indexed_attestation = convert_to_indexed(state, attestation)?; - if verify_signature { - verify_indexed_attestation(state, &indexed_attestation, spec)?; - } else { - verify_indexed_attestation_without_signature(state, &indexed_attestation, spec)?; - } - - Ok(()) -} - -/// Check target epoch, source epoch, source root, and source crosslink. -/// -/// Spec v0.6.3 -fn verify_casper_ffg_vote( - attestation: &Attestation, - state: &BeaconState, -) -> Result<(), Error> { - let data = &attestation.data; - if data.target_epoch == state.current_epoch() { - verify!( - data.source_epoch == state.current_justified_epoch, - Invalid::WrongJustifiedEpoch { - state: state.current_justified_epoch, - attestation: data.source_epoch, - is_current: true, - } - ); - verify!( - data.source_root == state.current_justified_root, - Invalid::WrongJustifiedRoot { - state: state.current_justified_root, - attestation: data.source_root, - is_current: true, - } - ); - verify!( - data.previous_crosslink_root - == Hash256::from_slice(&state.get_current_crosslink(data.shard)?.tree_hash_root()), - Invalid::BadPreviousCrosslink - ); - } else if data.target_epoch == state.previous_epoch() { - verify!( - data.source_epoch == state.previous_justified_epoch, - Invalid::WrongJustifiedEpoch { - state: state.previous_justified_epoch, - attestation: data.source_epoch, - is_current: false, - } - ); - verify!( - data.source_root == state.previous_justified_root, - Invalid::WrongJustifiedRoot { - state: state.previous_justified_root, - attestation: data.source_root, - is_current: false, - } - ); - verify!( - data.previous_crosslink_root - == Hash256::from_slice(&state.get_previous_crosslink(data.shard)?.tree_hash_root()), - Invalid::BadPreviousCrosslink - ); - } else { - invalid!(Invalid::BadTargetEpoch) - } - Ok(()) -} diff --git a/eth2/state_processing/src/per_block_processing/verify_attestation.rs b/eth2/state_processing/src/per_block_processing/verify_attestation.rs new file mode 100644 index 000000000..af2530045 --- /dev/null +++ b/eth2/state_processing/src/per_block_processing/verify_attestation.rs @@ -0,0 +1,156 @@ +use super::errors::{AttestationInvalid as Invalid, AttestationValidationError as Error}; +use crate::common::get_indexed_attestation; +use crate::per_block_processing::{ + is_valid_indexed_attestation, is_valid_indexed_attestation_without_signature, +}; +use tree_hash::TreeHash; +use types::*; + +/// Indicates if an `Attestation` is valid to be included in a block in the current epoch of the +/// given state. +/// +/// Returns `Ok(())` if the `Attestation` is valid, otherwise indicates the reason for invalidity. +/// +/// Spec v0.8.0 +pub fn verify_attestation( + state: &BeaconState, + attestation: &Attestation, + spec: &ChainSpec, +) -> Result<(), Error> { + verify_attestation_parametric(state, attestation, spec, true, false) +} + +/// Like `verify_attestation` but doesn't run checks which may become true in future states. +pub fn verify_attestation_time_independent_only( + state: &BeaconState, + attestation: &Attestation, + spec: &ChainSpec, +) -> Result<(), Error> { + verify_attestation_parametric(state, attestation, spec, true, true) +} + +/// Indicates if an `Attestation` is valid to be included in a block in the current epoch of the +/// given state, without validating the aggregate signature. +/// +/// Returns `Ok(())` if the `Attestation` is valid, otherwise indicates the reason for invalidity. +/// +/// Spec v0.8.0 +pub fn verify_attestation_without_signature( + state: &BeaconState, + attestation: &Attestation, + spec: &ChainSpec, +) -> Result<(), Error> { + verify_attestation_parametric(state, attestation, spec, false, false) +} + +/// Indicates if an `Attestation` is valid to be included in a block in the current epoch of the +/// given state, optionally validating the aggregate signature. +/// +/// +/// Spec v0.8.0 +fn verify_attestation_parametric( + state: &BeaconState, + attestation: &Attestation, + spec: &ChainSpec, + verify_signature: bool, + time_independent_only: bool, +) -> Result<(), Error> { + let data = &attestation.data; + verify!( + data.crosslink.shard < T::ShardCount::to_u64(), + Invalid::BadShard + ); + + // Check attestation slot. + let attestation_slot = state.get_attestation_data_slot(&data)?; + + verify!( + time_independent_only + || attestation_slot + spec.min_attestation_inclusion_delay <= state.slot, + Invalid::IncludedTooEarly { + state: state.slot, + delay: spec.min_attestation_inclusion_delay, + attestation: attestation_slot + } + ); + verify!( + state.slot <= attestation_slot + T::slots_per_epoch(), + Invalid::IncludedTooLate { + state: state.slot, + attestation: attestation_slot + } + ); + + // Verify the Casper FFG vote and crosslink data. + if !time_independent_only { + let parent_crosslink = verify_casper_ffg_vote(attestation, state)?; + + verify!( + data.crosslink.parent_root == Hash256::from_slice(&parent_crosslink.tree_hash_root()), + Invalid::BadParentCrosslinkHash + ); + verify!( + data.crosslink.start_epoch == parent_crosslink.end_epoch, + Invalid::BadParentCrosslinkStartEpoch + ); + verify!( + data.crosslink.end_epoch + == std::cmp::min( + data.target.epoch, + parent_crosslink.end_epoch + spec.max_epochs_per_crosslink + ), + Invalid::BadParentCrosslinkEndEpoch + ); + } + + // Crosslink data root is zero (to be removed in phase 1). + verify!( + attestation.data.crosslink.data_root == Hash256::zero(), + Invalid::ShardBlockRootNotZero + ); + + // Check signature and bitfields + let indexed_attestation = get_indexed_attestation(state, attestation)?; + if verify_signature { + is_valid_indexed_attestation(state, &indexed_attestation, spec)?; + } else { + is_valid_indexed_attestation_without_signature(state, &indexed_attestation, spec)?; + } + + Ok(()) +} + +/// Check target epoch and source checkpoint. +/// +/// Return the parent crosslink for further checks. +/// +/// Spec v0.8.0 +fn verify_casper_ffg_vote<'a, T: EthSpec>( + attestation: &Attestation, + state: &'a BeaconState, +) -> Result<&'a Crosslink, Error> { + let data = &attestation.data; + if data.target.epoch == state.current_epoch() { + verify!( + data.source == state.current_justified_checkpoint, + Invalid::WrongJustifiedCheckpoint { + state: state.current_justified_checkpoint.clone(), + attestation: data.source.clone(), + is_current: true, + } + ); + Ok(state.get_current_crosslink(data.crosslink.shard)?) + } else if data.target.epoch == state.previous_epoch() { + verify!( + data.source == state.previous_justified_checkpoint, + Invalid::WrongJustifiedCheckpoint { + state: state.previous_justified_checkpoint.clone(), + attestation: data.source.clone(), + is_current: false, + } + ); + Ok(state.get_previous_crosslink(data.crosslink.shard)?) + } else { + invalid!(Invalid::BadTargetEpoch) + } +} diff --git a/eth2/state_processing/src/per_block_processing/verify_attester_slashing.rs b/eth2/state_processing/src/per_block_processing/verify_attester_slashing.rs index 3ae32d72a..840098cad 100644 --- a/eth2/state_processing/src/per_block_processing/verify_attester_slashing.rs +++ b/eth2/state_processing/src/per_block_processing/verify_attester_slashing.rs @@ -1,5 +1,5 @@ use super::errors::{AttesterSlashingInvalid as Invalid, AttesterSlashingValidationError as Error}; -use super::verify_indexed_attestation::verify_indexed_attestation; +use super::is_valid_indexed_attestation::is_valid_indexed_attestation; use std::collections::BTreeSet; use types::*; @@ -8,10 +8,10 @@ use types::*; /// /// Returns `Ok(())` if the `AttesterSlashing` is valid, otherwise indicates the reason for invalidity. /// -/// Spec v0.6.3 +/// Spec v0.8.1 pub fn verify_attester_slashing( state: &BeaconState, - attester_slashing: &AttesterSlashing, + attester_slashing: &AttesterSlashing, should_verify_indexed_attestations: bool, spec: &ChainSpec, ) -> Result<(), Error> { @@ -26,9 +26,9 @@ pub fn verify_attester_slashing( ); if should_verify_indexed_attestations { - verify_indexed_attestation(state, &attestation_1, spec) + is_valid_indexed_attestation(state, &attestation_1, spec) .map_err(|e| Error::Invalid(Invalid::IndexedAttestation1Invalid(e.into())))?; - verify_indexed_attestation(state, &attestation_2, spec) + is_valid_indexed_attestation(state, &attestation_2, spec) .map_err(|e| Error::Invalid(Invalid::IndexedAttestation2Invalid(e.into())))?; } @@ -39,10 +39,10 @@ pub fn verify_attester_slashing( /// /// Returns Ok(indices) if `indices.len() > 0`. /// -/// Spec v0.6.3 +/// Spec v0.8.1 pub fn get_slashable_indices( state: &BeaconState, - attester_slashing: &AttesterSlashing, + attester_slashing: &AttesterSlashing, ) -> Result, Error> { get_slashable_indices_modular(state, attester_slashing, |_, validator| { validator.is_slashable_at(state.current_epoch()) @@ -53,7 +53,7 @@ pub fn get_slashable_indices( /// for determining whether a given validator should be considered slashable. pub fn get_slashable_indices_modular( state: &BeaconState, - attester_slashing: &AttesterSlashing, + attester_slashing: &AttesterSlashing, is_slashable: F, ) -> Result, Error> where @@ -79,7 +79,7 @@ where for index in &attesting_indices_1 & &attesting_indices_2 { let validator = state - .validator_registry + .validators .get(index as usize) .ok_or_else(|| Error::Invalid(Invalid::UnknownValidator(index)))?; diff --git a/eth2/state_processing/src/per_block_processing/verify_deposit.rs b/eth2/state_processing/src/per_block_processing/verify_deposit.rs index 860e9cd26..5642c7a5f 100644 --- a/eth2/state_processing/src/per_block_processing/verify_deposit.rs +++ b/eth2/state_processing/src/per_block_processing/verify_deposit.rs @@ -5,43 +5,27 @@ use types::*; /// Verify `Deposit.pubkey` signed `Deposit.signature`. /// -/// Spec v0.6.3 +/// Spec v0.8.0 pub fn verify_deposit_signature( state: &BeaconState, deposit: &Deposit, spec: &ChainSpec, ) -> Result<(), Error> { + // Note: Deposits are valid across forks, thus the deposit domain is computed + // with the fork zeroed. + let domain = spec.get_domain(state.current_epoch(), Domain::Deposit, &Fork::default()); verify!( - deposit.data.signature.verify( - &deposit.data.signed_root(), - spec.get_domain(state.current_epoch(), Domain::Deposit, &state.fork), - &deposit.data.pubkey, - ), + deposit + .data + .signature + .verify(&deposit.data.signed_root(), domain, &deposit.data.pubkey,), Invalid::BadSignature ); Ok(()) } -/// Verify that the `Deposit` index is correct. -/// -/// Spec v0.6.3 -pub fn verify_deposit_index( - state: &BeaconState, - deposit: &Deposit, -) -> Result<(), Error> { - verify!( - deposit.index == state.deposit_index, - Invalid::BadIndex { - state: state.deposit_index, - deposit: deposit.index - } - ); - - Ok(()) -} - -/// Returns a `Some(validator index)` if a pubkey already exists in the `validator_registry`, +/// Returns a `Some(validator index)` if a pubkey already exists in the `validators`, /// otherwise returns `None`. /// /// ## Errors @@ -57,10 +41,14 @@ pub fn get_existing_validator_index( /// Verify that a deposit is included in the state's eth1 deposit root. /// -/// Spec v0.6.3 +/// The deposit index is provided as a parameter so we can check proofs +/// before they're due to be processed, and in parallel. +/// +/// Spec v0.8.0 pub fn verify_deposit_merkle_proof( state: &BeaconState, deposit: &Deposit, + deposit_index: u64, spec: &ChainSpec, ) -> Result<(), Error> { let leaf = deposit.data.tree_hash_root(); @@ -69,9 +57,9 @@ pub fn verify_deposit_merkle_proof( verify_merkle_proof( Hash256::from_slice(&leaf), &deposit.proof[..], - spec.deposit_contract_tree_depth as usize, - deposit.index as usize, - state.latest_eth1_data.deposit_root, + spec.deposit_contract_tree_depth as usize + 1, + deposit_index as usize, + state.eth1_data.deposit_root, ), Invalid::BadMerkleProof ); diff --git a/eth2/state_processing/src/per_block_processing/verify_exit.rs b/eth2/state_processing/src/per_block_processing/verify_exit.rs index 4bfad5f19..1e0bbdd78 100644 --- a/eth2/state_processing/src/per_block_processing/verify_exit.rs +++ b/eth2/state_processing/src/per_block_processing/verify_exit.rs @@ -7,7 +7,7 @@ use types::*; /// /// Returns `Ok(())` if the `Exit` is valid, otherwise indicates the reason for invalidity. /// -/// Spec v0.6.3 +/// Spec v0.8.0 pub fn verify_exit( state: &BeaconState, exit: &VoluntaryExit, @@ -18,7 +18,7 @@ pub fn verify_exit( /// Like `verify_exit` but doesn't run checks which may become true in future states. /// -/// Spec v0.6.3 +/// Spec v0.8.0 pub fn verify_exit_time_independent_only( state: &BeaconState, exit: &VoluntaryExit, @@ -29,7 +29,7 @@ pub fn verify_exit_time_independent_only( /// Parametric version of `verify_exit` that skips some checks if `time_independent_only` is true. /// -/// Spec v0.6.3 +/// Spec v0.8.0 fn verify_exit_parametric( state: &BeaconState, exit: &VoluntaryExit, @@ -37,7 +37,7 @@ fn verify_exit_parametric( time_independent_only: bool, ) -> Result<(), Error> { let validator = state - .validator_registry + .validators .get(exit.validator_index as usize) .ok_or_else(|| Error::Invalid(Invalid::ValidatorUnknown(exit.validator_index)))?; @@ -63,12 +63,11 @@ fn verify_exit_parametric( ); // Verify the validator has been active long enough. - let lifespan = state.current_epoch() - validator.activation_epoch; verify!( - lifespan >= spec.persistent_committee_period, - Invalid::TooYoungToLeave { - lifespan, - expected: spec.persistent_committee_period, + state.current_epoch() >= validator.activation_epoch + spec.persistent_committee_period, + Invalid::TooYoungToExit { + current_epoch: state.current_epoch(), + earliest_exit_epoch: validator.activation_epoch + spec.persistent_committee_period, } ); diff --git a/eth2/state_processing/src/per_block_processing/verify_proposer_slashing.rs b/eth2/state_processing/src/per_block_processing/verify_proposer_slashing.rs index b2419a05b..5a9eb328c 100644 --- a/eth2/state_processing/src/per_block_processing/verify_proposer_slashing.rs +++ b/eth2/state_processing/src/per_block_processing/verify_proposer_slashing.rs @@ -7,19 +7,20 @@ use types::*; /// /// Returns `Ok(())` if the `ProposerSlashing` is valid, otherwise indicates the reason for invalidity. /// -/// Spec v0.6.3 +/// Spec v0.8.0 pub fn verify_proposer_slashing( proposer_slashing: &ProposerSlashing, state: &BeaconState, spec: &ChainSpec, ) -> Result<(), Error> { let proposer = state - .validator_registry + .validators .get(proposer_slashing.proposer_index as usize) .ok_or_else(|| { Error::Invalid(Invalid::ProposerUnknown(proposer_slashing.proposer_index)) })?; + // Verify that the epoch is the same verify!( proposer_slashing.header_1.slot.epoch(T::slots_per_epoch()) == proposer_slashing.header_2.slot.epoch(T::slots_per_epoch()), @@ -29,11 +30,13 @@ pub fn verify_proposer_slashing( ) ); + // But the headers are different verify!( proposer_slashing.header_1 != proposer_slashing.header_2, Invalid::ProposalsIdentical ); + // Check proposer is slashable verify!( proposer.is_slashable_at(state.current_epoch()), Invalid::ProposerNotSlashable(proposer_slashing.proposer_index) @@ -65,7 +68,7 @@ pub fn verify_proposer_slashing( /// /// Returns `true` if the signature is valid. /// -/// Spec v0.6.3 +/// Spec v0.8.0 fn verify_header_signature( header: &BeaconBlockHeader, pubkey: &PublicKey, diff --git a/eth2/state_processing/src/per_block_processing/verify_transfer.rs b/eth2/state_processing/src/per_block_processing/verify_transfer.rs index d42b7d1f2..f34bea65a 100644 --- a/eth2/state_processing/src/per_block_processing/verify_transfer.rs +++ b/eth2/state_processing/src/per_block_processing/verify_transfer.rs @@ -8,7 +8,7 @@ use types::*; /// /// Returns `Ok(())` if the `Transfer` is valid, otherwise indicates the reason for invalidity. /// -/// Spec v0.6.3 +/// Spec v0.8.0 pub fn verify_transfer( state: &BeaconState, transfer: &Transfer, @@ -19,7 +19,7 @@ pub fn verify_transfer( /// Like `verify_transfer` but doesn't run checks which may become true in future states. /// -/// Spec v0.6.3 +/// Spec v0.8.0 pub fn verify_transfer_time_independent_only( state: &BeaconState, transfer: &Transfer, @@ -37,7 +37,7 @@ pub fn verify_transfer_time_independent_only( /// present or future. /// - Validator transfer eligibility (e.g., is withdrawable) /// -/// Spec v0.6.3 +/// Spec v0.8.0 fn verify_transfer_parametric( state: &BeaconState, transfer: &Transfer, @@ -97,22 +97,20 @@ fn verify_transfer_parametric( // Load the sender `Validator` record from the state. let sender_validator = state - .validator_registry + .validators .get(transfer.sender as usize) .ok_or_else(|| Error::Invalid(Invalid::FromValidatorUnknown(transfer.sender)))?; - let epoch = state.slot.epoch(T::slots_per_epoch()); - // Ensure one of the following is met: // // - Time dependent checks are being ignored. - // - The sender has not been activated. + // - The sender has never been eligible for activation. // - The sender is withdrawable at the state's epoch. // - The transfer will not reduce the sender below the max effective balance. verify!( time_independent_only || sender_validator.activation_eligibility_epoch == spec.far_future_epoch - || sender_validator.is_withdrawable_at(epoch) + || sender_validator.is_withdrawable_at(state.current_epoch()) || total_amount + spec.max_effective_balance <= sender_balance, Invalid::FromValidatorIneligibleForTransfer(transfer.sender) ); @@ -154,7 +152,7 @@ fn verify_transfer_parametric( /// /// Does not check that the transfer is valid, however checks for overflow in all actions. /// -/// Spec v0.6.3 +/// Spec v0.8.0 pub fn execute_transfer( state: &mut BeaconState, transfer: &Transfer, diff --git a/eth2/state_processing/src/per_epoch_processing.rs b/eth2/state_processing/src/per_epoch_processing.rs index c1d601b47..8d6153aea 100644 --- a/eth2/state_processing/src/per_epoch_processing.rs +++ b/eth2/state_processing/src/per_epoch_processing.rs @@ -1,3 +1,4 @@ +use crate::common::get_compact_committees_root; use apply_rewards::process_rewards_and_penalties; use errors::EpochProcessingError as Error; use process_slashings::process_slashings; @@ -26,14 +27,15 @@ pub type WinningRootHashSet = HashMap; /// Mutates the given `BeaconState`, returning early if an error is encountered. If an error is /// returned, a state might be "half-processed" and therefore in an invalid state. /// -/// Spec v0.6.3 +/// Spec v0.8.0 pub fn per_epoch_processing( state: &mut BeaconState, spec: &ChainSpec, ) -> Result<(), Error> { - // Ensure the previous and next epoch caches are built. + // Ensure the committee caches are built. state.build_committee_cache(RelativeEpoch::Previous, spec)?; state.build_committee_cache(RelativeEpoch::Current, spec)?; + state.build_committee_cache(RelativeEpoch::Next, spec)?; // Load the struct we use to assign validators into sets based on their participation. // @@ -80,61 +82,67 @@ pub fn per_epoch_processing( /// - `finalized_epoch` /// - `finalized_root` /// -/// Spec v0.6.3 +/// Spec v0.8.0 +#[allow(clippy::if_same_then_else)] // For readability and consistency with spec. pub fn process_justification_and_finalization( state: &mut BeaconState, total_balances: &TotalBalances, ) -> Result<(), Error> { - if state.current_epoch() == T::genesis_epoch() { + if state.current_epoch() <= T::genesis_epoch() + 1 { return Ok(()); } let previous_epoch = state.previous_epoch(); let current_epoch = state.current_epoch(); - let old_previous_justified_epoch = state.previous_justified_epoch; - let old_current_justified_epoch = state.current_justified_epoch; + let old_previous_justified_checkpoint = state.previous_justified_checkpoint.clone(); + let old_current_justified_checkpoint = state.current_justified_checkpoint.clone(); // Process justifications - state.previous_justified_epoch = state.current_justified_epoch; - state.previous_justified_root = state.current_justified_root; - state.justification_bitfield <<= 1; + state.previous_justified_checkpoint = state.current_justified_checkpoint.clone(); + state.justification_bits.shift_up(1)?; - if total_balances.previous_epoch_target_attesters * 3 >= total_balances.previous_epoch * 2 { - state.current_justified_epoch = previous_epoch; - state.current_justified_root = - *state.get_block_root_at_epoch(state.current_justified_epoch)?; - state.justification_bitfield |= 2; + if total_balances.previous_epoch_target_attesters * 3 >= total_balances.current_epoch * 2 { + state.current_justified_checkpoint = Checkpoint { + epoch: previous_epoch, + root: *state.get_block_root_at_epoch(previous_epoch)?, + }; + state.justification_bits.set(1, true)?; } // If the current epoch gets justified, fill the last bit. if total_balances.current_epoch_target_attesters * 3 >= total_balances.current_epoch * 2 { - state.current_justified_epoch = current_epoch; - state.current_justified_root = - *state.get_block_root_at_epoch(state.current_justified_epoch)?; - state.justification_bitfield |= 1; + state.current_justified_checkpoint = Checkpoint { + epoch: current_epoch, + root: *state.get_block_root_at_epoch(current_epoch)?, + }; + state.justification_bits.set(0, true)?; } - let bitfield = state.justification_bitfield; + let bits = &state.justification_bits; // The 2nd/3rd/4th most recent epochs are all justified, the 2nd using the 4th as source. - if (bitfield >> 1) % 8 == 0b111 && old_previous_justified_epoch == current_epoch - 3 { - state.finalized_epoch = old_previous_justified_epoch; - state.finalized_root = *state.get_block_root_at_epoch(state.finalized_epoch)?; + if (1..4).all(|i| bits.get(i).unwrap_or(false)) + && old_previous_justified_checkpoint.epoch + 3 == current_epoch + { + state.finalized_checkpoint = old_previous_justified_checkpoint; } // The 2nd/3rd most recent epochs are both justified, the 2nd using the 3rd as source. - if (bitfield >> 1) % 4 == 0b11 && old_previous_justified_epoch == current_epoch - 2 { - state.finalized_epoch = old_previous_justified_epoch; - state.finalized_root = *state.get_block_root_at_epoch(state.finalized_epoch)?; + else if (1..3).all(|i| bits.get(i).unwrap_or(false)) + && old_previous_justified_checkpoint.epoch + 2 == current_epoch + { + state.finalized_checkpoint = old_previous_justified_checkpoint; } - // The 1st/2nd/3rd most recent epochs are all justified, the 1st using the 2nd as source. - if bitfield % 8 == 0b111 && old_current_justified_epoch == current_epoch - 2 { - state.finalized_epoch = old_current_justified_epoch; - state.finalized_root = *state.get_block_root_at_epoch(state.finalized_epoch)?; + // The 1st/2nd/3rd most recent epochs are all justified, the 1st using the 3nd as source. + if (0..3).all(|i| bits.get(i).unwrap_or(false)) + && old_current_justified_checkpoint.epoch + 2 == current_epoch + { + state.finalized_checkpoint = old_current_justified_checkpoint; } // The 1st/2nd most recent epochs are both justified, the 1st using the 2nd as source. - if bitfield % 4 == 0b11 && old_current_justified_epoch == current_epoch - 1 { - state.finalized_epoch = old_current_justified_epoch; - state.finalized_root = *state.get_block_root_at_epoch(state.finalized_epoch)?; + else if (0..2).all(|i| bits.get(i).unwrap_or(false)) + && old_current_justified_checkpoint.epoch + 1 == current_epoch + { + state.finalized_checkpoint = old_current_justified_checkpoint; } Ok(()) @@ -147,7 +155,7 @@ pub fn process_justification_and_finalization( /// /// Also returns a `WinningRootHashSet` for later use during epoch processing. /// -/// Spec v0.6.3 +/// Spec v0.8.0 pub fn process_crosslinks( state: &mut BeaconState, spec: &ChainSpec, @@ -158,7 +166,7 @@ pub fn process_crosslinks( for &relative_epoch in &[RelativeEpoch::Previous, RelativeEpoch::Current] { let epoch = relative_epoch.into_epoch(state.current_epoch()); - for offset in 0..state.get_epoch_committee_count(relative_epoch)? { + for offset in 0..state.get_committee_count(relative_epoch)? { let shard = (state.get_epoch_start_shard(relative_epoch)? + offset) % T::ShardCount::to_u64(); let crosslink_committee = @@ -183,7 +191,7 @@ pub fn process_crosslinks( /// Finish up an epoch update. /// -/// Spec v0.6.3 +/// Spec v0.8.0 pub fn process_final_updates( state: &mut BeaconState, spec: &ChainSpec, @@ -192,12 +200,12 @@ pub fn process_final_updates( let next_epoch = state.next_epoch(); // Reset eth1 data votes. - if (state.slot + 1) % spec.slots_per_eth1_voting_period == 0 { - state.eth1_data_votes = vec![]; + if (state.slot + 1) % T::SlotsPerEth1VotingPeriod::to_u64() == 0 { + state.eth1_data_votes = VariableList::empty(); } // Update effective balances with hysteresis (lag). - for (index, validator) in state.validator_registry.iter_mut().enumerate() { + for (index, validator) in state.validators.iter_mut().enumerate() { let balance = state.balances[index]; let half_increment = spec.effective_balance_increment / 2; if balance < validator.effective_balance @@ -211,7 +219,7 @@ pub fn process_final_updates( } // Update start shard. - state.latest_start_shard = state.next_epoch_start_shard(spec)?; + state.start_shard = state.next_epoch_start_shard(spec)?; // This is a hack to allow us to update index roots and slashed balances for the next epoch. // @@ -220,19 +228,18 @@ pub fn process_final_updates( state.slot += 1; // Set active index root - let active_index_root = Hash256::from_slice( - &state - .get_active_validator_indices(next_epoch + spec.activation_exit_delay) - .tree_hash_root()[..], + let index_epoch = next_epoch + spec.activation_exit_delay; + let indices_list = VariableList::::from( + state.get_active_validator_indices(index_epoch), ); state.set_active_index_root( - next_epoch + spec.activation_exit_delay, - active_index_root, + index_epoch, + Hash256::from_slice(&indices_list.tree_hash_root()), spec, )?; - // Set total slashed balances - state.set_slashed_balance(next_epoch, state.get_slashed_balance(current_epoch)?)?; + // Reset slashings + state.set_slashings(next_epoch, 0)?; // Set randao mix state.set_randao_mix(next_epoch, *state.get_randao_mix(current_epoch)?)?; @@ -240,16 +247,27 @@ pub fn process_final_updates( state.slot -= 1; } + // Set committees root + // Note: we do this out-of-order w.r.t. to the spec, because we don't want the slot to be + // incremented. It's safe because the updates to slashings and the RANDAO mix (above) don't + // affect this. + state.set_compact_committee_root( + next_epoch, + get_compact_committees_root(state, RelativeEpoch::Next, spec)?, + spec, + )?; + + // Set historical root accumulator if next_epoch.as_u64() % (T::SlotsPerHistoricalRoot::to_u64() / T::slots_per_epoch()) == 0 { let historical_batch = state.historical_batch(); state .historical_roots - .push(Hash256::from_slice(&historical_batch.tree_hash_root()[..])); + .push(Hash256::from_slice(&historical_batch.tree_hash_root()))?; } // Rotate current/previous epoch attestations state.previous_epoch_attestations = - std::mem::replace(&mut state.current_epoch_attestations, vec![]); + std::mem::replace(&mut state.current_epoch_attestations, VariableList::empty()); Ok(()) } diff --git a/eth2/state_processing/src/per_epoch_processing/apply_rewards.rs b/eth2/state_processing/src/per_epoch_processing/apply_rewards.rs index 88b51aae8..9bd53077a 100644 --- a/eth2/state_processing/src/per_epoch_processing/apply_rewards.rs +++ b/eth2/state_processing/src/per_epoch_processing/apply_rewards.rs @@ -32,7 +32,7 @@ impl std::ops::AddAssign for Delta { /// Apply attester and proposer rewards. /// -/// Spec v0.6.3 +/// Spec v0.8.0 pub fn process_rewards_and_penalties( state: &mut BeaconState, validator_statuses: &mut ValidatorStatuses, @@ -45,7 +45,7 @@ pub fn process_rewards_and_penalties( // Guard against an out-of-bounds during the validator balance update. if validator_statuses.statuses.len() != state.balances.len() - || validator_statuses.statuses.len() != state.validator_registry.len() + || validator_statuses.statuses.len() != state.validators.len() { return Err(Error::ValidatorStatusesInconsistent); } @@ -74,7 +74,7 @@ pub fn process_rewards_and_penalties( /// For each attesting validator, reward the proposer who was first to include their attestation. /// -/// Spec v0.6.3 +/// Spec v0.8.0 fn get_proposer_deltas( deltas: &mut Vec, state: &BeaconState, @@ -85,7 +85,7 @@ fn get_proposer_deltas( // Update statuses with the information from winning roots. validator_statuses.process_winning_roots(state, winning_root_for_shards, spec)?; - for validator in &validator_statuses.statuses { + for (index, validator) in validator_statuses.statuses.iter().enumerate() { if validator.is_previous_epoch_attester { let inclusion = validator .inclusion_info @@ -93,7 +93,7 @@ fn get_proposer_deltas( let base_reward = get_base_reward( state, - inclusion.proposer_index, + index, validator_statuses.total_balances.current_epoch, spec, )?; @@ -111,14 +111,14 @@ fn get_proposer_deltas( /// Apply rewards for participation in attestations during the previous epoch. /// -/// Spec v0.6.3 +/// Spec v0.8.0 fn get_attestation_deltas( deltas: &mut Vec, state: &BeaconState, validator_statuses: &ValidatorStatuses, spec: &ChainSpec, ) -> Result<(), Error> { - let finality_delay = (state.previous_epoch() - state.finalized_epoch).as_u64(); + let finality_delay = (state.previous_epoch() - state.finalized_checkpoint.epoch).as_u64(); for (index, validator) in validator_statuses.statuses.iter().enumerate() { let base_reward = get_base_reward( @@ -128,7 +128,7 @@ fn get_attestation_deltas( spec, )?; - let delta = get_attestation_delta( + let delta = get_attestation_delta::( &validator, &validator_statuses.total_balances, base_reward, @@ -144,8 +144,8 @@ fn get_attestation_deltas( /// Determine the delta for a single validator, sans proposer rewards. /// -/// Spec v0.6.3 -fn get_attestation_delta( +/// Spec v0.8.0 +fn get_attestation_delta( validator: &ValidatorStatus, total_balances: &TotalBalances, base_reward: u64, @@ -174,10 +174,17 @@ fn get_attestation_delta( if validator.is_previous_epoch_attester && !validator.is_slashed { delta.reward(base_reward * total_attesting_balance / total_balance); // Inclusion speed bonus + let proposer_reward = base_reward / spec.proposer_reward_quotient; + let max_attester_reward = base_reward - proposer_reward; let inclusion = validator .inclusion_info .expect("It is a logic error for an attester not to have an inclusion distance."); - delta.reward(base_reward * spec.min_attestation_inclusion_delay / inclusion.distance); + delta.reward( + max_attester_reward + * (T::SlotsPerEpoch::to_u64() + spec.min_attestation_inclusion_delay + - inclusion.distance) + / T::SlotsPerEpoch::to_u64(), + ); } else { delta.penalize(base_reward); } @@ -224,7 +231,7 @@ fn get_attestation_delta( /// Calculate the deltas based upon the winning roots for attestations during the previous epoch. /// -/// Spec v0.6.3 +/// Spec v0.8.0 fn get_crosslink_deltas( deltas: &mut Vec, state: &BeaconState, @@ -258,7 +265,7 @@ fn get_crosslink_deltas( /// Returns the base reward for some validator. /// -/// Spec v0.6.3 +/// Spec v0.8.0 fn get_base_reward( state: &BeaconState, index: usize, @@ -269,9 +276,10 @@ fn get_base_reward( if total_active_balance == 0 { Ok(0) } else { - let adjusted_quotient = total_active_balance.integer_sqrt() / spec.base_reward_quotient; - Ok(state.get_effective_balance(index, spec)? - / adjusted_quotient - / spec.base_rewards_per_epoch) + Ok( + state.get_effective_balance(index, spec)? * spec.base_reward_factor + / total_active_balance.integer_sqrt() + / spec.base_rewards_per_epoch, + ) } } diff --git a/eth2/state_processing/src/per_epoch_processing/errors.rs b/eth2/state_processing/src/per_epoch_processing/errors.rs index 4632e83bb..98e012e90 100644 --- a/eth2/state_processing/src/per_epoch_processing/errors.rs +++ b/eth2/state_processing/src/per_epoch_processing/errors.rs @@ -17,6 +17,7 @@ pub enum EpochProcessingError { InclusionSlotsInconsistent(usize), BeaconStateError(BeaconStateError), InclusionError(InclusionError), + SszTypesError(ssz_types::Error), } impl From for EpochProcessingError { @@ -31,6 +32,12 @@ impl From for EpochProcessingError { } } +impl From for EpochProcessingError { + fn from(e: ssz_types::Error) -> EpochProcessingError { + EpochProcessingError::SszTypesError(e) + } +} + #[derive(Debug, PartialEq)] pub enum InclusionError { /// The validator did not participate in an attestation in this period. diff --git a/eth2/state_processing/src/per_epoch_processing/process_slashings.rs b/eth2/state_processing/src/per_epoch_processing/process_slashings.rs index df743c553..d244955ee 100644 --- a/eth2/state_processing/src/per_epoch_processing/process_slashings.rs +++ b/eth2/state_processing/src/per_epoch_processing/process_slashings.rs @@ -2,30 +2,23 @@ use types::{BeaconStateError as Error, *}; /// Process slashings. /// -/// Spec v0.6.3 +/// Spec v0.8.0 pub fn process_slashings( state: &mut BeaconState, - current_total_balance: u64, + total_balance: u64, spec: &ChainSpec, ) -> Result<(), Error> { - let current_epoch = state.current_epoch(); + let epoch = state.current_epoch(); + let sum_slashings = state.get_all_slashings().iter().sum::(); - let total_at_start = state.get_slashed_balance(current_epoch + 1)?; - let total_at_end = state.get_slashed_balance(current_epoch)?; - let total_penalties = total_at_end - total_at_start; - - for (index, validator) in state.validator_registry.iter().enumerate() { - let should_penalize = current_epoch.as_usize() + T::LatestSlashedExitLength::to_usize() / 2 - == validator.withdrawable_epoch.as_usize(); - - if validator.slashed && should_penalize { - let effective_balance = state.get_effective_balance(index, spec)?; - - let penalty = std::cmp::max( - effective_balance * std::cmp::min(total_penalties * 3, current_total_balance) - / current_total_balance, - effective_balance / spec.min_slashing_penalty_quotient, - ); + for (index, validator) in state.validators.iter().enumerate() { + if validator.slashed + && epoch + T::EpochsPerSlashingsVector::to_u64() / 2 == validator.withdrawable_epoch + { + let increment = spec.effective_balance_increment; + let penalty_numerator = validator.effective_balance / increment + * std::cmp::min(sum_slashings * 3, total_balance); + let penalty = penalty_numerator / total_balance * increment; safe_sub_assign!(state.balances[index], penalty); } diff --git a/eth2/state_processing/src/per_epoch_processing/registry_updates.rs b/eth2/state_processing/src/per_epoch_processing/registry_updates.rs index b18111faf..3f654e442 100644 --- a/eth2/state_processing/src/per_epoch_processing/registry_updates.rs +++ b/eth2/state_processing/src/per_epoch_processing/registry_updates.rs @@ -5,7 +5,7 @@ use types::*; /// Performs a validator registry update, if required. /// -/// Spec v0.6.3 +/// Spec v0.8.0 pub fn process_registry_updates( state: &mut BeaconState, spec: &ChainSpec, @@ -17,14 +17,14 @@ pub fn process_registry_updates( let current_epoch = state.current_epoch(); let is_eligible = |validator: &Validator| { validator.activation_eligibility_epoch == spec.far_future_epoch - && validator.effective_balance >= spec.max_effective_balance + && validator.effective_balance == spec.max_effective_balance }; let is_exiting_validator = |validator: &Validator| { validator.is_active_at(current_epoch) && validator.effective_balance <= spec.ejection_balance }; let (eligible_validators, exiting_validators): (Vec<_>, Vec<_>) = state - .validator_registry + .validators .iter() .enumerate() .filter(|(_, validator)| is_eligible(validator) || is_exiting_validator(validator)) @@ -36,7 +36,7 @@ pub fn process_registry_updates( } }); for index in eligible_validators { - state.validator_registry[index].activation_eligibility_epoch = current_epoch; + state.validators[index].activation_eligibility_epoch = current_epoch; } for index in exiting_validators { initiate_validator_exit(state, index, spec)?; @@ -44,22 +44,22 @@ pub fn process_registry_updates( // Queue validators eligible for activation and not dequeued for activation prior to finalized epoch let activation_queue = state - .validator_registry + .validators .iter() .enumerate() .filter(|(_, validator)| { validator.activation_eligibility_epoch != spec.far_future_epoch && validator.activation_epoch - >= state.get_delayed_activation_exit_epoch(state.finalized_epoch, spec) + >= state.compute_activation_exit_epoch(state.finalized_checkpoint.epoch, spec) }) .sorted_by_key(|(_, validator)| validator.activation_eligibility_epoch) .map(|(index, _)| index) .collect_vec(); let churn_limit = state.get_churn_limit(spec)? as usize; - let delayed_activation_epoch = state.get_delayed_activation_exit_epoch(current_epoch, spec); + let delayed_activation_epoch = state.compute_activation_exit_epoch(current_epoch, spec); for index in activation_queue.into_iter().take(churn_limit) { - let validator = &mut state.validator_registry[index]; + let validator = &mut state.validators[index]; if validator.activation_epoch == spec.far_future_epoch { validator.activation_epoch = delayed_activation_epoch; } diff --git a/eth2/state_processing/src/per_epoch_processing/validator_statuses.rs b/eth2/state_processing/src/per_epoch_processing/validator_statuses.rs index 9f05b8204..8a7d07d57 100644 --- a/eth2/state_processing/src/per_epoch_processing/validator_statuses.rs +++ b/eth2/state_processing/src/per_epoch_processing/validator_statuses.rs @@ -1,5 +1,5 @@ use super::WinningRootHashSet; -use crate::common::get_attesting_indices_unsorted; +use crate::common::get_attesting_indices; use types::*; /// Sets the boolean `var` on `self` to be true if it is true on `other`. Otherwise leaves `self` @@ -162,15 +162,15 @@ impl ValidatorStatuses { /// - Active validators /// - Total balances for the current and previous epochs. /// - /// Spec v0.6.3 + /// Spec v0.8.1 pub fn new( state: &BeaconState, spec: &ChainSpec, ) -> Result { - let mut statuses = Vec::with_capacity(state.validator_registry.len()); + let mut statuses = Vec::with_capacity(state.validators.len()); let mut total_balances = TotalBalances::default(); - for (i, validator) in state.validator_registry.iter().enumerate() { + for (i, validator) in state.validators.iter().enumerate() { let effective_balance = state.get_effective_balance(i, spec)?; let mut status = ValidatorStatus { is_slashed: validator.slashed, @@ -202,7 +202,7 @@ impl ValidatorStatuses { /// Process some attestations from the given `state` updating the `statuses` and /// `total_balances` fields. /// - /// Spec v0.6.3 + /// Spec v0.8.1 pub fn process_attestations( &mut self, state: &BeaconState, @@ -213,24 +213,23 @@ impl ValidatorStatuses { .iter() .chain(state.current_epoch_attestations.iter()) { - let attesting_indices = - get_attesting_indices_unsorted(state, &a.data, &a.aggregation_bitfield)?; + let attesting_indices = get_attesting_indices(state, &a.data, &a.aggregation_bits)?; let mut status = ValidatorStatus::default(); // Profile this attestation, updating the total balances and generating an // `ValidatorStatus` object that applies to all participants in the attestation. - if is_from_epoch(a, state.current_epoch()) { + if a.data.target.epoch == state.current_epoch() { status.is_current_epoch_attester = true; if target_matches_epoch_start_block(a, state, state.current_epoch())? { status.is_current_epoch_target_attester = true; } - } else if is_from_epoch(a, state.previous_epoch()) { + } else if a.data.target.epoch == state.previous_epoch() { status.is_previous_epoch_attester = true; // The inclusion slot and distance are only required for previous epoch attesters. - let attestation_slot = state.get_attestation_slot(&a.data)?; + let attestation_slot = state.get_attestation_data_slot(&a.data)?; let inclusion_slot = attestation_slot + a.inclusion_delay; let relative_epoch = RelativeEpoch::from_slot(state.slot, inclusion_slot, T::slots_per_epoch())?; @@ -289,7 +288,7 @@ impl ValidatorStatuses { /// Update the `statuses` for each validator based upon whether or not they attested to the /// "winning" shard block root for the previous epoch. /// - /// Spec v0.6.3 + /// Spec v0.8.1 pub fn process_winning_roots( &mut self, state: &BeaconState, @@ -321,37 +320,30 @@ impl ValidatorStatuses { } } -/// Returns `true` if some `PendingAttestation` is from the supplied `epoch`. -/// -/// Spec v0.6.3 -fn is_from_epoch(a: &PendingAttestation, epoch: Epoch) -> bool { - a.data.target_epoch == epoch -} - /// Returns `true` if the attestation's FFG target is equal to the hash of the `state`'s first /// beacon block in the given `epoch`. /// -/// Spec v0.6.3 +/// Spec v0.8.1 fn target_matches_epoch_start_block( - a: &PendingAttestation, + a: &PendingAttestation, state: &BeaconState, epoch: Epoch, ) -> Result { let slot = epoch.start_slot(T::slots_per_epoch()); let state_boundary_root = *state.get_block_root(slot)?; - Ok(a.data.target_root == state_boundary_root) + Ok(a.data.target.root == state_boundary_root) } /// Returns `true` if a `PendingAttestation` and `BeaconState` share the same beacon block hash for /// the current slot of the `PendingAttestation`. /// -/// Spec v0.6.3 +/// Spec v0.8.1 fn has_common_beacon_block_root( - a: &PendingAttestation, + a: &PendingAttestation, state: &BeaconState, ) -> Result { - let attestation_slot = state.get_attestation_slot(&a.data)?; + let attestation_slot = state.get_attestation_data_slot(&a.data)?; let state_block_root = *state.get_block_root(attestation_slot)?; Ok(a.data.beacon_block_root == state_block_root) diff --git a/eth2/state_processing/src/per_epoch_processing/winning_root.rs b/eth2/state_processing/src/per_epoch_processing/winning_root.rs index ab4381a3c..874e11d6c 100644 --- a/eth2/state_processing/src/per_epoch_processing/winning_root.rs +++ b/eth2/state_processing/src/per_epoch_processing/winning_root.rs @@ -1,4 +1,4 @@ -use crate::common::get_attesting_indices_unsorted; +use crate::common::get_attesting_indices; use std::collections::{HashMap, HashSet}; use tree_hash::TreeHash; use types::*; @@ -16,65 +16,48 @@ impl WinningRoot { /// A winning root is "better" than another if it has a higher `total_attesting_balance`. Ties /// are broken by favouring the higher `crosslink_data_root` value. /// - /// Spec v0.6.3 + /// Spec v0.8.0 pub fn is_better_than(&self, other: &Self) -> bool { - ( - self.total_attesting_balance, - self.crosslink.crosslink_data_root, - ) > ( - other.total_attesting_balance, - other.crosslink.crosslink_data_root, - ) + (self.total_attesting_balance, self.crosslink.data_root) + > (other.total_attesting_balance, other.crosslink.data_root) } } -/// Returns the `crosslink_data_root` with the highest total attesting balance for the given shard. -/// Breaks ties by favouring the smaller `crosslink_data_root` hash. +/// Returns the crosslink `data_root` with the highest total attesting balance for the given shard. +/// Breaks ties by favouring the smaller crosslink `data_root` hash. /// /// The `WinningRoot` object also contains additional fields that are useful in later stages of /// per-epoch processing. /// -/// Spec v0.6.3 +/// Spec v0.8.0 pub fn winning_root( state: &BeaconState, shard: u64, epoch: Epoch, spec: &ChainSpec, ) -> Result, BeaconStateError> { - let shard_attestations: Vec<&PendingAttestation> = state + let attestations: Vec<&_> = state .get_matching_source_attestations(epoch)? .iter() - .filter(|a| a.data.shard == shard) + .filter(|a| a.data.crosslink.shard == shard) .collect(); - let mut shard_crosslinks = Vec::with_capacity(shard_attestations.len()); - for att in shard_attestations { - shard_crosslinks.push(( - att, - state.get_crosslink_from_attestation_data(&att.data, spec)?, - )); - } - + // Build a map from crosslinks to attestations that support that crosslink. + let mut candidate_crosslink_map = HashMap::new(); let current_shard_crosslink_root = state.get_current_crosslink(shard)?.tree_hash_root(); - let candidate_crosslinks = shard_crosslinks.into_iter().filter(|(_, c)| { - c.previous_crosslink_root.as_bytes() == ¤t_shard_crosslink_root[..] - || c.tree_hash_root() == current_shard_crosslink_root - }); - // Build a map from candidate crosslink to attestations that support that crosslink. - let mut candidate_crosslink_map: HashMap> = HashMap::new(); - - for (attestation, crosslink) in candidate_crosslinks { - let supporting_attestations = candidate_crosslink_map - .entry(crosslink) - .or_insert_with(Vec::new); - supporting_attestations.push(attestation); - } - - if candidate_crosslink_map.is_empty() { - return Ok(None); + for a in attestations { + if a.data.crosslink.parent_root.as_bytes() == ¤t_shard_crosslink_root[..] + || a.data.crosslink.tree_hash_root() == current_shard_crosslink_root + { + let supporting_attestations = candidate_crosslink_map + .entry(&a.data.crosslink) + .or_insert_with(Vec::new); + supporting_attestations.push(a); + } } + // Find the maximum crosslink. let mut winning_root = None; for (crosslink, attestations) in candidate_crosslink_map { let attesting_validator_indices = @@ -83,7 +66,7 @@ pub fn winning_root( state.get_total_balance(&attesting_validator_indices, spec)?; let candidate = WinningRoot { - crosslink, + crosslink: crosslink.clone(), attesting_validator_indices, total_attesting_balance, }; @@ -102,24 +85,15 @@ pub fn winning_root( pub fn get_unslashed_attesting_indices_unsorted( state: &BeaconState, - attestations: &[&PendingAttestation], + attestations: &[&PendingAttestation], ) -> Result, BeaconStateError> { let mut output = HashSet::new(); for a in attestations { - output.extend(get_attesting_indices_unsorted( - state, - &a.data, - &a.aggregation_bitfield, - )?); + output.extend(get_attesting_indices(state, &a.data, &a.aggregation_bits)?); } Ok(output .into_iter() - .filter(|index| { - state - .validator_registry - .get(*index) - .map_or(false, |v| !v.slashed) - }) + .filter(|index| state.validators.get(*index).map_or(false, |v| !v.slashed)) .collect()) } @@ -131,16 +105,18 @@ mod tests { fn is_better_than() { let worse = WinningRoot { crosslink: Crosslink { - epoch: Epoch::new(0), - previous_crosslink_root: Hash256::from_slice(&[0; 32]), - crosslink_data_root: Hash256::from_slice(&[1; 32]), + shard: 0, + start_epoch: Epoch::new(0), + end_epoch: Epoch::new(1), + parent_root: Hash256::from_slice(&[0; 32]), + data_root: Hash256::from_slice(&[1; 32]), }, attesting_validator_indices: vec![], total_attesting_balance: 42, }; let mut better = worse.clone(); - better.crosslink.crosslink_data_root = Hash256::from_slice(&[2; 32]); + better.crosslink.data_root = Hash256::from_slice(&[2; 32]); assert!(better.is_better_than(&worse)); diff --git a/eth2/state_processing/src/per_slot_processing.rs b/eth2/state_processing/src/per_slot_processing.rs index 6abd0a075..a1c68edd9 100644 --- a/eth2/state_processing/src/per_slot_processing.rs +++ b/eth2/state_processing/src/per_slot_processing.rs @@ -9,14 +9,14 @@ pub enum Error { /// Advances a state forward by one slot, performing per-epoch processing if required. /// -/// Spec v0.6.3 +/// Spec v0.8.0 pub fn per_slot_processing( state: &mut BeaconState, spec: &ChainSpec, ) -> Result<(), Error> { - cache_state(state, spec)?; + cache_state(state)?; - if (state.slot > spec.genesis_slot) && ((state.slot + 1) % T::slots_per_epoch() == 0) { + if state.slot > spec.genesis_slot && (state.slot + 1) % T::slots_per_epoch() == 0 { per_epoch_processing(state, spec)?; } @@ -25,8 +25,8 @@ pub fn per_slot_processing( Ok(()) } -fn cache_state(state: &mut BeaconState, spec: &ChainSpec) -> Result<(), Error> { - let previous_slot_state_root = state.update_tree_hash_cache()?; +fn cache_state(state: &mut BeaconState) -> Result<(), Error> { + let previous_state_root = state.update_tree_hash_cache()?; // Note: increment the state slot here to allow use of our `state_root` and `block_root` // getter/setter functions. @@ -35,14 +35,15 @@ fn cache_state(state: &mut BeaconState, spec: &ChainSpec) -> Resu let previous_slot = state.slot; state.slot += 1; - // Store the previous slot's post-state transition root. - if state.latest_block_header.state_root == spec.zero_hash { - state.latest_block_header.state_root = previous_slot_state_root + // Store the previous slot's post state transition root. + state.set_state_root(previous_slot, previous_state_root)?; + + // Cache latest block header state root + if state.latest_block_header.state_root == Hash256::zero() { + state.latest_block_header.state_root = previous_state_root; } - // Store the previous slot's post state transition root. - state.set_state_root(previous_slot, previous_slot_state_root)?; - + // Cache block root let latest_block_root = state.latest_block_header.canonical_root(); state.set_block_root(previous_slot, latest_block_root)?; diff --git a/eth2/types/Cargo.toml b/eth2/types/Cargo.toml index ed71598d7..a49e46d93 100644 --- a/eth2/types/Cargo.toml +++ b/eth2/types/Cargo.toml @@ -6,14 +6,12 @@ edition = "2018" [dependencies] bls = { path = "../utils/bls" } -boolean-bitfield = { path = "../utils/boolean-bitfield" } cached_tree_hash = { path = "../utils/cached_tree_hash" } compare_fields = { path = "../utils/compare_fields" } compare_fields_derive = { path = "../utils/compare_fields_derive" } dirs = "1.0" derivative = "1.0" ethereum-types = "0.5" -fixed_len_vec = { path = "../utils/fixed_len_vec" } hashing = { path = "../utils/hashing" } hex = "0.3" int_to_bytes = { path = "../utils/int_to_bytes" } @@ -25,6 +23,7 @@ serde_derive = "1.0" slog = "^2.2.3" eth2_ssz = { path = "../utils/ssz" } eth2_ssz_derive = { path = "../utils/ssz_derive" } +eth2_ssz_types = { path = "../utils/ssz_types" } swap_or_not_shuffle = { path = "../utils/swap_or_not_shuffle" } test_random_derive = { path = "../utils/test_random_derive" } tree_hash = { path = "../utils/tree_hash" } diff --git a/eth2/types/src/attestation.rs b/eth2/types/src/attestation.rs index 40f97119d..c5fab262d 100644 --- a/eth2/types/src/attestation.rs +++ b/eth2/types/src/attestation.rs @@ -1,4 +1,4 @@ -use super::{AggregateSignature, AttestationData, Bitfield}; +use super::{AggregateSignature, AttestationData, BitList, EthSpec}; use crate::test_utils::TestRandom; use serde_derive::{Deserialize, Serialize}; @@ -9,7 +9,7 @@ use tree_hash_derive::{CachedTreeHash, SignedRoot, TreeHash}; /// Details an attestation that can be slashable. /// -/// Spec v0.6.3 +/// Spec v0.8.0 #[derive( Debug, Clone, @@ -23,32 +23,32 @@ use tree_hash_derive::{CachedTreeHash, SignedRoot, TreeHash}; TestRandom, SignedRoot, )] -pub struct Attestation { - pub aggregation_bitfield: Bitfield, +#[serde(bound = "T: EthSpec")] +pub struct Attestation { + pub aggregation_bits: BitList, pub data: AttestationData, - pub custody_bitfield: Bitfield, + pub custody_bits: BitList, #[signed_root(skip_hashing)] pub signature: AggregateSignature, } -impl Attestation { +impl Attestation { /// Are the aggregation bitfields of these attestations disjoint? - pub fn signers_disjoint_from(&self, other: &Attestation) -> bool { - self.aggregation_bitfield - .intersection(&other.aggregation_bitfield) + pub fn signers_disjoint_from(&self, other: &Self) -> bool { + self.aggregation_bits + .intersection(&other.aggregation_bits) .is_zero() } /// Aggregate another Attestation into this one. /// /// The aggregation bitfields must be disjoint, and the data must be the same. - pub fn aggregate(&mut self, other: &Attestation) { + pub fn aggregate(&mut self, other: &Self) { debug_assert_eq!(self.data, other.data); debug_assert!(self.signers_disjoint_from(other)); - self.aggregation_bitfield - .union_inplace(&other.aggregation_bitfield); - self.custody_bitfield.union_inplace(&other.custody_bitfield); + self.aggregation_bits = self.aggregation_bits.union(&other.aggregation_bits); + self.custody_bits = self.custody_bits.union(&other.custody_bits); self.signature.add_aggregate(&other.signature); } } @@ -56,7 +56,8 @@ impl Attestation { #[cfg(test)] mod tests { use super::*; + use crate::*; - ssz_tests!(Attestation); - cached_tree_hash_tests!(Attestation); + ssz_tests!(Attestation); + cached_tree_hash_tests!(Attestation); } diff --git a/eth2/types/src/attestation_data.rs b/eth2/types/src/attestation_data.rs index e3e989baa..677354d56 100644 --- a/eth2/types/src/attestation_data.rs +++ b/eth2/types/src/attestation_data.rs @@ -1,5 +1,5 @@ use crate::test_utils::TestRandom; -use crate::{Epoch, Hash256}; +use crate::{Checkpoint, Crosslink, Hash256}; use serde_derive::{Deserialize, Serialize}; use ssz_derive::{Decode, Encode}; @@ -9,13 +9,12 @@ use tree_hash_derive::{CachedTreeHash, SignedRoot, TreeHash}; /// The data upon which an attestation is based. /// -/// Spec v0.6.3 +/// Spec v0.8.0 #[derive( Debug, Clone, PartialEq, Eq, - Default, Serialize, Deserialize, Hash, @@ -31,15 +30,11 @@ pub struct AttestationData { pub beacon_block_root: Hash256, // FFG Vote - pub source_epoch: Epoch, - pub source_root: Hash256, - pub target_epoch: Epoch, - pub target_root: Hash256, + pub source: Checkpoint, + pub target: Checkpoint, // Crosslink Vote - pub shard: u64, - pub previous_crosslink_root: Hash256, - pub crosslink_data_root: Hash256, + pub crosslink: Crosslink, } #[cfg(test)] diff --git a/eth2/types/src/attestation_data_and_custody_bit.rs b/eth2/types/src/attestation_data_and_custody_bit.rs index 601bc4041..8a829c079 100644 --- a/eth2/types/src/attestation_data_and_custody_bit.rs +++ b/eth2/types/src/attestation_data_and_custody_bit.rs @@ -7,12 +7,11 @@ use tree_hash_derive::{CachedTreeHash, TreeHash}; /// Used for pairing an attestation with a proof-of-custody. /// -/// Spec v0.6.3 +/// Spec v0.8.1 #[derive( Debug, Clone, PartialEq, - Default, Serialize, Deserialize, Encode, diff --git a/eth2/types/src/attester_slashing.rs b/eth2/types/src/attester_slashing.rs index 85770d290..ef80ad310 100644 --- a/eth2/types/src/attester_slashing.rs +++ b/eth2/types/src/attester_slashing.rs @@ -1,4 +1,4 @@ -use crate::{test_utils::TestRandom, IndexedAttestation}; +use crate::{test_utils::TestRandom, EthSpec, IndexedAttestation}; use serde_derive::{Deserialize, Serialize}; use ssz_derive::{Decode, Encode}; @@ -7,7 +7,7 @@ use tree_hash_derive::{CachedTreeHash, TreeHash}; /// Two conflicting attestations. /// -/// Spec v0.6.3 +/// Spec v0.8.0 #[derive( Debug, PartialEq, @@ -20,15 +20,17 @@ use tree_hash_derive::{CachedTreeHash, TreeHash}; CachedTreeHash, TestRandom, )] -pub struct AttesterSlashing { - pub attestation_1: IndexedAttestation, - pub attestation_2: IndexedAttestation, +#[serde(bound = "T: EthSpec")] +pub struct AttesterSlashing { + pub attestation_1: IndexedAttestation, + pub attestation_2: IndexedAttestation, } #[cfg(test)] mod tests { use super::*; + use crate::*; - ssz_tests!(AttesterSlashing); - cached_tree_hash_tests!(AttesterSlashing); + ssz_tests!(AttesterSlashing); + cached_tree_hash_tests!(AttesterSlashing); } diff --git a/eth2/types/src/beacon_block.rs b/eth2/types/src/beacon_block.rs index 18e5a37ec..772ef0c46 100644 --- a/eth2/types/src/beacon_block.rs +++ b/eth2/types/src/beacon_block.rs @@ -10,7 +10,7 @@ use tree_hash_derive::{CachedTreeHash, SignedRoot, TreeHash}; /// A block of the `BeaconChain`. /// -/// Spec v0.6.3 +/// Spec v0.8.0 #[derive( Debug, PartialEq, @@ -24,38 +24,39 @@ use tree_hash_derive::{CachedTreeHash, SignedRoot, TreeHash}; TestRandom, SignedRoot, )] -pub struct BeaconBlock { +#[serde(bound = "T: EthSpec")] +pub struct BeaconBlock { pub slot: Slot, - pub previous_block_root: Hash256, + pub parent_root: Hash256, pub state_root: Hash256, - pub body: BeaconBlockBody, + pub body: BeaconBlockBody, #[signed_root(skip_hashing)] pub signature: Signature, } -impl BeaconBlock { +impl BeaconBlock { /// Returns an empty block to be used during genesis. /// - /// Spec v0.6.3 - pub fn empty(spec: &ChainSpec) -> BeaconBlock { + /// Spec v0.8.1 + pub fn empty(spec: &ChainSpec) -> Self { BeaconBlock { slot: spec.genesis_slot, - previous_block_root: spec.zero_hash, - state_root: spec.zero_hash, + parent_root: Hash256::zero(), + state_root: Hash256::zero(), body: BeaconBlockBody { randao_reveal: Signature::empty_signature(), eth1_data: Eth1Data { - deposit_root: spec.zero_hash, - block_hash: spec.zero_hash, + deposit_root: Hash256::zero(), + block_hash: Hash256::zero(), deposit_count: 0, }, graffiti: [0; 32], - proposer_slashings: vec![], - attester_slashings: vec![], - attestations: vec![], - deposits: vec![], - voluntary_exits: vec![], - transfers: vec![], + proposer_slashings: VariableList::empty(), + attester_slashings: VariableList::empty(), + attestations: VariableList::empty(), + deposits: VariableList::empty(), + voluntary_exits: VariableList::empty(), + transfers: VariableList::empty(), }, signature: Signature::empty_signature(), } @@ -63,7 +64,7 @@ impl BeaconBlock { /// Returns the `signed_root` of the block. /// - /// Spec v0.6.3 + /// Spec v0.8.1 pub fn canonical_root(&self) -> Hash256 { Hash256::from_slice(&self.signed_root()[..]) } @@ -75,23 +76,23 @@ impl BeaconBlock { /// /// Note: performs a full tree-hash of `self.body`. /// - /// Spec v0.6.3 + /// Spec v0.8.1 pub fn block_header(&self) -> BeaconBlockHeader { BeaconBlockHeader { slot: self.slot, - previous_block_root: self.previous_block_root, + parent_root: self.parent_root, state_root: self.state_root, - block_body_root: Hash256::from_slice(&self.body.tree_hash_root()[..]), + body_root: Hash256::from_slice(&self.body.tree_hash_root()[..]), signature: self.signature.clone(), } } - /// Returns a "temporary" header, where the `state_root` is `spec.zero_hash`. + /// Returns a "temporary" header, where the `state_root` is `Hash256::zero()`. /// - /// Spec v0.6.3 - pub fn temporary_block_header(&self, spec: &ChainSpec) -> BeaconBlockHeader { + /// Spec v0.8.0 + pub fn temporary_block_header(&self) -> BeaconBlockHeader { BeaconBlockHeader { - state_root: spec.zero_hash, + state_root: Hash256::zero(), signature: Signature::empty_signature(), ..self.block_header() } @@ -102,6 +103,6 @@ impl BeaconBlock { mod tests { use super::*; - ssz_tests!(BeaconBlock); - cached_tree_hash_tests!(BeaconBlock); + ssz_tests!(BeaconBlock); + cached_tree_hash_tests!(BeaconBlock); } diff --git a/eth2/types/src/beacon_block_body.rs b/eth2/types/src/beacon_block_body.rs index 6b0eb1401..b1252420f 100644 --- a/eth2/types/src/beacon_block_body.rs +++ b/eth2/types/src/beacon_block_body.rs @@ -3,12 +3,13 @@ use crate::*; use serde_derive::{Deserialize, Serialize}; use ssz_derive::{Decode, Encode}; +use ssz_types::VariableList; use test_random_derive::TestRandom; use tree_hash_derive::{CachedTreeHash, TreeHash}; /// The body of a `BeaconChain` block, containing operations. /// -/// Spec v0.6.3 +/// Spec v0.8.0 #[derive( Debug, PartialEq, @@ -21,23 +22,24 @@ use tree_hash_derive::{CachedTreeHash, TreeHash}; CachedTreeHash, TestRandom, )] -pub struct BeaconBlockBody { +#[serde(bound = "T: EthSpec")] +pub struct BeaconBlockBody { pub randao_reveal: Signature, pub eth1_data: Eth1Data, #[serde(deserialize_with = "graffiti_from_hex_str")] pub graffiti: [u8; 32], - pub proposer_slashings: Vec, - pub attester_slashings: Vec, - pub attestations: Vec, - pub deposits: Vec, - pub voluntary_exits: Vec, - pub transfers: Vec, + pub proposer_slashings: VariableList, + pub attester_slashings: VariableList, T::MaxAttesterSlashings>, + pub attestations: VariableList, T::MaxAttestations>, + pub deposits: VariableList, + pub voluntary_exits: VariableList, + pub transfers: VariableList, } #[cfg(test)] mod tests { use super::*; - ssz_tests!(BeaconBlockBody); - cached_tree_hash_tests!(BeaconBlockBody); + ssz_tests!(BeaconBlockBody); + cached_tree_hash_tests!(BeaconBlockBody); } diff --git a/eth2/types/src/beacon_block_header.rs b/eth2/types/src/beacon_block_header.rs index 829130222..73370d67a 100644 --- a/eth2/types/src/beacon_block_header.rs +++ b/eth2/types/src/beacon_block_header.rs @@ -10,7 +10,7 @@ use tree_hash_derive::{CachedTreeHash, SignedRoot, TreeHash}; /// A header of a `BeaconBlock`. /// -/// Spec v0.6.3 +/// Spec v0.8.1 #[derive( Debug, PartialEq, @@ -26,9 +26,9 @@ use tree_hash_derive::{CachedTreeHash, SignedRoot, TreeHash}; )] pub struct BeaconBlockHeader { pub slot: Slot, - pub previous_block_root: Hash256, + pub parent_root: Hash256, pub state_root: Hash256, - pub block_body_root: Hash256, + pub body_root: Hash256, #[signed_root(skip_hashing)] pub signature: Signature, } @@ -36,18 +36,18 @@ pub struct BeaconBlockHeader { impl BeaconBlockHeader { /// Returns the `tree_hash_root` of the header. /// - /// Spec v0.6.3 + /// Spec v0.8.1 pub fn canonical_root(&self) -> Hash256 { Hash256::from_slice(&self.signed_root()[..]) } /// Given a `body`, consumes `self` and returns a complete `BeaconBlock`. /// - /// Spec v0.6.3 - pub fn into_block(self, body: BeaconBlockBody) -> BeaconBlock { + /// Spec v0.8.0 + pub fn into_block(self, body: BeaconBlockBody) -> BeaconBlock { BeaconBlock { slot: self.slot, - previous_block_root: self.previous_block_root, + parent_root: self.parent_root, state_root: self.state_root, body, signature: self.signature, diff --git a/eth2/types/src/beacon_state.rs b/eth2/types/src/beacon_state.rs index 1be6eac23..129b05f79 100644 --- a/eth2/types/src/beacon_state.rs +++ b/eth2/types/src/beacon_state.rs @@ -4,13 +4,13 @@ use crate::test_utils::TestRandom; use crate::*; use cached_tree_hash::{Error as TreeHashCacheError, TreeHashCache}; use compare_fields_derive::CompareFields; -use fixed_len_vec::{typenum::Unsigned, FixedLenVec}; use hashing::hash; use int_to_bytes::{int_to_bytes32, int_to_bytes8}; use pubkey_cache::PubkeyCache; use serde_derive::{Deserialize, Serialize}; use ssz::ssz_encode; use ssz_derive::{Decode, Encode}; +use ssz_types::{typenum::Unsigned, BitVector, FixedVector}; use test_random_derive::TestRandom; use tree_hash::TreeHash; use tree_hash_derive::{CachedTreeHash, TreeHash}; @@ -18,6 +18,7 @@ use tree_hash_derive::{CachedTreeHash, TreeHash}; pub use self::committee_cache::CommitteeCache; pub use beacon_state_types::*; +#[macro_use] mod beacon_state_types; mod committee_cache; mod exit_cache; @@ -44,7 +45,6 @@ pub enum Error { InsufficientIndexRoots, InsufficientAttestations, InsufficientCommittees, - InsufficientSlashedBalances, InsufficientStateRoots, NoCommitteeForShard, NoCommitteeForSlot, @@ -59,11 +59,12 @@ pub enum Error { RelativeEpochError(RelativeEpochError), CommitteeCacheUninitialized(RelativeEpoch), TreeHashCacheError(TreeHashCacheError), + SszTypesError(ssz_types::Error), } /// The state of the `BeaconChain` at some slot. /// -/// Spec v0.6.3 +/// Spec v0.8.0 #[derive( Debug, PartialEq, @@ -74,55 +75,63 @@ pub enum Error { Encode, Decode, TreeHash, - CachedTreeHash, CompareFields, + CachedTreeHash, )] +#[serde(bound = "T: EthSpec")] pub struct BeaconState where T: EthSpec, { - // Misc - pub slot: Slot, + // Versioning pub genesis_time: u64, + pub slot: Slot, pub fork: Fork, - // Validator registry - #[compare_fields(as_slice)] - pub validator_registry: Vec, - #[compare_fields(as_slice)] - pub balances: Vec, - - // Randomness and committees - pub latest_randao_mixes: FixedLenVec, - pub latest_start_shard: u64, - - // Finality - pub previous_epoch_attestations: Vec, - pub current_epoch_attestations: Vec, - pub previous_justified_epoch: Epoch, - pub current_justified_epoch: Epoch, - pub previous_justified_root: Hash256, - pub current_justified_root: Hash256, - pub justification_bitfield: u64, - pub finalized_epoch: Epoch, - pub finalized_root: Hash256, - - // Recent state - pub current_crosslinks: FixedLenVec, - pub previous_crosslinks: FixedLenVec, - pub latest_block_roots: FixedLenVec, - #[compare_fields(as_slice)] - pub latest_state_roots: FixedLenVec, - #[compare_fields(as_slice)] - latest_active_index_roots: FixedLenVec, - latest_slashed_balances: FixedLenVec, + // History pub latest_block_header: BeaconBlockHeader, - pub historical_roots: Vec, + #[compare_fields(as_slice)] + pub block_roots: FixedVector, + #[compare_fields(as_slice)] + pub state_roots: FixedVector, + pub historical_roots: VariableList, // Ethereum 1.0 chain data - pub latest_eth1_data: Eth1Data, - pub eth1_data_votes: Vec, - pub deposit_index: u64, + pub eth1_data: Eth1Data, + pub eth1_data_votes: VariableList, + pub eth1_deposit_index: u64, + + // Registry + #[compare_fields(as_slice)] + pub validators: VariableList, + #[compare_fields(as_slice)] + pub balances: VariableList, + + // Shuffling + pub start_shard: u64, + pub randao_mixes: FixedVector, + #[compare_fields(as_slice)] + active_index_roots: FixedVector, + #[compare_fields(as_slice)] + compact_committees_roots: FixedVector, + + // Slashings + slashings: FixedVector, + + // Attestations + pub previous_epoch_attestations: VariableList, T::MaxPendingAttestations>, + pub current_epoch_attestations: VariableList, T::MaxPendingAttestations>, + + // Crosslinks + pub previous_crosslinks: FixedVector, + pub current_crosslinks: FixedVector, + + // Finality + #[test_random(default)] + pub justification_bits: BitVector, + pub previous_justified_checkpoint: Checkpoint, + pub current_justified_checkpoint: Checkpoint, + pub finalized_checkpoint: Checkpoint, // Caching (not in the spec) #[serde(default)] @@ -152,75 +161,57 @@ where } impl BeaconState { - /// Produce the first state of the Beacon Chain. + /// Create a new BeaconState suitable for genesis. /// - /// This does not fully build a genesis beacon state, it omits processing of initial validator - /// deposits. To obtain a full genesis beacon state, use the `BeaconStateBuilder`. + /// Not a complete genesis state, see `initialize_beacon_state_from_eth1`. /// - /// Spec v0.6.3 - pub fn genesis( - genesis_time: u64, - latest_eth1_data: Eth1Data, - spec: &ChainSpec, - ) -> BeaconState { - let initial_crosslink = Crosslink { - epoch: T::genesis_epoch(), - previous_crosslink_root: spec.zero_hash, - crosslink_data_root: spec.zero_hash, - }; - + /// Spec v0.8.0 + pub fn new(genesis_time: u64, eth1_data: Eth1Data, spec: &ChainSpec) -> Self { BeaconState { - // Misc - slot: spec.genesis_slot, + // Versioning genesis_time, + slot: spec.genesis_slot, fork: Fork::genesis(T::genesis_epoch()), - // Validator registry - validator_registry: vec![], // Set later in the function. - balances: vec![], // Set later in the function. + // History + latest_block_header: BeaconBlock::::empty(spec).temporary_block_header(), + block_roots: FixedVector::from_elem(Hash256::zero()), + state_roots: FixedVector::from_elem(Hash256::zero()), + historical_roots: VariableList::empty(), - // Randomness and committees - latest_randao_mixes: FixedLenVec::from(vec![ - spec.zero_hash; - T::LatestRandaoMixesLength::to_usize() - ]), - latest_start_shard: 0, + // Eth1 + eth1_data, + eth1_data_votes: VariableList::empty(), + eth1_deposit_index: 0, + + // Validator registry + validators: VariableList::empty(), // Set later. + balances: VariableList::empty(), // Set later. + + // Shuffling + start_shard: 0, + randao_mixes: FixedVector::from_elem(Hash256::zero()), + active_index_roots: FixedVector::from_elem(Hash256::zero()), + compact_committees_roots: FixedVector::from_elem(Hash256::zero()), + + // Slashings + slashings: FixedVector::from_elem(0), + + // Attestations + previous_epoch_attestations: VariableList::empty(), + current_epoch_attestations: VariableList::empty(), + + // Crosslinks + previous_crosslinks: FixedVector::from_elem(Crosslink::default()), + current_crosslinks: FixedVector::from_elem(Crosslink::default()), // Finality - previous_epoch_attestations: vec![], - current_epoch_attestations: vec![], - previous_justified_epoch: T::genesis_epoch(), - current_justified_epoch: T::genesis_epoch(), - previous_justified_root: spec.zero_hash, - current_justified_root: spec.zero_hash, - justification_bitfield: 0, - finalized_epoch: T::genesis_epoch(), - finalized_root: spec.zero_hash, + justification_bits: BitVector::new(), + previous_justified_checkpoint: Checkpoint::default(), + current_justified_checkpoint: Checkpoint::default(), + finalized_checkpoint: Checkpoint::default(), - // Recent state - current_crosslinks: vec![initial_crosslink.clone(); T::ShardCount::to_usize()].into(), - previous_crosslinks: vec![initial_crosslink; T::ShardCount::to_usize()].into(), - latest_block_roots: vec![spec.zero_hash; T::SlotsPerHistoricalRoot::to_usize()].into(), - latest_state_roots: vec![spec.zero_hash; T::SlotsPerHistoricalRoot::to_usize()].into(), - latest_active_index_roots: vec![ - spec.zero_hash; - T::LatestActiveIndexRootsLength::to_usize() - ] - .into(), - latest_slashed_balances: vec![0; T::LatestSlashedExitLength::to_usize()].into(), - latest_block_header: BeaconBlock::empty(spec).temporary_block_header(spec), - historical_roots: vec![], - - /* - * PoW receipt root - */ - latest_eth1_data, - eth1_data_votes: vec![], - deposit_index: 0, - - /* - * Caching (not in spec) - */ + // Caching (not in spec) committee_caches: [ CommitteeCache::default(), CommitteeCache::default(), @@ -234,15 +225,15 @@ impl BeaconState { /// Returns the `tree_hash_root` of the state. /// - /// Spec v0.6.3 + /// Spec v0.8.1 pub fn canonical_root(&self) -> Hash256 { Hash256::from_slice(&self.tree_hash_root()[..]) } pub fn historical_batch(&self) -> HistoricalBatch { HistoricalBatch { - block_roots: self.latest_block_roots.clone(), - state_roots: self.latest_state_roots.clone(), + block_roots: self.block_roots.clone(), + state_roots: self.state_roots.clone(), } } @@ -251,19 +242,19 @@ impl BeaconState { /// /// Requires a fully up-to-date `pubkey_cache`, returns an error if this is not the case. pub fn get_validator_index(&self, pubkey: &PublicKey) -> Result, Error> { - if self.pubkey_cache.len() == self.validator_registry.len() { + if self.pubkey_cache.len() == self.validators.len() { Ok(self.pubkey_cache.get(pubkey)) } else { Err(Error::PubkeyCacheIncomplete { cache_len: self.pubkey_cache.len(), - registry_len: self.validator_registry.len(), + registry_len: self.validators.len(), }) } } /// The epoch corresponding to `self.slot`. /// - /// Spec v0.6.3 + /// Spec v0.8.1 pub fn current_epoch(&self) -> Epoch { self.slot.epoch(T::slots_per_epoch()) } @@ -272,7 +263,7 @@ impl BeaconState { /// /// If the current epoch is the genesis epoch, the genesis_epoch is returned. /// - /// Spec v0.6.3 + /// Spec v0.8.1 pub fn previous_epoch(&self) -> Epoch { let current_epoch = self.current_epoch(); if current_epoch > T::genesis_epoch() { @@ -284,12 +275,12 @@ impl BeaconState { /// The epoch following `self.current_epoch()`. /// - /// Spec v0.6.3 + /// Spec v0.8.1 pub fn next_epoch(&self) -> Epoch { self.current_epoch() + 1 } - pub fn get_epoch_committee_count(&self, relative_epoch: RelativeEpoch) -> Result { + pub fn get_committee_count(&self, relative_epoch: RelativeEpoch) -> Result { let cache = self.cache(relative_epoch)?; Ok(cache.epoch_committee_count() as u64) @@ -306,20 +297,25 @@ impl BeaconState { let active_validator_count = cache.active_validator_count(); let shard_delta = T::get_shard_delta(active_validator_count, spec.target_committee_size); - Ok((self.latest_start_shard + shard_delta) % T::ShardCount::to_u64()) + Ok((self.start_shard + shard_delta) % T::ShardCount::to_u64()) } /// Get the slot of an attestation. /// /// Note: Utilizes the cache and will fail if the appropriate cache is not initialized. /// - /// Spec v0.6.3 - pub fn get_attestation_slot(&self, attestation_data: &AttestationData) -> Result { + /// Spec v0.8.0 + pub fn get_attestation_data_slot( + &self, + attestation_data: &AttestationData, + ) -> Result { let target_relative_epoch = - RelativeEpoch::from_epoch(self.current_epoch(), attestation_data.target_epoch)?; + RelativeEpoch::from_epoch(self.current_epoch(), attestation_data.target.epoch)?; - let cc = - self.get_crosslink_committee_for_shard(attestation_data.shard, target_relative_epoch)?; + let cc = self.get_crosslink_committee_for_shard( + attestation_data.crosslink.shard, + target_relative_epoch, + )?; Ok(cc.slot) } @@ -342,9 +338,9 @@ impl BeaconState { /// /// Does not utilize the cache, performs a full iteration over the validator registry. /// - /// Spec v0.6.3 + /// Spec v0.8.1 pub fn get_active_validator_indices(&self, epoch: Epoch) -> Vec { - get_active_validator_indices(&self.validator_registry, epoch) + get_active_validator_indices(&self.validators, epoch) } /// Return the cached active validator indices at some epoch. @@ -362,7 +358,7 @@ impl BeaconState { /// /// Note: Utilizes the cache and will fail if the appropriate cache is not initialized. /// - /// Spec v0.6.3 + /// Spec v0.8.1 pub fn get_crosslink_committees_at_slot( &self, slot: Slot, @@ -379,7 +375,7 @@ impl BeaconState { /// /// Note: Utilizes the cache and will fail if the appropriate cache is not initialized. /// - /// Spec v0.6.3 + /// Spec v0.8.1 pub fn get_crosslink_committee_for_shard( &self, shard: u64, @@ -396,7 +392,7 @@ impl BeaconState { /// Returns the beacon proposer index for the `slot` in the given `relative_epoch`. /// - /// Spec v0.6.3 + /// Spec v0.8.1 // NOTE: be sure to test this bad boy. pub fn get_beacon_proposer_index( &self, @@ -410,7 +406,7 @@ impl BeaconState { let first_committee = cache .first_committee_at_slot(slot) .ok_or_else(|| Error::SlotOutOfBounds)?; - let seed = self.generate_seed(epoch, spec)?; + let seed = self.get_seed(epoch, spec)?; let mut i = 0; Ok(loop { @@ -421,7 +417,7 @@ impl BeaconState { let hash = hash(&preimage); hash[i % 32] }; - let effective_balance = self.validator_registry[candidate_index].effective_balance; + let effective_balance = self.validators[candidate_index].effective_balance; if (effective_balance * MAX_RANDOM_BYTE) >= (spec.max_effective_balance * u64::from(random_byte)) { @@ -433,10 +429,10 @@ impl BeaconState { /// Safely obtains the index for latest block roots, given some `slot`. /// - /// Spec v0.6.3 + /// Spec v0.8.1 fn get_latest_block_roots_index(&self, slot: Slot) -> Result { - if (slot < self.slot) && (self.slot <= slot + self.latest_block_roots.len() as u64) { - Ok(slot.as_usize() % self.latest_block_roots.len()) + if (slot < self.slot) && (self.slot <= slot + self.block_roots.len() as u64) { + Ok(slot.as_usize() % self.block_roots.len()) } else { Err(BeaconStateError::SlotOutOfBounds) } @@ -444,15 +440,15 @@ impl BeaconState { /// Return the block root at a recent `slot`. /// - /// Spec v0.6.3 + /// Spec v0.8.1 pub fn get_block_root(&self, slot: Slot) -> Result<&Hash256, BeaconStateError> { let i = self.get_latest_block_roots_index(slot)?; - Ok(&self.latest_block_roots[i]) + Ok(&self.block_roots[i]) } /// Return the block root at a recent `epoch`. /// - /// Spec v0.6.3 + /// Spec v0.8.1 // NOTE: the spec calls this get_block_root pub fn get_block_root_at_epoch(&self, epoch: Epoch) -> Result<&Hash256, BeaconStateError> { self.get_block_root(epoch.start_slot(T::slots_per_epoch())) @@ -460,25 +456,25 @@ impl BeaconState { /// Sets the block root for some given slot. /// - /// Spec v0.6.3 + /// Spec v0.8.1 pub fn set_block_root( &mut self, slot: Slot, block_root: Hash256, ) -> Result<(), BeaconStateError> { let i = self.get_latest_block_roots_index(slot)?; - self.latest_block_roots[i] = block_root; + self.block_roots[i] = block_root; Ok(()) } - /// Safely obtains the index for `latest_randao_mixes` + /// Safely obtains the index for `randao_mixes` /// - /// Spec v0.6.3 + /// Spec v0.8.0 fn get_randao_mix_index(&self, epoch: Epoch) -> Result { let current_epoch = self.current_epoch(); - let len = T::LatestRandaoMixesLength::to_u64(); + let len = T::EpochsPerHistoricalVector::to_u64(); - if (epoch + len > current_epoch) & (epoch <= current_epoch) { + if epoch + len > current_epoch && epoch <= current_epoch { Ok(epoch.as_usize() % len as usize) } else { Err(Error::EpochOutOfBounds) @@ -491,45 +487,45 @@ impl BeaconState { /// /// See `Self::get_randao_mix`. /// - /// Spec v0.6.3 + /// Spec v0.8.0 pub fn update_randao_mix(&mut self, epoch: Epoch, signature: &Signature) -> Result<(), Error> { - let i = epoch.as_usize() % T::LatestRandaoMixesLength::to_usize(); + let i = epoch.as_usize() % T::EpochsPerHistoricalVector::to_usize(); let signature_hash = Hash256::from_slice(&hash(&ssz_encode(signature))); - self.latest_randao_mixes[i] = *self.get_randao_mix(epoch)? ^ signature_hash; + self.randao_mixes[i] = *self.get_randao_mix(epoch)? ^ signature_hash; Ok(()) } /// Return the randao mix at a recent ``epoch``. /// - /// Spec v0.6.3 + /// Spec v0.8.1 pub fn get_randao_mix(&self, epoch: Epoch) -> Result<&Hash256, Error> { let i = self.get_randao_mix_index(epoch)?; - Ok(&self.latest_randao_mixes[i]) + Ok(&self.randao_mixes[i]) } /// Set the randao mix at a recent ``epoch``. /// - /// Spec v0.6.3 + /// Spec v0.8.1 pub fn set_randao_mix(&mut self, epoch: Epoch, mix: Hash256) -> Result<(), Error> { let i = self.get_randao_mix_index(epoch)?; - self.latest_randao_mixes[i] = mix; + self.randao_mixes[i] = mix; Ok(()) } - /// Safely obtains the index for `latest_active_index_roots`, given some `epoch`. + /// Safely obtains the index for `active_index_roots`, given some `epoch`. /// - /// Spec v0.6.3 + /// Spec v0.8.1 fn get_active_index_root_index(&self, epoch: Epoch, spec: &ChainSpec) -> Result { let current_epoch = self.current_epoch(); let lookahead = spec.activation_exit_delay; - let lookback = self.latest_active_index_roots.len() as u64 - lookahead; + let lookback = self.active_index_roots.len() as u64 - lookahead; - if (epoch + lookback > current_epoch) && (current_epoch + lookahead >= epoch) { - Ok(epoch.as_usize() % self.latest_active_index_roots.len()) + if epoch + lookback > current_epoch && current_epoch + lookahead >= epoch { + Ok(epoch.as_usize() % self.active_index_roots.len()) } else { Err(Error::EpochOutOfBounds) } @@ -537,15 +533,15 @@ impl BeaconState { /// Return the `active_index_root` at a recent `epoch`. /// - /// Spec v0.6.3 + /// Spec v0.8.1 pub fn get_active_index_root(&self, epoch: Epoch, spec: &ChainSpec) -> Result { let i = self.get_active_index_root_index(epoch, spec)?; - Ok(self.latest_active_index_roots[i]) + Ok(self.active_index_roots[i]) } /// Set the `active_index_root` at a recent `epoch`. /// - /// Spec v0.6.3 + /// Spec v0.8.1 pub fn set_active_index_root( &mut self, epoch: Epoch, @@ -553,24 +549,76 @@ impl BeaconState { spec: &ChainSpec, ) -> Result<(), Error> { let i = self.get_active_index_root_index(epoch, spec)?; - self.latest_active_index_roots[i] = index_root; + self.active_index_roots[i] = index_root; Ok(()) } /// Replace `active_index_roots` with clones of `index_root`. /// - /// Spec v0.6.3 + /// Spec v0.8.0 pub fn fill_active_index_roots_with(&mut self, index_root: Hash256) { - self.latest_active_index_roots = - vec![index_root; self.latest_active_index_roots.len()].into() + self.active_index_roots = FixedVector::from_elem(index_root); + } + + /// Safely obtains the index for `compact_committees_roots`, given some `epoch`. + /// + /// Spec v0.8.0 + fn get_compact_committee_root_index( + &self, + epoch: Epoch, + spec: &ChainSpec, + ) -> Result { + let current_epoch = self.current_epoch(); + + let lookahead = spec.activation_exit_delay; + let lookback = self.compact_committees_roots.len() as u64 - lookahead; + + if epoch + lookback > current_epoch && current_epoch + lookahead >= epoch { + Ok(epoch.as_usize() % self.compact_committees_roots.len()) + } else { + Err(Error::EpochOutOfBounds) + } + } + + /// Return the `compact_committee_root` at a recent `epoch`. + /// + /// Spec v0.8.0 + pub fn get_compact_committee_root( + &self, + epoch: Epoch, + spec: &ChainSpec, + ) -> Result { + let i = self.get_compact_committee_root_index(epoch, spec)?; + Ok(self.compact_committees_roots[i]) + } + + /// Set the `compact_committee_root` at a recent `epoch`. + /// + /// Spec v0.8.0 + pub fn set_compact_committee_root( + &mut self, + epoch: Epoch, + index_root: Hash256, + spec: &ChainSpec, + ) -> Result<(), Error> { + let i = self.get_compact_committee_root_index(epoch, spec)?; + self.compact_committees_roots[i] = index_root; + Ok(()) + } + + /// Replace `compact_committees_roots` with clones of `committee_root`. + /// + /// Spec v0.8.0 + pub fn fill_compact_committees_roots_with(&mut self, committee_root: Hash256) { + self.compact_committees_roots = FixedVector::from_elem(committee_root); } /// Safely obtains the index for latest state roots, given some `slot`. /// - /// Spec v0.6.3 + /// Spec v0.8.1 fn get_latest_state_roots_index(&self, slot: Slot) -> Result { - if (slot < self.slot) && (self.slot <= slot + Slot::from(self.latest_state_roots.len())) { - Ok(slot.as_usize() % self.latest_state_roots.len()) + if (slot < self.slot) && (self.slot <= slot + Slot::from(self.state_roots.len())) { + Ok(slot.as_usize() % self.state_roots.len()) } else { Err(BeaconStateError::SlotOutOfBounds) } @@ -578,69 +626,76 @@ impl BeaconState { /// Gets the state root for some slot. /// - /// Spec v0.6.3 + /// Spec v0.8.1 pub fn get_state_root(&self, slot: Slot) -> Result<&Hash256, Error> { let i = self.get_latest_state_roots_index(slot)?; - Ok(&self.latest_state_roots[i]) + Ok(&self.state_roots[i]) } /// Gets the oldest (earliest slot) state root. /// - /// Spec v0.6.3 + /// Spec v0.8.1 pub fn get_oldest_state_root(&self) -> Result<&Hash256, Error> { - let i = self - .get_latest_state_roots_index(self.slot - Slot::from(self.latest_state_roots.len()))?; - Ok(&self.latest_state_roots[i]) + let i = + self.get_latest_state_roots_index(self.slot - Slot::from(self.state_roots.len()))?; + Ok(&self.state_roots[i]) } /// Sets the latest state root for slot. /// - /// Spec v0.6.3 + /// Spec v0.8.1 pub fn set_state_root(&mut self, slot: Slot, state_root: Hash256) -> Result<(), Error> { let i = self.get_latest_state_roots_index(slot)?; - self.latest_state_roots[i] = state_root; + self.state_roots[i] = state_root; Ok(()) } - /// Safely obtains the index for `latest_slashed_balances`, given some `epoch`. + /// Safely obtain the index for `slashings`, given some `epoch`. /// - /// Spec v0.6.3 - fn get_slashed_balance_index(&self, epoch: Epoch) -> Result { - let i = epoch.as_usize() % self.latest_slashed_balances.len(); - - // NOTE: the validity of the epoch is not checked. It is not in the spec but it's probably - // useful to have. - if i < self.latest_slashed_balances.len() { - Ok(i) + /// Spec v0.8.0 + fn get_slashings_index(&self, epoch: Epoch) -> Result { + // We allow the slashings vector to be accessed at any cached epoch at or before + // the current epoch. + if epoch <= self.current_epoch() + && epoch + T::EpochsPerSlashingsVector::to_u64() >= self.current_epoch() + 1 + { + Ok((epoch.as_u64() % T::EpochsPerSlashingsVector::to_u64()) as usize) } else { - Err(Error::InsufficientSlashedBalances) + Err(Error::EpochOutOfBounds) } } - /// Gets the total slashed balances for some epoch. + /// Get a reference to the entire `slashings` vector. /// - /// Spec v0.6.3 - pub fn get_slashed_balance(&self, epoch: Epoch) -> Result { - let i = self.get_slashed_balance_index(epoch)?; - Ok(self.latest_slashed_balances[i]) + /// Spec v0.8.0 + pub fn get_all_slashings(&self) -> &[u64] { + &self.slashings } - /// Sets the total slashed balances for some epoch. + /// Get the total slashed balances for some epoch. /// - /// Spec v0.6.3 - pub fn set_slashed_balance(&mut self, epoch: Epoch, balance: u64) -> Result<(), Error> { - let i = self.get_slashed_balance_index(epoch)?; - self.latest_slashed_balances[i] = balance; + /// Spec v0.8.0 + pub fn get_slashings(&self, epoch: Epoch) -> Result { + let i = self.get_slashings_index(epoch)?; + Ok(self.slashings[i]) + } + + /// Set the total slashed balances for some epoch. + /// + /// Spec v0.8.0 + pub fn set_slashings(&mut self, epoch: Epoch, value: u64) -> Result<(), Error> { + let i = self.get_slashings_index(epoch)?; + self.slashings[i] = value; Ok(()) } /// Get the attestations from the current or previous epoch. /// - /// Spec v0.6.3 + /// Spec v0.8.1 pub fn get_matching_source_attestations( &self, epoch: Epoch, - ) -> Result<&[PendingAttestation], Error> { + ) -> Result<&[PendingAttestation], Error> { if epoch == self.current_epoch() { Ok(&self.current_epoch_attestations) } else if epoch == self.previous_epoch() { @@ -652,7 +707,7 @@ impl BeaconState { /// Get the current crosslink for a shard. /// - /// Spec v0.6.3 + /// Spec v0.8.1 pub fn get_current_crosslink(&self, shard: u64) -> Result<&Crosslink, Error> { self.current_crosslinks .get(shard as usize) @@ -661,41 +716,22 @@ impl BeaconState { /// Get the previous crosslink for a shard. /// - /// Spec v0.6.3 + /// Spec v0.8.1 pub fn get_previous_crosslink(&self, shard: u64) -> Result<&Crosslink, Error> { self.previous_crosslinks .get(shard as usize) .ok_or(Error::ShardOutOfBounds) } - /// Transform an attestation into the crosslink that it reinforces. - /// - /// Spec v0.6.3 - pub fn get_crosslink_from_attestation_data( - &self, - data: &AttestationData, - spec: &ChainSpec, - ) -> Result { - let current_crosslink_epoch = self.get_current_crosslink(data.shard)?.epoch; - Ok(Crosslink { - epoch: std::cmp::min( - data.target_epoch, - current_crosslink_epoch + spec.max_crosslink_epochs, - ), - previous_crosslink_root: data.previous_crosslink_root, - crosslink_data_root: data.crosslink_data_root, - }) - } - /// Generate a seed for the given `epoch`. /// - /// Spec v0.6.3 - pub fn generate_seed(&self, epoch: Epoch, spec: &ChainSpec) -> Result { + /// Spec v0.8.0 + pub fn get_seed(&self, epoch: Epoch, spec: &ChainSpec) -> Result { // Bypass the safe getter for RANDAO so we can gracefully handle the scenario where `epoch // == 0`. let randao = { - let i = epoch + T::latest_randao_mixes_length() as u64 - spec.min_seed_lookahead; - self.latest_randao_mixes[i.as_usize() % self.latest_randao_mixes.len()] + let i = epoch + T::EpochsPerHistoricalVector::to_u64() - spec.min_seed_lookahead - 1; + self.randao_mixes[i.as_usize() % self.randao_mixes.len()] }; let active_index_root = self.get_active_index_root(epoch, spec)?; let epoch_bytes = int_to_bytes32(epoch.as_u64()); @@ -710,13 +746,13 @@ impl BeaconState { /// Return the effective balance (also known as "balance at stake") for a validator with the given ``index``. /// - /// Spec v0.6.3 + /// Spec v0.8.1 pub fn get_effective_balance( &self, validator_index: usize, _spec: &ChainSpec, ) -> Result { - self.validator_registry + self.validators .get(validator_index) .map(|v| v.effective_balance) .ok_or_else(|| Error::UnknownValidator) @@ -724,8 +760,8 @@ impl BeaconState { /// Return the epoch at which an activation or exit triggered in ``epoch`` takes effect. /// - /// Spec v0.6.3 - pub fn get_delayed_activation_exit_epoch(&self, epoch: Epoch, spec: &ChainSpec) -> Epoch { + /// Spec v0.8.1 + pub fn compute_activation_exit_epoch(&self, epoch: Epoch, spec: &ChainSpec) -> Epoch { epoch + 1 + spec.activation_exit_delay } @@ -733,7 +769,7 @@ impl BeaconState { /// /// Uses the epoch cache, and will error if it isn't initialized. /// - /// Spec v0.6.3 + /// Spec v0.8.1 pub fn get_churn_limit(&self, spec: &ChainSpec) -> Result { Ok(std::cmp::max( spec.min_per_epoch_churn_limit, @@ -747,7 +783,7 @@ impl BeaconState { /// /// Note: Utilizes the cache and will fail if the appropriate cache is not initialized. /// - /// Spec v0.6.3 + /// Spec v0.8.1 pub fn get_attestation_duties( &self, validator_index: usize, @@ -760,7 +796,7 @@ impl BeaconState { /// Return the combined effective balance of an array of validators. /// - /// Spec v0.6.3 + /// Spec v0.8.1 pub fn get_total_balance( &self, validator_indices: &[usize], @@ -779,8 +815,7 @@ impl BeaconState { self.build_committee_cache(RelativeEpoch::Next, spec)?; self.update_pubkey_cache()?; self.update_tree_hash_cache()?; - self.exit_cache - .build_from_registry(&self.validator_registry, spec); + self.exit_cache.build_from_registry(&self.validators, spec); Ok(()) } @@ -867,11 +902,11 @@ impl BeaconState { /// Updates the pubkey cache, if required. /// - /// Adds all `pubkeys` from the `validator_registry` which are not already in the cache. Will + /// Adds all `pubkeys` from the `validators` which are not already in the cache. Will /// never re-add a pubkey. pub fn update_pubkey_cache(&mut self) -> Result<(), Error> { for (i, validator) in self - .validator_registry + .validators .iter() .enumerate() .skip(self.pubkey_cache.len()) @@ -895,6 +930,7 @@ impl BeaconState { /// Returns the `tree_hash_root` resulting from the update. This root can be considered the /// canonical root of `self`. pub fn update_tree_hash_cache(&mut self) -> Result { + /* TODO(#440): re-enable cached tree hash if self.tree_hash_cache.is_empty() { self.tree_hash_cache = TreeHashCache::new(self)?; } else { @@ -908,6 +944,8 @@ impl BeaconState { } self.cached_tree_hash_root() + */ + Ok(Hash256::from_slice(&self.tree_hash_root())) } /// Returns the tree hash root determined by the last execution of `self.update_tree_hash_cache(..)`. @@ -917,10 +955,13 @@ impl BeaconState { /// Returns an error if the cache is not initialized or if an error is encountered during the /// cache update. pub fn cached_tree_hash_root(&self) -> Result { + /* TODO(#440): re-enable cached tree hash self.tree_hash_cache .tree_hash_root() .and_then(|b| Ok(Hash256::from_slice(b))) .map_err(Into::into) + */ + Ok(Hash256::from_slice(&self.tree_hash_root())) } /// Completely drops the tree hash cache, replacing it with a new, empty cache. @@ -940,3 +981,9 @@ impl From for Error { Error::TreeHashCacheError(e) } } + +impl From for Error { + fn from(e: ssz_types::Error) -> Error { + Error::SszTypesError(e) + } +} diff --git a/eth2/types/src/beacon_state/beacon_state_types.rs b/eth2/types/src/beacon_state/beacon_state_types.rs index 7e4a04258..1dc34e195 100644 --- a/eth2/types/src/beacon_state/beacon_state_types.rs +++ b/eth2/types/src/beacon_state/beacon_state_types.rs @@ -1,18 +1,56 @@ use crate::*; -use fixed_len_vec::typenum::{Unsigned, U0, U1024, U64, U8, U8192}; use serde_derive::{Deserialize, Serialize}; +use ssz_types::typenum::{ + Unsigned, U0, U1, U1024, U1099511627776, U128, U16, U16777216, U4, U4096, U64, U65536, U8, + U8192, +}; use std::fmt::Debug; pub trait EthSpec: 'static + Default + Sync + Send + Clone + Debug + PartialEq { + /* + * Constants + */ + type JustificationBitsLength: Unsigned + Clone + Sync + Send + Debug + PartialEq + Default; + /* + * Misc + */ type ShardCount: Unsigned + Clone + Sync + Send + Debug + PartialEq; - type SlotsPerHistoricalRoot: Unsigned + Clone + Sync + Send + Debug + PartialEq; - type LatestRandaoMixesLength: Unsigned + Clone + Sync + Send + Debug + PartialEq; - type LatestActiveIndexRootsLength: Unsigned + Clone + Sync + Send + Debug + PartialEq; - type LatestSlashedExitLength: Unsigned + Clone + Sync + Send + Debug + PartialEq; - /// Note: `SlotsPerEpoch` is not necessarily required to be a compile-time constant. We include - /// it here just for the convenience of not passing `slots_per_epoch` around all the time. - type SlotsPerEpoch: Unsigned + Clone + Sync + Send + Debug + PartialEq; + type MaxValidatorsPerCommittee: Unsigned + Clone + Sync + Send + Debug + PartialEq; + /* + * Initial values + */ type GenesisEpoch: Unsigned + Clone + Sync + Send + Debug + PartialEq; + /* + * Time parameters + */ + type SlotsPerEpoch: Unsigned + Clone + Sync + Send + Debug + PartialEq; + type SlotsPerEth1VotingPeriod: Unsigned + Clone + Sync + Send + Debug + PartialEq; + type SlotsPerHistoricalRoot: Unsigned + Clone + Sync + Send + Debug + PartialEq; + /* + * State list lengths + */ + type EpochsPerHistoricalVector: Unsigned + Clone + Sync + Send + Debug + PartialEq; + type EpochsPerSlashingsVector: Unsigned + Clone + Sync + Send + Debug + PartialEq; + type HistoricalRootsLimit: Unsigned + Clone + Sync + Send + Debug + PartialEq; + type ValidatorRegistryLimit: Unsigned + Clone + Sync + Send + Debug + PartialEq; + /* + * Max operations per block + */ + type MaxProposerSlashings: Unsigned + Clone + Sync + Send + Debug + PartialEq; + type MaxAttesterSlashings: Unsigned + Clone + Sync + Send + Debug + PartialEq; + type MaxAttestations: Unsigned + Clone + Sync + Send + Debug + PartialEq; + type MaxDeposits: Unsigned + Clone + Sync + Send + Debug + PartialEq; + type MaxVoluntaryExits: Unsigned + Clone + Sync + Send + Debug + PartialEq; + type MaxTransfers: Unsigned + Clone + Sync + Send + Debug + PartialEq; + /* + * Derived values (set these CAREFULLY) + */ + /// The length of the `{previous,current}_epoch_attestations` lists. + /// + /// Must be set to `MaxAttestations * SlotsPerEpoch` + // NOTE: we could safely instantiate this by using type-level arithmetic, but doing + // so adds ~25s to the time required to type-check this crate + type MaxPendingAttestations: Unsigned + Clone + Sync + Send + Debug + PartialEq; fn default_spec() -> ChainSpec; @@ -22,11 +60,8 @@ pub trait EthSpec: 'static + Default + Sync + Send + Clone + Debug + PartialEq { /// Return the number of committees in one epoch. /// - /// Spec v0.6.3 - fn get_epoch_committee_count( - active_validator_count: usize, - target_committee_size: usize, - ) -> usize { + /// Spec v0.8.1 + fn get_committee_count(active_validator_count: usize, target_committee_size: usize) -> usize { let shard_count = Self::shard_count(); let slots_per_epoch = Self::slots_per_epoch() as usize; @@ -39,12 +74,12 @@ pub trait EthSpec: 'static + Default + Sync + Send + Clone + Debug + PartialEq { ) * slots_per_epoch } - /// Return the number of shards to increment `state.latest_start_shard` by in a given epoch. + /// Return the number of shards to increment `state.start_shard` by in a given epoch. /// - /// Spec v0.6.3 + /// Spec v0.8.1 fn get_shard_delta(active_validator_count: usize, target_committee_size: usize) -> u64 { std::cmp::min( - Self::get_epoch_committee_count(active_validator_count, target_committee_size) as u64, + Self::get_committee_count(active_validator_count, target_committee_size) as u64, Self::ShardCount::to_u64() - Self::ShardCount::to_u64() / Self::slots_per_epoch(), ) } @@ -60,61 +95,66 @@ pub trait EthSpec: 'static + Default + Sync + Send + Clone + Debug + PartialEq { /// Returns the `SLOTS_PER_EPOCH` constant for this specification. /// - /// Spec v0.6.3 + /// Spec v0.8.1 fn slots_per_epoch() -> u64 { Self::SlotsPerEpoch::to_u64() } /// Returns the `SHARD_COUNT` constant for this specification. /// - /// Spec v0.6.3 + /// Spec v0.8.1 fn shard_count() -> usize { Self::ShardCount::to_usize() } /// Returns the `SLOTS_PER_HISTORICAL_ROOT` constant for this specification. /// - /// Spec v0.6.3 + /// Spec v0.8.1 fn slots_per_historical_root() -> usize { Self::SlotsPerHistoricalRoot::to_usize() } - /// Returns the `LATEST_RANDAO_MIXES_LENGTH` constant for this specification. + /// Returns the `EPOCHS_PER_HISTORICAL_VECTOR` constant for this specification. /// - /// Spec v0.6.3 - fn latest_randao_mixes_length() -> usize { - Self::LatestRandaoMixesLength::to_usize() + /// Spec v0.8.1 + fn epochs_per_historical_vector() -> usize { + Self::EpochsPerHistoricalVector::to_usize() } +} - /// Returns the `LATEST_ACTIVE_INDEX_ROOTS` constant for this specification. - /// - /// Spec v0.6.3 - fn latest_active_index_roots() -> usize { - Self::LatestActiveIndexRootsLength::to_usize() - } - - /// Returns the `LATEST_SLASHED_EXIT_LENGTH` constant for this specification. - /// - /// Spec v0.6.3 - fn latest_slashed_exit_length() -> usize { - Self::LatestSlashedExitLength::to_usize() +/// Macro to inherit some type values from another EthSpec. +#[macro_export] +macro_rules! params_from_eth_spec { + ($spec_ty:ty { $($ty_name:ident),+ }) => { + $(type $ty_name = <$spec_ty as EthSpec>::$ty_name;)+ } } /// Ethereum Foundation specifications. /// -/// Spec v0.6.3 +/// Spec v0.8.0 #[derive(Clone, PartialEq, Debug, Default, Serialize, Deserialize)] pub struct MainnetEthSpec; impl EthSpec for MainnetEthSpec { + type JustificationBitsLength = U4; type ShardCount = U1024; - type SlotsPerHistoricalRoot = U8192; - type LatestRandaoMixesLength = U8192; - type LatestActiveIndexRootsLength = U8192; - type LatestSlashedExitLength = U8192; - type SlotsPerEpoch = U64; + type MaxValidatorsPerCommittee = U4096; type GenesisEpoch = U0; + type SlotsPerEpoch = U64; + type SlotsPerEth1VotingPeriod = U1024; + type SlotsPerHistoricalRoot = U8192; + type EpochsPerHistoricalVector = U65536; + type EpochsPerSlashingsVector = U8192; + type HistoricalRootsLimit = U16777216; + type ValidatorRegistryLimit = U1099511627776; + type MaxProposerSlashings = U16; + type MaxAttesterSlashings = U1; + type MaxAttestations = U128; + type MaxDeposits = U16; + type MaxVoluntaryExits = U16; + type MaxTransfers = U0; + type MaxPendingAttestations = U8192; // 128 max attestations * 64 slots per epoch fn default_spec() -> ChainSpec { ChainSpec::mainnet() @@ -125,20 +165,34 @@ pub type FoundationBeaconState = BeaconState; /// Ethereum Foundation minimal spec, as defined here: /// -/// https://github.com/ethereum/eth2.0-specs/blob/v0.6.3/configs/constant_presets/minimal.yaml +/// https://github.com/ethereum/eth2.0-specs/blob/v0.8.0/configs/constant_presets/minimal.yaml /// -/// Spec v0.6.3 +/// Spec v0.8.0 #[derive(Clone, PartialEq, Debug, Default, Serialize, Deserialize)] pub struct MinimalEthSpec; impl EthSpec for MinimalEthSpec { type ShardCount = U8; - type SlotsPerHistoricalRoot = U64; - type LatestRandaoMixesLength = U64; - type LatestActiveIndexRootsLength = U64; - type LatestSlashedExitLength = U64; type SlotsPerEpoch = U8; - type GenesisEpoch = U0; + type SlotsPerEth1VotingPeriod = U16; + type SlotsPerHistoricalRoot = U64; + type EpochsPerHistoricalVector = U64; + type EpochsPerSlashingsVector = U64; + type MaxPendingAttestations = U1024; // 128 max attestations * 8 slots per epoch + + params_from_eth_spec!(MainnetEthSpec { + JustificationBitsLength, + MaxValidatorsPerCommittee, + GenesisEpoch, + HistoricalRootsLimit, + ValidatorRegistryLimit, + MaxProposerSlashings, + MaxAttesterSlashings, + MaxAttestations, + MaxDeposits, + MaxVoluntaryExits, + MaxTransfers + }); fn default_spec() -> ChainSpec { ChainSpec::minimal() diff --git a/eth2/types/src/beacon_state/committee_cache.rs b/eth2/types/src/beacon_state/committee_cache.rs index 54564d95d..d9d2e9864 100644 --- a/eth2/types/src/beacon_state/committee_cache.rs +++ b/eth2/types/src/beacon_state/committee_cache.rs @@ -24,7 +24,7 @@ pub struct CommitteeCache { impl CommitteeCache { /// Return a new, fully initialized cache. /// - /// Spec v0.6.3 + /// Spec v0.8.1 pub fn initialized( state: &BeaconState, epoch: Epoch, @@ -38,22 +38,20 @@ impl CommitteeCache { return Err(Error::ZeroSlotsPerEpoch); } - let active_validator_indices = - get_active_validator_indices(&state.validator_registry, epoch); + let active_validator_indices = get_active_validator_indices(&state.validators, epoch); if active_validator_indices.is_empty() { return Err(Error::InsufficientValidators); } - let committee_count = T::get_epoch_committee_count( - active_validator_indices.len(), - spec.target_committee_size, - ) as usize; + let committee_count = + T::get_committee_count(active_validator_indices.len(), spec.target_committee_size) + as usize; let shuffling_start_shard = Self::compute_start_shard(state, relative_epoch, active_validator_indices.len(), spec); - let seed = state.generate_seed(epoch, spec)?; + let seed = state.get_seed(epoch, spec)?; let shuffling = shuffle_list( active_validator_indices, @@ -64,11 +62,11 @@ impl CommitteeCache { .ok_or_else(|| Error::UnableToShuffle)?; // The use of `NonZeroUsize` reduces the maximum number of possible validators by one. - if state.validator_registry.len() > usize::max_value() - 1 { + if state.validators.len() > usize::max_value() - 1 { return Err(Error::TooManyValidators); } - let mut shuffling_positions = vec![None; state.validator_registry.len()]; + let mut shuffling_positions = vec![None; state.validators.len()]; for (i, v) in shuffling.iter().enumerate() { shuffling_positions[*v] = NonZeroUsize::new(i + 1); } @@ -88,7 +86,7 @@ impl CommitteeCache { /// /// The `active_validator_count` must be the number of validators active at `relative_epoch`. /// - /// Spec v0.6.3 + /// Spec v0.8.1 pub fn compute_start_shard( state: &BeaconState, relative_epoch: RelativeEpoch, @@ -96,21 +94,21 @@ impl CommitteeCache { spec: &ChainSpec, ) -> u64 { match relative_epoch { - RelativeEpoch::Current => state.latest_start_shard, + RelativeEpoch::Current => state.start_shard, RelativeEpoch::Previous => { let shard_delta = T::get_shard_delta(active_validator_count, spec.target_committee_size); - (state.latest_start_shard + T::ShardCount::to_u64() - shard_delta) + (state.start_shard + T::ShardCount::to_u64() - shard_delta) % T::ShardCount::to_u64() } RelativeEpoch::Next => { let current_active_validators = - get_active_validator_count(&state.validator_registry, state.current_epoch()); + get_active_validator_count(&state.validators, state.current_epoch()); let shard_delta = T::get_shard_delta(current_active_validators, spec.target_committee_size); - (state.latest_start_shard + shard_delta) % T::ShardCount::to_u64() + (state.start_shard + shard_delta) % T::ShardCount::to_u64() } } } @@ -128,7 +126,7 @@ impl CommitteeCache { /// /// Always returns `&[]` for a non-initialized epoch. /// - /// Spec v0.6.3 + /// Spec v0.8.1 pub fn active_validator_indices(&self) -> &[usize] { &self.shuffling } @@ -137,7 +135,7 @@ impl CommitteeCache { /// /// Always returns `&[]` for a non-initialized epoch. /// - /// Spec v0.6.3 + /// Spec v0.8.1 pub fn shuffling(&self) -> &[usize] { &self.shuffling } @@ -147,7 +145,7 @@ impl CommitteeCache { /// /// Always returns `None` for a non-initialized epoch. /// - /// Spec v0.6.3 + /// Spec v0.8.1 pub fn get_crosslink_committee_for_shard(&self, shard: Shard) -> Option { if shard >= self.shard_count || self.initialized_epoch.is_none() { return None; @@ -201,7 +199,7 @@ impl CommitteeCache { /// /// Always returns `usize::default()` for a non-initialized epoch. /// - /// Spec v0.6.3 + /// Spec v0.8.1 pub fn active_validator_count(&self) -> usize { self.shuffling.len() } @@ -210,7 +208,7 @@ impl CommitteeCache { /// /// Always returns `usize::default()` for a non-initialized epoch. /// - /// Spec v0.6.3 + /// Spec v0.8.1 pub fn epoch_committee_count(&self) -> usize { self.committee_count } @@ -226,7 +224,7 @@ impl CommitteeCache { /// /// Returns `None` if `slot` is not in the initialized epoch, or if `Self` is not initialized. /// - /// Spec v0.6.3 + /// Spec v0.8.1 pub fn get_crosslink_committees_for_slot(&self, slot: Slot) -> Option> { let position = self .initialized_epoch? @@ -258,7 +256,7 @@ impl CommitteeCache { /// /// Always returns `None` for a non-initialized epoch. /// - /// Spec v0.6.3 + /// Spec v0.8.1 pub fn first_committee_at_slot(&self, slot: Slot) -> Option<&[usize]> { self.get_crosslink_committees_for_slot(slot)? .first() @@ -267,7 +265,7 @@ impl CommitteeCache { /// Returns a slice of `self.shuffling` that represents the `index`'th committee in the epoch. /// - /// Spec v0.6.3 + /// Spec v0.8.1 fn compute_committee(&self, index: usize) -> Option<&[usize]> { Some(&self.shuffling[self.compute_committee_range(index)?]) } @@ -276,9 +274,11 @@ impl CommitteeCache { /// /// To avoid a divide-by-zero, returns `None` if `self.committee_count` is zero. /// - /// Spec v0.6.3 + /// Will also return `None` if the index is out of bounds. + /// + /// Spec v0.8.1 fn compute_committee_range(&self, index: usize) -> Option> { - if self.committee_count == 0 { + if self.committee_count == 0 || index >= self.committee_count { return None; } @@ -295,7 +295,7 @@ impl CommitteeCache { /// /// Always returns `None` for a non-initialized epoch. /// - /// Spec v0.6.3 + /// Spec v0.8.1 fn crosslink_slot_for_shard(&self, shard: u64) -> Option { let offset = (shard + self.shard_count - self.shuffling_start_shard) % self.shard_count; Some( @@ -314,10 +314,10 @@ impl CommitteeCache { } } -/// Returns a list of all `validator_registry` indices where the validator is active at the given +/// Returns a list of all `validators` indices where the validator is active at the given /// `epoch`. /// -/// Spec v0.6.3 +/// Spec v0.8.1 pub fn get_active_validator_indices(validators: &[Validator], epoch: Epoch) -> Vec { let mut active = Vec::with_capacity(validators.len()); @@ -332,10 +332,10 @@ pub fn get_active_validator_indices(validators: &[Validator], epoch: Epoch) -> V active } -/// Returns the count of all `validator_registry` indices where the validator is active at the given +/// Returns the count of all `validators` indices where the validator is active at the given /// `epoch`. /// -/// Spec v0.6.3 +/// Spec v0.8.1 fn get_active_validator_count(validators: &[Validator], epoch: Epoch) -> usize { validators.iter().filter(|v| v.is_active_at(epoch)).count() } diff --git a/eth2/types/src/beacon_state/committee_cache/tests.rs b/eth2/types/src/beacon_state/committee_cache/tests.rs index f25a4f727..0fe2fb8a4 100644 --- a/eth2/types/src/beacon_state/committee_cache/tests.rs +++ b/eth2/types/src/beacon_state/committee_cache/tests.rs @@ -1,8 +1,8 @@ #![cfg(test)] use super::*; use crate::{test_utils::*, *}; -use fixed_len_vec::typenum::*; use serde_derive::{Deserialize, Serialize}; +use ssz_types::typenum::*; #[test] fn default_values() { @@ -63,6 +63,8 @@ fn initializes_with_the_right_epoch() { #[test] fn shuffles_for_the_right_epoch() { + use crate::EthSpec; + let num_validators = MinimalEthSpec::minimum_validator_count() * 2; let epoch = Epoch::new(100_000_000); let slot = epoch.start_slot(MinimalEthSpec::slots_per_epoch()); @@ -70,16 +72,16 @@ fn shuffles_for_the_right_epoch() { let mut state = new_state::(num_validators, slot); let spec = &MinimalEthSpec::default_spec(); - let distinct_hashes: Vec = (0..MinimalEthSpec::latest_randao_mixes_length()) + let distinct_hashes: Vec = (0..MinimalEthSpec::epochs_per_historical_vector()) .into_iter() .map(|i| Hash256::from(i as u64)) .collect(); - state.latest_randao_mixes = FixedLenVec::from(distinct_hashes); + state.randao_mixes = FixedVector::from(distinct_hashes); - let previous_seed = state.generate_seed(state.previous_epoch(), spec).unwrap(); - let current_seed = state.generate_seed(state.current_epoch(), spec).unwrap(); - let next_seed = state.generate_seed(state.next_epoch(), spec).unwrap(); + let previous_seed = state.get_seed(state.previous_epoch(), spec).unwrap(); + let current_seed = state.get_seed(state.current_epoch(), spec).unwrap(); + let next_seed = state.get_seed(state.next_epoch(), spec).unwrap(); assert!((previous_seed != current_seed) && (current_seed != next_seed)); @@ -131,7 +133,7 @@ fn can_start_on_any_shard() { let shard_count = MinimalEthSpec::shard_count() as u64; for i in 0..MinimalEthSpec::shard_count() as u64 { - state.latest_start_shard = i; + state.start_shard = i; let cache = CommitteeCache::initialized(&state, state.current_epoch(), spec).unwrap(); assert_eq!(cache.shuffling_start_shard, i); @@ -154,12 +156,26 @@ pub struct ExcessShardsEthSpec; impl EthSpec for ExcessShardsEthSpec { type ShardCount = U128; - type SlotsPerHistoricalRoot = U8192; - type LatestRandaoMixesLength = U8192; - type LatestActiveIndexRootsLength = U8192; - type LatestSlashedExitLength = U8192; type SlotsPerEpoch = U8; - type GenesisEpoch = U0; + type MaxPendingAttestations = U1024; + + params_from_eth_spec!(MinimalEthSpec { + JustificationBitsLength, + MaxValidatorsPerCommittee, + GenesisEpoch, + SlotsPerEth1VotingPeriod, + SlotsPerHistoricalRoot, + EpochsPerHistoricalVector, + EpochsPerSlashingsVector, + HistoricalRootsLimit, + ValidatorRegistryLimit, + MaxProposerSlashings, + MaxAttesterSlashings, + MaxAttestations, + MaxDeposits, + MaxVoluntaryExits, + MaxTransfers + }); fn default_spec() -> ChainSpec { ChainSpec::minimal() @@ -177,13 +193,13 @@ fn starts_on_the_correct_shard() { let mut state = new_state::(num_validators, slot); - let validator_count = state.validator_registry.len(); + let validator_count = state.validators.len(); let previous_epoch = state.previous_epoch(); let current_epoch = state.current_epoch(); let next_epoch = state.next_epoch(); - for (i, mut v) in state.validator_registry.iter_mut().enumerate() { + for (i, mut v) in state.validators.iter_mut().enumerate() { let epoch = if i < validator_count / 4 { previous_epoch } else if i < validator_count / 2 { @@ -196,28 +212,28 @@ fn starts_on_the_correct_shard() { } assert_eq!( - get_active_validator_count(&state.validator_registry, previous_epoch), + get_active_validator_count(&state.validators, previous_epoch), validator_count / 4 ); assert_eq!( - get_active_validator_count(&state.validator_registry, current_epoch), + get_active_validator_count(&state.validators, current_epoch), validator_count / 2 ); assert_eq!( - get_active_validator_count(&state.validator_registry, next_epoch), + get_active_validator_count(&state.validators, next_epoch), validator_count ); - let previous_shards = ExcessShardsEthSpec::get_epoch_committee_count( - get_active_validator_count(&state.validator_registry, previous_epoch), + let previous_shards = ExcessShardsEthSpec::get_committee_count( + get_active_validator_count(&state.validators, previous_epoch), spec.target_committee_size, ); - let current_shards = ExcessShardsEthSpec::get_epoch_committee_count( - get_active_validator_count(&state.validator_registry, current_epoch), + let current_shards = ExcessShardsEthSpec::get_committee_count( + get_active_validator_count(&state.validators, current_epoch), spec.target_committee_size, ); - let next_shards = ExcessShardsEthSpec::get_epoch_committee_count( - get_active_validator_count(&state.validator_registry, next_epoch), + let next_shards = ExcessShardsEthSpec::get_committee_count( + get_active_validator_count(&state.validators, next_epoch), spec.target_committee_size, ); @@ -233,7 +249,7 @@ fn starts_on_the_correct_shard() { let shard_count = ExcessShardsEthSpec::shard_count(); for i in 0..ExcessShardsEthSpec::shard_count() { - state.latest_start_shard = i as u64; + state.start_shard = i as u64; let cache = CommitteeCache::initialized(&state, state.current_epoch(), spec).unwrap(); assert_eq!(cache.shuffling_start_shard as usize, i); diff --git a/eth2/types/src/beacon_state/exit_cache.rs b/eth2/types/src/beacon_state/exit_cache.rs index c129d70a2..475dab3d6 100644 --- a/eth2/types/src/beacon_state/exit_cache.rs +++ b/eth2/types/src/beacon_state/exit_cache.rs @@ -8,8 +8,8 @@ pub struct ExitCache(HashMap); impl ExitCache { /// Add all validators with a non-trivial exit epoch to the cache. - pub fn build_from_registry(&mut self, validator_registry: &[Validator], spec: &ChainSpec) { - validator_registry + pub fn build_from_registry(&mut self, validators: &[Validator], spec: &ChainSpec) { + validators .iter() .filter(|validator| validator.exit_epoch != spec.far_future_epoch) .for_each(|validator| self.record_validator_exit(validator.exit_epoch)); diff --git a/eth2/types/src/beacon_state/tests.rs b/eth2/types/src/beacon_state/tests.rs index fd30a816e..cff034e56 100644 --- a/eth2/types/src/beacon_state/tests.rs +++ b/eth2/types/src/beacon_state/tests.rs @@ -44,7 +44,7 @@ fn test_beacon_proposer_index() { // Test with two validators per slot, first validator has zero balance. let mut state = build_state(T::slots_per_epoch() as usize * 2); let shuffling = state.get_shuffling(relative_epoch).unwrap().to_vec(); - state.validator_registry[shuffling[0]].effective_balance = 0; + state.validators[shuffling[0]].effective_balance = 0; test(&state, Slot::new(0), 1); for i in 1..T::slots_per_epoch() { test(&state, Slot::from(i), i as usize * 2); @@ -64,7 +64,7 @@ fn active_index_range(current_epoch: Epoch) -> RangeInclusive let delay = T::default_spec().activation_exit_delay; let start: i32 = - current_epoch.as_u64() as i32 - T::latest_active_index_roots() as i32 + delay as i32; + current_epoch.as_u64() as i32 - T::epochs_per_historical_vector() as i32 + delay as i32; let end = current_epoch + delay; let start: Epoch = if start < 0 { @@ -87,7 +87,7 @@ fn test_active_index(state_slot: Slot) { let range = active_index_range::(state.current_epoch()); - let modulo = |epoch: Epoch| epoch.as_usize() % T::latest_active_index_roots(); + let modulo = |epoch: Epoch| epoch.as_usize() % T::epochs_per_historical_vector(); // Test the start and end of the range. assert_eq!( @@ -117,7 +117,7 @@ fn test_active_index(state_slot: Slot) { fn get_active_index_root_index() { test_active_index::(Slot::new(0)); - let epoch = Epoch::from(MainnetEthSpec::latest_active_index_roots() * 4); + let epoch = Epoch::from(MainnetEthSpec::epochs_per_historical_vector() * 4); let slot = epoch.start_slot(MainnetEthSpec::slots_per_epoch()); test_active_index::(slot); } @@ -213,7 +213,7 @@ mod committees { spec: &ChainSpec, ) { let active_indices: Vec = (0..validator_count).collect(); - let seed = state.generate_seed(epoch, spec).unwrap(); + let seed = state.get_seed(epoch, spec).unwrap(); let relative_epoch = RelativeEpoch::from_epoch(state.current_epoch(), epoch).unwrap(); let start_shard = CommitteeCache::compute_start_shard(&state, relative_epoch, active_indices.len(), spec); @@ -244,7 +244,7 @@ mod committees { // of committees in an epoch. assert_eq!( crosslink_committees.len() as u64, - state.get_epoch_committee_count(relative_epoch).unwrap() / T::slots_per_epoch() + state.get_committee_count(relative_epoch).unwrap() / T::slots_per_epoch() ); for cc in crosslink_committees { @@ -306,11 +306,11 @@ mod committees { let (mut state, _keypairs): (BeaconState, _) = builder.build(); - let distinct_hashes: Vec = (0..T::latest_randao_mixes_length()) + let distinct_hashes: Vec = (0..T::epochs_per_historical_vector()) .into_iter() .map(|i| Hash256::from(i as u64)) .collect(); - state.latest_randao_mixes = FixedLenVec::from(distinct_hashes); + state.randao_mixes = FixedVector::from(distinct_hashes); state .build_committee_cache(RelativeEpoch::Previous, spec) diff --git a/eth2/types/src/chain_spec.rs b/eth2/types/src/chain_spec.rs index 6073fb32e..e42b628ac 100644 --- a/eth2/types/src/chain_spec.rs +++ b/eth2/types/src/chain_spec.rs @@ -5,7 +5,7 @@ use test_utils::{u8_from_hex_str, u8_to_hex_str}; /// Each of the BLS signature domains. /// -/// Spec v0.6.3 +/// Spec v0.8.1 pub enum Domain { BeaconProposer, Randao, @@ -17,24 +17,28 @@ pub enum Domain { /// Holds all the "constants" for a BeaconChain. /// -/// Spec v0.6.3 +/// Spec v0.8.1 #[derive(PartialEq, Debug, Clone, Serialize, Deserialize)] #[serde(default)] pub struct ChainSpec { + /* + * Constants + */ + #[serde(skip_serializing)] // skipped because Serde TOML has trouble with u64::max + pub far_future_epoch: Epoch, + pub base_rewards_per_epoch: u64, + pub deposit_contract_tree_depth: u64, + pub seconds_per_day: u64, + /* * Misc */ pub target_committee_size: usize, - pub max_indices_per_attestation: u64, pub min_per_epoch_churn_limit: u64, pub churn_limit_quotient: u64, - pub base_rewards_per_epoch: u64, pub shuffle_round_count: u8, - - /* - * Deposit contract - */ - pub deposit_contract_tree_depth: u64, + pub min_genesis_active_validator_count: u64, + pub min_genesis_time: u64, /* * Gwei values @@ -48,47 +52,30 @@ pub struct ChainSpec { * Initial Values */ pub genesis_slot: Slot, - // Skipped because serde TOML can't handle u64::max_value, the typical value for this field. - #[serde(skip_serializing)] - pub far_future_epoch: Epoch, - pub zero_hash: Hash256, #[serde(deserialize_with = "u8_from_hex_str", serialize_with = "u8_to_hex_str")] pub bls_withdrawal_prefix_byte: u8, /* * Time parameters */ - pub genesis_time: u64, pub seconds_per_slot: u64, pub min_attestation_inclusion_delay: u64, pub min_seed_lookahead: Epoch, pub activation_exit_delay: u64, - pub slots_per_eth1_voting_period: u64, - pub slots_per_historical_root: usize, pub min_validator_withdrawability_delay: Epoch, pub persistent_committee_period: u64, - pub max_crosslink_epochs: u64, + pub max_epochs_per_crosslink: u64, pub min_epochs_to_inactivity_penalty: u64, /* * Reward and penalty quotients */ - pub base_reward_quotient: u64, - pub whistleblowing_reward_quotient: u64, + pub base_reward_factor: u64, + pub whistleblower_reward_quotient: u64, pub proposer_reward_quotient: u64, pub inactivity_penalty_quotient: u64, pub min_slashing_penalty_quotient: u64, - /* - * Max operations per block - */ - pub max_proposer_slashings: u64, - pub max_attester_slashings: u64, - pub max_attestations: u64, - pub max_deposits: u64, - pub max_voluntary_exits: u64, - pub max_transfers: u64, - /* * Signature domains * @@ -111,7 +98,7 @@ pub struct ChainSpec { impl ChainSpec { /// Get the domain number that represents the fork meta and signature domain. /// - /// Spec v0.6.3 + /// Spec v0.8.1 pub fn get_domain(&self, epoch: Epoch, domain: Domain, fork: &Fork) -> u64 { let domain_constant = match domain { Domain::BeaconProposer => self.domain_beacon_proposer, @@ -122,8 +109,8 @@ impl ChainSpec { Domain::Transfer => self.domain_transfer, }; - let mut bytes: Vec = fork.get_fork_version(epoch).to_vec(); - bytes.append(&mut int_to_bytes4(domain_constant)); + let mut bytes: Vec = int_to_bytes4(domain_constant); + bytes.append(&mut fork.get_fork_version(epoch).to_vec()); let mut fork_and_domain = [0; 8]; fork_and_domain.copy_from_slice(&bytes); @@ -133,23 +120,26 @@ impl ChainSpec { /// Returns a `ChainSpec` compatible with the Ethereum Foundation specification. /// - /// Spec v0.6.3 + /// Spec v0.8.1 pub fn mainnet() -> Self { Self { + /* + * Constants + */ + far_future_epoch: Epoch::new(u64::max_value()), + base_rewards_per_epoch: 5, + deposit_contract_tree_depth: 32, + seconds_per_day: 86400, + /* * Misc */ target_committee_size: 128, - max_indices_per_attestation: 4096, min_per_epoch_churn_limit: 4, churn_limit_quotient: 65_536, - base_rewards_per_epoch: 5, shuffle_round_count: 90, - - /* - * Deposit contract - */ - deposit_contract_tree_depth: 32, + min_genesis_active_validator_count: 65_536, + min_genesis_time: 1_578_009_600, // Jan 3, 2020 /* * Gwei values @@ -163,44 +153,29 @@ impl ChainSpec { * Initial Values */ genesis_slot: Slot::new(0), - far_future_epoch: Epoch::new(u64::max_value()), - zero_hash: Hash256::zero(), bls_withdrawal_prefix_byte: 0, /* * Time parameters */ - genesis_time: u64::from(u32::max_value()), seconds_per_slot: 6, - min_attestation_inclusion_delay: 4, + min_attestation_inclusion_delay: 1, min_seed_lookahead: Epoch::new(1), activation_exit_delay: 4, - slots_per_eth1_voting_period: 1_024, - slots_per_historical_root: 8_192, min_validator_withdrawability_delay: Epoch::new(256), persistent_committee_period: 2_048, - max_crosslink_epochs: 64, + max_epochs_per_crosslink: 64, min_epochs_to_inactivity_penalty: 4, /* * Reward and penalty quotients */ - base_reward_quotient: 32, - whistleblowing_reward_quotient: 512, + base_reward_factor: 64, + whistleblower_reward_quotient: 512, proposer_reward_quotient: 8, inactivity_penalty_quotient: 33_554_432, min_slashing_penalty_quotient: 32, - /* - * Max operations per block - */ - max_proposer_slashings: 16, - max_attester_slashings: 1, - max_attestations: 128, - max_deposits: 16, - max_voluntary_exits: 16, - max_transfers: 0, - /* * Signature domains */ @@ -221,21 +196,18 @@ impl ChainSpec { /// Ethereum Foundation minimal spec, as defined here: /// - /// https://github.com/ethereum/eth2.0-specs/blob/v0.6.3/configs/constant_presets/minimal.yaml + /// https://github.com/ethereum/eth2.0-specs/blob/v0.8.1/configs/constant_presets/minimal.yaml /// - /// Spec v0.6.3 + /// Spec v0.8.1 pub fn minimal() -> Self { - let genesis_slot = Slot::new(0); - // Note: bootnodes to be updated when static nodes exist. let boot_nodes = vec![]; Self { target_committee_size: 4, shuffle_round_count: 10, - min_attestation_inclusion_delay: 2, - slots_per_eth1_voting_period: 16, - genesis_slot, + min_genesis_active_validator_count: 64, + max_epochs_per_crosslink: 4, chain_id: 2, // lighthouse testnet chain id boot_nodes, ..ChainSpec::mainnet() @@ -265,8 +237,8 @@ mod tests { let domain = spec.get_domain(epoch, domain_type, &fork); - let mut expected = fork.get_fork_version(epoch).to_vec(); - expected.append(&mut int_to_bytes4(raw_domain)); + let mut expected = int_to_bytes4(raw_domain); + expected.append(&mut fork.get_fork_version(epoch).to_vec()); assert_eq!(int_to_bytes8(domain), expected); } diff --git a/eth2/types/src/checkpoint.rs b/eth2/types/src/checkpoint.rs new file mode 100644 index 000000000..e753e9110 --- /dev/null +++ b/eth2/types/src/checkpoint.rs @@ -0,0 +1,39 @@ +use crate::test_utils::TestRandom; +use crate::{Epoch, Hash256}; +use serde_derive::{Deserialize, Serialize}; +use ssz_derive::{Decode, Encode}; +use test_random_derive::TestRandom; +use tree_hash::TreeHash; +use tree_hash_derive::{CachedTreeHash, SignedRoot, TreeHash}; + +/// Casper FFG checkpoint, used in attestations. +/// +/// Spec v0.8.0 +#[derive( + Debug, + Clone, + PartialEq, + Eq, + Default, + Hash, + Serialize, + Deserialize, + Encode, + Decode, + TreeHash, + CachedTreeHash, + TestRandom, + SignedRoot, +)] +pub struct Checkpoint { + pub epoch: Epoch, + pub root: Hash256, +} + +#[cfg(test)] +mod tests { + use super::*; + + ssz_tests!(Checkpoint); + cached_tree_hash_tests!(Checkpoint); +} diff --git a/eth2/types/src/compact_committee.rs b/eth2/types/src/compact_committee.rs new file mode 100644 index 000000000..546a705d5 --- /dev/null +++ b/eth2/types/src/compact_committee.rs @@ -0,0 +1,35 @@ +use crate::test_utils::TestRandom; +use crate::{EthSpec, PublicKey}; +use serde_derive::{Deserialize, Serialize}; +use ssz_derive::{Decode, Encode}; +use ssz_types::VariableList; +use test_random_derive::TestRandom; +use tree_hash_derive::{CachedTreeHash, TreeHash}; + +/// Spec v0.8.0 +#[derive( + Clone, + Debug, + PartialEq, + TreeHash, + CachedTreeHash, + Encode, + Decode, + Serialize, + Deserialize, + TestRandom, +)] +#[serde(bound = "T: EthSpec")] +pub struct CompactCommittee { + pub pubkeys: VariableList, + pub compact_validators: VariableList, +} + +impl Default for CompactCommittee { + fn default() -> Self { + Self { + pubkeys: VariableList::empty(), + compact_validators: VariableList::empty(), + } + } +} diff --git a/eth2/types/src/crosslink.rs b/eth2/types/src/crosslink.rs index d7d77ec4a..c3d30adcd 100644 --- a/eth2/types/src/crosslink.rs +++ b/eth2/types/src/crosslink.rs @@ -8,7 +8,7 @@ use tree_hash_derive::{CachedTreeHash, TreeHash}; /// Specifies the block hash for a shard at an epoch. /// -/// Spec v0.6.3 +/// Spec v0.8.0 #[derive( Debug, Clone, @@ -25,9 +25,12 @@ use tree_hash_derive::{CachedTreeHash, TreeHash}; TestRandom, )] pub struct Crosslink { - pub epoch: Epoch, - pub previous_crosslink_root: Hash256, - pub crosslink_data_root: Hash256, + pub shard: u64, + pub parent_root: Hash256, + // Crosslinking data + pub start_epoch: Epoch, + pub end_epoch: Epoch, + pub data_root: Hash256, } #[cfg(test)] diff --git a/eth2/types/src/deposit.rs b/eth2/types/src/deposit.rs index df814c297..17432e0e5 100644 --- a/eth2/types/src/deposit.rs +++ b/eth2/types/src/deposit.rs @@ -1,6 +1,6 @@ use crate::test_utils::TestRandom; use crate::*; -use fixed_len_vec::typenum::U32; +use ssz_types::typenum::U33; use serde_derive::{Deserialize, Serialize}; use ssz_derive::{Decode, Encode}; @@ -9,7 +9,7 @@ use tree_hash_derive::{CachedTreeHash, TreeHash}; /// A deposit to potentially become a beacon chain validator. /// -/// Spec v0.6.3 +/// Spec v0.8.0 #[derive( Debug, PartialEq, @@ -23,8 +23,7 @@ use tree_hash_derive::{CachedTreeHash, TreeHash}; TestRandom, )] pub struct Deposit { - pub proof: FixedLenVec, - pub index: u64, + pub proof: FixedVector, pub data: DepositData, } diff --git a/eth2/types/src/deposit_data.rs b/eth2/types/src/deposit_data.rs index 895e47e59..8e5088889 100644 --- a/eth2/types/src/deposit_data.rs +++ b/eth2/types/src/deposit_data.rs @@ -10,7 +10,7 @@ use tree_hash_derive::{CachedTreeHash, SignedRoot, TreeHash}; /// The data supplied by the user to the deposit contract. /// -/// Spec v0.6.3 +/// Spec v0.8.0 #[derive( Debug, PartialEq, @@ -35,7 +35,7 @@ pub struct DepositData { impl DepositData { /// Generate the signature for a given DepositData details. /// - /// Spec v0.6.3 + /// Spec v0.8.1 pub fn create_signature( &self, secret_key: &SecretKey, diff --git a/eth2/types/src/eth1_data.rs b/eth2/types/src/eth1_data.rs index dedaf9f00..3b81175ba 100644 --- a/eth2/types/src/eth1_data.rs +++ b/eth2/types/src/eth1_data.rs @@ -8,7 +8,7 @@ use tree_hash_derive::{CachedTreeHash, TreeHash}; /// Contains data obtained from the Eth1 chain. /// -/// Spec v0.6.3 +/// Spec v0.8.1 #[derive( Debug, PartialEq, diff --git a/eth2/types/src/fork.rs b/eth2/types/src/fork.rs index 4546fd5f7..be75d5ca2 100644 --- a/eth2/types/src/fork.rs +++ b/eth2/types/src/fork.rs @@ -10,7 +10,7 @@ use tree_hash_derive::{CachedTreeHash, TreeHash}; /// Specifies a fork of the `BeaconChain`, to prevent replay attacks. /// -/// Spec v0.6.3 +/// Spec v0.8.1 #[derive( Debug, Clone, @@ -35,7 +35,7 @@ pub struct Fork { impl Fork { /// Initialize the `Fork` from the genesis parameters in the `spec`. /// - /// Spec v0.6.3 + /// Spec v0.8.1 pub fn genesis(genesis_epoch: Epoch) -> Self { Self { previous_version: [0; 4], @@ -46,7 +46,7 @@ impl Fork { /// Return the fork version of the given ``epoch``. /// - /// Spec v0.6.3 + /// Spec v0.8.1 pub fn get_fork_version(&self, epoch: Epoch) -> [u8; 4] { if epoch < self.epoch { return self.previous_version; diff --git a/eth2/types/src/historical_batch.rs b/eth2/types/src/historical_batch.rs index 30206ae63..03e8316ba 100644 --- a/eth2/types/src/historical_batch.rs +++ b/eth2/types/src/historical_batch.rs @@ -1,15 +1,15 @@ use crate::test_utils::TestRandom; use crate::*; -use fixed_len_vec::FixedLenVec; use serde_derive::{Deserialize, Serialize}; use ssz_derive::{Decode, Encode}; +use ssz_types::FixedVector; use test_random_derive::TestRandom; use tree_hash_derive::{CachedTreeHash, TreeHash}; /// Historical block and state roots. /// -/// Spec v0.6.3 +/// Spec v0.8.1 #[derive( Debug, Clone, @@ -23,8 +23,8 @@ use tree_hash_derive::{CachedTreeHash, TreeHash}; TestRandom, )] pub struct HistoricalBatch { - pub block_roots: FixedLenVec, - pub state_roots: FixedLenVec, + pub block_roots: FixedVector, + pub state_roots: FixedVector, } #[cfg(test)] diff --git a/eth2/types/src/indexed_attestation.rs b/eth2/types/src/indexed_attestation.rs index 1758521e1..9c00467b1 100644 --- a/eth2/types/src/indexed_attestation.rs +++ b/eth2/types/src/indexed_attestation.rs @@ -1,4 +1,4 @@ -use crate::{test_utils::TestRandom, AggregateSignature, AttestationData}; +use crate::{test_utils::TestRandom, AggregateSignature, AttestationData, EthSpec, VariableList}; use serde_derive::{Deserialize, Serialize}; use ssz_derive::{Decode, Encode}; use test_random_derive::TestRandom; @@ -9,7 +9,7 @@ use tree_hash_derive::{CachedTreeHash, SignedRoot, TreeHash}; /// /// To be included in an `AttesterSlashing`. /// -/// Spec v0.6.3 +/// Spec v0.8.0 #[derive( Debug, PartialEq, @@ -23,29 +23,30 @@ use tree_hash_derive::{CachedTreeHash, SignedRoot, TreeHash}; TestRandom, SignedRoot, )] -pub struct IndexedAttestation { +#[serde(bound = "T: EthSpec")] +pub struct IndexedAttestation { /// Lists validator registry indices, not committee indices. - pub custody_bit_0_indices: Vec, - pub custody_bit_1_indices: Vec, + pub custody_bit_0_indices: VariableList, + pub custody_bit_1_indices: VariableList, pub data: AttestationData, #[signed_root(skip_hashing)] pub signature: AggregateSignature, } -impl IndexedAttestation { +impl IndexedAttestation { /// Check if ``attestation_data_1`` and ``attestation_data_2`` have the same target. /// - /// Spec v0.6.3 - pub fn is_double_vote(&self, other: &IndexedAttestation) -> bool { - self.data.target_epoch == other.data.target_epoch && self.data != other.data + /// Spec v0.8.0 + pub fn is_double_vote(&self, other: &Self) -> bool { + self.data.target.epoch == other.data.target.epoch && self.data != other.data } /// Check if ``attestation_data_1`` surrounds ``attestation_data_2``. /// - /// Spec v0.6.3 - pub fn is_surround_vote(&self, other: &IndexedAttestation) -> bool { - self.data.source_epoch < other.data.source_epoch - && other.data.target_epoch < self.data.target_epoch + /// Spec v0.8.0 + pub fn is_surround_vote(&self, other: &Self) -> bool { + self.data.source.epoch < other.data.source.epoch + && other.data.target.epoch < self.data.target.epoch } } @@ -54,6 +55,7 @@ mod tests { use super::*; use crate::slot_epoch::Epoch; use crate::test_utils::{SeedableRng, TestRandom, XorShiftRng}; + use crate::MainnetEthSpec; #[test] pub fn test_is_double_vote_true() { @@ -121,15 +123,18 @@ mod tests { ); } - ssz_tests!(IndexedAttestation); - cached_tree_hash_tests!(IndexedAttestation); + ssz_tests!(IndexedAttestation); + cached_tree_hash_tests!(IndexedAttestation); - fn create_indexed_attestation(target_epoch: u64, source_epoch: u64) -> IndexedAttestation { + fn create_indexed_attestation( + target_epoch: u64, + source_epoch: u64, + ) -> IndexedAttestation { let mut rng = XorShiftRng::from_seed([42; 16]); let mut indexed_vote = IndexedAttestation::random_for_test(&mut rng); - indexed_vote.data.source_epoch = Epoch::new(source_epoch); - indexed_vote.data.target_epoch = Epoch::new(target_epoch); + indexed_vote.data.source.epoch = Epoch::new(source_epoch); + indexed_vote.data.target.epoch = Epoch::new(target_epoch); indexed_vote } } diff --git a/eth2/types/src/lib.rs b/eth2/types/src/lib.rs index 2406c3a18..a8dd04a45 100644 --- a/eth2/types/src/lib.rs +++ b/eth2/types/src/lib.rs @@ -1,5 +1,8 @@ //! Ethereum 2.0 types +// Required for big type-level numbers +#![recursion_limit = "128"] + #[macro_use] pub mod test_utils; @@ -13,6 +16,8 @@ pub mod beacon_block_body; pub mod beacon_block_header; pub mod beacon_state; pub mod chain_spec; +pub mod checkpoint; +pub mod compact_committee; pub mod crosslink; pub mod crosslink_committee; pub mod deposit; @@ -46,6 +51,8 @@ pub use crate::beacon_block_body::BeaconBlockBody; pub use crate::beacon_block_header::BeaconBlockHeader; pub use crate::beacon_state::{Error as BeaconStateError, *}; pub use crate::chain_spec::{ChainSpec, Domain}; +pub use crate::checkpoint::Checkpoint; +pub use crate::compact_committee::CompactCommittee; pub use crate::crosslink::Crosslink; pub use crate::crosslink_committee::{CrosslinkCommittee, OwnedCrosslinkCommittee}; pub use crate::deposit::Deposit; @@ -71,8 +78,6 @@ pub type CrosslinkCommittees = Vec<(Committee, u64)>; pub type Hash256 = H256; pub type Address = H160; pub type EthBalance = U256; -pub type Bitfield = boolean_bitfield::BooleanBitfield; -pub type BitfieldError = boolean_bitfield::Error; /// Maps a (slot, shard_id) to attestation_indices. pub type AttesterMap = HashMap<(u64, u64), Vec>; @@ -81,4 +86,4 @@ pub type AttesterMap = HashMap<(u64, u64), Vec>; pub type ProposerMap = HashMap; pub use bls::{AggregatePublicKey, AggregateSignature, Keypair, PublicKey, SecretKey, Signature}; -pub use fixed_len_vec::{typenum, typenum::Unsigned, FixedLenVec}; +pub use ssz_types::{typenum, typenum::Unsigned, BitList, BitVector, FixedVector, VariableList}; diff --git a/eth2/types/src/pending_attestation.rs b/eth2/types/src/pending_attestation.rs index 53e381a6f..fdf36d462 100644 --- a/eth2/types/src/pending_attestation.rs +++ b/eth2/types/src/pending_attestation.rs @@ -1,5 +1,5 @@ use crate::test_utils::TestRandom; -use crate::{AttestationData, Bitfield}; +use crate::{AttestationData, BitList, EthSpec}; use serde_derive::{Deserialize, Serialize}; use ssz_derive::{Decode, Encode}; @@ -8,7 +8,7 @@ use tree_hash_derive::{CachedTreeHash, TreeHash}; /// An attestation that has been included in the state but not yet fully processed. /// -/// Spec v0.6.3 +/// Spec v0.8.0 #[derive( Debug, Clone, @@ -21,8 +21,8 @@ use tree_hash_derive::{CachedTreeHash, TreeHash}; CachedTreeHash, TestRandom, )] -pub struct PendingAttestation { - pub aggregation_bitfield: Bitfield, +pub struct PendingAttestation { + pub aggregation_bits: BitList, pub data: AttestationData, pub inclusion_delay: u64, pub proposer_index: u64, @@ -31,7 +31,8 @@ pub struct PendingAttestation { #[cfg(test)] mod tests { use super::*; + use crate::*; - ssz_tests!(PendingAttestation); - cached_tree_hash_tests!(PendingAttestation); + ssz_tests!(PendingAttestation); + cached_tree_hash_tests!(PendingAttestation); } diff --git a/eth2/types/src/proposer_slashing.rs b/eth2/types/src/proposer_slashing.rs index 591fdad49..d21bef99c 100644 --- a/eth2/types/src/proposer_slashing.rs +++ b/eth2/types/src/proposer_slashing.rs @@ -8,7 +8,7 @@ use tree_hash_derive::{CachedTreeHash, TreeHash}; /// Two conflicting proposals from the same proposer (validator). /// -/// Spec v0.6.3 +/// Spec v0.8.1 #[derive( Debug, PartialEq, diff --git a/eth2/types/src/relative_epoch.rs b/eth2/types/src/relative_epoch.rs index eeeca65f3..321919dfc 100644 --- a/eth2/types/src/relative_epoch.rs +++ b/eth2/types/src/relative_epoch.rs @@ -9,7 +9,7 @@ pub enum Error { /// Defines the epochs relative to some epoch. Most useful when referring to the committees prior /// to and following some epoch. /// -/// Spec v0.6.3 +/// Spec v0.8.1 #[derive(Debug, PartialEq, Clone, Copy)] pub enum RelativeEpoch { /// The prior epoch. @@ -23,7 +23,7 @@ pub enum RelativeEpoch { impl RelativeEpoch { /// Returns the `epoch` that `self` refers to, with respect to the `base` epoch. /// - /// Spec v0.6.3 + /// Spec v0.8.1 pub fn into_epoch(self, base: Epoch) -> Epoch { match self { // Due to saturating nature of epoch, check for current first. @@ -40,7 +40,7 @@ impl RelativeEpoch { /// - `EpochTooLow` when `other` is more than 1 prior to `base`. /// - `EpochTooHigh` when `other` is more than 1 after `base`. /// - /// Spec v0.6.3 + /// Spec v0.8.1 pub fn from_epoch(base: Epoch, other: Epoch) -> Result { // Due to saturating nature of epoch, check for current first. if other == base { diff --git a/eth2/types/src/test_utils/builders/testing_attestation_builder.rs b/eth2/types/src/test_utils/builders/testing_attestation_builder.rs index 27fae4e76..f794919f3 100644 --- a/eth2/types/src/test_utils/builders/testing_attestation_builder.rs +++ b/eth2/types/src/test_utils/builders/testing_attestation_builder.rs @@ -5,14 +5,14 @@ use tree_hash::TreeHash; /// Builds an attestation to be used for testing purposes. /// /// This struct should **never be used for production purposes.** -pub struct TestingAttestationBuilder { +pub struct TestingAttestationBuilder { committee: Vec, - attestation: Attestation, + attestation: Attestation, } -impl TestingAttestationBuilder { +impl TestingAttestationBuilder { /// Create a new attestation builder. - pub fn new( + pub fn new( state: &BeaconState, committee: &[usize], slot: Slot, @@ -21,18 +21,18 @@ impl TestingAttestationBuilder { ) -> Self { let data_builder = TestingAttestationDataBuilder::new(state, shard, slot, spec); - let mut aggregation_bitfield = Bitfield::new(); - let mut custody_bitfield = Bitfield::new(); + let mut aggregation_bits = BitList::with_capacity(committee.len()).unwrap(); + let mut custody_bits = BitList::with_capacity(committee.len()).unwrap(); for (i, _) in committee.iter().enumerate() { - custody_bitfield.set(i, false); - aggregation_bitfield.set(i, false); + custody_bits.set(i, false).unwrap(); + aggregation_bits.set(i, false).unwrap(); } let attestation = Attestation { - aggregation_bitfield, + aggregation_bits, data: data_builder.build(), - custody_bitfield, + custody_bits, signature: AggregateSignature::new(), }; @@ -52,7 +52,8 @@ impl TestingAttestationBuilder { secret_keys: &[&SecretKey], fork: &Fork, spec: &ChainSpec, - ) { + custody_bit: bool, + ) -> &mut Self { assert_eq!( signing_validators.len(), secret_keys.len(), @@ -67,17 +68,25 @@ impl TestingAttestationBuilder { .expect("Signing validator not in attestation committee"); self.attestation - .aggregation_bitfield - .set(committee_index, true); + .aggregation_bits + .set(committee_index, true) + .unwrap(); + + if custody_bit { + self.attestation + .custody_bits + .set(committee_index, true) + .unwrap(); + } let message = AttestationDataAndCustodyBit { data: self.attestation.data.clone(), - custody_bit: false, + custody_bit, } .tree_hash_root(); let domain = spec.get_domain( - self.attestation.data.target_epoch, + self.attestation.data.target.epoch, Domain::Attestation, fork, ); @@ -85,10 +94,12 @@ impl TestingAttestationBuilder { let signature = Signature::new(&message, domain, secret_keys[key_index]); self.attestation.signature.add(&signature) } + + self } /// Consume the builder and return the attestation. - pub fn build(self) -> Attestation { + pub fn build(self) -> Attestation { self.attestation } } diff --git a/eth2/types/src/test_utils/builders/testing_attestation_data_builder.rs b/eth2/types/src/test_utils/builders/testing_attestation_data_builder.rs index 0b4aa2987..ac45abe0f 100644 --- a/eth2/types/src/test_utils/builders/testing_attestation_data_builder.rs +++ b/eth2/types/src/test_utils/builders/testing_attestation_data_builder.rs @@ -20,60 +20,57 @@ impl TestingAttestationDataBuilder { let current_epoch = state.current_epoch(); let previous_epoch = state.previous_epoch(); - let is_previous_epoch = - state.slot.epoch(T::slots_per_epoch()) != slot.epoch(T::slots_per_epoch()); + let is_previous_epoch = slot.epoch(T::slots_per_epoch()) != current_epoch; - let source_epoch = if is_previous_epoch { - state.previous_justified_epoch + let source = if is_previous_epoch { + state.previous_justified_checkpoint.clone() } else { - state.current_justified_epoch + state.current_justified_checkpoint.clone() }; - let target_epoch = if is_previous_epoch { - state.previous_epoch() + let target = if is_previous_epoch { + Checkpoint { + epoch: previous_epoch, + root: *state + .get_block_root(previous_epoch.start_slot(T::slots_per_epoch())) + .unwrap(), + } } else { - state.current_epoch() + Checkpoint { + epoch: current_epoch, + root: *state + .get_block_root(current_epoch.start_slot(T::slots_per_epoch())) + .unwrap(), + } }; - let target_root = if is_previous_epoch { - *state - .get_block_root(previous_epoch.start_slot(T::slots_per_epoch())) - .unwrap() + let parent_crosslink = if is_previous_epoch { + state.get_previous_crosslink(shard).unwrap() } else { - *state - .get_block_root(current_epoch.start_slot(T::slots_per_epoch())) - .unwrap() + state.get_current_crosslink(shard).unwrap() }; - let previous_crosslink_root = if is_previous_epoch { - Hash256::from_slice( - &state - .get_previous_crosslink(shard) - .unwrap() - .tree_hash_root(), - ) - } else { - Hash256::from_slice(&state.get_current_crosslink(shard).unwrap().tree_hash_root()) + let crosslink = Crosslink { + shard, + parent_root: Hash256::from_slice(&parent_crosslink.tree_hash_root()), + start_epoch: parent_crosslink.end_epoch, + end_epoch: std::cmp::min( + target.epoch, + parent_crosslink.end_epoch + spec.max_epochs_per_crosslink, + ), + data_root: Hash256::zero(), }; - let source_root = *state - .get_block_root(source_epoch.start_slot(T::slots_per_epoch())) - .unwrap(); - let data = AttestationData { // LMD GHOST vote beacon_block_root: *state.get_block_root(slot).unwrap(), // FFG Vote - source_epoch, - source_root, - target_epoch, - target_root, + source, + target, // Crosslink vote - shard, - previous_crosslink_root, - crosslink_data_root: spec.zero_hash, + crosslink, }; Self { data } diff --git a/eth2/types/src/test_utils/builders/testing_attester_slashing_builder.rs b/eth2/types/src/test_utils/builders/testing_attester_slashing_builder.rs index 6cde3f145..39673ef38 100644 --- a/eth2/types/src/test_utils/builders/testing_attester_slashing_builder.rs +++ b/eth2/types/src/test_utils/builders/testing_attester_slashing_builder.rs @@ -17,7 +17,7 @@ impl TestingAttesterSlashingBuilder { /// - `domain: Domain` /// /// Where domain is a domain "constant" (e.g., `spec.domain_attestation`). - pub fn double_vote(validator_indices: &[u64], signer: F) -> AttesterSlashing + pub fn double_vote(validator_indices: &[u64], signer: F) -> AttesterSlashing where F: Fn(u64, &[u8], Epoch, Domain) -> Signature, { @@ -26,38 +26,49 @@ impl TestingAttesterSlashingBuilder { let epoch_2 = Epoch::new(2); let hash_1 = Hash256::from_low_u64_le(1); let hash_2 = Hash256::from_low_u64_le(2); + let checkpoint_1 = Checkpoint { + epoch: epoch_1, + root: hash_1, + }; + let checkpoint_2 = Checkpoint { + epoch: epoch_1, + root: hash_2, + }; + let crosslink = Crosslink { + shard, + parent_root: hash_1, + start_epoch: epoch_1, + end_epoch: epoch_2, + data_root: hash_1, + }; let data_1 = AttestationData { beacon_block_root: hash_1, - source_epoch: epoch_1, - source_root: hash_1, - target_epoch: epoch_2, - target_root: hash_1, - shard, - previous_crosslink_root: hash_1, - crosslink_data_root: hash_1, + source: checkpoint_1.clone(), + target: checkpoint_1, + crosslink, }; let data_2 = AttestationData { - beacon_block_root: hash_2, + target: checkpoint_2, ..data_1.clone() }; let mut attestation_1 = IndexedAttestation { - custody_bit_0_indices: validator_indices.to_vec(), - custody_bit_1_indices: vec![], + custody_bit_0_indices: validator_indices.to_vec().into(), + custody_bit_1_indices: VariableList::empty(), data: data_1, signature: AggregateSignature::new(), }; let mut attestation_2 = IndexedAttestation { - custody_bit_0_indices: validator_indices.to_vec(), - custody_bit_1_indices: vec![], + custody_bit_0_indices: validator_indices.to_vec().into(), + custody_bit_1_indices: VariableList::empty(), data: data_2, signature: AggregateSignature::new(), }; - let add_signatures = |attestation: &mut IndexedAttestation| { + let add_signatures = |attestation: &mut IndexedAttestation| { // All validators sign with a `false` custody bit. let attestation_data_and_custody_bit = AttestationDataAndCustodyBit { data: attestation.data.clone(), diff --git a/eth2/types/src/test_utils/builders/testing_beacon_block_builder.rs b/eth2/types/src/test_utils/builders/testing_beacon_block_builder.rs index 36bbe2d37..79e886f68 100644 --- a/eth2/types/src/test_utils/builders/testing_beacon_block_builder.rs +++ b/eth2/types/src/test_utils/builders/testing_beacon_block_builder.rs @@ -11,11 +11,11 @@ use tree_hash::{SignedRoot, TreeHash}; /// Builds a beacon block to be used for testing purposes. /// /// This struct should **never be used for production purposes.** -pub struct TestingBeaconBlockBuilder { - pub block: BeaconBlock, +pub struct TestingBeaconBlockBuilder { + pub block: BeaconBlock, } -impl TestingBeaconBlockBuilder { +impl TestingBeaconBlockBuilder { /// Create a new builder from genesis. pub fn new(spec: &ChainSpec) -> Self { Self { @@ -24,8 +24,8 @@ impl TestingBeaconBlockBuilder { } /// Set the previous block root - pub fn set_previous_block_root(&mut self, root: Hash256) { - self.block.previous_block_root = root; + pub fn set_parent_root(&mut self, root: Hash256) { + self.block.parent_root = root; } /// Set the slot of the block. @@ -36,7 +36,7 @@ impl TestingBeaconBlockBuilder { /// Signs the block. /// /// Modifying the block after signing may invalidate the signature. - pub fn sign(&mut self, sk: &SecretKey, fork: &Fork, spec: &ChainSpec) { + pub fn sign(&mut self, sk: &SecretKey, fork: &Fork, spec: &ChainSpec) { let message = self.block.signed_root(); let epoch = self.block.slot.epoch(T::slots_per_epoch()); let domain = spec.get_domain(epoch, Domain::BeaconProposer, fork); @@ -46,7 +46,7 @@ impl TestingBeaconBlockBuilder { /// Sets the randao to be a signature across the blocks epoch. /// /// Modifying the block's slot after signing may invalidate the signature. - pub fn set_randao_reveal(&mut self, sk: &SecretKey, fork: &Fork, spec: &ChainSpec) { + pub fn set_randao_reveal(&mut self, sk: &SecretKey, fork: &Fork, spec: &ChainSpec) { let epoch = self.block.slot.epoch(T::slots_per_epoch()); let message = epoch.tree_hash_root(); let domain = spec.get_domain(epoch, Domain::Randao, fork); @@ -59,7 +59,7 @@ impl TestingBeaconBlockBuilder { } /// Inserts a signed, valid `ProposerSlashing` for the validator. - pub fn insert_proposer_slashing( + pub fn insert_proposer_slashing( &mut self, validator_index: u64, secret_key: &SecretKey, @@ -68,7 +68,11 @@ impl TestingBeaconBlockBuilder { ) { let proposer_slashing = build_proposer_slashing::(validator_index, secret_key, fork, spec); - self.block.body.proposer_slashings.push(proposer_slashing); + self.block + .body + .proposer_slashings + .push(proposer_slashing) + .unwrap(); } /// Inserts a signed, valid `AttesterSlashing` for each validator index in `validator_indices`. @@ -81,7 +85,11 @@ impl TestingBeaconBlockBuilder { ) { let attester_slashing = build_double_vote_attester_slashing(validator_indices, secret_keys, fork, spec); - self.block.body.attester_slashings.push(attester_slashing); + self.block + .body + .attester_slashings + .push(attester_slashing) + .unwrap(); } /// Fills the block with `num_attestations` attestations. @@ -93,7 +101,7 @@ impl TestingBeaconBlockBuilder { /// /// Note: the signed messages of the split committees will be identical -- it would be possible /// to aggregate these split attestations. - pub fn insert_attestations( + pub fn insert_attestations( &mut self, state: &BeaconState, secret_keys: &[&SecretKey], @@ -160,7 +168,7 @@ impl TestingBeaconBlockBuilder { } } - let mut attestations: Vec = committees + let attestations: Vec<_> = committees .par_iter() .map(|(slot, committee, signing_validators, shard)| { let mut builder = @@ -170,29 +178,37 @@ impl TestingBeaconBlockBuilder { .iter() .map(|validator_index| secret_keys[*validator_index]) .collect(); - builder.sign(signing_validators, &signing_secret_keys, &state.fork, spec); + builder.sign( + signing_validators, + &signing_secret_keys, + &state.fork, + spec, + false, + ); builder.build() }) .collect(); - self.block.body.attestations.append(&mut attestations); + for attestation in attestations { + self.block.body.attestations.push(attestation).unwrap(); + } Ok(()) } /// Insert a `Valid` deposit into the state. - pub fn insert_deposit( + pub fn insert_deposit( &mut self, amount: u64, - index: u64, + // TODO: deal with the fact deposits no longer have explicit indices + _index: u64, state: &BeaconState, spec: &ChainSpec, ) { let keypair = Keypair::random(); let mut builder = TestingDepositBuilder::new(keypair.pk.clone(), amount); - builder.set_index(index); builder.sign( &keypair, state.slot.epoch(T::slots_per_epoch()), @@ -200,11 +216,11 @@ impl TestingBeaconBlockBuilder { spec, ); - self.block.body.deposits.push(builder.build()) + self.block.body.deposits.push(builder.build()).unwrap() } /// Insert a `Valid` exit into the state. - pub fn insert_exit( + pub fn insert_exit( &mut self, state: &BeaconState, validator_index: u64, @@ -218,14 +234,18 @@ impl TestingBeaconBlockBuilder { builder.sign(secret_key, &state.fork, spec); - self.block.body.voluntary_exits.push(builder.build()) + self.block + .body + .voluntary_exits + .push(builder.build()) + .unwrap() } /// Insert a `Valid` transfer into the state. /// /// Note: this will set the validator to be withdrawable by directly modifying the state /// validator registry. This _may_ cause problems historic hashes, etc. - pub fn insert_transfer( + pub fn insert_transfer( &mut self, state: &BeaconState, from: u64, @@ -237,22 +257,17 @@ impl TestingBeaconBlockBuilder { let mut builder = TestingTransferBuilder::new(from, to, amount, state.slot); builder.sign::(keypair, &state.fork, spec); - self.block.body.transfers.push(builder.build()) + self.block.body.transfers.push(builder.build()).unwrap() } /// Signs and returns the block, consuming the builder. - pub fn build( - mut self, - sk: &SecretKey, - fork: &Fork, - spec: &ChainSpec, - ) -> BeaconBlock { - self.sign::(sk, fork, spec); + pub fn build(mut self, sk: &SecretKey, fork: &Fork, spec: &ChainSpec) -> BeaconBlock { + self.sign(sk, fork, spec); self.block } /// Returns the block, consuming the builder. - pub fn build_without_signing(self) -> BeaconBlock { + pub fn build_without_signing(self) -> BeaconBlock { self.block } } @@ -277,12 +292,12 @@ fn build_proposer_slashing( /// Builds an `AttesterSlashing` for some `validator_indices`. /// /// Signs the message using a `BeaconChainHarness`. -fn build_double_vote_attester_slashing( +fn build_double_vote_attester_slashing( validator_indices: &[u64], secret_keys: &[&SecretKey], fork: &Fork, spec: &ChainSpec, -) -> AttesterSlashing { +) -> AttesterSlashing { let signer = |validator_index: u64, message: &[u8], epoch: Epoch, domain: Domain| { let key_index = validator_indices .iter() diff --git a/eth2/types/src/test_utils/builders/testing_beacon_state_builder.rs b/eth2/types/src/test_utils/builders/testing_beacon_state_builder.rs index e949c26b2..a9383242f 100644 --- a/eth2/types/src/test_utils/builders/testing_beacon_state_builder.rs +++ b/eth2/types/src/test_utils/builders/testing_beacon_state_builder.rs @@ -120,10 +120,11 @@ impl TestingBeaconStateBuilder { effective_balance: starting_balance, } }) - .collect(); + .collect::>() + .into(); - let mut state = BeaconState::genesis( - spec.genesis_time, + let mut state = BeaconState::new( + spec.min_genesis_time, Eth1Data { deposit_root: Hash256::zero(), deposit_count: 0, @@ -132,10 +133,10 @@ impl TestingBeaconStateBuilder { spec, ); - let balances = vec![starting_balance; validator_count]; + let balances = vec![starting_balance; validator_count].into(); debug!("Importing {} existing validators...", validator_count); - state.validator_registry = validators; + state.validators = validators; state.balances = balances; debug!("BeaconState initialized."); @@ -177,11 +178,11 @@ impl TestingBeaconStateBuilder { // NOTE: we could update the latest start shard here - state.previous_justified_epoch = epoch - 3; - state.current_justified_epoch = epoch - 2; - state.justification_bitfield = u64::max_value(); + state.previous_justified_checkpoint.epoch = epoch - 3; + state.current_justified_checkpoint.epoch = epoch - 2; + state.justification_bits = BitVector::from_bytes(vec![0b0000_1111]).unwrap(); - state.finalized_epoch = epoch - 3; + state.finalized_checkpoint.epoch = epoch - 3; } /// Creates a full set of attestations for the `BeaconState`. Each attestation has full @@ -228,10 +229,10 @@ impl TestingBeaconStateBuilder { builder.add_committee_participation(signers); let attestation = builder.build(); - if attestation.data.target_epoch < state.current_epoch() { - state.previous_epoch_attestations.push(attestation) + if attestation.data.target.epoch < state.current_epoch() { + state.previous_epoch_attestations.push(attestation).unwrap() } else { - state.current_epoch_attestations.push(attestation) + state.current_epoch_attestations.push(attestation).unwrap() } } } diff --git a/eth2/types/src/test_utils/builders/testing_deposit_builder.rs b/eth2/types/src/test_utils/builders/testing_deposit_builder.rs index aec7ae48f..df3dcffa1 100644 --- a/eth2/types/src/test_utils/builders/testing_deposit_builder.rs +++ b/eth2/types/src/test_utils/builders/testing_deposit_builder.rs @@ -13,7 +13,6 @@ impl TestingDepositBuilder { pub fn new(pubkey: PublicKey, amount: u64) -> Self { let deposit = Deposit { proof: vec![].into(), - index: 0, data: DepositData { pubkey, withdrawal_credentials: Hash256::zero(), @@ -25,11 +24,6 @@ impl TestingDepositBuilder { Self { deposit } } - /// Set the `deposit.index` value. - pub fn set_index(&mut self, index: u64) { - self.deposit.index = index; - } - /// Signs the deposit, also setting the following values: /// /// - `pubkey` to the signing pubkey. diff --git a/eth2/types/src/test_utils/builders/testing_pending_attestation_builder.rs b/eth2/types/src/test_utils/builders/testing_pending_attestation_builder.rs index d4ba9c826..14fe9a5f9 100644 --- a/eth2/types/src/test_utils/builders/testing_pending_attestation_builder.rs +++ b/eth2/types/src/test_utils/builders/testing_pending_attestation_builder.rs @@ -4,23 +4,18 @@ use crate::*; /// Builds an `AttesterSlashing` to be used for testing purposes. /// /// This struct should **never be used for production purposes.** -pub struct TestingPendingAttestationBuilder { - pending_attestation: PendingAttestation, +pub struct TestingPendingAttestationBuilder { + pending_attestation: PendingAttestation, } -impl TestingPendingAttestationBuilder { +impl TestingPendingAttestationBuilder { /// Create a new valid* `PendingAttestation` for the given parameters. /// /// The `inclusion_delay` will be set to `MIN_ATTESTATION_INCLUSION_DELAY`. /// /// * The aggregation and custody bitfields will all be empty, they need to be set with /// `Self::add_committee_participation`. - pub fn new( - state: &BeaconState, - shard: u64, - slot: Slot, - spec: &ChainSpec, - ) -> Self { + pub fn new(state: &BeaconState, shard: u64, slot: Slot, spec: &ChainSpec) -> Self { let data_builder = TestingAttestationDataBuilder::new(state, shard, slot, spec); let relative_epoch = @@ -31,7 +26,8 @@ impl TestingPendingAttestationBuilder { .unwrap() as u64; let pending_attestation = PendingAttestation { - aggregation_bitfield: Bitfield::new(), + aggregation_bits: BitList::with_capacity(T::MaxValidatorsPerCommittee::to_usize()) + .unwrap(), data: data_builder.build(), inclusion_delay: spec.min_attestation_inclusion_delay, proposer_index, @@ -47,17 +43,17 @@ impl TestingPendingAttestationBuilder { /// The `PendingAttestation` will appear to be signed by each committee member who's value in /// `signers` is true. pub fn add_committee_participation(&mut self, signers: Vec) { - let mut aggregation_bitfield = Bitfield::new(); + let mut aggregation_bits = BitList::with_capacity(signers.len()).unwrap(); for (i, signed) in signers.iter().enumerate() { - aggregation_bitfield.set(i, *signed); + aggregation_bits.set(i, *signed).unwrap(); } - self.pending_attestation.aggregation_bitfield = aggregation_bitfield; + self.pending_attestation.aggregation_bits = aggregation_bits; } /// Returns the `PendingAttestation`, consuming the builder. - pub fn build(self) -> PendingAttestation { + pub fn build(self) -> PendingAttestation { self.pending_attestation } } diff --git a/eth2/types/src/test_utils/builders/testing_proposer_slashing_builder.rs b/eth2/types/src/test_utils/builders/testing_proposer_slashing_builder.rs index 67668d130..6c72b520f 100644 --- a/eth2/types/src/test_utils/builders/testing_proposer_slashing_builder.rs +++ b/eth2/types/src/test_utils/builders/testing_proposer_slashing_builder.rs @@ -28,14 +28,14 @@ impl TestingProposerSlashingBuilder { let mut header_1 = BeaconBlockHeader { slot, - previous_block_root: hash_1, + parent_root: hash_1, state_root: hash_1, - block_body_root: hash_1, + body_root: hash_1, signature: Signature::empty_signature(), }; let mut header_2 = BeaconBlockHeader { - previous_block_root: hash_2, + parent_root: hash_2, ..header_1.clone() }; diff --git a/eth2/types/src/test_utils/macros.rs b/eth2/types/src/test_utils/macros.rs index b060882f2..f11cd8bac 100644 --- a/eth2/types/src/test_utils/macros.rs +++ b/eth2/types/src/test_utils/macros.rs @@ -1,17 +1,18 @@ #[cfg(test)] #[macro_export] macro_rules! ssz_tests { - ($type: ident) => { + ($type: ty) => { #[test] pub fn test_ssz_round_trip() { use crate::test_utils::{SeedableRng, TestRandom, XorShiftRng}; use ssz::{ssz_encode, Decode}; let mut rng = XorShiftRng::from_seed([42; 16]); - let original = $type::random_for_test(&mut rng); + let original = <$type>::random_for_test(&mut rng); let bytes = ssz_encode(&original); - let decoded = $type::from_ssz_bytes(&bytes).unwrap(); + println!("bytes length: {}", bytes.len()); + let decoded = <$type>::from_ssz_bytes(&bytes).unwrap(); assert_eq!(original, decoded); } @@ -22,7 +23,7 @@ macro_rules! ssz_tests { use tree_hash::TreeHash; let mut rng = XorShiftRng::from_seed([42; 16]); - let original = $type::random_for_test(&mut rng); + let original = <$type>::random_for_test(&mut rng); let result = original.tree_hash_root(); @@ -36,8 +37,10 @@ macro_rules! ssz_tests { #[cfg(test)] #[macro_export] macro_rules! cached_tree_hash_tests { - ($type: ident) => { + ($type: ty) => { #[test] + #[ignore] + // FIXME: re-enable https://github.com/sigp/lighthouse/issues/440 pub fn test_cached_tree_hash() { use crate::test_utils::{SeedableRng, TestRandom, XorShiftRng}; use tree_hash::TreeHash; @@ -45,7 +48,7 @@ macro_rules! cached_tree_hash_tests { let mut rng = XorShiftRng::from_seed([42; 16]); // Test the original hash - let original = $type::random_for_test(&mut rng); + let original = <$type>::random_for_test(&mut rng); let mut cache = cached_tree_hash::TreeHashCache::new(&original).unwrap(); assert_eq!( @@ -55,7 +58,7 @@ macro_rules! cached_tree_hash_tests { ); // Test the updated hash - let modified = $type::random_for_test(&mut rng); + let modified = <$type>::random_for_test(&mut rng); cache.update(&modified).unwrap(); assert_eq!( cache.tree_hash_root().unwrap().to_vec(), diff --git a/eth2/types/src/test_utils/test_random.rs b/eth2/types/src/test_utils/test_random.rs index 4f56d1596..3598fa79c 100644 --- a/eth2/types/src/test_utils/test_random.rs +++ b/eth2/types/src/test_utils/test_random.rs @@ -1,6 +1,6 @@ use crate::*; -use fixed_len_vec::typenum::Unsigned; use rand::RngCore; +use ssz_types::typenum::Unsigned; mod address; mod aggregate_signature; @@ -53,7 +53,7 @@ where } } -impl TestRandom for FixedLenVec +impl TestRandom for FixedVector where T: TestRandom + Default, { @@ -68,6 +68,23 @@ where } } +impl TestRandom for VariableList +where + T: TestRandom, +{ + fn random_for_test(rng: &mut impl RngCore) -> Self { + let mut output = vec![]; + + if N::to_usize() != 0 { + for _ in 0..(usize::random_for_test(rng) % std::cmp::min(4, N::to_usize())) { + output.push(::random_for_test(rng)); + } + } + + output.into() + } +} + macro_rules! impl_test_random_for_u8_array { ($len: expr) => { impl TestRandom for [u8; $len] { diff --git a/eth2/types/src/test_utils/test_random/bitfield.rs b/eth2/types/src/test_utils/test_random/bitfield.rs index 9a4d21840..2ba3576b7 100644 --- a/eth2/types/src/test_utils/test_random/bitfield.rs +++ b/eth2/types/src/test_utils/test_random/bitfield.rs @@ -1,10 +1,18 @@ use super::*; -use crate::Bitfield; +use crate::{BitList, BitVector, Unsigned}; -impl TestRandom for Bitfield { +impl TestRandom for BitList { fn random_for_test(rng: &mut impl RngCore) -> Self { - let mut raw_bytes = vec![0; 32]; + let mut raw_bytes = vec![0; std::cmp::max(1, (N::to_usize() + 7) / 8)]; rng.fill_bytes(&mut raw_bytes); - Bitfield::from_bytes(&raw_bytes) + Self::from_bytes(raw_bytes).expect("we generate a valid BitList") + } +} + +impl TestRandom for BitVector { + fn random_for_test(rng: &mut impl RngCore) -> Self { + let mut raw_bytes = vec![0; std::cmp::max(1, (N::to_usize() + 7) / 8)]; + rng.fill_bytes(&mut raw_bytes); + Self::from_bytes(raw_bytes).expect("we generate a valid BitVector") } } diff --git a/eth2/types/src/transfer.rs b/eth2/types/src/transfer.rs index 8a7850cfc..3c4d6ee2e 100644 --- a/eth2/types/src/transfer.rs +++ b/eth2/types/src/transfer.rs @@ -11,7 +11,7 @@ use tree_hash_derive::{CachedTreeHash, SignedRoot, TreeHash}; /// The data submitted to the deposit contract. /// -/// Spec v0.6.3 +/// Spec v0.8.0 #[derive( Debug, Clone, diff --git a/eth2/types/src/validator.rs b/eth2/types/src/validator.rs index 4337e164d..39fe911aa 100644 --- a/eth2/types/src/validator.rs +++ b/eth2/types/src/validator.rs @@ -7,7 +7,7 @@ use tree_hash_derive::{CachedTreeHash, TreeHash}; /// Information about a `BeaconChain` validator. /// -/// Spec v0.6.3 +/// Spec v0.8.0 #[derive( Debug, Clone, @@ -23,12 +23,12 @@ use tree_hash_derive::{CachedTreeHash, TreeHash}; pub struct Validator { pub pubkey: PublicKey, pub withdrawal_credentials: Hash256, + pub effective_balance: u64, + pub slashed: bool, pub activation_eligibility_epoch: Epoch, pub activation_epoch: Epoch, pub exit_epoch: Epoch, pub withdrawable_epoch: Epoch, - pub slashed: bool, - pub effective_balance: u64, } impl Validator { diff --git a/eth2/types/src/voluntary_exit.rs b/eth2/types/src/voluntary_exit.rs index 5630d4d4c..231fa4441 100644 --- a/eth2/types/src/voluntary_exit.rs +++ b/eth2/types/src/voluntary_exit.rs @@ -9,7 +9,7 @@ use tree_hash_derive::{CachedTreeHash, SignedRoot, TreeHash}; /// An exit voluntarily submitted a validator who wishes to withdraw. /// -/// Spec v0.6.3 +/// Spec v0.8.0 #[derive( Debug, PartialEq, @@ -24,6 +24,7 @@ use tree_hash_derive::{CachedTreeHash, SignedRoot, TreeHash}; SignedRoot, )] pub struct VoluntaryExit { + /// Earliest epoch when voluntary exit can be processed. pub epoch: Epoch, pub validator_index: u64, #[signed_root(skip_hashing)] diff --git a/eth2/utils/bls/Cargo.toml b/eth2/utils/bls/Cargo.toml index 127589463..880d1144f 100644 --- a/eth2/utils/bls/Cargo.toml +++ b/eth2/utils/bls/Cargo.toml @@ -14,6 +14,7 @@ serde = "1.0" serde_derive = "1.0" serde_hex = { path = "../serde_hex" } eth2_ssz = { path = "../ssz" } +eth2_ssz_types = { path = "../ssz_types" } tree_hash = { path = "../tree_hash" } [features] diff --git a/eth2/utils/bls/src/aggregate_signature.rs b/eth2/utils/bls/src/aggregate_signature.rs index 60f9ee993..8b5189c19 100644 --- a/eth2/utils/bls/src/aggregate_signature.rs +++ b/eth2/utils/bls/src/aggregate_signature.rs @@ -1,13 +1,11 @@ use super::*; -use cached_tree_hash::cached_tree_hash_ssz_encoding_as_vector; use milagro_bls::{ AggregatePublicKey as RawAggregatePublicKey, AggregateSignature as RawAggregateSignature, }; use serde::de::{Deserialize, Deserializer}; use serde::ser::{Serialize, Serializer}; use serde_hex::{encode as hex_encode, HexVisitor}; -use ssz::{Decode, DecodeError}; -use tree_hash::tree_hash_ssz_encoding_as_vector; +use ssz::{Decode, DecodeError, Encode}; /// A BLS aggregate signature. /// @@ -143,6 +141,10 @@ impl_ssz!( "AggregateSignature" ); +impl_tree_hash!(AggregateSignature, U96); + +impl_cached_tree_hash!(AggregateSignature, U96); + impl Serialize for AggregateSignature { /// Serde serialization is compliant the Ethereum YAML test format. fn serialize(&self, serializer: S) -> Result @@ -167,9 +169,6 @@ impl<'de> Deserialize<'de> for AggregateSignature { } } -tree_hash_ssz_encoding_as_vector!(AggregateSignature); -cached_tree_hash_ssz_encoding_as_vector!(AggregateSignature, 96); - #[cfg(test)] mod tests { use super::super::{Keypair, Signature}; diff --git a/eth2/utils/bls/src/fake_aggregate_signature.rs b/eth2/utils/bls/src/fake_aggregate_signature.rs index 709c008aa..c87417db8 100644 --- a/eth2/utils/bls/src/fake_aggregate_signature.rs +++ b/eth2/utils/bls/src/fake_aggregate_signature.rs @@ -2,12 +2,10 @@ use super::{ fake_aggregate_public_key::FakeAggregatePublicKey, fake_signature::FakeSignature, BLS_AGG_SIG_BYTE_SIZE, }; -use cached_tree_hash::cached_tree_hash_ssz_encoding_as_vector; use serde::de::{Deserialize, Deserializer}; use serde::ser::{Serialize, Serializer}; use serde_hex::{encode as hex_encode, PrefixedHexVisitor}; -use ssz::{ssz_encode, Decode, DecodeError}; -use tree_hash::tree_hash_ssz_encoding_as_vector; +use ssz::{ssz_encode, Decode, DecodeError, Encode}; /// A BLS aggregate signature. /// @@ -86,6 +84,10 @@ impl_ssz!( "FakeAggregateSignature" ); +impl_tree_hash!(FakeAggregateSignature, U96); + +impl_cached_tree_hash!(FakeAggregateSignature, U96); + impl Serialize for FakeAggregateSignature { fn serialize(&self, serializer: S) -> Result where @@ -107,9 +109,6 @@ impl<'de> Deserialize<'de> for FakeAggregateSignature { } } -tree_hash_ssz_encoding_as_vector!(FakeAggregateSignature); -cached_tree_hash_ssz_encoding_as_vector!(FakeAggregateSignature, 96); - #[cfg(test)] mod tests { use super::super::{Keypair, Signature}; diff --git a/eth2/utils/bls/src/fake_public_key.rs b/eth2/utils/bls/src/fake_public_key.rs index 617363d12..4431b3232 100644 --- a/eth2/utils/bls/src/fake_public_key.rs +++ b/eth2/utils/bls/src/fake_public_key.rs @@ -1,13 +1,11 @@ use super::{SecretKey, BLS_PUBLIC_KEY_BYTE_SIZE}; -use cached_tree_hash::cached_tree_hash_ssz_encoding_as_vector; use serde::de::{Deserialize, Deserializer}; use serde::ser::{Serialize, Serializer}; use serde_hex::{encode as hex_encode, HexVisitor}; -use ssz::{ssz_encode, Decode, DecodeError}; +use ssz::{ssz_encode, Decode, DecodeError, Encode}; use std::default; use std::fmt; use std::hash::{Hash, Hasher}; -use tree_hash::tree_hash_ssz_encoding_as_vector; /// A single BLS signature. /// @@ -84,6 +82,10 @@ impl default::Default for FakePublicKey { impl_ssz!(FakePublicKey, BLS_PUBLIC_KEY_BYTE_SIZE, "FakePublicKey"); +impl_tree_hash!(FakePublicKey, U48); + +impl_cached_tree_hash!(FakePublicKey, U48); + impl Serialize for FakePublicKey { fn serialize(&self, serializer: S) -> Result where @@ -105,9 +107,6 @@ impl<'de> Deserialize<'de> for FakePublicKey { } } -tree_hash_ssz_encoding_as_vector!(FakePublicKey); -cached_tree_hash_ssz_encoding_as_vector!(FakePublicKey, 48); - impl PartialEq for FakePublicKey { fn eq(&self, other: &FakePublicKey) -> bool { ssz_encode(self) == ssz_encode(other) diff --git a/eth2/utils/bls/src/fake_signature.rs b/eth2/utils/bls/src/fake_signature.rs index ebe4e997e..60607628a 100644 --- a/eth2/utils/bls/src/fake_signature.rs +++ b/eth2/utils/bls/src/fake_signature.rs @@ -1,11 +1,9 @@ use super::{PublicKey, SecretKey, BLS_SIG_BYTE_SIZE}; -use cached_tree_hash::cached_tree_hash_ssz_encoding_as_vector; use hex::encode as hex_encode; use serde::de::{Deserialize, Deserializer}; use serde::ser::{Serialize, Serializer}; use serde_hex::HexVisitor; -use ssz::{ssz_encode, Decode, DecodeError}; -use tree_hash::tree_hash_ssz_encoding_as_vector; +use ssz::{ssz_encode, Decode, DecodeError, Encode}; /// A single BLS signature. /// @@ -84,8 +82,9 @@ impl FakeSignature { impl_ssz!(FakeSignature, BLS_SIG_BYTE_SIZE, "FakeSignature"); -tree_hash_ssz_encoding_as_vector!(FakeSignature); -cached_tree_hash_ssz_encoding_as_vector!(FakeSignature, 96); +impl_tree_hash!(FakeSignature, U96); + +impl_cached_tree_hash!(FakeSignature, U96); impl Serialize for FakeSignature { fn serialize(&self, serializer: S) -> Result diff --git a/eth2/utils/bls/src/macros.rs b/eth2/utils/bls/src/macros.rs index af2cde190..4f41bac1d 100644 --- a/eth2/utils/bls/src/macros.rs +++ b/eth2/utils/bls/src/macros.rs @@ -36,3 +36,51 @@ macro_rules! impl_ssz { } }; } + +macro_rules! impl_tree_hash { + ($type: ty, $byte_size: ident) => { + impl tree_hash::TreeHash for $type { + fn tree_hash_type() -> tree_hash::TreeHashType { + tree_hash::TreeHashType::Vector + } + + fn tree_hash_packed_encoding(&self) -> Vec { + unreachable!("Vector should never be packed.") + } + + fn tree_hash_packing_factor() -> usize { + unreachable!("Vector should never be packed.") + } + + fn tree_hash_root(&self) -> Vec { + let vector: ssz_types::FixedVector = + ssz_types::FixedVector::from(self.as_ssz_bytes()); + vector.tree_hash_root() + } + } + }; +} + +macro_rules! impl_cached_tree_hash { + ($type: ty, $byte_size: ident) => { + impl cached_tree_hash::CachedTreeHash for $type { + fn new_tree_hash_cache( + &self, + _depth: usize, + ) -> Result { + unimplemented!("CachedTreeHash is not implemented for BLS types") + } + + fn tree_hash_cache_schema(&self, _depth: usize) -> cached_tree_hash::BTreeSchema { + unimplemented!("CachedTreeHash is not implemented for BLS types") + } + + fn update_tree_hash_cache( + &self, + _cache: &mut cached_tree_hash::TreeHashCache, + ) -> Result<(), cached_tree_hash::Error> { + unimplemented!("CachedTreeHash is not implemented for BLS types") + } + } + }; +} diff --git a/eth2/utils/bls/src/public_key.rs b/eth2/utils/bls/src/public_key.rs index 09451331d..d78b5869b 100644 --- a/eth2/utils/bls/src/public_key.rs +++ b/eth2/utils/bls/src/public_key.rs @@ -1,5 +1,4 @@ use super::{SecretKey, BLS_PUBLIC_KEY_BYTE_SIZE}; -use cached_tree_hash::cached_tree_hash_ssz_encoding_as_vector; use milagro_bls::PublicKey as RawPublicKey; use serde::de::{Deserialize, Deserializer}; use serde::ser::{Serialize, Serializer}; @@ -8,7 +7,6 @@ use ssz::{Decode, DecodeError, Encode}; use std::default; use std::fmt; use std::hash::{Hash, Hasher}; -use tree_hash::tree_hash_ssz_encoding_as_vector; /// A single BLS signature. /// @@ -92,6 +90,10 @@ impl default::Default for PublicKey { impl_ssz!(PublicKey, BLS_PUBLIC_KEY_BYTE_SIZE, "PublicKey"); +impl_tree_hash!(PublicKey, U48); + +impl_cached_tree_hash!(PublicKey, U48); + impl Serialize for PublicKey { fn serialize(&self, serializer: S) -> Result where @@ -113,9 +115,6 @@ impl<'de> Deserialize<'de> for PublicKey { } } -tree_hash_ssz_encoding_as_vector!(PublicKey); -cached_tree_hash_ssz_encoding_as_vector!(PublicKey, 48); - impl PartialEq for PublicKey { fn eq(&self, other: &PublicKey) -> bool { self.as_ssz_bytes() == other.as_ssz_bytes() @@ -152,6 +151,8 @@ mod tests { } #[test] + // TODO: once `CachedTreeHash` is fixed, this test should _not_ panic. + #[should_panic] pub fn test_cached_tree_hash() { let sk = SecretKey::random(); let original = PublicKey::from_secret_key(&sk); diff --git a/eth2/utils/bls/src/secret_key.rs b/eth2/utils/bls/src/secret_key.rs index 1107c9332..383723845 100644 --- a/eth2/utils/bls/src/secret_key.rs +++ b/eth2/utils/bls/src/secret_key.rs @@ -6,8 +6,7 @@ use milagro_bls::SecretKey as RawSecretKey; use serde::de::{Deserialize, Deserializer}; use serde::ser::{Serialize, Serializer}; use serde_hex::HexVisitor; -use ssz::{ssz_encode, Decode, DecodeError}; -use tree_hash::tree_hash_ssz_encoding_as_vector; +use ssz::{ssz_encode, Decode, DecodeError, Encode}; /// A single BLS signature. /// @@ -46,6 +45,10 @@ impl SecretKey { impl_ssz!(SecretKey, BLS_SECRET_KEY_BYTE_SIZE, "SecretKey"); +impl_tree_hash!(SecretKey, U48); + +impl_cached_tree_hash!(SecretKey, U48); + impl Serialize for SecretKey { fn serialize(&self, serializer: S) -> Result where @@ -67,8 +70,6 @@ impl<'de> Deserialize<'de> for SecretKey { } } -tree_hash_ssz_encoding_as_vector!(SecretKey); - #[cfg(test)] mod tests { use super::*; diff --git a/eth2/utils/bls/src/signature.rs b/eth2/utils/bls/src/signature.rs index 257254eba..20240039b 100644 --- a/eth2/utils/bls/src/signature.rs +++ b/eth2/utils/bls/src/signature.rs @@ -1,12 +1,10 @@ use super::{PublicKey, SecretKey, BLS_SIG_BYTE_SIZE}; -use cached_tree_hash::cached_tree_hash_ssz_encoding_as_vector; use hex::encode as hex_encode; use milagro_bls::Signature as RawSignature; use serde::de::{Deserialize, Deserializer}; use serde::ser::{Serialize, Serializer}; use serde_hex::HexVisitor; -use ssz::{ssz_encode, Decode, DecodeError}; -use tree_hash::tree_hash_ssz_encoding_as_vector; +use ssz::{ssz_encode, Decode, DecodeError, Encode}; /// A single BLS signature. /// @@ -111,8 +109,9 @@ impl Signature { impl_ssz!(Signature, BLS_SIG_BYTE_SIZE, "Signature"); -tree_hash_ssz_encoding_as_vector!(Signature); -cached_tree_hash_ssz_encoding_as_vector!(Signature, 96); +impl_tree_hash!(Signature, U96); + +impl_cached_tree_hash!(Signature, U96); impl Serialize for Signature { /// Serde serialization is compliant the Ethereum YAML test format. @@ -157,6 +156,8 @@ mod tests { } #[test] + // TODO: once `CachedTreeHash` is fixed, this test should _not_ panic. + #[should_panic] pub fn test_cached_tree_hash() { let keypair = Keypair::random(); let original = Signature::new(&[42, 42], 0, &keypair.sk); diff --git a/eth2/utils/boolean-bitfield/Cargo.toml b/eth2/utils/boolean-bitfield/Cargo.toml deleted file mode 100644 index e892fa5ba..000000000 --- a/eth2/utils/boolean-bitfield/Cargo.toml +++ /dev/null @@ -1,17 +0,0 @@ -[package] -name = "boolean-bitfield" -version = "0.1.0" -authors = ["Paul Hauner "] -edition = "2018" - -[dependencies] -cached_tree_hash = { path = "../cached_tree_hash" } -serde_hex = { path = "../serde_hex" } -eth2_ssz = { path = "../ssz" } -bit-vec = "0.5.0" -bit_reverse = "0.1" -serde = "1.0" -tree_hash = { path = "../tree_hash" } - -[dev-dependencies] -serde_yaml = "0.8" diff --git a/eth2/utils/boolean-bitfield/README.md b/eth2/utils/boolean-bitfield/README.md deleted file mode 100644 index adf83f6f8..000000000 --- a/eth2/utils/boolean-bitfield/README.md +++ /dev/null @@ -1,3 +0,0 @@ -# Boolean Bitfield - -Implements a set of boolean as a tightly-packed vector of bits. diff --git a/eth2/utils/boolean-bitfield/fuzz/.gitignore b/eth2/utils/boolean-bitfield/fuzz/.gitignore deleted file mode 100644 index 572e03bdf..000000000 --- a/eth2/utils/boolean-bitfield/fuzz/.gitignore +++ /dev/null @@ -1,4 +0,0 @@ - -target -corpus -artifacts diff --git a/eth2/utils/boolean-bitfield/fuzz/Cargo.toml b/eth2/utils/boolean-bitfield/fuzz/Cargo.toml deleted file mode 100644 index 6a664ee60..000000000 --- a/eth2/utils/boolean-bitfield/fuzz/Cargo.toml +++ /dev/null @@ -1,33 +0,0 @@ - -[package] -name = "boolean-bitfield-fuzz" -version = "0.0.1" -authors = ["Automatically generated"] -publish = false - -[package.metadata] -cargo-fuzz = true - -[dependencies] -eth2_ssz = { path = "../../ssz" } - -[dependencies.boolean-bitfield] -path = ".." -[dependencies.libfuzzer-sys] -git = "https://github.com/rust-fuzz/libfuzzer-sys.git" - -# Prevent this from interfering with workspaces -[workspace] -members = ["."] - -[[bin]] -name = "fuzz_target_from_bytes" -path = "fuzz_targets/fuzz_target_from_bytes.rs" - -[[bin]] -name = "fuzz_target_ssz_decode" -path = "fuzz_targets/fuzz_target_ssz_decode.rs" - -[[bin]] -name = "fuzz_target_ssz_encode" -path = "fuzz_targets/fuzz_target_ssz_encode.rs" diff --git a/eth2/utils/boolean-bitfield/fuzz/fuzz_targets/fuzz_target_from_bytes.rs b/eth2/utils/boolean-bitfield/fuzz/fuzz_targets/fuzz_target_from_bytes.rs deleted file mode 100644 index 0c71c6d68..000000000 --- a/eth2/utils/boolean-bitfield/fuzz/fuzz_targets/fuzz_target_from_bytes.rs +++ /dev/null @@ -1,9 +0,0 @@ -#![no_main] -#[macro_use] extern crate libfuzzer_sys; -extern crate boolean_bitfield; - -use boolean_bitfield::BooleanBitfield; - -fuzz_target!(|data: &[u8]| { - let _result = BooleanBitfield::from_bytes(data); -}); diff --git a/eth2/utils/boolean-bitfield/fuzz/fuzz_targets/fuzz_target_ssz_decode.rs b/eth2/utils/boolean-bitfield/fuzz/fuzz_targets/fuzz_target_ssz_decode.rs deleted file mode 100644 index 14ddbb0a9..000000000 --- a/eth2/utils/boolean-bitfield/fuzz/fuzz_targets/fuzz_target_ssz_decode.rs +++ /dev/null @@ -1,11 +0,0 @@ -#![no_main] -#[macro_use] extern crate libfuzzer_sys; -extern crate boolean_bitfield; -extern crate ssz; - -use boolean_bitfield::BooleanBitfield; -use ssz::{Decodable, DecodeError}; - -fuzz_target!(|data: &[u8]| { - let result: Result<(BooleanBitfield, usize), DecodeError> = <_>::ssz_decode(data, 0); -}); diff --git a/eth2/utils/boolean-bitfield/fuzz/fuzz_targets/fuzz_target_ssz_encode.rs b/eth2/utils/boolean-bitfield/fuzz/fuzz_targets/fuzz_target_ssz_encode.rs deleted file mode 100644 index 0626e5db7..000000000 --- a/eth2/utils/boolean-bitfield/fuzz/fuzz_targets/fuzz_target_ssz_encode.rs +++ /dev/null @@ -1,13 +0,0 @@ -#![no_main] -#[macro_use] extern crate libfuzzer_sys; -extern crate boolean_bitfield; -extern crate ssz; - -use boolean_bitfield::BooleanBitfield; -use ssz::SszStream; - -fuzz_target!(|data: &[u8]| { - let bitfield = BooleanBitfield::from_bytes(data); - let mut ssz = SszStream::new(); - ssz.append(&bitfield); -}); diff --git a/eth2/utils/boolean-bitfield/src/lib.rs b/eth2/utils/boolean-bitfield/src/lib.rs deleted file mode 100644 index ac6ffa89a..000000000 --- a/eth2/utils/boolean-bitfield/src/lib.rs +++ /dev/null @@ -1,572 +0,0 @@ -extern crate bit_vec; -extern crate ssz; - -use bit_reverse::LookupReverse; -use bit_vec::BitVec; -use cached_tree_hash::cached_tree_hash_bytes_as_list; -use serde::de::{Deserialize, Deserializer}; -use serde::ser::{Serialize, Serializer}; -use serde_hex::{encode, PrefixedHexVisitor}; -use ssz::{Decode, Encode}; -use std::cmp; -use std::default; - -/// A BooleanBitfield represents a set of booleans compactly stored as a vector of bits. -/// The BooleanBitfield is given a fixed size during construction. Reads outside of the current size return an out-of-bounds error. Writes outside of the current size expand the size of the set. -#[derive(Debug, Clone, Hash)] -pub struct BooleanBitfield(BitVec); - -/// Error represents some reason a request against a bitfield was not satisfied -#[derive(Debug, PartialEq)] -pub enum Error { - /// OutOfBounds refers to indexing into a bitfield where no bits exist; returns the illegal index and the current size of the bitfield, respectively - OutOfBounds(usize, usize), -} - -impl BooleanBitfield { - /// Create a new bitfield. - pub fn new() -> Self { - Default::default() - } - - pub fn with_capacity(initial_len: usize) -> Self { - Self::from_elem(initial_len, false) - } - - /// Create a new bitfield with the given length `initial_len` and all values set to `bit`. - /// - /// Note: if `initial_len` is not a multiple of 8, the remaining bits will be set to `false` - /// regardless of `bit`. - pub fn from_elem(initial_len: usize, bit: bool) -> Self { - // BitVec can panic if we don't set the len to be a multiple of 8. - let full_len = ((initial_len + 7) / 8) * 8; - let mut bitfield = BitVec::from_elem(full_len, false); - - if bit { - for i in 0..initial_len { - bitfield.set(i, true); - } - } - - Self { 0: bitfield } - } - - /// Create a new bitfield using the supplied `bytes` as input - pub fn from_bytes(bytes: &[u8]) -> Self { - Self { - 0: BitVec::from_bytes(&reverse_bit_order(bytes.to_vec())), - } - } - - /// Returns a vector of bytes representing the bitfield - pub fn to_bytes(&self) -> Vec { - reverse_bit_order(self.0.to_bytes().to_vec()) - } - - /// Read the value of a bit. - /// - /// If the index is in bounds, then result is Ok(value) where value is `true` if the bit is 1 and `false` if the bit is 0. - /// If the index is out of bounds, we return an error to that extent. - pub fn get(&self, i: usize) -> Result { - match self.0.get(i) { - Some(value) => Ok(value), - None => Err(Error::OutOfBounds(i, self.0.len())), - } - } - - /// Set the value of a bit. - /// - /// If the index is out of bounds, we expand the size of the underlying set to include the new index. - /// Returns the previous value if there was one. - pub fn set(&mut self, i: usize, value: bool) -> Option { - let previous = match self.get(i) { - Ok(previous) => Some(previous), - Err(Error::OutOfBounds(_, len)) => { - let new_len = i - len + 1; - self.0.grow(new_len, false); - None - } - }; - self.0.set(i, value); - previous - } - - /// Returns the number of bits in this bitfield. - pub fn len(&self) -> usize { - self.0.len() - } - - /// Returns true if `self.len() == 0` - pub fn is_empty(&self) -> bool { - self.len() == 0 - } - - /// Returns true if all bits are set to 0. - pub fn is_zero(&self) -> bool { - self.0.none() - } - - /// Returns the number of bytes required to represent this bitfield. - pub fn num_bytes(&self) -> usize { - self.to_bytes().len() - } - - /// Returns the number of `1` bits in the bitfield - pub fn num_set_bits(&self) -> usize { - self.0.iter().filter(|&bit| bit).count() - } - - /// Compute the intersection (binary-and) of this bitfield with another. Lengths must match. - pub fn intersection(&self, other: &Self) -> Self { - let mut res = self.clone(); - res.intersection_inplace(other); - res - } - - /// Like `intersection` but in-place (updates `self`). - pub fn intersection_inplace(&mut self, other: &Self) { - self.0.intersect(&other.0); - } - - /// Compute the union (binary-or) of this bitfield with another. Lengths must match. - pub fn union(&self, other: &Self) -> Self { - let mut res = self.clone(); - res.union_inplace(other); - res - } - - /// Like `union` but in-place (updates `self`). - pub fn union_inplace(&mut self, other: &Self) { - self.0.union(&other.0); - } - - /// Compute the difference (binary-minus) of this bitfield with another. Lengths must match. - /// - /// Computes `self - other`. - pub fn difference(&self, other: &Self) -> Self { - let mut res = self.clone(); - res.difference_inplace(other); - res - } - - /// Like `difference` but in-place (updates `self`). - pub fn difference_inplace(&mut self, other: &Self) { - self.0.difference(&other.0); - } -} - -impl default::Default for BooleanBitfield { - /// default provides the "empty" bitfield - /// Note: the empty bitfield is set to the `0` byte. - fn default() -> Self { - Self::from_elem(8, false) - } -} - -impl cmp::PartialEq for BooleanBitfield { - /// Determines equality by comparing the `ssz` encoding of the two candidates. - /// This method ensures that the presence of high-order (empty) bits in the highest byte do not exclude equality when they are in fact representing the same information. - fn eq(&self, other: &Self) -> bool { - ssz::ssz_encode(self) == ssz::ssz_encode(other) - } -} -impl Eq for BooleanBitfield {} - -/// Create a new bitfield that is a union of two other bitfields. -/// -/// For example `union(0101, 1000) == 1101` -// TODO: length-independent intersection for BitAnd -impl std::ops::BitOr for BooleanBitfield { - type Output = Self; - - fn bitor(self, other: Self) -> Self { - let (biggest, smallest) = if self.len() > other.len() { - (&self, &other) - } else { - (&other, &self) - }; - let mut new = biggest.clone(); - for i in 0..smallest.len() { - if let Ok(true) = smallest.get(i) { - new.set(i, true); - } - } - new - } -} - -impl Encode for BooleanBitfield { - fn is_ssz_fixed_len() -> bool { - false - } - - fn ssz_append(&self, buf: &mut Vec) { - buf.append(&mut self.to_bytes()) - } -} - -impl Decode for BooleanBitfield { - fn is_ssz_fixed_len() -> bool { - false - } - - fn from_ssz_bytes(bytes: &[u8]) -> Result { - Ok(BooleanBitfield::from_bytes(bytes)) - } -} - -// Reverse the bit order of a whole byte vec, so that the ith bit -// of the input vec is placed in the (N - i)th bit of the output vec. -// This function is necessary for converting bitfields to and from YAML, -// as the BitVec library and the hex-parser use opposing bit orders. -fn reverse_bit_order(mut bytes: Vec) -> Vec { - bytes.reverse(); - bytes.into_iter().map(LookupReverse::swap_bits).collect() -} - -impl Serialize for BooleanBitfield { - /// Serde serialization is compliant with the Ethereum YAML test format. - fn serialize(&self, serializer: S) -> Result - where - S: Serializer, - { - serializer.serialize_str(&encode(self.to_bytes())) - } -} - -impl<'de> Deserialize<'de> for BooleanBitfield { - /// Serde serialization is compliant with the Ethereum YAML test format. - fn deserialize(deserializer: D) -> Result - where - D: Deserializer<'de>, - { - // We reverse the bit-order so that the BitVec library can read its 0th - // bit from the end of the hex string, e.g. - // "0xef01" => [0xef, 0x01] => [0b1000_0000, 0b1111_1110] - let bytes = deserializer.deserialize_str(PrefixedHexVisitor)?; - Ok(BooleanBitfield::from_bytes(&bytes)) - } -} - -impl tree_hash::TreeHash for BooleanBitfield { - fn tree_hash_type() -> tree_hash::TreeHashType { - tree_hash::TreeHashType::List - } - - fn tree_hash_packed_encoding(&self) -> Vec { - unreachable!("List should never be packed.") - } - - fn tree_hash_packing_factor() -> usize { - unreachable!("List should never be packed.") - } - - fn tree_hash_root(&self) -> Vec { - self.to_bytes().tree_hash_root() - } -} - -cached_tree_hash_bytes_as_list!(BooleanBitfield); - -#[cfg(test)] -mod tests { - use super::*; - use serde_yaml; - use ssz::ssz_encode; - use tree_hash::TreeHash; - - #[test] - pub fn test_cached_tree_hash() { - let original = BooleanBitfield::from_bytes(&vec![18; 12][..]); - - let mut cache = cached_tree_hash::TreeHashCache::new(&original).unwrap(); - - assert_eq!( - cache.tree_hash_root().unwrap().to_vec(), - original.tree_hash_root() - ); - - let modified = BooleanBitfield::from_bytes(&vec![2; 1][..]); - - cache.update(&modified).unwrap(); - - assert_eq!( - cache.tree_hash_root().unwrap().to_vec(), - modified.tree_hash_root() - ); - } - - #[test] - fn test_new_bitfield() { - let mut field = BooleanBitfield::new(); - let original_len = field.len(); - - for i in 0..100 { - if i < original_len { - assert!(!field.get(i).unwrap()); - } else { - assert!(field.get(i).is_err()); - } - let previous = field.set(i, true); - if i < original_len { - assert!(!previous.unwrap()); - } else { - assert!(previous.is_none()); - } - } - } - - #[test] - fn test_empty_bitfield() { - let mut field = BooleanBitfield::from_elem(0, false); - let original_len = field.len(); - - assert_eq!(original_len, 0); - - for i in 0..100 { - if i < original_len { - assert!(!field.get(i).unwrap()); - } else { - assert!(field.get(i).is_err()); - } - let previous = field.set(i, true); - if i < original_len { - assert!(!previous.unwrap()); - } else { - assert!(previous.is_none()); - } - } - - assert_eq!(field.len(), 100); - assert_eq!(field.num_set_bits(), 100); - } - - const INPUT: &[u8] = &[0b0100_0000, 0b0100_0000]; - - #[test] - fn test_get_from_bitfield() { - let field = BooleanBitfield::from_bytes(INPUT); - let unset = field.get(0).unwrap(); - assert!(!unset); - let set = field.get(6).unwrap(); - assert!(set); - let set = field.get(14).unwrap(); - assert!(set); - } - - #[test] - fn test_set_for_bitfield() { - let mut field = BooleanBitfield::from_bytes(INPUT); - let previous = field.set(10, true).unwrap(); - assert!(!previous); - let previous = field.get(10).unwrap(); - assert!(previous); - let previous = field.set(6, false).unwrap(); - assert!(previous); - let previous = field.get(6).unwrap(); - assert!(!previous); - } - - #[test] - fn test_len() { - let field = BooleanBitfield::from_bytes(INPUT); - assert_eq!(field.len(), 16); - - let field = BooleanBitfield::new(); - assert_eq!(field.len(), 8); - } - - #[test] - fn test_num_set_bits() { - let field = BooleanBitfield::from_bytes(INPUT); - assert_eq!(field.num_set_bits(), 2); - - let field = BooleanBitfield::new(); - assert_eq!(field.num_set_bits(), 0); - } - - #[test] - fn test_to_bytes() { - let field = BooleanBitfield::from_bytes(INPUT); - assert_eq!(field.to_bytes(), INPUT); - - let field = BooleanBitfield::new(); - assert_eq!(field.to_bytes(), vec![0]); - } - - #[test] - fn test_out_of_bounds() { - let mut field = BooleanBitfield::from_bytes(INPUT); - - let out_of_bounds_index = field.len(); - assert!(field.set(out_of_bounds_index, true).is_none()); - assert!(field.len() == out_of_bounds_index + 1); - assert!(field.get(out_of_bounds_index).unwrap()); - - for i in 0..100 { - if i <= out_of_bounds_index { - assert!(field.set(i, true).is_some()); - } else { - assert!(field.set(i, true).is_none()); - } - } - } - - #[test] - fn test_grows_with_false() { - let input_all_set: &[u8] = &[0b1111_1111, 0b1111_1111]; - let mut field = BooleanBitfield::from_bytes(input_all_set); - - // Define `a` and `b`, where both are out of bounds and `b` is greater than `a`. - let a = field.len(); - let b = a + 1; - - // Ensure `a` is out-of-bounds for test integrity. - assert!(field.get(a).is_err()); - - // Set `b` to `true`. Also, for test integrity, ensure it was previously out-of-bounds. - assert!(field.set(b, true).is_none()); - - // Ensure that `a` wasn't also set to `true` during the grow. - assert_eq!(field.get(a), Ok(false)); - assert_eq!(field.get(b), Ok(true)); - } - - #[test] - fn test_num_bytes() { - let field = BooleanBitfield::from_bytes(INPUT); - assert_eq!(field.num_bytes(), 2); - - let field = BooleanBitfield::from_elem(2, true); - assert_eq!(field.num_bytes(), 1); - - let field = BooleanBitfield::from_elem(13, true); - assert_eq!(field.num_bytes(), 2); - } - - #[test] - fn test_ssz_encode() { - let field = create_test_bitfield(); - assert_eq!(field.as_ssz_bytes(), vec![0b0000_0011, 0b1000_0111]); - - let field = BooleanBitfield::from_elem(18, true); - assert_eq!( - field.as_ssz_bytes(), - vec![0b0000_0011, 0b1111_1111, 0b1111_1111] - ); - - let mut b = BooleanBitfield::new(); - b.set(1, true); - assert_eq!(ssz_encode(&b), vec![0b0000_0010]); - } - - fn create_test_bitfield() -> BooleanBitfield { - let count = 2 * 8; - let mut field = BooleanBitfield::with_capacity(count); - - let indices = &[0, 1, 2, 7, 8, 9]; - for &i in indices { - field.set(i, true); - } - field - } - - #[test] - fn test_ssz_decode() { - let encoded = vec![0b0000_0011, 0b1000_0111]; - let field = BooleanBitfield::from_ssz_bytes(&encoded).unwrap(); - let expected = create_test_bitfield(); - assert_eq!(field, expected); - - let encoded = vec![255, 255, 3]; - let field = BooleanBitfield::from_ssz_bytes(&encoded).unwrap(); - let expected = BooleanBitfield::from_bytes(&[255, 255, 3]); - assert_eq!(field, expected); - } - - #[test] - fn test_serialize_deserialize() { - use serde_yaml::Value; - - let data: &[(_, &[_])] = &[ - ("0x01", &[0b00000001]), - ("0xf301", &[0b11110011, 0b00000001]), - ]; - for (hex_data, bytes) in data { - let bitfield = BooleanBitfield::from_bytes(bytes); - assert_eq!( - serde_yaml::from_str::(hex_data).unwrap(), - bitfield - ); - assert_eq!( - serde_yaml::to_value(&bitfield).unwrap(), - Value::String(hex_data.to_string()) - ); - } - } - - #[test] - fn test_ssz_round_trip() { - let original = BooleanBitfield::from_bytes(&vec![18; 12][..]); - let ssz = ssz_encode(&original); - let decoded = BooleanBitfield::from_ssz_bytes(&ssz).unwrap(); - assert_eq!(original, decoded); - } - - #[test] - fn test_bitor() { - let a = BooleanBitfield::from_bytes(&vec![2, 8, 1][..]); - let b = BooleanBitfield::from_bytes(&vec![4, 8, 16][..]); - let c = BooleanBitfield::from_bytes(&vec![6, 8, 17][..]); - assert_eq!(c, a | b); - } - - #[test] - fn test_is_zero() { - let yes_data: &[&[u8]] = &[&[], &[0], &[0, 0], &[0, 0, 0]]; - for bytes in yes_data { - assert!(BooleanBitfield::from_bytes(bytes).is_zero()); - } - let no_data: &[&[u8]] = &[&[1], &[6], &[0, 1], &[0, 0, 1], &[0, 0, 255]]; - for bytes in no_data { - assert!(!BooleanBitfield::from_bytes(bytes).is_zero()); - } - } - - #[test] - fn test_intersection() { - let a = BooleanBitfield::from_bytes(&[0b1100, 0b0001]); - let b = BooleanBitfield::from_bytes(&[0b1011, 0b1001]); - let c = BooleanBitfield::from_bytes(&[0b1000, 0b0001]); - assert_eq!(a.intersection(&b), c); - assert_eq!(b.intersection(&a), c); - assert_eq!(a.intersection(&c), c); - assert_eq!(b.intersection(&c), c); - assert_eq!(a.intersection(&a), a); - assert_eq!(b.intersection(&b), b); - assert_eq!(c.intersection(&c), c); - } - - #[test] - fn test_union() { - let a = BooleanBitfield::from_bytes(&[0b1100, 0b0001]); - let b = BooleanBitfield::from_bytes(&[0b1011, 0b1001]); - let c = BooleanBitfield::from_bytes(&[0b1111, 0b1001]); - assert_eq!(a.union(&b), c); - assert_eq!(b.union(&a), c); - assert_eq!(a.union(&a), a); - assert_eq!(b.union(&b), b); - assert_eq!(c.union(&c), c); - } - - #[test] - fn test_difference() { - let a = BooleanBitfield::from_bytes(&[0b1100, 0b0001]); - let b = BooleanBitfield::from_bytes(&[0b1011, 0b1001]); - let a_b = BooleanBitfield::from_bytes(&[0b0100, 0b0000]); - let b_a = BooleanBitfield::from_bytes(&[0b0011, 0b1000]); - assert_eq!(a.difference(&b), a_b); - assert_eq!(b.difference(&a), b_a); - assert!(a.difference(&a).is_zero()); - } -} diff --git a/eth2/utils/cached_tree_hash/examples/8k_hashes_cached.rs b/eth2/utils/cached_tree_hash/examples/8k_hashes_cached.rs deleted file mode 100644 index 1e67571d5..000000000 --- a/eth2/utils/cached_tree_hash/examples/8k_hashes_cached.rs +++ /dev/null @@ -1,21 +0,0 @@ -use cached_tree_hash::TreeHashCache; -use ethereum_types::H256 as Hash256; - -fn run(vec: &Vec, modified_vec: &Vec) { - let mut cache = TreeHashCache::new(vec).unwrap(); - - cache.update(modified_vec).unwrap(); -} - -fn main() { - let n = 2048; - - let vec: Vec = (0..n).map(|_| Hash256::random()).collect(); - - let mut modified_vec = vec.clone(); - modified_vec[n - 1] = Hash256::random(); - - for _ in 0..10_000 { - run(&vec, &modified_vec); - } -} diff --git a/eth2/utils/cached_tree_hash/examples/8k_hashes_standard.rs b/eth2/utils/cached_tree_hash/examples/8k_hashes_standard.rs deleted file mode 100644 index bcbb392e2..000000000 --- a/eth2/utils/cached_tree_hash/examples/8k_hashes_standard.rs +++ /dev/null @@ -1,10 +0,0 @@ -use ethereum_types::H256 as Hash256; -use tree_hash::TreeHash; - -fn main() { - let n = 2048; - - let vec: Vec = (0..n).map(|_| Hash256::random()).collect(); - - vec.tree_hash_root(); -} diff --git a/eth2/utils/cached_tree_hash/tests/tests.rs b/eth2/utils/cached_tree_hash/tests/tests.rs deleted file mode 100644 index 3e2598e2b..000000000 --- a/eth2/utils/cached_tree_hash/tests/tests.rs +++ /dev/null @@ -1,677 +0,0 @@ -use cached_tree_hash::{merkleize::merkleize, *}; -use ethereum_types::H256 as Hash256; -use int_to_bytes::int_to_bytes32; -use tree_hash_derive::{CachedTreeHash, TreeHash}; - -#[test] -fn modifications() { - let n = 2048; - - let vec: Vec = (0..n).map(|_| Hash256::random()).collect(); - - let mut cache = TreeHashCache::new(&vec).unwrap(); - cache.update(&vec).unwrap(); - - let modifications = cache.chunk_modified.iter().filter(|b| **b).count(); - - assert_eq!(modifications, 0); - - let mut modified_vec = vec.clone(); - modified_vec[n - 1] = Hash256::random(); - - cache.update(&modified_vec).unwrap(); - - let modifications = cache.chunk_modified.iter().filter(|b| **b).count(); - - assert_eq!(modifications, n.trailing_zeros() as usize + 2); -} - -#[derive(Clone, Debug, TreeHash, CachedTreeHash)] -pub struct NestedStruct { - pub a: u64, - pub b: Inner, -} - -fn test_routine(original: T, modified: Vec) -where - T: CachedTreeHash + std::fmt::Debug, -{ - let mut cache = TreeHashCache::new(&original).unwrap(); - - let standard_root = original.tree_hash_root(); - let cached_root = cache.tree_hash_root().unwrap(); - assert_eq!(standard_root, cached_root, "Initial cache build failed."); - - for (i, modified) in modified.iter().enumerate() { - println!("-- Start of modification {} --", i); - - // Update the existing hasher. - cache - .update(modified) - .expect(&format!("Modification {}", i)); - - // Create a new hasher from the "modified" struct. - let modified_cache = TreeHashCache::new(modified).unwrap(); - - assert_eq!( - cache.chunk_modified.len(), - modified_cache.chunk_modified.len(), - "Number of chunks is different" - ); - - assert_eq!( - cache.bytes.len(), - modified_cache.bytes.len(), - "Number of bytes is different" - ); - - assert_eq!(cache.bytes, modified_cache.bytes, "Bytes are different"); - - assert_eq!( - cache.schemas.len(), - modified_cache.schemas.len(), - "Number of schemas is different" - ); - - assert_eq!( - cache.schemas, modified_cache.schemas, - "Schemas are different" - ); - - // Test the root generated by the updated hasher matches a non-cached tree hash root. - let standard_root = modified.tree_hash_root(); - let cached_root = cache - .tree_hash_root() - .expect(&format!("Modification {}", i)); - assert_eq!( - standard_root, cached_root, - "Modification {} failed. \n Cache: {:?}", - i, cache - ); - } -} - -#[test] -fn test_nested_struct() { - let original = NestedStruct { - a: 42, - b: Inner { - a: 12, - b: 13, - c: 14, - d: 15, - }, - }; - let modified = vec![NestedStruct { - a: 99, - ..original.clone() - }]; - - test_routine(original, modified); -} - -#[test] -fn test_inner() { - let original = Inner { - a: 12, - b: 13, - c: 14, - d: 15, - }; - - let modified = vec![Inner { - a: 99, - ..original.clone() - }]; - - test_routine(original, modified); -} - -#[test] -fn test_vec_of_hash256() { - let n = 16; - - let original: Vec = (0..n).map(|_| Hash256::random()).collect(); - - let modified: Vec> = vec![ - original[..].to_vec(), - original[0..n / 2].to_vec(), - vec![], - original[0..1].to_vec(), - original[0..3].to_vec(), - original[0..n - 12].to_vec(), - ]; - - test_routine(original, modified); -} - -#[test] -fn test_vec_of_u64() { - let original: Vec = vec![1, 2, 3, 4, 5]; - - let modified: Vec> = vec![ - vec![1, 2, 3, 4, 42], - vec![1, 2, 3, 4], - vec![], - vec![42; 2_usize.pow(4)], - vec![], - vec![], - vec![1, 2, 3, 4, 42], - vec![1, 2, 3], - vec![1], - ]; - - test_routine(original, modified); -} - -#[test] -fn test_nested_list_of_u64() { - let original: Vec> = vec![vec![42]]; - - let modified = vec![ - vec![vec![1]], - vec![vec![1], vec![2]], - vec![vec![1], vec![3], vec![4]], - vec![], - vec![vec![1], vec![3], vec![4]], - vec![], - vec![vec![1, 2], vec![3], vec![4, 5, 6, 7, 8]], - vec![], - vec![vec![1], vec![2], vec![3]], - vec![vec![1, 2, 3, 4, 5, 6], vec![1, 2, 3, 4, 5, 6, 7]], - vec![vec![], vec![], vec![]], - vec![vec![0, 0, 0], vec![0], vec![0]], - ]; - - test_routine(original, modified); -} - -#[test] -fn test_shrinking_vec_of_vec() { - let original: Vec> = vec![vec![1], vec![2], vec![3], vec![4], vec![5]]; - let modified: Vec> = original[0..3].to_vec(); - - let new_cache = TreeHashCache::new(&modified).unwrap(); - - let mut modified_cache = TreeHashCache::new(&original).unwrap(); - modified_cache.update(&modified).unwrap(); - - assert_eq!( - new_cache.schemas.len(), - modified_cache.schemas.len(), - "Schema count is different" - ); - - assert_eq!( - new_cache.chunk_modified.len(), - modified_cache.chunk_modified.len(), - "Chunk count is different" - ); -} - -#[derive(Clone, Debug, TreeHash, CachedTreeHash)] -pub struct StructWithVec { - pub a: u64, - pub b: Inner, - pub c: Vec, -} - -#[test] -fn test_struct_with_vec() { - let original = StructWithVec { - a: 42, - b: Inner { - a: 12, - b: 13, - c: 14, - d: 15, - }, - c: vec![1, 2, 3, 4, 5], - }; - - let modified = vec![ - StructWithVec { - a: 99, - ..original.clone() - }, - StructWithVec { - a: 100, - ..original.clone() - }, - StructWithVec { - c: vec![1, 2, 3, 4, 5], - ..original.clone() - }, - StructWithVec { - c: vec![1, 3, 4, 5, 6], - ..original.clone() - }, - StructWithVec { - c: vec![1, 3, 4, 5, 6, 7, 8, 9], - ..original.clone() - }, - StructWithVec { - c: vec![1, 3, 4, 5], - ..original.clone() - }, - StructWithVec { - b: Inner { - a: u64::max_value(), - b: u64::max_value(), - c: u64::max_value(), - d: u64::max_value(), - }, - c: vec![], - ..original.clone() - }, - StructWithVec { - b: Inner { - a: 0, - b: 1, - c: 2, - d: 3, - }, - ..original.clone() - }, - ]; - - test_routine(original, modified); -} - -#[test] -fn test_vec_of_struct_with_vec() { - let a = StructWithVec { - a: 42, - b: Inner { - a: 12, - b: 13, - c: 14, - d: 15, - }, - c: vec![1, 2, 3, 4, 5], - }; - let b = StructWithVec { - c: vec![], - ..a.clone() - }; - let c = StructWithVec { - b: Inner { - a: 99, - b: 100, - c: 101, - d: 102, - }, - ..a.clone() - }; - let d = StructWithVec { a: 0, ..a.clone() }; - - let original: Vec = vec![a.clone(), c.clone()]; - - let modified = vec![ - vec![a.clone(), c.clone()], - vec![], - vec![a.clone(), b.clone(), c.clone(), d.clone()], - vec![b.clone(), a.clone(), c.clone(), d.clone()], - vec![], - vec![a.clone()], - vec![], - vec![a.clone(), b.clone(), c.clone(), d.clone()], - ]; - - test_routine(original, modified); -} - -#[derive(Clone, Debug, TreeHash, CachedTreeHash)] -pub struct StructWithVecOfStructs { - pub a: u64, - pub b: Inner, - pub c: Vec, -} - -fn get_inners() -> Vec { - vec![ - Inner { - a: 12, - b: 13, - c: 14, - d: 15, - }, - Inner { - a: 99, - b: 100, - c: 101, - d: 102, - }, - Inner { - a: 255, - b: 256, - c: 257, - d: 0, - }, - Inner { - a: 1000, - b: 2000, - c: 3000, - d: 0, - }, - Inner { - a: 0, - b: 0, - c: 0, - d: 0, - }, - ] -} - -fn get_struct_with_vec_of_structs() -> Vec { - let inner_a = Inner { - a: 12, - b: 13, - c: 14, - d: 15, - }; - - let inner_b = Inner { - a: 99, - b: 100, - c: 101, - d: 102, - }; - - let inner_c = Inner { - a: 255, - b: 256, - c: 257, - d: 0, - }; - - let a = StructWithVecOfStructs { - a: 42, - b: inner_a.clone(), - c: vec![inner_a.clone(), inner_b.clone(), inner_c.clone()], - }; - - let b = StructWithVecOfStructs { - c: vec![], - ..a.clone() - }; - - let c = StructWithVecOfStructs { - a: 800, - ..a.clone() - }; - - let d = StructWithVecOfStructs { - b: inner_c.clone(), - ..a.clone() - }; - - let e = StructWithVecOfStructs { - c: vec![inner_a.clone(), inner_b.clone()], - ..a.clone() - }; - - let f = StructWithVecOfStructs { - c: vec![inner_a.clone()], - ..a.clone() - }; - - vec![a, b, c, d, e, f] -} - -#[test] -fn test_struct_with_vec_of_structs() { - let variants = get_struct_with_vec_of_structs(); - - test_routine(variants[0].clone(), variants.clone()); - test_routine(variants[1].clone(), variants.clone()); - test_routine(variants[2].clone(), variants.clone()); - test_routine(variants[3].clone(), variants.clone()); - test_routine(variants[4].clone(), variants.clone()); - test_routine(variants[5].clone(), variants.clone()); -} - -#[derive(Clone, Debug, TreeHash, CachedTreeHash)] -pub struct StructWithVecOfStructWithVecOfStructs { - pub a: Vec, - pub b: u64, -} - -#[test] -fn test_struct_with_vec_of_struct_with_vec_of_structs() { - let structs = get_struct_with_vec_of_structs(); - - let variants = vec![ - StructWithVecOfStructWithVecOfStructs { - a: structs[..].to_vec(), - b: 99, - }, - StructWithVecOfStructWithVecOfStructs { a: vec![], b: 99 }, - StructWithVecOfStructWithVecOfStructs { - a: structs[0..2].to_vec(), - b: 99, - }, - StructWithVecOfStructWithVecOfStructs { - a: structs[0..2].to_vec(), - b: 100, - }, - StructWithVecOfStructWithVecOfStructs { - a: structs[0..1].to_vec(), - b: 100, - }, - StructWithVecOfStructWithVecOfStructs { - a: structs[0..4].to_vec(), - b: 100, - }, - StructWithVecOfStructWithVecOfStructs { - a: structs[0..5].to_vec(), - b: 8, - }, - ]; - - for v in &variants { - test_routine(v.clone(), variants.clone()); - } -} - -#[derive(Clone, Debug, TreeHash, CachedTreeHash)] -pub struct StructWithTwoVecs { - pub a: Vec, - pub b: Vec, -} - -fn get_struct_with_two_vecs() -> Vec { - let inners = get_inners(); - - vec![ - StructWithTwoVecs { - a: inners[..].to_vec(), - b: inners[..].to_vec(), - }, - StructWithTwoVecs { - a: inners[0..1].to_vec(), - b: inners[..].to_vec(), - }, - StructWithTwoVecs { - a: inners[0..1].to_vec(), - b: inners[0..2].to_vec(), - }, - StructWithTwoVecs { - a: inners[0..4].to_vec(), - b: inners[0..2].to_vec(), - }, - StructWithTwoVecs { - a: vec![], - b: inners[..].to_vec(), - }, - StructWithTwoVecs { - a: inners[..].to_vec(), - b: vec![], - }, - StructWithTwoVecs { - a: inners[0..3].to_vec(), - b: inners[0..1].to_vec(), - }, - ] -} - -#[test] -fn test_struct_with_two_vecs() { - let variants = get_struct_with_two_vecs(); - - for v in &variants { - test_routine(v.clone(), variants.clone()); - } -} - -#[test] -fn test_vec_of_struct_with_two_vecs() { - let structs = get_struct_with_two_vecs(); - - let variants = vec![ - structs[0..].to_vec(), - structs[0..2].to_vec(), - structs[2..3].to_vec(), - vec![], - structs[2..4].to_vec(), - ]; - - test_routine(variants[0].clone(), vec![variants[2].clone()]); - - for v in &variants { - test_routine(v.clone(), variants.clone()); - } -} - -#[derive(Clone, Debug, TreeHash, CachedTreeHash)] -pub struct U64AndTwoStructs { - pub a: u64, - pub b: Inner, - pub c: Inner, -} - -#[test] -fn test_u64_and_two_structs() { - let inners = get_inners(); - - let variants = vec![ - U64AndTwoStructs { - a: 99, - b: inners[0].clone(), - c: inners[1].clone(), - }, - U64AndTwoStructs { - a: 10, - b: inners[2].clone(), - c: inners[3].clone(), - }, - U64AndTwoStructs { - a: 0, - b: inners[1].clone(), - c: inners[1].clone(), - }, - U64AndTwoStructs { - a: 0, - b: inners[1].clone(), - c: inners[1].clone(), - }, - ]; - - for v in &variants { - test_routine(v.clone(), variants.clone()); - } -} - -#[derive(Clone, Debug, TreeHash, CachedTreeHash)] -pub struct Inner { - pub a: u64, - pub b: u64, - pub c: u64, - pub d: u64, -} - -fn generic_test(index: usize) { - let inner = Inner { - a: 1, - b: 2, - c: 3, - d: 4, - }; - - let mut cache = TreeHashCache::new(&inner).unwrap(); - - let changed_inner = match index { - 0 => Inner { - a: 42, - ..inner.clone() - }, - 1 => Inner { - b: 42, - ..inner.clone() - }, - 2 => Inner { - c: 42, - ..inner.clone() - }, - 3 => Inner { - d: 42, - ..inner.clone() - }, - _ => panic!("bad index"), - }; - - changed_inner.update_tree_hash_cache(&mut cache).unwrap(); - - let data1 = int_to_bytes32(1); - let data2 = int_to_bytes32(2); - let data3 = int_to_bytes32(3); - let data4 = int_to_bytes32(4); - - let mut data = vec![data1, data2, data3, data4]; - - data[index] = int_to_bytes32(42); - - let expected = merkleize(join(data)); - - let (cache_bytes, _, _) = cache.into_components(); - - assert_eq!(expected, cache_bytes); -} - -#[test] -fn cached_hash_on_inner() { - generic_test(0); - generic_test(1); - generic_test(2); - generic_test(3); -} - -#[test] -fn inner_builds() { - let data1 = int_to_bytes32(1); - let data2 = int_to_bytes32(2); - let data3 = int_to_bytes32(3); - let data4 = int_to_bytes32(4); - - let data = join(vec![data1, data2, data3, data4]); - let expected = merkleize(data); - - let inner = Inner { - a: 1, - b: 2, - c: 3, - d: 4, - }; - - let (cache_bytes, _, _) = TreeHashCache::new(&inner).unwrap().into_components(); - - assert_eq!(expected, cache_bytes); -} - -fn join(many: Vec>) -> Vec { - let mut all = vec![]; - for one in many { - all.extend_from_slice(&mut one.clone()) - } - all -} diff --git a/eth2/utils/eth2_config/src/lib.rs b/eth2/utils/eth2_config/src/lib.rs index f6ad54c21..17cbc4211 100644 --- a/eth2/utils/eth2_config/src/lib.rs +++ b/eth2/utils/eth2_config/src/lib.rs @@ -46,7 +46,7 @@ impl Eth2Config { /// invalid. pub fn apply_cli_args(&mut self, args: &ArgMatches) -> Result<(), &'static str> { if args.is_present("recent-genesis") { - self.spec.genesis_time = recent_genesis_time() + self.spec.min_genesis_time = recent_genesis_time() } Ok(()) diff --git a/eth2/utils/fixed_len_vec/Cargo.toml b/eth2/utils/fixed_len_vec/Cargo.toml deleted file mode 100644 index 2750d3acd..000000000 --- a/eth2/utils/fixed_len_vec/Cargo.toml +++ /dev/null @@ -1,13 +0,0 @@ -[package] -name = "fixed_len_vec" -version = "0.1.0" -authors = ["Paul Hauner "] -edition = "2018" - -[dependencies] -cached_tree_hash = { path = "../cached_tree_hash" } -tree_hash = { path = "../tree_hash" } -serde = "1.0" -serde_derive = "1.0" -eth2_ssz = { path = "../ssz" } -typenum = "1.10" diff --git a/eth2/utils/fixed_len_vec/src/impls.rs b/eth2/utils/fixed_len_vec/src/impls.rs deleted file mode 100644 index 691c8ee89..000000000 --- a/eth2/utils/fixed_len_vec/src/impls.rs +++ /dev/null @@ -1,140 +0,0 @@ -use super::*; - -impl tree_hash::TreeHash for FixedLenVec -where - T: tree_hash::TreeHash, -{ - fn tree_hash_type() -> tree_hash::TreeHashType { - tree_hash::TreeHashType::Vector - } - - fn tree_hash_packed_encoding(&self) -> Vec { - unreachable!("Vector should never be packed.") - } - - fn tree_hash_packing_factor() -> usize { - unreachable!("Vector should never be packed.") - } - - fn tree_hash_root(&self) -> Vec { - tree_hash::impls::vec_tree_hash_root(&self.vec) - } -} - -impl cached_tree_hash::CachedTreeHash for FixedLenVec -where - T: cached_tree_hash::CachedTreeHash + tree_hash::TreeHash, -{ - fn new_tree_hash_cache( - &self, - depth: usize, - ) -> Result { - let (cache, _overlay) = cached_tree_hash::vec::new_tree_hash_cache(&self.vec, depth)?; - - Ok(cache) - } - - fn tree_hash_cache_schema(&self, depth: usize) -> cached_tree_hash::BTreeSchema { - cached_tree_hash::vec::produce_schema(&self.vec, depth) - } - - fn update_tree_hash_cache( - &self, - cache: &mut cached_tree_hash::TreeHashCache, - ) -> Result<(), cached_tree_hash::Error> { - cached_tree_hash::vec::update_tree_hash_cache(&self.vec, cache)?; - - Ok(()) - } -} - -impl ssz::Encode for FixedLenVec -where - T: ssz::Encode, -{ - fn is_ssz_fixed_len() -> bool { - true - } - - fn ssz_fixed_len() -> usize { - if ::is_ssz_fixed_len() { - T::ssz_fixed_len() * N::to_usize() - } else { - ssz::BYTES_PER_LENGTH_OFFSET - } - } - - fn ssz_append(&self, buf: &mut Vec) { - if T::is_ssz_fixed_len() { - buf.reserve(T::ssz_fixed_len() * self.len()); - - for item in &self.vec { - item.ssz_append(buf); - } - } else { - let mut encoder = ssz::SszEncoder::list(buf, self.len() * ssz::BYTES_PER_LENGTH_OFFSET); - - for item in &self.vec { - encoder.append(item); - } - - encoder.finalize(); - } - } -} - -impl ssz::Decode for FixedLenVec -where - T: ssz::Decode + Default, -{ - fn is_ssz_fixed_len() -> bool { - T::is_ssz_fixed_len() - } - - fn ssz_fixed_len() -> usize { - if ::is_ssz_fixed_len() { - T::ssz_fixed_len() * N::to_usize() - } else { - ssz::BYTES_PER_LENGTH_OFFSET - } - } - - fn from_ssz_bytes(bytes: &[u8]) -> Result { - if bytes.is_empty() { - Ok(FixedLenVec::from(vec![])) - } else if T::is_ssz_fixed_len() { - bytes - .chunks(T::ssz_fixed_len()) - .map(|chunk| T::from_ssz_bytes(chunk)) - .collect::, _>>() - .and_then(|vec| Ok(vec.into())) - } else { - ssz::decode_list_of_variable_length_items(bytes).and_then(|vec| Ok(vec.into())) - } - } -} - -#[cfg(test)] -mod ssz_tests { - use super::*; - use ssz::*; - use typenum::*; - - #[test] - fn encode() { - let vec: FixedLenVec = vec![0; 2].into(); - assert_eq!(vec.as_ssz_bytes(), vec![0, 0, 0, 0]); - assert_eq!( as Encode>::ssz_fixed_len(), 4); - } - - fn round_trip(item: T) { - let encoded = &item.as_ssz_bytes(); - assert_eq!(T::from_ssz_bytes(&encoded), Ok(item)); - } - - #[test] - fn u16_len_8() { - round_trip::>(vec![42; 8].into()); - round_trip::>(vec![0; 8].into()); - } -} diff --git a/eth2/utils/fixed_len_vec/src/lib.rs b/eth2/utils/fixed_len_vec/src/lib.rs deleted file mode 100644 index b8a3292bd..000000000 --- a/eth2/utils/fixed_len_vec/src/lib.rs +++ /dev/null @@ -1,134 +0,0 @@ -use serde_derive::{Deserialize, Serialize}; -use std::marker::PhantomData; -use std::ops::{Deref, Index, IndexMut}; -use std::slice::SliceIndex; -use typenum::Unsigned; - -pub use typenum; - -mod impls; - -#[derive(Debug, PartialEq, Clone, Serialize, Deserialize)] -#[serde(transparent)] -pub struct FixedLenVec { - vec: Vec, - _phantom: PhantomData, -} - -impl FixedLenVec { - pub fn len(&self) -> usize { - self.vec.len() - } - - pub fn is_empty(&self) -> bool { - self.len() == 0 - } - - pub fn capacity() -> usize { - N::to_usize() - } -} - -impl From> for FixedLenVec { - fn from(mut vec: Vec) -> Self { - vec.resize_with(Self::capacity(), Default::default); - - Self { - vec, - _phantom: PhantomData, - } - } -} - -impl Into> for FixedLenVec { - fn into(self) -> Vec { - self.vec - } -} - -impl Default for FixedLenVec { - fn default() -> Self { - Self { - vec: Vec::default(), - _phantom: PhantomData, - } - } -} - -impl> Index for FixedLenVec { - type Output = I::Output; - - #[inline] - fn index(&self, index: I) -> &Self::Output { - Index::index(&self.vec, index) - } -} - -impl> IndexMut for FixedLenVec { - #[inline] - fn index_mut(&mut self, index: I) -> &mut Self::Output { - IndexMut::index_mut(&mut self.vec, index) - } -} - -impl Deref for FixedLenVec { - type Target = [T]; - - fn deref(&self) -> &[T] { - &self.vec[..] - } -} - -#[cfg(test)] -mod test { - use super::*; - use typenum::*; - - #[test] - fn indexing() { - let vec = vec![1, 2]; - - let mut fixed: FixedLenVec = vec.clone().into(); - - assert_eq!(fixed[0], 1); - assert_eq!(&fixed[0..1], &vec[0..1]); - assert_eq!((&fixed[..]).len(), 8192); - - fixed[1] = 3; - assert_eq!(fixed[1], 3); - } - - #[test] - fn length() { - let vec = vec![42; 5]; - let fixed: FixedLenVec = FixedLenVec::from(vec.clone()); - assert_eq!(&fixed[..], &vec[0..4]); - - let vec = vec![42; 3]; - let fixed: FixedLenVec = FixedLenVec::from(vec.clone()); - assert_eq!(&fixed[0..3], &vec[..]); - assert_eq!(&fixed[..], &vec![42, 42, 42, 0][..]); - - let vec = vec![]; - let fixed: FixedLenVec = FixedLenVec::from(vec.clone()); - assert_eq!(&fixed[..], &vec![0, 0, 0, 0][..]); - } - - #[test] - fn deref() { - let vec = vec![0, 2, 4, 6]; - let fixed: FixedLenVec = FixedLenVec::from(vec); - - assert_eq!(fixed.get(0), Some(&0)); - assert_eq!(fixed.get(3), Some(&6)); - assert_eq!(fixed.get(4), None); - } -} - -#[cfg(test)] -mod tests { - #[test] - fn it_works() { - assert_eq!(2 + 2, 4); - } -} diff --git a/eth2/utils/honey-badger-split/Cargo.toml b/eth2/utils/honey-badger-split/Cargo.toml deleted file mode 100644 index 87246eafd..000000000 --- a/eth2/utils/honey-badger-split/Cargo.toml +++ /dev/null @@ -1,7 +0,0 @@ -[package] -name = "honey-badger-split" -version = "0.1.0" -authors = ["Paul Hauner "] -edition = "2018" - -[dependencies] diff --git a/eth2/utils/honey-badger-split/src/lib.rs b/eth2/utils/honey-badger-split/src/lib.rs deleted file mode 100644 index 6b5b325c9..000000000 --- a/eth2/utils/honey-badger-split/src/lib.rs +++ /dev/null @@ -1,117 +0,0 @@ -/// A function for splitting a list into N pieces. -/// -/// We have titled it the "honey badger split" because of its robustness. It don't care. - -/// Iterator for the honey_badger_split function -pub struct Split<'a, T: 'a> { - n: usize, - current_pos: usize, - list: &'a [T], - list_length: usize, -} - -impl<'a, T> Iterator for Split<'a, T> { - type Item = &'a [T]; - - fn next(&mut self) -> Option { - self.current_pos += 1; - if self.current_pos <= self.n { - match self.list.get( - self.list_length * (self.current_pos - 1) / self.n - ..self.list_length * self.current_pos / self.n, - ) { - Some(v) => Some(v), - None => unreachable!(), - } - } else { - None - } - } -} - -/// Splits a slice into chunks of size n. All positive n values are applicable, -/// hence the honey_badger prefix. -/// -/// Returns an iterator over the original list. -pub trait SplitExt { - fn honey_badger_split(&self, n: usize) -> Split; -} - -impl SplitExt for [T] { - fn honey_badger_split(&self, n: usize) -> Split { - Split { - n, - current_pos: 0, - list: &self, - list_length: self.len(), - } - } -} - -#[cfg(test)] -mod tests { - use super::*; - - fn alternative_split_at_index(indices: &[T], index: usize, count: usize) -> &[T] { - let start = (indices.len() * index) / count; - let end = (indices.len() * (index + 1)) / count; - - &indices[start..end] - } - - fn alternative_split(input: &[T], n: usize) -> Vec<&[T]> { - (0..n) - .into_iter() - .map(|i| alternative_split_at_index(&input, i, n)) - .collect() - } - - fn honey_badger_vs_alternative_fn(num_items: usize, num_chunks: usize) { - let input: Vec = (0..num_items).collect(); - - let hb: Vec<&[usize]> = input.honey_badger_split(num_chunks).collect(); - let spec: Vec<&[usize]> = alternative_split(&input, num_chunks); - - assert_eq!(hb, spec); - } - - #[test] - fn vs_eth_spec_fn() { - for i in 0..10 { - for j in 0..10 { - honey_badger_vs_alternative_fn(i, j); - } - } - } - - #[test] - fn test_honey_badger_split() { - /* - * These test cases are generated from the eth2.0 spec `split()` - * function at commit cbd254a. - */ - let input: Vec = vec![0, 1, 2, 3]; - let output: Vec<&[usize]> = input.honey_badger_split(2).collect(); - assert_eq!(output, vec![&[0, 1], &[2, 3]]); - - let input: Vec = vec![0, 1, 2, 3]; - let output: Vec<&[usize]> = input.honey_badger_split(6).collect(); - let expected: Vec<&[usize]> = vec![&[], &[0], &[1], &[], &[2], &[3]]; - assert_eq!(output, expected); - - let input: Vec = vec![0, 1, 2, 3]; - let output: Vec<&[usize]> = input.honey_badger_split(10).collect(); - let expected: Vec<&[usize]> = vec![&[], &[], &[0], &[], &[1], &[], &[], &[2], &[], &[3]]; - assert_eq!(output, expected); - - let input: Vec = vec![0]; - let output: Vec<&[usize]> = input.honey_badger_split(5).collect(); - let expected: Vec<&[usize]> = vec![&[], &[], &[], &[], &[0]]; - assert_eq!(output, expected); - - let input: Vec = vec![0, 1, 2]; - let output: Vec<&[usize]> = input.honey_badger_split(2).collect(); - let expected: Vec<&[usize]> = vec![&[0], &[1, 2]]; - assert_eq!(output, expected); - } -} diff --git a/eth2/utils/ssz/Cargo.toml b/eth2/utils/ssz/Cargo.toml index 928a0e6e9..78e65a977 100644 --- a/eth2/utils/ssz/Cargo.toml +++ b/eth2/utils/ssz/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "eth2_ssz" -version = "0.1.0" +version = "0.1.2" authors = ["Paul Hauner "] edition = "2018" description = "SimpleSerialize (SSZ) as used in Ethereum 2.0" @@ -9,12 +9,7 @@ license = "Apache-2.0" [lib] name = "ssz" -[[bench]] -name = "benches" -harness = false - [dev-dependencies] -criterion = "0.2" eth2_ssz_derive = "0.1.0" [dependencies] diff --git a/eth2/utils/ssz/benches/benches.rs b/eth2/utils/ssz/benches/benches.rs deleted file mode 100644 index 4604b0cd8..000000000 --- a/eth2/utils/ssz/benches/benches.rs +++ /dev/null @@ -1,80 +0,0 @@ -#[macro_use] -extern crate criterion; - -use criterion::black_box; -use criterion::{Benchmark, Criterion}; -use ssz::{Decode, Encode}; -use ssz_derive::{Decode, Encode}; - -#[derive(Clone, Copy, Encode, Decode)] -pub struct FixedLen { - a: u64, - b: u64, - c: u64, - d: u64, -} - -fn criterion_benchmark(c: &mut Criterion) { - let n = 8196; - - let vec: Vec = vec![4242; 8196]; - c.bench( - &format!("vec_of_{}_u64", n), - Benchmark::new("as_ssz_bytes", move |b| { - b.iter_with_setup(|| vec.clone(), |vec| black_box(vec.as_ssz_bytes())) - }) - .sample_size(100), - ); - - let vec: Vec = vec![4242; 8196]; - let bytes = vec.as_ssz_bytes(); - c.bench( - &format!("vec_of_{}_u64", n), - Benchmark::new("from_ssz_bytes", move |b| { - b.iter_with_setup( - || bytes.clone(), - |bytes| { - let vec: Vec = Vec::from_ssz_bytes(&bytes).unwrap(); - black_box(vec) - }, - ) - }) - .sample_size(100), - ); - - let fixed_len = FixedLen { - a: 42, - b: 42, - c: 42, - d: 42, - }; - let fixed_len_vec: Vec = vec![fixed_len; 8196]; - - let vec = fixed_len_vec.clone(); - c.bench( - &format!("vec_of_{}_struct", n), - Benchmark::new("as_ssz_bytes", move |b| { - b.iter_with_setup(|| vec.clone(), |vec| black_box(vec.as_ssz_bytes())) - }) - .sample_size(100), - ); - - let vec = fixed_len_vec.clone(); - let bytes = vec.as_ssz_bytes(); - c.bench( - &format!("vec_of_{}_struct", n), - Benchmark::new("from_ssz_bytes", move |b| { - b.iter_with_setup( - || bytes.clone(), - |bytes| { - let vec: Vec = Vec::from_ssz_bytes(&bytes).unwrap(); - black_box(vec) - }, - ) - }) - .sample_size(100), - ); -} - -criterion_group!(benches, criterion_benchmark); -criterion_main!(benches); diff --git a/eth2/utils/ssz/src/lib.rs b/eth2/utils/ssz/src/lib.rs index 886433f14..696d36cbf 100644 --- a/eth2/utils/ssz/src/lib.rs +++ b/eth2/utils/ssz/src/lib.rs @@ -1,9 +1,9 @@ //! Provides encoding (serialization) and decoding (deserialization) in the SimpleSerialize (SSZ) //! format designed for use in Ethereum 2.0. //! -//! Conforms to -//! [v0.7.1](https://github.com/ethereum/eth2.0-specs/blob/v0.7.1/specs/simple-serialize.md) of the -//! Ethereum 2.0 specification. +//! Adheres to the Ethereum 2.0 [SSZ +//! specification](https://github.com/ethereum/eth2.0-specs/blob/v0.8.1/specs/simple-serialize.md) +//! at v0.8.1 . //! //! ## Example //! diff --git a/eth2/utils/ssz_types/Cargo.toml b/eth2/utils/ssz_types/Cargo.toml index 2e4cbc899..6b59a655d 100644 --- a/eth2/utils/ssz_types/Cargo.toml +++ b/eth2/utils/ssz_types/Cargo.toml @@ -1,9 +1,12 @@ [package] -name = "ssz_types" +name = "eth2_ssz_types" version = "0.1.0" authors = ["Paul Hauner "] edition = "2018" +[lib] +name = "ssz_types" + [dependencies] cached_tree_hash = { path = "../cached_tree_hash" } tree_hash = { path = "../tree_hash" } @@ -15,3 +18,4 @@ typenum = "1.10" [dev-dependencies] serde_yaml = "0.8" +tree_hash_derive = { path = "../tree_hash_derive" } diff --git a/eth2/utils/ssz_types/src/bitfield.rs b/eth2/utils/ssz_types/src/bitfield.rs index de9a198f3..78182712b 100644 --- a/eth2/utils/ssz_types/src/bitfield.rs +++ b/eth2/utils/ssz_types/src/bitfield.rs @@ -1,3 +1,4 @@ +use crate::tree_hash::bitfield_bytes_tree_hash_root; use crate::Error; use core::marker::PhantomData; use serde::de::{Deserialize, Deserializer}; @@ -82,9 +83,9 @@ pub type BitVector = Bitfield>; /// /// ## Note /// -/// The internal representation of the bitfield is the same as that required by SSZ. The highest +/// The internal representation of the bitfield is the same as that required by SSZ. The lowest /// byte (by `Vec` index) stores the lowest bit-indices and the right-most bit stores the lowest -/// bit-index. E.g., `vec![0b0000_0010, 0b0000_0001]` has bits `0, 9` set. +/// bit-index. E.g., `vec![0b0000_0001, 0b0000_0010]` has bits `0, 9` set. #[derive(Clone, Debug, PartialEq)] pub struct Bitfield { bytes: Vec, @@ -136,15 +137,21 @@ impl Bitfield> { /// ``` pub fn into_bytes(self) -> Vec { let len = self.len(); - let mut bytes = self.as_slice().to_vec(); + let mut bytes = self.bytes; - while bytes_for_bit_len(len + 1) > bytes.len() { - bytes.insert(0, 0); - } + bytes.resize(bytes_for_bit_len(len + 1), 0); let mut bitfield: Bitfield> = Bitfield::from_raw_bytes(bytes, len + 1) - .expect("Bitfield capacity has been confirmed earlier."); - bitfield.set(len, true).expect("Bitfield index must exist."); + .unwrap_or_else(|_| { + unreachable!( + "Bitfield with {} bytes must have enough capacity for {} bits.", + bytes_for_bit_len(len + 1), + len + 1 + ) + }); + bitfield + .set(len, true) + .expect("len must be in bounds for bitfield."); bitfield.bytes } @@ -171,9 +178,7 @@ impl Bitfield> { let mut bytes = initial_bitfield.into_raw_bytes(); - if bytes_for_bit_len(len) < bytes.len() && bytes != [0] { - bytes.remove(0); - } + bytes.truncate(bytes_for_bit_len(len)); Self::from_raw_bytes(bytes, len) } else { @@ -183,6 +188,34 @@ impl Bitfield> { }) } } + + /// Compute the intersection of two BitLists of potentially different lengths. + /// + /// Return a new BitList with length equal to the shorter of the two inputs. + pub fn intersection(&self, other: &Self) -> Self { + let min_len = std::cmp::min(self.len(), other.len()); + let mut result = Self::with_capacity(min_len).expect("min len always less than N"); + // Bitwise-and the bytes together, starting from the left of each vector. This takes care + // of masking out any entries beyond `min_len` as well, assuming the bitfield doesn't + // contain any set bits beyond its length. + for i in 0..result.bytes.len() { + result.bytes[i] = self.bytes[i] & other.bytes[i]; + } + result + } + + /// Compute the union of two BitLists of potentially different lengths. + /// + /// Return a new BitList with length equal to the longer of the two inputs. + pub fn union(&self, other: &Self) -> Self { + let max_len = std::cmp::max(self.len(), other.len()); + let mut result = Self::with_capacity(max_len).expect("max len always less than N"); + for i in 0..result.bytes.len() { + result.bytes[i] = + self.bytes.get(i).copied().unwrap_or(0) | other.bytes.get(i).copied().unwrap_or(0); + } + result + } } impl Bitfield> { @@ -238,14 +271,13 @@ impl Bitfield { /// /// Returns `None` if `i` is out-of-bounds of `self`. pub fn set(&mut self, i: usize, value: bool) -> Result<(), Error> { - if i < self.len { - let byte = { - let num_bytes = self.bytes.len(); - let offset = i / 8; - self.bytes - .get_mut(num_bytes - offset - 1) - .expect("Cannot be OOB if less than self.len") - }; + let len = self.len; + + if i < len { + let byte = self + .bytes + .get_mut(i / 8) + .ok_or_else(|| Error::OutOfBounds { i, len })?; if value { *byte |= 1 << (i % 8) @@ -264,13 +296,10 @@ impl Bitfield { /// Returns `None` if `i` is out-of-bounds of `self`. pub fn get(&self, i: usize) -> Result { if i < self.len { - let byte = { - let num_bytes = self.bytes.len(); - let offset = i / 8; - self.bytes - .get(num_bytes - offset - 1) - .expect("Cannot be OOB if less than self.len") - }; + let byte = self + .bytes + .get(i / 8) + .ok_or_else(|| Error::OutOfBounds { i, len: self.len })?; Ok(*byte & 1 << (i % 8) > 0) } else { @@ -328,7 +357,7 @@ impl Bitfield { // Ensure there are no bits higher than `bit_len` that are set to true. let (mask, _) = u8::max_value().overflowing_shr(8 - (bit_len as u32 % 8)); - if (bytes.first().expect("Guarded against empty bytes") & !mask) == 0 { + if (bytes.last().expect("Guarded against empty bytes") & !mask) == 0 { Ok(Self { bytes, len: bit_len, @@ -343,10 +372,12 @@ impl Bitfield { /// Returns the `Some(i)` where `i` is the highest index with a set bit. Returns `None` if /// there are no set bits. pub fn highest_set_bit(&self) -> Option { - let byte_i = self.bytes.iter().position(|byte| *byte > 0)?; - let bit_i = 7 - self.bytes[byte_i].leading_zeros() as usize; - - Some((self.bytes.len().saturating_sub(1) - byte_i) * 8 + bit_i) + self.bytes + .iter() + .enumerate() + .rev() + .find(|(_, byte)| **byte > 0) + .map(|(i, byte)| i * 8 + 7 - byte.leading_zeros() as usize) } /// Returns an iterator across bitfield `bool` values, starting at the lowest index. @@ -362,86 +393,51 @@ impl Bitfield { self.bytes.iter().all(|byte| *byte == 0) } - /// Compute the intersection (binary-and) of this bitfield with another. + /// Returns the number of bits that are set to `true`. + pub fn num_set_bits(&self) -> usize { + self.bytes + .iter() + .map(|byte| byte.count_ones() as usize) + .sum() + } + + /// Compute the difference of this Bitfield and another of potentially different length. + pub fn difference(&self, other: &Self) -> Self { + let mut result = self.clone(); + result.difference_inplace(other); + result + } + + /// Compute the difference of this Bitfield and another of potentially different length. + pub fn difference_inplace(&mut self, other: &Self) { + let min_byte_len = std::cmp::min(self.bytes.len(), other.bytes.len()); + + for i in 0..min_byte_len { + self.bytes[i] &= !other.bytes[i]; + } + } + + /// Shift the bits to higher indices, filling the lower indices with zeroes. /// - /// Returns `None` if `self.is_comparable(other) == false`. - pub fn intersection(&self, other: &Self) -> Option { - if self.is_comparable(other) { - let mut res = self.clone(); - res.intersection_inplace(other); - Some(res) - } else { - None - } - } - - /// Like `intersection` but in-place (updates `self`). - pub fn intersection_inplace(&mut self, other: &Self) -> Option<()> { - if self.is_comparable(other) { - for i in 0..self.bytes.len() { - self.bytes[i] &= other.bytes[i]; + /// The amount to shift by, `n`, must be less than or equal to `self.len()`. + pub fn shift_up(&mut self, n: usize) -> Result<(), Error> { + if n <= self.len() { + // Shift the bits up (starting from the high indices to avoid overwriting) + for i in (n..self.len()).rev() { + self.set(i, self.get(i - n)?)?; } - Some(()) - } else { - None - } - } - - /// Compute the union (binary-or) of this bitfield with another. - /// - /// Returns `None` if `self.is_comparable(other) == false`. - pub fn union(&self, other: &Self) -> Option { - if self.is_comparable(other) { - let mut res = self.clone(); - res.union_inplace(other); - Some(res) - } else { - None - } - } - - /// Like `union` but in-place (updates `self`). - pub fn union_inplace(&mut self, other: &Self) -> Option<()> { - if self.is_comparable(other) { - for i in 0..self.bytes.len() { - self.bytes[i] |= other.bytes[i]; + // Zero the low bits + for i in 0..n { + self.set(i, false).unwrap(); } - Some(()) + Ok(()) } else { - None + Err(Error::OutOfBounds { + i: n, + len: self.len(), + }) } } - - /// Compute the difference (binary-minus) of this bitfield with another. Lengths must match. - /// - /// Returns `None` if `self.is_comparable(other) == false`. - pub fn difference(&self, other: &Self) -> Option { - if self.is_comparable(other) { - let mut res = self.clone(); - res.difference_inplace(other); - Some(res) - } else { - None - } - } - - /// Like `difference` but in-place (updates `self`). - pub fn difference_inplace(&mut self, other: &Self) -> Option<()> { - if self.is_comparable(other) { - for i in 0..self.bytes.len() { - self.bytes[i] &= !other.bytes[i]; - } - Some(()) - } else { - None - } - } - - /// Returns true if `self` and `other` have the same lengths and can be used in binary - /// comparison operations. - pub fn is_comparable(&self, other: &Self) -> bool { - (self.len() == other.len()) && (self.bytes.len() == other.bytes.len()) - } } /// Returns the minimum required bytes to represent a given number of bits. @@ -505,7 +501,11 @@ impl Encode for Bitfield> { impl Decode for Bitfield> { fn is_ssz_fixed_len() -> bool { - false + true + } + + fn ssz_fixed_len() -> usize { + bytes_for_bit_len(N::to_usize()) } fn from_ssz_bytes(bytes: &[u8]) -> Result { @@ -573,106 +573,72 @@ impl tree_hash::TreeHash for Bitfield> { } fn tree_hash_root(&self) -> Vec { - // TODO: pad this out to max length. - self.as_ssz_bytes().tree_hash_root() + // Note: we use `as_slice` because it does _not_ have the length-delimiting bit set (or + // present). + let root = bitfield_bytes_tree_hash_root::(self.as_slice()); + tree_hash::mix_in_length(&root, self.len()) } } impl tree_hash::TreeHash for Bitfield> { fn tree_hash_type() -> tree_hash::TreeHashType { - // TODO: move this to be a vector. - tree_hash::TreeHashType::List + tree_hash::TreeHashType::Vector } fn tree_hash_packed_encoding(&self) -> Vec { - // TODO: move this to be a vector. unreachable!("Vector should never be packed.") } fn tree_hash_packing_factor() -> usize { - // TODO: move this to be a vector. unreachable!("Vector should never be packed.") } fn tree_hash_root(&self) -> Vec { - self.as_ssz_bytes().tree_hash_root() + bitfield_bytes_tree_hash_root::(self.as_slice()) } } impl cached_tree_hash::CachedTreeHash for Bitfield> { fn new_tree_hash_cache( &self, - depth: usize, + _depth: usize, ) -> Result { - let bytes = self.clone().into_bytes(); - - let (mut cache, schema) = cached_tree_hash::vec::new_tree_hash_cache(&bytes, depth)?; - - cache.add_length_nodes(schema.into_overlay(0).chunk_range(), bytes.len())?; - - Ok(cache) + unimplemented!("CachedTreeHash is not implemented for BitList") } fn num_tree_hash_cache_chunks(&self) -> usize { - // Add two extra nodes to cater for the node before and after to allow mixing-in length. - cached_tree_hash::BTreeOverlay::new(self, 0, 0).num_chunks() + 2 + unimplemented!("CachedTreeHash is not implemented for BitList") } - fn tree_hash_cache_schema(&self, depth: usize) -> cached_tree_hash::BTreeSchema { - let bytes = self.clone().into_bytes(); - cached_tree_hash::vec::produce_schema(&bytes, depth) + fn tree_hash_cache_schema(&self, _depth: usize) -> cached_tree_hash::BTreeSchema { + unimplemented!("CachedTreeHash is not implemented for BitList") } fn update_tree_hash_cache( &self, - cache: &mut cached_tree_hash::TreeHashCache, + _cache: &mut cached_tree_hash::TreeHashCache, ) -> Result<(), cached_tree_hash::Error> { - let bytes = self.clone().into_bytes(); - - // Skip the length-mixed-in root node. - cache.chunk_index += 1; - - // Update the cache, returning the new overlay. - let new_overlay = cached_tree_hash::vec::update_tree_hash_cache(&bytes, cache)?; - - // Mix in length - cache.mix_in_length(new_overlay.chunk_range(), bytes.len())?; - - // Skip an extra node to clear the length node. - cache.chunk_index += 1; - - Ok(()) + unimplemented!("CachedTreeHash is not implemented for BitList") } } impl cached_tree_hash::CachedTreeHash for Bitfield> { fn new_tree_hash_cache( &self, - depth: usize, + _depth: usize, ) -> Result { - let (cache, _schema) = - cached_tree_hash::vec::new_tree_hash_cache(&ssz::ssz_encode(self), depth)?; - - Ok(cache) + unimplemented!("CachedTreeHash is not implemented for BitVec") } - fn tree_hash_cache_schema(&self, depth: usize) -> cached_tree_hash::BTreeSchema { - let lengths = vec![ - 1; - cached_tree_hash::merkleize::num_unsanitized_leaves(bytes_for_bit_len( - N::to_usize() - )) - ]; - cached_tree_hash::BTreeSchema::from_lengths(depth, lengths) + fn tree_hash_cache_schema(&self, _depth: usize) -> cached_tree_hash::BTreeSchema { + unimplemented!("CachedTreeHash is not implemented for BitVec") } fn update_tree_hash_cache( &self, - cache: &mut cached_tree_hash::TreeHashCache, + _cache: &mut cached_tree_hash::TreeHashCache, ) -> Result<(), cached_tree_hash::Error> { - cached_tree_hash::vec::update_tree_hash_cache(&ssz::ssz_encode(self), cache)?; - - Ok(()) + unimplemented!("CachedTreeHash is not implemented for BitVec") } } @@ -724,10 +690,12 @@ mod bitvector { assert!(BitVector8::from_ssz_bytes(&[0b0000_0000]).is_ok()); assert!(BitVector8::from_ssz_bytes(&[1, 0b0000_0000]).is_err()); + assert!(BitVector8::from_ssz_bytes(&[0b0000_0000, 1]).is_err()); assert!(BitVector8::from_ssz_bytes(&[0b0000_0001]).is_ok()); assert!(BitVector8::from_ssz_bytes(&[0b0000_0010]).is_ok()); - assert!(BitVector8::from_ssz_bytes(&[0b0000_0001, 0b0000_0100]).is_err()); - assert!(BitVector8::from_ssz_bytes(&[0b0000_0010, 0b0000_0100]).is_err()); + assert!(BitVector8::from_ssz_bytes(&[0b0000_0100, 0b0000_0001]).is_err()); + assert!(BitVector8::from_ssz_bytes(&[0b0000_0100, 0b0000_0010]).is_err()); + assert!(BitVector8::from_ssz_bytes(&[0b0000_0100, 0b0000_0100]).is_err()); assert!(BitVector16::from_ssz_bytes(&[0b0000_0000]).is_err()); assert!(BitVector16::from_ssz_bytes(&[0b0000_0000, 0b0000_0000]).is_ok()); @@ -806,7 +774,7 @@ mod bitlist { assert_eq!( BitList8::with_capacity(8).unwrap().as_ssz_bytes(), - vec![0b0000_0001, 0b0000_0000], + vec![0b0000_0000, 0b0000_0001], ); assert_eq!( @@ -818,17 +786,17 @@ mod bitlist { for i in 0..8 { b.set(i, true).unwrap(); } - assert_eq!(b.as_ssz_bytes(), vec![0b0000_0001, 255]); + assert_eq!(b.as_ssz_bytes(), vec![255, 0b0000_0001]); let mut b = BitList8::with_capacity(8).unwrap(); for i in 0..4 { b.set(i, true).unwrap(); } - assert_eq!(b.as_ssz_bytes(), vec![0b0000_0001, 0b0000_1111]); + assert_eq!(b.as_ssz_bytes(), vec![0b0000_1111, 0b0000_0001]); assert_eq!( BitList16::with_capacity(16).unwrap().as_ssz_bytes(), - vec![0b0000_0001, 0b0000_0000, 0b0000_0000] + vec![0b0000_0000, 0b0000_0000, 0b0000_0001] ); } @@ -848,8 +816,9 @@ mod bitlist { assert!(BitList8::from_ssz_bytes(&[0b0000_0001]).is_ok()); assert!(BitList8::from_ssz_bytes(&[0b0000_0010]).is_ok()); - assert!(BitList8::from_ssz_bytes(&[0b0000_0001, 0b0000_0100]).is_ok()); - assert!(BitList8::from_ssz_bytes(&[0b0000_0010, 0b0000_0100]).is_err()); + assert!(BitList8::from_ssz_bytes(&[0b0000_0001, 0b0000_0001]).is_ok()); + assert!(BitList8::from_ssz_bytes(&[0b0000_0001, 0b0000_0010]).is_err()); + assert!(BitList8::from_ssz_bytes(&[0b0000_0001, 0b0000_0100]).is_err()); } #[test] @@ -919,19 +888,19 @@ mod bitlist { assert!(BitList1024::from_raw_bytes(vec![0b0111_1111], 7).is_ok()); assert!(BitList1024::from_raw_bytes(vec![0b1111_1111], 8).is_ok()); - assert!(BitList1024::from_raw_bytes(vec![0b0000_0001, 0b1111_1111], 9).is_ok()); - assert!(BitList1024::from_raw_bytes(vec![0b0000_0011, 0b1111_1111], 10).is_ok()); - assert!(BitList1024::from_raw_bytes(vec![0b0000_0111, 0b1111_1111], 11).is_ok()); - assert!(BitList1024::from_raw_bytes(vec![0b0000_1111, 0b1111_1111], 12).is_ok()); - assert!(BitList1024::from_raw_bytes(vec![0b0001_1111, 0b1111_1111], 13).is_ok()); - assert!(BitList1024::from_raw_bytes(vec![0b0011_1111, 0b1111_1111], 14).is_ok()); - assert!(BitList1024::from_raw_bytes(vec![0b0111_1111, 0b1111_1111], 15).is_ok()); + assert!(BitList1024::from_raw_bytes(vec![0b1111_1111, 0b0000_0001], 9).is_ok()); + assert!(BitList1024::from_raw_bytes(vec![0b1111_1111, 0b0000_0011], 10).is_ok()); + assert!(BitList1024::from_raw_bytes(vec![0b1111_1111, 0b0000_0111], 11).is_ok()); + assert!(BitList1024::from_raw_bytes(vec![0b1111_1111, 0b0000_1111], 12).is_ok()); + assert!(BitList1024::from_raw_bytes(vec![0b1111_1111, 0b0001_1111], 13).is_ok()); + assert!(BitList1024::from_raw_bytes(vec![0b1111_1111, 0b0011_1111], 14).is_ok()); + assert!(BitList1024::from_raw_bytes(vec![0b1111_1111, 0b0111_1111], 15).is_ok()); assert!(BitList1024::from_raw_bytes(vec![0b1111_1111, 0b1111_1111], 16).is_ok()); for i in 0..8 { assert!(BitList1024::from_raw_bytes(vec![], i).is_err()); assert!(BitList1024::from_raw_bytes(vec![0b1111_1111], i).is_err()); - assert!(BitList1024::from_raw_bytes(vec![0b1111_1110, 0b0000_0000], i).is_err()); + assert!(BitList1024::from_raw_bytes(vec![0b0000_0000, 0b1111_1110], i).is_err()); } assert!(BitList1024::from_raw_bytes(vec![0b0000_0001], 0).is_err()); @@ -945,13 +914,13 @@ mod bitlist { assert!(BitList1024::from_raw_bytes(vec![0b0111_1111], 6).is_err()); assert!(BitList1024::from_raw_bytes(vec![0b1111_1111], 7).is_err()); - assert!(BitList1024::from_raw_bytes(vec![0b0000_0001, 0b1111_1111], 8).is_err()); - assert!(BitList1024::from_raw_bytes(vec![0b0000_0011, 0b1111_1111], 9).is_err()); - assert!(BitList1024::from_raw_bytes(vec![0b0000_0111, 0b1111_1111], 10).is_err()); - assert!(BitList1024::from_raw_bytes(vec![0b0000_1111, 0b1111_1111], 11).is_err()); - assert!(BitList1024::from_raw_bytes(vec![0b0001_1111, 0b1111_1111], 12).is_err()); - assert!(BitList1024::from_raw_bytes(vec![0b0011_1111, 0b1111_1111], 13).is_err()); - assert!(BitList1024::from_raw_bytes(vec![0b0111_1111, 0b1111_1111], 14).is_err()); + assert!(BitList1024::from_raw_bytes(vec![0b1111_1111, 0b0000_0001], 8).is_err()); + assert!(BitList1024::from_raw_bytes(vec![0b1111_1111, 0b0000_0011], 9).is_err()); + assert!(BitList1024::from_raw_bytes(vec![0b1111_1111, 0b0000_0111], 10).is_err()); + assert!(BitList1024::from_raw_bytes(vec![0b1111_1111, 0b0000_1111], 11).is_err()); + assert!(BitList1024::from_raw_bytes(vec![0b1111_1111, 0b0001_1111], 12).is_err()); + assert!(BitList1024::from_raw_bytes(vec![0b1111_1111, 0b0011_1111], 13).is_err()); + assert!(BitList1024::from_raw_bytes(vec![0b1111_1111, 0b0111_1111], 14).is_err()); assert!(BitList1024::from_raw_bytes(vec![0b1111_1111, 0b1111_1111], 15).is_err()); } @@ -1006,47 +975,47 @@ mod bitlist { bitfield.set(0, true).unwrap(); assert_eq!( bitfield.clone().into_raw_bytes(), - vec![0b0000_0000, 0b0000_0001] + vec![0b0000_0001, 0b0000_0000] ); bitfield.set(1, true).unwrap(); assert_eq!( bitfield.clone().into_raw_bytes(), - vec![0b0000_0000, 0b0000_0011] + vec![0b0000_0011, 0b0000_0000] ); bitfield.set(2, true).unwrap(); assert_eq!( bitfield.clone().into_raw_bytes(), - vec![0b0000_0000, 0b0000_0111] + vec![0b0000_0111, 0b0000_0000] ); bitfield.set(3, true).unwrap(); assert_eq!( bitfield.clone().into_raw_bytes(), - vec![0b0000_0000, 0b0000_1111] + vec![0b0000_1111, 0b0000_0000] ); bitfield.set(4, true).unwrap(); assert_eq!( bitfield.clone().into_raw_bytes(), - vec![0b0000_0000, 0b0001_1111] + vec![0b0001_1111, 0b0000_0000] ); bitfield.set(5, true).unwrap(); assert_eq!( bitfield.clone().into_raw_bytes(), - vec![0b0000_0000, 0b0011_1111] + vec![0b0011_1111, 0b0000_0000] ); bitfield.set(6, true).unwrap(); assert_eq!( bitfield.clone().into_raw_bytes(), - vec![0b0000_0000, 0b0111_1111] + vec![0b0111_1111, 0b0000_0000] ); bitfield.set(7, true).unwrap(); assert_eq!( bitfield.clone().into_raw_bytes(), - vec![0b0000_0000, 0b1111_1111] + vec![0b1111_1111, 0b0000_0000] ); bitfield.set(8, true).unwrap(); assert_eq!( bitfield.clone().into_raw_bytes(), - vec![0b0000_0001, 0b1111_1111] + vec![0b1111_1111, 0b0000_0001] ); } @@ -1058,14 +1027,14 @@ mod bitlist { ); assert_eq!( - BitList1024::from_raw_bytes(vec![0b0000_000, 0b0000_0001], 16) + BitList1024::from_raw_bytes(vec![0b0000_0001, 0b0000_0000], 16) .unwrap() .highest_set_bit(), Some(0) ); assert_eq!( - BitList1024::from_raw_bytes(vec![0b0000_000, 0b0000_0010], 16) + BitList1024::from_raw_bytes(vec![0b0000_0010, 0b0000_0000], 16) .unwrap() .highest_set_bit(), Some(1) @@ -1079,7 +1048,7 @@ mod bitlist { ); assert_eq!( - BitList1024::from_raw_bytes(vec![0b1000_0000, 0b0000_0000], 16) + BitList1024::from_raw_bytes(vec![0b0000_0000, 0b1000_0000], 16) .unwrap() .highest_set_bit(), Some(15) @@ -1092,13 +1061,30 @@ mod bitlist { let b = BitList1024::from_raw_bytes(vec![0b1011, 0b1001], 16).unwrap(); let c = BitList1024::from_raw_bytes(vec![0b1000, 0b0001], 16).unwrap(); - assert_eq!(a.intersection(&b).unwrap(), c); - assert_eq!(b.intersection(&a).unwrap(), c); - assert_eq!(a.intersection(&c).unwrap(), c); - assert_eq!(b.intersection(&c).unwrap(), c); - assert_eq!(a.intersection(&a).unwrap(), a); - assert_eq!(b.intersection(&b).unwrap(), b); - assert_eq!(c.intersection(&c).unwrap(), c); + assert_eq!(a.intersection(&b), c); + assert_eq!(b.intersection(&a), c); + assert_eq!(a.intersection(&c), c); + assert_eq!(b.intersection(&c), c); + assert_eq!(a.intersection(&a), a); + assert_eq!(b.intersection(&b), b); + assert_eq!(c.intersection(&c), c); + } + + #[test] + fn intersection_diff_length() { + let a = BitList1024::from_bytes(vec![0b0010_1110, 0b0010_1011]).unwrap(); + let b = BitList1024::from_bytes(vec![0b0010_1101, 0b0000_0001]).unwrap(); + let c = BitList1024::from_bytes(vec![0b0010_1100, 0b0000_0001]).unwrap(); + let d = BitList1024::from_bytes(vec![0b0010_1110, 0b1111_1111, 0b1111_1111]).unwrap(); + + assert_eq!(a.len(), 13); + assert_eq!(b.len(), 8); + assert_eq!(c.len(), 8); + assert_eq!(d.len(), 23); + assert_eq!(a.intersection(&b), c); + assert_eq!(b.intersection(&a), c); + assert_eq!(a.intersection(&d), a); + assert_eq!(d.intersection(&a), a); } #[test] @@ -1107,11 +1093,25 @@ mod bitlist { let b = BitList1024::from_raw_bytes(vec![0b1011, 0b1001], 16).unwrap(); let c = BitList1024::from_raw_bytes(vec![0b1111, 0b1001], 16).unwrap(); - assert_eq!(a.union(&b).unwrap(), c); - assert_eq!(b.union(&a).unwrap(), c); - assert_eq!(a.union(&a).unwrap(), a); - assert_eq!(b.union(&b).unwrap(), b); - assert_eq!(c.union(&c).unwrap(), c); + assert_eq!(a.union(&b), c); + assert_eq!(b.union(&a), c); + assert_eq!(a.union(&a), a); + assert_eq!(b.union(&b), b); + assert_eq!(c.union(&c), c); + } + + #[test] + fn union_diff_length() { + let a = BitList1024::from_bytes(vec![0b0010_1011, 0b0010_1110]).unwrap(); + let b = BitList1024::from_bytes(vec![0b0000_0001, 0b0010_1101]).unwrap(); + let c = BitList1024::from_bytes(vec![0b0010_1011, 0b0010_1111]).unwrap(); + let d = BitList1024::from_bytes(vec![0b0010_1011, 0b1011_1110, 0b1000_1101]).unwrap(); + + assert_eq!(a.len(), c.len()); + assert_eq!(a.union(&b), c); + assert_eq!(b.union(&a), c); + assert_eq!(a.union(&d), d); + assert_eq!(d.union(&a), d); } #[test] @@ -1121,9 +1121,44 @@ mod bitlist { let a_b = BitList1024::from_raw_bytes(vec![0b0100, 0b0000], 16).unwrap(); let b_a = BitList1024::from_raw_bytes(vec![0b0011, 0b1000], 16).unwrap(); - assert_eq!(a.difference(&b).unwrap(), a_b); - assert_eq!(b.difference(&a).unwrap(), b_a); - assert!(a.difference(&a).unwrap().is_zero()); + assert_eq!(a.difference(&b), a_b); + assert_eq!(b.difference(&a), b_a); + assert!(a.difference(&a).is_zero()); + } + + #[test] + fn difference_diff_length() { + let a = BitList1024::from_raw_bytes(vec![0b0110, 0b1100, 0b0011], 24).unwrap(); + let b = BitList1024::from_raw_bytes(vec![0b1011, 0b1001], 16).unwrap(); + let a_b = BitList1024::from_raw_bytes(vec![0b0100, 0b0100, 0b0011], 24).unwrap(); + let b_a = BitList1024::from_raw_bytes(vec![0b1001, 0b0001], 16).unwrap(); + + assert_eq!(a.difference(&b), a_b); + assert_eq!(b.difference(&a), b_a); + } + + #[test] + fn shift_up() { + let mut a = BitList1024::from_raw_bytes(vec![0b1100_1111, 0b1101_0110], 16).unwrap(); + let mut b = BitList1024::from_raw_bytes(vec![0b1001_1110, 0b1010_1101], 16).unwrap(); + + a.shift_up(1).unwrap(); + assert_eq!(a, b); + a.shift_up(15).unwrap(); + assert!(a.is_zero()); + + b.shift_up(16).unwrap(); + assert!(b.is_zero()); + assert!(b.shift_up(17).is_err()); + } + + #[test] + fn num_set_bits() { + let a = BitList1024::from_raw_bytes(vec![0b1100, 0b0001], 16).unwrap(); + let b = BitList1024::from_raw_bytes(vec![0b1011, 0b1001], 16).unwrap(); + + assert_eq!(a.num_set_bits(), 3); + assert_eq!(b.num_set_bits(), 5); } #[test] diff --git a/eth2/utils/ssz_types/src/fixed_vector.rs b/eth2/utils/ssz_types/src/fixed_vector.rs index 687d7d738..1a467157f 100644 --- a/eth2/utils/ssz_types/src/fixed_vector.rs +++ b/eth2/utils/ssz_types/src/fixed_vector.rs @@ -1,3 +1,4 @@ +use crate::tree_hash::vec_tree_hash_root; use crate::Error; use serde_derive::{Deserialize, Serialize}; use std::marker::PhantomData; @@ -66,6 +67,17 @@ impl FixedVector { } } + /// Create a new vector filled with clones of `elem`. + pub fn from_elem(elem: T) -> Self + where + T: Clone, + { + Self { + vec: vec![elem; N::to_usize()], + _phantom: PhantomData, + } + } + /// Identical to `self.capacity`, returns the type-level constant length. /// /// Exists for compatibility with `Vec`. @@ -134,67 +146,6 @@ impl Deref for FixedVector { } } -#[cfg(test)] -mod test { - use super::*; - use typenum::*; - - #[test] - fn new() { - let vec = vec![42; 5]; - let fixed: Result, _> = FixedVector::new(vec.clone()); - assert!(fixed.is_err()); - - let vec = vec![42; 3]; - let fixed: Result, _> = FixedVector::new(vec.clone()); - assert!(fixed.is_err()); - - let vec = vec![42; 4]; - let fixed: Result, _> = FixedVector::new(vec.clone()); - assert!(fixed.is_ok()); - } - - #[test] - fn indexing() { - let vec = vec![1, 2]; - - let mut fixed: FixedVector = vec.clone().into(); - - assert_eq!(fixed[0], 1); - assert_eq!(&fixed[0..1], &vec[0..1]); - assert_eq!((&fixed[..]).len(), 8192); - - fixed[1] = 3; - assert_eq!(fixed[1], 3); - } - - #[test] - fn length() { - let vec = vec![42; 5]; - let fixed: FixedVector = FixedVector::from(vec.clone()); - assert_eq!(&fixed[..], &vec[0..4]); - - let vec = vec![42; 3]; - let fixed: FixedVector = FixedVector::from(vec.clone()); - assert_eq!(&fixed[0..3], &vec[..]); - assert_eq!(&fixed[..], &vec![42, 42, 42, 0][..]); - - let vec = vec![]; - let fixed: FixedVector = FixedVector::from(vec.clone()); - assert_eq!(&fixed[..], &vec![0, 0, 0, 0][..]); - } - - #[test] - fn deref() { - let vec = vec![0, 2, 4, 6]; - let fixed: FixedVector = FixedVector::from(vec); - - assert_eq!(fixed.get(0), Some(&0)); - assert_eq!(fixed.get(3), Some(&6)); - assert_eq!(fixed.get(4), None); - } -} - impl tree_hash::TreeHash for FixedVector where T: tree_hash::TreeHash, @@ -212,7 +163,7 @@ where } fn tree_hash_root(&self) -> Vec { - tree_hash::impls::vec_tree_hash_root(&self.vec) + vec_tree_hash_root::(&self.vec) } } @@ -222,24 +173,20 @@ where { fn new_tree_hash_cache( &self, - depth: usize, + _depth: usize, ) -> Result { - let (cache, _overlay) = cached_tree_hash::vec::new_tree_hash_cache(&self.vec, depth)?; - - Ok(cache) + unimplemented!("CachedTreeHash is not implemented for FixedVector") } - fn tree_hash_cache_schema(&self, depth: usize) -> cached_tree_hash::BTreeSchema { - cached_tree_hash::vec::produce_schema(&self.vec, depth) + fn tree_hash_cache_schema(&self, _depth: usize) -> cached_tree_hash::BTreeSchema { + unimplemented!("CachedTreeHash is not implemented for FixedVector") } fn update_tree_hash_cache( &self, - cache: &mut cached_tree_hash::TreeHashCache, + _cache: &mut cached_tree_hash::TreeHashCache, ) -> Result<(), cached_tree_hash::Error> { - cached_tree_hash::vec::update_tree_hash_cache(&self.vec, cache)?; - - Ok(()) + unimplemented!("CachedTreeHash is not implemented for FixedVector") } } @@ -310,26 +257,147 @@ where } #[cfg(test)] -mod ssz_tests { +mod test { use super::*; use ssz::*; + use tree_hash::{merkle_root, TreeHash}; + use tree_hash_derive::TreeHash; use typenum::*; #[test] - fn encode() { + fn new() { + let vec = vec![42; 5]; + let fixed: Result, _> = FixedVector::new(vec.clone()); + assert!(fixed.is_err()); + + let vec = vec![42; 3]; + let fixed: Result, _> = FixedVector::new(vec.clone()); + assert!(fixed.is_err()); + + let vec = vec![42; 4]; + let fixed: Result, _> = FixedVector::new(vec.clone()); + assert!(fixed.is_ok()); + } + + #[test] + fn indexing() { + let vec = vec![1, 2]; + + let mut fixed: FixedVector = vec.clone().into(); + + assert_eq!(fixed[0], 1); + assert_eq!(&fixed[0..1], &vec[0..1]); + assert_eq!((&fixed[..]).len(), 8192); + + fixed[1] = 3; + assert_eq!(fixed[1], 3); + } + + #[test] + fn length() { + let vec = vec![42; 5]; + let fixed: FixedVector = FixedVector::from(vec.clone()); + assert_eq!(&fixed[..], &vec[0..4]); + + let vec = vec![42; 3]; + let fixed: FixedVector = FixedVector::from(vec.clone()); + assert_eq!(&fixed[0..3], &vec[..]); + assert_eq!(&fixed[..], &vec![42, 42, 42, 0][..]); + + let vec = vec![]; + let fixed: FixedVector = FixedVector::from(vec.clone()); + assert_eq!(&fixed[..], &vec![0, 0, 0, 0][..]); + } + + #[test] + fn deref() { + let vec = vec![0, 2, 4, 6]; + let fixed: FixedVector = FixedVector::from(vec); + + assert_eq!(fixed.get(0), Some(&0)); + assert_eq!(fixed.get(3), Some(&6)); + assert_eq!(fixed.get(4), None); + } + + #[test] + fn ssz_encode() { let vec: FixedVector = vec![0; 2].into(); assert_eq!(vec.as_ssz_bytes(), vec![0, 0, 0, 0]); assert_eq!( as Encode>::ssz_fixed_len(), 4); } - fn round_trip(item: T) { + fn ssz_round_trip(item: T) { let encoded = &item.as_ssz_bytes(); assert_eq!(T::from_ssz_bytes(&encoded), Ok(item)); } #[test] - fn u16_len_8() { - round_trip::>(vec![42; 8].into()); - round_trip::>(vec![0; 8].into()); + fn ssz_round_trip_u16_len_8() { + ssz_round_trip::>(vec![42; 8].into()); + ssz_round_trip::>(vec![0; 8].into()); + } + + #[test] + fn tree_hash_u8() { + let fixed: FixedVector = FixedVector::from(vec![]); + assert_eq!(fixed.tree_hash_root(), merkle_root(&[0; 8], 0)); + + let fixed: FixedVector = FixedVector::from(vec![0; 1]); + assert_eq!(fixed.tree_hash_root(), merkle_root(&[0; 8], 0)); + + let fixed: FixedVector = FixedVector::from(vec![0; 8]); + assert_eq!(fixed.tree_hash_root(), merkle_root(&[0; 8], 0)); + + let fixed: FixedVector = FixedVector::from(vec![42; 16]); + assert_eq!(fixed.tree_hash_root(), merkle_root(&[42; 16], 0)); + + let source: Vec = (0..16).collect(); + let fixed: FixedVector = FixedVector::from(source.clone()); + assert_eq!(fixed.tree_hash_root(), merkle_root(&source, 0)); + } + + #[derive(Clone, Copy, TreeHash, Default)] + struct A { + a: u32, + b: u32, + } + + fn repeat(input: &[u8], n: usize) -> Vec { + let mut output = vec![]; + + for _ in 0..n { + output.append(&mut input.to_vec()); + } + + output + } + + #[test] + fn tree_hash_composite() { + let a = A { a: 0, b: 1 }; + + let fixed: FixedVector = FixedVector::from(vec![]); + assert_eq!(fixed.tree_hash_root(), merkle_root(&[0; 32], 0)); + + let fixed: FixedVector = FixedVector::from(vec![a]); + assert_eq!(fixed.tree_hash_root(), merkle_root(&a.tree_hash_root(), 0)); + + let fixed: FixedVector = FixedVector::from(vec![a; 8]); + assert_eq!( + fixed.tree_hash_root(), + merkle_root(&repeat(&a.tree_hash_root(), 8), 0) + ); + + let fixed: FixedVector = FixedVector::from(vec![a; 13]); + assert_eq!( + fixed.tree_hash_root(), + merkle_root(&repeat(&a.tree_hash_root(), 13), 0) + ); + + let fixed: FixedVector = FixedVector::from(vec![a; 16]); + assert_eq!( + fixed.tree_hash_root(), + merkle_root(&repeat(&a.tree_hash_root(), 16), 0) + ); } } diff --git a/eth2/utils/ssz_types/src/lib.rs b/eth2/utils/ssz_types/src/lib.rs index 59869b7c0..b4c96eefb 100644 --- a/eth2/utils/ssz_types/src/lib.rs +++ b/eth2/utils/ssz_types/src/lib.rs @@ -8,6 +8,10 @@ //! These structs are required as SSZ serialization and Merklization rely upon type-level lengths //! for padding and verification. //! +//! Adheres to the Ethereum 2.0 [SSZ +//! specification](https://github.com/ethereum/eth2.0-specs/blob/v0.8.1/specs/simple-serialize.md) +//! at v0.8.1 . +//! //! ## Example //! ``` //! use ssz_types::*; @@ -36,6 +40,7 @@ #[macro_use] mod bitfield; mod fixed_vector; +mod tree_hash; mod variable_list; pub use bitfield::{BitList, BitVector, Bitfield}; diff --git a/eth2/utils/ssz_types/src/tree_hash.rs b/eth2/utils/ssz_types/src/tree_hash.rs new file mode 100644 index 000000000..5074034da --- /dev/null +++ b/eth2/utils/ssz_types/src/tree_hash.rs @@ -0,0 +1,48 @@ +use tree_hash::{merkle_root, TreeHash, TreeHashType, BYTES_PER_CHUNK}; +use typenum::Unsigned; + +/// A helper function providing common functionality between the `TreeHash` implementations for +/// `FixedVector` and `VariableList`. +pub fn vec_tree_hash_root(vec: &[T]) -> Vec +where + T: TreeHash, + N: Unsigned, +{ + let (leaves, minimum_chunk_count) = match T::tree_hash_type() { + TreeHashType::Basic => { + let mut leaves = + Vec::with_capacity((BYTES_PER_CHUNK / T::tree_hash_packing_factor()) * vec.len()); + + for item in vec { + leaves.append(&mut item.tree_hash_packed_encoding()); + } + + let values_per_chunk = T::tree_hash_packing_factor(); + let minimum_chunk_count = (N::to_usize() + values_per_chunk - 1) / values_per_chunk; + + (leaves, minimum_chunk_count) + } + TreeHashType::Container | TreeHashType::List | TreeHashType::Vector => { + let mut leaves = Vec::with_capacity(vec.len() * BYTES_PER_CHUNK); + + for item in vec { + leaves.append(&mut item.tree_hash_root()) + } + + let minimum_chunk_count = N::to_usize(); + + (leaves, minimum_chunk_count) + } + }; + + merkle_root(&leaves, minimum_chunk_count) +} + +/// A helper function providing common functionality for finding the Merkle root of some bytes that +/// represent a bitfield. +pub fn bitfield_bytes_tree_hash_root(bytes: &[u8]) -> Vec { + let byte_size = (N::to_usize() + 7) / 8; + let minimum_chunk_count = (byte_size + BYTES_PER_CHUNK - 1) / BYTES_PER_CHUNK; + + merkle_root(bytes, minimum_chunk_count) +} diff --git a/eth2/utils/ssz_types/src/variable_list.rs b/eth2/utils/ssz_types/src/variable_list.rs index 52872ada6..478d41dc9 100644 --- a/eth2/utils/ssz_types/src/variable_list.rs +++ b/eth2/utils/ssz_types/src/variable_list.rs @@ -1,7 +1,8 @@ +use crate::tree_hash::vec_tree_hash_root; use crate::Error; use serde_derive::{Deserialize, Serialize}; use std::marker::PhantomData; -use std::ops::{Deref, Index, IndexMut}; +use std::ops::{Deref, DerefMut, Index, IndexMut}; use std::slice::SliceIndex; use typenum::Unsigned; @@ -68,6 +69,14 @@ impl VariableList { } } + /// Create an empty list. + pub fn empty() -> Self { + Self { + vec: vec![], + _phantom: PhantomData, + } + } + /// Returns the number of values presently in `self`. pub fn len(&self) -> usize { self.vec.len() @@ -99,7 +108,7 @@ impl VariableList { } } -impl From> for VariableList { +impl From> for VariableList { fn from(mut vec: Vec) -> Self { vec.truncate(N::to_usize()); @@ -149,9 +158,109 @@ impl Deref for VariableList { } } +impl DerefMut for VariableList { + fn deref_mut(&mut self) -> &mut [T] { + &mut self.vec[..] + } +} + +impl<'a, T, N: Unsigned> IntoIterator for &'a VariableList { + type Item = &'a T; + type IntoIter = std::slice::Iter<'a, T>; + + fn into_iter(self) -> Self::IntoIter { + self.iter() + } +} + +impl tree_hash::TreeHash for VariableList +where + T: tree_hash::TreeHash, +{ + fn tree_hash_type() -> tree_hash::TreeHashType { + tree_hash::TreeHashType::List + } + + fn tree_hash_packed_encoding(&self) -> Vec { + unreachable!("List should never be packed.") + } + + fn tree_hash_packing_factor() -> usize { + unreachable!("List should never be packed.") + } + + fn tree_hash_root(&self) -> Vec { + let root = vec_tree_hash_root::(&self.vec); + + tree_hash::mix_in_length(&root, self.len()) + } +} + +impl cached_tree_hash::CachedTreeHash for VariableList +where + T: cached_tree_hash::CachedTreeHash + tree_hash::TreeHash, +{ + fn new_tree_hash_cache( + &self, + _depth: usize, + ) -> Result { + unimplemented!("CachedTreeHash is not implemented for VariableList") + } + + fn tree_hash_cache_schema(&self, _depth: usize) -> cached_tree_hash::BTreeSchema { + unimplemented!("CachedTreeHash is not implemented for VariableList") + } + + fn update_tree_hash_cache( + &self, + _cache: &mut cached_tree_hash::TreeHashCache, + ) -> Result<(), cached_tree_hash::Error> { + unimplemented!("CachedTreeHash is not implemented for VariableList") + } +} + +impl ssz::Encode for VariableList +where + T: ssz::Encode, +{ + fn is_ssz_fixed_len() -> bool { + >::is_ssz_fixed_len() + } + + fn ssz_fixed_len() -> usize { + >::ssz_fixed_len() + } + + fn ssz_append(&self, buf: &mut Vec) { + self.vec.ssz_append(buf) + } +} + +impl ssz::Decode for VariableList +where + T: ssz::Decode, +{ + fn is_ssz_fixed_len() -> bool { + >::is_ssz_fixed_len() + } + + fn ssz_fixed_len() -> usize { + >::ssz_fixed_len() + } + + fn from_ssz_bytes(bytes: &[u8]) -> Result { + let vec = >::from_ssz_bytes(bytes)?; + + Self::new(vec).map_err(|e| ssz::DecodeError::BytesInvalid(format!("VariableList {:?}", e))) + } +} + #[cfg(test)] mod test { use super::*; + use ssz::*; + use tree_hash::{merkle_root, TreeHash}; + use tree_hash_derive::TreeHash; use typenum::*; #[test] @@ -208,97 +317,6 @@ mod test { assert_eq!(fixed.get(3), Some(&6)); assert_eq!(fixed.get(4), None); } -} - -impl tree_hash::TreeHash for VariableList -where - T: tree_hash::TreeHash, -{ - fn tree_hash_type() -> tree_hash::TreeHashType { - tree_hash::TreeHashType::Vector - } - - fn tree_hash_packed_encoding(&self) -> Vec { - unreachable!("Vector should never be packed.") - } - - fn tree_hash_packing_factor() -> usize { - unreachable!("Vector should never be packed.") - } - - fn tree_hash_root(&self) -> Vec { - tree_hash::impls::vec_tree_hash_root(&self.vec) - } -} - -impl cached_tree_hash::CachedTreeHash for VariableList -where - T: cached_tree_hash::CachedTreeHash + tree_hash::TreeHash, -{ - fn new_tree_hash_cache( - &self, - depth: usize, - ) -> Result { - let (cache, _overlay) = cached_tree_hash::vec::new_tree_hash_cache(&self.vec, depth)?; - - Ok(cache) - } - - fn tree_hash_cache_schema(&self, depth: usize) -> cached_tree_hash::BTreeSchema { - cached_tree_hash::vec::produce_schema(&self.vec, depth) - } - - fn update_tree_hash_cache( - &self, - cache: &mut cached_tree_hash::TreeHashCache, - ) -> Result<(), cached_tree_hash::Error> { - cached_tree_hash::vec::update_tree_hash_cache(&self.vec, cache)?; - - Ok(()) - } -} - -impl ssz::Encode for VariableList -where - T: ssz::Encode, -{ - fn is_ssz_fixed_len() -> bool { - >::is_ssz_fixed_len() - } - - fn ssz_fixed_len() -> usize { - >::ssz_fixed_len() - } - - fn ssz_append(&self, buf: &mut Vec) { - self.vec.ssz_append(buf) - } -} - -impl ssz::Decode for VariableList -where - T: ssz::Decode + Default, -{ - fn is_ssz_fixed_len() -> bool { - >::is_ssz_fixed_len() - } - - fn ssz_fixed_len() -> usize { - >::ssz_fixed_len() - } - - fn from_ssz_bytes(bytes: &[u8]) -> Result { - let vec = >::from_ssz_bytes(bytes)?; - - Self::new(vec).map_err(|e| ssz::DecodeError::BytesInvalid(format!("VariableList {:?}", e))) - } -} - -#[cfg(test)] -mod tests { - use super::*; - use ssz::*; - use typenum::*; #[test] fn encode() { @@ -317,4 +335,111 @@ mod tests { round_trip::>(vec![42; 8].into()); round_trip::>(vec![0; 8].into()); } + + fn root_with_length(bytes: &[u8], len: usize) -> Vec { + let root = merkle_root(bytes, 0); + tree_hash::mix_in_length(&root, len) + } + + #[test] + fn tree_hash_u8() { + let fixed: VariableList = VariableList::from(vec![]); + assert_eq!(fixed.tree_hash_root(), root_with_length(&[0; 8], 0)); + + for i in 0..=1 { + let fixed: VariableList = VariableList::from(vec![0; i]); + assert_eq!(fixed.tree_hash_root(), root_with_length(&vec![0; i], i)); + } + + for i in 0..=8 { + let fixed: VariableList = VariableList::from(vec![0; i]); + assert_eq!(fixed.tree_hash_root(), root_with_length(&vec![0; i], i)); + } + + for i in 0..=13 { + let fixed: VariableList = VariableList::from(vec![0; i]); + assert_eq!(fixed.tree_hash_root(), root_with_length(&vec![0; i], i)); + } + + for i in 0..=16 { + let fixed: VariableList = VariableList::from(vec![0; i]); + assert_eq!(fixed.tree_hash_root(), root_with_length(&vec![0; i], i)); + } + + let source: Vec = (0..16).collect(); + let fixed: VariableList = VariableList::from(source.clone()); + assert_eq!(fixed.tree_hash_root(), root_with_length(&source, 16)); + } + + #[derive(Clone, Copy, TreeHash, Default)] + struct A { + a: u32, + b: u32, + } + + fn repeat(input: &[u8], n: usize) -> Vec { + let mut output = vec![]; + + for _ in 0..n { + output.append(&mut input.to_vec()); + } + + output + } + + fn padded_root_with_length(bytes: &[u8], len: usize, min_nodes: usize) -> Vec { + let root = merkle_root(bytes, min_nodes); + tree_hash::mix_in_length(&root, len) + } + + #[test] + fn tree_hash_composite() { + let a = A { a: 0, b: 1 }; + + let fixed: VariableList = VariableList::from(vec![]); + assert_eq!( + fixed.tree_hash_root(), + padded_root_with_length(&[0; 32], 0, 0), + ); + + for i in 0..=1 { + let fixed: VariableList = VariableList::from(vec![a; i]); + assert_eq!( + fixed.tree_hash_root(), + padded_root_with_length(&repeat(&a.tree_hash_root(), i), i, 1), + "U1 {}", + i + ); + } + + for i in 0..=8 { + let fixed: VariableList = VariableList::from(vec![a; i]); + assert_eq!( + fixed.tree_hash_root(), + padded_root_with_length(&repeat(&a.tree_hash_root(), i), i, 8), + "U8 {}", + i + ); + } + + for i in 0..=13 { + let fixed: VariableList = VariableList::from(vec![a; i]); + assert_eq!( + fixed.tree_hash_root(), + padded_root_with_length(&repeat(&a.tree_hash_root(), i), i, 13), + "U13 {}", + i + ); + } + + for i in 0..=16 { + let fixed: VariableList = VariableList::from(vec![a; i]); + assert_eq!( + fixed.tree_hash_root(), + padded_root_with_length(&repeat(&a.tree_hash_root(), i), i, 16), + "U16 {}", + i + ); + } + } } diff --git a/eth2/utils/tree_hash/src/impls.rs b/eth2/utils/tree_hash/src/impls.rs index b91147830..08e596648 100644 --- a/eth2/utils/tree_hash/src/impls.rs +++ b/eth2/utils/tree_hash/src/impls.rs @@ -1,7 +1,5 @@ use super::*; -use crate::merkle_root; use ethereum_types::H256; -use hashing::hash; use int_to_bytes::int_to_bytes32; macro_rules! impl_for_bitsize { @@ -67,7 +65,7 @@ macro_rules! impl_for_u8_array { } fn tree_hash_root(&self) -> Vec { - merkle_root(&self[..]) + merkle_root(&self[..], 0) } } }; @@ -90,10 +88,12 @@ impl TreeHash for H256 { } fn tree_hash_root(&self) -> Vec { - merkle_root(&self.as_bytes().to_vec()) + merkle_root(&self.as_bytes().to_vec(), 0) } } +// TODO: this implementation always panics, it only exists to allow us to compile whilst +// refactoring tree hash. Should be removed. macro_rules! impl_for_list { ($type: ty) => { impl TreeHash for $type @@ -101,23 +101,19 @@ macro_rules! impl_for_list { T: TreeHash, { fn tree_hash_type() -> TreeHashType { - TreeHashType::List + unimplemented!("TreeHash is not implemented for Vec or slice") } fn tree_hash_packed_encoding(&self) -> Vec { - unreachable!("List should never be packed.") + unimplemented!("TreeHash is not implemented for Vec or slice") } fn tree_hash_packing_factor() -> usize { - unreachable!("List should never be packed.") + unimplemented!("TreeHash is not implemented for Vec or slice") } fn tree_hash_root(&self) -> Vec { - let mut root_and_len = Vec::with_capacity(HASHSIZE * 2); - root_and_len.append(&mut vec_tree_hash_root(self)); - root_and_len.append(&mut int_to_bytes32(self.len() as u64)); - - hash(&root_and_len) + unimplemented!("TreeHash is not implemented for Vec or slice") } } }; @@ -126,35 +122,6 @@ macro_rules! impl_for_list { impl_for_list!(Vec); impl_for_list!(&[T]); -pub fn vec_tree_hash_root(vec: &[T]) -> Vec -where - T: TreeHash, -{ - let leaves = match T::tree_hash_type() { - TreeHashType::Basic => { - let mut leaves = - Vec::with_capacity((HASHSIZE / T::tree_hash_packing_factor()) * vec.len()); - - for item in vec { - leaves.append(&mut item.tree_hash_packed_encoding()); - } - - leaves - } - TreeHashType::Container | TreeHashType::List | TreeHashType::Vector => { - let mut leaves = Vec::with_capacity(vec.len() * HASHSIZE); - - for item in vec { - leaves.append(&mut item.tree_hash_root()) - } - - leaves - } - }; - - merkle_root(&leaves) -} - #[cfg(test)] mod test { use super::*; diff --git a/eth2/utils/tree_hash/src/lib.rs b/eth2/utils/tree_hash/src/lib.rs index a1d7a048e..b280693c5 100644 --- a/eth2/utils/tree_hash/src/lib.rs +++ b/eth2/utils/tree_hash/src/lib.rs @@ -8,15 +8,28 @@ mod merkleize_standard; pub use merkleize_padded::merkleize_padded; pub use merkleize_standard::merkleize_standard; -/// Alias to `merkleize_padded(&bytes, 0)` -pub fn merkle_root(bytes: &[u8]) -> Vec { - merkleize_padded(&bytes, 0) -} - pub const BYTES_PER_CHUNK: usize = 32; pub const HASHSIZE: usize = 32; pub const MERKLE_HASH_CHUNK: usize = 2 * BYTES_PER_CHUNK; +/// Alias to `merkleize_padded(&bytes, minimum_chunk_count)` +/// +/// If `minimum_chunk_count < bytes / BYTES_PER_CHUNK`, padding will be added for the difference +/// between the two. +pub fn merkle_root(bytes: &[u8], minimum_chunk_count: usize) -> Vec { + merkleize_padded(&bytes, minimum_chunk_count) +} + +/// Returns the node created by hashing `root` and `length`. +/// +/// Used in `TreeHash` for inserting the length of a list above it's root. +pub fn mix_in_length(root: &[u8], length: usize) -> Vec { + let mut length_bytes = length.to_le_bytes().to_vec(); + length_bytes.resize(BYTES_PER_CHUNK, 0); + + merkleize_padded::hash_concat(root, &length_bytes) +} + #[derive(Debug, PartialEq, Clone)] pub enum TreeHashType { Basic, @@ -84,3 +97,20 @@ macro_rules! tree_hash_ssz_encoding_as_list { } }; } + +#[cfg(test)] +mod test { + use super::*; + + #[test] + fn mix_length() { + let hash = { + let mut preimage = vec![42; BYTES_PER_CHUNK]; + preimage.append(&mut vec![42]); + preimage.append(&mut vec![0; BYTES_PER_CHUNK - 1]); + hashing::hash(&preimage) + }; + + assert_eq!(mix_in_length(&[42; BYTES_PER_CHUNK], 42), hash); + } +} diff --git a/eth2/utils/tree_hash/src/merkleize_padded.rs b/eth2/utils/tree_hash/src/merkleize_padded.rs index 43bd247d8..5d5f7719e 100644 --- a/eth2/utils/tree_hash/src/merkleize_padded.rs +++ b/eth2/utils/tree_hash/src/merkleize_padded.rs @@ -243,7 +243,7 @@ fn concat(mut vec1: Vec, mut vec2: Vec) -> Vec { } /// Compute the hash of two other hashes concatenated. -fn hash_concat(h1: &[u8], h2: &[u8]) -> Vec { +pub fn hash_concat(h1: &[u8], h2: &[u8]) -> Vec { hash(&concat(h1.to_vec(), h2.to_vec())) } diff --git a/eth2/utils/tree_hash_derive/src/lib.rs b/eth2/utils/tree_hash_derive/src/lib.rs index 5a7b304b5..e2a705dc5 100644 --- a/eth2/utils/tree_hash_derive/src/lib.rs +++ b/eth2/utils/tree_hash_derive/src/lib.rs @@ -150,7 +150,7 @@ pub fn tree_hash_derive(input: TokenStream) -> TokenStream { leaves.append(&mut self.#idents.tree_hash_root()); )* - tree_hash::merkle_root(&leaves) + tree_hash::merkle_root(&leaves, 0) } } }; @@ -162,6 +162,7 @@ pub fn tree_hash_signed_root_derive(input: TokenStream) -> TokenStream { let item = parse_macro_input!(input as DeriveInput); let name = &item.ident; + let (impl_generics, ty_generics, where_clause) = &item.generics.split_for_impl(); let struct_data = match &item.data { syn::Data::Struct(s) => s, @@ -172,7 +173,7 @@ pub fn tree_hash_signed_root_derive(input: TokenStream) -> TokenStream { let num_elems = idents.len(); let output = quote! { - impl tree_hash::SignedRoot for #name { + impl #impl_generics tree_hash::SignedRoot for #name #ty_generics #where_clause { fn signed_root(&self) -> Vec { let mut leaves = Vec::with_capacity(#num_elems * tree_hash::HASHSIZE); @@ -180,7 +181,7 @@ pub fn tree_hash_signed_root_derive(input: TokenStream) -> TokenStream { leaves.append(&mut self.#idents.tree_hash_root()); )* - tree_hash::merkle_root(&leaves) + tree_hash::merkle_root(&leaves, 0) } } }; diff --git a/eth2/utils/tree_hash_derive/tests/tests.rs b/eth2/utils/tree_hash_derive/tests/tests.rs deleted file mode 100644 index ab11730ff..000000000 --- a/eth2/utils/tree_hash_derive/tests/tests.rs +++ /dev/null @@ -1,179 +0,0 @@ -use cached_tree_hash::{CachedTreeHash, TreeHashCache}; -use tree_hash::{merkle_root, SignedRoot, TreeHash}; -use tree_hash_derive::{CachedTreeHash, SignedRoot, TreeHash}; - -#[derive(Clone, Debug, TreeHash, CachedTreeHash)] -pub struct Inner { - pub a: u64, - pub b: u64, - pub c: u64, - pub d: u64, -} - -fn test_standard_and_cached(original: &T, modified: &T) { - // let mut cache = original.new_tree_hash_cache().unwrap(); - let mut cache = TreeHashCache::new(original).unwrap(); - - let standard_root = original.tree_hash_root(); - let cached_root = cache.tree_hash_root().unwrap(); - assert_eq!(standard_root, cached_root); - - // Test after a modification - cache.update(modified).unwrap(); - let standard_root = modified.tree_hash_root(); - let cached_root = cache.tree_hash_root().unwrap(); - assert_eq!(standard_root, cached_root); -} - -#[test] -fn inner_standard_vs_cached() { - let original = Inner { - a: 1, - b: 2, - c: 3, - d: 4, - }; - let modified = Inner { - b: 42, - ..original.clone() - }; - - test_standard_and_cached(&original, &modified); -} - -#[derive(Clone, Debug, TreeHash, CachedTreeHash)] -pub struct Uneven { - pub a: u64, - pub b: u64, - pub c: u64, - pub d: u64, - pub e: u64, -} - -#[test] -fn uneven_standard_vs_cached() { - let original = Uneven { - a: 1, - b: 2, - c: 3, - d: 4, - e: 5, - }; - let modified = Uneven { - e: 42, - ..original.clone() - }; - - test_standard_and_cached(&original, &modified); -} - -#[derive(Clone, Debug, TreeHash, SignedRoot)] -pub struct SignedInner { - pub a: u64, - pub b: u64, - pub c: u64, - pub d: u64, - #[signed_root(skip_hashing)] - pub e: u64, -} - -#[test] -fn signed_root() { - let unsigned = Inner { - a: 1, - b: 2, - c: 3, - d: 4, - }; - let signed = SignedInner { - a: 1, - b: 2, - c: 3, - d: 4, - e: 5, - }; - - assert_eq!(unsigned.tree_hash_root(), signed.signed_root()); -} - -#[derive(TreeHash, SignedRoot)] -struct CryptoKitties { - best_kitty: u64, - worst_kitty: u8, - kitties: Vec, -} - -impl CryptoKitties { - fn new() -> Self { - CryptoKitties { - best_kitty: 9999, - worst_kitty: 1, - kitties: vec![2, 3, 5, 7, 11, 13, 17, 19, 23, 29, 31, 37, 41, 43], - } - } - - fn hash(&self) -> Vec { - let mut leaves = vec![]; - leaves.append(&mut self.best_kitty.tree_hash_root()); - leaves.append(&mut self.worst_kitty.tree_hash_root()); - leaves.append(&mut self.kitties.tree_hash_root()); - merkle_root(&leaves) - } -} - -#[test] -fn test_simple_tree_hash_derive() { - let kitties = CryptoKitties::new(); - assert_eq!(kitties.tree_hash_root(), kitties.hash()); -} - -#[test] -fn test_simple_signed_root_derive() { - let kitties = CryptoKitties::new(); - assert_eq!(kitties.signed_root(), kitties.hash()); -} - -#[derive(TreeHash, SignedRoot)] -struct Casper { - friendly: bool, - #[tree_hash(skip_hashing)] - friends: Vec, - #[signed_root(skip_hashing)] - dead: bool, -} - -impl Casper { - fn new() -> Self { - Casper { - friendly: true, - friends: vec![1, 2, 3, 4, 5, 6, 7, 8, 9, 10], - dead: true, - } - } - - fn expected_signed_hash(&self) -> Vec { - let mut list = Vec::new(); - list.append(&mut self.friendly.tree_hash_root()); - list.append(&mut self.friends.tree_hash_root()); - merkle_root(&list) - } - - fn expected_tree_hash(&self) -> Vec { - let mut list = Vec::new(); - list.append(&mut self.friendly.tree_hash_root()); - list.append(&mut self.dead.tree_hash_root()); - merkle_root(&list) - } -} - -#[test] -fn test_annotated_tree_hash_derive() { - let casper = Casper::new(); - assert_eq!(casper.tree_hash_root(), casper.expected_tree_hash()); -} - -#[test] -fn test_annotated_signed_root_derive() { - let casper = Casper::new(); - assert_eq!(casper.signed_root(), casper.expected_signed_hash()); -} diff --git a/tests/ef_tests/eth2.0-spec-tests b/tests/ef_tests/eth2.0-spec-tests index d40578264..aaa1673f5 160000 --- a/tests/ef_tests/eth2.0-spec-tests +++ b/tests/ef_tests/eth2.0-spec-tests @@ -1 +1 @@ -Subproject commit d405782646190595927cc0a59f504f7b00a760f3 +Subproject commit aaa1673f508103e11304833e0456e4149f880065 diff --git a/tests/ef_tests/src/cases.rs b/tests/ef_tests/src/cases.rs index 3801ba6a7..dbc5d4de6 100644 --- a/tests/ef_tests/src/cases.rs +++ b/tests/ef_tests/src/cases.rs @@ -8,7 +8,10 @@ mod bls_g2_uncompressed; mod bls_priv_to_pub; mod bls_sign_msg; mod epoch_processing_crosslinks; +mod epoch_processing_final_updates; +mod epoch_processing_justification_and_finalization; mod epoch_processing_registry_updates; +mod epoch_processing_slashings; mod operations_attestation; mod operations_attester_slashing; mod operations_block_header; @@ -29,7 +32,10 @@ pub use bls_g2_uncompressed::*; pub use bls_priv_to_pub::*; pub use bls_sign_msg::*; pub use epoch_processing_crosslinks::*; +pub use epoch_processing_final_updates::*; +pub use epoch_processing_justification_and_finalization::*; pub use epoch_processing_registry_updates::*; +pub use epoch_processing_slashings::*; pub use operations_attestation::*; pub use operations_attester_slashing::*; pub use operations_block_header::*; diff --git a/tests/ef_tests/src/cases/bls_g2_compressed.rs b/tests/ef_tests/src/cases/bls_g2_compressed.rs index 8478a0ff6..185cb58f3 100644 --- a/tests/ef_tests/src/cases/bls_g2_compressed.rs +++ b/tests/ef_tests/src/cases/bls_g2_compressed.rs @@ -23,12 +23,6 @@ impl YamlDecode for BlsG2Compressed { impl Case for BlsG2Compressed { fn result(&self, _case_index: usize) -> Result<(), Error> { - // FIXME: re-enable in v0.7 - // https://github.com/ethereum/eth2.0-spec-tests/issues/3 - if _case_index == 4 { - return Err(Error::SkippedKnownFailure); - } - // Convert message and domain to required types let msg = hex::decode(&self.input.message[2..]) .map_err(|e| Error::FailedToParseTest(format!("{:?}", e)))?; diff --git a/tests/ef_tests/src/cases/epoch_processing_crosslinks.rs b/tests/ef_tests/src/cases/epoch_processing_crosslinks.rs index bf1564b97..f2676d122 100644 --- a/tests/ef_tests/src/cases/epoch_processing_crosslinks.rs +++ b/tests/ef_tests/src/cases/epoch_processing_crosslinks.rs @@ -5,11 +5,10 @@ use state_processing::per_epoch_processing::process_crosslinks; use types::{BeaconState, EthSpec}; #[derive(Debug, Clone, Deserialize)] +#[serde(bound = "E: EthSpec")] pub struct EpochProcessingCrosslinks { pub description: String, - #[serde(bound = "E: EthSpec")] pub pre: BeaconState, - #[serde(bound = "E: EthSpec")] pub post: Option>, } diff --git a/tests/ef_tests/src/cases/epoch_processing_final_updates.rs b/tests/ef_tests/src/cases/epoch_processing_final_updates.rs new file mode 100644 index 000000000..69e6b8bd3 --- /dev/null +++ b/tests/ef_tests/src/cases/epoch_processing_final_updates.rs @@ -0,0 +1,41 @@ +use super::*; +use crate::case_result::compare_beacon_state_results_without_caches; +use serde_derive::Deserialize; +use state_processing::per_epoch_processing::process_final_updates; +use types::{BeaconState, EthSpec}; + +#[derive(Debug, Clone, Deserialize)] +#[serde(bound = "E: EthSpec")] +pub struct EpochProcessingFinalUpdates { + pub description: String, + pub pre: BeaconState, + pub post: Option>, +} + +impl YamlDecode for EpochProcessingFinalUpdates { + fn yaml_decode(yaml: &str) -> Result { + Ok(serde_yaml::from_str(yaml).unwrap()) + } +} + +impl Case for EpochProcessingFinalUpdates { + fn description(&self) -> String { + self.description.clone() + } + + fn result(&self, _case_index: usize) -> Result<(), Error> { + let mut state = self.pre.clone(); + let mut expected = self.post.clone(); + + let spec = &E::default_spec(); + + let mut result = (|| { + // Processing requires the epoch cache. + state.build_all_caches(spec)?; + + process_final_updates(&mut state, spec).map(|_| state) + })(); + + compare_beacon_state_results_without_caches(&mut result, &mut expected) + } +} diff --git a/tests/ef_tests/src/cases/epoch_processing_justification_and_finalization.rs b/tests/ef_tests/src/cases/epoch_processing_justification_and_finalization.rs new file mode 100644 index 000000000..788301086 --- /dev/null +++ b/tests/ef_tests/src/cases/epoch_processing_justification_and_finalization.rs @@ -0,0 +1,46 @@ +use super::*; +use crate::case_result::compare_beacon_state_results_without_caches; +use serde_derive::Deserialize; +use state_processing::per_epoch_processing::{ + process_justification_and_finalization, validator_statuses::ValidatorStatuses, +}; +use types::{BeaconState, EthSpec}; + +#[derive(Debug, Clone, Deserialize)] +#[serde(bound = "E: EthSpec")] +pub struct EpochProcessingJustificationAndFinalization { + pub description: String, + pub pre: BeaconState, + pub post: Option>, +} + +impl YamlDecode for EpochProcessingJustificationAndFinalization { + fn yaml_decode(yaml: &str) -> Result { + Ok(serde_yaml::from_str(yaml).unwrap()) + } +} + +impl Case for EpochProcessingJustificationAndFinalization { + fn description(&self) -> String { + self.description.clone() + } + + fn result(&self, _case_index: usize) -> Result<(), Error> { + let mut state = self.pre.clone(); + let mut expected = self.post.clone(); + + let spec = &E::default_spec(); + + // Processing requires the epoch cache. + state.build_all_caches(spec).unwrap(); + + let mut result = (|| { + let mut validator_statuses = ValidatorStatuses::new(&state, spec)?; + validator_statuses.process_attestations(&state, spec)?; + process_justification_and_finalization(&mut state, &validator_statuses.total_balances) + .map(|_| state) + })(); + + compare_beacon_state_results_without_caches(&mut result, &mut expected) + } +} diff --git a/tests/ef_tests/src/cases/epoch_processing_registry_updates.rs b/tests/ef_tests/src/cases/epoch_processing_registry_updates.rs index 02311656e..a01f895fe 100644 --- a/tests/ef_tests/src/cases/epoch_processing_registry_updates.rs +++ b/tests/ef_tests/src/cases/epoch_processing_registry_updates.rs @@ -5,11 +5,10 @@ use state_processing::per_epoch_processing::registry_updates::process_registry_u use types::{BeaconState, EthSpec}; #[derive(Debug, Clone, Deserialize)] +#[serde(bound = "E: EthSpec")] pub struct EpochProcessingRegistryUpdates { pub description: String, - #[serde(bound = "E: EthSpec")] pub pre: BeaconState, - #[serde(bound = "E: EthSpec")] pub post: Option>, } diff --git a/tests/ef_tests/src/cases/epoch_processing_slashings.rs b/tests/ef_tests/src/cases/epoch_processing_slashings.rs new file mode 100644 index 000000000..d2a988d92 --- /dev/null +++ b/tests/ef_tests/src/cases/epoch_processing_slashings.rs @@ -0,0 +1,50 @@ +use super::*; +use crate::case_result::compare_beacon_state_results_without_caches; +use serde_derive::Deserialize; +use state_processing::per_epoch_processing::{ + process_slashings::process_slashings, validator_statuses::ValidatorStatuses, +}; +use types::{BeaconState, EthSpec}; + +#[derive(Debug, Clone, Deserialize)] +#[serde(bound = "E: EthSpec")] +pub struct EpochProcessingSlashings { + pub description: String, + pub pre: BeaconState, + pub post: Option>, +} + +impl YamlDecode for EpochProcessingSlashings { + fn yaml_decode(yaml: &str) -> Result { + Ok(serde_yaml::from_str(yaml).unwrap()) + } +} + +impl Case for EpochProcessingSlashings { + fn description(&self) -> String { + self.description.clone() + } + + fn result(&self, _case_index: usize) -> Result<(), Error> { + let mut state = self.pre.clone(); + let mut expected = self.post.clone(); + + let spec = &E::default_spec(); + + let mut result = (|| { + // Processing requires the epoch cache. + state.build_all_caches(spec)?; + + let mut validator_statuses = ValidatorStatuses::new(&state, spec)?; + validator_statuses.process_attestations(&state, spec)?; + process_slashings( + &mut state, + validator_statuses.total_balances.current_epoch, + spec, + ) + .map(|_| state) + })(); + + compare_beacon_state_results_without_caches(&mut result, &mut expected) + } +} diff --git a/tests/ef_tests/src/cases/operations_attestation.rs b/tests/ef_tests/src/cases/operations_attestation.rs index 1db0f6d02..76cbe3f18 100644 --- a/tests/ef_tests/src/cases/operations_attestation.rs +++ b/tests/ef_tests/src/cases/operations_attestation.rs @@ -6,13 +6,12 @@ use state_processing::per_block_processing::process_attestations; use types::{Attestation, BeaconState, EthSpec}; #[derive(Debug, Clone, Deserialize)] +#[serde(bound = "E: EthSpec")] pub struct OperationsAttestation { pub description: String, pub bls_setting: Option, - #[serde(bound = "E: EthSpec")] pub pre: BeaconState, - pub attestation: Attestation, - #[serde(bound = "E: EthSpec")] + pub attestation: Attestation, pub post: Option>, } diff --git a/tests/ef_tests/src/cases/operations_attester_slashing.rs b/tests/ef_tests/src/cases/operations_attester_slashing.rs index fd7435009..c658b1af4 100644 --- a/tests/ef_tests/src/cases/operations_attester_slashing.rs +++ b/tests/ef_tests/src/cases/operations_attester_slashing.rs @@ -11,7 +11,8 @@ pub struct OperationsAttesterSlashing { pub bls_setting: Option, #[serde(bound = "E: EthSpec")] pub pre: BeaconState, - pub attester_slashing: AttesterSlashing, + #[serde(bound = "E: EthSpec")] + pub attester_slashing: AttesterSlashing, #[serde(bound = "E: EthSpec")] pub post: Option>, } diff --git a/tests/ef_tests/src/cases/operations_block_header.rs b/tests/ef_tests/src/cases/operations_block_header.rs index 599285ca0..8261b16d9 100644 --- a/tests/ef_tests/src/cases/operations_block_header.rs +++ b/tests/ef_tests/src/cases/operations_block_header.rs @@ -6,13 +6,12 @@ use state_processing::per_block_processing::process_block_header; use types::{BeaconBlock, BeaconState, EthSpec}; #[derive(Debug, Clone, Deserialize)] +#[serde(bound = "E: EthSpec")] pub struct OperationsBlockHeader { pub description: String, pub bls_setting: Option, - #[serde(bound = "E: EthSpec")] pub pre: BeaconState, - pub block: BeaconBlock, - #[serde(bound = "E: EthSpec")] + pub block: BeaconBlock, pub post: Option>, } diff --git a/tests/ef_tests/src/cases/operations_deposit.rs b/tests/ef_tests/src/cases/operations_deposit.rs index 7478708b0..801c02029 100644 --- a/tests/ef_tests/src/cases/operations_deposit.rs +++ b/tests/ef_tests/src/cases/operations_deposit.rs @@ -6,13 +6,12 @@ use state_processing::per_block_processing::process_deposits; use types::{BeaconState, Deposit, EthSpec}; #[derive(Debug, Clone, Deserialize)] +#[serde(bound = "E: EthSpec")] pub struct OperationsDeposit { pub description: String, pub bls_setting: Option, - #[serde(bound = "E: EthSpec")] pub pre: BeaconState, pub deposit: Deposit, - #[serde(bound = "E: EthSpec")] pub post: Option>, } diff --git a/tests/ef_tests/src/cases/operations_exit.rs b/tests/ef_tests/src/cases/operations_exit.rs index 013021c04..d7e53bcb5 100644 --- a/tests/ef_tests/src/cases/operations_exit.rs +++ b/tests/ef_tests/src/cases/operations_exit.rs @@ -6,13 +6,12 @@ use state_processing::per_block_processing::process_exits; use types::{BeaconState, EthSpec, VoluntaryExit}; #[derive(Debug, Clone, Deserialize)] +#[serde(bound = "E: EthSpec")] pub struct OperationsExit { pub description: String, pub bls_setting: Option, - #[serde(bound = "E: EthSpec")] pub pre: BeaconState, pub voluntary_exit: VoluntaryExit, - #[serde(bound = "E: EthSpec")] pub post: Option>, } diff --git a/tests/ef_tests/src/cases/operations_proposer_slashing.rs b/tests/ef_tests/src/cases/operations_proposer_slashing.rs index 7ddb97163..e52e84f39 100644 --- a/tests/ef_tests/src/cases/operations_proposer_slashing.rs +++ b/tests/ef_tests/src/cases/operations_proposer_slashing.rs @@ -6,13 +6,12 @@ use state_processing::per_block_processing::process_proposer_slashings; use types::{BeaconState, EthSpec, ProposerSlashing}; #[derive(Debug, Clone, Deserialize)] +#[serde(bound = "E: EthSpec")] pub struct OperationsProposerSlashing { pub description: String, pub bls_setting: Option, - #[serde(bound = "E: EthSpec")] pub pre: BeaconState, pub proposer_slashing: ProposerSlashing, - #[serde(bound = "E: EthSpec")] pub post: Option>, } diff --git a/tests/ef_tests/src/cases/operations_transfer.rs b/tests/ef_tests/src/cases/operations_transfer.rs index 8456017b5..250f58769 100644 --- a/tests/ef_tests/src/cases/operations_transfer.rs +++ b/tests/ef_tests/src/cases/operations_transfer.rs @@ -6,13 +6,12 @@ use state_processing::per_block_processing::process_transfers; use types::{BeaconState, EthSpec, Transfer}; #[derive(Debug, Clone, Deserialize)] +#[serde(bound = "E: EthSpec")] pub struct OperationsTransfer { pub description: String, pub bls_setting: Option, - #[serde(bound = "E: EthSpec")] pub pre: BeaconState, pub transfer: Transfer, - #[serde(bound = "E: EthSpec")] pub post: Option>, } @@ -37,8 +36,7 @@ impl Case for OperationsTransfer { // Transfer processing requires the epoch cache. state.build_all_caches(&E::default_spec()).unwrap(); - let mut spec = E::default_spec(); - spec.max_transfers = 1; + let spec = E::default_spec(); let result = process_transfers(&mut state, &[transfer], &spec); diff --git a/tests/ef_tests/src/cases/sanity_blocks.rs b/tests/ef_tests/src/cases/sanity_blocks.rs index bbd4abbad..cd9008fda 100644 --- a/tests/ef_tests/src/cases/sanity_blocks.rs +++ b/tests/ef_tests/src/cases/sanity_blocks.rs @@ -2,17 +2,18 @@ use super::*; use crate::bls_setting::BlsSetting; use crate::case_result::compare_beacon_state_results_without_caches; use serde_derive::Deserialize; -use state_processing::{per_block_processing, per_slot_processing}; +use state_processing::{ + per_block_processing, per_slot_processing, BlockInvalid, BlockProcessingError, +}; use types::{BeaconBlock, BeaconState, EthSpec, RelativeEpoch}; #[derive(Debug, Clone, Deserialize)] +#[serde(bound = "E: EthSpec")] pub struct SanityBlocks { pub description: String, pub bls_setting: Option, - #[serde(bound = "E: EthSpec")] pub pre: BeaconState, - pub blocks: Vec, - #[serde(bound = "E: EthSpec")] + pub blocks: Vec>, pub post: Option>, } @@ -27,19 +28,9 @@ impl Case for SanityBlocks { self.description.clone() } - fn result(&self, case_index: usize) -> Result<(), Error> { + fn result(&self, _case_index: usize) -> Result<(), Error> { self.bls_setting.unwrap_or_default().check()?; - // FIXME: re-enable these tests in v0.7 - let known_failures = vec![ - 0, // attestation: https://github.com/ethereum/eth2.0-spec-tests/issues/6 - 10, // transfer: https://github.com/ethereum/eth2.0-spec-tests/issues/7 - 11, // voluntary exit: signature is invalid, don't know why - ]; - if known_failures.contains(&case_index) { - return Err(Error::SkippedKnownFailure); - } - let mut state = self.pre.clone(); let mut expected = self.post.clone(); let spec = &E::default_spec(); @@ -59,7 +50,15 @@ impl Case for SanityBlocks { .build_committee_cache(RelativeEpoch::Current, spec) .unwrap(); - per_block_processing(&mut state, block, spec) + per_block_processing(&mut state, block, spec)?; + + if block.state_root == state.canonical_root() { + Ok(()) + } else { + Err(BlockProcessingError::Invalid( + BlockInvalid::StateRootMismatch, + )) + } }) .map(|_| state); diff --git a/tests/ef_tests/src/cases/sanity_slots.rs b/tests/ef_tests/src/cases/sanity_slots.rs index 779a90c70..fbce1a06a 100644 --- a/tests/ef_tests/src/cases/sanity_slots.rs +++ b/tests/ef_tests/src/cases/sanity_slots.rs @@ -5,12 +5,11 @@ use state_processing::per_slot_processing; use types::{BeaconState, EthSpec}; #[derive(Debug, Clone, Deserialize)] +#[serde(bound = "E: EthSpec")] pub struct SanitySlots { pub description: String, - #[serde(bound = "E: EthSpec")] pub pre: BeaconState, pub slots: usize, - #[serde(bound = "E: EthSpec")] pub post: Option>, } diff --git a/tests/ef_tests/src/cases/ssz_static.rs b/tests/ef_tests/src/cases/ssz_static.rs index becd6d888..8aa19b2c8 100644 --- a/tests/ef_tests/src/cases/ssz_static.rs +++ b/tests/ef_tests/src/cases/ssz_static.rs @@ -1,17 +1,17 @@ use super::*; use crate::case_result::compare_result; -use cached_tree_hash::{CachedTreeHash, TreeHashCache}; +use cached_tree_hash::CachedTreeHash; use serde_derive::Deserialize; use ssz::{Decode, Encode}; use std::fmt::Debug; use std::marker::PhantomData; use tree_hash::TreeHash; use types::{ - test_utils::{SeedableRng, TestRandom, XorShiftRng}, - Attestation, AttestationData, AttestationDataAndCustodyBit, AttesterSlashing, BeaconBlock, - BeaconBlockBody, BeaconBlockHeader, BeaconState, Crosslink, Deposit, DepositData, Eth1Data, - EthSpec, Fork, Hash256, HistoricalBatch, IndexedAttestation, PendingAttestation, - ProposerSlashing, Transfer, Validator, VoluntaryExit, + test_utils::TestRandom, Attestation, AttestationData, AttestationDataAndCustodyBit, + AttesterSlashing, BeaconBlock, BeaconBlockBody, BeaconBlockHeader, BeaconState, Checkpoint, + CompactCommittee, Crosslink, Deposit, DepositData, Eth1Data, EthSpec, Fork, Hash256, + HistoricalBatch, IndexedAttestation, PendingAttestation, ProposerSlashing, Transfer, Validator, + VoluntaryExit, }; // Enum variant names are used by Serde when deserializing the test YAML @@ -23,23 +23,25 @@ where { Fork(SszStaticInner), Crosslink(SszStaticInner), + Checkpoint(SszStaticInner), + CompactCommittee(SszStaticInner, E>), Eth1Data(SszStaticInner), AttestationData(SszStaticInner), AttestationDataAndCustodyBit(SszStaticInner), - IndexedAttestation(SszStaticInner), + IndexedAttestation(SszStaticInner, E>), DepositData(SszStaticInner), BeaconBlockHeader(SszStaticInner), Validator(SszStaticInner), - PendingAttestation(SszStaticInner), + PendingAttestation(SszStaticInner, E>), HistoricalBatch(SszStaticInner, E>), ProposerSlashing(SszStaticInner), - AttesterSlashing(SszStaticInner), - Attestation(SszStaticInner), + AttesterSlashing(SszStaticInner, E>), + Attestation(SszStaticInner, E>), Deposit(SszStaticInner), VoluntaryExit(SszStaticInner), Transfer(SszStaticInner), - BeaconBlockBody(SszStaticInner), - BeaconBlock(SszStaticInner), + BeaconBlockBody(SszStaticInner, E>), + BeaconBlock(SszStaticInner, E>), BeaconState(SszStaticInner, E>), } @@ -68,6 +70,8 @@ impl Case for SszStatic { match *self { Fork(ref val) => ssz_static_test(val), Crosslink(ref val) => ssz_static_test(val), + Checkpoint(ref val) => ssz_static_test(val), + CompactCommittee(ref val) => ssz_static_test(val), Eth1Data(ref val) => ssz_static_test(val), AttestationData(ref val) => ssz_static_test(val), AttestationDataAndCustodyBit(ref val) => ssz_static_test(val), @@ -121,18 +125,5 @@ where let tree_hash_root = Hash256::from_slice(&decoded.tree_hash_root()); compare_result::(&Ok(tree_hash_root), &Some(expected_root))?; - // Verify a _new_ CachedTreeHash root of the decoded struct matches the test. - let cache = TreeHashCache::new(&decoded).unwrap(); - let cached_tree_hash_root = Hash256::from_slice(cache.tree_hash_root().unwrap()); - compare_result::(&Ok(cached_tree_hash_root), &Some(expected_root))?; - - // Verify the root after an update from a random CachedTreeHash to the decoded struct. - let mut rng = XorShiftRng::from_seed([42; 16]); - let random_instance = T::random_for_test(&mut rng); - let mut cache = TreeHashCache::new(&random_instance).unwrap(); - cache.update(&decoded).unwrap(); - let updated_root = Hash256::from_slice(cache.tree_hash_root().unwrap()); - compare_result::(&Ok(updated_root), &Some(expected_root))?; - Ok(()) } diff --git a/tests/ef_tests/src/doc.rs b/tests/ef_tests/src/doc.rs index 183f2781f..c3a48f76c 100644 --- a/tests/ef_tests/src/doc.rs +++ b/tests/ef_tests/src/doc.rs @@ -43,9 +43,11 @@ impl Doc { ("ssz", "static", "minimal") => run_test::>(self), ("ssz", "static", "mainnet") => run_test::>(self), ("sanity", "slots", "minimal") => run_test::>(self), - ("sanity", "slots", "mainnet") => run_test::>(self), + // FIXME: skipped due to compact committees issue + ("sanity", "slots", "mainnet") => vec![], // run_test::>(self), ("sanity", "blocks", "minimal") => run_test::>(self), - ("sanity", "blocks", "mainnet") => run_test::>(self), + // FIXME: skipped due to compact committees issue + ("sanity", "blocks", "mainnet") => vec![], // run_test::>(self), ("shuffling", "core", "minimal") => run_test::>(self), ("shuffling", "core", "mainnet") => run_test::>(self), ("bls", "aggregate_pubkeys", "mainnet") => run_test::(self), @@ -112,6 +114,26 @@ impl Doc { ("epoch_processing", "registry_updates", "mainnet") => { run_test::>(self) } + ("epoch_processing", "justification_and_finalization", "minimal") => { + run_test::>(self) + } + ("epoch_processing", "justification_and_finalization", "mainnet") => { + run_test::>(self) + } + ("epoch_processing", "slashings", "minimal") => { + run_test::>(self) + } + ("epoch_processing", "slashings", "mainnet") => { + run_test::>(self) + } + ("epoch_processing", "final_updates", "minimal") => { + run_test::>(self) + } + ("epoch_processing", "final_updates", "mainnet") => { + vec![] + // FIXME: skipped due to compact committees issue + // run_test::>(self) + } (runner, handler, config) => panic!( "No implementation for runner: \"{}\", handler: \"{}\", config: \"{}\"", runner, handler, config @@ -190,9 +212,8 @@ pub fn print_results( ); println!("Title: {}", header.title); println!("File: {:?}", doc.path); - println!(); println!( - "{} tests, {} failed, {} skipped (known failure), {} skipped (bls), {} passed.", + "{} tests, {} failed, {} skipped (known failure), {} skipped (bls), {} passed. (See below for errors)", results.len(), failed.len(), skipped_known_failures.len(), diff --git a/tests/ef_tests/tests/tests.rs b/tests/ef_tests/tests/tests.rs index f6e14c927..b7b922e0a 100644 --- a/tests/ef_tests/tests/tests.rs +++ b/tests/ef_tests/tests/tests.rs @@ -161,6 +161,15 @@ fn bls() { }); } +#[test] +fn epoch_processing_justification_and_finalization() { + yaml_files_in_test_dir(&Path::new("epoch_processing").join("justification_and_finalization")) + .into_par_iter() + .for_each(|file| { + Doc::assert_tests_pass(file); + }); +} + #[test] fn epoch_processing_crosslinks() { yaml_files_in_test_dir(&Path::new("epoch_processing").join("crosslinks")) @@ -178,3 +187,21 @@ fn epoch_processing_registry_updates() { Doc::assert_tests_pass(file); }); } + +#[test] +fn epoch_processing_slashings() { + yaml_files_in_test_dir(&Path::new("epoch_processing").join("slashings")) + .into_par_iter() + .for_each(|file| { + Doc::assert_tests_pass(file); + }); +} + +#[test] +fn epoch_processing_final_updates() { + yaml_files_in_test_dir(&Path::new("epoch_processing").join("final_updates")) + .into_par_iter() + .for_each(|file| { + Doc::assert_tests_pass(file); + }); +} diff --git a/validator_client/src/attestation_producer/beacon_node_attestation.rs b/validator_client/src/attestation_producer/beacon_node_attestation.rs index b5ff777de..1213be8a6 100644 --- a/validator_client/src/attestation_producer/beacon_node_attestation.rs +++ b/validator_client/src/attestation_producer/beacon_node_attestation.rs @@ -1,6 +1,6 @@ //TODO: generalise these enums to the crate use crate::block_producer::{BeaconNodeError, PublishOutcome}; -use types::{Attestation, AttestationData, Slot}; +use types::{Attestation, AttestationData, EthSpec, Slot}; /// Defines the methods required to produce and publish attestations on a Beacon Node. Abstracts the /// actual beacon node. @@ -16,8 +16,8 @@ pub trait BeaconNodeAttestation: Send + Sync { /// Request that the node publishes a attestation. /// /// Returns `true` if the publish was successful. - fn publish_attestation( + fn publish_attestation( &self, - attestation: Attestation, + attestation: Attestation, ) -> Result; } diff --git a/validator_client/src/attestation_producer/grpc.rs b/validator_client/src/attestation_producer/grpc.rs index 9ac0a433f..22af304ae 100644 --- a/validator_client/src/attestation_producer/grpc.rs +++ b/validator_client/src/attestation_producer/grpc.rs @@ -6,7 +6,7 @@ use ssz::{Decode, Encode}; use protos::services::{ Attestation as GrpcAttestation, ProduceAttestationDataRequest, PublishAttestationRequest, }; -use types::{Attestation, AttestationData, Slot}; +use types::{Attestation, AttestationData, EthSpec, Slot}; impl BeaconNodeAttestation for AttestationServiceClient { fn produce_attestation_data( @@ -28,9 +28,9 @@ impl BeaconNodeAttestation for AttestationServiceClient { Ok(attestation_data) } - fn publish_attestation( + fn publish_attestation( &self, - attestation: Attestation, + attestation: Attestation, ) -> Result { let mut req = PublishAttestationRequest::new(); diff --git a/validator_client/src/attestation_producer/mod.rs b/validator_client/src/attestation_producer/mod.rs index 900b0de24..e831b4c1c 100644 --- a/validator_client/src/attestation_producer/mod.rs +++ b/validator_client/src/attestation_producer/mod.rs @@ -2,16 +2,17 @@ mod beacon_node_attestation; mod grpc; use std::sync::Arc; -use types::{ChainSpec, Domain, Fork}; +use types::{ChainSpec, Domain, EthSpec, Fork}; //TODO: Move these higher up in the crate use super::block_producer::{BeaconNodeError, PublishOutcome, ValidatorEvent}; use crate::signer::Signer; use beacon_node_attestation::BeaconNodeAttestation; +use core::marker::PhantomData; use slog::{error, info, warn}; use tree_hash::TreeHash; use types::{ AggregateSignature, Attestation, AttestationData, AttestationDataAndCustodyBit, - AttestationDuty, Bitfield, + AttestationDuty, BitList, }; //TODO: Group these errors at a crate level @@ -28,7 +29,7 @@ impl From for Error { /// This struct contains the logic for requesting and signing beacon attestations for a validator. The /// validator can abstractly sign via the Signer trait object. -pub struct AttestationProducer<'a, B: BeaconNodeAttestation, S: Signer> { +pub struct AttestationProducer<'a, B: BeaconNodeAttestation, S: Signer, E: EthSpec> { /// The current fork. pub fork: Fork, /// The attestation duty to perform. @@ -41,9 +42,11 @@ pub struct AttestationProducer<'a, B: BeaconNodeAttestation, S: Signer> { pub signer: &'a S, /// Used for calculating epoch. pub slots_per_epoch: u64, + /// Mere vessel for E. + pub _phantom: PhantomData, } -impl<'a, B: BeaconNodeAttestation, S: Signer> AttestationProducer<'a, B, S> { +impl<'a, B: BeaconNodeAttestation, S: Signer, E: EthSpec> AttestationProducer<'a, B, S, E> { /// Handle outputs and results from attestation production. pub fn handle_produce_attestation(&mut self, log: slog::Logger) { match self.produce_attestation() { @@ -116,7 +119,7 @@ impl<'a, B: BeaconNodeAttestation, S: Signer> AttestationProducer<'a, B, S> { attestation: AttestationData, duties: AttestationDuty, domain: u64, - ) -> Option { + ) -> Option> { self.store_produce(&attestation); // build the aggregate signature @@ -134,14 +137,14 @@ impl<'a, B: BeaconNodeAttestation, S: Signer> AttestationProducer<'a, B, S> { agg_sig }; - let mut aggregation_bitfield = Bitfield::with_capacity(duties.committee_len); - let custody_bitfield = Bitfield::with_capacity(duties.committee_len); - aggregation_bitfield.set(duties.committee_index, true); + let mut aggregation_bits = BitList::with_capacity(duties.committee_len).ok()?; + let custody_bits = BitList::with_capacity(duties.committee_len).ok()?; + aggregation_bits.set(duties.committee_index, true).ok()?; Some(Attestation { - aggregation_bitfield, + aggregation_bits, data: attestation, - custody_bitfield, + custody_bits, signature: aggregate_signature, }) } diff --git a/validator_client/src/block_producer/beacon_node_block.rs b/validator_client/src/block_producer/beacon_node_block.rs index 65ccb2104..7e681d44b 100644 --- a/validator_client/src/block_producer/beacon_node_block.rs +++ b/validator_client/src/block_producer/beacon_node_block.rs @@ -1,4 +1,4 @@ -use types::{BeaconBlock, Signature, Slot}; +use types::{BeaconBlock, EthSpec, Signature, Slot}; #[derive(Debug, PartialEq, Clone)] pub enum BeaconNodeError { RemoteFailure(String), @@ -18,14 +18,17 @@ pub trait BeaconNodeBlock: Send + Sync { /// Request that the node produces a block. /// /// Returns Ok(None) if the Beacon Node is unable to produce at the given slot. - fn produce_beacon_block( + fn produce_beacon_block( &self, slot: Slot, randao_reveal: &Signature, - ) -> Result, BeaconNodeError>; + ) -> Result>, BeaconNodeError>; /// Request that the node publishes a block. /// /// Returns `true` if the publish was successful. - fn publish_beacon_block(&self, block: BeaconBlock) -> Result; + fn publish_beacon_block( + &self, + block: BeaconBlock, + ) -> Result; } diff --git a/validator_client/src/block_producer/grpc.rs b/validator_client/src/block_producer/grpc.rs index 820fbdb66..7a3e7f284 100644 --- a/validator_client/src/block_producer/grpc.rs +++ b/validator_client/src/block_producer/grpc.rs @@ -5,7 +5,7 @@ use protos::services::{ use protos::services_grpc::BeaconBlockServiceClient; use ssz::{Decode, Encode}; use std::sync::Arc; -use types::{BeaconBlock, Signature, Slot}; +use types::{BeaconBlock, EthSpec, Signature, Slot}; //TODO: Remove this new type. Do not need to wrap /// A newtype designed to wrap the gRPC-generated service so the `BeaconNode` trait may be @@ -25,11 +25,11 @@ impl BeaconNodeBlock for BeaconBlockGrpcClient { /// /// Returns `None` if it is not possible to produce at the supplied slot. For example, if the /// BN is unable to find a parent block. - fn produce_beacon_block( + fn produce_beacon_block( &self, slot: Slot, randao_reveal: &Signature, - ) -> Result, BeaconNodeError> { + ) -> Result>, BeaconNodeError> { // request a beacon block from the node let mut req = ProduceBeaconBlockRequest::new(); req.set_slot(slot.as_u64()); @@ -59,7 +59,10 @@ impl BeaconNodeBlock for BeaconBlockGrpcClient { /// /// Generally, this will be called after a `produce_beacon_block` call with a block that has /// been completed (signed) by the validator client. - fn publish_beacon_block(&self, block: BeaconBlock) -> Result { + fn publish_beacon_block( + &self, + block: BeaconBlock, + ) -> Result { let mut req = PublishBeaconBlockRequest::new(); let ssz = block.as_ssz_bytes(); diff --git a/validator_client/src/block_producer/mod.rs b/validator_client/src/block_producer/mod.rs index 48173b835..ca1e3a1d8 100644 --- a/validator_client/src/block_producer/mod.rs +++ b/validator_client/src/block_producer/mod.rs @@ -5,10 +5,11 @@ use self::beacon_node_block::BeaconNodeBlock; pub use self::beacon_node_block::{BeaconNodeError, PublishOutcome}; pub use self::grpc::BeaconBlockGrpcClient; use crate::signer::Signer; +use core::marker::PhantomData; use slog::{error, info, warn}; use std::sync::Arc; use tree_hash::{SignedRoot, TreeHash}; -use types::{BeaconBlock, ChainSpec, Domain, Fork, Slot}; +use types::{BeaconBlock, ChainSpec, Domain, EthSpec, Fork, Slot}; #[derive(Debug, PartialEq)] pub enum Error { @@ -37,7 +38,7 @@ pub enum ValidatorEvent { /// This struct contains the logic for requesting and signing beacon blocks for a validator. The /// validator can abstractly sign via the Signer trait object. -pub struct BlockProducer<'a, B: BeaconNodeBlock, S: Signer> { +pub struct BlockProducer<'a, B: BeaconNodeBlock, S: Signer, E: EthSpec> { /// The current fork. pub fork: Fork, /// The current slot to produce a block for. @@ -50,9 +51,11 @@ pub struct BlockProducer<'a, B: BeaconNodeBlock, S: Signer> { pub signer: &'a S, /// Used for calculating epoch. pub slots_per_epoch: u64, + /// Mere vessel for E. + pub _phantom: PhantomData, } -impl<'a, B: BeaconNodeBlock, S: Signer> BlockProducer<'a, B, S> { +impl<'a, B: BeaconNodeBlock, S: Signer, E: EthSpec> BlockProducer<'a, B, S, E> { /// Handle outputs and results from block production. pub fn handle_produce_block(&mut self, log: slog::Logger) { match self.produce_block() { @@ -123,7 +126,7 @@ impl<'a, B: BeaconNodeBlock, S: Signer> BlockProducer<'a, B, S> { /// /// Important: this function will not check to ensure the block is not slashable. This must be /// done upstream. - fn sign_block(&mut self, mut block: BeaconBlock, domain: u64) -> Option { + fn sign_block(&mut self, mut block: BeaconBlock, domain: u64) -> Option> { self.store_produce(&block); match self.signer.sign_message(&block.signed_root()[..], domain) { @@ -140,7 +143,7 @@ impl<'a, B: BeaconNodeBlock, S: Signer> BlockProducer<'a, B, S> { /// !!! UNSAFE !!! /// /// Important: this function is presently stubbed-out. It provides ZERO SAFETY. - fn safe_to_produce(&self, _block: &BeaconBlock) -> bool { + fn safe_to_produce(&self, _block: &BeaconBlock) -> bool { // TODO: ensure the producer doesn't produce slashable blocks. // https://github.com/sigp/lighthouse/issues/160 true @@ -151,7 +154,7 @@ impl<'a, B: BeaconNodeBlock, S: Signer> BlockProducer<'a, B, S> { /// !!! UNSAFE !!! /// /// Important: this function is presently stubbed-out. It provides ZERO SAFETY. - fn store_produce(&mut self, _block: &BeaconBlock) { + fn store_produce(&mut self, _block: &BeaconBlock) { // TODO: record this block production to prevent future slashings. // https://github.com/sigp/lighthouse/issues/160 } diff --git a/validator_client/src/main.rs b/validator_client/src/main.rs index a4377e708..cbcd101da 100644 --- a/validator_client/src/main.rs +++ b/validator_client/src/main.rs @@ -110,7 +110,7 @@ fn main() { } }; default_dir.push(DEFAULT_DATA_DIR); - PathBuf::from(default_dir) + default_dir } }; @@ -203,12 +203,12 @@ fn main() { ); let result = match eth2_config.spec_constants.as_str() { - "mainnet" => ValidatorService::::start::( + "mainnet" => ValidatorService::::start( client_config, eth2_config, log.clone(), ), - "minimal" => ValidatorService::::start::( + "minimal" => ValidatorService::::start( client_config, eth2_config, log.clone(), diff --git a/validator_client/src/service.rs b/validator_client/src/service.rs index 8dbb82b37..3f99efe36 100644 --- a/validator_client/src/service.rs +++ b/validator_client/src/service.rs @@ -25,6 +25,7 @@ use protos::services_grpc::{ }; use slog::{error, info, warn}; use slot_clock::{SlotClock, SystemTimeSlotClock}; +use std::marker::PhantomData; use std::sync::Arc; use std::sync::RwLock; use std::time::{Duration, Instant, SystemTime}; @@ -41,7 +42,7 @@ const TIME_DELAY_FROM_SLOT: Duration = Duration::from_millis(200); /// The validator service. This is the main thread that executes and maintains validator /// duties. //TODO: Generalize the BeaconNode types to use testing -pub struct Service { +pub struct Service { /// The node's current fork version we are processing on. fork: Fork, /// The slot clock for this service. @@ -60,18 +61,19 @@ pub struct Service { attestation_client: Arc, /// The validator client logger. log: slog::Logger, + _phantom: PhantomData, } -impl Service { +impl Service { /// Initial connection to the beacon node to determine its properties. /// /// This tries to connect to a beacon node. Once connected, it initialised the gRPC clients /// and returns an instance of the service. - fn initialize_service( + fn initialize_service( client_config: ValidatorConfig, eth2_config: Eth2Config, log: slog::Logger, - ) -> error_chain::Result> { + ) -> error_chain::Result> { // initialise the beacon node client to check for a connection let env = Arc::new(EnvBuilder::new().build()); @@ -180,7 +182,7 @@ impl Service { } }; - let slots_per_epoch = T::slots_per_epoch(); + let slots_per_epoch = E::slots_per_epoch(); // TODO: keypairs are randomly generated; they should be loaded from a file or generated. // https://github.com/sigp/lighthouse/issues/160 @@ -212,18 +214,19 @@ impl Service { beacon_block_client, attestation_client, log, + _phantom: PhantomData, }) } /// Initialise the service then run the core thread. // TODO: Improve handling of generic BeaconNode types, to stub grpcClient - pub fn start( + pub fn start( client_config: ValidatorConfig, eth2_config: Eth2Config, log: slog::Logger, ) -> error_chain::Result<()> { // connect to the node and retrieve its properties and initialize the gRPC clients - let mut service = Service::::initialize_service::( + let mut service = Service::::initialize_service( client_config, eth2_config, log, @@ -351,6 +354,7 @@ impl Service { beacon_node, signer, slots_per_epoch, + _phantom: PhantomData::, }; block_producer.handle_produce_block(log); }); @@ -374,6 +378,7 @@ impl Service { beacon_node, signer, slots_per_epoch, + _phantom: PhantomData::, }; attestation_producer.handle_produce_attestation(log); }); From eb669ab40f4edff0c47f99d430df5092d297c5c7 Mon Sep 17 00:00:00 2001 From: Michael Sproul Date: Tue, 30 Jul 2019 17:02:38 +1000 Subject: [PATCH 09/25] Add v0.8 genesis tests (#466) Closes #452 --- .../src/common/get_compact_committees_root.rs | 13 ++- eth2/state_processing/src/genesis.rs | 16 +++- eth2/state_processing/src/lib.rs | 2 +- .../src/per_block_processing.rs | 92 +++++++++++-------- eth2/types/src/beacon_state/pubkey_cache.rs | 7 +- tests/ef_tests/src/cases.rs | 4 + .../src/cases/genesis_initialization.rs | 45 +++++++++ tests/ef_tests/src/cases/genesis_validity.rs | 42 +++++++++ tests/ef_tests/src/doc.rs | 8 ++ tests/ef_tests/tests/tests.rs | 18 ++++ 10 files changed, 200 insertions(+), 47 deletions(-) create mode 100644 tests/ef_tests/src/cases/genesis_initialization.rs create mode 100644 tests/ef_tests/src/cases/genesis_validity.rs diff --git a/eth2/state_processing/src/common/get_compact_committees_root.rs b/eth2/state_processing/src/common/get_compact_committees_root.rs index 3a1f3998b..75edb3549 100644 --- a/eth2/state_processing/src/common/get_compact_committees_root.rs +++ b/eth2/state_processing/src/common/get_compact_committees_root.rs @@ -14,15 +14,22 @@ pub fn get_compact_committees_root( // FIXME: this is a spec bug, whereby the start shard for the epoch after the next epoch // is mistakenly used. The start shard from the cache SHOULD work. // Waiting on a release to fix https://github.com/ethereum/eth2.0-specs/issues/1315 - // let start_shard = state.get_epoch_start_shard(relative_epoch)?; - let start_shard = state.next_epoch_start_shard(spec)?; + let start_shard = if relative_epoch == RelativeEpoch::Next { + state.next_epoch_start_shard(spec)? + } else { + state.get_epoch_start_shard(relative_epoch)? + }; for committee_number in 0..state.get_committee_count(relative_epoch)? { let shard = (start_shard + committee_number) % T::ShardCount::to_u64(); // FIXME: this is a partial workaround for the above, but it only works in the case // where there's a committee for every shard in every epoch. It works for the minimal // tests but not the mainnet ones. - let fake_shard = (shard + 1) % T::ShardCount::to_u64(); + let fake_shard = if relative_epoch == RelativeEpoch::Next { + (shard + 1) % T::ShardCount::to_u64() + } else { + shard + }; for &index in state .get_crosslink_committee_for_shard(fake_shard, relative_epoch)? diff --git a/eth2/state_processing/src/genesis.rs b/eth2/state_processing/src/genesis.rs index 6f1f2819e..e36261ca3 100644 --- a/eth2/state_processing/src/genesis.rs +++ b/eth2/state_processing/src/genesis.rs @@ -1,4 +1,4 @@ -use super::per_block_processing::{errors::BlockProcessingError, process_deposits}; +use super::per_block_processing::{errors::BlockProcessingError, process_deposit}; use crate::common::get_compact_committees_root; use tree_hash::TreeHash; use types::typenum::U4294967296; @@ -32,7 +32,7 @@ pub fn initialize_beacon_state_from_eth1( for (index, deposit) in deposits.into_iter().enumerate() { let deposit_data_list = VariableList::<_, U4294967296>::from(leaves[..=index].to_vec()); state.eth1_data.deposit_root = Hash256::from_slice(&deposit_data_list.tree_hash_root()); - process_deposits(&mut state, &[deposit], spec)?; + process_deposit(&mut state, &deposit, spec, true)?; } // Process activations @@ -48,6 +48,9 @@ pub fn initialize_beacon_state_from_eth1( } } + // Now that we have our validators, initialize the caches (including the committees) + state.build_all_caches(spec)?; + // Populate active_index_roots and compact_committees_roots let indices_list = VariableList::::from( state.get_active_validator_indices(T::genesis_epoch()), @@ -59,3 +62,12 @@ pub fn initialize_beacon_state_from_eth1( Ok(state) } + +/// Determine whether a candidate genesis state is suitable for starting the chain. +/// +/// Spec v0.8.1 +pub fn is_valid_genesis_state(state: &BeaconState, spec: &ChainSpec) -> bool { + state.genesis_time >= spec.min_genesis_time + && state.get_active_validator_indices(T::genesis_epoch()).len() as u64 + >= spec.min_genesis_active_validator_count +} diff --git a/eth2/state_processing/src/lib.rs b/eth2/state_processing/src/lib.rs index 90f89b599..0539855fc 100644 --- a/eth2/state_processing/src/lib.rs +++ b/eth2/state_processing/src/lib.rs @@ -7,7 +7,7 @@ pub mod per_block_processing; pub mod per_epoch_processing; pub mod per_slot_processing; -pub use genesis::initialize_beacon_state_from_eth1; +pub use genesis::{initialize_beacon_state_from_eth1, is_valid_genesis_state}; pub use per_block_processing::{ errors::{BlockInvalid, BlockProcessingError}, per_block_processing, per_block_processing_without_verifying_block_signature, diff --git a/eth2/state_processing/src/per_block_processing.rs b/eth2/state_processing/src/per_block_processing.rs index 4d58b6b18..10ca6e370 100644 --- a/eth2/state_processing/src/per_block_processing.rs +++ b/eth2/state_processing/src/per_block_processing.rs @@ -358,7 +358,7 @@ pub fn process_deposits( Invalid::DepositCountInvalid ); - // Verify deposits in parallel. + // Verify merkle proofs in parallel. deposits .par_iter() .enumerate() @@ -368,47 +368,67 @@ pub fn process_deposits( })?; // Update the state in series. - for (i, deposit) in deposits.iter().enumerate() { - state.eth1_deposit_index += 1; + for deposit in deposits { + process_deposit(state, deposit, spec, false)?; + } - // Ensure the state's pubkey cache is fully up-to-date, it will be used to check to see if the - // depositing validator already exists in the registry. - state.update_pubkey_cache()?; + Ok(()) +} - // Get an `Option` where `u64` is the validator index if this deposit public key - // already exists in the beacon_state. - let validator_index = - get_existing_validator_index(state, deposit).map_err(|e| e.into_with_index(i))?; +/// Process a single deposit, optionally verifying its merkle proof. +/// +/// Spec v0.8.1 +pub fn process_deposit( + state: &mut BeaconState, + deposit: &Deposit, + spec: &ChainSpec, + verify_merkle_proof: bool, +) -> Result<(), Error> { + let deposit_index = state.eth1_deposit_index as usize; + if verify_merkle_proof { + verify_deposit_merkle_proof(state, deposit, state.eth1_deposit_index, spec) + .map_err(|e| e.into_with_index(deposit_index))?; + } - let amount = deposit.data.amount; + state.eth1_deposit_index += 1; - if let Some(index) = validator_index { - // Update the existing validator balance. - safe_add_assign!(state.balances[index as usize], amount); - } else { - // The signature should be checked for new validators. Return early for a bad - // signature. - if verify_deposit_signature(state, deposit, spec).is_err() { - return Ok(()); - } + // Ensure the state's pubkey cache is fully up-to-date, it will be used to check to see if the + // depositing validator already exists in the registry. + state.update_pubkey_cache()?; - // Create a new validator. - let validator = Validator { - pubkey: deposit.data.pubkey.clone(), - withdrawal_credentials: deposit.data.withdrawal_credentials, - activation_eligibility_epoch: spec.far_future_epoch, - activation_epoch: spec.far_future_epoch, - exit_epoch: spec.far_future_epoch, - withdrawable_epoch: spec.far_future_epoch, - effective_balance: std::cmp::min( - amount - amount % spec.effective_balance_increment, - spec.max_effective_balance, - ), - slashed: false, - }; - state.validators.push(validator)?; - state.balances.push(deposit.data.amount)?; + // Get an `Option` where `u64` is the validator index if this deposit public key + // already exists in the beacon_state. + let validator_index = get_existing_validator_index(state, deposit) + .map_err(|e| e.into_with_index(deposit_index))?; + + let amount = deposit.data.amount; + + if let Some(index) = validator_index { + // Update the existing validator balance. + safe_add_assign!(state.balances[index as usize], amount); + } else { + // The signature should be checked for new validators. Return early for a bad + // signature. + if verify_deposit_signature(state, deposit, spec).is_err() { + return Ok(()); } + + // Create a new validator. + let validator = Validator { + pubkey: deposit.data.pubkey.clone(), + withdrawal_credentials: deposit.data.withdrawal_credentials, + activation_eligibility_epoch: spec.far_future_epoch, + activation_epoch: spec.far_future_epoch, + exit_epoch: spec.far_future_epoch, + withdrawable_epoch: spec.far_future_epoch, + effective_balance: std::cmp::min( + amount - amount % spec.effective_balance_increment, + spec.max_effective_balance, + ), + slashed: false, + }; + state.validators.push(validator)?; + state.balances.push(deposit.data.amount)?; } Ok(()) diff --git a/eth2/types/src/beacon_state/pubkey_cache.rs b/eth2/types/src/beacon_state/pubkey_cache.rs index 4632a2d9c..b601c3c11 100644 --- a/eth2/types/src/beacon_state/pubkey_cache.rs +++ b/eth2/types/src/beacon_state/pubkey_cache.rs @@ -33,11 +33,8 @@ impl PubkeyCache { } } - /// Inserts a validator index into the map. - /// - /// The added index must equal the number of validators already added to the map. This ensures - /// that an index is never skipped. + /// Looks up a validator index's by their public key. pub fn get(&self, pubkey: &PublicKey) -> Option { - self.map.get(pubkey).cloned() + self.map.get(pubkey).copied() } } diff --git a/tests/ef_tests/src/cases.rs b/tests/ef_tests/src/cases.rs index dbc5d4de6..1ae4ea1d8 100644 --- a/tests/ef_tests/src/cases.rs +++ b/tests/ef_tests/src/cases.rs @@ -12,6 +12,8 @@ mod epoch_processing_final_updates; mod epoch_processing_justification_and_finalization; mod epoch_processing_registry_updates; mod epoch_processing_slashings; +mod genesis_initialization; +mod genesis_validity; mod operations_attestation; mod operations_attester_slashing; mod operations_block_header; @@ -36,6 +38,8 @@ pub use epoch_processing_final_updates::*; pub use epoch_processing_justification_and_finalization::*; pub use epoch_processing_registry_updates::*; pub use epoch_processing_slashings::*; +pub use genesis_initialization::*; +pub use genesis_validity::*; pub use operations_attestation::*; pub use operations_attester_slashing::*; pub use operations_block_header::*; diff --git a/tests/ef_tests/src/cases/genesis_initialization.rs b/tests/ef_tests/src/cases/genesis_initialization.rs new file mode 100644 index 000000000..7ae8eef59 --- /dev/null +++ b/tests/ef_tests/src/cases/genesis_initialization.rs @@ -0,0 +1,45 @@ +use super::*; +use crate::bls_setting::BlsSetting; +use crate::case_result::compare_beacon_state_results_without_caches; +use serde_derive::Deserialize; +use state_processing::initialize_beacon_state_from_eth1; +use types::{BeaconState, Deposit, EthSpec, Hash256}; + +#[derive(Debug, Clone, Deserialize)] +#[serde(bound = "E: EthSpec")] +pub struct GenesisInitialization { + pub description: String, + pub bls_setting: Option, + pub eth1_block_hash: Hash256, + pub eth1_timestamp: u64, + pub deposits: Vec, + pub state: Option>, +} + +impl YamlDecode for GenesisInitialization { + fn yaml_decode(yaml: &str) -> Result { + Ok(serde_yaml::from_str(yaml).unwrap()) + } +} + +impl Case for GenesisInitialization { + fn description(&self) -> String { + self.description.clone() + } + + fn result(&self, _case_index: usize) -> Result<(), Error> { + self.bls_setting.unwrap_or_default().check()?; + let spec = &E::default_spec(); + + let mut result = initialize_beacon_state_from_eth1( + self.eth1_block_hash, + self.eth1_timestamp, + self.deposits.clone(), + spec, + ); + + let mut expected = self.state.clone(); + + compare_beacon_state_results_without_caches(&mut result, &mut expected) + } +} diff --git a/tests/ef_tests/src/cases/genesis_validity.rs b/tests/ef_tests/src/cases/genesis_validity.rs new file mode 100644 index 000000000..7ddd3e8fd --- /dev/null +++ b/tests/ef_tests/src/cases/genesis_validity.rs @@ -0,0 +1,42 @@ +use super::*; +use crate::bls_setting::BlsSetting; +use serde_derive::Deserialize; +use state_processing::is_valid_genesis_state; +use types::{BeaconState, EthSpec}; + +#[derive(Debug, Clone, Deserialize)] +#[serde(bound = "E: EthSpec")] +pub struct GenesisValidity { + pub description: String, + pub bls_setting: Option, + pub genesis: BeaconState, + pub is_valid: bool, +} + +impl YamlDecode for GenesisValidity { + fn yaml_decode(yaml: &str) -> Result { + Ok(serde_yaml::from_str(yaml).unwrap()) + } +} + +impl Case for GenesisValidity { + fn description(&self) -> String { + self.description.clone() + } + + fn result(&self, _case_index: usize) -> Result<(), Error> { + self.bls_setting.unwrap_or_default().check()?; + let spec = &E::default_spec(); + + let is_valid = is_valid_genesis_state(&self.genesis, spec); + + if is_valid == self.is_valid { + Ok(()) + } else { + Err(Error::NotEqual(format!( + "Got {}, expected {}", + is_valid, self.is_valid + ))) + } + } +} diff --git a/tests/ef_tests/src/doc.rs b/tests/ef_tests/src/doc.rs index c3a48f76c..7dfe9954c 100644 --- a/tests/ef_tests/src/doc.rs +++ b/tests/ef_tests/src/doc.rs @@ -134,6 +134,14 @@ impl Doc { // FIXME: skipped due to compact committees issue // run_test::>(self) } + ("genesis", "initialization", "minimal") => { + run_test::>(self) + } + ("genesis", "initialization", "mainnet") => { + run_test::>(self) + } + ("genesis", "validity", "minimal") => run_test::>(self), + ("genesis", "validity", "mainnet") => run_test::>(self), (runner, handler, config) => panic!( "No implementation for runner: \"{}\", handler: \"{}\", config: \"{}\"", runner, handler, config diff --git a/tests/ef_tests/tests/tests.rs b/tests/ef_tests/tests/tests.rs index b7b922e0a..deb699e78 100644 --- a/tests/ef_tests/tests/tests.rs +++ b/tests/ef_tests/tests/tests.rs @@ -205,3 +205,21 @@ fn epoch_processing_final_updates() { Doc::assert_tests_pass(file); }); } + +#[test] +fn genesis_initialization() { + yaml_files_in_test_dir(&Path::new("genesis").join("initialization")) + .into_par_iter() + .for_each(|file| { + Doc::assert_tests_pass(file); + }); +} + +#[test] +fn genesis_validity() { + yaml_files_in_test_dir(&Path::new("genesis").join("validity")) + .into_par_iter() + .for_each(|file| { + Doc::assert_tests_pass(file); + }); +} From 81a089aa8bf7fad099625133da284b2a568b35f7 Mon Sep 17 00:00:00 2001 From: Paul Hauner Date: Tue, 30 Jul 2019 17:19:03 +1000 Subject: [PATCH 10/25] Remove old benches (#465) * Remove cached_tree_hash benches * Remove state_processing benches --- eth2/state_processing/Cargo.toml | 5 - .../benches/bench_block_processing.rs | 270 ------------------ .../benches/bench_epoch_processing.rs | 263 ----------------- eth2/state_processing/benches/benches.rs | 103 ------- .../benches/block_benching_builder.rs | 175 ------------ eth2/utils/cached_tree_hash/Cargo.toml | 5 - .../utils/cached_tree_hash/benches/benches.rs | 73 ----- 7 files changed, 894 deletions(-) delete mode 100644 eth2/state_processing/benches/bench_block_processing.rs delete mode 100644 eth2/state_processing/benches/bench_epoch_processing.rs delete mode 100644 eth2/state_processing/benches/benches.rs delete mode 100644 eth2/state_processing/benches/block_benching_builder.rs delete mode 100644 eth2/utils/cached_tree_hash/benches/benches.rs diff --git a/eth2/state_processing/Cargo.toml b/eth2/state_processing/Cargo.toml index b6941d739..ea4e28110 100644 --- a/eth2/state_processing/Cargo.toml +++ b/eth2/state_processing/Cargo.toml @@ -4,12 +4,7 @@ version = "0.1.0" authors = ["Paul Hauner "] edition = "2018" -[[bench]] -name = "benches" -harness = false - [dev-dependencies] -criterion = "0.2" env_logger = "0.6.0" serde = "1.0" serde_derive = "1.0" diff --git a/eth2/state_processing/benches/bench_block_processing.rs b/eth2/state_processing/benches/bench_block_processing.rs deleted file mode 100644 index 978d532f1..000000000 --- a/eth2/state_processing/benches/bench_block_processing.rs +++ /dev/null @@ -1,270 +0,0 @@ -use criterion::Criterion; -use criterion::{black_box, Benchmark}; -use state_processing::{ - per_block_processing, - per_block_processing::{ - process_attestations, process_attester_slashings, process_deposits, process_eth1_data, - process_exits, process_proposer_slashings, process_randao, process_transfers, - verify_block_signature, - }, -}; -use tree_hash::TreeHash; -use types::*; - -/// Run the detailed benchmarking suite on the given `BeaconState`. -/// -/// `desc` will be added to the title of each bench. -pub fn bench_block_processing( - c: &mut Criterion, - initial_block: &BeaconBlock, - initial_state: &BeaconState, - initial_spec: &ChainSpec, - desc: &str, -) { - let state = initial_state.clone(); - let block = initial_block.clone(); - let spec = initial_spec.clone(); - c.bench( - &format!("{}/block_processing", desc), - Benchmark::new("verify_block_signature", move |b| { - b.iter_batched( - || state.clone(), - |mut state| { - verify_block_signature(&mut state, &block, &spec).unwrap(); - state - }, - criterion::BatchSize::SmallInput, - ) - }) - .sample_size(10), - ); - - let state = initial_state.clone(); - let block = initial_block.clone(); - let spec = initial_spec.clone(); - c.bench( - &format!("{}/block_processing", desc), - Benchmark::new("process_randao", move |b| { - b.iter_batched( - || state.clone(), - |mut state| { - process_randao(&mut state, &block, &spec).unwrap(); - state - }, - criterion::BatchSize::SmallInput, - ) - }) - .sample_size(10), - ); - - let state = initial_state.clone(); - let block = initial_block.clone(); - c.bench( - &format!("{}/block_processing", desc), - Benchmark::new("process_eth1_data", move |b| { - b.iter_batched( - || state.clone(), - |mut state| { - process_eth1_data(&mut state, &block.eth1_data).unwrap(); - state - }, - criterion::BatchSize::SmallInput, - ) - }) - .sample_size(10), - ); - - let state = initial_state.clone(); - let block = initial_block.clone(); - let spec = initial_spec.clone(); - c.bench( - &format!("{}/block_processing", desc), - Benchmark::new("process_proposer_slashings", move |b| { - b.iter_batched( - || state.clone(), - |mut state| { - process_proposer_slashings(&mut state, &block.body.proposer_slashings, &spec) - .unwrap(); - state - }, - criterion::BatchSize::SmallInput, - ) - }) - .sample_size(10), - ); - - let state = initial_state.clone(); - let block = initial_block.clone(); - let spec = initial_spec.clone(); - c.bench( - &format!("{}/block_processing", desc), - Benchmark::new("process_attester_slashings", move |b| { - b.iter_batched( - || state.clone(), - |mut state| { - process_attester_slashings(&mut state, &block.body.attester_slashings, &spec) - .unwrap(); - state - }, - criterion::BatchSize::SmallInput, - ) - }) - .sample_size(10), - ); - - let state = initial_state.clone(); - let block = initial_block.clone(); - let spec = initial_spec.clone(); - c.bench( - &format!("{}/block_processing", desc), - Benchmark::new("process_attestations", move |b| { - b.iter_batched( - || state.clone(), - |mut state| { - process_attestations(&mut state, &block.body.attestations, &spec).unwrap(); - state - }, - criterion::BatchSize::SmallInput, - ) - }) - .sample_size(10), - ); - - let state = initial_state.clone(); - let block = initial_block.clone(); - let spec = initial_spec.clone(); - c.bench( - &format!("{}/block_processing", desc), - Benchmark::new("process_deposits", move |b| { - b.iter_batched( - || state.clone(), - |mut state| { - process_deposits(&mut state, &block.body.deposits, &spec).unwrap(); - state - }, - criterion::BatchSize::SmallInput, - ) - }) - .sample_size(10), - ); - - let state = initial_state.clone(); - let block = initial_block.clone(); - let spec = initial_spec.clone(); - c.bench( - &format!("{}/block_processing", desc), - Benchmark::new("process_exits", move |b| { - b.iter_batched( - || state.clone(), - |mut state| { - process_exits(&mut state, &block.body.voluntary_exits, &spec).unwrap(); - state - }, - criterion::BatchSize::SmallInput, - ) - }) - .sample_size(10), - ); - - let state = initial_state.clone(); - let block = initial_block.clone(); - let spec = initial_spec.clone(); - c.bench( - &format!("{}/block_processing", desc), - Benchmark::new("process_transfers", move |b| { - b.iter_batched( - || state.clone(), - |mut state| { - process_transfers(&mut state, &block.body.transfers, &spec).unwrap(); - state - }, - criterion::BatchSize::SmallInput, - ) - }) - .sample_size(10), - ); - - let state = initial_state.clone(); - let block = initial_block.clone(); - let spec = initial_spec.clone(); - c.bench( - &format!("{}/block_processing", desc), - Benchmark::new("per_block_processing", move |b| { - b.iter_batched( - || state.clone(), - |mut state| { - per_block_processing(&mut state, &block, &spec).unwrap(); - state - }, - criterion::BatchSize::SmallInput, - ) - }) - .sample_size(10), - ); - - let mut state = initial_state.clone(); - state.drop_cache(RelativeEpoch::Previous); - let spec = initial_spec.clone(); - c.bench( - &format!("{}/block_processing", desc), - Benchmark::new("build_previous_state_committee_cache", move |b| { - b.iter_batched( - || state.clone(), - |mut state| { - state - .build_committee_cache(RelativeEpoch::Previous, &spec) - .unwrap(); - state - }, - criterion::BatchSize::SmallInput, - ) - }) - .sample_size(10), - ); - - let mut state = initial_state.clone(); - state.drop_cache(RelativeEpoch::Current); - let spec = initial_spec.clone(); - c.bench( - &format!("{}/block_processing", desc), - Benchmark::new("build_current_state_committee_cache", move |b| { - b.iter_batched( - || state.clone(), - |mut state| { - state - .build_committee_cache(RelativeEpoch::Current, &spec) - .unwrap(); - state - }, - criterion::BatchSize::SmallInput, - ) - }) - .sample_size(10), - ); - - let mut state = initial_state.clone(); - state.drop_pubkey_cache(); - c.bench( - &format!("{}/block_processing", desc), - Benchmark::new("build_pubkey_cache", move |b| { - b.iter_batched( - || state.clone(), - |mut state| { - state.update_pubkey_cache().unwrap(); - state - }, - criterion::BatchSize::SmallInput, - ) - }) - .sample_size(10), - ); - - let block = initial_block.clone(); - c.bench( - &format!("{}/block_processing", desc), - Benchmark::new("tree_hash_block", move |b| { - b.iter(|| black_box(block.tree_hash_root())) - }) - .sample_size(10), - ); -} diff --git a/eth2/state_processing/benches/bench_epoch_processing.rs b/eth2/state_processing/benches/bench_epoch_processing.rs deleted file mode 100644 index ee9e39a7d..000000000 --- a/eth2/state_processing/benches/bench_epoch_processing.rs +++ /dev/null @@ -1,263 +0,0 @@ -use criterion::Criterion; -use criterion::{black_box, Benchmark}; -use state_processing::{ - per_epoch_processing, - per_epoch_processing::{ - clean_attestations, initialize_validator_statuses, process_crosslinks, process_eth1_data, - process_justification, process_rewards_and_penalities, process_validator_registry, - update_active_tree_index_roots, update_latest_slashed_balances, - }, -}; -use tree_hash::TreeHash; -use types::test_utils::TestingBeaconStateBuilder; -use types::*; - -pub const BENCHING_SAMPLE_SIZE: usize = 10; -pub const SMALL_BENCHING_SAMPLE_SIZE: usize = 10; - -/// Run the benchmarking suite on a foundation spec with 16,384 validators. -pub fn bench_epoch_processing_n_validators(c: &mut Criterion, validator_count: usize) { - let spec = ChainSpec::mainnet(); - - let mut builder = - TestingBeaconStateBuilder::from_default_keypairs_file_if_exists(validator_count, &spec); - - // Set the state to be just before an epoch transition. - let target_slot = (T::genesis_epoch() + 4).end_slot(T::slots_per_epoch()); - builder.teleport_to_slot(target_slot, &spec); - - // Builds all caches; benches will not contain shuffling/committee building times. - builder.build_caches(&spec).unwrap(); - - // Inserts one attestation with full participation for each committee able to include an - // attestation in this state. - builder.insert_attestations(&spec); - - let (state, _keypairs) = builder.build(); - - // Assert that the state has an attestations for each committee that is able to include an - // attestation in the state. - let committees_per_epoch = spec.get_committee_count(validator_count); - let committees_per_slot = committees_per_epoch / T::slots_per_epoch(); - let previous_epoch_attestations = committees_per_epoch; - let current_epoch_attestations = - committees_per_slot * (T::slots_per_epoch() - spec.min_attestation_inclusion_delay); - assert_eq!( - state.latest_attestations.len() as u64, - previous_epoch_attestations + current_epoch_attestations, - "The state should have an attestation for each committee." - ); - - // Assert that we will run the first arm of process_rewards_and_penalties - let epochs_since_finality = state.next_epoch(&spec) - state.finalized_epoch; - assert_eq!( - epochs_since_finality, 4, - "Epochs since finality should be 4" - ); - - bench_epoch_processing(c, &state, &spec, &format!("{}_validators", validator_count)); -} - -/// Run the detailed benchmarking suite on the given `BeaconState`. -/// -/// `desc` will be added to the title of each bench. -fn bench_epoch_processing(c: &mut Criterion, state: &BeaconState, spec: &ChainSpec, desc: &str) { - let state_clone = state.clone(); - let spec_clone = spec.clone(); - c.bench( - &format!("{}/epoch_processing", desc), - Benchmark::new("process_eth1_data", move |b| { - b.iter_batched( - || state_clone.clone(), - |mut state| { - process_eth1_data(&mut state, &spec_clone); - state - }, - criterion::BatchSize::SmallInput, - ) - }) - .sample_size(BENCHING_SAMPLE_SIZE), - ); - - let state_clone = state.clone(); - let spec_clone = spec.clone(); - c.bench( - &format!("{}/epoch_processing", desc), - Benchmark::new("initialize_validator_statuses", move |b| { - b.iter_batched( - || state_clone.clone(), - |mut state| { - initialize_validator_statuses(&mut state, &spec_clone).unwrap(); - state - }, - criterion::BatchSize::SmallInput, - ) - }) - .sample_size(BENCHING_SAMPLE_SIZE), - ); - - let state_clone = state.clone(); - let spec_clone = spec.clone(); - let attesters = initialize_validator_statuses(&state, &spec).unwrap(); - c.bench( - &format!("{}/epoch_processing", desc), - Benchmark::new("process_justification", move |b| { - b.iter_batched( - || state_clone.clone(), - |mut state| { - process_justification(&mut state, &attesters.total_balances, &spec_clone); - state - }, - criterion::BatchSize::SmallInput, - ) - }) - .sample_size(10), - ); - - let state_clone = state.clone(); - let spec_clone = spec.clone(); - c.bench( - &format!("{}/epoch_processing", desc), - Benchmark::new("process_crosslinks", move |b| { - b.iter_batched( - || state_clone.clone(), - |mut state| black_box(process_crosslinks(&mut state, &spec_clone).unwrap()), - criterion::BatchSize::SmallInput, - ) - }) - .sample_size(BENCHING_SAMPLE_SIZE), - ); - - let mut state_clone = state.clone(); - let spec_clone = spec.clone(); - let attesters = initialize_validator_statuses(&state, &spec).unwrap(); - let winning_root_for_shards = process_crosslinks(&mut state_clone, &spec).unwrap(); - c.bench( - &format!("{}/epoch_processing", desc), - Benchmark::new("process_rewards_and_penalties", move |b| { - b.iter_batched( - || (state_clone.clone(), attesters.clone()), - |(mut state, mut attesters)| { - process_rewards_and_penalities( - &mut state, - &mut attesters, - &winning_root_for_shards, - &spec_clone, - ) - .unwrap(); - state - }, - criterion::BatchSize::SmallInput, - ) - }) - .sample_size(SMALL_BENCHING_SAMPLE_SIZE), - ); - - let state_clone = state.clone(); - let spec_clone = spec.clone(); - c.bench( - &format!("{}/epoch_processing", desc), - Benchmark::new("process_ejections", move |b| { - b.iter_batched( - || state_clone.clone(), - |mut state| { - state.process_ejections(&spec_clone); - state - }, - criterion::BatchSize::SmallInput, - ) - }) - .sample_size(BENCHING_SAMPLE_SIZE), - ); - - let state_clone = state.clone(); - let spec_clone = spec.clone(); - c.bench( - &format!("{}/epoch_processing", desc), - Benchmark::new("process_validator_registry", move |b| { - b.iter_batched( - || state_clone.clone(), - |mut state| { - process_validator_registry(&mut state, &spec_clone).unwrap(); - state - }, - criterion::BatchSize::SmallInput, - ) - }) - .sample_size(BENCHING_SAMPLE_SIZE), - ); - - let state_clone = state.clone(); - let spec_clone = spec.clone(); - c.bench( - &format!("{}/epoch_processing", desc), - Benchmark::new("update_active_tree_index_roots", move |b| { - b.iter_batched( - || state_clone.clone(), - |mut state| { - update_active_tree_index_roots(&mut state, &spec_clone).unwrap(); - state - }, - criterion::BatchSize::SmallInput, - ) - }) - .sample_size(BENCHING_SAMPLE_SIZE), - ); - - let state_clone = state.clone(); - let spec_clone = spec.clone(); - c.bench( - &format!("{}/epoch_processing", desc), - Benchmark::new("update_latest_slashed_balances", move |b| { - b.iter_batched( - || state_clone.clone(), - |mut state| { - update_latest_slashed_balances(&mut state, &spec_clone); - state - }, - criterion::BatchSize::SmallInput, - ) - }) - .sample_size(BENCHING_SAMPLE_SIZE), - ); - - let state_clone = state.clone(); - let spec_clone = spec.clone(); - c.bench( - &format!("{}/epoch_processing", desc), - Benchmark::new("clean_attestations", move |b| { - b.iter_batched( - || state_clone.clone(), - |mut state| { - clean_attestations(&mut state, &spec_clone); - state - }, - criterion::BatchSize::SmallInput, - ) - }) - .sample_size(BENCHING_SAMPLE_SIZE), - ); - - let state_clone = state.clone(); - let spec_clone = spec.clone(); - c.bench( - &format!("{}/epoch_processing", desc), - Benchmark::new("per_epoch_processing", move |b| { - b.iter_batched( - || state_clone.clone(), - |mut state| black_box(per_epoch_processing(&mut state, &spec_clone).unwrap()), - criterion::BatchSize::SmallInput, - ) - }) - .sample_size(SMALL_BENCHING_SAMPLE_SIZE), - ); - - let state_clone = state.clone(); - c.bench( - &format!("{}/epoch_processing", desc), - Benchmark::new("tree_hash_state", move |b| { - b.iter(|| black_box(state_clone.tree_hash_root())) - }) - .sample_size(SMALL_BENCHING_SAMPLE_SIZE), - ); -} diff --git a/eth2/state_processing/benches/benches.rs b/eth2/state_processing/benches/benches.rs deleted file mode 100644 index 3d884c3d8..000000000 --- a/eth2/state_processing/benches/benches.rs +++ /dev/null @@ -1,103 +0,0 @@ -use block_benching_builder::BlockBenchingBuilder; -use criterion::Criterion; -use criterion::{criterion_group, criterion_main}; -use env_logger::{Builder, Env}; -use log::info; -use types::*; - -mod bench_block_processing; -mod bench_epoch_processing; -mod block_benching_builder; - -pub const VALIDATOR_COUNT: usize = 16_384; - -// `LOG_LEVEL == "info"` gives handy messages. -pub const LOG_LEVEL: &str = "info"; - -/// Build a worst-case block and benchmark processing it. -pub fn block_processing_worst_case(c: &mut Criterion) { - if LOG_LEVEL != "" { - Builder::from_env(Env::default().default_filter_or(LOG_LEVEL)).init(); - } - info!( - "Building worst case block bench with {} validators", - VALIDATOR_COUNT - ); - - // Use the specifications from the Eth2.0 spec. - let spec = ChainSpec::mainnet(); - - // Create a builder for configuring the block and state for benching. - let mut bench_builder = BlockBenchingBuilder::new(VALIDATOR_COUNT, &spec); - - // Set the number of included operations to be maximum (e.g., `MAX_ATTESTATIONS`, etc.) - bench_builder.maximize_block_operations(&spec); - - // Set the state and block to be in the last slot of the 4th epoch. - let last_slot_of_epoch = (T::genesis_epoch() + 4).end_slot(T::slots_per_epoch()); - bench_builder.set_slot(last_slot_of_epoch, &spec); - - // Build all the state caches so the build times aren't included in the benches. - bench_builder.build_caches(&spec); - - // Generate the block and state then run benches. - let (block, state) = bench_builder.build(&spec); - bench_block_processing::bench_block_processing( - c, - &block, - &state, - &spec, - &format!("{}_validators/worst_case", VALIDATOR_COUNT), - ); -} - -/// Build a reasonable-case block and benchmark processing it. -pub fn block_processing_reasonable_case(c: &mut Criterion) { - info!( - "Building reasonable case block bench with {} validators", - VALIDATOR_COUNT - ); - - // Use the specifications from the Eth2.0 spec. - let spec = ChainSpec::mainnet(); - - // Create a builder for configuring the block and state for benching. - let mut bench_builder = BlockBenchingBuilder::new(VALIDATOR_COUNT, &spec); - - // Set the number of included operations to what we might expect normally. - bench_builder.num_proposer_slashings = 0; - bench_builder.num_attester_slashings = 0; - bench_builder.num_attestations = (spec.shard_count / T::slots_per_epoch()) as usize; - bench_builder.num_deposits = 2; - bench_builder.num_exits = 2; - bench_builder.num_transfers = 2; - - // Set the state and block to be in the last slot of the 4th epoch. - let last_slot_of_epoch = (T::genesis_epoch() + 4).end_slot(T::slots_per_epoch()); - bench_builder.set_slot(last_slot_of_epoch, &spec); - - // Build all the state caches so the build times aren't included in the benches. - bench_builder.build_caches(&spec); - - // Generate the block and state then run benches. - let (block, state) = bench_builder.build(&spec); - bench_block_processing::bench_block_processing( - c, - &block, - &state, - &spec, - &format!("{}_validators/reasonable_case", VALIDATOR_COUNT), - ); -} - -pub fn state_processing(c: &mut Criterion) { - bench_epoch_processing::bench_epoch_processing_n_validators(c, VALIDATOR_COUNT); -} - -criterion_group!( - benches, - block_processing_reasonable_case, - block_processing_worst_case, - state_processing -); -criterion_main!(benches); diff --git a/eth2/state_processing/benches/block_benching_builder.rs b/eth2/state_processing/benches/block_benching_builder.rs deleted file mode 100644 index b993851d7..000000000 --- a/eth2/state_processing/benches/block_benching_builder.rs +++ /dev/null @@ -1,175 +0,0 @@ -use log::info; -use types::test_utils::{TestingBeaconBlockBuilder, TestingBeaconStateBuilder}; -use types::*; - -pub struct BlockBenchingBuilder { - pub state_builder: TestingBeaconStateBuilder, - pub block_builder: TestingBeaconBlockBuilder, - - pub num_validators: usize, - pub num_proposer_slashings: usize, - pub num_attester_slashings: usize, - pub num_indices_per_slashable_vote: usize, - pub num_attestations: usize, - pub num_deposits: usize, - pub num_exits: usize, - pub num_transfers: usize, -} - -impl BlockBenchingBuilder { - pub fn new(num_validators: usize, spec: &ChainSpec) -> Self { - let state_builder = - TestingBeaconStateBuilder::from_default_keypairs_file_if_exists(num_validators, &spec); - let block_builder = TestingBeaconBlockBuilder::new(spec); - - Self { - state_builder, - block_builder, - num_validators: 0, - num_proposer_slashings: 0, - num_attester_slashings: 0, - num_indices_per_slashable_vote: spec.max_indices_per_slashable_vote as usize, - num_attestations: 0, - num_deposits: 0, - num_exits: 0, - num_transfers: 0, - } - } - - pub fn maximize_block_operations(&mut self, spec: &ChainSpec) { - self.num_proposer_slashings = spec.max_proposer_slashings as usize; - self.num_attester_slashings = spec.max_attester_slashings as usize; - self.num_indices_per_slashable_vote = spec.max_indices_per_slashable_vote as usize; - self.num_attestations = spec.max_attestations as usize; - self.num_deposits = spec.max_deposits as usize; - self.num_exits = spec.max_voluntary_exits as usize; - self.num_transfers = spec.max_transfers as usize; - } - - pub fn set_slot(&mut self, slot: Slot, spec: &ChainSpec) { - self.state_builder.teleport_to_slot(slot, &spec); - } - - pub fn build_caches(&mut self, spec: &ChainSpec) { - // Builds all caches; benches will not contain shuffling/committee building times. - self.state_builder.build_caches(&spec).unwrap(); - } - - pub fn build(mut self, spec: &ChainSpec) -> (BeaconBlock, BeaconState) { - let (mut state, keypairs) = self.state_builder.build(); - let builder = &mut self.block_builder; - - builder.set_slot(state.slot); - - let proposer_index = state.get_beacon_proposer_index(state.slot, spec).unwrap(); - let keypair = &keypairs[proposer_index]; - - builder.set_randao_reveal(&keypair.sk, &state.fork, spec); - - // Used as a stream of validator indices for use in slashings, exits, etc. - let mut validators_iter = (0..keypairs.len() as u64).into_iter(); - - // Insert `ProposerSlashing` objects. - for _ in 0..self.num_proposer_slashings { - let validator_index = validators_iter.next().expect("Insufficient validators."); - - builder.insert_proposer_slashing( - validator_index, - &keypairs[validator_index as usize].sk, - &state.fork, - spec, - ); - } - info!( - "Inserted {} proposer slashings.", - builder.block.body.proposer_slashings.len() - ); - - // Insert `AttesterSlashing` objects - for _ in 0..self.num_attester_slashings { - let mut attesters: Vec = vec![]; - let mut secret_keys: Vec<&SecretKey> = vec![]; - - for _ in 0..self.num_indices_per_slashable_vote { - let validator_index = validators_iter.next().expect("Insufficient validators."); - - attesters.push(validator_index); - secret_keys.push(&keypairs[validator_index as usize].sk); - } - - builder.insert_attester_slashing(&attesters, &secret_keys, &state.fork, spec); - } - info!( - "Inserted {} attester slashings.", - builder.block.body.attester_slashings.len() - ); - - // Insert `Attestation` objects. - let all_secret_keys: Vec<&SecretKey> = keypairs.iter().map(|keypair| &keypair.sk).collect(); - builder - .insert_attestations( - &state, - &all_secret_keys, - self.num_attestations as usize, - spec, - ) - .unwrap(); - info!( - "Inserted {} attestations.", - builder.block.body.attestations.len() - ); - - // Insert `Deposit` objects. - for i in 0..self.num_deposits { - builder.insert_deposit( - 32_000_000_000, - state.deposit_index + (i as u64), - &state, - spec, - ); - } - info!("Inserted {} deposits.", builder.block.body.deposits.len()); - - // Insert the maximum possible number of `Exit` objects. - for _ in 0..self.num_exits { - let validator_index = validators_iter.next().expect("Insufficient validators."); - - builder.insert_exit( - &state, - validator_index, - &keypairs[validator_index as usize].sk, - spec, - ); - } - info!( - "Inserted {} exits.", - builder.block.body.voluntary_exits.len() - ); - - // Insert the maximum possible number of `Transfer` objects. - for _ in 0..self.num_transfers { - let validator_index = validators_iter.next().expect("Insufficient validators."); - - // Manually set the validator to be withdrawn. - state.validator_registry[validator_index as usize].withdrawable_epoch = - state.previous_epoch(spec); - - builder.insert_transfer( - &state, - validator_index, - validator_index, - 1, - keypairs[validator_index as usize].clone(), - spec, - ); - } - info!("Inserted {} transfers.", builder.block.body.transfers.len()); - - let mut block = self.block_builder.build(&keypair.sk, &state.fork, spec); - - // Set the eth1 data to be different from the state. - block.eth1_data.block_hash = Hash256::from_slice(&vec![42; 32]); - - (block, state) - } -} diff --git a/eth2/utils/cached_tree_hash/Cargo.toml b/eth2/utils/cached_tree_hash/Cargo.toml index 7b331ad68..c8881eb0f 100644 --- a/eth2/utils/cached_tree_hash/Cargo.toml +++ b/eth2/utils/cached_tree_hash/Cargo.toml @@ -4,12 +4,7 @@ version = "0.1.0" authors = ["Paul Hauner "] edition = "2018" -[[bench]] -name = "benches" -harness = false - [dev-dependencies] -criterion = "0.2" tree_hash_derive = { path = "../tree_hash_derive" } [dependencies] diff --git a/eth2/utils/cached_tree_hash/benches/benches.rs b/eth2/utils/cached_tree_hash/benches/benches.rs deleted file mode 100644 index be7e26bb5..000000000 --- a/eth2/utils/cached_tree_hash/benches/benches.rs +++ /dev/null @@ -1,73 +0,0 @@ -#[macro_use] -extern crate criterion; - -use cached_tree_hash::TreeHashCache; -use criterion::black_box; -use criterion::{Benchmark, Criterion}; -use ethereum_types::H256 as Hash256; -use hashing::hash; -use tree_hash::TreeHash; - -fn criterion_benchmark(c: &mut Criterion) { - let n = 1024; - - let source_vec: Vec = (0..n).map(|_| Hash256::random()).collect(); - - let mut source_modified_vec = source_vec.clone(); - source_modified_vec[n - 1] = Hash256::random(); - - let modified_vec = source_modified_vec.clone(); - c.bench( - &format!("vec_of_{}_hashes", n), - Benchmark::new("standard", move |b| { - b.iter_with_setup( - || modified_vec.clone(), - |modified_vec| black_box(modified_vec.tree_hash_root()), - ) - }) - .sample_size(100), - ); - - let modified_vec = source_modified_vec.clone(); - c.bench( - &format!("vec_of_{}_hashes", n), - Benchmark::new("build_cache", move |b| { - b.iter_with_setup( - || modified_vec.clone(), - |vec| black_box(TreeHashCache::new(&vec, 0)), - ) - }) - .sample_size(100), - ); - - let vec = source_vec.clone(); - let modified_vec = source_modified_vec.clone(); - c.bench( - &format!("vec_of_{}_hashes", n), - Benchmark::new("cache_update", move |b| { - b.iter_with_setup( - || { - let cache = TreeHashCache::new(&vec, 0).unwrap(); - (cache, modified_vec.clone()) - }, - |(mut cache, modified_vec)| black_box(cache.update(&modified_vec)), - ) - }) - .sample_size(100), - ); - - c.bench( - &format!("{}_hashes", n), - Benchmark::new("hash_64_bytes", move |b| { - b.iter(|| { - for _ in 0..n { - let _digest = hash(&[42; 64]); - } - }) - }) - .sample_size(100), - ); -} - -criterion_group!(benches, criterion_benchmark); -criterion_main!(benches); From 48cdf51cce3f81173edae3351881b3187cc24863 Mon Sep 17 00:00:00 2001 From: Maximilian Ehlers <2843450+b-m-f@users.noreply.github.com> Date: Tue, 30 Jul 2019 10:40:45 +0100 Subject: [PATCH 11/25] Update README.md --- README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/README.md b/README.md index 9f0e353c5..08c42539b 100644 --- a/README.md +++ b/README.md @@ -121,7 +121,7 @@ Note that all future created nodes can use the same boot-node ENR. Once connecte In a third terminal window, start a validator client: ``` -$ ./validator-client +$ ./validator_client ``` You should be able to observe the validator signing blocks, the boot node From eb6ba505440153a105b75969eb913903ce94be04 Mon Sep 17 00:00:00 2001 From: b-m-f Date: Tue, 30 Jul 2019 11:33:37 +0100 Subject: [PATCH 12/25] Split network and swarm discovery loop, break each when not ready --- beacon_node/network/src/service.rs | 14 +++++++------- 1 file changed, 7 insertions(+), 7 deletions(-) diff --git a/beacon_node/network/src/service.rs b/beacon_node/network/src/service.rs index a771f8add..046c59774 100644 --- a/beacon_node/network/src/service.rs +++ b/beacon_node/network/src/service.rs @@ -105,10 +105,8 @@ fn network_service( log: slog::Logger, ) -> impl futures::Future { futures::future::poll_fn(move || -> Result<_, eth2_libp2p::error::Error> { - // only end the loop once both major polls are not ready. - let mut not_ready_count = 0; - while not_ready_count < 2 { - not_ready_count = 0; + // if the network channel is not ready, try the swarm + loop { // poll the network channel match network_recv.poll() { Ok(Async::Ready(Some(message))) => match message { @@ -123,7 +121,7 @@ fn network_service( libp2p_service.lock().swarm.publish(topics, *message); } }, - Ok(Async::NotReady) => not_ready_count += 1, + Ok(Async::NotReady) => break, Ok(Async::Ready(None)) => { return Err(eth2_libp2p::error::Error::from("Network channel closed")); } @@ -131,7 +129,9 @@ fn network_service( return Err(eth2_libp2p::error::Error::from("Network channel error")); } } + } + loop { // poll the swarm match libp2p_service.lock().poll() { Ok(Async::Ready(Some(event))) => match event { @@ -164,8 +164,8 @@ fn network_service( } }, Ok(Async::Ready(None)) => unreachable!("Stream never ends"), - Ok(Async::NotReady) => not_ready_count += 1, - Err(_) => not_ready_count += 1, + Ok(Async::NotReady) => break, + Err(_) => break, } } From 309b10c4a84c512e88c339ff93417ec916c71228 Mon Sep 17 00:00:00 2001 From: blacktemplar Date: Wed, 31 Jul 2019 01:06:53 +0200 Subject: [PATCH 13/25] add logging functionality for aligning key value pairs after message (#461) * add logging functionality for aligning key value pairs after the main messages * move to own crate, change default message width to 40 * use FullFormat in validator_client (CompactFormat is not compatible with aligning) * move logging to eth2/utils/logging --- Cargo.toml | 1 + beacon_node/Cargo.toml | 1 + beacon_node/src/main.rs | 1 + eth2/utils/logging/Cargo.toml | 9 +++ eth2/utils/logging/src/lib.rs | 122 ++++++++++++++++++++++++++++++++++ validator_client/Cargo.toml | 1 + validator_client/src/main.rs | 3 +- 7 files changed, 137 insertions(+), 1 deletion(-) create mode 100644 eth2/utils/logging/Cargo.toml create mode 100644 eth2/utils/logging/src/lib.rs diff --git a/Cargo.toml b/Cargo.toml index 66028ecd5..20c5b3175 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -10,6 +10,7 @@ members = [ "eth2/utils/compare_fields_derive", "eth2/utils/eth2_config", "eth2/utils/hashing", + "eth2/utils/logging", "eth2/utils/merkle_proof", "eth2/utils/int_to_bytes", "eth2/utils/serde_hex", diff --git a/beacon_node/Cargo.toml b/beacon_node/Cargo.toml index 24e148dd0..9124047e4 100644 --- a/beacon_node/Cargo.toml +++ b/beacon_node/Cargo.toml @@ -21,3 +21,4 @@ futures = "0.1.25" exit-future = "0.1.3" env_logger = "0.6.1" dirs = "2.0.1" +logging = { path = "../eth2/utils/logging" } diff --git a/beacon_node/src/main.rs b/beacon_node/src/main.rs index 5d967fc1c..4ad544bb1 100644 --- a/beacon_node/src/main.rs +++ b/beacon_node/src/main.rs @@ -173,6 +173,7 @@ fn main() { // build the initial logger let decorator = slog_term::TermDecorator::new().build(); + let decorator = logging::AlignedTermDecorator::new(decorator, logging::MAX_MESSAGE_WIDTH); let drain = slog_term::FullFormat::new(decorator).build().fuse(); let drain = slog_async::Async::new(drain).build(); diff --git a/eth2/utils/logging/Cargo.toml b/eth2/utils/logging/Cargo.toml new file mode 100644 index 000000000..62a8b41e0 --- /dev/null +++ b/eth2/utils/logging/Cargo.toml @@ -0,0 +1,9 @@ +[package] +name = "logging" +version = "0.1.0" +authors = ["blacktemplar "] +edition = "2018" + +[dependencies] +slog = { version = "^2.2.3" } +slog-term = "^2.4.0" diff --git a/eth2/utils/logging/src/lib.rs b/eth2/utils/logging/src/lib.rs new file mode 100644 index 000000000..fb32698ed --- /dev/null +++ b/eth2/utils/logging/src/lib.rs @@ -0,0 +1,122 @@ +use std::io::{Write, Result}; + +pub const MAX_MESSAGE_WIDTH: usize = 40; + +pub struct AlignedTermDecorator { + wrapped: slog_term::TermDecorator, + message_width: usize, +} + +impl AlignedTermDecorator { + pub fn new(decorator: slog_term::TermDecorator, message_width: usize) -> AlignedTermDecorator { + AlignedTermDecorator { + wrapped: decorator, + message_width, + } + } +} + +impl slog_term::Decorator for AlignedTermDecorator { + fn with_record(&self, _record: &slog::Record, _logger_values: &slog::OwnedKVList, f: F) + -> Result<()> where + F: FnOnce(&mut dyn slog_term::RecordDecorator) -> std::io::Result<()> { + self.wrapped.with_record(_record, _logger_values, |deco| { + f(&mut AlignedRecordDecorator::new(deco, self.message_width)) + }) + } +} + +struct AlignedRecordDecorator<'a> { + wrapped: &'a mut dyn slog_term::RecordDecorator, + message_count: usize, + message_active: bool, + ignore_comma: bool, + message_width: usize, +} + +impl<'a> AlignedRecordDecorator<'a> { + fn new(decorator: &'a mut dyn slog_term::RecordDecorator, message_width: usize) -> + AlignedRecordDecorator<'a> { + AlignedRecordDecorator { + wrapped: decorator, + message_count: 0, + ignore_comma: false, + message_active: false, + message_width, + } + } +} + +impl<'a> Write for AlignedRecordDecorator<'a> { + fn write(&mut self, buf: &[u8]) -> Result { + if self.ignore_comma { + //don't write comma + self.ignore_comma = false; + Ok(buf.len()) + } else if self.message_active { + self.wrapped.write(buf).map(|n| { + self.message_count += n; + n + }) + } else { + self.wrapped.write(buf) + } + } + + fn flush(&mut self) -> Result<()> { + self.wrapped.flush() + } +} + +impl<'a> slog_term::RecordDecorator for AlignedRecordDecorator<'a> { + fn reset(&mut self) -> Result<()> { + self.message_active = false; + self.message_count = 0; + self.ignore_comma = false; + self.wrapped.reset() + } + + fn start_whitespace(&mut self) -> Result<()> { + self.wrapped.start_whitespace() + } + + fn start_msg(&mut self) -> Result<()> { + self.message_active = true; + self.ignore_comma = false; + self.wrapped.start_msg() + } + + fn start_timestamp(&mut self) -> Result<()> { + self.wrapped.start_timestamp() + } + + fn start_level(&mut self) -> Result<()> { + self.wrapped.start_level() + } + + fn start_comma(&mut self) -> Result<()> { + if self.message_active && self.message_count + 1 < self.message_width { + self.ignore_comma = true; + } + self.wrapped.start_comma() + } + + fn start_key(&mut self) -> Result<()> { + if self.message_active && self.message_count + 1 < self.message_width { + write!(self, "{}", std::iter::repeat(' ').take(self.message_width - self.message_count) + .collect::())?; + self.message_active = false; + self.message_count = 0; + self.ignore_comma = false; + } + self.wrapped.start_key() + } + + fn start_value(&mut self) -> Result<()> { + self.wrapped.start_value() + } + + fn start_separator(&mut self) -> Result<()> { + self.wrapped.start_separator() + } +} diff --git a/validator_client/Cargo.toml b/validator_client/Cargo.toml index 19bd10a1e..13bfaa49e 100644 --- a/validator_client/Cargo.toml +++ b/validator_client/Cargo.toml @@ -34,3 +34,4 @@ error-chain = "0.12.0" bincode = "^1.1.2" futures = "0.1.25" dirs = "2.0.1" +logging = { path = "../eth2/utils/logging" } diff --git a/validator_client/src/main.rs b/validator_client/src/main.rs index cbcd101da..bd3919b5a 100644 --- a/validator_client/src/main.rs +++ b/validator_client/src/main.rs @@ -24,7 +24,8 @@ pub const ETH2_CONFIG_FILENAME: &str = "eth2-spec.toml"; fn main() { // Logging let decorator = slog_term::TermDecorator::new().build(); - let drain = slog_term::CompactFormat::new(decorator).build().fuse(); + let decorator = logging::AlignedTermDecorator::new(decorator, logging::MAX_MESSAGE_WIDTH); + let drain = slog_term::FullFormat::new(decorator).build().fuse(); let drain = slog_async::Async::new(drain).build().fuse(); // CLI From 7738d51a7250879ee1c7897a43cba6d73d67b420 Mon Sep 17 00:00:00 2001 From: Paul Hauner Date: Wed, 31 Jul 2019 14:45:09 +1000 Subject: [PATCH 14/25] Add `cargo fmt` to .travis.yml (#474) * Run cargo fmt * Add cargo fmt to travis.yml --- .travis.yml | 1 + eth2/utils/logging/src/lib.rs | 29 +++++++++++++++++++++-------- 2 files changed, 22 insertions(+), 8 deletions(-) diff --git a/.travis.yml b/.travis.yml index 3662e17cf..def7435a1 100644 --- a/.travis.yml +++ b/.travis.yml @@ -11,6 +11,7 @@ before_install: - sudo chown -R $USER /usr/local/include/google script: - cargo build --verbose --all --release + - cargo fmt --all -- --check rust: - beta - nightly diff --git a/eth2/utils/logging/src/lib.rs b/eth2/utils/logging/src/lib.rs index fb32698ed..cbd256f42 100644 --- a/eth2/utils/logging/src/lib.rs +++ b/eth2/utils/logging/src/lib.rs @@ -1,4 +1,4 @@ -use std::io::{Write, Result}; +use std::io::{Result, Write}; pub const MAX_MESSAGE_WIDTH: usize = 40; @@ -17,9 +17,15 @@ impl AlignedTermDecorator { } impl slog_term::Decorator for AlignedTermDecorator { - fn with_record(&self, _record: &slog::Record, _logger_values: &slog::OwnedKVList, f: F) - -> Result<()> where - F: FnOnce(&mut dyn slog_term::RecordDecorator) -> std::io::Result<()> { + fn with_record( + &self, + _record: &slog::Record, + _logger_values: &slog::OwnedKVList, + f: F, + ) -> Result<()> + where + F: FnOnce(&mut dyn slog_term::RecordDecorator) -> std::io::Result<()>, + { self.wrapped.with_record(_record, _logger_values, |deco| { f(&mut AlignedRecordDecorator::new(deco, self.message_width)) }) @@ -35,8 +41,10 @@ struct AlignedRecordDecorator<'a> { } impl<'a> AlignedRecordDecorator<'a> { - fn new(decorator: &'a mut dyn slog_term::RecordDecorator, message_width: usize) -> - AlignedRecordDecorator<'a> { + fn new( + decorator: &'a mut dyn slog_term::RecordDecorator, + message_width: usize, + ) -> AlignedRecordDecorator<'a> { AlignedRecordDecorator { wrapped: decorator, message_count: 0, @@ -103,8 +111,13 @@ impl<'a> slog_term::RecordDecorator for AlignedRecordDecorator<'a> { fn start_key(&mut self) -> Result<()> { if self.message_active && self.message_count + 1 < self.message_width { - write!(self, "{}", std::iter::repeat(' ').take(self.message_width - self.message_count) - .collect::())?; + write!( + self, + "{}", + std::iter::repeat(' ') + .take(self.message_width - self.message_count) + .collect::() + )?; self.message_active = false; self.message_count = 0; self.ignore_comma = false; From 0052ea711e2e2baab3b3fba6557247284c651e5e Mon Sep 17 00:00:00 2001 From: Luke Anderson Date: Wed, 31 Jul 2019 18:29:41 +1000 Subject: [PATCH 15/25] First RESTful HTTP API (#399) * Added generated code for REST API. - Created a new crate rest_api, which will adapt the openapi generated code to Lighthouse - Committed automatically generated code from openapi-generator-cli (via docker). Should hopfully not have to modify this at all, and do all changes in the rest_api crate. * Removed openapi generated code, because it was the rust client, not the rust server. * Added the correct rust-server code, automatically generated from openapi. * Added generated code for REST API. - Created a new crate rest_api, which will adapt the openapi generated code to Lighthouse - Committed automatically generated code from openapi-generator-cli (via docker). Should hopfully not have to modify this at all, and do all changes in the rest_api crate. * Removed openapi generated code, because it was the rust client, not the rust server. * Added the correct rust-server code, automatically generated from openapi. * Included REST API in configuratuion. - Started adding the rest_api into the beacon node's dependencies. - Set up configuration file for rest_api and integrated into main client config - Added CLI flags for REST API. * Futher work on REST API. - Adding the dependencies to rest_api crate - Created a skeleton BeaconNodeService, which will handle /node requests. - Started the rest_api server definition, with the high level request handling logic. * Added generated code for REST API. - Created a new crate rest_api, which will adapt the openapi generated code to Lighthouse - Committed automatically generated code from openapi-generator-cli (via docker). Should hopfully not have to modify this at all, and do all changes in the rest_api crate. * Removed openapi generated code, because it was the rust client, not the rust server. * Added the correct rust-server code, automatically generated from openapi. * Included REST API in configuratuion. - Started adding the rest_api into the beacon node's dependencies. - Set up configuration file for rest_api and integrated into main client config - Added CLI flags for REST API. * Futher work on REST API. - Adding the dependencies to rest_api crate - Created a skeleton BeaconNodeService, which will handle /node requests. - Started the rest_api server definition, with the high level request handling logic. * WIP: Restructured REST API to use hyper_router and separate services. * WIP: Fixing rust for REST API * WIP: Fixed up many bugs in trying to get router to compile. * WIP: Got the beacon_node to compile with the REST changes * Basic API works! - Changed CLI flags from rest-api* to api* - Fixed port cli flag - Tested, works over HTTP * WIP: Moved things around so that we can get state inside the handlers. * WIP: Significant API updates. - Started writing a macro for getting the handler functions. - Added the BeaconChain into the type map, gives stateful access to the beacon state. - Created new generic error types (haven't figured out yet), to reduce code duplication. - Moved common stuff into lib.rs * WIP: Factored macros, defined API result and error. - did more logging when creating HTTP responses - Tried moving stuff into macros, but can't get macros in macros to compile. - Pulled out a lot of placeholder code. * Fixed macros so that things compile. * Cleaned up code. - Removed unused imports - Removed comments - Addressed all compiler warnings. - Ran cargo fmt. * Removed auto-generated OpenAPI code. * Addressed Paul's suggestions. - Fixed spelling mistake - Moved the simple macros into functions, since it doesn't make sense for them to be macros. - Removed redundant code & inclusions. * Removed redundant validate_request function. * Included graceful shutdown in Hyper server. * Fixing the dropped exit_signal, which prevented the API from starting. * Wrapped the exit signal, to get an API shutdown log line. --- Cargo.toml | 1 + beacon_node/client/Cargo.toml | 1 + beacon_node/client/src/config.rs | 3 + beacon_node/client/src/lib.rs | 22 ++++ beacon_node/rest_api/Cargo.toml | 22 ++++ beacon_node/rest_api/src/beacon_node.rs | 65 ++++++++++++ beacon_node/rest_api/src/config.rs | 46 +++++++++ beacon_node/rest_api/src/lib.rs | 132 ++++++++++++++++++++++++ beacon_node/rest_api/src/macros.rs | 23 +++++ beacon_node/src/main.rs | 23 +++++ 10 files changed, 338 insertions(+) create mode 100644 beacon_node/rest_api/Cargo.toml create mode 100644 beacon_node/rest_api/src/beacon_node.rs create mode 100644 beacon_node/rest_api/src/config.rs create mode 100644 beacon_node/rest_api/src/lib.rs create mode 100644 beacon_node/rest_api/src/macros.rs diff --git a/Cargo.toml b/Cargo.toml index 20c5b3175..c4034ad35 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -26,6 +26,7 @@ members = [ "beacon_node/store", "beacon_node/client", "beacon_node/http_server", + "beacon_node/rest_api", "beacon_node/network", "beacon_node/eth2-libp2p", "beacon_node/rpc", diff --git a/beacon_node/client/Cargo.toml b/beacon_node/client/Cargo.toml index 5bbd33b3d..3367b84ce 100644 --- a/beacon_node/client/Cargo.toml +++ b/beacon_node/client/Cargo.toml @@ -9,6 +9,7 @@ beacon_chain = { path = "../beacon_chain" } network = { path = "../network" } http_server = { path = "../http_server" } rpc = { path = "../rpc" } +rest_api = { path = "../rest_api" } prometheus = "^0.6" types = { path = "../../eth2/types" } tree_hash = { path = "../../eth2/utils/tree_hash" } diff --git a/beacon_node/client/src/config.rs b/beacon_node/client/src/config.rs index 864577559..9a9fed802 100644 --- a/beacon_node/client/src/config.rs +++ b/beacon_node/client/src/config.rs @@ -17,6 +17,7 @@ pub struct Config { pub network: network::NetworkConfig, pub rpc: rpc::RPCConfig, pub http: HttpServerConfig, + pub rest_api: rest_api::APIConfig, } impl Default for Config { @@ -31,6 +32,7 @@ impl Default for Config { network: NetworkConfig::new(), rpc: rpc::RPCConfig::default(), http: HttpServerConfig::default(), + rest_api: rest_api::APIConfig::default(), } } } @@ -101,6 +103,7 @@ impl Config { self.network.apply_cli_args(args)?; self.rpc.apply_cli_args(args)?; self.http.apply_cli_args(args)?; + self.rest_api.apply_cli_args(args)?; if let Some(log_file) = args.value_of("logfile") { self.log_file = PathBuf::from(log_file); diff --git a/beacon_node/client/src/lib.rs b/beacon_node/client/src/lib.rs index 1b9f320be..8138c7d47 100644 --- a/beacon_node/client/src/lib.rs +++ b/beacon_node/client/src/lib.rs @@ -2,6 +2,7 @@ extern crate slog; mod beacon_chain_types; mod config; + pub mod error; pub mod notifier; @@ -39,6 +40,8 @@ pub struct Client { pub http_exit_signal: Option, /// Signal to terminate the slot timer. pub slot_timer_exit_signal: Option, + /// Signal to terminate the API + pub api_exit_signal: Option, /// The clients logger. log: slog::Logger, /// Marker to pin the beacon chain generics. @@ -143,6 +146,24 @@ where None }; + // Start the `rest_api` service + let api_exit_signal = if client_config.rest_api.enabled { + match rest_api::start_server( + &client_config.rest_api, + executor, + beacon_chain.clone(), + &log, + ) { + Ok(s) => Some(s), + Err(e) => { + error!(log, "API service failed to start."; "error" => format!("{:?}",e)); + None + } + } + } else { + None + }; + let (slot_timer_exit_signal, exit) = exit_future::signal(); if let Ok(Some(duration_to_next_slot)) = beacon_chain.slot_clock.duration_to_next_slot() { // set up the validator work interval - start at next slot and proceed every slot @@ -175,6 +196,7 @@ where http_exit_signal, rpc_exit_signal, slot_timer_exit_signal: Some(slot_timer_exit_signal), + api_exit_signal, log, network, phantom: PhantomData, diff --git a/beacon_node/rest_api/Cargo.toml b/beacon_node/rest_api/Cargo.toml new file mode 100644 index 000000000..7a63ca036 --- /dev/null +++ b/beacon_node/rest_api/Cargo.toml @@ -0,0 +1,22 @@ +[package] +name = "rest_api" +version = "0.1.0" +authors = ["Luke Anderson "] +edition = "2018" + +# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html +[dependencies] +beacon_chain = { path = "../beacon_chain" } +version = { path = "../version" } +serde = { version = "1.0", features = ["derive"] } +serde_json = "^1.0" +slog = "^2.2.3" +slog-term = "^2.4.0" +slog-async = "^2.3.0" +clap = "2.32.0" +http = "^0.1.17" +hyper = "0.12.32" +hyper-router = "^0.5" +futures = "0.1" +exit-future = "0.1.3" +tokio = "0.1.17" diff --git a/beacon_node/rest_api/src/beacon_node.rs b/beacon_node/rest_api/src/beacon_node.rs new file mode 100644 index 000000000..87d2d3cdc --- /dev/null +++ b/beacon_node/rest_api/src/beacon_node.rs @@ -0,0 +1,65 @@ +use beacon_chain::{BeaconChain, BeaconChainTypes}; +use serde::Serialize; +use slog::info; +use std::sync::Arc; +use version; + +use super::{path_from_request, success_response, APIResult, APIService}; + +use hyper::{Body, Request, Response}; +use hyper_router::{Route, RouterBuilder}; + +#[derive(Clone)] +pub struct BeaconNodeServiceInstance { + pub marker: std::marker::PhantomData, +} + +/// A string which uniquely identifies the client implementation and its version; similar to [HTTP User-Agent](https://tools.ietf.org/html/rfc7231#section-5.5.3). +#[derive(Serialize)] +pub struct Version(String); +impl From for Version { + fn from(x: String) -> Self { + Version(x) + } +} + +/// The genesis_time configured for the beacon node, which is the unix time at which the Eth2.0 chain began. +#[derive(Serialize)] +pub struct GenesisTime(u64); +impl From for GenesisTime { + fn from(x: u64) -> Self { + GenesisTime(x) + } +} + +impl APIService for BeaconNodeServiceInstance { + fn add_routes(&mut self, router_builder: RouterBuilder) -> Result { + let router_builder = router_builder + .add(Route::get("/version").using(result_to_response!(get_version))) + .add(Route::get("/genesis_time").using(result_to_response!(get_genesis_time::))); + Ok(router_builder) + } +} + +/// Read the version string from the current Lighthouse build. +fn get_version(_req: Request) -> APIResult { + let ver = Version::from(version::version()); + let body = Body::from( + serde_json::to_string(&ver).expect("Version should always be serialializable as JSON."), + ); + Ok(success_response(body)) +} + +/// Read the genesis time from the current beacon chain state. +fn get_genesis_time(req: Request) -> APIResult { + let beacon_chain = req.extensions().get::>>().unwrap(); + let gen_time = { + let state = beacon_chain.current_state(); + state.genesis_time + }; + let body = Body::from( + serde_json::to_string(&gen_time) + .expect("Genesis should time always have a valid JSON serialization."), + ); + Ok(success_response(body)) +} diff --git a/beacon_node/rest_api/src/config.rs b/beacon_node/rest_api/src/config.rs new file mode 100644 index 000000000..c4a9c738a --- /dev/null +++ b/beacon_node/rest_api/src/config.rs @@ -0,0 +1,46 @@ +use clap::ArgMatches; +use serde::{Deserialize, Serialize}; +use std::net::Ipv4Addr; + +/// HTTP REST API Configuration +#[derive(Debug, Clone, Serialize, Deserialize)] +pub struct Config { + /// Enable the REST API server. + pub enabled: bool, + /// The IPv4 address the REST API HTTP server will listen on. + pub listen_address: Ipv4Addr, + /// The port the REST API HTTP server will listen on. + pub port: u16, +} + +impl Default for Config { + fn default() -> Self { + Config { + enabled: true, // rest_api enabled by default + listen_address: Ipv4Addr::new(127, 0, 0, 1), + port: 1248, + } + } +} + +impl Config { + pub fn apply_cli_args(&mut self, args: &ArgMatches) -> Result<(), &'static str> { + if args.is_present("api") { + self.enabled = true; + } + + if let Some(rpc_address) = args.value_of("api-address") { + self.listen_address = rpc_address + .parse::() + .map_err(|_| "api-address is not a valid IPv4 address.")?; + } + + if let Some(rpc_port) = args.value_of("api-port") { + self.port = rpc_port + .parse::() + .map_err(|_| "api-port is not a valid u16.")?; + } + + Ok(()) + } +} diff --git a/beacon_node/rest_api/src/lib.rs b/beacon_node/rest_api/src/lib.rs new file mode 100644 index 000000000..0f7849449 --- /dev/null +++ b/beacon_node/rest_api/src/lib.rs @@ -0,0 +1,132 @@ +extern crate futures; +extern crate hyper; +#[macro_use] +mod macros; +mod beacon_node; +pub mod config; + +use beacon_chain::{BeaconChain, BeaconChainTypes}; +pub use config::Config as APIConfig; + +use slog::{info, o, warn}; +use std::sync::Arc; +use tokio::runtime::TaskExecutor; + +use crate::beacon_node::BeaconNodeServiceInstance; +use hyper::rt::Future; +use hyper::service::{service_fn, Service}; +use hyper::{Body, Request, Response, Server, StatusCode}; +use hyper_router::{RouterBuilder, RouterService}; + +pub enum APIError { + MethodNotAllowed { desc: String }, + ServerError { desc: String }, + NotImplemented { desc: String }, +} + +pub type APIResult = Result, APIError>; + +impl Into> for APIError { + fn into(self) -> Response { + let status_code: (StatusCode, String) = match self { + APIError::MethodNotAllowed { desc } => (StatusCode::METHOD_NOT_ALLOWED, desc), + APIError::ServerError { desc } => (StatusCode::INTERNAL_SERVER_ERROR, desc), + APIError::NotImplemented { desc } => (StatusCode::NOT_IMPLEMENTED, desc), + }; + Response::builder() + .status(status_code.0) + .body(Body::from(status_code.1)) + .expect("Response should always be created.") + } +} + +pub trait APIService { + fn add_routes(&mut self, router_builder: RouterBuilder) -> Result; +} + +pub fn start_server( + config: &APIConfig, + executor: &TaskExecutor, + beacon_chain: Arc>, + log: &slog::Logger, +) -> Result { + let log = log.new(o!("Service" => "API")); + + // build a channel to kill the HTTP server + let (exit_signal, exit) = exit_future::signal(); + + let exit_log = log.clone(); + let server_exit = exit.and_then(move |_| { + info!(exit_log, "API service shutdown"); + Ok(()) + }); + + // Get the address to bind to + let bind_addr = (config.listen_address, config.port).into(); + + // Clone our stateful objects, for use in service closure. + let server_log = log.clone(); + let server_bc = beacon_chain.clone(); + + // Create the service closure + let service = move || { + //TODO: This router must be moved out of this closure, so it isn't rebuilt for every connection. + let mut router = build_router_service::(); + + // Clone our stateful objects, for use in handler closure + let service_log = server_log.clone(); + let service_bc = server_bc.clone(); + + // Create a simple handler for the router, inject our stateful objects into the request. + service_fn(move |mut req| { + req.extensions_mut() + .insert::(service_log.clone()); + req.extensions_mut() + .insert::>>(service_bc.clone()); + router.call(req) + }) + }; + + let server = Server::bind(&bind_addr) + .serve(service) + .with_graceful_shutdown(server_exit) + .map_err(move |e| { + warn!( + log, + "API failed to start, Unable to bind"; "address" => format!("{:?}", e) + ) + }); + + executor.spawn(server); + + Ok(exit_signal) +} + +fn build_router_service() -> RouterService { + let mut router_builder = RouterBuilder::new(); + + let mut bn_service: BeaconNodeServiceInstance = BeaconNodeServiceInstance { + marker: std::marker::PhantomData, + }; + + router_builder = bn_service + .add_routes(router_builder) + .expect("The routes should always be made."); + + RouterService::new(router_builder.build()) +} + +fn path_from_request(req: &Request) -> String { + req.uri() + .path_and_query() + .as_ref() + .map(|pq| String::from(pq.as_str())) + .unwrap_or(String::new()) +} + +fn success_response(body: Body) -> Response { + Response::builder() + .status(StatusCode::OK) + .body(body) + .expect("We should always be able to make response from the success body.") +} diff --git a/beacon_node/rest_api/src/macros.rs b/beacon_node/rest_api/src/macros.rs new file mode 100644 index 000000000..db9bfd848 --- /dev/null +++ b/beacon_node/rest_api/src/macros.rs @@ -0,0 +1,23 @@ +macro_rules! result_to_response { + ($handler: path) => { + |req: Request| -> Response { + let log = req + .extensions() + .get::() + .expect("Our logger should be on req.") + .clone(); + let path = path_from_request(&req); + let result = $handler(req); + match result { + Ok(response) => { + info!(log, "Request successful: {:?}", path); + response + } + Err(e) => { + info!(log, "Request failure: {:?}", path); + e.into() + } + } + } + }; +} diff --git a/beacon_node/src/main.rs b/beacon_node/src/main.rs index 4ad544bb1..2e0cbb67b 100644 --- a/beacon_node/src/main.rs +++ b/beacon_node/src/main.rs @@ -127,6 +127,29 @@ fn main() { .help("Listen port for the HTTP server.") .takes_value(true), ) + // REST API related arguments + .arg( + Arg::with_name("api") + .long("api") + .value_name("API") + .help("Enable the RESTful HTTP API server.") + .takes_value(false), + ) + .arg( + Arg::with_name("api-address") + .long("api-address") + .value_name("APIADDRESS") + .help("Set the listen address for the RESTful HTTP API server.") + .takes_value(true), + ) + .arg( + Arg::with_name("api-port") + .long("api-port") + .value_name("APIPORT") + .help("Set the listen TCP port for the RESTful HTTP API server.") + .takes_value(true), + ) + // General arguments .arg( Arg::with_name("db") .long("db") From 68e031a6302bc545e53389255b6266a1138fa134 Mon Sep 17 00:00:00 2001 From: Paul Hauner Date: Thu, 1 Aug 2019 07:53:30 +1000 Subject: [PATCH 16/25] Change gitter link to discord (#476) * Change gitter link to discord * Fix bottom readme chat link --- README.md | 10 ++++++---- 1 file changed, 6 insertions(+), 4 deletions(-) diff --git a/README.md b/README.md index 08c42539b..f01260a4f 100644 --- a/README.md +++ b/README.md @@ -2,12 +2,12 @@ An open-source Ethereum 2.0 client, written in Rust and maintained by Sigma Prime. -[![Build Status]][Build Link] [![Doc Status]][Doc Link] [![Gitter Badge]][Gitter Link] +[![Build Status]][Build Link] [![Doc Status]][Doc Link] [![Chat Badge]][Chat Link] [Build Status]: https://gitlab.sigmaprime.io/sigp/lighthouse/badges/master/build.svg [Build Link]: https://gitlab.sigmaprime.io/sigp/lighthouse/pipelines -[Gitter Badge]: https://badges.gitter.im/Join%20Chat.svg -[Gitter Link]: https://gitter.im/sigp/lighthouse +[Chat Badge]: https://img.shields.io/badge/chat-discord-%237289da +[Chat Link]: https://discord.gg/cyAszAh [Doc Status]: https://img.shields.io/badge/docs-master-blue.svg [Doc Link]: http://lighthouse-docs.sigmaprime.io/ @@ -171,7 +171,9 @@ your support! ## Contact -The best place for discussion is the [sigp/lighthouse gitter](https://gitter.im/sigp/lighthouse). +The best place for discussion is the [Lighthouse Discord +server](https://discord.gg/cyAszAh). Alternatively, you may use the +[sigp/lighthouse gitter](https://gitter.im/sigp/lighthouse). ## Donations From 89cb01cc93c637ca489a01e5867c80690eb4ed9a Mon Sep 17 00:00:00 2001 From: Michael Sproul Date: Thu, 1 Aug 2019 07:59:23 +1000 Subject: [PATCH 17/25] Delete stray RocksDB store implementation (#477) --- beacon_node/store/src/disk_db.rs | 198 ------------------------------- beacon_node/store/src/store.rs | 37 ------ 2 files changed, 235 deletions(-) delete mode 100644 beacon_node/store/src/disk_db.rs delete mode 100644 beacon_node/store/src/store.rs diff --git a/beacon_node/store/src/disk_db.rs b/beacon_node/store/src/disk_db.rs deleted file mode 100644 index 873c9df82..000000000 --- a/beacon_node/store/src/disk_db.rs +++ /dev/null @@ -1,198 +0,0 @@ -extern crate rocksdb; - -use super::{ClientDB, DBError, DBValue}; -use rocksdb::Error as RocksError; -use rocksdb::{Options, DB}; -use std::fs; -use std::path::Path; - -/// A on-disk database which implements the ClientDB trait. -/// -/// This implementation uses RocksDB with default options. -pub struct DiskStore { - db: DB, -} - -impl DiskStore { - /// Open the RocksDB database, optionally supplying columns if required. - /// - /// The RocksDB database will be contained in a directory titled - /// "database" in the supplied path. - /// - /// # Panics - /// - /// Panics if the database is unable to be created. - pub fn open(path: &Path, columns: Option<&[&str]>) -> Self { - // Rocks options. - let mut options = Options::default(); - options.create_if_missing(true); - - // Ensure the path exists. - fs::create_dir_all(&path).unwrap_or_else(|_| panic!("Unable to create {:?}", &path)); - let db_path = path.join("database"); - - let columns = columns.unwrap_or(&COLUMNS); - - if db_path.exists() { - Self { - db: DB::open_cf(&options, db_path, &COLUMNS) - .expect("Unable to open local database"), - } - } else { - let mut db = Self { - db: DB::open(&options, db_path).expect("Unable to open local database"), - }; - - for cf in columns { - db.create_col(cf).unwrap(); - } - - db - } - } - - /// Create a RocksDB column family. Corresponds to the - /// `create_cf()` function on the RocksDB API. - #[allow(dead_code)] - fn create_col(&mut self, col: &str) -> Result<(), DBError> { - match self.db.create_cf(col, &Options::default()) { - Err(e) => Err(e.into()), - Ok(_) => Ok(()), - } - } -} - -impl From for DBError { - fn from(e: RocksError) -> Self { - Self { - message: e.to_string(), - } - } -} - -impl ClientDB for DiskStore { - /// Get the value for some key on some column. - /// - /// Corresponds to the `get_cf()` method on the RocksDB API. - /// Will attempt to get the `ColumnFamily` and return an Err - /// if it fails. - fn get(&self, col: &str, key: &[u8]) -> Result, DBError> { - match self.db.cf_handle(col) { - None => Err(DBError { - message: "Unknown column".to_string(), - }), - Some(handle) => match self.db.get_cf(handle, key)? { - None => Ok(None), - Some(db_vec) => Ok(Some(DBValue::from(&*db_vec))), - }, - } - } - - /// Set some value for some key on some column. - /// - /// Corresponds to the `cf_handle()` method on the RocksDB API. - /// Will attempt to get the `ColumnFamily` and return an Err - /// if it fails. - fn put(&self, col: &str, key: &[u8], val: &[u8]) -> Result<(), DBError> { - match self.db.cf_handle(col) { - None => Err(DBError { - message: "Unknown column".to_string(), - }), - Some(handle) => self.db.put_cf(handle, key, val).map_err(|e| e.into()), - } - } - - /// Return true if some key exists in some column. - fn exists(&self, col: &str, key: &[u8]) -> Result { - /* - * I'm not sure if this is the correct way to read if some - * block exists. Naively I would expect this to unnecessarily - * copy some data, but I could be wrong. - */ - match self.db.cf_handle(col) { - None => Err(DBError { - message: "Unknown column".to_string(), - }), - Some(handle) => Ok(self.db.get_cf(handle, key)?.is_some()), - } - } - - /// Delete the value for some key on some column. - /// - /// Corresponds to the `delete_cf()` method on the RocksDB API. - /// Will attempt to get the `ColumnFamily` and return an Err - /// if it fails. - fn delete(&self, col: &str, key: &[u8]) -> Result<(), DBError> { - match self.db.cf_handle(col) { - None => Err(DBError { - message: "Unknown column".to_string(), - }), - Some(handle) => { - self.db.delete_cf(handle, key)?; - Ok(()) - } - } - } -} - -#[cfg(test)] -mod tests { - use super::super::ClientDB; - use super::*; - use std::sync::Arc; - use std::{env, fs, thread}; - - #[test] - #[ignore] - fn test_rocksdb_can_use_db() { - let pwd = env::current_dir().unwrap(); - let path = pwd.join("testdb_please_remove"); - let _ = fs::remove_dir_all(&path); - fs::create_dir_all(&path).unwrap(); - - let col_name: &str = "TestColumn"; - let column_families = vec![col_name]; - - let mut db = DiskStore::open(&path, None); - - for cf in column_families { - db.create_col(&cf).unwrap(); - } - - let db = Arc::new(db); - - let thread_count = 10; - let write_count = 10; - - // We're expecting the product of these numbers to fit in one byte. - assert!(thread_count * write_count <= 255); - - let mut handles = vec![]; - for t in 0..thread_count { - let wc = write_count; - let db = db.clone(); - let col = col_name.clone(); - let handle = thread::spawn(move || { - for w in 0..wc { - let key = (t * w) as u8; - let val = 42; - db.put(&col, &vec![key], &vec![val]).unwrap(); - } - }); - handles.push(handle); - } - - for handle in handles { - handle.join().unwrap(); - } - - for t in 0..thread_count { - for w in 0..write_count { - let key = (t * w) as u8; - let val = db.get(&col_name, &vec![key]).unwrap().unwrap(); - assert_eq!(vec![42], val); - } - } - fs::remove_dir_all(&path).unwrap(); - } -} diff --git a/beacon_node/store/src/store.rs b/beacon_node/store/src/store.rs deleted file mode 100644 index 84447b83c..000000000 --- a/beacon_node/store/src/store.rs +++ /dev/null @@ -1,37 +0,0 @@ -use super::*; - -pub type Vec = Vec; - -pub trait Store: Sync + Send + Sized { - fn put(&self, key: &Hash256, item: &impl StoreItem) -> Result<(), Error> { - item.db_put(self, key) - } - - fn get(&self, key: &Hash256) -> Result, Error> { - I::db_get(self, key) - } - - fn exists(&self, key: &Hash256) -> Result { - I::db_exists(self, key) - } - - fn delete(&self, key: &Hash256) -> Result<(), Error> { - I::db_delete(self, key) - } - - fn get_block_at_preceding_slot( - &self, - start_block_root: Hash256, - slot: Slot, - ) -> Result, Error> { - block_at_slot::get_block_at_preceeding_slot(self, slot, start_block_root) - } - - fn get_bytes(&self, col: &str, key: &[u8]) -> Result>, Error>; - - fn put_bytes(&self, col: &str, key: &[u8], val: &[u8]) -> Result<(), Error>; - - fn key_exists(&self, col: &str, key: &[u8]) -> Result; - - fn key_delete(&self, col: &str, key: &[u8]) -> Result<(), Error>; -} From 5079c25bb250117e2f7fdd3cb383b1f471ca3f52 Mon Sep 17 00:00:00 2001 From: Paul Hauner Date: Mon, 5 Aug 2019 16:25:21 +1000 Subject: [PATCH 18/25] Tidy ancestor iterators --- beacon_node/beacon_chain/src/beacon_chain.rs | 75 +++++---- beacon_node/beacon_chain/src/fork_choice.rs | 2 +- beacon_node/beacon_chain/src/iter.rs | 48 ++++++ beacon_node/beacon_chain/src/lib.rs | 1 + beacon_node/beacon_chain/src/test_utils.rs | 2 +- beacon_node/network/src/sync/simple_sync.rs | 19 +-- beacon_node/rest_api/src/beacon_node.rs | 2 +- beacon_node/rpc/src/attestation.rs | 6 +- beacon_node/rpc/src/validator.rs | 6 +- beacon_node/store/src/iter.rs | 159 +------------------ eth2/lmd_ghost/tests/test.rs | 4 +- 11 files changed, 125 insertions(+), 199 deletions(-) create mode 100644 beacon_node/beacon_chain/src/iter.rs diff --git a/beacon_node/beacon_chain/src/beacon_chain.rs b/beacon_node/beacon_chain/src/beacon_chain.rs index d0c50af70..58f64bc29 100644 --- a/beacon_node/beacon_chain/src/beacon_chain.rs +++ b/beacon_node/beacon_chain/src/beacon_chain.rs @@ -1,6 +1,7 @@ use crate::checkpoint::CheckPoint; use crate::errors::{BeaconChainError as Error, BlockProductionError}; use crate::fork_choice::{Error as ForkChoiceError, ForkChoice}; +use crate::iter::{ReverseBlockRootIterator, ReverseStateRootIterator}; use crate::metrics::Metrics; use crate::persisted_beacon_chain::{PersistedBeaconChain, BEACON_CHAIN_DB_KEY}; use lmd_ghost::LmdGhost; @@ -19,7 +20,7 @@ use state_processing::{ per_slot_processing, BlockProcessingError, }; use std::sync::Arc; -use store::iter::{BestBlockRootsIterator, BlockIterator, BlockRootsIterator, StateRootsIterator}; +use store::iter::{BlockRootsIterator, StateRootsIterator}; use store::{Error as DBError, Store}; use tree_hash::TreeHash; use types::*; @@ -216,45 +217,53 @@ impl BeaconChain { Ok(headers?) } - /// Iterate in reverse (highest to lowest slot) through all blocks from the block at `slot` - /// through to the genesis block. - /// - /// Returns `None` for headers prior to genesis or when there is an error reading from `Store`. - /// - /// Contains duplicate headers when skip slots are encountered. - pub fn rev_iter_blocks(&self, slot: Slot) -> BlockIterator { - BlockIterator::owned(self.store.clone(), self.state.read().clone(), slot) - } - /// Iterates in reverse (highest to lowest slot) through all block roots from `slot` through to - /// genesis. + /// Iterates through all the `BeaconBlock` roots and slots, first returning + /// `self.head().beacon_block` then all prior blocks until either genesis or if the database + /// fails to return a prior block. /// - /// Returns `None` for roots prior to genesis or when there is an error reading from `Store`. + /// Returns duplicate roots for skip-slots. /// - /// Contains duplicate roots when skip slots are encountered. - pub fn rev_iter_block_roots(&self, slot: Slot) -> BlockRootsIterator { - BlockRootsIterator::owned(self.store.clone(), self.state.read().clone(), slot) - } - - /// Iterates in reverse (highest to lowest slot) through all block roots from largest - /// `slot <= beacon_state.slot` through to genesis. + /// Iterator returns `(Hash256, Slot)`. /// - /// Returns `None` for roots prior to genesis or when there is an error reading from `Store`. + /// ## Note /// - /// Contains duplicate roots when skip slots are encountered. - pub fn rev_iter_best_block_roots( + /// Because this iterator starts at the `head` of the chain (viz., the best block), the first slot + /// returned may be earlier than the wall-clock slot. + pub fn rev_iter_block_roots( &self, slot: Slot, - ) -> BestBlockRootsIterator { - BestBlockRootsIterator::owned(self.store.clone(), self.state.read().clone(), slot) + ) -> ReverseBlockRootIterator { + let state = &self.head().beacon_state; + let block_root = self.head().beacon_block_root; + let block_slot = state.slot; + + let iter = BlockRootsIterator::owned(self.store.clone(), state.clone(), slot); + + ReverseBlockRootIterator::new((block_root, block_slot), iter) } - /// Iterates in reverse (highest to lowest slot) through all state roots from `slot` through to - /// genesis. + /// Iterates through all the `BeaconState` roots and slots, first returning + /// `self.head().beacon_state` then all prior states until either genesis or if the database + /// fails to return a prior state. /// - /// Returns `None` for roots prior to genesis or when there is an error reading from `Store`. - pub fn rev_iter_state_roots(&self, slot: Slot) -> StateRootsIterator { - StateRootsIterator::owned(self.store.clone(), self.state.read().clone(), slot) + /// Iterator returns `(Hash256, Slot)`. + /// + /// ## Note + /// + /// Because this iterator starts at the `head` of the chain (viz., the best block), the first slot + /// returned may be earlier than the wall-clock slot. + pub fn rev_iter_state_roots( + &self, + slot: Slot, + ) -> ReverseStateRootIterator { + let state = &self.head().beacon_state; + let state_root = self.head().beacon_state_root; + let state_slot = state.slot; + + let iter = StateRootsIterator::owned(self.store.clone(), state.clone(), slot); + + ReverseStateRootIterator::new((state_root, state_slot), iter) } /// Returns the block at the given root, if any. @@ -271,8 +280,10 @@ impl BeaconChain { /// Returns a read-lock guarded `BeaconState` which is the `canonical_head` that has been /// updated to match the current slot clock. - pub fn current_state(&self) -> RwLockReadGuard> { - self.state.read() + pub fn speculative_state(&self) -> Result>, Error> { + // TODO: ensure the state has done a catch-up. + + Ok(self.state.read()) } /// Returns a read-lock guarded `CheckPoint` struct for reading the head (as chosen by the diff --git a/beacon_node/beacon_chain/src/fork_choice.rs b/beacon_node/beacon_chain/src/fork_choice.rs index b77979b74..74778be32 100644 --- a/beacon_node/beacon_chain/src/fork_choice.rs +++ b/beacon_node/beacon_chain/src/fork_choice.rs @@ -52,7 +52,7 @@ impl ForkChoice { // been justified for at least 1 epoch ... If no such descendant exists, // set justified_head to finalized_head. let (start_state, start_block_root, start_block_slot) = { - let state = chain.current_state(); + let state = &chain.head().beacon_state; let (block_root, block_slot) = if state.current_epoch() + 1 > state.current_justified_checkpoint.epoch { diff --git a/beacon_node/beacon_chain/src/iter.rs b/beacon_node/beacon_chain/src/iter.rs new file mode 100644 index 000000000..f73e88afa --- /dev/null +++ b/beacon_node/beacon_chain/src/iter.rs @@ -0,0 +1,48 @@ +use store::iter::{BlockRootsIterator, StateRootsIterator}; +use types::{Hash256, Slot}; + +pub type ReverseBlockRootIterator<'a, E, S> = + ReverseHashAndSlotIterator>; +pub type ReverseStateRootIterator<'a, E, S> = + ReverseHashAndSlotIterator>; + +pub type ReverseHashAndSlotIterator = ReverseChainIterator<(Hash256, Slot), I>; + +/// Provides a wrapper for an iterator that returns a given `T` before it starts returning results of +/// the `Iterator`. +pub struct ReverseChainIterator { + first_value_used: bool, + first_value: T, + iter: I, +} + +impl ReverseChainIterator +where + T: Sized, + I: Iterator + Sized, +{ + pub fn new(first_value: T, iter: I) -> Self { + Self { + first_value_used: false, + first_value, + iter, + } + } +} + +impl Iterator for ReverseChainIterator +where + T: Clone, + I: Iterator, +{ + type Item = T; + + fn next(&mut self) -> Option { + if self.first_value_used { + self.iter.next() + } else { + self.first_value_used = true; + Some(self.first_value.clone()) + } + } +} diff --git a/beacon_node/beacon_chain/src/lib.rs b/beacon_node/beacon_chain/src/lib.rs index df1de153a..c2efcad13 100644 --- a/beacon_node/beacon_chain/src/lib.rs +++ b/beacon_node/beacon_chain/src/lib.rs @@ -2,6 +2,7 @@ mod beacon_chain; mod checkpoint; mod errors; mod fork_choice; +mod iter; mod metrics; mod persisted_beacon_chain; pub mod test_utils; diff --git a/beacon_node/beacon_chain/src/test_utils.rs b/beacon_node/beacon_chain/src/test_utils.rs index 6242b8a0a..cdcd8bb21 100644 --- a/beacon_node/beacon_chain/src/test_utils.rs +++ b/beacon_node/beacon_chain/src/test_utils.rs @@ -198,7 +198,7 @@ where fn get_state_at_slot(&self, state_slot: Slot) -> BeaconState { let state_root = self .chain - .rev_iter_state_roots(self.chain.current_state().slot - 1) + .rev_iter_state_roots(self.chain.head().beacon_state.slot - 1) .find(|(_hash, slot)| *slot == state_slot) .map(|(hash, _slot)| hash) .expect("could not find state root"); diff --git a/beacon_node/network/src/sync/simple_sync.rs b/beacon_node/network/src/sync/simple_sync.rs index ac001415c..215e37e7f 100644 --- a/beacon_node/network/src/sync/simple_sync.rs +++ b/beacon_node/network/src/sync/simple_sync.rs @@ -266,7 +266,7 @@ impl SimpleSync { fn root_at_slot(&self, target_slot: Slot) -> Option { self.chain - .rev_iter_best_block_roots(target_slot) + .rev_iter_block_roots(target_slot) .take(1) .find(|(_root, slot)| *slot == target_slot) .map(|(root, _slot)| root) @@ -280,6 +280,8 @@ impl SimpleSync { req: BeaconBlockRootsRequest, network: &mut NetworkContext, ) { + let state = &self.chain.head().beacon_state; + debug!( self.log, "BlockRootsRequest"; @@ -290,8 +292,8 @@ impl SimpleSync { let mut roots: Vec = self .chain - .rev_iter_best_block_roots(req.start_slot + req.count) - .take(req.count as usize) + .rev_iter_block_roots(std::cmp::min(req.start_slot + req.count, state.slot)) + .take_while(|(_root, slot)| req.start_slot <= *slot) .map(|(block_root, slot)| BlockRootSlot { slot, block_root }) .collect(); @@ -302,7 +304,7 @@ impl SimpleSync { "peer" => format!("{:?}", peer_id), "msg" => "Failed to return all requested hashes", "start_slot" => req.start_slot, - "current_slot" => self.chain.current_state().slot, + "current_slot" => self.chain.present_slot(), "requested" => req.count, "returned" => roots.len(), ); @@ -389,6 +391,8 @@ impl SimpleSync { req: BeaconBlockHeadersRequest, network: &mut NetworkContext, ) { + let state = &self.chain.head().beacon_state; + debug!( self.log, "BlockHeadersRequest"; @@ -399,13 +403,10 @@ impl SimpleSync { let count = req.max_headers; // Collect the block roots. - // - // Instead of using `chain.rev_iter_blocks` we collect the roots first. This avoids - // unnecessary block deserialization when `req.skip_slots > 0`. let mut roots: Vec = self .chain - .rev_iter_best_block_roots(req.start_slot + count) - .take(count as usize) + .rev_iter_block_roots(std::cmp::min(req.start_slot + count, state.slot)) + .take_while(|(_root, slot)| req.start_slot <= *slot) .map(|(root, _slot)| root) .collect(); diff --git a/beacon_node/rest_api/src/beacon_node.rs b/beacon_node/rest_api/src/beacon_node.rs index 87d2d3cdc..bd8d98a53 100644 --- a/beacon_node/rest_api/src/beacon_node.rs +++ b/beacon_node/rest_api/src/beacon_node.rs @@ -54,7 +54,7 @@ fn get_version(_req: Request) -> APIResult { fn get_genesis_time(req: Request) -> APIResult { let beacon_chain = req.extensions().get::>>().unwrap(); let gen_time = { - let state = beacon_chain.current_state(); + let state = &beacon_chain.head().beacon_state; state.genesis_time }; let body = Body::from( diff --git a/beacon_node/rpc/src/attestation.rs b/beacon_node/rpc/src/attestation.rs index 5ea8368fd..20425d292 100644 --- a/beacon_node/rpc/src/attestation.rs +++ b/beacon_node/rpc/src/attestation.rs @@ -40,7 +40,11 @@ impl AttestationService for AttestationServiceInstance { // verify the slot, drop lock on state afterwards { let slot_requested = req.get_slot(); - let state = &self.chain.current_state(); + // TODO: this whole module is legacy and not maintained well. + let state = &self + .chain + .speculative_state() + .expect("This is legacy code and should be removed"); // Start by performing some checks // Check that the AttestationData is for the current slot (otherwise it will not be valid) diff --git a/beacon_node/rpc/src/validator.rs b/beacon_node/rpc/src/validator.rs index b13303e25..080c828a7 100644 --- a/beacon_node/rpc/src/validator.rs +++ b/beacon_node/rpc/src/validator.rs @@ -29,7 +29,11 @@ impl ValidatorService for ValidatorServiceInstance { trace!(self.log, "RPC request"; "endpoint" => "GetValidatorDuties", "epoch" => req.get_epoch()); let spec = &self.chain.spec; - let state = &self.chain.current_state(); + // TODO: this whole module is legacy and not maintained well. + let state = &self + .chain + .speculative_state() + .expect("This is legacy code and should be removed"); let epoch = Epoch::from(req.get_epoch()); let mut resp = GetDutiesResponse::new(); let resp_validators = resp.mut_active_validators(); diff --git a/beacon_node/store/src/iter.rs b/beacon_node/store/src/iter.rs index 55c525b11..fc5d80679 100644 --- a/beacon_node/store/src/iter.rs +++ b/beacon_node/store/src/iter.rs @@ -4,20 +4,23 @@ use std::sync::Arc; use types::{BeaconBlock, BeaconState, BeaconStateError, EthSpec, Hash256, Slot}; /// Implemented for types that have ancestors (e.g., blocks, states) that may be iterated over. +/// +/// ## Note +/// +/// It is assumed that all ancestors for this object are stored in the database. If this is not the +/// case, the iterator will start returning `None` prior to genesis. pub trait AncestorIter { /// Returns an iterator over the roots of the ancestors of `self`. fn try_iter_ancestor_roots(&self, store: Arc) -> Option; } -impl<'a, U: Store, E: EthSpec> AncestorIter> - for BeaconBlock -{ +impl<'a, U: Store, E: EthSpec> AncestorIter> for BeaconBlock { /// Iterates across all the prior block roots of `self`, starting at the most recent and ending /// at genesis. - fn try_iter_ancestor_roots(&self, store: Arc) -> Option> { + fn try_iter_ancestor_roots(&self, store: Arc) -> Option> { let state = store.get::>(&self.state_root).ok()??; - Some(BestBlockRootsIterator::owned(store, state, self.slot)) + Some(BlockRootsIterator::owned(store, state, self.slot)) } } @@ -116,11 +119,6 @@ impl<'a, T: EthSpec, U: Store> Iterator for BlockIterator<'a, T, U> { /// exhausted. /// /// Returns `None` for roots prior to genesis or when there is an error reading from `Store`. -/// -/// ## Notes -/// -/// See [`BestBlockRootsIterator`](struct.BestBlockRootsIterator.html), which has different -/// `start_slot` logic. #[derive(Clone)] pub struct BlockRootsIterator<'a, T: EthSpec, U> { store: Arc, @@ -180,104 +178,6 @@ impl<'a, T: EthSpec, U: Store> Iterator for BlockRootsIterator<'a, T, U> { } } -/// Iterates backwards through block roots with `start_slot` highest possible value -/// `<= beacon_state.slot`. -/// -/// The distinction between `BestBlockRootsIterator` and `BlockRootsIterator` is: -/// -/// - `BestBlockRootsIterator` uses best-effort slot. When `start_slot` is greater than the latest available block root -/// on `beacon_state`, returns `Some(root, slot)` where `slot` is the latest available block -/// root. -/// - `BlockRootsIterator` is strict about `start_slot`. When `start_slot` is greater than the latest available block root -/// on `beacon_state`, returns `None`. -/// -/// This is distinct from `BestBlockRootsIterator`. -/// -/// Uses the `block_roots` field of `BeaconState` to as the source of block roots and will -/// perform a lookup on the `Store` for a prior `BeaconState` if `block_roots` has been -/// exhausted. -/// -/// Returns `None` for roots prior to genesis or when there is an error reading from `Store`. -#[derive(Clone)] -pub struct BestBlockRootsIterator<'a, T: EthSpec, U> { - store: Arc, - beacon_state: Cow<'a, BeaconState>, - slot: Slot, -} - -impl<'a, T: EthSpec, U: Store> BestBlockRootsIterator<'a, T, U> { - /// Create a new iterator over all block roots in the given `beacon_state` and prior states. - pub fn new(store: Arc, beacon_state: &'a BeaconState, start_slot: Slot) -> Self { - let mut slot = start_slot; - if slot >= beacon_state.slot { - // Slot may be too high. - slot = beacon_state.slot; - if beacon_state.get_block_root(slot).is_err() { - slot -= 1; - } - } - - Self { - store, - beacon_state: Cow::Borrowed(beacon_state), - slot: slot + 1, - } - } - - /// Create a new iterator over all block roots in the given `beacon_state` and prior states. - pub fn owned(store: Arc, beacon_state: BeaconState, start_slot: Slot) -> Self { - let mut slot = start_slot; - if slot >= beacon_state.slot { - // Slot may be too high. - slot = beacon_state.slot; - // TODO: Use a function other than `get_block_root` as this will always return `Err()` - // for slot = state.slot. - if beacon_state.get_block_root(slot).is_err() { - slot -= 1; - } - } - - Self { - store, - beacon_state: Cow::Owned(beacon_state), - slot: slot + 1, - } - } -} - -impl<'a, T: EthSpec, U: Store> Iterator for BestBlockRootsIterator<'a, T, U> { - type Item = (Hash256, Slot); - - fn next(&mut self) -> Option { - if self.slot == 0 { - // End of Iterator - return None; - } - - self.slot -= 1; - - match self.beacon_state.get_block_root(self.slot) { - Ok(root) => Some((*root, self.slot)), - Err(BeaconStateError::SlotOutOfBounds) => { - // Read a `BeaconState` from the store that has access to prior historical root. - let beacon_state: BeaconState = { - // Load the earliest state from disk. - let new_state_root = self.beacon_state.get_oldest_state_root().ok()?; - - self.store.get(&new_state_root).ok()? - }?; - - self.beacon_state = Cow::Owned(beacon_state); - - let root = self.beacon_state.get_block_root(self.slot).ok()?; - - Some((*root, self.slot)) - } - _ => None, - } - } -} - #[cfg(test)] mod test { use super::*; @@ -337,49 +237,6 @@ mod test { } } - #[test] - fn best_block_root_iter() { - let store = Arc::new(MemoryStore::open()); - let slots_per_historical_root = MainnetEthSpec::slots_per_historical_root(); - - let mut state_a: BeaconState = get_state(); - let mut state_b: BeaconState = get_state(); - - state_a.slot = Slot::from(slots_per_historical_root); - state_b.slot = Slot::from(slots_per_historical_root * 2); - - let mut hashes = (0..).into_iter().map(|i| Hash256::from(i)); - - for root in &mut state_a.block_roots[..] { - *root = hashes.next().unwrap() - } - for root in &mut state_b.block_roots[..] { - *root = hashes.next().unwrap() - } - - let state_a_root = hashes.next().unwrap(); - state_b.state_roots[0] = state_a_root; - store.put(&state_a_root, &state_a).unwrap(); - - let iter = BestBlockRootsIterator::new(store.clone(), &state_b, state_b.slot); - - assert!( - iter.clone().find(|(_root, slot)| *slot == 0).is_some(), - "iter should contain zero slot" - ); - - let mut collected: Vec<(Hash256, Slot)> = iter.collect(); - collected.reverse(); - - let expected_len = 2 * MainnetEthSpec::slots_per_historical_root(); - - assert_eq!(collected.len(), expected_len); - - for i in 0..expected_len { - assert_eq!(collected[i].0, Hash256::from(i as u64)); - } - } - #[test] fn state_root_iter() { let store = Arc::new(MemoryStore::open()); diff --git a/eth2/lmd_ghost/tests/test.rs b/eth2/lmd_ghost/tests/test.rs index fbe385560..0ac263638 100644 --- a/eth2/lmd_ghost/tests/test.rs +++ b/eth2/lmd_ghost/tests/test.rs @@ -10,7 +10,7 @@ use lmd_ghost::{LmdGhost, ThreadSafeReducedTree as BaseThreadSafeReducedTree}; use rand::{prelude::*, rngs::StdRng}; use std::sync::Arc; use store::{ - iter::{AncestorIter, BestBlockRootsIterator}, + iter::{AncestorIter, BlockRootsIterator}, MemoryStore, Store, }; use types::{BeaconBlock, EthSpec, Hash256, MinimalEthSpec, Slot}; @@ -159,7 +159,7 @@ fn get_ancestor_roots( .expect("block should exist") .expect("store should not error"); - as AncestorIter<_, BestBlockRootsIterator>>::try_iter_ancestor_roots( + as AncestorIter<_, BlockRootsIterator>>::try_iter_ancestor_roots( &block, store, ) .expect("should be able to create ancestor iter") From e21d3fed0567c9d47251cdc5d5393b58f7b99ec3 Mon Sep 17 00:00:00 2001 From: Paul Hauner Date: Mon, 5 Aug 2019 16:27:55 +1000 Subject: [PATCH 19/25] Revert "Tidy ancestor iterators" This reverts commit 5079c25bb250117e2f7fdd3cb383b1f471ca3f52. Accidental push to master.. my bad! --- beacon_node/beacon_chain/src/beacon_chain.rs | 75 ++++----- beacon_node/beacon_chain/src/fork_choice.rs | 2 +- beacon_node/beacon_chain/src/iter.rs | 48 ------ beacon_node/beacon_chain/src/lib.rs | 1 - beacon_node/beacon_chain/src/test_utils.rs | 2 +- beacon_node/network/src/sync/simple_sync.rs | 19 ++- beacon_node/rest_api/src/beacon_node.rs | 2 +- beacon_node/rpc/src/attestation.rs | 6 +- beacon_node/rpc/src/validator.rs | 6 +- beacon_node/store/src/iter.rs | 159 ++++++++++++++++++- eth2/lmd_ghost/tests/test.rs | 4 +- 11 files changed, 199 insertions(+), 125 deletions(-) delete mode 100644 beacon_node/beacon_chain/src/iter.rs diff --git a/beacon_node/beacon_chain/src/beacon_chain.rs b/beacon_node/beacon_chain/src/beacon_chain.rs index 58f64bc29..d0c50af70 100644 --- a/beacon_node/beacon_chain/src/beacon_chain.rs +++ b/beacon_node/beacon_chain/src/beacon_chain.rs @@ -1,7 +1,6 @@ use crate::checkpoint::CheckPoint; use crate::errors::{BeaconChainError as Error, BlockProductionError}; use crate::fork_choice::{Error as ForkChoiceError, ForkChoice}; -use crate::iter::{ReverseBlockRootIterator, ReverseStateRootIterator}; use crate::metrics::Metrics; use crate::persisted_beacon_chain::{PersistedBeaconChain, BEACON_CHAIN_DB_KEY}; use lmd_ghost::LmdGhost; @@ -20,7 +19,7 @@ use state_processing::{ per_slot_processing, BlockProcessingError, }; use std::sync::Arc; -use store::iter::{BlockRootsIterator, StateRootsIterator}; +use store::iter::{BestBlockRootsIterator, BlockIterator, BlockRootsIterator, StateRootsIterator}; use store::{Error as DBError, Store}; use tree_hash::TreeHash; use types::*; @@ -217,53 +216,45 @@ impl BeaconChain { Ok(headers?) } - - /// Iterates through all the `BeaconBlock` roots and slots, first returning - /// `self.head().beacon_block` then all prior blocks until either genesis or if the database - /// fails to return a prior block. + /// Iterate in reverse (highest to lowest slot) through all blocks from the block at `slot` + /// through to the genesis block. /// - /// Returns duplicate roots for skip-slots. + /// Returns `None` for headers prior to genesis or when there is an error reading from `Store`. /// - /// Iterator returns `(Hash256, Slot)`. - /// - /// ## Note - /// - /// Because this iterator starts at the `head` of the chain (viz., the best block), the first slot - /// returned may be earlier than the wall-clock slot. - pub fn rev_iter_block_roots( - &self, - slot: Slot, - ) -> ReverseBlockRootIterator { - let state = &self.head().beacon_state; - let block_root = self.head().beacon_block_root; - let block_slot = state.slot; - - let iter = BlockRootsIterator::owned(self.store.clone(), state.clone(), slot); - - ReverseBlockRootIterator::new((block_root, block_slot), iter) + /// Contains duplicate headers when skip slots are encountered. + pub fn rev_iter_blocks(&self, slot: Slot) -> BlockIterator { + BlockIterator::owned(self.store.clone(), self.state.read().clone(), slot) } - /// Iterates through all the `BeaconState` roots and slots, first returning - /// `self.head().beacon_state` then all prior states until either genesis or if the database - /// fails to return a prior state. + /// Iterates in reverse (highest to lowest slot) through all block roots from `slot` through to + /// genesis. /// - /// Iterator returns `(Hash256, Slot)`. + /// Returns `None` for roots prior to genesis or when there is an error reading from `Store`. /// - /// ## Note + /// Contains duplicate roots when skip slots are encountered. + pub fn rev_iter_block_roots(&self, slot: Slot) -> BlockRootsIterator { + BlockRootsIterator::owned(self.store.clone(), self.state.read().clone(), slot) + } + + /// Iterates in reverse (highest to lowest slot) through all block roots from largest + /// `slot <= beacon_state.slot` through to genesis. /// - /// Because this iterator starts at the `head` of the chain (viz., the best block), the first slot - /// returned may be earlier than the wall-clock slot. - pub fn rev_iter_state_roots( + /// Returns `None` for roots prior to genesis or when there is an error reading from `Store`. + /// + /// Contains duplicate roots when skip slots are encountered. + pub fn rev_iter_best_block_roots( &self, slot: Slot, - ) -> ReverseStateRootIterator { - let state = &self.head().beacon_state; - let state_root = self.head().beacon_state_root; - let state_slot = state.slot; + ) -> BestBlockRootsIterator { + BestBlockRootsIterator::owned(self.store.clone(), self.state.read().clone(), slot) + } - let iter = StateRootsIterator::owned(self.store.clone(), state.clone(), slot); - - ReverseStateRootIterator::new((state_root, state_slot), iter) + /// Iterates in reverse (highest to lowest slot) through all state roots from `slot` through to + /// genesis. + /// + /// Returns `None` for roots prior to genesis or when there is an error reading from `Store`. + pub fn rev_iter_state_roots(&self, slot: Slot) -> StateRootsIterator { + StateRootsIterator::owned(self.store.clone(), self.state.read().clone(), slot) } /// Returns the block at the given root, if any. @@ -280,10 +271,8 @@ impl BeaconChain { /// Returns a read-lock guarded `BeaconState` which is the `canonical_head` that has been /// updated to match the current slot clock. - pub fn speculative_state(&self) -> Result>, Error> { - // TODO: ensure the state has done a catch-up. - - Ok(self.state.read()) + pub fn current_state(&self) -> RwLockReadGuard> { + self.state.read() } /// Returns a read-lock guarded `CheckPoint` struct for reading the head (as chosen by the diff --git a/beacon_node/beacon_chain/src/fork_choice.rs b/beacon_node/beacon_chain/src/fork_choice.rs index 74778be32..b77979b74 100644 --- a/beacon_node/beacon_chain/src/fork_choice.rs +++ b/beacon_node/beacon_chain/src/fork_choice.rs @@ -52,7 +52,7 @@ impl ForkChoice { // been justified for at least 1 epoch ... If no such descendant exists, // set justified_head to finalized_head. let (start_state, start_block_root, start_block_slot) = { - let state = &chain.head().beacon_state; + let state = chain.current_state(); let (block_root, block_slot) = if state.current_epoch() + 1 > state.current_justified_checkpoint.epoch { diff --git a/beacon_node/beacon_chain/src/iter.rs b/beacon_node/beacon_chain/src/iter.rs deleted file mode 100644 index f73e88afa..000000000 --- a/beacon_node/beacon_chain/src/iter.rs +++ /dev/null @@ -1,48 +0,0 @@ -use store::iter::{BlockRootsIterator, StateRootsIterator}; -use types::{Hash256, Slot}; - -pub type ReverseBlockRootIterator<'a, E, S> = - ReverseHashAndSlotIterator>; -pub type ReverseStateRootIterator<'a, E, S> = - ReverseHashAndSlotIterator>; - -pub type ReverseHashAndSlotIterator = ReverseChainIterator<(Hash256, Slot), I>; - -/// Provides a wrapper for an iterator that returns a given `T` before it starts returning results of -/// the `Iterator`. -pub struct ReverseChainIterator { - first_value_used: bool, - first_value: T, - iter: I, -} - -impl ReverseChainIterator -where - T: Sized, - I: Iterator + Sized, -{ - pub fn new(first_value: T, iter: I) -> Self { - Self { - first_value_used: false, - first_value, - iter, - } - } -} - -impl Iterator for ReverseChainIterator -where - T: Clone, - I: Iterator, -{ - type Item = T; - - fn next(&mut self) -> Option { - if self.first_value_used { - self.iter.next() - } else { - self.first_value_used = true; - Some(self.first_value.clone()) - } - } -} diff --git a/beacon_node/beacon_chain/src/lib.rs b/beacon_node/beacon_chain/src/lib.rs index c2efcad13..df1de153a 100644 --- a/beacon_node/beacon_chain/src/lib.rs +++ b/beacon_node/beacon_chain/src/lib.rs @@ -2,7 +2,6 @@ mod beacon_chain; mod checkpoint; mod errors; mod fork_choice; -mod iter; mod metrics; mod persisted_beacon_chain; pub mod test_utils; diff --git a/beacon_node/beacon_chain/src/test_utils.rs b/beacon_node/beacon_chain/src/test_utils.rs index cdcd8bb21..6242b8a0a 100644 --- a/beacon_node/beacon_chain/src/test_utils.rs +++ b/beacon_node/beacon_chain/src/test_utils.rs @@ -198,7 +198,7 @@ where fn get_state_at_slot(&self, state_slot: Slot) -> BeaconState { let state_root = self .chain - .rev_iter_state_roots(self.chain.head().beacon_state.slot - 1) + .rev_iter_state_roots(self.chain.current_state().slot - 1) .find(|(_hash, slot)| *slot == state_slot) .map(|(hash, _slot)| hash) .expect("could not find state root"); diff --git a/beacon_node/network/src/sync/simple_sync.rs b/beacon_node/network/src/sync/simple_sync.rs index 215e37e7f..ac001415c 100644 --- a/beacon_node/network/src/sync/simple_sync.rs +++ b/beacon_node/network/src/sync/simple_sync.rs @@ -266,7 +266,7 @@ impl SimpleSync { fn root_at_slot(&self, target_slot: Slot) -> Option { self.chain - .rev_iter_block_roots(target_slot) + .rev_iter_best_block_roots(target_slot) .take(1) .find(|(_root, slot)| *slot == target_slot) .map(|(root, _slot)| root) @@ -280,8 +280,6 @@ impl SimpleSync { req: BeaconBlockRootsRequest, network: &mut NetworkContext, ) { - let state = &self.chain.head().beacon_state; - debug!( self.log, "BlockRootsRequest"; @@ -292,8 +290,8 @@ impl SimpleSync { let mut roots: Vec = self .chain - .rev_iter_block_roots(std::cmp::min(req.start_slot + req.count, state.slot)) - .take_while(|(_root, slot)| req.start_slot <= *slot) + .rev_iter_best_block_roots(req.start_slot + req.count) + .take(req.count as usize) .map(|(block_root, slot)| BlockRootSlot { slot, block_root }) .collect(); @@ -304,7 +302,7 @@ impl SimpleSync { "peer" => format!("{:?}", peer_id), "msg" => "Failed to return all requested hashes", "start_slot" => req.start_slot, - "current_slot" => self.chain.present_slot(), + "current_slot" => self.chain.current_state().slot, "requested" => req.count, "returned" => roots.len(), ); @@ -391,8 +389,6 @@ impl SimpleSync { req: BeaconBlockHeadersRequest, network: &mut NetworkContext, ) { - let state = &self.chain.head().beacon_state; - debug!( self.log, "BlockHeadersRequest"; @@ -403,10 +399,13 @@ impl SimpleSync { let count = req.max_headers; // Collect the block roots. + // + // Instead of using `chain.rev_iter_blocks` we collect the roots first. This avoids + // unnecessary block deserialization when `req.skip_slots > 0`. let mut roots: Vec = self .chain - .rev_iter_block_roots(std::cmp::min(req.start_slot + count, state.slot)) - .take_while(|(_root, slot)| req.start_slot <= *slot) + .rev_iter_best_block_roots(req.start_slot + count) + .take(count as usize) .map(|(root, _slot)| root) .collect(); diff --git a/beacon_node/rest_api/src/beacon_node.rs b/beacon_node/rest_api/src/beacon_node.rs index bd8d98a53..87d2d3cdc 100644 --- a/beacon_node/rest_api/src/beacon_node.rs +++ b/beacon_node/rest_api/src/beacon_node.rs @@ -54,7 +54,7 @@ fn get_version(_req: Request) -> APIResult { fn get_genesis_time(req: Request) -> APIResult { let beacon_chain = req.extensions().get::>>().unwrap(); let gen_time = { - let state = &beacon_chain.head().beacon_state; + let state = beacon_chain.current_state(); state.genesis_time }; let body = Body::from( diff --git a/beacon_node/rpc/src/attestation.rs b/beacon_node/rpc/src/attestation.rs index 20425d292..5ea8368fd 100644 --- a/beacon_node/rpc/src/attestation.rs +++ b/beacon_node/rpc/src/attestation.rs @@ -40,11 +40,7 @@ impl AttestationService for AttestationServiceInstance { // verify the slot, drop lock on state afterwards { let slot_requested = req.get_slot(); - // TODO: this whole module is legacy and not maintained well. - let state = &self - .chain - .speculative_state() - .expect("This is legacy code and should be removed"); + let state = &self.chain.current_state(); // Start by performing some checks // Check that the AttestationData is for the current slot (otherwise it will not be valid) diff --git a/beacon_node/rpc/src/validator.rs b/beacon_node/rpc/src/validator.rs index 080c828a7..b13303e25 100644 --- a/beacon_node/rpc/src/validator.rs +++ b/beacon_node/rpc/src/validator.rs @@ -29,11 +29,7 @@ impl ValidatorService for ValidatorServiceInstance { trace!(self.log, "RPC request"; "endpoint" => "GetValidatorDuties", "epoch" => req.get_epoch()); let spec = &self.chain.spec; - // TODO: this whole module is legacy and not maintained well. - let state = &self - .chain - .speculative_state() - .expect("This is legacy code and should be removed"); + let state = &self.chain.current_state(); let epoch = Epoch::from(req.get_epoch()); let mut resp = GetDutiesResponse::new(); let resp_validators = resp.mut_active_validators(); diff --git a/beacon_node/store/src/iter.rs b/beacon_node/store/src/iter.rs index fc5d80679..55c525b11 100644 --- a/beacon_node/store/src/iter.rs +++ b/beacon_node/store/src/iter.rs @@ -4,23 +4,20 @@ use std::sync::Arc; use types::{BeaconBlock, BeaconState, BeaconStateError, EthSpec, Hash256, Slot}; /// Implemented for types that have ancestors (e.g., blocks, states) that may be iterated over. -/// -/// ## Note -/// -/// It is assumed that all ancestors for this object are stored in the database. If this is not the -/// case, the iterator will start returning `None` prior to genesis. pub trait AncestorIter { /// Returns an iterator over the roots of the ancestors of `self`. fn try_iter_ancestor_roots(&self, store: Arc) -> Option; } -impl<'a, U: Store, E: EthSpec> AncestorIter> for BeaconBlock { +impl<'a, U: Store, E: EthSpec> AncestorIter> + for BeaconBlock +{ /// Iterates across all the prior block roots of `self`, starting at the most recent and ending /// at genesis. - fn try_iter_ancestor_roots(&self, store: Arc) -> Option> { + fn try_iter_ancestor_roots(&self, store: Arc) -> Option> { let state = store.get::>(&self.state_root).ok()??; - Some(BlockRootsIterator::owned(store, state, self.slot)) + Some(BestBlockRootsIterator::owned(store, state, self.slot)) } } @@ -119,6 +116,11 @@ impl<'a, T: EthSpec, U: Store> Iterator for BlockIterator<'a, T, U> { /// exhausted. /// /// Returns `None` for roots prior to genesis or when there is an error reading from `Store`. +/// +/// ## Notes +/// +/// See [`BestBlockRootsIterator`](struct.BestBlockRootsIterator.html), which has different +/// `start_slot` logic. #[derive(Clone)] pub struct BlockRootsIterator<'a, T: EthSpec, U> { store: Arc, @@ -178,6 +180,104 @@ impl<'a, T: EthSpec, U: Store> Iterator for BlockRootsIterator<'a, T, U> { } } +/// Iterates backwards through block roots with `start_slot` highest possible value +/// `<= beacon_state.slot`. +/// +/// The distinction between `BestBlockRootsIterator` and `BlockRootsIterator` is: +/// +/// - `BestBlockRootsIterator` uses best-effort slot. When `start_slot` is greater than the latest available block root +/// on `beacon_state`, returns `Some(root, slot)` where `slot` is the latest available block +/// root. +/// - `BlockRootsIterator` is strict about `start_slot`. When `start_slot` is greater than the latest available block root +/// on `beacon_state`, returns `None`. +/// +/// This is distinct from `BestBlockRootsIterator`. +/// +/// Uses the `block_roots` field of `BeaconState` to as the source of block roots and will +/// perform a lookup on the `Store` for a prior `BeaconState` if `block_roots` has been +/// exhausted. +/// +/// Returns `None` for roots prior to genesis or when there is an error reading from `Store`. +#[derive(Clone)] +pub struct BestBlockRootsIterator<'a, T: EthSpec, U> { + store: Arc, + beacon_state: Cow<'a, BeaconState>, + slot: Slot, +} + +impl<'a, T: EthSpec, U: Store> BestBlockRootsIterator<'a, T, U> { + /// Create a new iterator over all block roots in the given `beacon_state` and prior states. + pub fn new(store: Arc, beacon_state: &'a BeaconState, start_slot: Slot) -> Self { + let mut slot = start_slot; + if slot >= beacon_state.slot { + // Slot may be too high. + slot = beacon_state.slot; + if beacon_state.get_block_root(slot).is_err() { + slot -= 1; + } + } + + Self { + store, + beacon_state: Cow::Borrowed(beacon_state), + slot: slot + 1, + } + } + + /// Create a new iterator over all block roots in the given `beacon_state` and prior states. + pub fn owned(store: Arc, beacon_state: BeaconState, start_slot: Slot) -> Self { + let mut slot = start_slot; + if slot >= beacon_state.slot { + // Slot may be too high. + slot = beacon_state.slot; + // TODO: Use a function other than `get_block_root` as this will always return `Err()` + // for slot = state.slot. + if beacon_state.get_block_root(slot).is_err() { + slot -= 1; + } + } + + Self { + store, + beacon_state: Cow::Owned(beacon_state), + slot: slot + 1, + } + } +} + +impl<'a, T: EthSpec, U: Store> Iterator for BestBlockRootsIterator<'a, T, U> { + type Item = (Hash256, Slot); + + fn next(&mut self) -> Option { + if self.slot == 0 { + // End of Iterator + return None; + } + + self.slot -= 1; + + match self.beacon_state.get_block_root(self.slot) { + Ok(root) => Some((*root, self.slot)), + Err(BeaconStateError::SlotOutOfBounds) => { + // Read a `BeaconState` from the store that has access to prior historical root. + let beacon_state: BeaconState = { + // Load the earliest state from disk. + let new_state_root = self.beacon_state.get_oldest_state_root().ok()?; + + self.store.get(&new_state_root).ok()? + }?; + + self.beacon_state = Cow::Owned(beacon_state); + + let root = self.beacon_state.get_block_root(self.slot).ok()?; + + Some((*root, self.slot)) + } + _ => None, + } + } +} + #[cfg(test)] mod test { use super::*; @@ -237,6 +337,49 @@ mod test { } } + #[test] + fn best_block_root_iter() { + let store = Arc::new(MemoryStore::open()); + let slots_per_historical_root = MainnetEthSpec::slots_per_historical_root(); + + let mut state_a: BeaconState = get_state(); + let mut state_b: BeaconState = get_state(); + + state_a.slot = Slot::from(slots_per_historical_root); + state_b.slot = Slot::from(slots_per_historical_root * 2); + + let mut hashes = (0..).into_iter().map(|i| Hash256::from(i)); + + for root in &mut state_a.block_roots[..] { + *root = hashes.next().unwrap() + } + for root in &mut state_b.block_roots[..] { + *root = hashes.next().unwrap() + } + + let state_a_root = hashes.next().unwrap(); + state_b.state_roots[0] = state_a_root; + store.put(&state_a_root, &state_a).unwrap(); + + let iter = BestBlockRootsIterator::new(store.clone(), &state_b, state_b.slot); + + assert!( + iter.clone().find(|(_root, slot)| *slot == 0).is_some(), + "iter should contain zero slot" + ); + + let mut collected: Vec<(Hash256, Slot)> = iter.collect(); + collected.reverse(); + + let expected_len = 2 * MainnetEthSpec::slots_per_historical_root(); + + assert_eq!(collected.len(), expected_len); + + for i in 0..expected_len { + assert_eq!(collected[i].0, Hash256::from(i as u64)); + } + } + #[test] fn state_root_iter() { let store = Arc::new(MemoryStore::open()); diff --git a/eth2/lmd_ghost/tests/test.rs b/eth2/lmd_ghost/tests/test.rs index 0ac263638..fbe385560 100644 --- a/eth2/lmd_ghost/tests/test.rs +++ b/eth2/lmd_ghost/tests/test.rs @@ -10,7 +10,7 @@ use lmd_ghost::{LmdGhost, ThreadSafeReducedTree as BaseThreadSafeReducedTree}; use rand::{prelude::*, rngs::StdRng}; use std::sync::Arc; use store::{ - iter::{AncestorIter, BlockRootsIterator}, + iter::{AncestorIter, BestBlockRootsIterator}, MemoryStore, Store, }; use types::{BeaconBlock, EthSpec, Hash256, MinimalEthSpec, Slot}; @@ -159,7 +159,7 @@ fn get_ancestor_roots( .expect("block should exist") .expect("store should not error"); - as AncestorIter<_, BlockRootsIterator>>::try_iter_ancestor_roots( + as AncestorIter<_, BestBlockRootsIterator>>::try_iter_ancestor_roots( &block, store, ) .expect("should be able to create ancestor iter") From 65c18ddc604748a87d3c218de794c1e5358daba8 Mon Sep 17 00:00:00 2001 From: Michael Sproul Date: Mon, 5 Aug 2019 18:06:24 +1000 Subject: [PATCH 20/25] Fix Bitfield from_bytes empty vec bug (#487) Credit to @kirk-baird for finding the bug with the fuzzer. Co-authored-by: Kirk Baird --- eth2/utils/ssz_types/src/bitfield.rs | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) diff --git a/eth2/utils/ssz_types/src/bitfield.rs b/eth2/utils/ssz_types/src/bitfield.rs index 78182712b..592574e15 100644 --- a/eth2/utils/ssz_types/src/bitfield.rs +++ b/eth2/utils/ssz_types/src/bitfield.rs @@ -163,8 +163,7 @@ impl Bitfield> { pub fn from_bytes(bytes: Vec) -> Result { let mut initial_bitfield: Bitfield> = { let num_bits = bytes.len() * 8; - Bitfield::from_raw_bytes(bytes, num_bits) - .expect("Must have adequate bytes for bit count.") + Bitfield::from_raw_bytes(bytes, num_bits)? }; let len = initial_bitfield @@ -802,6 +801,11 @@ mod bitlist { #[test] fn ssz_decode() { + assert!(BitList0::from_ssz_bytes(&[]).is_err()); + assert!(BitList1::from_ssz_bytes(&[]).is_err()); + assert!(BitList8::from_ssz_bytes(&[]).is_err()); + assert!(BitList16::from_ssz_bytes(&[]).is_err()); + assert!(BitList0::from_ssz_bytes(&[0b0000_0000]).is_err()); assert!(BitList1::from_ssz_bytes(&[0b0000_0000, 0b0000_0000]).is_err()); assert!(BitList8::from_ssz_bytes(&[0b0000_0000]).is_err()); From 4f45bf2255787b6b2e952b9e721d9325f6505ff1 Mon Sep 17 00:00:00 2001 From: Paul Hauner Date: Mon, 5 Aug 2019 18:06:50 +1000 Subject: [PATCH 21/25] Tree hash benches (#486) * Add initial tree hash benches * Add tree hash example * Use lazy static in tree hash benches --- eth2/utils/tree_hash/Cargo.toml | 6 ++ eth2/utils/tree_hash/benches/benches.rs | 56 +++++++++++++++++++ .../examples/flamegraph_beacon_state.rs | 35 ++++++++++++ 3 files changed, 97 insertions(+) create mode 100644 eth2/utils/tree_hash/benches/benches.rs create mode 100644 eth2/utils/tree_hash/examples/flamegraph_beacon_state.rs diff --git a/eth2/utils/tree_hash/Cargo.toml b/eth2/utils/tree_hash/Cargo.toml index 948e0fe4f..3019c2ad0 100644 --- a/eth2/utils/tree_hash/Cargo.toml +++ b/eth2/utils/tree_hash/Cargo.toml @@ -4,9 +4,15 @@ version = "0.1.0" authors = ["Paul Hauner "] edition = "2018" +[[bench]] +name = "benches" +harness = false + [dev-dependencies] +criterion = "0.2" rand = "0.7" tree_hash_derive = { path = "../tree_hash_derive" } +types = { path = "../../types" } [dependencies] ethereum-types = "0.5" diff --git a/eth2/utils/tree_hash/benches/benches.rs b/eth2/utils/tree_hash/benches/benches.rs new file mode 100644 index 000000000..22e2a8784 --- /dev/null +++ b/eth2/utils/tree_hash/benches/benches.rs @@ -0,0 +1,56 @@ +#[macro_use] +extern crate lazy_static; + +use criterion::Criterion; +use criterion::{black_box, criterion_group, criterion_main, Benchmark}; +use tree_hash::TreeHash; +use types::test_utils::{generate_deterministic_keypairs, TestingBeaconStateBuilder}; +use types::{BeaconState, EthSpec, Keypair, MainnetEthSpec, MinimalEthSpec}; + +lazy_static! { + static ref KEYPAIRS: Vec = { generate_deterministic_keypairs(300_000) }; +} + +fn build_state(validator_count: usize) -> BeaconState { + let (state, _keypairs) = TestingBeaconStateBuilder::from_keypairs( + KEYPAIRS[0..validator_count].to_vec(), + &T::default_spec(), + ) + .build(); + + assert_eq!(state.validators.len(), validator_count); + assert_eq!(state.balances.len(), validator_count); + assert!(state.previous_epoch_attestations.is_empty()); + assert!(state.current_epoch_attestations.is_empty()); + assert!(state.eth1_data_votes.is_empty()); + assert!(state.historical_roots.is_empty()); + + state +} + +fn bench_suite(c: &mut Criterion, spec_desc: &str, validator_count: usize) { + let state = build_state::(validator_count); + + c.bench( + &format!("{}/{}_validators", spec_desc, validator_count), + Benchmark::new("genesis_state", move |b| { + b.iter_batched_ref( + || state.clone(), + |state| black_box(state.tree_hash_root()), + criterion::BatchSize::SmallInput, + ) + }) + .sample_size(10), + ); +} + +fn all_benches(c: &mut Criterion) { + bench_suite::(c, "minimal", 100_000); + bench_suite::(c, "minimal", 300_000); + + bench_suite::(c, "mainnet", 100_000); + bench_suite::(c, "mainnet", 300_000); +} + +criterion_group!(benches, all_benches,); +criterion_main!(benches); diff --git a/eth2/utils/tree_hash/examples/flamegraph_beacon_state.rs b/eth2/utils/tree_hash/examples/flamegraph_beacon_state.rs new file mode 100644 index 000000000..8a619ce77 --- /dev/null +++ b/eth2/utils/tree_hash/examples/flamegraph_beacon_state.rs @@ -0,0 +1,35 @@ +use tree_hash::TreeHash; +use types::test_utils::TestingBeaconStateBuilder; +use types::{BeaconState, EthSpec, MainnetEthSpec}; + +const TREE_HASH_LOOPS: usize = 1_000; +const VALIDATOR_COUNT: usize = 1_000; + +fn build_state(validator_count: usize) -> BeaconState { + let (state, _keypairs) = TestingBeaconStateBuilder::from_default_keypairs_file_if_exists( + validator_count, + &T::default_spec(), + ) + .build(); + + assert_eq!(state.validators.len(), validator_count); + assert_eq!(state.balances.len(), validator_count); + assert!(state.previous_epoch_attestations.is_empty()); + assert!(state.current_epoch_attestations.is_empty()); + assert!(state.eth1_data_votes.is_empty()); + assert!(state.historical_roots.is_empty()); + + state +} + +fn main() { + let state = build_state::(VALIDATOR_COUNT); + + // This vec is an attempt to ensure the compiler doesn't optimize-out the hashing. + let mut vec = Vec::with_capacity(TREE_HASH_LOOPS); + + for _ in 0..TREE_HASH_LOOPS { + let root = state.tree_hash_root(); + vec.push(root[0]); + } +} From 0374e319079a10b8cb1d7e447fceece3f80ac5d3 Mon Sep 17 00:00:00 2001 From: Michael Sproul Date: Tue, 6 Aug 2019 11:05:35 +1000 Subject: [PATCH 22/25] Disallow extra bytes in Bitfield from_bytes (#488) --- eth2/utils/ssz_types/src/bitfield.rs | 20 ++++++++++++++++++++ 1 file changed, 20 insertions(+) diff --git a/eth2/utils/ssz_types/src/bitfield.rs b/eth2/utils/ssz_types/src/bitfield.rs index 592574e15..a95a5faa4 100644 --- a/eth2/utils/ssz_types/src/bitfield.rs +++ b/eth2/utils/ssz_types/src/bitfield.rs @@ -161,6 +161,7 @@ impl Bitfield> { /// /// Returns `None` if `bytes` are not a valid encoding. pub fn from_bytes(bytes: Vec) -> Result { + let bytes_len = bytes.len(); let mut initial_bitfield: Bitfield> = { let num_bits = bytes.len() * 8; Bitfield::from_raw_bytes(bytes, num_bits)? @@ -170,6 +171,14 @@ impl Bitfield> { .highest_set_bit() .ok_or_else(|| Error::MissingLengthInformation)?; + // The length bit should be in the last byte, or else it means we have too many bytes. + if len / 8 + 1 != bytes_len { + return Err(Error::InvalidByteCount { + given: bytes_len, + expected: len / 8 + 1, + }); + } + if len <= Self::max_len() { initial_bitfield .set(len, false) @@ -825,6 +834,17 @@ mod bitlist { assert!(BitList8::from_ssz_bytes(&[0b0000_0001, 0b0000_0100]).is_err()); } + #[test] + fn ssz_decode_extra_bytes() { + assert!(BitList0::from_ssz_bytes(&[0b0000_0001, 0b0000_0000]).is_err()); + assert!(BitList1::from_ssz_bytes(&[0b0000_0001, 0b0000_0000]).is_err()); + assert!(BitList8::from_ssz_bytes(&[0b0000_0001, 0b0000_0000]).is_err()); + assert!(BitList16::from_ssz_bytes(&[0b0000_0001, 0b0000_0000]).is_err()); + assert!(BitList1024::from_ssz_bytes(&[0b1000_0000, 0]).is_err()); + assert!(BitList1024::from_ssz_bytes(&[0b1000_0000, 0, 0]).is_err()); + assert!(BitList1024::from_ssz_bytes(&[0b1000_0000, 0, 0, 0, 0]).is_err()); + } + #[test] fn ssz_round_trip() { assert_round_trip(BitList0::with_capacity(0).unwrap()); From 845f336a592f59344f9f035a9376c40299f2232e Mon Sep 17 00:00:00 2001 From: Paul Hauner Date: Tue, 6 Aug 2019 13:29:27 +1000 Subject: [PATCH 23/25] Interop chain start strategies (#479) * Implement more flexible beacon chain genesis * Fix compile issues from rebase on master * Rename CLI flag * Adds initial documentation for TOML files * Update docs readme * Add first version of cli_util * Dont write cache fields in serde * Tidy cli_util * Add code to load genesis YAML file * Move serde_utils out of tests in `types` * Update logging text * Fix serde YAML for Fork * Make yaml hex decoding more strict * Update deterministic key generate for interop * Set deposit count on testing genesis state * Make some fixes for deposit count * Remove code fragements * Large restructure of docs * Tidy docs * Fix readme link * Add interop docs * Tidy README --- Cargo.toml | 2 + beacon_node/beacon_chain/src/beacon_chain.rs | 20 +- beacon_node/client/Cargo.toml | 1 + beacon_node/client/src/beacon_chain_types.rs | 140 ++++++++--- beacon_node/client/src/config.rs | 33 +++ beacon_node/client/src/lib.rs | 11 +- beacon_node/src/main.rs | 41 ++- beacon_node/src/run.rs | 2 +- docs/README.md | 69 ++++++ docs/config_examples/beacon-node.toml | 98 ++++++++ docs/env.md | 52 ++++ docs/installation.md | 40 --- docs/interop.md | 109 ++++++++ docs/lighthouse.md | 83 ------- docs/onboarding.md | 233 ------------------ eth2/types/Cargo.toml | 1 + eth2/types/src/beacon_block_body.rs | 3 +- eth2/types/src/beacon_state.rs | 4 +- eth2/types/src/chain_spec.rs | 2 +- eth2/types/src/fork.rs | 17 +- eth2/types/src/lib.rs | 1 + .../builders/testing_beacon_state_builder.rs | 3 + .../generate_deterministic_keypairs.rs | 15 +- eth2/types/src/test_utils/mod.rs | 2 - eth2/types/src/utils.rs | 3 + .../src/{test_utils => utils}/serde_utils.rs | 14 +- eth2/utils/eth2_interop_keypairs/Cargo.toml | 11 + eth2/utils/eth2_interop_keypairs/src/lib.rs | 130 ++++++++++ tests/cli_util/.gitignore | 1 + tests/cli_util/Cargo.toml | 15 ++ tests/cli_util/src/main.rs | 118 +++++++++ 31 files changed, 835 insertions(+), 439 deletions(-) create mode 100644 docs/README.md create mode 100644 docs/config_examples/beacon-node.toml create mode 100644 docs/env.md delete mode 100644 docs/installation.md create mode 100644 docs/interop.md delete mode 100644 docs/lighthouse.md delete mode 100644 docs/onboarding.md create mode 100644 eth2/types/src/utils.rs rename eth2/types/src/{test_utils => utils}/serde_utils.rs (82%) create mode 100644 eth2/utils/eth2_interop_keypairs/Cargo.toml create mode 100644 eth2/utils/eth2_interop_keypairs/src/lib.rs create mode 100644 tests/cli_util/.gitignore create mode 100644 tests/cli_util/Cargo.toml create mode 100644 tests/cli_util/src/main.rs diff --git a/Cargo.toml b/Cargo.toml index c4034ad35..1c0522bde 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -9,6 +9,7 @@ members = [ "eth2/utils/compare_fields", "eth2/utils/compare_fields_derive", "eth2/utils/eth2_config", + "eth2/utils/eth2_interop_keypairs", "eth2/utils/hashing", "eth2/utils/logging", "eth2/utils/merkle_proof", @@ -33,6 +34,7 @@ members = [ "beacon_node/version", "beacon_node/beacon_chain", "tests/ef_tests", + "tests/cli_util", "protos", "validator_client", "account_manager", diff --git a/beacon_node/beacon_chain/src/beacon_chain.rs b/beacon_node/beacon_chain/src/beacon_chain.rs index d0c50af70..d520f0b5c 100644 --- a/beacon_node/beacon_chain/src/beacon_chain.rs +++ b/beacon_node/beacon_chain/src/beacon_chain.rs @@ -94,29 +94,37 @@ impl BeaconChain { store: Arc, slot_clock: T::SlotClock, mut genesis_state: BeaconState, - genesis_block: BeaconBlock, + mut genesis_block: BeaconBlock, spec: ChainSpec, log: Logger, ) -> Result { genesis_state.build_all_caches(&spec)?; - let state_root = genesis_state.canonical_root(); - store.put(&state_root, &genesis_state)?; + let genesis_state_root = genesis_state.canonical_root(); + store.put(&genesis_state_root, &genesis_state)?; + + genesis_block.state_root = genesis_state_root; let genesis_block_root = genesis_block.block_header().canonical_root(); store.put(&genesis_block_root, &genesis_block)?; // Also store the genesis block under the `ZERO_HASH` key. - let genesis_block_root = genesis_block.block_header().canonical_root(); + let genesis_block_root = genesis_block.canonical_root(); store.put(&Hash256::zero(), &genesis_block)?; let canonical_head = RwLock::new(CheckPoint::new( genesis_block.clone(), genesis_block_root, genesis_state.clone(), - state_root, + genesis_state_root, )); + info!(log, "BeaconChain init"; + "genesis_validator_count" => genesis_state.validators.len(), + "genesis_state_root" => format!("{}", genesis_state_root), + "genesis_block_root" => format!("{}", genesis_block_root), + ); + Ok(Self { spec, slot_clock, @@ -760,7 +768,7 @@ impl BeaconChain { randao_reveal, // TODO: replace with real data. eth1_data: Eth1Data { - deposit_count: 0, + deposit_count: state.eth1_data.deposit_count, deposit_root: Hash256::zero(), block_hash: Hash256::zero(), }, diff --git a/beacon_node/client/Cargo.toml b/beacon_node/client/Cargo.toml index 3367b84ce..c06dcb5d5 100644 --- a/beacon_node/client/Cargo.toml +++ b/beacon_node/client/Cargo.toml @@ -18,6 +18,7 @@ slot_clock = { path = "../../eth2/utils/slot_clock" } serde = "1.0.93" serde_derive = "1.0" error-chain = "0.12.0" +serde_yaml = "0.8" slog = { version = "^2.2.3" , features = ["max_level_trace"] } slog-async = "^2.3.0" slog-json = "^2.3" diff --git a/beacon_node/client/src/beacon_chain_types.rs b/beacon_node/client/src/beacon_chain_types.rs index f332092ca..0b86c9583 100644 --- a/beacon_node/client/src/beacon_chain_types.rs +++ b/beacon_node/client/src/beacon_chain_types.rs @@ -1,27 +1,31 @@ +use crate::error::Result; +use crate::{config::GenesisState, ClientConfig}; use beacon_chain::{ lmd_ghost::{LmdGhost, ThreadSafeReducedTree}, slot_clock::SystemTimeSlotClock, store::Store, BeaconChain, BeaconChainTypes, }; -use slog::{info, Logger}; +use slog::{crit, info, Logger}; use slot_clock::SlotClock; +use std::fs::File; use std::marker::PhantomData; use std::sync::Arc; +use std::time::SystemTime; use tree_hash::TreeHash; -use types::{test_utils::TestingBeaconStateBuilder, BeaconBlock, ChainSpec, EthSpec, Hash256}; - -/// The number initial validators when starting the `Minimal`. -const TESTNET_VALIDATOR_COUNT: usize = 16; +use types::{ + test_utils::TestingBeaconStateBuilder, BeaconBlock, BeaconState, ChainSpec, EthSpec, Hash256, +}; /// Provides a new, initialized `BeaconChain` pub trait InitialiseBeaconChain { fn initialise_beacon_chain( store: Arc, + config: &ClientConfig, spec: ChainSpec, log: Logger, - ) -> BeaconChain { - maybe_load_from_store_for_testnet::<_, T::Store, T::EthSpec>(store, spec, log) + ) -> Result> { + maybe_load_from_store_for_testnet::<_, T::Store, T::EthSpec>(store, config, spec, log) } } @@ -42,45 +46,109 @@ impl InitialiseBeaconChain for Cli /// Loads a `BeaconChain` from `store`, if it exists. Otherwise, create a new chain from genesis. fn maybe_load_from_store_for_testnet( store: Arc, + config: &ClientConfig, spec: ChainSpec, log: Logger, -) -> BeaconChain +) -> Result> where T: BeaconChainTypes, T::LmdGhost: LmdGhost, { + let genesis_state = match &config.genesis_state { + GenesisState::Mainnet => { + crit!(log, "This release does not support mainnet genesis state."); + return Err("Mainnet is unsupported".into()); + } + GenesisState::RecentGenesis { validator_count } => { + generate_testnet_genesis_state(*validator_count, recent_genesis_time(), &spec) + } + GenesisState::Generated { + validator_count, + genesis_time, + } => generate_testnet_genesis_state(*validator_count, *genesis_time, &spec), + GenesisState::Yaml { file } => { + let file = File::open(file).map_err(|e| { + format!("Unable to open YAML genesis state file {:?}: {:?}", file, e) + })?; + + serde_yaml::from_reader(file) + .map_err(|e| format!("Unable to parse YAML genesis state file: {:?}", e))? + } + }; + + let mut genesis_block = BeaconBlock::empty(&spec); + genesis_block.state_root = Hash256::from_slice(&genesis_state.tree_hash_root()); + let genesis_block_root = genesis_block.canonical_root(); + + // Slot clock + let slot_clock = T::SlotClock::new( + spec.genesis_slot, + genesis_state.genesis_time, + spec.seconds_per_slot, + ); + + // Try load an existing `BeaconChain` from the store. If unable, create a new one. if let Ok(Some(beacon_chain)) = BeaconChain::from_store(store.clone(), spec.clone(), log.clone()) { - info!( - log, - "Loaded BeaconChain from store"; - "slot" => beacon_chain.head().beacon_state.slot, - "best_slot" => beacon_chain.best_slot(), - ); + // Here we check to ensure that the `BeaconChain` loaded from store has the expected + // genesis block. + // + // Without this check, it's possible that there will be an existing DB with a `BeaconChain` + // that has different parameters than provided to this executable. + if beacon_chain.genesis_block_root == genesis_block_root { + info!( + log, + "Loaded BeaconChain from store"; + "slot" => beacon_chain.head().beacon_state.slot, + "best_slot" => beacon_chain.best_slot(), + ); - beacon_chain + Ok(beacon_chain) + } else { + crit!( + log, + "The BeaconChain loaded from disk has an incorrect genesis root. \ + This may be caused by an old database in located in datadir." + ); + Err("Incorrect genesis root".into()) + } } else { - info!(log, "Initializing new BeaconChain from genesis"); - let state_builder = TestingBeaconStateBuilder::from_default_keypairs_file_if_exists( - TESTNET_VALIDATOR_COUNT, - &spec, - ); - let (genesis_state, _keypairs) = state_builder.build(); - - let mut genesis_block = BeaconBlock::empty(&spec); - genesis_block.state_root = Hash256::from_slice(&genesis_state.tree_hash_root()); - - // Slot clock - let slot_clock = T::SlotClock::new( - spec.genesis_slot, - genesis_state.genesis_time, - spec.seconds_per_slot, - ); - - // Genesis chain - //TODO: Handle error correctly - BeaconChain::from_genesis(store, slot_clock, genesis_state, genesis_block, spec, log) - .expect("Terminate if beacon chain generation fails") + BeaconChain::from_genesis( + store, + slot_clock, + genesis_state, + genesis_block, + spec, + log.clone(), + ) + .map_err(|e| format!("Failed to initialize new beacon chain: {:?}", e).into()) } } + +fn generate_testnet_genesis_state( + validator_count: usize, + genesis_time: u64, + spec: &ChainSpec, +) -> BeaconState { + let (mut genesis_state, _keypairs) = + TestingBeaconStateBuilder::from_default_keypairs_file_if_exists(validator_count, spec) + .build(); + + genesis_state.genesis_time = genesis_time; + + genesis_state +} + +/// Returns the system time, mod 30 minutes. +/// +/// Used for easily creating testnets. +fn recent_genesis_time() -> u64 { + let now = SystemTime::now() + .duration_since(SystemTime::UNIX_EPOCH) + .unwrap() + .as_secs(); + let secs_after_last_period = now.checked_rem(30 * 60).unwrap_or(0); + // genesis is now the last 30 minute block. + now - secs_after_last_period +} diff --git a/beacon_node/client/src/config.rs b/beacon_node/client/src/config.rs index 9a9fed802..1a27de406 100644 --- a/beacon_node/client/src/config.rs +++ b/beacon_node/client/src/config.rs @@ -7,6 +7,12 @@ use std::fs::{self, OpenOptions}; use std::path::PathBuf; use std::sync::Mutex; +/// The number initial validators when starting the `Minimal`. +const TESTNET_VALIDATOR_COUNT: usize = 16; + +/// The number initial validators when starting the `Minimal`. +const TESTNET_SPEC_CONSTANTS: &str = "minimal"; + /// The core configuration of a Lighthouse beacon node. #[derive(Debug, Clone, Serialize, Deserialize)] pub struct Config { @@ -14,12 +20,35 @@ pub struct Config { pub db_type: String, db_name: String, pub log_file: PathBuf, + pub spec_constants: String, + pub genesis_state: GenesisState, pub network: network::NetworkConfig, pub rpc: rpc::RPCConfig, pub http: HttpServerConfig, pub rest_api: rest_api::APIConfig, } +#[derive(Debug, Clone, Serialize, Deserialize)] +#[serde(tag = "type")] +pub enum GenesisState { + /// Use the mainnet genesis state. + /// + /// Mainnet genesis state is not presently known, so this is a place-holder. + Mainnet, + /// Generate a state with `validator_count` validators, all with well-known secret keys. + /// + /// Set the genesis time to be the start of the previous 30-minute window. + RecentGenesis { validator_count: usize }, + /// Generate a state with `genesis_time` and `validator_count` validators, all with well-known + /// secret keys. + Generated { + validator_count: usize, + genesis_time: u64, + }, + /// Load a YAML-encoded genesis state from a file. + Yaml { file: PathBuf }, +} + impl Default for Config { fn default() -> Self { Self { @@ -33,6 +62,10 @@ impl Default for Config { rpc: rpc::RPCConfig::default(), http: HttpServerConfig::default(), rest_api: rest_api::APIConfig::default(), + spec_constants: TESTNET_SPEC_CONSTANTS.into(), + genesis_state: GenesisState::RecentGenesis { + validator_count: TESTNET_VALIDATOR_COUNT, + }, } } } diff --git a/beacon_node/client/src/lib.rs b/beacon_node/client/src/lib.rs index 8138c7d47..65ba071fa 100644 --- a/beacon_node/client/src/lib.rs +++ b/beacon_node/client/src/lib.rs @@ -67,9 +67,10 @@ where // Load a `BeaconChain` from the store, or create a new one if it does not exist. let beacon_chain = Arc::new(T::initialise_beacon_chain( store, + &client_config, eth2_config.spec.clone(), log.clone(), - )); + )?); // Registry all beacon chain metrics with the global registry. beacon_chain .metrics @@ -90,7 +91,7 @@ where let slots_since_genesis = beacon_chain.slots_since_genesis().unwrap(); info!( log, - "Initializing state"; + "BeaconState cache init"; "state_slot" => state_slot, "wall_clock_slot" => wall_clock_slot, "slots_since_genesis" => slots_since_genesis, @@ -98,12 +99,6 @@ where ); } do_state_catchup(&beacon_chain, &log); - info!( - log, - "State initialized"; - "state_slot" => beacon_chain.head().beacon_state.slot, - "wall_clock_slot" => beacon_chain.read_slot_clock().unwrap(), - ); // Start the network service, libp2p and syncing threads // TODO: Add beacon_chain reference to network parameters diff --git a/beacon_node/src/main.rs b/beacon_node/src/main.rs index 2e0cbb67b..dd0c695b4 100644 --- a/beacon_node/src/main.rs +++ b/beacon_node/src/main.rs @@ -12,6 +12,7 @@ pub const DEFAULT_DATA_DIR: &str = ".lighthouse"; pub const CLIENT_CONFIG_FILENAME: &str = "beacon-node.toml"; pub const ETH2_CONFIG_FILENAME: &str = "eth2-spec.toml"; +pub const TESTNET_CONFIG_FILENAME: &str = "testnet.toml"; fn main() { // debugging output for libp2p and external crates @@ -21,7 +22,9 @@ fn main() { .version(version::version().as_str()) .author("Sigma Prime ") .about("Eth 2.0 Client") - // file system related arguments + /* + * Configuration directory locations. + */ .arg( Arg::with_name("datadir") .long("datadir") @@ -43,7 +46,9 @@ fn main() { .help("Data directory for network keys.") .takes_value(true) ) - // network related arguments + /* + * Network parameters. + */ .arg( Arg::with_name("listen-address") .long("listen-address") @@ -86,7 +91,9 @@ fn main() { .help("The IP address to broadcast to other peers on how to reach this node.") .takes_value(true), ) - // rpc related arguments + /* + * gRPC parameters. + */ .arg( Arg::with_name("rpc") .long("rpc") @@ -107,7 +114,9 @@ fn main() { .help("Listen port for RPC endpoint.") .takes_value(true), ) - // HTTP related arguments + /* + * HTTP server parameters. + */ .arg( Arg::with_name("http") .long("http") @@ -127,7 +136,6 @@ fn main() { .help("Listen port for the HTTP server.") .takes_value(true), ) - // REST API related arguments .arg( Arg::with_name("api") .long("api") @@ -149,7 +157,10 @@ fn main() { .help("Set the listen TCP port for the RESTful HTTP API server.") .takes_value(true), ) - // General arguments + + /* + * Database parameters. + */ .arg( Arg::with_name("db") .long("db") @@ -159,12 +170,17 @@ fn main() { .possible_values(&["disk", "memory"]) .default_value("memory"), ) + /* + * Specification/testnet params. + */ .arg( - Arg::with_name("spec-constants") - .long("spec-constants") + Arg::with_name("default-spec") + .long("default-spec") .value_name("TITLE") - .short("s") - .help("The title of the spec constants for chain config.") + .short("default-spec") + .help("Specifies the default eth2 spec to be used. Overridden by any spec loaded + from disk. A spec will be written to disk after this flag is used, so it is + primarily used for creating eth2 spec files.") .takes_value(true) .possible_values(&["mainnet", "minimal"]) .default_value("minimal"), @@ -175,6 +191,9 @@ fn main() { .short("r") .help("When present, genesis will be within 30 minutes prior. Only for testing"), ) + /* + * Logging. + */ .arg( Arg::with_name("debug-level") .long("debug-level") @@ -288,7 +307,7 @@ fn main() { let mut eth2_config = match read_from_file::(eth2_config_path.clone()) { Ok(Some(c)) => c, Ok(None) => { - let default = match matches.value_of("spec-constants") { + let default = match matches.value_of("default-spec") { Some("mainnet") => Eth2Config::mainnet(), Some("minimal") => Eth2Config::minimal(), _ => unreachable!(), // Guarded by slog. diff --git a/beacon_node/src/run.rs b/beacon_node/src/run.rs index 9e0b898aa..010993988 100644 --- a/beacon_node/src/run.rs +++ b/beacon_node/src/run.rs @@ -49,7 +49,7 @@ pub fn run_beacon_node( info!( log, - "Starting beacon node"; + "BeaconNode init"; "p2p_listen_address" => format!("{:?}", &other_client_config.network.listen_address), "data_dir" => format!("{:?}", other_client_config.data_dir()), "network_dir" => format!("{:?}", other_client_config.network.network_dir), diff --git a/docs/README.md b/docs/README.md new file mode 100644 index 000000000..3b69e9dd7 --- /dev/null +++ b/docs/README.md @@ -0,0 +1,69 @@ +# Lighthouse Documentation + +_Lighthouse is a work-in-progress. Instructions are provided for running the +client, however these instructions are designed for developers and researchers +working on the project. We do not (yet) provide user-facing functionality._ + +## Introduction + +- [Overview of Ethereum 2.0](serenity.md) +- [Development Environment Setup](env.md) + +For client implementers looking to inter-op, see the [Inter-Op +Docs](interop.md). + +## Command-line Interface + +With the [development environment](env.md) configured, run `cargo build --all +--release` (this can take several minutes on the first build). Then, +navigate to the `target/release/` directory and read the CLI documentation +using: + +``` +$ ./beacon_node -h +``` + +The main [`README.md`](../README.md#simple-local-testnet) provides instructions +for running a small, local testnet. + +## REST API + +The beacon node provides a RESTful HTTP API which serves information about the +Beacon Chain, the P2P network and more. + +This API is documented in the [`rest_oapi.yaml`](rest_oapi.yaml) Swagger YAML +file. There's an interactive version hosted on +[SwaggerHub](https://app.swaggerhub.com/apis/spble/lighthouse_rest_api/0.1.0). + +The implementation of the Swagger API in Lighthouse is incomplete, we do not +(yet) guarantee that all routes are implemented. + +## Configuration Files + +Lighthouse uses [TOML](https://github.com/toml-lang/toml) files for +configuration. The following binaries use the following config files (they are +generated from defaults if they don't already exist): + +- [Beacon Node](/beacon_node) + - [`~/.lighthouse/beacon_node.toml`](#beacon-nodetoml): the primary + configuration file for a beacon node. + - `~/.lighthouse/eth2-spec.toml`: defines chain-specific "constants" that + define an Ethereum 2.0 network. +- [Validator Client](/validator_client) + - `~/.lighthouse/validator_client.toml`: the primary configuration file for + a validator client. + - `~/.lighthouse/eth2-spec.toml`: defines chain-specific "constants" that + define an Ethereum 2.0 network. + +_Note: default directories are shown, CLI flags can be used to override these +defaults._ + +#### `beacon-node.toml` + +A TOML configuration file that defines the behaviour of the beacon node +runtime. + +- Located in the `datadir` (default `~/.lighthouse`) as `beacon-node.toml`. +- Created from defaults if not present. + +See the [example](config_examples/beacon-node.toml) for more information. diff --git a/docs/config_examples/beacon-node.toml b/docs/config_examples/beacon-node.toml new file mode 100644 index 000000000..3c9f8b613 --- /dev/null +++ b/docs/config_examples/beacon-node.toml @@ -0,0 +1,98 @@ +# +# Beacon Node TOML configuration file. +# +# Defines the runtime configuration of a Lighthouse Beacon Node. +# + +# The directory where beacon-node specific files will be placed. Includes the +# database and configuration files. +data_dir = ".lighthouse" +# The type of database used. Can be either: +# +# - "disk": LevelDB (almost always desired). +# - "memory": an in-memory hashmap (only used for testing). +db_type = "disk" +# The name of the LevelDB database directory, if any. +db_name = "chain_db" +# If specified, all logs will be written to this file. +log_file = "" +# Defines the Ethereum 2.0 specification set to be used: +# +# - "mainnet": parameters expected to be used for Eth2 mainnet. +# - "minimal": smaller, more efficient parameters used for testing. +spec_constants = "minimal" + +# +# The "genesis_state" object defines how the genesis state should be created. +# + +# The "RecentGenesis" type assumes that genesis started at the beginning of the +# most-recent 30 minute window (e.g., 08:00, 08:30, 09:00, ...). +[genesis_state] +type = "RecentGenesis" +validator_count = 16 + +# "Generated" is the same as "RecentGenesis", however allows for manual +# specification of the genesis_time. +# +# [genesis_state] +# type = "Generated" +# validator_count = 16 +# genesis_time = 1564620118 + +# "Yaml" loads a full genesis state from YAML file. +# +# [genesis_state] +# type = "Yaml" +# file = "~/genesis_state.yaml" + +# +# P2P networking configuration. +# +[network] +# The directory for storing p2p network related files. E.g., p2p keys, peer +# lists, etc. +network_dir = "/home/paul/.lighthouse/network" +# The address that libp2p should use for incoming connections. +listen_address = "127.0.0.1" +# The port that libp2p should use for incoming connections. +libp2p_port = 9000 +# The address that should listen for UDP peer-discovery. +discovery_address = "127.0.0.1" +# The port that should listen for UDP peer-discovery. +discovery_port = 9000 +# Maximum number of libp2p peers. +max_peers = 10 +# Boot nodes for initial peer discovery. +boot_nodes = [] +# The client version, may be customized. +client_version = "Lighthouse/v0.1.0-unstable/x86_64-linux" +# A list of libp2p topics. Purpose unknown. +topics = [] + +# +# gRPC configuration. To be removed. +# +[rpc] +enabled = false +listen_address = "127.0.0.1" +port = 5051 + +# +# Legacy HTTP server configuration. To be removed. +# +[http] +enabled = false +listen_address = "127.0.0.1" +listen_port = "5052" + +# +# RESTful HTTP API server configuration. +# +[rest_api] +# Set to `true` to enable the gRPC server. +enabled = true +# The listen port for the HTTP server. +listen_address = "127.0.0.1" +# The listen port for the HTTP server. +port = 1248 diff --git a/docs/env.md b/docs/env.md new file mode 100644 index 000000000..f1728ea6b --- /dev/null +++ b/docs/env.md @@ -0,0 +1,52 @@ +# Development Environment Setup + +_This document describes how to setup a development environment. It is intended +for software developers and researchers who wish to contribute to development._ + +Lighthouse is a Rust project and [`cargo`](https://doc.rust-lang.org/cargo/) is +used extensively. As such, you'll need to install Rust in order to build the +project. Generally, Rust is installed using the +[rustup](https://www.rust-lang.org/tools/install) tool-chain manager. + +## Steps + +A fully-featured development environment can be achieved with the following +steps: + + 1. Install [rustup](https://rustup.rs/). + 1. Use the command `rustup show` to get information about the Rust + installation. You should see that the active tool-chain is the stable + version. + - Updates can be performed using` rustup update`, Lighthouse generally + requires a recent version of Rust. + 1. Install build dependencies (Arch packages are listed here, your + distribution will likely be similar): + - `clang`: required by RocksDB. + - `protobuf`: required for protobuf serialization (gRPC). + - `cmake`: required for building protobuf + - `git-lfs`: The Git extension for [Large File + Support](https://git-lfs.github.com/) (required for Ethereum Foundation + test vectors). + 1. Clone the repository with submodules: `git clone --recursive + https://github.com/sigp/lighthouse`. If you're already cloned the repo, + ensure testing submodules are present: `$ git submodule init; git + submodule update` + 1. Change directory to the root of the repository. + 1. Run the test suite with `cargo test --all --release`. The build and test + process can take several minutes. If you experience any failures on + `master`, please raise an + [issue](https://github.com/sigp/lighthouse/issues). + +## Notes: + +Lighthouse targets Rust `stable` but generally runs on `nightly` too. + +### Note for Windows users: + +Perl may also be required to build lighthouse. You can install [Strawberry +Perl](http://strawberryperl.com/), or alternatively use a choco install command +`choco install strawberryperl`. + +Additionally, the dependency `protoc-grpcio v0.3.1` is reported to have issues +compiling in Windows. You can specify a known working version by editing +version in `protos/Cargo.toml` section to `protoc-grpcio = "<=0.3.0"`. diff --git a/docs/installation.md b/docs/installation.md deleted file mode 100644 index 6d6482ba4..000000000 --- a/docs/installation.md +++ /dev/null @@ -1,40 +0,0 @@ -# Development Environment Setup - -A few basic steps are needed to get set up (skip to #5 if you already have Rust -installed): - - 1. Install [rustup](https://rustup.rs/). It's a toolchain manager for Rust (Linux | macOS | Windows). For installation, download the script with `$ curl -f https://sh.rustup.rs > rustup.sh`, review its content (e.g. `$ less ./rustup.sh`) and run the script `$ ./rustup.sh` (you may need to change the permissions to allow execution, i.e. `$ chmod +x rustup.sh`) - 2. (Linux & MacOS) To configure your current shell run: `$ source $HOME/.cargo/env` - 3. Use the command `rustup show` to get information about the Rust installation. You should see that the - active toolchain is the stable version. - 4. Run `rustc --version` to check the installation and version of rust. - - Updates can be performed using` rustup update` . - 5. Install build dependencies (Arch packages are listed here, your distribution will likely be similar): - - `clang`: required by RocksDB. - - `protobuf`: required for protobuf serialization (gRPC). - - `cmake`: required for building protobuf - - `git-lfs`: The Git extension for [Large File Support](https://git-lfs.github.com/) (required for EF tests submodule). - 6. If you haven't already, clone the repository with submodules: `git clone --recursive https://github.com/sigp/lighthouse`. - Alternatively, run `git submodule init; git submodule update` in a repository which was cloned without submodules. - 7. Change directory to the root of the repository. - 8. Run the test by using command `cargo test --all --release`. By running, it will pass all the required test cases. - If you are doing it for the first time, then you can grab a coffee in the meantime. Usually, it takes time - to build, compile and pass all test cases. If there is no error then it means everything is working properly - and it's time to get your hands dirty. - In case, if there is an error, then please raise the [issue](https://github.com/sigp/lighthouse/issues). - We will help you. - 9. As an alternative to, or instead of the above step, you may also run benchmarks by using - the command `cargo bench --all` - -## Notes: - -Lighthouse targets Rust `stable` but _should_ run on `nightly`. - -### Note for Windows users: - -Perl may also be required to build lighthouse. You can install [Strawberry Perl](http://strawberryperl.com/), -or alternatively use a choco install command `choco install strawberryperl`. - -Additionally, the dependency `protoc-grpcio v0.3.1` is reported to have issues compiling in Windows. You can specify -a known working version by editing version in protos/Cargo.toml's "build-dependencies" section to -`protoc-grpcio = "<=0.3.0"`. diff --git a/docs/interop.md b/docs/interop.md new file mode 100644 index 000000000..5cd3f472a --- /dev/null +++ b/docs/interop.md @@ -0,0 +1,109 @@ +# Lighthouse Inter-Op Docs + +_These documents are intended for a highly technical audience, specifically +Ethereum 2.0 implementers._ + +This document provides details on how to use Lighthouse for inter-op testing. + +## Steps + +_Note: binaries are compiled into the `target/release` directory of the +repository. In this example, we run binaries assuming the user is in this +directory. E.g., running the beacon node binary can be achieved with +`$ ./target/release/beacon_node`. Those familiar with `cargo` may use the +equivalent (and more-convenient) `cargo run --release --` commands._ + +1. Setup a Lighthouse [development environment](env.md). +1. Build all the binaries using `cargo build --all --release` +1. Create default configuration files by running `$ ./beacon_node` and pressing + Ctrl+C after the node has started. +1. Follow the steps in [Genesis](#genesis) to configure the genesis state. +1. Follow the steps in [Networking](#networking) to launch a node with + appropriate networking parameters. + +## Genesis + +Lighthouse supports the following methods for generating a genesis state: + +- [`Yaml`](#yaml): loads the genesis state from some YAML file (recommended + method). +- [`Generated`](#generated): generates a state given a `(validator_count, + genesis_time)` + tuple. _Note: this method is not yet fully specified and the state + generated is almost certainly not identical to other implementations._ +- [`RecentGenesis`](#recentgenesis): identical to `Generated`, however the + `genesis_time` is set + to the previous 30-minute window. For example, if a state is generated at + `0845`, the genesis time will be `0830`. + +You may configure a `beacon_node` to use one of these methods using the +[`beacon_node.toml`](README.md#beacon-nodetoml). There is a [documented +example](config_examples/) configuration file which includes an example for +each of these methods (see the `genesis_state` object). + +### Yaml + +This method involves loading a `BeaconState` from a YAML file. We provide +instructions for generating that YAML file and starting from it. If starting +from a pre-existing YAML file, simply skip the generation steps. + +#### Generating a YAML file + +The [cli_util](/tests/cli_util) generate YAML genesis state files. You can run +`$ ./cli_util genesis_yaml -h` to see documentation. We provide an example to +generate a YAML file with the following properties: + +- 10 initial validators, each with [deterministic + keypairs](https://github.com/ethereum/eth2.0-pm/issues/60#issuecomment-512157915). +- The genesis file is stored in `~/.lighthouse/`, the default data directory + (an absolute path must be supplied). +- Genesis time is set to the time when the command is run (it can be customized + with the `-g` flag). + +``` +$ ./cli_util genesis_yaml -n 10 -f /home/user/.lighthouse/genesis_state.yaml +``` + +#### Configuring the Beacon Node + +Modify the [`beacon-node.toml`](README.md#beacon-nodetoml) file to have the +following `genesiss_state` object (choosing the `file`): + +``` +[genesis_state] +type = "Yaml" +file = "/home/user/.lighthouse/genesis_state.yaml" +``` + +### Generated + +Modify the [`beacon-node.toml`](README.md#beacon-nodetoml) file to have the +following `genesis_state` object (choosing the `validator_count` and +`genesis_time`): + +``` +[genesis_state] +type = "Generated" +validator_count = 16 +genesis_time = 1564620118 +``` + +### RecentGenesis + +Modify the [`beacon-node.toml`](README.md#beacon-nodetoml) file to have the +following `genesis_state` object (choosing the `validator_count`): + +``` +[genesis_state] +type = "RecentGenesis" +validator_count = 16 +``` + +## Networking + +_TODO: provide details on config required to connect to some IP address._ + +## References + +The BLS key generation method used should be identical to [this +implementation](https://github.com/ethereum/eth2.0-pm/issues/60#issuecomment-512157915). diff --git a/docs/lighthouse.md b/docs/lighthouse.md deleted file mode 100644 index 16da13b56..000000000 --- a/docs/lighthouse.md +++ /dev/null @@ -1,83 +0,0 @@ -# About Lighthouse - -## Goals - -The purpose of this project is to work alongside the Ethereum community to -implement a secure, trustworthy, open-source Ethereum Serenity client in Rust. - -* **Security**: Lighthouse's main goal is to implement everything with a -security-first mindset. The goal is to ensure that all components of lighthouse -are thoroughly tested, checked and secure. - -* **Trust** : As Ethereum Serenity is a Proof-of-Stake system, which -involves the interaction of the Ethereum protocol and user funds. Thus, a goal -of Lighthouse is to provide a client that is trustworthy. - - All code can be tested and verified the goal of Lighthouse is to provide code -that is trusted. - -* **Transparency**: Lighthouse aims at being as transparent as possible. This -goal is for Lighthouse to embrace the open-source community and allow for all -to understand the decisions, direction and changes in all aspects. - -* **Error Resilience**: As Lighthouse embraces the "never `panic`" mindset, the -goal is to be resilient to errors that may occur. Providing a client that has -tolerance against errors provides further properties for a secure, trustworthy -client that Lighthouse aims to provide. - -In addition to implementing a new client, the project seeks to maintain and -improve the Ethereum protocol wherever possible. - -## Ideology - -### Never Panic - -Lighthouse will be the gateway interacting with the Proof-of-Stake system -employed by Ethereum. This requires the validation and proposal of blocks -and extremely timely responses. As part of this, Lighthouse aims to ensure -the most uptime as possible, meaning minimising the amount of -exceptions and gracefully handling any issues. - -Rust's `panic` provides the ability to throw an exception and exit, this -will terminate the running processes. Thus, Lighthouse aims to use `panic` -as little as possible to minimise the possible termination cases. - -### Security First Mindset - -Lighthouse aims to provide a safe, secure Serenity client for the Ethereum -ecosystem. At each step of development, the aim is to have a security-first -mindset and always ensure you are following the safe, secure mindset. When -contributing to any part of the Lighthouse client, through any development, -always ensure you understand each aspect thoroughly and cover all potential -security considerations of your code. - -### Functions aren't completed until they are tested - -As part of the Security First mindset, we want to aim to cover as many distinct -cases. A function being developed is not considered "completed" until tests -exist for that function. The tests not only help show the correctness of the -function, but also provide a way for new developers to understand how the -function is to be called and how it works. - - -## Engineering Ethos - -Lighthouse aims to produce many small easily-tested components, each separated -into individual crates wherever possible. - -Generally, tests can be kept in the same file, as is typical in Rust. -Integration tests should be placed in the `tests` directory in the crate's -root. Particularly large (line-count) tests should be placed into a separate -file. - -A function is not considered complete until a test exists for it. We produce -tests to protect against regression (accidentally breaking things) and to -provide examples that help readers of the code base understand how functions -should (or should not) be used. - -Each pull request is to be reviewed by at least one "core developer" (i.e., -someone with write-access to the repository). This helps to ensure bugs are -detected, consistency is maintained, and responsibility of errors is dispersed. - -Discussion must be respectful and intellectual. Have fun and make jokes, but -always respect the limits of other people. diff --git a/docs/onboarding.md b/docs/onboarding.md deleted file mode 100644 index 1937271a0..000000000 --- a/docs/onboarding.md +++ /dev/null @@ -1,233 +0,0 @@ -# Contributing to Lighthouse - -[![Gitter](https://badges.gitter.im/Join%20Chat.svg)](https://gitter.im/sigp/lighthouse?utm_source=badge&utm_medium=badge&utm_campaign=pr-badge) - -Lighthouse is an open-source Ethereum Serenity client built in -[Rust](https://www.rust-lang.org/). - -Lighthouse welcomes all contributions with open arms. If you are interested in -contributing to the Ethereum ecosystem, and you want to learn Rust, Lighthouse -is a great project to work on. - -This documentation aims to provide a smooth on-boarding for all who wish to -help contribute to Lighthouse. Whether it is helping with the mountain of -documentation, writing extra tests or developing components, all help is -appreciated and your contributions will help not only the community but all -the contributors. - -We've bundled up our Goals, Ethos and Ideology into one document for you to -read through, please read our [About Lighthouse](lighthouse.md) docs. :smile: - -Layer-1 infrastructure is a critical component for the ecosystem and relies -heavily on contributions from the community. Building Ethereum Serenity is a -huge task and we refuse to conduct an inappropriate ICO or charge licensing -fees. Instead, we fund development through grants and support from Sigma -Prime. - -If you have any additional questions, please feel free to jump on the -[gitter](https://gitter.im/sigp/lighthouse) and have a chat with all of us. - -**Pre-reading Materials:** - -* [About Lighthouse](lighthouse.md) -* [Ethereum Serenity](serenity.md) - -**Repository** - -If you'd like to contribute, try having a look through the [open -issues](https://github.com/sigp/lighthouse/issues) (tip: look for the [good -first -issue](https://github.com/sigp/lighthouse/issues?q=is%3Aissue+is%3Aopen+label%3A%22good+first+issue%22) -tag) and ping us on the [gitter](https://gitter.im/sigp/lighthouse) channel. We need -your support! - -## Understanding Serenity - -Ethereum's Serenity is based on a Proof-of-Stake based sharded beacon chain. - -(*If you don't know what that is, don't `panic`, that's what this documentation -is for!* :smile:) - -Read through our [Understanding -Serenity](https://github.com/sigp/lighthouse/blob/master/docs/serenity.md) docs -to learn more! :smile: (*unless you've already read it.*) - -The document explains the necessary fundamentals for understanding Ethereum, -Proof-of-Stake and the Serenity we are working towards. - -## Development Onboarding - -If you would like to contribute and develop Lighthouse, there are only a few -things to go through (and then you're on your way!). - -### Understanding Rust - -Rust is an extremely powerful, low-level programming language that provides -freedom and performance to create powerful projects. The [Rust -Book](https://doc.rust-lang.org/stable/book/) provides insight into the Rust -language and some of the coding style to follow (As well as acting as a great -introduction and tutorial for the language.) - -Rust has a steep learning curve, but there are many resources to help you! - -* [Rust Book](https://doc.rust-lang.org/stable/book/) -* [Rust by example](https://doc.rust-lang.org/stable/rust-by-example/) -* [Learning Rust With Entirely Too Many Linked Lists](http://cglab.ca/~abeinges/blah/too-many-lists/book/) -* [Rustlings](https://github.com/rustlings/rustlings) -* [Rust Exercism](https://exercism.io/tracks/rust) -* [Learn X in Y minutes - Rust](https://learnxinyminutes.com/docs/rust/) - - -#### Getting Started and installing Rust - -We recommend installing Rust using [**rustup**](https://rustup.rs/). Rustup -allows you to easily install versions of rust. - -**Linux/Unix/Mac:** - -``` -$ curl https://sh.rustup.rs -sSf | sh -``` - -**Windows (You need a bit more):** -* Install the Visual Studio 2015 with C++ support -* Install Rustup using: https://static.rust-lang.org/rustup/dist/x86_64-pc-windows-msvc/rustup-init.exe -* You can then use the ``VS2015 x64 Native Tools Command Prompt`` and run: - -``` -rustup default stable-x86-64-pc-windows-msvc -``` - -#### Getting ready with Cargo - -[Cargo](https://doc.rust-lang.org/cargo/) is the package manager for Rust, and -allows to extend to a number of packages and external libraries. It's also extremely -handy for handling dependencies and helping to modularise your project better. - -*Note: If you've installed rust through rustup, you should have ``cargo`` -installed.* - -#### Rust Terminology - -When developing rust, you'll come across some terminology that differs to -other programming languages you may have used. - -* **Trait**: A trait is a collection of methods defined for a type, they can be -implemented for any data type. -* **Struct**: A custom data type that lets us name and package together -multiple related values that make a meaningful group. -* **Crate**: A crate is synonymous with a *library* or *package* in other -languages. They can produce an executable or library depending on the -project. -* **Module**: A collection of items: functions, structs, traits, and even other -modules. Modules allow you to hierarchically split code into logical units -and manage visibility. -* **Attribute**: Metadata applied to some module, crate or item. -* **Macros**: Macros are powerful meta-programming statements that get expanded -into source code that gets compiled with the rest of the code (Unlike `C` -macros that are pre-processed, Rust macros form an Abstract Syntax Tree). - - -Other good appendix resources: - -* [Keywords](https://doc.rust-lang.org/book/appendix-01-keywords.html) -* [Operators/Symbols](https://doc.rust-lang.org/book/appendix-02-operators.html) -* [Traits](https://doc.rust-lang.org/book/appendix-03-derivable-traits.html) - - -### Understanding the Git Workflow - -Lighthouse utilises git as the primary open-source development tool. To help -with your contributions, it is great to understand the processes used to ensure -everything remains in sync and there's as little conflict as possible when -working on similar files. - -Lighthouse uses the **feature branch** workflow, where each issue, or each -feature, is developed on its own branch and then merged in via a pull-request. - -* [Feature Branch Tutorial](https://www.atlassian.com/git/tutorials/comparing-workflows/feature-branch-workflow) - -## Code Conventions/Styleguide and Ethos - -### Ethos - -**Pull Requests** - -Pull requests should be reviewed by **at least** one "*core developer*" -(someone with write-access to the repo). This should ensure bugs are caught and -the code is kept in a consistent state that follows all conventions and style. - -All discussion (whether in PRs or Issues or in the Gitter) should be respectful -and intellectual. Have fun, but always respect the limits of other people. - -**Testing** - -*"A function is not considered complete until tests exist for it."* - -Generally, tests can be self-contained in the same file. Integration tests -should be added into the ``tests/`` directory in the crate's **root**. - -Large line-count tests should be in a separate file. - -### Rust StyleGuide - -Lighthouse adheres to Rust code conventions as outlined in the [**Rust -Styleguide**](https://github.com/rust-dev-tools/fmt-rfcs/blob/master/guide/guide.md). - -Ensure you use [Clippy](https://github.com/rust-lang/rust-clippy) to lint and -check your code. - -| Code Aspect | Guideline Format | -|:--------------------|:-------------------------------| -| Types | ``UpperCamelCase`` | -| Enums/Enum Variants | ``UpperCamelCase`` | -| Struct Fields | ``snake_case`` | -| Function / Method | ``snake_case`` | -| Macro Names | ``snake_case`` | -| Constants | ``SCREAMING_SNAKE_CASE`` | -| Forbidden name | Trailing Underscore: ``name_`` | - -Other general rust docs: - -* [Rust Other Style Advice](https://github.com/rust-dev-tools/fmt-rfcs/blob/master/guide/advice.md) -* [Cargo.toml Conventions](https://github.com/rust-dev-tools/fmt-rfcs/blob/master/guide/cargo.md) - -### TODOs - -All `TODO` statements should be accompanied by a GitHub issue. - -```rust -pub fn my_function(&mut self, _something &[u8]) -> Result { - // TODO: something_here - // https://github.com/sigp/lighthouse/issues/XX -} -``` - -### Comments - -**General Comments** - -* Prefer line (``//``) comments to block comments (``/* ... */``) -* Comments can appear on the line prior to the item or after a trailing space. -```rust -// Comment for this struct -struct Lighthouse {} - -fn make_blockchain() {} // A comment on the same line after a space -``` - -**Doc Comments** - -* The ``///`` is used to generate comments for Docs. -* The comments should come before attributes. - -```rust -/// Stores the core configuration for this Lighthouse instance. -/// This struct is general, other components may implement more -/// specialized config structs. -#[derive(Clone)] -pub struct LighthouseConfig { - pub data_dir: PathBuf, - pub p2p_listen_port: u16, -} -``` diff --git a/eth2/types/Cargo.toml b/eth2/types/Cargo.toml index a49e46d93..ae707bc2c 100644 --- a/eth2/types/Cargo.toml +++ b/eth2/types/Cargo.toml @@ -11,6 +11,7 @@ compare_fields = { path = "../utils/compare_fields" } compare_fields_derive = { path = "../utils/compare_fields_derive" } dirs = "1.0" derivative = "1.0" +eth2_interop_keypairs = { path = "../utils/eth2_interop_keypairs" } ethereum-types = "0.5" hashing = { path = "../utils/hashing" } hex = "0.3" diff --git a/eth2/types/src/beacon_block_body.rs b/eth2/types/src/beacon_block_body.rs index b1252420f..fe8b18706 100644 --- a/eth2/types/src/beacon_block_body.rs +++ b/eth2/types/src/beacon_block_body.rs @@ -1,4 +1,5 @@ -use crate::test_utils::{graffiti_from_hex_str, TestRandom}; +use crate::test_utils::TestRandom; +use crate::utils::graffiti_from_hex_str; use crate::*; use serde_derive::{Deserialize, Serialize}; diff --git a/eth2/types/src/beacon_state.rs b/eth2/types/src/beacon_state.rs index 129b05f79..6b533feec 100644 --- a/eth2/types/src/beacon_state.rs +++ b/eth2/types/src/beacon_state.rs @@ -134,13 +134,13 @@ where pub finalized_checkpoint: Checkpoint, // Caching (not in the spec) - #[serde(default)] + #[serde(skip_serializing, skip_deserializing)] #[ssz(skip_serializing)] #[ssz(skip_deserializing)] #[tree_hash(skip_hashing)] #[test_random(default)] pub committee_caches: [CommitteeCache; CACHED_EPOCHS], - #[serde(default)] + #[serde(skip_serializing, skip_deserializing)] #[ssz(skip_serializing)] #[ssz(skip_deserializing)] #[tree_hash(skip_hashing)] diff --git a/eth2/types/src/chain_spec.rs b/eth2/types/src/chain_spec.rs index e42b628ac..2128c6ef1 100644 --- a/eth2/types/src/chain_spec.rs +++ b/eth2/types/src/chain_spec.rs @@ -1,7 +1,7 @@ use crate::*; use int_to_bytes::int_to_bytes4; use serde_derive::{Deserialize, Serialize}; -use test_utils::{u8_from_hex_str, u8_to_hex_str}; +use utils::{u8_from_hex_str, u8_to_hex_str}; /// Each of the BLS signature domains. /// diff --git a/eth2/types/src/fork.rs b/eth2/types/src/fork.rs index be75d5ca2..2f618f91a 100644 --- a/eth2/types/src/fork.rs +++ b/eth2/types/src/fork.rs @@ -1,7 +1,6 @@ -use crate::{ - test_utils::{fork_from_hex_str, TestRandom}, - Epoch, -}; +use crate::test_utils::TestRandom; +use crate::utils::{fork_from_hex_str, fork_to_hex_str}; +use crate::Epoch; use serde_derive::{Deserialize, Serialize}; use ssz_derive::{Decode, Encode}; @@ -25,9 +24,15 @@ use tree_hash_derive::{CachedTreeHash, TreeHash}; TestRandom, )] pub struct Fork { - #[serde(deserialize_with = "fork_from_hex_str")] + #[serde( + serialize_with = "fork_to_hex_str", + deserialize_with = "fork_from_hex_str" + )] pub previous_version: [u8; 4], - #[serde(deserialize_with = "fork_from_hex_str")] + #[serde( + serialize_with = "fork_to_hex_str", + deserialize_with = "fork_from_hex_str" + )] pub current_version: [u8; 4], pub epoch: Epoch, } diff --git a/eth2/types/src/lib.rs b/eth2/types/src/lib.rs index a8dd04a45..3edf8b36b 100644 --- a/eth2/types/src/lib.rs +++ b/eth2/types/src/lib.rs @@ -30,6 +30,7 @@ pub mod indexed_attestation; pub mod pending_attestation; pub mod proposer_slashing; pub mod transfer; +pub mod utils; pub mod voluntary_exit; #[macro_use] pub mod slot_epoch_macros; diff --git a/eth2/types/src/test_utils/builders/testing_beacon_state_builder.rs b/eth2/types/src/test_utils/builders/testing_beacon_state_builder.rs index a9383242f..98f840953 100644 --- a/eth2/types/src/test_utils/builders/testing_beacon_state_builder.rs +++ b/eth2/types/src/test_utils/builders/testing_beacon_state_builder.rs @@ -133,6 +133,9 @@ impl TestingBeaconStateBuilder { spec, ); + state.eth1_data.deposit_count = validator_count as u64; + state.eth1_deposit_index = validator_count as u64; + let balances = vec![starting_balance; validator_count].into(); debug!("Importing {} existing validators...", validator_count); diff --git a/eth2/types/src/test_utils/generate_deterministic_keypairs.rs b/eth2/types/src/test_utils/generate_deterministic_keypairs.rs index ec1c15f29..172b142ef 100644 --- a/eth2/types/src/test_utils/generate_deterministic_keypairs.rs +++ b/eth2/types/src/test_utils/generate_deterministic_keypairs.rs @@ -1,14 +1,12 @@ use crate::*; -use int_to_bytes::int_to_bytes48; +use eth2_interop_keypairs::be_private_key; use log::debug; use rayon::prelude::*; -/// Generates `validator_count` keypairs where the secret key is the index of the -/// validator. +/// Generates `validator_count` keypairs where the secret key is derived solely from the index of +/// the validator. /// -/// For example, the first validator has a secret key of `int_to_bytes48(1)`, the second has -/// `int_to_bytes48(2)` and so on. (We skip `0` as it generates a weird looking public key and is -/// probably invalid). +/// Uses the `eth2_interop_keypairs` crate to generate keys. pub fn generate_deterministic_keypairs(validator_count: usize) -> Vec { debug!( "Generating {} deterministic validator keypairs...", @@ -20,6 +18,7 @@ pub fn generate_deterministic_keypairs(validator_count: usize) -> Vec { .par_iter() .map(|&i| generate_deterministic_keypair(i)) .collect(); + keypairs } @@ -27,8 +26,8 @@ pub fn generate_deterministic_keypairs(validator_count: usize) -> Vec { /// /// This is used for testing only, and not to be used in production! pub fn generate_deterministic_keypair(validator_index: usize) -> Keypair { - let secret = int_to_bytes48(validator_index as u64 + 1000); - let sk = SecretKey::from_bytes(&secret).unwrap(); + let sk = SecretKey::from_bytes(&be_private_key(validator_index)) + .expect("be_private_key always returns valid keys"); let pk = PublicKey::from_secret_key(&sk); Keypair { sk, pk } } diff --git a/eth2/types/src/test_utils/mod.rs b/eth2/types/src/test_utils/mod.rs index b5ec7a027..9ca9ca78a 100644 --- a/eth2/types/src/test_utils/mod.rs +++ b/eth2/types/src/test_utils/mod.rs @@ -3,7 +3,6 @@ mod macros; mod builders; mod generate_deterministic_keypairs; mod keypairs_file; -mod serde_utils; mod test_random; pub use builders::*; @@ -14,5 +13,4 @@ pub use rand::{ RngCore, {prng::XorShiftRng, SeedableRng}, }; -pub use serde_utils::{fork_from_hex_str, graffiti_from_hex_str, u8_from_hex_str, u8_to_hex_str}; pub use test_random::TestRandom; diff --git a/eth2/types/src/utils.rs b/eth2/types/src/utils.rs new file mode 100644 index 000000000..51af86692 --- /dev/null +++ b/eth2/types/src/utils.rs @@ -0,0 +1,3 @@ +mod serde_utils; + +pub use serde_utils::*; diff --git a/eth2/types/src/test_utils/serde_utils.rs b/eth2/types/src/utils/serde_utils.rs similarity index 82% rename from eth2/types/src/test_utils/serde_utils.rs rename to eth2/types/src/utils/serde_utils.rs index 079551b58..4b46fc0dc 100644 --- a/eth2/types/src/test_utils/serde_utils.rs +++ b/eth2/types/src/utils/serde_utils.rs @@ -1,3 +1,4 @@ +use hex; use serde::de::Error; use serde::{Deserialize, Deserializer, Serializer}; @@ -32,7 +33,7 @@ where let mut array = [0 as u8; FORK_BYTES_LEN]; let decoded: Vec = hex::decode(&s.as_str()[2..]).map_err(D::Error::custom)?; - if decoded.len() > FORK_BYTES_LEN { + if decoded.len() != FORK_BYTES_LEN { return Err(D::Error::custom("Fork length too long")); } @@ -45,6 +46,17 @@ where Ok(array) } +// #[allow(clippy::trivially_copy_pass_by_ref)] // Serde requires the `byte` to be a ref. +pub fn fork_to_hex_str(bytes: &[u8; 4], serializer: S) -> Result +where + S: Serializer, +{ + let mut hex_string: String = "0x".to_string(); + hex_string.push_str(&hex::encode(&bytes)); + + serializer.serialize_str(&hex_string) +} + pub fn graffiti_from_hex_str<'de, D>(deserializer: D) -> Result<[u8; GRAFFITI_BYTES_LEN], D::Error> where D: Deserializer<'de>, diff --git a/eth2/utils/eth2_interop_keypairs/Cargo.toml b/eth2/utils/eth2_interop_keypairs/Cargo.toml new file mode 100644 index 000000000..e1c4dab04 --- /dev/null +++ b/eth2/utils/eth2_interop_keypairs/Cargo.toml @@ -0,0 +1,11 @@ +[package] +name = "eth2_interop_keypairs" +version = "0.1.0" +authors = ["Paul Hauner "] +edition = "2018" + +# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html + +[dependencies] +num-bigint = "0.2" +eth2_hashing = "0.1" diff --git a/eth2/utils/eth2_interop_keypairs/src/lib.rs b/eth2/utils/eth2_interop_keypairs/src/lib.rs new file mode 100644 index 000000000..8ba2b9eba --- /dev/null +++ b/eth2/utils/eth2_interop_keypairs/src/lib.rs @@ -0,0 +1,130 @@ +//! Produces the "deterministic" validator private keys used for inter-operability testing for +//! Ethereum 2.0 clients. +//! +//! Each private key is the first hash in the sha2 hash-chain that is less than 2^255. As such, +//! keys generated here are **not secret** and are **not for production use**. +//! +//! Note: these keys have not been tested against a reference implementation, yet. + +use eth2_hashing::hash; +use num_bigint::BigUint; + +pub const CURVE_ORDER_BITS: usize = 255; +pub const PRIVATE_KEY_BYTES: usize = 48; +pub const HASH_BYTES: usize = 32; + +fn hash_big_int_le(uint: BigUint) -> BigUint { + let mut preimage = uint.to_bytes_le(); + preimage.resize(32, 0_u8); + BigUint::from_bytes_le(&hash(&preimage)) +} + +fn private_key(validator_index: usize) -> BigUint { + let mut key = BigUint::from(validator_index); + loop { + key = hash_big_int_le(key); + if key.bits() <= CURVE_ORDER_BITS { + break key; + } + } +} + +/// Generates an **unsafe** BLS12-381 private key for the given validator index, where that private +/// key is represented in big-endian bytes. +pub fn be_private_key(validator_index: usize) -> [u8; PRIVATE_KEY_BYTES] { + let vec = private_key(validator_index).to_bytes_be(); + + let mut out = [0; PRIVATE_KEY_BYTES]; + out[PRIVATE_KEY_BYTES - vec.len()..PRIVATE_KEY_BYTES].copy_from_slice(&vec); + out +} + +/// Generates an **unsafe** BLS12-381 private key for the given validator index, where that private +/// key is represented in little-endian bytes. +pub fn le_private_key(validator_index: usize) -> [u8; PRIVATE_KEY_BYTES] { + let vec = private_key(validator_index).to_bytes_le(); + + let mut out = [0; PRIVATE_KEY_BYTES]; + out[0..vec.len()].copy_from_slice(&vec); + out +} + +#[cfg(test)] +mod tests { + use super::*; + + fn flip(vec: &[u8]) -> Vec { + let len = vec.len(); + let mut out = vec![0; len]; + for i in 0..len { + out[len - 1 - i] = vec[i]; + } + out + } + + fn pad_le_bls(mut vec: Vec) -> Vec { + vec.resize(PRIVATE_KEY_BYTES, 0_u8); + vec + } + + fn pad_be_bls(mut vec: Vec) -> Vec { + let mut out = vec![0; PRIVATE_KEY_BYTES - vec.len()]; + out.append(&mut vec); + out + } + + fn pad_le_hash(index: usize) -> Vec { + let mut vec = index.to_le_bytes().to_vec(); + vec.resize(HASH_BYTES, 0_u8); + vec + } + + fn multihash(index: usize, rounds: usize) -> Vec { + let mut vec = pad_le_hash(index); + for _ in 0..rounds { + vec = hash(&vec); + } + vec + } + + fn compare(validator_index: usize, preimage: &[u8]) { + assert_eq!( + &le_private_key(validator_index)[..], + &pad_le_bls(hash(preimage))[..] + ); + assert_eq!( + &be_private_key(validator_index)[..], + &pad_be_bls(flip(&hash(preimage)))[..] + ); + } + + #[test] + fn consistency() { + for i in 0..256 { + let le = BigUint::from_bytes_le(&le_private_key(i)); + let be = BigUint::from_bytes_be(&be_private_key(i)); + assert_eq!(le, be); + } + } + + #[test] + fn non_repeats() { + // These indices only need one hash to be in the curve order. + compare(0, &pad_le_hash(0)); + compare(3, &pad_le_hash(3)); + } + + #[test] + fn repeats() { + // Index 5 needs 5x hashes to get into the curve order. + compare(5, &multihash(5, 5)); + } + + #[test] + fn doesnt_panic() { + for i in 0..256 { + be_private_key(i); + le_private_key(i); + } + } +} diff --git a/tests/cli_util/.gitignore b/tests/cli_util/.gitignore new file mode 100644 index 000000000..c3f19cc7f --- /dev/null +++ b/tests/cli_util/.gitignore @@ -0,0 +1 @@ +genesis_state.yaml diff --git a/tests/cli_util/Cargo.toml b/tests/cli_util/Cargo.toml new file mode 100644 index 000000000..7690d5a87 --- /dev/null +++ b/tests/cli_util/Cargo.toml @@ -0,0 +1,15 @@ +[package] +name = "cli_util" +version = "0.1.0" +authors = ["Paul Hauner "] +edition = "2018" + +# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html + +[dependencies] +clap = "2.33" +log = "0.4" +serde = "1.0" +serde_yaml = "0.8" +simple_logger = "1.0" +types = { path = "../../eth2/types" } diff --git a/tests/cli_util/src/main.rs b/tests/cli_util/src/main.rs new file mode 100644 index 000000000..330a0d171 --- /dev/null +++ b/tests/cli_util/src/main.rs @@ -0,0 +1,118 @@ +#[macro_use] +extern crate log; + +use clap::{App, Arg, SubCommand}; +use std::fs::File; +use std::path::PathBuf; +use std::time::{SystemTime, UNIX_EPOCH}; +use types::{test_utils::TestingBeaconStateBuilder, EthSpec, MainnetEthSpec, MinimalEthSpec}; + +fn main() { + simple_logger::init().expect("logger should initialize"); + + let matches = App::new("Lighthouse Testing CLI Tool") + .version("0.1.0") + .author("Paul Hauner ") + .about("Performs various testing-related tasks.") + .subcommand( + SubCommand::with_name("genesis_yaml") + .about("Generates a genesis YAML file") + .version("0.1.0") + .author("Paul Hauner ") + .arg( + Arg::with_name("num_validators") + .short("n") + .value_name("INTEGER") + .takes_value(true) + .required(true) + .help("Number of initial validators."), + ) + .arg( + Arg::with_name("genesis_time") + .short("g") + .value_name("INTEGER") + .takes_value(true) + .required(false) + .help("Eth2 genesis time (seconds since UNIX epoch)."), + ) + .arg( + Arg::with_name("spec") + .short("s") + .value_name("STRING") + .takes_value(true) + .required(true) + .possible_values(&["minimal", "mainnet"]) + .default_value("minimal") + .help("Eth2 genesis time (seconds since UNIX epoch)."), + ) + .arg( + Arg::with_name("output_file") + .short("f") + .value_name("PATH") + .takes_value(true) + .default_value("./genesis_state.yaml") + .help("Output file for generated state."), + ), + ) + .get_matches(); + + if let Some(matches) = matches.subcommand_matches("genesis_yaml") { + let num_validators = matches + .value_of("num_validators") + .expect("slog requires num_validators") + .parse::() + .expect("num_validators must be a valid integer"); + + let genesis_time = if let Some(string) = matches.value_of("genesis_time") { + string + .parse::() + .expect("genesis_time must be a valid integer") + } else { + warn!("No genesis time supplied via CLI, using the current time."); + SystemTime::now() + .duration_since(UNIX_EPOCH) + .expect("should obtain time since unix epoch") + .as_secs() + }; + + let file = matches + .value_of("output_file") + .expect("slog requires output file") + .parse::() + .expect("output_file must be a valid path"); + + info!( + "Creating genesis state with {} validators and genesis time {}.", + num_validators, genesis_time + ); + + match matches.value_of("spec").expect("spec is required by slog") { + "minimal" => genesis_yaml::(num_validators, genesis_time, file), + "mainnet" => genesis_yaml::(num_validators, genesis_time, file), + _ => unreachable!("guarded by slog possible_values"), + }; + + info!("Genesis state YAML file created. Exiting successfully."); + } else { + error!("No subcommand supplied.") + } +} + +/// Creates a genesis state and writes it to a YAML file. +fn genesis_yaml(validator_count: usize, genesis_time: u64, output: PathBuf) { + let spec = &T::default_spec(); + + let builder: TestingBeaconStateBuilder = + TestingBeaconStateBuilder::from_default_keypairs_file_if_exists(validator_count, spec); + + let (mut state, _keypairs) = builder.build(); + state.genesis_time = genesis_time; + + info!("Generated state root: {:?}", state.canonical_root()); + + info!("Writing genesis state to {:?}", output); + + let file = File::create(output.clone()) + .unwrap_or_else(|e| panic!("unable to create file: {:?}. Error: {:?}", output, e)); + serde_yaml::to_writer(file, &state).expect("should be able to serialize BeaconState"); +} From 01054ecf2f75b81ef71846309b836d6b0930e4d6 Mon Sep 17 00:00:00 2001 From: blacktemplar Date: Tue, 6 Aug 2019 05:49:11 +0200 Subject: [PATCH 24/25] Use SignatureBytes and PublicKeyBytes for deposits (#472) * Replace deposit signatures with SignatureBytes, a struct which lazyly parsers signatures only on demand. * check byte length when parsing SignatureBytes * add comment to struct * distinguish BadSignature and BadSignatureBytes in verify_deposit_signature * add test for valid signature * Implements TryInto for &SignatureBytes and From for &SignatureBytes * add and use PublicKeyBytes + fix formatting * fix compiler warning + docs for macro generated structs * adds tests to ensure correct byte lengths * small style improvement as suggested by michaelsproul --- .../src/per_block_processing.rs | 11 +- .../src/per_block_processing/errors.rs | 2 + .../per_block_processing/verify_deposit.rs | 15 ++- eth2/types/src/deposit_data.rs | 11 +- .../builders/testing_deposit_builder.rs | 8 +- eth2/types/src/test_utils/test_random.rs | 4 + .../test_random/public_key_bytes.rs | 19 +++ .../test_utils/test_random/signature_bytes.rs | 17 +++ eth2/utils/bls/src/lib.rs | 4 + eth2/utils/bls/src/macros.rs | 108 ++++++++++++++++++ eth2/utils/bls/src/public_key.rs | 9 ++ eth2/utils/bls/src/public_key_bytes.rs | 43 +++++++ eth2/utils/bls/src/signature.rs | 9 ++ eth2/utils/bls/src/signature_bytes.rs | 44 +++++++ 14 files changed, 286 insertions(+), 18 deletions(-) create mode 100644 eth2/types/src/test_utils/test_random/public_key_bytes.rs create mode 100644 eth2/types/src/test_utils/test_random/signature_bytes.rs create mode 100644 eth2/utils/bls/src/public_key_bytes.rs create mode 100644 eth2/utils/bls/src/signature_bytes.rs diff --git a/eth2/state_processing/src/per_block_processing.rs b/eth2/state_processing/src/per_block_processing.rs index 10ca6e370..3c8921555 100644 --- a/eth2/state_processing/src/per_block_processing.rs +++ b/eth2/state_processing/src/per_block_processing.rs @@ -2,6 +2,7 @@ use crate::common::{initiate_validator_exit, slash_validator}; use errors::{BlockInvalid as Invalid, BlockProcessingError as Error, IntoWithIndex}; use rayon::prelude::*; use std::collections::HashSet; +use std::convert::TryInto; use std::iter::FromIterator; use tree_hash::{SignedRoot, TreeHash}; use types::*; @@ -396,9 +397,13 @@ pub fn process_deposit( // depositing validator already exists in the registry. state.update_pubkey_cache()?; + let pubkey: PublicKey = match (&deposit.data.pubkey).try_into() { + Err(_) => return Ok(()), //bad public key => return early + Ok(k) => k, + }; // Get an `Option` where `u64` is the validator index if this deposit public key // already exists in the beacon_state. - let validator_index = get_existing_validator_index(state, deposit) + let validator_index = get_existing_validator_index(state, &pubkey) .map_err(|e| e.into_with_index(deposit_index))?; let amount = deposit.data.amount; @@ -409,13 +414,13 @@ pub fn process_deposit( } else { // The signature should be checked for new validators. Return early for a bad // signature. - if verify_deposit_signature(state, deposit, spec).is_err() { + if verify_deposit_signature(state, deposit, spec, &pubkey).is_err() { return Ok(()); } // Create a new validator. let validator = Validator { - pubkey: deposit.data.pubkey.clone(), + pubkey, withdrawal_credentials: deposit.data.withdrawal_credentials, activation_eligibility_epoch: spec.far_future_epoch, activation_epoch: spec.far_future_epoch, diff --git a/eth2/state_processing/src/per_block_processing/errors.rs b/eth2/state_processing/src/per_block_processing/errors.rs index e2b908c73..65179167c 100644 --- a/eth2/state_processing/src/per_block_processing/errors.rs +++ b/eth2/state_processing/src/per_block_processing/errors.rs @@ -349,6 +349,8 @@ pub enum DepositInvalid { BadIndex { state: u64, deposit: u64 }, /// The signature (proof-of-possession) does not match the given pubkey. BadSignature, + /// The signature does not represent a valid BLS signature. + BadSignatureBytes, /// The specified `branch` and `index` did not form a valid proof that the deposit is included /// in the eth1 deposit root. BadMerkleProof, diff --git a/eth2/state_processing/src/per_block_processing/verify_deposit.rs b/eth2/state_processing/src/per_block_processing/verify_deposit.rs index 5642c7a5f..0ce25a0b2 100644 --- a/eth2/state_processing/src/per_block_processing/verify_deposit.rs +++ b/eth2/state_processing/src/per_block_processing/verify_deposit.rs @@ -1,5 +1,6 @@ use super::errors::{DepositInvalid as Invalid, DepositValidationError as Error}; use merkle_proof::verify_merkle_proof; +use std::convert::TryInto; use tree_hash::{SignedRoot, TreeHash}; use types::*; @@ -10,15 +11,17 @@ pub fn verify_deposit_signature( state: &BeaconState, deposit: &Deposit, spec: &ChainSpec, + pubkey: &PublicKey, ) -> Result<(), Error> { // Note: Deposits are valid across forks, thus the deposit domain is computed // with the fork zeroed. let domain = spec.get_domain(state.current_epoch(), Domain::Deposit, &Fork::default()); + let signature: Signature = (&deposit.data.signature) + .try_into() + .map_err(|_| Error::Invalid(Invalid::BadSignatureBytes))?; + verify!( - deposit - .data - .signature - .verify(&deposit.data.signed_root(), domain, &deposit.data.pubkey,), + signature.verify(&deposit.data.signed_root(), domain, pubkey), Invalid::BadSignature ); @@ -33,9 +36,9 @@ pub fn verify_deposit_signature( /// Errors if the state's `pubkey_cache` is not current. pub fn get_existing_validator_index( state: &BeaconState, - deposit: &Deposit, + pub_key: &PublicKey, ) -> Result, Error> { - let validator_index = state.get_validator_index(&deposit.data.pubkey)?; + let validator_index = state.get_validator_index(pub_key)?; Ok(validator_index.map(|idx| idx as u64)) } diff --git a/eth2/types/src/deposit_data.rs b/eth2/types/src/deposit_data.rs index 8e5088889..4dc7689cd 100644 --- a/eth2/types/src/deposit_data.rs +++ b/eth2/types/src/deposit_data.rs @@ -1,6 +1,7 @@ use crate::test_utils::TestRandom; use crate::*; -use bls::{PublicKey, Signature}; +use bls::{PublicKeyBytes, SignatureBytes}; +use std::convert::From; use serde_derive::{Deserialize, Serialize}; use ssz_derive::{Decode, Encode}; @@ -25,11 +26,11 @@ use tree_hash_derive::{CachedTreeHash, SignedRoot, TreeHash}; TestRandom, )] pub struct DepositData { - pub pubkey: PublicKey, + pub pubkey: PublicKeyBytes, pub withdrawal_credentials: Hash256, pub amount: u64, #[signed_root(skip_hashing)] - pub signature: Signature, + pub signature: SignatureBytes, } impl DepositData { @@ -42,11 +43,11 @@ impl DepositData { epoch: Epoch, fork: &Fork, spec: &ChainSpec, - ) -> Signature { + ) -> SignatureBytes { let msg = self.signed_root(); let domain = spec.get_domain(epoch, Domain::Deposit, fork); - Signature::new(msg.as_slice(), domain, secret_key) + SignatureBytes::from(Signature::new(msg.as_slice(), domain, secret_key)) } } diff --git a/eth2/types/src/test_utils/builders/testing_deposit_builder.rs b/eth2/types/src/test_utils/builders/testing_deposit_builder.rs index df3dcffa1..ed08571a7 100644 --- a/eth2/types/src/test_utils/builders/testing_deposit_builder.rs +++ b/eth2/types/src/test_utils/builders/testing_deposit_builder.rs @@ -1,5 +1,5 @@ use crate::*; -use bls::get_withdrawal_credentials; +use bls::{get_withdrawal_credentials, PublicKeyBytes, SignatureBytes}; /// Builds an deposit to be used for testing purposes. /// @@ -14,10 +14,10 @@ impl TestingDepositBuilder { let deposit = Deposit { proof: vec![].into(), data: DepositData { - pubkey, + pubkey: PublicKeyBytes::from(pubkey), withdrawal_credentials: Hash256::zero(), amount, - signature: Signature::empty_signature(), + signature: SignatureBytes::empty(), }, }; @@ -34,7 +34,7 @@ impl TestingDepositBuilder { &get_withdrawal_credentials(&keypair.pk, spec.bls_withdrawal_prefix_byte)[..], ); - self.deposit.data.pubkey = keypair.pk.clone(); + self.deposit.data.pubkey = PublicKeyBytes::from(keypair.pk.clone()); self.deposit.data.withdrawal_credentials = withdrawal_credentials; self.deposit.data.signature = diff --git a/eth2/types/src/test_utils/test_random.rs b/eth2/types/src/test_utils/test_random.rs index 3598fa79c..fa1a41815 100644 --- a/eth2/types/src/test_utils/test_random.rs +++ b/eth2/types/src/test_utils/test_random.rs @@ -7,8 +7,10 @@ mod aggregate_signature; mod bitfield; mod hash256; mod public_key; +mod public_key_bytes; mod secret_key; mod signature; +mod signature_bytes; pub trait TestRandom { fn random_for_test(rng: &mut impl RngCore) -> Self; @@ -99,3 +101,5 @@ macro_rules! impl_test_random_for_u8_array { impl_test_random_for_u8_array!(4); impl_test_random_for_u8_array!(32); +impl_test_random_for_u8_array!(48); +impl_test_random_for_u8_array!(96); diff --git a/eth2/types/src/test_utils/test_random/public_key_bytes.rs b/eth2/types/src/test_utils/test_random/public_key_bytes.rs new file mode 100644 index 000000000..e801e30d2 --- /dev/null +++ b/eth2/types/src/test_utils/test_random/public_key_bytes.rs @@ -0,0 +1,19 @@ +use std::convert::From; + +use bls::{PublicKeyBytes, BLS_PUBLIC_KEY_BYTE_SIZE}; + +use super::*; + +impl TestRandom for PublicKeyBytes { + fn random_for_test(rng: &mut impl RngCore) -> Self { + //50-50 chance for signature to be "valid" or invalid + if bool::random_for_test(rng) { + //valid signature + PublicKeyBytes::from(PublicKey::random_for_test(rng)) + } else { + //invalid signature, just random bytes + PublicKeyBytes::from_bytes(&<[u8; BLS_PUBLIC_KEY_BYTE_SIZE]>::random_for_test(rng)) + .unwrap() + } + } +} diff --git a/eth2/types/src/test_utils/test_random/signature_bytes.rs b/eth2/types/src/test_utils/test_random/signature_bytes.rs new file mode 100644 index 000000000..cae2a8225 --- /dev/null +++ b/eth2/types/src/test_utils/test_random/signature_bytes.rs @@ -0,0 +1,17 @@ +use bls::{SignatureBytes, BLS_SIG_BYTE_SIZE}; + +use super::*; +use std::convert::From; + +impl TestRandom for SignatureBytes { + fn random_for_test(rng: &mut impl RngCore) -> Self { + //50-50 chance for signature to be "valid" or invalid + if bool::random_for_test(rng) { + //valid signature + SignatureBytes::from(Signature::random_for_test(rng)) + } else { + //invalid signature, just random bytes + SignatureBytes::from_bytes(&<[u8; BLS_SIG_BYTE_SIZE]>::random_for_test(rng)).unwrap() + } + } +} diff --git a/eth2/utils/bls/src/lib.rs b/eth2/utils/bls/src/lib.rs index e8d71ebeb..5067b1aba 100644 --- a/eth2/utils/bls/src/lib.rs +++ b/eth2/utils/bls/src/lib.rs @@ -4,10 +4,14 @@ extern crate ssz; #[macro_use] mod macros; mod keypair; +mod public_key_bytes; mod secret_key; +mod signature_bytes; pub use crate::keypair::Keypair; +pub use crate::public_key_bytes::PublicKeyBytes; pub use crate::secret_key::SecretKey; +pub use crate::signature_bytes::SignatureBytes; pub use milagro_bls::{compress_g2, hash_on_g2}; #[cfg(feature = "fake_crypto")] diff --git a/eth2/utils/bls/src/macros.rs b/eth2/utils/bls/src/macros.rs index 4f41bac1d..5a84bb61a 100644 --- a/eth2/utils/bls/src/macros.rs +++ b/eth2/utils/bls/src/macros.rs @@ -84,3 +84,111 @@ macro_rules! impl_cached_tree_hash { } }; } + +macro_rules! bytes_struct { + ($name: ident, $type: ty, $byte_size: expr, $small_name: expr, $ssz_type_size: ident, + $type_str: expr, $byte_size_str: expr) => { + #[doc = "Stores `"] + #[doc = $byte_size_str] + #[doc = "` bytes which may or may not represent a valid BLS "] + #[doc = $small_name] + #[doc = ".\n\nThe `"] + #[doc = $type_str] + #[doc = "` struct performs validation when it is instantiated, where as this struct does \ + not. This struct is suitable where we may wish to store bytes that are \ + potentially not a valid "] + #[doc = $small_name] + #[doc = " (e.g., from the deposit contract)."] + #[derive(Clone)] + pub struct $name([u8; $byte_size]); + }; + ($name: ident, $type: ty, $byte_size: expr, $small_name: expr, $ssz_type_size: ident) => { + bytes_struct!($name, $type, $byte_size, $small_name, $ssz_type_size, stringify!($type), + stringify!($byte_size)); + + impl $name { + pub fn from_bytes(bytes: &[u8]) -> Result { + Ok(Self(Self::get_bytes(bytes)?)) + } + + pub fn empty() -> Self { + Self([0; $byte_size]) + } + + pub fn as_bytes(&self) -> Vec { + self.0.to_vec() + } + + fn get_bytes(bytes: &[u8]) -> Result<[u8; $byte_size], ssz::DecodeError> { + let mut result = [0; $byte_size]; + if bytes.len() != $byte_size { + Err(ssz::DecodeError::InvalidByteLength { + len: bytes.len(), + expected: $byte_size, + }) + } else { + result[..].copy_from_slice(bytes); + Ok(result) + } + } + } + + impl std::fmt::Debug for $name { + fn fmt(&self, formatter: &mut std::fmt::Formatter) -> std::fmt::Result { + self.0[..].fmt(formatter) + } + } + + impl PartialEq for $name { + fn eq(&self, other: &Self) -> bool { + &self.0[..] == &other.0[..] + } + } + + impl Eq for $name {} + + impl std::convert::TryInto<$type> for &$name { + type Error = ssz::DecodeError; + + fn try_into(self) -> Result<$type, Self::Error> { + <$type>::from_bytes(&self.0[..]) + } + } + + impl std::convert::From<$type> for $name { + fn from(obj: $type) -> Self { + // We know that obj.as_bytes() always has exactly $byte_size many bytes. + Self::from_bytes(obj.as_ssz_bytes().as_slice()).unwrap() + } + } + + impl_ssz!($name, $byte_size, "$type"); + + impl_tree_hash!($name, $ssz_type_size); + + impl_cached_tree_hash!($name, $ssz_type_size); + + impl serde::ser::Serialize for $name { + /// Serde serialization is compliant the Ethereum YAML test format. + fn serialize(&self, serializer: S) -> Result + where + S: serde::ser::Serializer, + { + serializer.serialize_str(&hex::encode(ssz::ssz_encode(self))) + } + } + + impl<'de> serde::de::Deserialize<'de> for $name { + /// Serde serialization is compliant the Ethereum YAML test format. + fn deserialize(deserializer: D) -> Result + where + D: serde::de::Deserializer<'de>, + { + let bytes = deserializer.deserialize_str(serde_hex::HexVisitor)?; + let signature = Self::from_ssz_bytes(&bytes[..]) + .map_err(|e| serde::de::Error::custom(format!("invalid ssz ({:?})", e)))?; + Ok(signature) + } + } + }; +} diff --git a/eth2/utils/bls/src/public_key.rs b/eth2/utils/bls/src/public_key.rs index d78b5869b..5924baa4c 100644 --- a/eth2/utils/bls/src/public_key.rs +++ b/eth2/utils/bls/src/public_key.rs @@ -150,6 +150,15 @@ mod tests { assert_eq!(original, decoded); } + #[test] + pub fn test_byte_size() { + let sk = SecretKey::random(); + let original = PublicKey::from_secret_key(&sk); + + let bytes = ssz_encode(&original); + assert_eq!(bytes.len(), BLS_PUBLIC_KEY_BYTE_SIZE); + } + #[test] // TODO: once `CachedTreeHash` is fixed, this test should _not_ panic. #[should_panic] diff --git a/eth2/utils/bls/src/public_key_bytes.rs b/eth2/utils/bls/src/public_key_bytes.rs new file mode 100644 index 000000000..f75735140 --- /dev/null +++ b/eth2/utils/bls/src/public_key_bytes.rs @@ -0,0 +1,43 @@ +use ssz::{Decode, DecodeError, Encode}; + +use super::{PublicKey, BLS_PUBLIC_KEY_BYTE_SIZE}; + +bytes_struct!( + PublicKeyBytes, + PublicKey, + BLS_PUBLIC_KEY_BYTE_SIZE, + "public key", + U48 +); + +#[cfg(test)] +mod tests { + use std::convert::TryInto; + + use ssz::ssz_encode; + + use super::super::Keypair; + use super::*; + + #[test] + pub fn test_valid_public_key() { + let keypair = Keypair::random(); + + let bytes = ssz_encode(&keypair.pk); + let public_key_bytes = PublicKeyBytes::from_bytes(&bytes).unwrap(); + let public_key: Result = (&public_key_bytes).try_into(); + assert!(public_key.is_ok()); + assert_eq!(keypair.pk, public_key.unwrap()); + } + + #[test] + pub fn test_invalid_public_key() { + let mut public_key_bytes = [0; BLS_PUBLIC_KEY_BYTE_SIZE]; + public_key_bytes[0] = 255; //a_flag1 == b_flag1 == c_flag1 == 1 and x1 = 0 shouldn't be allowed + let public_key_bytes = PublicKeyBytes::from_bytes(&public_key_bytes[..]); + assert!(public_key_bytes.is_ok()); + + let public_key: Result = public_key_bytes.as_ref().unwrap().try_into(); + assert!(public_key.is_err()); + } +} diff --git a/eth2/utils/bls/src/signature.rs b/eth2/utils/bls/src/signature.rs index 20240039b..aba5fc1da 100644 --- a/eth2/utils/bls/src/signature.rs +++ b/eth2/utils/bls/src/signature.rs @@ -155,6 +155,15 @@ mod tests { assert_eq!(original, decoded); } + #[test] + pub fn test_byte_size() { + let keypair = Keypair::random(); + + let signature = Signature::new(&[42, 42], 0, &keypair.sk); + let bytes = ssz_encode(&signature); + assert_eq!(bytes.len(), BLS_SIG_BYTE_SIZE); + } + #[test] // TODO: once `CachedTreeHash` is fixed, this test should _not_ panic. #[should_panic] diff --git a/eth2/utils/bls/src/signature_bytes.rs b/eth2/utils/bls/src/signature_bytes.rs new file mode 100644 index 000000000..a30cecb4d --- /dev/null +++ b/eth2/utils/bls/src/signature_bytes.rs @@ -0,0 +1,44 @@ +use ssz::{Decode, DecodeError, Encode}; + +use super::{Signature, BLS_SIG_BYTE_SIZE}; + +bytes_struct!( + SignatureBytes, + Signature, + BLS_SIG_BYTE_SIZE, + "signature", + U96 +); + +#[cfg(test)] +mod tests { + use std::convert::TryInto; + + use ssz::ssz_encode; + + use super::super::Keypair; + use super::*; + + #[test] + pub fn test_valid_signature() { + let keypair = Keypair::random(); + let original = Signature::new(&[42, 42], 0, &keypair.sk); + + let bytes = ssz_encode(&original); + let signature_bytes = SignatureBytes::from_bytes(&bytes).unwrap(); + let signature: Result = (&signature_bytes).try_into(); + assert!(signature.is_ok()); + assert_eq!(original, signature.unwrap()); + } + + #[test] + pub fn test_invalid_signature() { + let mut signature_bytes = [0; BLS_SIG_BYTE_SIZE]; + signature_bytes[0] = 255; //a_flag1 == b_flag1 == c_flag1 == 1 and x1 = 0 shouldn't be allowed + let signature_bytes = SignatureBytes::from_bytes(&signature_bytes[..]); + assert!(signature_bytes.is_ok()); + + let signature: Result = signature_bytes.as_ref().unwrap().try_into(); + assert!(signature.is_err()); + } +} From 88e89f9ab21c3080b48bc840595617a4af3314b6 Mon Sep 17 00:00:00 2001 From: Pawan Dhananjay Date: Tue, 6 Aug 2019 10:11:42 +0530 Subject: [PATCH 25/25] Update ethereum types (#489) * Update ethereum-types to version 0.6 * Fix tests * Run rustfmt --- beacon_node/store/src/iter.rs | 21 ++++++++++++------- eth2/types/Cargo.toml | 2 +- .../src/beacon_state/committee_cache/tests.rs | 2 +- eth2/types/src/beacon_state/tests.rs | 2 +- eth2/utils/cached_tree_hash/Cargo.toml | 2 +- eth2/utils/merkle_proof/Cargo.toml | 2 +- eth2/utils/ssz/Cargo.toml | 2 +- eth2/utils/ssz/fuzz/Cargo.toml | 2 +- eth2/utils/swap_or_not_shuffle/Cargo.toml | 2 +- eth2/utils/tree_hash/Cargo.toml | 2 +- tests/ef_tests/Cargo.toml | 2 +- 11 files changed, 23 insertions(+), 18 deletions(-) diff --git a/beacon_node/store/src/iter.rs b/beacon_node/store/src/iter.rs index 55c525b11..11969e2a9 100644 --- a/beacon_node/store/src/iter.rs +++ b/beacon_node/store/src/iter.rs @@ -305,7 +305,7 @@ mod test { state_a.slot = Slot::from(slots_per_historical_root); state_b.slot = Slot::from(slots_per_historical_root * 2); - let mut hashes = (0..).into_iter().map(|i| Hash256::from(i)); + let mut hashes = (0..).into_iter().map(|i| Hash256::from_low_u64_be(i)); for root in &mut state_a.block_roots[..] { *root = hashes.next().unwrap() @@ -333,7 +333,7 @@ mod test { assert_eq!(collected.len(), expected_len); for i in 0..expected_len { - assert_eq!(collected[i].0, Hash256::from(i as u64)); + assert_eq!(collected[i].0, Hash256::from_low_u64_be(i as u64)); } } @@ -348,7 +348,7 @@ mod test { state_a.slot = Slot::from(slots_per_historical_root); state_b.slot = Slot::from(slots_per_historical_root * 2); - let mut hashes = (0..).into_iter().map(|i| Hash256::from(i)); + let mut hashes = (0..).into_iter().map(|i| Hash256::from_low_u64_be(i)); for root in &mut state_a.block_roots[..] { *root = hashes.next().unwrap() @@ -376,7 +376,7 @@ mod test { assert_eq!(collected.len(), expected_len); for i in 0..expected_len { - assert_eq!(collected[i].0, Hash256::from(i as u64)); + assert_eq!(collected[i].0, Hash256::from_low_u64_be(i as u64)); } } @@ -391,7 +391,7 @@ mod test { state_a.slot = Slot::from(slots_per_historical_root); state_b.slot = Slot::from(slots_per_historical_root * 2); - let mut hashes = (0..).into_iter().map(|i| Hash256::from(i)); + let mut hashes = (0..).into_iter().map(|i| Hash256::from_low_u64_be(i)); for slot in 0..slots_per_historical_root { state_a @@ -404,8 +404,8 @@ mod test { .expect(&format!("should set state_b slot {}", slot)); } - let state_a_root = Hash256::from(slots_per_historical_root as u64); - let state_b_root = Hash256::from(slots_per_historical_root as u64 * 2); + let state_a_root = Hash256::from_low_u64_be(slots_per_historical_root as u64); + let state_b_root = Hash256::from_low_u64_be(slots_per_historical_root as u64 * 2); store.put(&state_a_root, &state_a).unwrap(); store.put(&state_b_root, &state_b).unwrap(); @@ -429,7 +429,12 @@ mod test { assert_eq!(slot, i as u64, "slot mismatch at {}: {} vs {}", i, slot, i); - assert_eq!(hash, Hash256::from(i as u64), "hash mismatch at {}", i); + assert_eq!( + hash, + Hash256::from_low_u64_be(i as u64), + "hash mismatch at {}", + i + ); } } } diff --git a/eth2/types/Cargo.toml b/eth2/types/Cargo.toml index ae707bc2c..2e4474499 100644 --- a/eth2/types/Cargo.toml +++ b/eth2/types/Cargo.toml @@ -12,7 +12,7 @@ compare_fields_derive = { path = "../utils/compare_fields_derive" } dirs = "1.0" derivative = "1.0" eth2_interop_keypairs = { path = "../utils/eth2_interop_keypairs" } -ethereum-types = "0.5" +ethereum-types = "0.6" hashing = { path = "../utils/hashing" } hex = "0.3" int_to_bytes = { path = "../utils/int_to_bytes" } diff --git a/eth2/types/src/beacon_state/committee_cache/tests.rs b/eth2/types/src/beacon_state/committee_cache/tests.rs index 0fe2fb8a4..28e9d92f8 100644 --- a/eth2/types/src/beacon_state/committee_cache/tests.rs +++ b/eth2/types/src/beacon_state/committee_cache/tests.rs @@ -74,7 +74,7 @@ fn shuffles_for_the_right_epoch() { let distinct_hashes: Vec = (0..MinimalEthSpec::epochs_per_historical_vector()) .into_iter() - .map(|i| Hash256::from(i as u64)) + .map(|i| Hash256::from_low_u64_be(i as u64)) .collect(); state.randao_mixes = FixedVector::from(distinct_hashes); diff --git a/eth2/types/src/beacon_state/tests.rs b/eth2/types/src/beacon_state/tests.rs index cff034e56..e4c493f92 100644 --- a/eth2/types/src/beacon_state/tests.rs +++ b/eth2/types/src/beacon_state/tests.rs @@ -308,7 +308,7 @@ mod committees { let distinct_hashes: Vec = (0..T::epochs_per_historical_vector()) .into_iter() - .map(|i| Hash256::from(i as u64)) + .map(|i| Hash256::from_low_u64_be(i as u64)) .collect(); state.randao_mixes = FixedVector::from(distinct_hashes); diff --git a/eth2/utils/cached_tree_hash/Cargo.toml b/eth2/utils/cached_tree_hash/Cargo.toml index c8881eb0f..ce26ee94f 100644 --- a/eth2/utils/cached_tree_hash/Cargo.toml +++ b/eth2/utils/cached_tree_hash/Cargo.toml @@ -9,6 +9,6 @@ tree_hash_derive = { path = "../tree_hash_derive" } [dependencies] tree_hash = { path = "../tree_hash" } -ethereum-types = "0.5" +ethereum-types = "0.6" hashing = { path = "../hashing" } int_to_bytes = { path = "../int_to_bytes" } diff --git a/eth2/utils/merkle_proof/Cargo.toml b/eth2/utils/merkle_proof/Cargo.toml index b7cd81216..7464773a5 100644 --- a/eth2/utils/merkle_proof/Cargo.toml +++ b/eth2/utils/merkle_proof/Cargo.toml @@ -5,5 +5,5 @@ authors = ["Michael Sproul "] edition = "2018" [dependencies] -ethereum-types = "0.5" +ethereum-types = "0.6" hashing = { path = "../hashing" } diff --git a/eth2/utils/ssz/Cargo.toml b/eth2/utils/ssz/Cargo.toml index 78e65a977..ff5df162d 100644 --- a/eth2/utils/ssz/Cargo.toml +++ b/eth2/utils/ssz/Cargo.toml @@ -13,4 +13,4 @@ name = "ssz" eth2_ssz_derive = "0.1.0" [dependencies] -ethereum-types = "0.5" +ethereum-types = "0.6" diff --git a/eth2/utils/ssz/fuzz/Cargo.toml b/eth2/utils/ssz/fuzz/Cargo.toml index 71628e858..3c922bac9 100644 --- a/eth2/utils/ssz/fuzz/Cargo.toml +++ b/eth2/utils/ssz/fuzz/Cargo.toml @@ -9,7 +9,7 @@ publish = false cargo-fuzz = true [dependencies] -ethereum-types = "0.5" +ethereum-types = "0.6" [dependencies.ssz] path = ".." diff --git a/eth2/utils/swap_or_not_shuffle/Cargo.toml b/eth2/utils/swap_or_not_shuffle/Cargo.toml index 19d5444fb..764dbf409 100644 --- a/eth2/utils/swap_or_not_shuffle/Cargo.toml +++ b/eth2/utils/swap_or_not_shuffle/Cargo.toml @@ -12,7 +12,7 @@ harness = false criterion = "0.2" yaml-rust = "0.4.2" hex = "0.3" -ethereum-types = "0.5" +ethereum-types = "0.6" [dependencies] hashing = { path = "../hashing" } diff --git a/eth2/utils/tree_hash/Cargo.toml b/eth2/utils/tree_hash/Cargo.toml index 3019c2ad0..d69a75faa 100644 --- a/eth2/utils/tree_hash/Cargo.toml +++ b/eth2/utils/tree_hash/Cargo.toml @@ -15,7 +15,7 @@ tree_hash_derive = { path = "../tree_hash_derive" } types = { path = "../../types" } [dependencies] -ethereum-types = "0.5" +ethereum-types = "0.6" hashing = { path = "../hashing" } int_to_bytes = { path = "../int_to_bytes" } lazy_static = "0.1" diff --git a/tests/ef_tests/Cargo.toml b/tests/ef_tests/Cargo.toml index 90f66f355..507e0f7c3 100644 --- a/tests/ef_tests/Cargo.toml +++ b/tests/ef_tests/Cargo.toml @@ -10,7 +10,7 @@ fake_crypto = ["bls/fake_crypto"] [dependencies] bls = { path = "../../eth2/utils/bls" } compare_fields = { path = "../../eth2/utils/compare_fields" } -ethereum-types = "0.5" +ethereum-types = "0.6" hex = "0.3" rayon = "1.0" serde = "1.0"