From e23726c0a1b02a1afb24f84e799dca881f821cbc Mon Sep 17 00:00:00 2001 From: Grant Wuerker Date: Tue, 9 Jul 2019 12:36:26 +0200 Subject: [PATCH 001/305] Renamed fork_choice::process_attestation_from_block --- beacon_node/beacon_chain/src/fork_choice.rs | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/beacon_node/beacon_chain/src/fork_choice.rs b/beacon_node/beacon_chain/src/fork_choice.rs index c693145ea..cdda56386 100644 --- a/beacon_node/beacon_chain/src/fork_choice.rs +++ b/beacon_node/beacon_chain/src/fork_choice.rs @@ -112,7 +112,7 @@ impl ForkChoice { // // https://github.com/ethereum/eth2.0-specs/blob/v0.7.0/specs/core/0_fork-choice.md for attestation in &block.body.attestations { - self.process_attestation_from_block(state, attestation)?; + self.process_attestation(state, attestation)?; } self.backend.process_block(block, block_root)?; @@ -120,7 +120,7 @@ impl ForkChoice { Ok(()) } - fn process_attestation_from_block( + pub fn process_attestation( &self, state: &BeaconState, attestation: &Attestation, From adf1d9c533d51c908b8e3b7430ba7e2554a2ef45 Mon Sep 17 00:00:00 2001 From: Grant Wuerker Date: Tue, 9 Jul 2019 12:36:59 +0200 Subject: [PATCH 002/305] Processing attestation in fork choice --- beacon_node/beacon_chain/src/beacon_chain.rs | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/beacon_node/beacon_chain/src/beacon_chain.rs b/beacon_node/beacon_chain/src/beacon_chain.rs index 2d8282270..ca4667e00 100644 --- a/beacon_node/beacon_chain/src/beacon_chain.rs +++ b/beacon_node/beacon_chain/src/beacon_chain.rs @@ -480,6 +480,14 @@ impl BeaconChain { self.metrics.attestation_processing_requests.inc(); let timer = self.metrics.attestation_processing_times.start_timer(); + match self.store.exists::(&attestation.data.target_root) { + Ok(true) => { + per_block_processing::validate_attestation_time_independent_only(&*self.state.read(), &attestation, &self.spec)?; + self.fork_choice.process_attestation(&*self.state.read(), 
&attestation); + }, + _ => {} + }; + let result = self .op_pool .insert_attestation(attestation, &*self.state.read(), &self.spec); From 40b166edcdb5d6102216f8f00397659089027ccc Mon Sep 17 00:00:00 2001 From: Grant Wuerker Date: Thu, 11 Jul 2019 16:32:01 +0200 Subject: [PATCH 003/305] Retrieving state from store and checking signature --- beacon_node/beacon_chain/src/beacon_chain.rs | 24 ++++++++++++++------ 1 file changed, 17 insertions(+), 7 deletions(-) diff --git a/beacon_node/beacon_chain/src/beacon_chain.rs b/beacon_node/beacon_chain/src/beacon_chain.rs index ca4667e00..74d24244e 100644 --- a/beacon_node/beacon_chain/src/beacon_chain.rs +++ b/beacon_node/beacon_chain/src/beacon_chain.rs @@ -15,7 +15,7 @@ use state_processing::per_block_processing::errors::{ }; use state_processing::{ per_block_processing, per_block_processing_without_verifying_block_signature, - per_slot_processing, BlockProcessingError, + per_slot_processing, BlockProcessingError, common }; use std::sync::Arc; use store::iter::{BlockIterator, BlockRootsIterator, StateRootsIterator}; @@ -480,14 +480,24 @@ impl BeaconChain { self.metrics.attestation_processing_requests.inc(); let timer = self.metrics.attestation_processing_times.start_timer(); - match self.store.exists::(&attestation.data.target_root) { - Ok(true) => { - per_block_processing::validate_attestation_time_independent_only(&*self.state.read(), &attestation, &self.spec)?; - self.fork_choice.process_attestation(&*self.state.read(), &attestation); - }, - _ => {} + // Retrieve the attestation's state from `store` if necessary. 
+ let attestation_state = match attestation.data.beacon_block_root == self.canonical_head.read().beacon_block_root { + true => Some(self.state.read().clone()), + false => match self.store.get::(&attestation.data.beacon_block_root) { + Ok(Some(block)) => match self.store.get::>(&block.state_root) { + Ok(state) => state, + _ => None + }, + _ => None + } }; + if let Some(state) = attestation_state { + let indexed_attestation = common::convert_to_indexed(&state, &attestation)?; + per_block_processing::verify_indexed_attestation(&state, &indexed_attestation, &self.spec)?; + self.fork_choice.process_attestation(&state, &attestation); + } + let result = self .op_pool .insert_attestation(attestation, &*self.state.read(), &self.spec); From 7cdfa3cc279be167b933b3cb632f83233d35baf8 Mon Sep 17 00:00:00 2001 From: Grant Wuerker Date: Fri, 19 Jul 2019 14:52:01 +0200 Subject: [PATCH 004/305] Looser check on beacon state validity. --- beacon_node/beacon_chain/src/beacon_chain.rs | 42 ++++++++++++++------ 1 file changed, 29 insertions(+), 13 deletions(-) diff --git a/beacon_node/beacon_chain/src/beacon_chain.rs b/beacon_node/beacon_chain/src/beacon_chain.rs index f18b49e12..701d900c7 100644 --- a/beacon_node/beacon_chain/src/beacon_chain.rs +++ b/beacon_node/beacon_chain/src/beacon_chain.rs @@ -493,19 +493,7 @@ impl BeaconChain { self.metrics.attestation_processing_requests.inc(); let timer = self.metrics.attestation_processing_times.start_timer(); - // Retrieve the attestation's state from `store` if necessary. 
- let attestation_state = match attestation.data.beacon_block_root == self.canonical_head.read().beacon_block_root { - true => Some(self.state.read().clone()), - false => match self.store.get::(&attestation.data.beacon_block_root) { - Ok(Some(block)) => match self.store.get::>(&block.state_root) { - Ok(state) => state, - _ => None - }, - _ => None - } - }; - - if let Some(state) = attestation_state { + if let Some(state) = self.get_attestation_state(&attestation) { let indexed_attestation = common::convert_to_indexed(&state, &attestation)?; per_block_processing::verify_indexed_attestation(&state, &indexed_attestation, &self.spec)?; self.fork_choice.process_attestation(&state, &attestation); @@ -535,6 +523,34 @@ impl BeaconChain { result } + fn get_attestation_state(&self, attestation: &Attestation) -> Option> { + let blocks = BestBlockRootsIterator::owned(self.store.clone(), self.state.read().clone(), self.state.read().slot.clone()); + for (root, slot) in blocks { + if root == attestation.data.target_root + && self.slot_epochs_equal_or_adjacent(slot, self.state.read().slot) { + return Some(self.state.read().clone()); + } + }; + + match self.store.get::(&attestation.data.target_root) { + Ok(Some(block)) => match self.store.get::>(&block.state_root) { + Ok(state) => state, + _ => None + }, + _ => None + } + } + + fn slot_epochs_equal_or_adjacent(&self, slot_a: Slot, slot_b: Slot) -> bool { + let slots_per_epoch = T::EthSpec::slots_per_epoch(); + let epoch_a = slot_a.epoch(slots_per_epoch); + let epoch_b = slot_b.epoch(slots_per_epoch); + + epoch_a == epoch_b + || epoch_a + 1 == epoch_b + || epoch_b + 1 == epoch_a + } + /// Accept some deposit and queue it for inclusion in an appropriate block. 
pub fn process_deposit( &self, From bef7ca6bfb1bb68d43d58b69e9a658b59c69d014 Mon Sep 17 00:00:00 2001 From: Grant Wuerker Date: Sat, 20 Jul 2019 12:47:59 +0200 Subject: [PATCH 005/305] Cleaned up get_attestation_state --- beacon_node/beacon_chain/src/beacon_chain.rs | 23 ++++++++++---------- 1 file changed, 11 insertions(+), 12 deletions(-) diff --git a/beacon_node/beacon_chain/src/beacon_chain.rs b/beacon_node/beacon_chain/src/beacon_chain.rs index 701d900c7..d02ab3176 100644 --- a/beacon_node/beacon_chain/src/beacon_chain.rs +++ b/beacon_node/beacon_chain/src/beacon_chain.rs @@ -523,15 +523,24 @@ impl BeaconChain { result } + /// Retrieves the `BeaconState` used to create the attestation. fn get_attestation_state(&self, attestation: &Attestation) -> Option> { + // Current state is used if the attestation targets a historic block and a slot within an + // equal or adjacent epoch. + let slots_per_epoch = T::EthSpec::slots_per_epoch(); + let min_slot = (self.state.read().slot.epoch(slots_per_epoch) - 1).start_slot(slots_per_epoch); let blocks = BestBlockRootsIterator::owned(self.store.clone(), self.state.read().clone(), self.state.read().slot.clone()); for (root, slot) in blocks { - if root == attestation.data.target_root - && self.slot_epochs_equal_or_adjacent(slot, self.state.read().slot) { + if root == attestation.data.target_root { return Some(self.state.read().clone()); } + + if slot == min_slot { + break; + } }; + // A different state is retrieved from the database. 
match self.store.get::(&attestation.data.target_root) { Ok(Some(block)) => match self.store.get::>(&block.state_root) { Ok(state) => state, @@ -541,16 +550,6 @@ impl BeaconChain { } } - fn slot_epochs_equal_or_adjacent(&self, slot_a: Slot, slot_b: Slot) -> bool { - let slots_per_epoch = T::EthSpec::slots_per_epoch(); - let epoch_a = slot_a.epoch(slots_per_epoch); - let epoch_b = slot_b.epoch(slots_per_epoch); - - epoch_a == epoch_b - || epoch_a + 1 == epoch_b - || epoch_b + 1 == epoch_a - } - /// Accept some deposit and queue it for inclusion in an appropriate block. pub fn process_deposit( &self, From 3b8a584c550fc7788806e04e7a7e76a9dfb07796 Mon Sep 17 00:00:00 2001 From: Grant Wuerker Date: Sun, 21 Jul 2019 22:53:39 +0200 Subject: [PATCH 006/305] Expanded fork choice api to provide latest validator message. --- eth2/lmd_ghost/src/lib.rs | 3 +++ eth2/lmd_ghost/src/reduced_tree.rs | 13 +++++++++++++ 2 files changed, 16 insertions(+) diff --git a/eth2/lmd_ghost/src/lib.rs b/eth2/lmd_ghost/src/lib.rs index dd413e2eb..f18b5b81f 100644 --- a/eth2/lmd_ghost/src/lib.rs +++ b/eth2/lmd_ghost/src/lib.rs @@ -43,4 +43,7 @@ pub trait LmdGhost: Send + Sync { finalized_block: &BeaconBlock, finalized_block_root: Hash256, ) -> Result<()>; + + /// Returns the latest message for a given validator index. 
+ fn latest_message(&mut self, validator_index: usize) -> Option<(Hash256, Slot)>; } diff --git a/eth2/lmd_ghost/src/reduced_tree.rs b/eth2/lmd_ghost/src/reduced_tree.rs index dace2bda6..f069ae68c 100644 --- a/eth2/lmd_ghost/src/reduced_tree.rs +++ b/eth2/lmd_ghost/src/reduced_tree.rs @@ -87,6 +87,12 @@ where .update_root(new_block.slot, new_root) .map_err(|e| format!("update_finalized_root failed: {:?}", e)) } + + fn latest_message(&mut self, validator_index: usize) -> Option<(Hash256, Slot)> { + self.core + .write() + .latest_message(validator_index) + } } struct ReducedTree { @@ -222,6 +228,13 @@ where Ok(head_node.block_hash) } + pub fn latest_message(&mut self, validator_index: usize) -> Option<(Hash256, Slot)> { + match self.latest_votes.get(validator_index) { + Some(v) => Some((v.hash.clone(), v.slot.clone())), + None => None + } + } + fn find_head_from<'a>(&'a self, start_node: &'a Node) -> Result<&'a Node> { if start_node.does_not_have_children() { Ok(start_node) From b2471eca494369e946fa212610af6b2c8be44802 Mon Sep 17 00:00:00 2001 From: Grant Wuerker Date: Tue, 23 Jul 2019 20:50:18 +0200 Subject: [PATCH 007/305] Checking if the an attestation contains a latest message --- beacon_node/beacon_chain/src/beacon_chain.rs | 19 ++++------------ beacon_node/beacon_chain/src/fork_choice.rs | 24 ++++++++++++++++++++ eth2/lmd_ghost/src/lib.rs | 2 +- eth2/lmd_ghost/src/reduced_tree.rs | 2 +- 4 files changed, 31 insertions(+), 16 deletions(-) diff --git a/beacon_node/beacon_chain/src/beacon_chain.rs b/beacon_node/beacon_chain/src/beacon_chain.rs index d02ab3176..f215465f2 100644 --- a/beacon_node/beacon_chain/src/beacon_chain.rs +++ b/beacon_node/beacon_chain/src/beacon_chain.rs @@ -494,9 +494,11 @@ impl BeaconChain { let timer = self.metrics.attestation_processing_times.start_timer(); if let Some(state) = self.get_attestation_state(&attestation) { - let indexed_attestation = common::convert_to_indexed(&state, &attestation)?; - 
per_block_processing::verify_indexed_attestation(&state, &indexed_attestation, &self.spec)?; - self.fork_choice.process_attestation(&state, &attestation); + if self.fork_choice.should_process_attestation(&state, &attestation) { + let indexed_attestation = common::convert_to_indexed(&state, &attestation)?; + per_block_processing::verify_indexed_attestation(&state, &indexed_attestation, &self.spec)?; + self.fork_choice.process_attestation(&state, &attestation); + } } let result = self @@ -509,17 +511,6 @@ impl BeaconChain { self.metrics.attestation_processing_successes.inc(); } - // TODO: process attestation. Please consider: - // - // - Because a block was not added to the op pool does not mean it's invalid (it might - // just be old). - // - The attestation should be rejected if we don't know the block (ideally it should be - // queued, but this may be overkill). - // - The attestation _must_ be validated against it's state before being added to fork - // choice. - // - You can avoid verifying some attestations by first checking if they're a latest - // message. This would involve expanding the `LmdGhost` API. - result } diff --git a/beacon_node/beacon_chain/src/fork_choice.rs b/beacon_node/beacon_chain/src/fork_choice.rs index cdda56386..6b69e3e08 100644 --- a/beacon_node/beacon_chain/src/fork_choice.rs +++ b/beacon_node/beacon_chain/src/fork_choice.rs @@ -4,6 +4,7 @@ use state_processing::common::get_attesting_indices_unsorted; use std::sync::Arc; use store::{Error as StoreError, Store}; use types::{Attestation, BeaconBlock, BeaconState, BeaconStateError, Epoch, EthSpec, Hash256}; +use state_processing::common; type Result = std::result::Result; @@ -120,6 +121,9 @@ impl ForkChoice { Ok(()) } + /// Process an attestation. + /// + /// Assumes the attestation is valid. pub fn process_attestation( &self, state: &BeaconState, @@ -162,6 +166,26 @@ impl ForkChoice { Ok(()) } + /// Determines whether or not the given attestation contains a latest messages. 
+ pub fn should_process_attestation(&self, state: &BeaconState, attestation: &Attestation) -> bool { + let validator_indices = common::get_attesting_indices_unsorted( + state, + &attestation.data, + &attestation.aggregation_bitfield, + ).unwrap(); + + let target_slot = attestation.data.target_epoch.start_slot(T::EthSpec::slots_per_epoch()); + + validator_indices + .iter() + .find(|&&v| { + match self.backend.latest_message(v) { + Some((_, slot)) => target_slot > slot, + None => true + } + }).is_some() + } + /// Inform the fork choice that the given block (and corresponding root) have been finalized so /// it may prune it's storage. /// diff --git a/eth2/lmd_ghost/src/lib.rs b/eth2/lmd_ghost/src/lib.rs index f18b5b81f..183d45c9a 100644 --- a/eth2/lmd_ghost/src/lib.rs +++ b/eth2/lmd_ghost/src/lib.rs @@ -45,5 +45,5 @@ pub trait LmdGhost: Send + Sync { ) -> Result<()>; /// Returns the latest message for a given validator index. - fn latest_message(&mut self, validator_index: usize) -> Option<(Hash256, Slot)>; + fn latest_message(&self, validator_index: usize) -> Option<(Hash256, Slot)>; } diff --git a/eth2/lmd_ghost/src/reduced_tree.rs b/eth2/lmd_ghost/src/reduced_tree.rs index f069ae68c..0985441df 100644 --- a/eth2/lmd_ghost/src/reduced_tree.rs +++ b/eth2/lmd_ghost/src/reduced_tree.rs @@ -88,7 +88,7 @@ where .map_err(|e| format!("update_finalized_root failed: {:?}", e)) } - fn latest_message(&mut self, validator_index: usize) -> Option<(Hash256, Slot)> { + fn latest_message(&self, validator_index: usize) -> Option<(Hash256, Slot)> { self.core .write() .latest_message(validator_index) From 51645aa9af00ef2b93226f65c0a64c411a73243d Mon Sep 17 00:00:00 2001 From: Grant Wuerker Date: Wed, 24 Jul 2019 18:03:48 +0200 Subject: [PATCH 008/305] Correct process_attestation error handling. 
--- beacon_node/beacon_chain/src/beacon_chain.rs | 10 +++++--- beacon_node/beacon_chain/src/errors.rs | 5 ++++ beacon_node/beacon_chain/src/fork_choice.rs | 11 +++++---- beacon_node/rpc/src/attestation.rs | 26 ++++++++++++++++++-- 4 files changed, 41 insertions(+), 11 deletions(-) diff --git a/beacon_node/beacon_chain/src/beacon_chain.rs b/beacon_node/beacon_chain/src/beacon_chain.rs index f215465f2..67d928127 100644 --- a/beacon_node/beacon_chain/src/beacon_chain.rs +++ b/beacon_node/beacon_chain/src/beacon_chain.rs @@ -22,6 +22,7 @@ use store::iter::{BestBlockRootsIterator, BlockIterator, BlockRootsIterator, Sta use store::{Error as DBError, Store}; use tree_hash::TreeHash; use types::*; +use crate::BeaconChainError; // Text included in blocks. // Must be 32-bytes or panic. @@ -489,15 +490,15 @@ impl BeaconChain { pub fn process_attestation( &self, attestation: Attestation, - ) -> Result<(), AttestationValidationError> { + ) -> Result<(), Error> { self.metrics.attestation_processing_requests.inc(); let timer = self.metrics.attestation_processing_times.start_timer(); if let Some(state) = self.get_attestation_state(&attestation) { - if self.fork_choice.should_process_attestation(&state, &attestation) { + if self.fork_choice.should_process_attestation(&state, &attestation)? { let indexed_attestation = common::convert_to_indexed(&state, &attestation)?; per_block_processing::verify_indexed_attestation(&state, &indexed_attestation, &self.spec)?; - self.fork_choice.process_attestation(&state, &attestation); + self.fork_choice.process_attestation(&state, &attestation)?; } } @@ -511,7 +512,7 @@ impl BeaconChain { self.metrics.attestation_processing_successes.inc(); } - result + result.map_err(|e| BeaconChainError::AttestationValidationError(e)) } /// Retrieves the `BeaconState` used to create the attestation. 
@@ -968,3 +969,4 @@ impl From for Error { Error::BeaconStateError(e) } } + diff --git a/beacon_node/beacon_chain/src/errors.rs b/beacon_node/beacon_chain/src/errors.rs index 0d619d7f2..4e2170ca8 100644 --- a/beacon_node/beacon_chain/src/errors.rs +++ b/beacon_node/beacon_chain/src/errors.rs @@ -3,6 +3,7 @@ use crate::metrics::Error as MetricsError; use state_processing::BlockProcessingError; use state_processing::SlotProcessingError; use types::*; +use state_processing::per_block_processing::errors::{AttestationValidationError, IndexedAttestationValidationError}; macro_rules! easy_from_to { ($from: ident, $to: ident) => { @@ -31,6 +32,8 @@ pub enum BeaconChainError { MissingBeaconState(Hash256), SlotProcessingError(SlotProcessingError), MetricsError(String), + AttestationValidationError(AttestationValidationError), + IndexedAttestationValidationError(IndexedAttestationValidationError) } easy_from_to!(SlotProcessingError, BeaconChainError); @@ -53,3 +56,5 @@ pub enum BlockProductionError { easy_from_to!(BlockProcessingError, BlockProductionError); easy_from_to!(BeaconStateError, BlockProductionError); easy_from_to!(SlotProcessingError, BlockProductionError); +easy_from_to!(AttestationValidationError, BeaconChainError); +easy_from_to!(IndexedAttestationValidationError, BeaconChainError); diff --git a/beacon_node/beacon_chain/src/fork_choice.rs b/beacon_node/beacon_chain/src/fork_choice.rs index 6b69e3e08..92b683590 100644 --- a/beacon_node/beacon_chain/src/fork_choice.rs +++ b/beacon_node/beacon_chain/src/fork_choice.rs @@ -166,24 +166,24 @@ impl ForkChoice { Ok(()) } - /// Determines whether or not the given attestation contains a latest messages. - pub fn should_process_attestation(&self, state: &BeaconState, attestation: &Attestation) -> bool { + /// Determines whether or not the given attestation contains a latest message. 
+ pub fn should_process_attestation(&self, state: &BeaconState, attestation: &Attestation) -> Result { let validator_indices = common::get_attesting_indices_unsorted( state, &attestation.data, &attestation.aggregation_bitfield, - ).unwrap(); + )?; let target_slot = attestation.data.target_epoch.start_slot(T::EthSpec::slots_per_epoch()); - validator_indices + Ok(validator_indices .iter() .find(|&&v| { match self.backend.latest_message(v) { Some((_, slot)) => target_slot > slot, None => true } - }).is_some() + }).is_some()) } /// Inform the fork choice that the given block (and corresponding root) have been finalized so @@ -218,3 +218,4 @@ impl From for Error { Error::BackendError(e) } } + diff --git a/beacon_node/rpc/src/attestation.rs b/beacon_node/rpc/src/attestation.rs index b85d4e947..48d9eb469 100644 --- a/beacon_node/rpc/src/attestation.rs +++ b/beacon_node/rpc/src/attestation.rs @@ -1,4 +1,4 @@ -use beacon_chain::{BeaconChain, BeaconChainTypes}; +use beacon_chain::{BeaconChain, BeaconChainTypes, BeaconChainError}; use eth2_libp2p::PubsubMessage; use eth2_libp2p::TopicBuilder; use eth2_libp2p::BEACON_ATTESTATION_TOPIC; @@ -159,7 +159,7 @@ impl AttestationService for AttestationServiceInstance { resp.set_success(true); } - Err(e) => { + Err(BeaconChainError::AttestationValidationError(e)) => { // Attestation was invalid warn!( self.log, @@ -170,6 +170,28 @@ impl AttestationService for AttestationServiceInstance { resp.set_success(false); resp.set_msg(format!("InvalidAttestation: {:?}", e).as_bytes().to_vec()); } + Err(BeaconChainError::IndexedAttestationValidationError(e)) => { + // Indexed attestation was invalid + warn!( + self.log, + "PublishAttestation"; + "type" => "invalid_attestation", + "error" => format!("{:?}", e), + ); + resp.set_success(false); + resp.set_msg(format!("InvalidIndexedAttestation: {:?}", e).as_bytes().to_vec()); + } + Err(e) => { + // Attestation was invalid + warn!( + self.log, + "PublishAttestation"; + "type" => "beacon_chain_error", 
+ "error" => format!("{:?}", e), + ); + resp.set_success(false); + resp.set_msg(format!("There was a beacon chain error: {:?}", e).as_bytes().to_vec()); + } }; let error_log = self.log.clone(); From b49d592eee41fbbd04e88e8e6a08c3be372d1b77 Mon Sep 17 00:00:00 2001 From: Grant Wuerker Date: Wed, 24 Jul 2019 18:06:18 +0200 Subject: [PATCH 009/305] Copy paste error in comment fixed. --- beacon_node/rpc/src/attestation.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/beacon_node/rpc/src/attestation.rs b/beacon_node/rpc/src/attestation.rs index 48d9eb469..1cfa81a04 100644 --- a/beacon_node/rpc/src/attestation.rs +++ b/beacon_node/rpc/src/attestation.rs @@ -182,7 +182,7 @@ impl AttestationService for AttestationServiceInstance { resp.set_msg(format!("InvalidIndexedAttestation: {:?}", e).as_bytes().to_vec()); } Err(e) => { - // Attestation was invalid + // Some other error warn!( self.log, "PublishAttestation"; From b096e3a6432be6ce6850b211704a858bfcad8a28 Mon Sep 17 00:00:00 2001 From: Paul Hauner Date: Mon, 5 Aug 2019 16:25:21 +1000 Subject: [PATCH 010/305] Tidy ancestor iterators --- beacon_node/beacon_chain/src/beacon_chain.rs | 75 +++++---- beacon_node/beacon_chain/src/fork_choice.rs | 2 +- beacon_node/beacon_chain/src/iter.rs | 48 ++++++ beacon_node/beacon_chain/src/lib.rs | 1 + beacon_node/beacon_chain/src/test_utils.rs | 2 +- beacon_node/network/src/sync/simple_sync.rs | 19 +-- beacon_node/rest_api/src/beacon_node.rs | 2 +- beacon_node/rpc/src/attestation.rs | 6 +- beacon_node/rpc/src/validator.rs | 6 +- beacon_node/store/src/iter.rs | 159 +------------------ eth2/lmd_ghost/tests/test.rs | 4 +- 11 files changed, 125 insertions(+), 199 deletions(-) create mode 100644 beacon_node/beacon_chain/src/iter.rs diff --git a/beacon_node/beacon_chain/src/beacon_chain.rs b/beacon_node/beacon_chain/src/beacon_chain.rs index d520f0b5c..290128994 100644 --- a/beacon_node/beacon_chain/src/beacon_chain.rs +++ b/beacon_node/beacon_chain/src/beacon_chain.rs @@ 
-1,6 +1,7 @@ use crate::checkpoint::CheckPoint; use crate::errors::{BeaconChainError as Error, BlockProductionError}; use crate::fork_choice::{Error as ForkChoiceError, ForkChoice}; +use crate::iter::{ReverseBlockRootIterator, ReverseStateRootIterator}; use crate::metrics::Metrics; use crate::persisted_beacon_chain::{PersistedBeaconChain, BEACON_CHAIN_DB_KEY}; use lmd_ghost::LmdGhost; @@ -19,7 +20,7 @@ use state_processing::{ per_slot_processing, BlockProcessingError, }; use std::sync::Arc; -use store::iter::{BestBlockRootsIterator, BlockIterator, BlockRootsIterator, StateRootsIterator}; +use store::iter::{BlockRootsIterator, StateRootsIterator}; use store::{Error as DBError, Store}; use tree_hash::TreeHash; use types::*; @@ -224,45 +225,53 @@ impl BeaconChain { Ok(headers?) } - /// Iterate in reverse (highest to lowest slot) through all blocks from the block at `slot` - /// through to the genesis block. - /// - /// Returns `None` for headers prior to genesis or when there is an error reading from `Store`. - /// - /// Contains duplicate headers when skip slots are encountered. - pub fn rev_iter_blocks(&self, slot: Slot) -> BlockIterator { - BlockIterator::owned(self.store.clone(), self.state.read().clone(), slot) - } - /// Iterates in reverse (highest to lowest slot) through all block roots from `slot` through to - /// genesis. + /// Iterates through all the `BeaconBlock` roots and slots, first returning + /// `self.head().beacon_block` then all prior blocks until either genesis or if the database + /// fails to return a prior block. /// - /// Returns `None` for roots prior to genesis or when there is an error reading from `Store`. + /// Returns duplicate roots for skip-slots. /// - /// Contains duplicate roots when skip slots are encountered. 
- pub fn rev_iter_block_roots(&self, slot: Slot) -> BlockRootsIterator { - BlockRootsIterator::owned(self.store.clone(), self.state.read().clone(), slot) - } - - /// Iterates in reverse (highest to lowest slot) through all block roots from largest - /// `slot <= beacon_state.slot` through to genesis. + /// Iterator returns `(Hash256, Slot)`. /// - /// Returns `None` for roots prior to genesis or when there is an error reading from `Store`. + /// ## Note /// - /// Contains duplicate roots when skip slots are encountered. - pub fn rev_iter_best_block_roots( + /// Because this iterator starts at the `head` of the chain (viz., the best block), the first slot + /// returned may be earlier than the wall-clock slot. + pub fn rev_iter_block_roots( &self, slot: Slot, - ) -> BestBlockRootsIterator { - BestBlockRootsIterator::owned(self.store.clone(), self.state.read().clone(), slot) + ) -> ReverseBlockRootIterator { + let state = &self.head().beacon_state; + let block_root = self.head().beacon_block_root; + let block_slot = state.slot; + + let iter = BlockRootsIterator::owned(self.store.clone(), state.clone(), slot); + + ReverseBlockRootIterator::new((block_root, block_slot), iter) } - /// Iterates in reverse (highest to lowest slot) through all state roots from `slot` through to - /// genesis. + /// Iterates through all the `BeaconState` roots and slots, first returning + /// `self.head().beacon_state` then all prior states until either genesis or if the database + /// fails to return a prior state. /// - /// Returns `None` for roots prior to genesis or when there is an error reading from `Store`. - pub fn rev_iter_state_roots(&self, slot: Slot) -> StateRootsIterator { - StateRootsIterator::owned(self.store.clone(), self.state.read().clone(), slot) + /// Iterator returns `(Hash256, Slot)`. + /// + /// ## Note + /// + /// Because this iterator starts at the `head` of the chain (viz., the best block), the first slot + /// returned may be earlier than the wall-clock slot. 
+ pub fn rev_iter_state_roots( + &self, + slot: Slot, + ) -> ReverseStateRootIterator { + let state = &self.head().beacon_state; + let state_root = self.head().beacon_state_root; + let state_slot = state.slot; + + let iter = StateRootsIterator::owned(self.store.clone(), state.clone(), slot); + + ReverseStateRootIterator::new((state_root, state_slot), iter) } /// Returns the block at the given root, if any. @@ -279,8 +288,10 @@ impl BeaconChain { /// Returns a read-lock guarded `BeaconState` which is the `canonical_head` that has been /// updated to match the current slot clock. - pub fn current_state(&self) -> RwLockReadGuard> { - self.state.read() + pub fn speculative_state(&self) -> Result>, Error> { + // TODO: ensure the state has done a catch-up. + + Ok(self.state.read()) } /// Returns a read-lock guarded `CheckPoint` struct for reading the head (as chosen by the diff --git a/beacon_node/beacon_chain/src/fork_choice.rs b/beacon_node/beacon_chain/src/fork_choice.rs index b77979b74..74778be32 100644 --- a/beacon_node/beacon_chain/src/fork_choice.rs +++ b/beacon_node/beacon_chain/src/fork_choice.rs @@ -52,7 +52,7 @@ impl ForkChoice { // been justified for at least 1 epoch ... If no such descendant exists, // set justified_head to finalized_head. 
let (start_state, start_block_root, start_block_slot) = { - let state = chain.current_state(); + let state = &chain.head().beacon_state; let (block_root, block_slot) = if state.current_epoch() + 1 > state.current_justified_checkpoint.epoch { diff --git a/beacon_node/beacon_chain/src/iter.rs b/beacon_node/beacon_chain/src/iter.rs new file mode 100644 index 000000000..f73e88afa --- /dev/null +++ b/beacon_node/beacon_chain/src/iter.rs @@ -0,0 +1,48 @@ +use store::iter::{BlockRootsIterator, StateRootsIterator}; +use types::{Hash256, Slot}; + +pub type ReverseBlockRootIterator<'a, E, S> = + ReverseHashAndSlotIterator>; +pub type ReverseStateRootIterator<'a, E, S> = + ReverseHashAndSlotIterator>; + +pub type ReverseHashAndSlotIterator = ReverseChainIterator<(Hash256, Slot), I>; + +/// Provides a wrapper for an iterator that returns a given `T` before it starts returning results of +/// the `Iterator`. +pub struct ReverseChainIterator { + first_value_used: bool, + first_value: T, + iter: I, +} + +impl ReverseChainIterator +where + T: Sized, + I: Iterator + Sized, +{ + pub fn new(first_value: T, iter: I) -> Self { + Self { + first_value_used: false, + first_value, + iter, + } + } +} + +impl Iterator for ReverseChainIterator +where + T: Clone, + I: Iterator, +{ + type Item = T; + + fn next(&mut self) -> Option { + if self.first_value_used { + self.iter.next() + } else { + self.first_value_used = true; + Some(self.first_value.clone()) + } + } +} diff --git a/beacon_node/beacon_chain/src/lib.rs b/beacon_node/beacon_chain/src/lib.rs index df1de153a..c2efcad13 100644 --- a/beacon_node/beacon_chain/src/lib.rs +++ b/beacon_node/beacon_chain/src/lib.rs @@ -2,6 +2,7 @@ mod beacon_chain; mod checkpoint; mod errors; mod fork_choice; +mod iter; mod metrics; mod persisted_beacon_chain; pub mod test_utils; diff --git a/beacon_node/beacon_chain/src/test_utils.rs b/beacon_node/beacon_chain/src/test_utils.rs index 6242b8a0a..cdcd8bb21 100644 --- a/beacon_node/beacon_chain/src/test_utils.rs 
+++ b/beacon_node/beacon_chain/src/test_utils.rs @@ -198,7 +198,7 @@ where fn get_state_at_slot(&self, state_slot: Slot) -> BeaconState { let state_root = self .chain - .rev_iter_state_roots(self.chain.current_state().slot - 1) + .rev_iter_state_roots(self.chain.head().beacon_state.slot - 1) .find(|(_hash, slot)| *slot == state_slot) .map(|(hash, _slot)| hash) .expect("could not find state root"); diff --git a/beacon_node/network/src/sync/simple_sync.rs b/beacon_node/network/src/sync/simple_sync.rs index ac001415c..215e37e7f 100644 --- a/beacon_node/network/src/sync/simple_sync.rs +++ b/beacon_node/network/src/sync/simple_sync.rs @@ -266,7 +266,7 @@ impl SimpleSync { fn root_at_slot(&self, target_slot: Slot) -> Option { self.chain - .rev_iter_best_block_roots(target_slot) + .rev_iter_block_roots(target_slot) .take(1) .find(|(_root, slot)| *slot == target_slot) .map(|(root, _slot)| root) @@ -280,6 +280,8 @@ impl SimpleSync { req: BeaconBlockRootsRequest, network: &mut NetworkContext, ) { + let state = &self.chain.head().beacon_state; + debug!( self.log, "BlockRootsRequest"; @@ -290,8 +292,8 @@ impl SimpleSync { let mut roots: Vec = self .chain - .rev_iter_best_block_roots(req.start_slot + req.count) - .take(req.count as usize) + .rev_iter_block_roots(std::cmp::min(req.start_slot + req.count, state.slot)) + .take_while(|(_root, slot)| req.start_slot <= *slot) .map(|(block_root, slot)| BlockRootSlot { slot, block_root }) .collect(); @@ -302,7 +304,7 @@ impl SimpleSync { "peer" => format!("{:?}", peer_id), "msg" => "Failed to return all requested hashes", "start_slot" => req.start_slot, - "current_slot" => self.chain.current_state().slot, + "current_slot" => self.chain.present_slot(), "requested" => req.count, "returned" => roots.len(), ); @@ -389,6 +391,8 @@ impl SimpleSync { req: BeaconBlockHeadersRequest, network: &mut NetworkContext, ) { + let state = &self.chain.head().beacon_state; + debug!( self.log, "BlockHeadersRequest"; @@ -399,13 +403,10 @@ impl SimpleSync { 
let count = req.max_headers; // Collect the block roots. - // - // Instead of using `chain.rev_iter_blocks` we collect the roots first. This avoids - // unnecessary block deserialization when `req.skip_slots > 0`. let mut roots: Vec = self .chain - .rev_iter_best_block_roots(req.start_slot + count) - .take(count as usize) + .rev_iter_block_roots(std::cmp::min(req.start_slot + count, state.slot)) + .take_while(|(_root, slot)| req.start_slot <= *slot) .map(|(root, _slot)| root) .collect(); diff --git a/beacon_node/rest_api/src/beacon_node.rs b/beacon_node/rest_api/src/beacon_node.rs index 87d2d3cdc..bd8d98a53 100644 --- a/beacon_node/rest_api/src/beacon_node.rs +++ b/beacon_node/rest_api/src/beacon_node.rs @@ -54,7 +54,7 @@ fn get_version(_req: Request) -> APIResult { fn get_genesis_time(req: Request) -> APIResult { let beacon_chain = req.extensions().get::>>().unwrap(); let gen_time = { - let state = beacon_chain.current_state(); + let state = &beacon_chain.head().beacon_state; state.genesis_time }; let body = Body::from( diff --git a/beacon_node/rpc/src/attestation.rs b/beacon_node/rpc/src/attestation.rs index 5ea8368fd..20425d292 100644 --- a/beacon_node/rpc/src/attestation.rs +++ b/beacon_node/rpc/src/attestation.rs @@ -40,7 +40,11 @@ impl AttestationService for AttestationServiceInstance { // verify the slot, drop lock on state afterwards { let slot_requested = req.get_slot(); - let state = &self.chain.current_state(); + // TODO: this whole module is legacy and not maintained well. 
+ let state = &self + .chain + .speculative_state() + .expect("This is legacy code and should be removed"); // Start by performing some checks // Check that the AttestationData is for the current slot (otherwise it will not be valid) diff --git a/beacon_node/rpc/src/validator.rs b/beacon_node/rpc/src/validator.rs index b13303e25..080c828a7 100644 --- a/beacon_node/rpc/src/validator.rs +++ b/beacon_node/rpc/src/validator.rs @@ -29,7 +29,11 @@ impl ValidatorService for ValidatorServiceInstance { trace!(self.log, "RPC request"; "endpoint" => "GetValidatorDuties", "epoch" => req.get_epoch()); let spec = &self.chain.spec; - let state = &self.chain.current_state(); + // TODO: this whole module is legacy and not maintained well. + let state = &self + .chain + .speculative_state() + .expect("This is legacy code and should be removed"); let epoch = Epoch::from(req.get_epoch()); let mut resp = GetDutiesResponse::new(); let resp_validators = resp.mut_active_validators(); diff --git a/beacon_node/store/src/iter.rs b/beacon_node/store/src/iter.rs index 55c525b11..fc5d80679 100644 --- a/beacon_node/store/src/iter.rs +++ b/beacon_node/store/src/iter.rs @@ -4,20 +4,23 @@ use std::sync::Arc; use types::{BeaconBlock, BeaconState, BeaconStateError, EthSpec, Hash256, Slot}; /// Implemented for types that have ancestors (e.g., blocks, states) that may be iterated over. +/// +/// ## Note +/// +/// It is assumed that all ancestors for this object are stored in the database. If this is not the +/// case, the iterator will start returning `None` prior to genesis. pub trait AncestorIter { /// Returns an iterator over the roots of the ancestors of `self`. fn try_iter_ancestor_roots(&self, store: Arc) -> Option; } -impl<'a, U: Store, E: EthSpec> AncestorIter> - for BeaconBlock -{ +impl<'a, U: Store, E: EthSpec> AncestorIter> for BeaconBlock { /// Iterates across all the prior block roots of `self`, starting at the most recent and ending /// at genesis. 
- fn try_iter_ancestor_roots(&self, store: Arc) -> Option> { + fn try_iter_ancestor_roots(&self, store: Arc) -> Option> { let state = store.get::>(&self.state_root).ok()??; - Some(BestBlockRootsIterator::owned(store, state, self.slot)) + Some(BlockRootsIterator::owned(store, state, self.slot)) } } @@ -116,11 +119,6 @@ impl<'a, T: EthSpec, U: Store> Iterator for BlockIterator<'a, T, U> { /// exhausted. /// /// Returns `None` for roots prior to genesis or when there is an error reading from `Store`. -/// -/// ## Notes -/// -/// See [`BestBlockRootsIterator`](struct.BestBlockRootsIterator.html), which has different -/// `start_slot` logic. #[derive(Clone)] pub struct BlockRootsIterator<'a, T: EthSpec, U> { store: Arc, @@ -180,104 +178,6 @@ impl<'a, T: EthSpec, U: Store> Iterator for BlockRootsIterator<'a, T, U> { } } -/// Iterates backwards through block roots with `start_slot` highest possible value -/// `<= beacon_state.slot`. -/// -/// The distinction between `BestBlockRootsIterator` and `BlockRootsIterator` is: -/// -/// - `BestBlockRootsIterator` uses best-effort slot. When `start_slot` is greater than the latest available block root -/// on `beacon_state`, returns `Some(root, slot)` where `slot` is the latest available block -/// root. -/// - `BlockRootsIterator` is strict about `start_slot`. When `start_slot` is greater than the latest available block root -/// on `beacon_state`, returns `None`. -/// -/// This is distinct from `BestBlockRootsIterator`. -/// -/// Uses the `block_roots` field of `BeaconState` to as the source of block roots and will -/// perform a lookup on the `Store` for a prior `BeaconState` if `block_roots` has been -/// exhausted. -/// -/// Returns `None` for roots prior to genesis or when there is an error reading from `Store`. 
-#[derive(Clone)] -pub struct BestBlockRootsIterator<'a, T: EthSpec, U> { - store: Arc, - beacon_state: Cow<'a, BeaconState>, - slot: Slot, -} - -impl<'a, T: EthSpec, U: Store> BestBlockRootsIterator<'a, T, U> { - /// Create a new iterator over all block roots in the given `beacon_state` and prior states. - pub fn new(store: Arc, beacon_state: &'a BeaconState, start_slot: Slot) -> Self { - let mut slot = start_slot; - if slot >= beacon_state.slot { - // Slot may be too high. - slot = beacon_state.slot; - if beacon_state.get_block_root(slot).is_err() { - slot -= 1; - } - } - - Self { - store, - beacon_state: Cow::Borrowed(beacon_state), - slot: slot + 1, - } - } - - /// Create a new iterator over all block roots in the given `beacon_state` and prior states. - pub fn owned(store: Arc, beacon_state: BeaconState, start_slot: Slot) -> Self { - let mut slot = start_slot; - if slot >= beacon_state.slot { - // Slot may be too high. - slot = beacon_state.slot; - // TODO: Use a function other than `get_block_root` as this will always return `Err()` - // for slot = state.slot. - if beacon_state.get_block_root(slot).is_err() { - slot -= 1; - } - } - - Self { - store, - beacon_state: Cow::Owned(beacon_state), - slot: slot + 1, - } - } -} - -impl<'a, T: EthSpec, U: Store> Iterator for BestBlockRootsIterator<'a, T, U> { - type Item = (Hash256, Slot); - - fn next(&mut self) -> Option { - if self.slot == 0 { - // End of Iterator - return None; - } - - self.slot -= 1; - - match self.beacon_state.get_block_root(self.slot) { - Ok(root) => Some((*root, self.slot)), - Err(BeaconStateError::SlotOutOfBounds) => { - // Read a `BeaconState` from the store that has access to prior historical root. - let beacon_state: BeaconState = { - // Load the earliest state from disk. - let new_state_root = self.beacon_state.get_oldest_state_root().ok()?; - - self.store.get(&new_state_root).ok()? 
- }?; - - self.beacon_state = Cow::Owned(beacon_state); - - let root = self.beacon_state.get_block_root(self.slot).ok()?; - - Some((*root, self.slot)) - } - _ => None, - } - } -} - #[cfg(test)] mod test { use super::*; @@ -337,49 +237,6 @@ mod test { } } - #[test] - fn best_block_root_iter() { - let store = Arc::new(MemoryStore::open()); - let slots_per_historical_root = MainnetEthSpec::slots_per_historical_root(); - - let mut state_a: BeaconState = get_state(); - let mut state_b: BeaconState = get_state(); - - state_a.slot = Slot::from(slots_per_historical_root); - state_b.slot = Slot::from(slots_per_historical_root * 2); - - let mut hashes = (0..).into_iter().map(|i| Hash256::from(i)); - - for root in &mut state_a.block_roots[..] { - *root = hashes.next().unwrap() - } - for root in &mut state_b.block_roots[..] { - *root = hashes.next().unwrap() - } - - let state_a_root = hashes.next().unwrap(); - state_b.state_roots[0] = state_a_root; - store.put(&state_a_root, &state_a).unwrap(); - - let iter = BestBlockRootsIterator::new(store.clone(), &state_b, state_b.slot); - - assert!( - iter.clone().find(|(_root, slot)| *slot == 0).is_some(), - "iter should contain zero slot" - ); - - let mut collected: Vec<(Hash256, Slot)> = iter.collect(); - collected.reverse(); - - let expected_len = 2 * MainnetEthSpec::slots_per_historical_root(); - - assert_eq!(collected.len(), expected_len); - - for i in 0..expected_len { - assert_eq!(collected[i].0, Hash256::from(i as u64)); - } - } - #[test] fn state_root_iter() { let store = Arc::new(MemoryStore::open()); diff --git a/eth2/lmd_ghost/tests/test.rs b/eth2/lmd_ghost/tests/test.rs index fbe385560..0ac263638 100644 --- a/eth2/lmd_ghost/tests/test.rs +++ b/eth2/lmd_ghost/tests/test.rs @@ -10,7 +10,7 @@ use lmd_ghost::{LmdGhost, ThreadSafeReducedTree as BaseThreadSafeReducedTree}; use rand::{prelude::*, rngs::StdRng}; use std::sync::Arc; use store::{ - iter::{AncestorIter, BestBlockRootsIterator}, + iter::{AncestorIter, 
BlockRootsIterator}, MemoryStore, Store, }; use types::{BeaconBlock, EthSpec, Hash256, MinimalEthSpec, Slot}; @@ -159,7 +159,7 @@ fn get_ancestor_roots( .expect("block should exist") .expect("store should not error"); - as AncestorIter<_, BestBlockRootsIterator>>::try_iter_ancestor_roots( + as AncestorIter<_, BlockRootsIterator>>::try_iter_ancestor_roots( &block, store, ) .expect("should be able to create ancestor iter") From 40c0b70b22de83cb4fea86250397fa568d08dbc9 Mon Sep 17 00:00:00 2001 From: Age Manning Date: Wed, 24 Jul 2019 21:31:49 +1000 Subject: [PATCH 011/305] Add interop chain spec and rename chain_id --- beacon_node/http_server/src/api.rs | 2 +- beacon_node/network/src/sync/simple_sync.rs | 10 +++---- beacon_node/rpc/src/beacon_node.rs | 2 +- beacon_node/src/main.rs | 3 +- beacon_node/src/run.rs | 18 +++++++++++- .../src/beacon_state/beacon_state_types.rs | 20 +++++++++++++ eth2/types/src/chain_spec.rs | 28 +++++++++++++++++-- protos/src/services.proto | 2 +- tests/ef_tests/eth2.0-spec-tests | 2 +- validator_client/src/main.rs | 9 ++++-- validator_client/src/service.rs | 6 ++-- 11 files changed, 83 insertions(+), 19 deletions(-) diff --git a/beacon_node/http_server/src/api.rs b/beacon_node/http_server/src/api.rs index a91080899..8cb023b02 100644 --- a/beacon_node/http_server/src/api.rs +++ b/beacon_node/http_server/src/api.rs @@ -64,7 +64,7 @@ fn handle_fork(req: &mut Request) -> IronResult(beacon_chain: &BeaconChain) -> HelloMes let state = &beacon_chain.head().beacon_state; HelloMessage { - //TODO: Correctly define the chain/network id - network_id: spec.chain_id, - chain_id: u64::from(spec.chain_id), - latest_finalized_root: state.finalized_checkpoint.root, - latest_finalized_epoch: state.finalized_checkpoint.epoch, + network_id: spec.network_id, + //TODO: Correctly define the chain id + chain_id: spec.network_id as u64, + latest_finalized_root: state.finalized_root, + latest_finalized_epoch: state.finalized_epoch, best_root: 
beacon_chain.head().beacon_block_root, best_slot: state.slot, } diff --git a/beacon_node/rpc/src/beacon_node.rs b/beacon_node/rpc/src/beacon_node.rs index 631601ac9..5d635c9d1 100644 --- a/beacon_node/rpc/src/beacon_node.rs +++ b/beacon_node/rpc/src/beacon_node.rs @@ -37,7 +37,7 @@ impl BeaconNodeService for BeaconNodeServiceInstance { node_info.set_fork(fork); node_info.set_genesis_time(genesis_time); node_info.set_genesis_slot(spec.genesis_slot.as_u64()); - node_info.set_chain_id(u32::from(spec.chain_id)); + node_info.set_network_id(u32::from(spec.network_id)); // send the node_info the requester let error_log = self.log.clone(); diff --git a/beacon_node/src/main.rs b/beacon_node/src/main.rs index dd0c695b4..c61e0c6b6 100644 --- a/beacon_node/src/main.rs +++ b/beacon_node/src/main.rs @@ -136,6 +136,7 @@ fn main() { .help("Listen port for the HTTP server.") .takes_value(true), ) + /* Client related arguments */ .arg( Arg::with_name("api") .long("api") @@ -182,7 +183,7 @@ fn main() { from disk. A spec will be written to disk after this flag is used, so it is primarily used for creating eth2 spec files.") .takes_value(true) - .possible_values(&["mainnet", "minimal"]) + .possible_values(&["mainnet", "minimal", "interop"]) .default_value("minimal"), ) .arg( diff --git a/beacon_node/src/run.rs b/beacon_node/src/run.rs index 010993988..c16d23e5f 100644 --- a/beacon_node/src/run.rs +++ b/beacon_node/src/run.rs @@ -13,7 +13,7 @@ use tokio::runtime::Builder; use tokio::runtime::Runtime; use tokio::runtime::TaskExecutor; use tokio_timer::clock::Clock; -use types::{MainnetEthSpec, MinimalEthSpec}; +use types::{InteropEthSpec, MainnetEthSpec, MinimalEthSpec}; /// Reads the configuration and initializes a `BeaconChain` with the required types and parameters. 
/// @@ -90,6 +90,22 @@ pub fn run_beacon_node( runtime, log, ), + ("disk", "interop") => run::>( + &db_path, + client_config, + eth2_config, + executor, + runtime, + log, + ), + ("memory", "interop") => run::>( + &db_path, + client_config, + eth2_config, + executor, + runtime, + log, + ), (db_type, spec) => { error!(log, "Unknown runtime configuration"; "spec_constants" => spec, "db_type" => db_type); Err("Unknown specification and/or db_type.".into()) diff --git a/eth2/types/src/beacon_state/beacon_state_types.rs b/eth2/types/src/beacon_state/beacon_state_types.rs index 1dc34e195..dd6ca3272 100644 --- a/eth2/types/src/beacon_state/beacon_state_types.rs +++ b/eth2/types/src/beacon_state/beacon_state_types.rs @@ -200,3 +200,23 @@ impl EthSpec for MinimalEthSpec { } pub type MinimalBeaconState = BeaconState; + +/// Interop testnet spec +#[derive(Clone, PartialEq, Debug, Default, Serialize, Deserialize)] +pub struct InteropEthSpec; + +impl EthSpec for InteropEthSpec { + type ShardCount = U8; + type SlotsPerHistoricalRoot = U64; + type LatestRandaoMixesLength = U64; + type LatestActiveIndexRootsLength = U64; + type LatestSlashedExitLength = U64; + type SlotsPerEpoch = U8; + type GenesisEpoch = U0; + + fn default_spec() -> ChainSpec { + ChainSpec::interop() + } +} + +pub type InteropBeaconState = BeaconState; diff --git a/eth2/types/src/chain_spec.rs b/eth2/types/src/chain_spec.rs index 2128c6ef1..d6eaa123d 100644 --- a/eth2/types/src/chain_spec.rs +++ b/eth2/types/src/chain_spec.rs @@ -92,7 +92,7 @@ pub struct ChainSpec { domain_transfer: u32, pub boot_nodes: Vec, - pub chain_id: u8, + pub network_id: u8, } impl ChainSpec { @@ -190,7 +190,7 @@ impl ChainSpec { * Network specific */ boot_nodes: vec![], - chain_id: 1, // mainnet chain id + network_id: 1, // mainnet network id } } @@ -202,13 +202,35 @@ impl ChainSpec { pub fn minimal() -> Self { // Note: bootnodes to be updated when static nodes exist. 
let boot_nodes = vec![]; + let genesis_slot = Slot::new(0); Self { target_committee_size: 4, shuffle_round_count: 10, min_genesis_active_validator_count: 64, max_epochs_per_crosslink: 4, - chain_id: 2, // lighthouse testnet chain id + min_attestation_inclusion_delay: 2, + genesis_slot, + network_id: 2, // lighthouse testnet network id + boot_nodes, + ..ChainSpec::mainnet() + } + } + + /// Interop testing spec + /// + /// This allows us to customize a chain spec for interop testing. + pub fn interop() -> Self { + let genesis_slot = Slot::new(0); + let boot_nodes = vec![]; + + Self { + seconds_per_slot: 12, + target_committee_size: 4, + shuffle_round_count: 10, + min_attestation_inclusion_delay: 2, + genesis_slot, + network_id: 13, boot_nodes, ..ChainSpec::mainnet() } diff --git a/protos/src/services.proto b/protos/src/services.proto index bf23ff391..ba0462bbe 100644 --- a/protos/src/services.proto +++ b/protos/src/services.proto @@ -45,7 +45,7 @@ service AttestationService { message NodeInfoResponse { string version = 1; Fork fork = 2; - uint32 chain_id = 3; + uint32 network_id = 3; uint64 genesis_time = 4; uint64 genesis_slot = 5; } diff --git a/tests/ef_tests/eth2.0-spec-tests b/tests/ef_tests/eth2.0-spec-tests index aaa1673f5..d40578264 160000 --- a/tests/ef_tests/eth2.0-spec-tests +++ b/tests/ef_tests/eth2.0-spec-tests @@ -1 +1 @@ -Subproject commit aaa1673f508103e11304833e0456e4149f880065 +Subproject commit d405782646190595927cc0a59f504f7b00a760f3 diff --git a/validator_client/src/main.rs b/validator_client/src/main.rs index bd3919b5a..756f82991 100644 --- a/validator_client/src/main.rs +++ b/validator_client/src/main.rs @@ -14,7 +14,7 @@ use protos::services_grpc::ValidatorServiceClient; use slog::{crit, error, info, o, Drain, Level}; use std::fs; use std::path::PathBuf; -use types::{Keypair, MainnetEthSpec, MinimalEthSpec}; +use types::{InteropEthSpec, Keypair, MainnetEthSpec, MinimalEthSpec}; pub const DEFAULT_SPEC: &str = "minimal"; pub const 
DEFAULT_DATA_DIR: &str = ".lighthouse-validator"; @@ -70,7 +70,7 @@ fn main() { .short("s") .help("The title of the spec constants for chain config.") .takes_value(true) - .possible_values(&["mainnet", "minimal"]) + .possible_values(&["mainnet", "minimal", "interop"]) .default_value("minimal"), ) .arg( @@ -214,6 +214,11 @@ fn main() { eth2_config, log.clone(), ), + "interop" => ValidatorService::::start::( + client_config, + eth2_config, + log.clone(), + ), other => { crit!(log, "Unknown spec constants"; "title" => other); return; diff --git a/validator_client/src/service.rs b/validator_client/src/service.rs index 3f99efe36..c4ccbc204 100644 --- a/validator_client/src/service.rs +++ b/validator_client/src/service.rs @@ -107,12 +107,12 @@ impl Service Service node_info.version.clone(), "Chain ID" => node_info.chain_id, "Genesis time" => genesis_time); + info!(log,"Beacon node connected"; "Node Version" => node_info.version.clone(), "Chain ID" => node_info.network_id, "Genesis time" => genesis_time); let proto_fork = node_info.get_fork(); let mut previous_version: [u8; 4] = [0; 4]; From 15c4062761a3ae855bdc237d5edcdf9bf9c8ae44 Mon Sep 17 00:00:00 2001 From: Age Manning Date: Wed, 24 Jul 2019 22:25:37 +1000 Subject: [PATCH 012/305] Add ability to connect to raw libp2p nodes --- beacon_node/eth2-libp2p/src/config.rs | 16 ++++++++++++++++ beacon_node/eth2-libp2p/src/discovery.rs | 6 +++--- beacon_node/eth2-libp2p/src/service.rs | 11 +++++++++++ beacon_node/src/main.rs | 21 ++++++++++++++------- 4 files changed, 44 insertions(+), 10 deletions(-) diff --git a/beacon_node/eth2-libp2p/src/config.rs b/beacon_node/eth2-libp2p/src/config.rs index 7391dba8a..d04eae14b 100644 --- a/beacon_node/eth2-libp2p/src/config.rs +++ b/beacon_node/eth2-libp2p/src/config.rs @@ -1,6 +1,7 @@ use clap::ArgMatches; use enr::Enr; use libp2p::gossipsub::{GossipsubConfig, GossipsubConfigBuilder}; +use libp2p::Multiaddr; use serde_derive::{Deserialize, Serialize}; use std::path::PathBuf; use 
std::time::Duration; @@ -39,6 +40,9 @@ pub struct Config { /// List of nodes to initially connect to. pub boot_nodes: Vec, + /// List of libp2p nodes to initially connect to. + pub libp2p_nodes: Vec, + /// Client version pub client_version: String, @@ -66,6 +70,7 @@ impl Default for Config { .heartbeat_interval(Duration::from_secs(20)) .build(), boot_nodes: vec![], + libp2p_nodes: vec![], client_version: version::version(), topics: Vec::new(), } @@ -118,6 +123,17 @@ impl Config { .collect::, _>>()?; } + if let Some(libp2p_addresses_str) = args.value_of("libp2p-addresses") { + self.libp2p_nodes = libp2p_addresses_str + .split(',') + .map(|multiaddr| { + multiaddr + .parse() + .map_err(|_| format!("Invalid Multiaddr: {}", multiaddr)) + }) + .collect::, _>>()?; + } + if let Some(discovery_address_str) = args.value_of("discovery-address") { self.discovery_address = discovery_address_str .parse() diff --git a/beacon_node/eth2-libp2p/src/discovery.rs b/beacon_node/eth2-libp2p/src/discovery.rs index c2f008756..96cf71846 100644 --- a/beacon_node/eth2-libp2p/src/discovery.rs +++ b/beacon_node/eth2-libp2p/src/discovery.rs @@ -37,6 +37,9 @@ pub struct Discovery { /// The target number of connected peers on the libp2p interface. max_peers: usize, + /// directory to save ENR to + enr_dir: String, + /// The delay between peer discovery searches. peer_discovery_delay: Delay, @@ -54,9 +57,6 @@ pub struct Discovery { /// Logger for the discovery behaviour. 
log: slog::Logger, - - /// directory to save ENR to - enr_dir: String, } impl Discovery { diff --git a/beacon_node/eth2-libp2p/src/service.rs b/beacon_node/eth2-libp2p/src/service.rs index 05ae9e473..5c7c0c7f1 100644 --- a/beacon_node/eth2-libp2p/src/service.rs +++ b/beacon_node/eth2-libp2p/src/service.rs @@ -76,6 +76,17 @@ impl Service { ), }; + // attempt to connect to user-input libp2p nodes + for multiaddr in config.libp2p_nodes { + match Swarm::dial_addr(&mut swarm, multiaddr.clone()) { + Ok(()) => debug!(log, "Dialing libp2p node: {}", multiaddr), + Err(err) => debug!( + log, + "Could not connect to node: {} error: {:?}", multiaddr, err + ), + }; + } + // subscribe to default gossipsub topics let mut topics = vec![]; //TODO: Handle multiple shard attestations. For now we simply use a separate topic for diff --git a/beacon_node/src/main.rs b/beacon_node/src/main.rs index c61e0c6b6..9a1af2e08 100644 --- a/beacon_node/src/main.rs +++ b/beacon_node/src/main.rs @@ -56,6 +56,13 @@ fn main() { .help("The address lighthouse will listen for UDP and TCP connections. (default 127.0.0.1).") .takes_value(true), ) + .arg( + Arg::with_name("port") + .long("port") + .value_name("Lighthouse Port") + .help("The TCP/UDP port to listen on. The UDP port can be modified by the --discovery-port flag.") + .takes_value(true), + ) .arg( Arg::with_name("maxpeers") .long("maxpeers") @@ -70,13 +77,6 @@ fn main() { .help("One or more comma-delimited base64-encoded ENR's to bootstrap the p2p network.") .takes_value(true), ) - .arg( - Arg::with_name("port") - .long("port") - .value_name("Lighthouse Port") - .help("The TCP/UDP port to listen on. 
The UDP port can be modified by the --discovery-port flag.") - .takes_value(true), - ) .arg( Arg::with_name("discovery-port") .long("disc-port") @@ -91,6 +91,13 @@ fn main() { .help("The IP address to broadcast to other peers on how to reach this node.") .takes_value(true), ) + .arg( + Arg::with_name("libp2p-addresses") + .long("libp2p-addresses") + .value_name("MULTIADDR") + .help("One or more comma-delimited multiaddrs to manually connect to a libp2p peer without an ENR.") + .takes_value(true), + ) /* * gRPC parameters. */ From 04ce9ec95e5d292d348fb88711187f786f1fc2eb Mon Sep 17 00:00:00 2001 From: Age Manning Date: Fri, 26 Jul 2019 14:43:42 +1000 Subject: [PATCH 013/305] Adds Identify protocol, clean up RPC protocol name handling --- beacon_node/eth2-libp2p/src/behaviour.rs | 101 ++++++++++++++------ beacon_node/eth2-libp2p/src/rpc/protocol.rs | 92 +++++++----------- 2 files changed, 107 insertions(+), 86 deletions(-) diff --git a/beacon_node/eth2-libp2p/src/behaviour.rs b/beacon_node/eth2-libp2p/src/behaviour.rs index 37e3419a3..33acd41e1 100644 --- a/beacon_node/eth2-libp2p/src/behaviour.rs +++ b/beacon_node/eth2-libp2p/src/behaviour.rs @@ -10,39 +10,44 @@ use libp2p::{ }, discv5::Discv5Event, gossipsub::{Gossipsub, GossipsubEvent}, + identify::{Identify, IdentifyEvent}, ping::{Ping, PingConfig, PingEvent}, tokio_io::{AsyncRead, AsyncWrite}, NetworkBehaviour, PeerId, }; -use slog::{o, trace, warn}; +use slog::{debug, o, trace, warn}; use ssz::{ssz_encode, Decode, DecodeError, Encode}; use std::num::NonZeroU32; use std::time::Duration; -use types::{Attestation, BeaconBlock, EthSpec}; +use types::{Attestation, BeaconBlock}; /// Builds the network behaviour that manages the core protocols of eth2. /// This core behaviour is managed by `Behaviour` which adds peer management to all core /// behaviours. 
#[derive(NetworkBehaviour)] -#[behaviour(out_event = "BehaviourEvent", poll_method = "poll")] -pub struct Behaviour { +#[behaviour(out_event = "BehaviourEvent", poll_method = "poll")] +pub struct Behaviour { /// The routing pub-sub mechanism for eth2. gossipsub: Gossipsub, - /// The serenity RPC specified in the wire-0 protocol. - serenity_rpc: RPC, + /// The Eth2 RPC specified in the wire-0 protocol. + eth2_rpc: RPC, /// Keep regular connection to peers and disconnect if absent. + // TODO: Remove Libp2p ping in favour of discv5 ping. ping: Ping, - /// Kademlia for peer discovery. + // TODO: Using id for initial interop. This will be removed by mainnet. + /// Provides IP addresses and peer information. + identify: Identify, + /// Discovery behaviour. discovery: Discovery, #[behaviour(ignore)] /// The events generated by this behaviour to be consumed in the swarm poll. - events: Vec>, + events: Vec, /// Logger for behaviour actions. #[behaviour(ignore)] log: slog::Logger, } -impl Behaviour { +impl Behaviour { pub fn new( local_key: &Keypair, net_conf: &NetworkConfig, @@ -50,17 +55,25 @@ impl Behaviour { ) -> error::Result { let local_peer_id = local_key.public().clone().into_peer_id(); let behaviour_log = log.new(o!()); + let ping_config = PingConfig::new() .with_timeout(Duration::from_secs(30)) .with_interval(Duration::from_secs(20)) .with_max_failures(NonZeroU32::new(2).expect("2 != 0")) .with_keep_alive(false); + let identify = Identify::new( + "lighthouse/libp2p".into(), + version::version(), + local_key.public(), + ); + Ok(Behaviour { - serenity_rpc: RPC::new(log), + eth2_rpc: RPC::new(log), gossipsub: Gossipsub::new(local_peer_id.clone(), net_conf.gs_config.clone()), discovery: Discovery::new(local_key, net_conf, log)?, ping: Ping::new(ping_config), + identify, events: Vec::new(), log: behaviour_log, }) @@ -68,8 +81,8 @@ impl Behaviour { } // Implement the NetworkBehaviourEventProcess trait so that we can derive NetworkBehaviour for Behaviour -impl 
NetworkBehaviourEventProcess - for Behaviour +impl NetworkBehaviourEventProcess + for Behaviour { fn inject_event(&mut self, event: GossipsubEvent) { match event { @@ -101,8 +114,8 @@ impl NetworkBehaviourEventProces } } -impl NetworkBehaviourEventProcess - for Behaviour +impl NetworkBehaviourEventProcess + for Behaviour { fn inject_event(&mut self, event: RPCMessage) { match event { @@ -119,19 +132,19 @@ impl NetworkBehaviourEventProces } } -impl NetworkBehaviourEventProcess - for Behaviour +impl NetworkBehaviourEventProcess + for Behaviour { fn inject_event(&mut self, _event: PingEvent) { // not interested in ping responses at the moment. } } -impl Behaviour { +impl Behaviour { /// Consumes the events list when polled. fn poll( &mut self, - ) -> Async>> { + ) -> Async> { if !self.events.is_empty() { return Async::Ready(NetworkBehaviourAction::GenerateEvent(self.events.remove(0))); } @@ -140,8 +153,36 @@ impl Behaviour { } } -impl NetworkBehaviourEventProcess - for Behaviour +impl NetworkBehaviourEventProcess + for Behaviour +{ + fn inject_event(&mut self, event: IdentifyEvent) { + match event { + IdentifyEvent::Identified { + peer_id, mut info, .. + } => { + if info.listen_addrs.len() > 20 { + debug!( + self.log, + "More than 20 addresses have been identified, truncating" + ); + info.listen_addrs.truncate(20); + } + debug!(self.log, "Identified Peer"; "Peer" => format!("{}", peer_id), + "Protocol Version" => info.protocol_version, + "Agent Version" => info.agent_version, + "Listening Addresses" => format!("{:?}", info.listen_addrs), + "Protocols" => format!("{:?}", info.protocols) + ); + } + IdentifyEvent::Error { .. } => {} + IdentifyEvent::SendBack { .. } => {} + } + } +} + +impl NetworkBehaviourEventProcess + for Behaviour { fn inject_event(&mut self, _event: Discv5Event) { // discv5 has no events to inject @@ -149,7 +190,7 @@ impl NetworkBehaviourEventProces } /// Implements the combined behaviour for the libp2p service. 
-impl Behaviour { +impl Behaviour { /* Pubsub behaviour functions */ /// Subscribes to a gossipsub topic. @@ -158,7 +199,7 @@ impl Behaviour { } /// Publishes a message on the pubsub (gossipsub) behaviour. - pub fn publish(&mut self, topics: Vec, message: PubsubMessage) { + pub fn publish(&mut self, topics: Vec, message: PubsubMessage) { let message_bytes = ssz_encode(&message); for topic in topics { self.gossipsub.publish(topic, message_bytes.clone()); @@ -169,7 +210,7 @@ impl Behaviour { /// Sends an RPC Request/Response via the RPC protocol. pub fn send_rpc(&mut self, peer_id: PeerId, rpc_event: RPCEvent) { - self.serenity_rpc.send_rpc(peer_id, rpc_event); + self.eth2_rpc.send_rpc(peer_id, rpc_event); } /* Discovery / Peer management functions */ @@ -179,28 +220,28 @@ impl Behaviour { } /// The types of events than can be obtained from polling the behaviour. -pub enum BehaviourEvent { +pub enum BehaviourEvent { RPC(PeerId, RPCEvent), PeerDialed(PeerId), PeerDisconnected(PeerId), GossipMessage { source: PeerId, topics: Vec, - message: Box>, + message: Box, }, } /// Messages that are passed to and from the pubsub (Gossipsub) behaviour. #[derive(Debug, Clone, PartialEq)] -pub enum PubsubMessage { +pub enum PubsubMessage { /// Gossipsub message providing notification of a new block. - Block(BeaconBlock), + Block(BeaconBlock), /// Gossipsub message providing notification of a new attestation. - Attestation(Attestation), + Attestation(Attestation), } //TODO: Correctly encode/decode enums. Prefixing with integer for now. 
-impl Encode for PubsubMessage { +impl Encode for PubsubMessage { fn is_ssz_fixed_len() -> bool { false } @@ -229,7 +270,7 @@ impl Encode for PubsubMessage { } } -impl Decode for PubsubMessage { +impl Decode for PubsubMessage { fn is_ssz_fixed_len() -> bool { false } diff --git a/beacon_node/eth2-libp2p/src/rpc/protocol.rs b/beacon_node/eth2-libp2p/src/rpc/protocol.rs index 8729de3a7..b606fc743 100644 --- a/beacon_node/eth2-libp2p/src/rpc/protocol.rs +++ b/beacon_node/eth2-libp2p/src/rpc/protocol.rs @@ -8,7 +8,7 @@ use futures::{ future::{self, FutureResult}, sink, stream, Sink, Stream, }; -use libp2p::core::{upgrade, InboundUpgrade, OutboundUpgrade, UpgradeInfo}; +use libp2p::core::{upgrade, InboundUpgrade, OutboundUpgrade, ProtocolName, UpgradeInfo}; use std::io; use std::time::Duration; use tokio::codec::Framed; @@ -28,24 +28,22 @@ const REQUEST_TIMEOUT: u64 = 3; pub struct RPCProtocol; impl UpgradeInfo for RPCProtocol { - type Info = RawProtocolId; + type Info = ProtocolId; type InfoIter = Vec; fn protocol_info(&self) -> Self::InfoIter { vec![ - ProtocolId::new("hello", "1.0.0", "ssz").into(), - ProtocolId::new("goodbye", "1.0.0", "ssz").into(), - ProtocolId::new("beacon_block_roots", "1.0.0", "ssz").into(), - ProtocolId::new("beacon_block_headers", "1.0.0", "ssz").into(), - ProtocolId::new("beacon_block_bodies", "1.0.0", "ssz").into(), + ProtocolId::new("hello", "1.0.0", "ssz"), + ProtocolId::new("goodbye", "1.0.0", "ssz"), + ProtocolId::new("beacon_block_roots", "1.0.0", "ssz"), + ProtocolId::new("beacon_block_headers", "1.0.0", "ssz"), + ProtocolId::new("beacon_block_bodies", "1.0.0", "ssz"), ] } } -/// The raw protocol id sent over the wire. -type RawProtocolId = Vec; - /// Tracks the types in a protocol id. +#[derive(Clone)] pub struct ProtocolId { /// The rpc message type/name. pub message_name: String, @@ -55,44 +53,31 @@ pub struct ProtocolId { /// The encoding of the RPC. 
pub encoding: String, + + /// The protocol id that is formed from the above fields. + protocol_id: String, } /// An RPC protocol ID. impl ProtocolId { pub fn new(message_name: &str, version: &str, encoding: &str) -> Self { + let protocol_id = format!( + "{}/{}/{}/{}", + PROTOCOL_PREFIX, message_name, version, encoding + ); + ProtocolId { message_name: message_name.into(), version: version.into(), encoding: encoding.into(), + protocol_id, } } - - /// Converts a raw RPC protocol id string into an `RPCProtocolId` - pub fn from_bytes(bytes: &[u8]) -> Result { - let protocol_string = String::from_utf8(bytes.to_vec()) - .map_err(|_| RPCError::InvalidProtocol("Invalid protocol Id"))?; - let protocol_list: Vec<&str> = protocol_string.as_str().split('/').take(7).collect(); - - if protocol_list.len() != 7 { - return Err(RPCError::InvalidProtocol("Not enough '/'")); - } - - Ok(ProtocolId { - message_name: protocol_list[4].into(), - version: protocol_list[5].into(), - encoding: protocol_list[6].into(), - }) - } } -impl Into for ProtocolId { - fn into(self) -> RawProtocolId { - format!( - "{}/{}/{}/{}", - PROTOCOL_PREFIX, self.message_name, self.version, self.encoding - ) - .as_bytes() - .to_vec() +impl ProtocolName for ProtocolId { + fn protocol_name(&self) -> &[u8] { + self.protocol_id.as_bytes() } } @@ -127,16 +112,11 @@ where fn upgrade_inbound( self, socket: upgrade::Negotiated, - protocol: RawProtocolId, + protocol: ProtocolId, ) -> Self::Future { - // TODO: Verify this - let protocol_id = - ProtocolId::from_bytes(&protocol).expect("Can decode all supported protocols"); - - match protocol_id.encoding.as_str() { + match protocol.encoding.as_str() { "ssz" | _ => { - let ssz_codec = - BaseInboundCodec::new(SSZInboundCodec::new(protocol_id, MAX_RPC_SIZE)); + let ssz_codec = BaseInboundCodec::new(SSZInboundCodec::new(protocol, MAX_RPC_SIZE)); let codec = InboundCodec::SSZ(ssz_codec); Framed::new(socket, codec) .into_future() @@ -171,7 +151,7 @@ pub enum RPCRequest { } impl 
UpgradeInfo for RPCRequest { - type Info = RawProtocolId; + type Info = ProtocolId; type InfoIter = Vec; // add further protocols as we support more encodings/versions @@ -182,22 +162,25 @@ impl UpgradeInfo for RPCRequest { /// Implements the encoding per supported protocol for RPCRequest. impl RPCRequest { - pub fn supported_protocols(&self) -> Vec { + pub fn supported_protocols(&self) -> Vec { match self { // add more protocols when versions/encodings are supported - RPCRequest::Hello(_) => vec![ProtocolId::new("hello", "1.0.0", "ssz").into()], - RPCRequest::Goodbye(_) => vec![ProtocolId::new("goodbye", "1.0.0", "ssz").into()], + RPCRequest::Hello(_) => vec![ + ProtocolId::new("hello", "1.0.0", "ssz"), + ProtocolId::new("goodbye", "1.0.0", "ssz"), + ], + RPCRequest::Goodbye(_) => vec![ProtocolId::new("goodbye", "1.0.0", "ssz")], RPCRequest::BeaconBlockRoots(_) => { - vec![ProtocolId::new("beacon_block_roots", "1.0.0", "ssz").into()] + vec![ProtocolId::new("beacon_block_roots", "1.0.0", "ssz")] } RPCRequest::BeaconBlockHeaders(_) => { - vec![ProtocolId::new("beacon_block_headers", "1.0.0", "ssz").into()] + vec![ProtocolId::new("beacon_block_headers", "1.0.0", "ssz")] } RPCRequest::BeaconBlockBodies(_) => { - vec![ProtocolId::new("beacon_block_bodies", "1.0.0", "ssz").into()] + vec![ProtocolId::new("beacon_block_bodies", "1.0.0", "ssz")] } RPCRequest::BeaconChainState(_) => { - vec![ProtocolId::new("beacon_block_state", "1.0.0", "ssz").into()] + vec![ProtocolId::new("beacon_block_state", "1.0.0", "ssz")] } } } @@ -230,12 +213,9 @@ where socket: upgrade::Negotiated, protocol: Self::Info, ) -> Self::Future { - let protocol_id = - ProtocolId::from_bytes(&protocol).expect("Can decode all supported protocols"); - - match protocol_id.encoding.as_str() { + match protocol.encoding.as_str() { "ssz" | _ => { - let ssz_codec = BaseOutboundCodec::new(SSZOutboundCodec::new(protocol_id, 4096)); + let ssz_codec = BaseOutboundCodec::new(SSZOutboundCodec::new(protocol, 4096)); let 
codec = OutboundCodec::SSZ(ssz_codec); Framed::new(socket, codec).send(self) } From 0613bc16fc54f5d02434ec7540500ba255ab5dc9 Mon Sep 17 00:00:00 2001 From: Age Manning Date: Tue, 6 Aug 2019 15:09:47 +1000 Subject: [PATCH 014/305] Update to latest libp2p, gossipsub improvements --- beacon_node/eth2-libp2p/Cargo.toml | 4 ++-- beacon_node/eth2-libp2p/src/behaviour.rs | 6 ++---- beacon_node/eth2-libp2p/src/config.rs | 8 ++++++-- beacon_node/eth2-libp2p/src/discovery.rs | 6 ++---- beacon_node/eth2-libp2p/src/lib.rs | 2 +- beacon_node/eth2-libp2p/src/rpc/handler.rs | 7 ++++--- beacon_node/eth2-libp2p/src/rpc/mod.rs | 6 +++--- beacon_node/eth2-libp2p/src/service.rs | 20 +++++++++++++------- beacon_node/rpc/src/attestation.rs | 4 ++-- beacon_node/rpc/src/beacon_block.rs | 4 ++-- beacon_node/src/main.rs | 15 +++++++++++---- 11 files changed, 48 insertions(+), 34 deletions(-) diff --git a/beacon_node/eth2-libp2p/Cargo.toml b/beacon_node/eth2-libp2p/Cargo.toml index 405c72cc4..f5fe8a877 100644 --- a/beacon_node/eth2-libp2p/Cargo.toml +++ b/beacon_node/eth2-libp2p/Cargo.toml @@ -7,8 +7,8 @@ edition = "2018" [dependencies] clap = "2.32.0" #SigP repository -libp2p = { git = "https://github.com/SigP/rust-libp2p", rev = "be5710bbde69d8c5be732c13ba64239e2f370a7b" } -enr = { git = "https://github.com/SigP/rust-libp2p/", rev = "be5710bbde69d8c5be732c13ba64239e2f370a7b", features = ["serde"] } +libp2p = { git = "https://github.com/SigP/rust-libp2p", rev = "35104cca27231b9178e1fea5b3788ea41ba8af76" } +enr = { git = "https://github.com/SigP/rust-libp2p/", rev = "35104cca27231b9178e1fea5b3788ea41ba8af76", features = ["serde"] } types = { path = "../../eth2/types" } serde = "1.0" serde_derive = "1.0" diff --git a/beacon_node/eth2-libp2p/src/behaviour.rs b/beacon_node/eth2-libp2p/src/behaviour.rs index 33acd41e1..fcb147949 100644 --- a/beacon_node/eth2-libp2p/src/behaviour.rs +++ b/beacon_node/eth2-libp2p/src/behaviour.rs @@ -4,14 +4,12 @@ use crate::{error, NetworkConfig}; use 
crate::{Topic, TopicHash}; use futures::prelude::*; use libp2p::{ - core::{ - identity::Keypair, - swarm::{NetworkBehaviourAction, NetworkBehaviourEventProcess}, - }, + core::identity::Keypair, discv5::Discv5Event, gossipsub::{Gossipsub, GossipsubEvent}, identify::{Identify, IdentifyEvent}, ping::{Ping, PingConfig, PingEvent}, + swarm::{NetworkBehaviourAction, NetworkBehaviourEventProcess}, tokio_io::{AsyncRead, AsyncWrite}, NetworkBehaviour, PeerId, }; diff --git a/beacon_node/eth2-libp2p/src/config.rs b/beacon_node/eth2-libp2p/src/config.rs index d04eae14b..44d07795b 100644 --- a/beacon_node/eth2-libp2p/src/config.rs +++ b/beacon_node/eth2-libp2p/src/config.rs @@ -64,9 +64,9 @@ impl Default for Config { discovery_port: 9000, max_peers: 10, //TODO: Set realistic values for production + // Note: This defaults topics to plain strings. Not hashes gs_config: GossipsubConfigBuilder::new() - .max_gossip_size(4_000_000) - .inactivity_timeout(Duration::from_secs(90)) + .max_transmit_size(1_000_000) .heartbeat_interval(Duration::from_secs(20)) .build(), boot_nodes: vec![], @@ -134,6 +134,10 @@ impl Config { .collect::, _>>()?; } + if let Some(topics_str) = args.value_of("topics") { + self.topics = topics_str.split(',').map(|s| s.into()).collect(); + } + if let Some(discovery_address_str) = args.value_of("discovery-address") { self.discovery_address = discovery_address_str .parse() diff --git a/beacon_node/eth2-libp2p/src/discovery.rs b/beacon_node/eth2-libp2p/src/discovery.rs index 96cf71846..4c1794945 100644 --- a/beacon_node/eth2-libp2p/src/discovery.rs +++ b/beacon_node/eth2-libp2p/src/discovery.rs @@ -4,13 +4,11 @@ use crate::{error, NetworkConfig}; /// Currently using discv5 for peer discovery. 
/// use futures::prelude::*; -use libp2p::core::swarm::{ - ConnectedPoint, NetworkBehaviour, NetworkBehaviourAction, PollParameters, -}; -use libp2p::core::{identity::Keypair, Multiaddr, PeerId, ProtocolsHandler}; +use libp2p::core::{identity::Keypair, ConnectedPoint, Multiaddr, PeerId}; use libp2p::discv5::{Discv5, Discv5Event}; use libp2p::enr::{Enr, EnrBuilder, NodeId}; use libp2p::multiaddr::Protocol; +use libp2p::swarm::{NetworkBehaviour, NetworkBehaviourAction, PollParameters, ProtocolsHandler}; use slog::{debug, info, o, warn}; use std::collections::HashSet; use std::fs::File; diff --git a/beacon_node/eth2-libp2p/src/lib.rs b/beacon_node/eth2-libp2p/src/lib.rs index 7a3b2e632..ca6ac3760 100644 --- a/beacon_node/eth2-libp2p/src/lib.rs +++ b/beacon_node/eth2-libp2p/src/lib.rs @@ -13,7 +13,7 @@ pub use behaviour::PubsubMessage; pub use config::{ Config as NetworkConfig, BEACON_ATTESTATION_TOPIC, BEACON_PUBSUB_TOPIC, SHARD_TOPIC_PREFIX, }; -pub use libp2p::floodsub::{Topic, TopicBuilder, TopicHash}; +pub use libp2p::gossipsub::{Topic, TopicHash}; pub use libp2p::multiaddr; pub use libp2p::Multiaddr; pub use libp2p::{ diff --git a/beacon_node/eth2-libp2p/src/rpc/handler.rs b/beacon_node/eth2-libp2p/src/rpc/handler.rs index 4e796f6fb..76e04d24e 100644 --- a/beacon_node/eth2-libp2p/src/rpc/handler.rs +++ b/beacon_node/eth2-libp2p/src/rpc/handler.rs @@ -5,10 +5,10 @@ use crate::rpc::protocol::{InboundFramed, OutboundFramed}; use core::marker::PhantomData; use fnv::FnvHashMap; use futures::prelude::*; -use libp2p::core::protocols_handler::{ +use libp2p::core::upgrade::{InboundUpgrade, OutboundUpgrade}; +use libp2p::swarm::protocols_handler::{ KeepAlive, ProtocolsHandler, ProtocolsHandlerEvent, ProtocolsHandlerUpgrErr, SubstreamProtocol, }; -use libp2p::core::upgrade::{InboundUpgrade, OutboundUpgrade}; use smallvec::SmallVec; use std::time::{Duration, Instant}; use tokio_io::{AsyncRead, AsyncWrite}; @@ -273,7 +273,8 @@ where Self::Error, > { if let Some(err) = 
self.pending_error.take() { - return Err(err); + dbg!(&err); + //return Err(err); } // return any events that need to be reported diff --git a/beacon_node/eth2-libp2p/src/rpc/mod.rs b/beacon_node/eth2-libp2p/src/rpc/mod.rs index 88060e602..5593660ff 100644 --- a/beacon_node/eth2-libp2p/src/rpc/mod.rs +++ b/beacon_node/eth2-libp2p/src/rpc/mod.rs @@ -6,9 +6,9 @@ use futures::prelude::*; use handler::RPCHandler; -use libp2p::core::protocols_handler::ProtocolsHandler; -use libp2p::core::swarm::{ - ConnectedPoint, NetworkBehaviour, NetworkBehaviourAction, PollParameters, +use libp2p::core::ConnectedPoint; +use libp2p::swarm::{ + protocols_handler::ProtocolsHandler, NetworkBehaviour, NetworkBehaviourAction, PollParameters, }; use libp2p::{Multiaddr, PeerId}; pub use methods::{ErrorMessage, HelloMessage, RPCErrorResponse, RPCResponse, RequestId}; diff --git a/beacon_node/eth2-libp2p/src/service.rs b/beacon_node/eth2-libp2p/src/service.rs index 5c7c0c7f1..5a2fc8d8b 100644 --- a/beacon_node/eth2-libp2p/src/service.rs +++ b/beacon_node/eth2-libp2p/src/service.rs @@ -3,7 +3,7 @@ use crate::error; use crate::multiaddr::Protocol; use crate::rpc::RPCEvent; use crate::NetworkConfig; -use crate::{TopicBuilder, TopicHash}; +use crate::{Topic, TopicHash}; use crate::{BEACON_ATTESTATION_TOPIC, BEACON_PUBSUB_TOPIC}; use futures::prelude::*; use futures::Stream; @@ -90,15 +90,21 @@ impl Service { // subscribe to default gossipsub topics let mut topics = vec![]; //TODO: Handle multiple shard attestations. 
For now we simply use a separate topic for - //attestations - topics.push(BEACON_ATTESTATION_TOPIC.to_string()); - topics.push(BEACON_PUBSUB_TOPIC.to_string()); - topics.append(&mut config.topics.clone()); + // attestations + topics.push(Topic::new(BEACON_ATTESTATION_TOPIC.into())); + topics.push(Topic::new(BEACON_PUBSUB_TOPIC.into())); + topics.append( + &mut config + .topics + .iter() + .cloned() + .map(|s| Topic::new(s)) + .collect(), + ); let mut subscribed_topics = vec![]; for topic in topics { - let t = TopicBuilder::new(topic.clone()).build(); - if swarm.subscribe(t) { + if swarm.subscribe(topic.clone()) { trace!(log, "Subscribed to topic: {:?}", topic); subscribed_topics.push(topic); } else { diff --git a/beacon_node/rpc/src/attestation.rs b/beacon_node/rpc/src/attestation.rs index 5ea8368fd..cbbe4de6e 100644 --- a/beacon_node/rpc/src/attestation.rs +++ b/beacon_node/rpc/src/attestation.rs @@ -1,6 +1,6 @@ use beacon_chain::{BeaconChain, BeaconChainTypes}; use eth2_libp2p::PubsubMessage; -use eth2_libp2p::TopicBuilder; +use eth2_libp2p::Topic; use eth2_libp2p::BEACON_ATTESTATION_TOPIC; use futures::Future; use grpcio::{RpcContext, RpcStatus, RpcStatusCode, UnarySink}; @@ -140,7 +140,7 @@ impl AttestationService for AttestationServiceInstance { ); // valid attestation, propagate to the network - let topic = TopicBuilder::new(BEACON_ATTESTATION_TOPIC).build(); + let topic = Topic::new(BEACON_ATTESTATION_TOPIC.into()); let message = PubsubMessage::Attestation(attestation); self.network_chan diff --git a/beacon_node/rpc/src/beacon_block.rs b/beacon_node/rpc/src/beacon_block.rs index b42bbb208..2a8ae2c6b 100644 --- a/beacon_node/rpc/src/beacon_block.rs +++ b/beacon_node/rpc/src/beacon_block.rs @@ -1,6 +1,6 @@ use beacon_chain::{BeaconChain, BeaconChainTypes, BlockProcessingOutcome}; use eth2_libp2p::BEACON_PUBSUB_TOPIC; -use eth2_libp2p::{PubsubMessage, TopicBuilder}; +use eth2_libp2p::{PubsubMessage, Topic}; use futures::Future; use grpcio::{RpcContext, 
RpcStatus, RpcStatusCode, UnarySink}; use network::NetworkMessage; @@ -106,7 +106,7 @@ impl BeaconBlockService for BeaconBlockServiceInstance { ); // get the network topic to send on - let topic = TopicBuilder::new(BEACON_PUBSUB_TOPIC).build(); + let topic = Topic::new(BEACON_PUBSUB_TOPIC.into()); let message = PubsubMessage::Block(block); // Publish the block to the p2p network via gossipsub. diff --git a/beacon_node/src/main.rs b/beacon_node/src/main.rs index 9a1af2e08..c85eeedac 100644 --- a/beacon_node/src/main.rs +++ b/beacon_node/src/main.rs @@ -52,14 +52,14 @@ fn main() { .arg( Arg::with_name("listen-address") .long("listen-address") - .value_name("Address") + .value_name("ADDRESS") .help("The address lighthouse will listen for UDP and TCP connections. (default 127.0.0.1).") .takes_value(true), ) .arg( Arg::with_name("port") .long("port") - .value_name("Lighthouse Port") + .value_name("PORT") .help("The TCP/UDP port to listen on. The UDP port can be modified by the --discovery-port flag.") .takes_value(true), ) @@ -80,17 +80,24 @@ fn main() { .arg( Arg::with_name("discovery-port") .long("disc-port") - .value_name("DiscoveryPort") + .value_name("PORT") .help("The discovery UDP port.") .takes_value(true), ) .arg( Arg::with_name("discovery-address") .long("discovery-address") - .value_name("Address") + .value_name("ADDRESS") .help("The IP address to broadcast to other peers on how to reach this node.") .takes_value(true), ) + .arg( + Arg::with_name("topics") + .long("topics") + .value_name("STRING") + .help("One or more comma-delimited gossipsub topic strings to subscribe to.") + .takes_value(true), + ) .arg( Arg::with_name("libp2p-addresses") .long("libp2p-addresses") From 107bbdcccd66d4fa4125bc6f5b3f4fec3353032f Mon Sep 17 00:00:00 2001 From: Age Manning Date: Tue, 6 Aug 2019 17:54:38 +1000 Subject: [PATCH 015/305] Updates to latest interop branch. - Shifts decoding of objects into message handler. - Updates to latest interop gossipsub. 
- Adds interop spec constant. --- beacon_node/eth2-libp2p/Cargo.toml | 4 +- beacon_node/eth2-libp2p/src/behaviour.rs | 74 ++++++------- beacon_node/eth2-libp2p/src/config.rs | 2 +- beacon_node/eth2-libp2p/src/lib.rs | 2 +- beacon_node/eth2-libp2p/src/rpc/handler.rs | 17 ++- beacon_node/eth2-libp2p/src/rpc/mod.rs | 12 +-- beacon_node/eth2-libp2p/src/service.rs | 23 ++-- beacon_node/http_server/src/lib.rs | 2 +- beacon_node/network/src/message_handler.rs | 101 ++++++++++++------ beacon_node/network/src/service.rs | 33 +++--- beacon_node/network/src/sync/simple_sync.rs | 38 +++---- beacon_node/rpc/src/attestation.rs | 8 +- beacon_node/rpc/src/beacon_block.rs | 12 +-- beacon_node/rpc/src/lib.rs | 2 +- .../src/beacon_state/beacon_state_types.rs | 24 ++++- validator_client/src/main.rs | 2 +- 16 files changed, 199 insertions(+), 157 deletions(-) diff --git a/beacon_node/eth2-libp2p/Cargo.toml b/beacon_node/eth2-libp2p/Cargo.toml index f5fe8a877..0ea182bc6 100644 --- a/beacon_node/eth2-libp2p/Cargo.toml +++ b/beacon_node/eth2-libp2p/Cargo.toml @@ -7,8 +7,8 @@ edition = "2018" [dependencies] clap = "2.32.0" #SigP repository -libp2p = { git = "https://github.com/SigP/rust-libp2p", rev = "35104cca27231b9178e1fea5b3788ea41ba8af76" } -enr = { git = "https://github.com/SigP/rust-libp2p/", rev = "35104cca27231b9178e1fea5b3788ea41ba8af76", features = ["serde"] } +libp2p = { git = "https://github.com/SigP/rust-libp2p", rev = "b0d3cf7b4b0fa6c555b64dbdd110673a05457abd" } +enr = { git = "https://github.com/SigP/rust-libp2p/", rev = "b0d3cf7b4b0fa6c555b64dbdd110673a05457abd", features = ["serde"] } types = { path = "../../eth2/types" } serde = "1.0" serde_derive = "1.0" diff --git a/beacon_node/eth2-libp2p/src/behaviour.rs b/beacon_node/eth2-libp2p/src/behaviour.rs index fcb147949..fc224e91a 100644 --- a/beacon_node/eth2-libp2p/src/behaviour.rs +++ b/beacon_node/eth2-libp2p/src/behaviour.rs @@ -2,6 +2,7 @@ use crate::discovery::Discovery; use crate::rpc::{RPCEvent, RPCMessage, RPC}; 
use crate::{error, NetworkConfig}; use crate::{Topic, TopicHash}; +use crate::{BEACON_ATTESTATION_TOPIC, BEACON_BLOCK_TOPIC}; use futures::prelude::*; use libp2p::{ core::identity::Keypair, @@ -13,11 +14,10 @@ use libp2p::{ tokio_io::{AsyncRead, AsyncWrite}, NetworkBehaviour, PeerId, }; -use slog::{debug, o, trace, warn}; -use ssz::{ssz_encode, Decode, DecodeError, Encode}; +use slog::{debug, o, trace}; +use ssz::{ssz_encode, Encode}; use std::num::NonZeroU32; use std::time::Duration; -use types::{Attestation, BeaconBlock}; /// Builds the network behaviour that manages the core protocols of eth2. /// This core behaviour is managed by `Behaviour` which adds peer management to all core @@ -87,23 +87,12 @@ impl NetworkBehaviourEventProcess { trace!(self.log, "Received GossipEvent"; "msg" => format!("{:?}", gs_msg)); - let pubsub_message = match PubsubMessage::from_ssz_bytes(&gs_msg.data) { - //TODO: Punish peer on error - Err(e) => { - warn!( - self.log, - "Received undecodable message from Peer {:?} error", gs_msg.source; - "error" => format!("{:?}", e) - ); - return; - } - Ok(msg) => msg, - }; + let msg = PubsubMessage::from_topics(&gs_msg.topics, gs_msg.data); self.events.push(BehaviourEvent::GossipMessage { source: gs_msg.source, topics: gs_msg.topics, - message: Box::new(pubsub_message), + message: msg, }); } GossipsubEvent::Subscribed { .. } => {} @@ -225,7 +214,7 @@ pub enum BehaviourEvent { GossipMessage { source: PeerId, topics: Vec, - message: Box, + message: PubsubMessage, }, } @@ -233,41 +222,50 @@ pub enum BehaviourEvent { #[derive(Debug, Clone, PartialEq)] pub enum PubsubMessage { /// Gossipsub message providing notification of a new block. - Block(BeaconBlock), + Block(Vec), /// Gossipsub message providing notification of a new attestation. - Attestation(Attestation), + Attestation(Vec), + /// Gossipsub message from an unknown topic. + Unknown(Vec), +} + +impl PubsubMessage { + /* Note: This is assuming we are not hashing topics. 
If we choose to hash topics, these will + * need to be modified. + * + * Also note that a message can be associated with many topics. As soon as one of the topics is + * known we match. If none of the topics are known we return an unknown state. + */ + fn from_topics(topics: &Vec, data: Vec) -> Self { + for topic in topics { + match topic.as_str() { + BEACON_BLOCK_TOPIC => return PubsubMessage::Block(data), + BEACON_ATTESTATION_TOPIC => return PubsubMessage::Attestation(data), + _ => {} + } + } + PubsubMessage::Unknown(data) + } } -//TODO: Correctly encode/decode enums. Prefixing with integer for now. impl Encode for PubsubMessage { fn is_ssz_fixed_len() -> bool { false } fn ssz_append(&self, buf: &mut Vec) { - let offset = ::ssz_fixed_len() + as Encode>::ssz_fixed_len(); - - let mut encoder = ssz::SszEncoder::container(buf, offset); - match self { - PubsubMessage::Block(block_gossip) => { - encoder.append(&0_u32); - + PubsubMessage::Block(inner) + | PubsubMessage::Attestation(inner) + | PubsubMessage::Unknown(inner) => { // Encode the gossip as a Vec; - encoder.append(&block_gossip.as_ssz_bytes()); - } - PubsubMessage::Attestation(attestation_gossip) => { - encoder.append(&1_u32); - - // Encode the gossip as a Vec; - encoder.append(&attestation_gossip.as_ssz_bytes()); + buf.append(&mut inner.as_ssz_bytes()); } } - - encoder.finalize(); } } +/* impl Decode for PubsubMessage { fn is_ssz_fixed_len() -> bool { false @@ -295,7 +293,9 @@ impl Decode for PubsubMessage { } } } +*/ +/* #[cfg(test)] mod test { use super::*; @@ -313,4 +313,6 @@ mod test { assert_eq!(original, decoded); } + } +*/ diff --git a/beacon_node/eth2-libp2p/src/config.rs b/beacon_node/eth2-libp2p/src/config.rs index 44d07795b..ddf14cc04 100644 --- a/beacon_node/eth2-libp2p/src/config.rs +++ b/beacon_node/eth2-libp2p/src/config.rs @@ -7,7 +7,7 @@ use std::path::PathBuf; use std::time::Duration; /// The beacon node topic string to subscribe to. 
-pub const BEACON_PUBSUB_TOPIC: &str = "beacon_block"; +pub const BEACON_BLOCK_TOPIC: &str = "beacon_block"; pub const BEACON_ATTESTATION_TOPIC: &str = "beacon_attestation"; pub const SHARD_TOPIC_PREFIX: &str = "shard"; diff --git a/beacon_node/eth2-libp2p/src/lib.rs b/beacon_node/eth2-libp2p/src/lib.rs index ca6ac3760..54a4f2a99 100644 --- a/beacon_node/eth2-libp2p/src/lib.rs +++ b/beacon_node/eth2-libp2p/src/lib.rs @@ -11,7 +11,7 @@ mod service; pub use behaviour::PubsubMessage; pub use config::{ - Config as NetworkConfig, BEACON_ATTESTATION_TOPIC, BEACON_PUBSUB_TOPIC, SHARD_TOPIC_PREFIX, + Config as NetworkConfig, BEACON_ATTESTATION_TOPIC, BEACON_BLOCK_TOPIC, SHARD_TOPIC_PREFIX, }; pub use libp2p::gossipsub::{Topic, TopicHash}; pub use libp2p::multiaddr; diff --git a/beacon_node/eth2-libp2p/src/rpc/handler.rs b/beacon_node/eth2-libp2p/src/rpc/handler.rs index 76e04d24e..355cc52ee 100644 --- a/beacon_node/eth2-libp2p/src/rpc/handler.rs +++ b/beacon_node/eth2-libp2p/src/rpc/handler.rs @@ -12,16 +12,14 @@ use libp2p::swarm::protocols_handler::{ use smallvec::SmallVec; use std::time::{Duration, Instant}; use tokio_io::{AsyncRead, AsyncWrite}; -use types::EthSpec; /// The time (in seconds) before a substream that is awaiting a response times out. pub const RESPONSE_TIMEOUT: u64 = 9; /// Implementation of `ProtocolsHandler` for the RPC protocol. -pub struct RPCHandler +pub struct RPCHandler where TSubstream: AsyncRead + AsyncWrite, - E: EthSpec, { /// The upgrade for inbound substreams. listen_protocol: SubstreamProtocol, @@ -56,8 +54,8 @@ where /// After the given duration has elapsed, an inactive connection will shutdown. inactive_timeout: Duration, - /// Phantom EthSpec. - _phantom: PhantomData, + /// Marker to pin the generic stream. + _phantom: PhantomData, } /// An outbound substream is waiting a response from the user. 
@@ -90,10 +88,9 @@ where }, } -impl RPCHandler +impl RPCHandler where TSubstream: AsyncRead + AsyncWrite, - E: EthSpec, { pub fn new( listen_protocol: SubstreamProtocol, @@ -145,20 +142,18 @@ where } } -impl Default for RPCHandler +impl Default for RPCHandler where TSubstream: AsyncRead + AsyncWrite, - E: EthSpec, { fn default() -> Self { RPCHandler::new(SubstreamProtocol::new(RPCProtocol), Duration::from_secs(30)) } } -impl ProtocolsHandler for RPCHandler +impl ProtocolsHandler for RPCHandler where TSubstream: AsyncRead + AsyncWrite, - E: EthSpec, { type InEvent = RPCEvent; type OutEvent = RPCEvent; diff --git a/beacon_node/eth2-libp2p/src/rpc/mod.rs b/beacon_node/eth2-libp2p/src/rpc/mod.rs index 5593660ff..756a62e71 100644 --- a/beacon_node/eth2-libp2p/src/rpc/mod.rs +++ b/beacon_node/eth2-libp2p/src/rpc/mod.rs @@ -16,7 +16,6 @@ pub use protocol::{RPCError, RPCProtocol, RPCRequest}; use slog::o; use std::marker::PhantomData; use tokio::io::{AsyncRead, AsyncWrite}; -use types::EthSpec; pub(crate) mod codec; mod handler; @@ -50,16 +49,16 @@ impl RPCEvent { /// Implements the libp2p `NetworkBehaviour` trait and therefore manages network-level /// logic. -pub struct RPC { +pub struct RPC { /// Queue of events to processed. events: Vec>, /// Pins the generic substream. - marker: PhantomData<(TSubstream, E)>, + marker: PhantomData<(TSubstream)>, /// Slog logger for RPC behaviour. 
_log: slog::Logger, } -impl RPC { +impl RPC { pub fn new(log: &slog::Logger) -> Self { let log = log.new(o!("Service" => "Libp2p-RPC")); RPC { @@ -80,12 +79,11 @@ impl RPC { } } -impl NetworkBehaviour for RPC +impl NetworkBehaviour for RPC where TSubstream: AsyncRead + AsyncWrite, - E: EthSpec, { - type ProtocolsHandler = RPCHandler; + type ProtocolsHandler = RPCHandler; type OutEvent = RPCMessage; fn new_handler(&mut self) -> Self::ProtocolsHandler { diff --git a/beacon_node/eth2-libp2p/src/service.rs b/beacon_node/eth2-libp2p/src/service.rs index 5a2fc8d8b..316aa0579 100644 --- a/beacon_node/eth2-libp2p/src/service.rs +++ b/beacon_node/eth2-libp2p/src/service.rs @@ -4,7 +4,7 @@ use crate::multiaddr::Protocol; use crate::rpc::RPCEvent; use crate::NetworkConfig; use crate::{Topic, TopicHash}; -use crate::{BEACON_ATTESTATION_TOPIC, BEACON_PUBSUB_TOPIC}; +use crate::{BEACON_ATTESTATION_TOPIC, BEACON_BLOCK_TOPIC}; use futures::prelude::*; use futures::Stream; use libp2p::core::{ @@ -21,25 +21,24 @@ use std::fs::File; use std::io::prelude::*; use std::io::{Error, ErrorKind}; use std::time::Duration; -use types::EthSpec; type Libp2pStream = Boxed<(PeerId, StreamMuxerBox), Error>; -type Libp2pBehaviour = Behaviour, E>; +type Libp2pBehaviour = Behaviour>; const NETWORK_KEY_FILENAME: &str = "key"; /// The configuration and state of the libp2p components for the beacon node. -pub struct Service { +pub struct Service { /// The libp2p Swarm handler. //TODO: Make this private - pub swarm: Swarm>, + pub swarm: Swarm, /// This node's PeerId. _local_peer_id: PeerId, /// The libp2p logger handle. pub log: slog::Logger, } -impl Service { +impl Service { pub fn new(config: NetworkConfig, log: slog::Logger) -> error::Result { debug!(log, "Network-libp2p Service starting"); @@ -92,7 +91,7 @@ impl Service { //TODO: Handle multiple shard attestations. 
For now we simply use a separate topic for // attestations topics.push(Topic::new(BEACON_ATTESTATION_TOPIC.into())); - topics.push(Topic::new(BEACON_PUBSUB_TOPIC.into())); + topics.push(Topic::new(BEACON_BLOCK_TOPIC.into())); topics.append( &mut config .topics @@ -121,8 +120,8 @@ impl Service { } } -impl Stream for Service { - type Item = Libp2pEvent; +impl Stream for Service { + type Item = Libp2pEvent; type Error = crate::error::Error; fn poll(&mut self) -> Poll, Self::Error> { @@ -136,7 +135,7 @@ impl Stream for Service { topics, message, } => { - trace!(self.log, "Pubsub message received: {:?}", message); + trace!(self.log, "Gossipsub message received"; "Message" => format!("{:?}", message)); return Ok(Async::Ready(Some(Libp2pEvent::PubsubMessage { source, topics, @@ -196,7 +195,7 @@ fn build_transport(local_private_key: Keypair) -> Boxed<(PeerId, StreamMuxerBox) } /// Events that can be obtained from polling the Libp2p Service. -pub enum Libp2pEvent { +pub enum Libp2pEvent { /// An RPC response request has been received on the swarm. RPC(PeerId, RPCEvent), /// Initiated the connection to a new peer. 
@@ -207,7 +206,7 @@ pub enum Libp2pEvent { PubsubMessage { source: PeerId, topics: Vec, - message: Box>, + message: PubsubMessage, }, } diff --git a/beacon_node/http_server/src/lib.rs b/beacon_node/http_server/src/lib.rs index b20e43de8..f1d006a5b 100644 --- a/beacon_node/http_server/src/lib.rs +++ b/beacon_node/http_server/src/lib.rs @@ -76,7 +76,7 @@ pub fn create_iron_http_server( pub fn start_service( config: &HttpServerConfig, executor: &TaskExecutor, - _network_chan: mpsc::UnboundedSender>, + _network_chan: mpsc::UnboundedSender, beacon_chain: Arc>, db_path: PathBuf, metrics_registry: Registry, diff --git a/beacon_node/network/src/message_handler.rs b/beacon_node/network/src/message_handler.rs index eaddce533..72a507ad7 100644 --- a/beacon_node/network/src/message_handler.rs +++ b/beacon_node/network/src/message_handler.rs @@ -14,7 +14,7 @@ use slog::{debug, warn}; use ssz::{Decode, DecodeError}; use std::sync::Arc; use tokio::sync::mpsc; -use types::{BeaconBlockHeader, EthSpec}; +use types::{Attestation, BeaconBlock, BeaconBlockHeader}; /// Handles messages received from the network and client and organises syncing. pub struct MessageHandler { @@ -23,14 +23,14 @@ pub struct MessageHandler { /// The syncing framework. sync: SimpleSync, /// The context required to send messages to, and process messages from peers. - network_context: NetworkContext, + network_context: NetworkContext, /// The `MessageHandler` logger. log: slog::Logger, } /// Types of messages the handler can receive. #[derive(Debug)] -pub enum HandlerMessage { +pub enum HandlerMessage { /// We have initiated a connection to a new peer. PeerDialed(PeerId), /// Peer has disconnected, @@ -38,17 +38,17 @@ pub enum HandlerMessage { /// An RPC response/request has been received. RPC(PeerId, RPCEvent), /// A gossip message has been received. - PubsubMessage(PeerId, Box>), + PubsubMessage(PeerId, PubsubMessage), } impl MessageHandler { /// Initializes and runs the MessageHandler. 
pub fn spawn( beacon_chain: Arc>, - network_send: mpsc::UnboundedSender>, + network_send: mpsc::UnboundedSender, executor: &tokio::runtime::TaskExecutor, log: slog::Logger, - ) -> error::Result>> { + ) -> error::Result> { debug!(log, "Service starting"); let (handler_send, handler_recv) = mpsc::unbounded_channel(); @@ -78,7 +78,7 @@ impl MessageHandler { } /// Handle all messages incoming from the network service. - fn handle_message(&mut self, message: HandlerMessage) { + fn handle_message(&mut self, message: HandlerMessage) { match message { // we have initiated a connection to a peer HandlerMessage::PeerDialed(peer_id) => { @@ -94,7 +94,7 @@ impl MessageHandler { } // we have received an RPC message request/response HandlerMessage::PubsubMessage(peer_id, gossip) => { - self.handle_gossip(peer_id, *gossip); + self.handle_gossip(peer_id, gossip); } } } @@ -218,6 +218,62 @@ impl MessageHandler { } } + /// Handle various RPC errors + fn handle_rpc_error(&mut self, peer_id: PeerId, request_id: RequestId, error: RPCError) { + //TODO: Handle error correctly + warn!(self.log, "RPC Error"; "Peer" => format!("{:?}", peer_id), "Request Id" => format!("{}", request_id), "Error" => format!("{:?}", error)); + } + + /// Handle RPC messages + fn handle_gossip(&mut self, peer_id: PeerId, gossip_message: PubsubMessage) { + match gossip_message { + PubsubMessage::Block(message) => match self.decode_gossip_block(message) { + Err(e) => { + debug!(self.log, "Invalid Gossiped Beacon Block"; "Peer" => format!("{}", peer_id), "Error" => format!("{:?}", e)); + } + Ok(block) => { + let _should_forward_on = + self.sync + .on_block_gossip(peer_id, block, &mut self.network_context); + } + }, + PubsubMessage::Attestation(message) => match self.decode_gossip_attestation(message) { + Err(e) => { + debug!(self.log, "Invalid Gossiped Attestation"; "Peer" => format!("{}", peer_id), "Error" => format!("{:?}", e)); + } + Ok(attestation) => { + self.sync + .on_attestation_gossip(peer_id, attestation, 
&mut self.network_context) + } + }, + PubsubMessage::Unknown(message) => { + // Received a message from an unknown topic. Ignore for now + debug!(self.log, "Unknown Gossip Message"; "Peer" => format!("{}", peer_id), "Message" => format!("{:?}", message)); + } + } + } + + /* Decoding of blocks and attestations from the network. + * + * TODO: Apply efficient decoding/verification of these objects + */ + + fn decode_gossip_block( + &self, + beacon_block: Vec, + ) -> Result, DecodeError> { + //TODO: Apply verification before decoding. + BeaconBlock::from_ssz_bytes(&beacon_block) + } + + fn decode_gossip_attestation( + &self, + beacon_block: Vec, + ) -> Result, DecodeError> { + //TODO: Apply verification before decoding. + Attestation::from_ssz_bytes(&beacon_block) + } + /// Verifies and decodes the ssz-encoded block bodies received from peers. fn decode_block_bodies( &self, @@ -241,39 +297,18 @@ impl MessageHandler { //TODO: Implement faster header verification before decoding entirely Vec::from_ssz_bytes(&headers_response.headers) } - - /// Handle various RPC errors - fn handle_rpc_error(&mut self, peer_id: PeerId, request_id: RequestId, error: RPCError) { - //TODO: Handle error correctly - warn!(self.log, "RPC Error"; "Peer" => format!("{:?}", peer_id), "Request Id" => format!("{}", request_id), "Error" => format!("{:?}", error)); - } - - /// Handle RPC messages - fn handle_gossip(&mut self, peer_id: PeerId, gossip_message: PubsubMessage) { - match gossip_message { - PubsubMessage::Block(message) => { - let _should_forward_on = - self.sync - .on_block_gossip(peer_id, message, &mut self.network_context); - } - PubsubMessage::Attestation(message) => { - self.sync - .on_attestation_gossip(peer_id, message, &mut self.network_context) - } - } - } } // TODO: RPC Rewrite makes this struct fairly pointless -pub struct NetworkContext { +pub struct NetworkContext { /// The network channel to relay messages to the Network service. 
- network_send: mpsc::UnboundedSender>, + network_send: mpsc::UnboundedSender, /// The `MessageHandler` logger. log: slog::Logger, } -impl NetworkContext { - pub fn new(network_send: mpsc::UnboundedSender>, log: slog::Logger) -> Self { +impl NetworkContext { + pub fn new(network_send: mpsc::UnboundedSender, log: slog::Logger) -> Self { Self { network_send, log } } diff --git a/beacon_node/network/src/service.rs b/beacon_node/network/src/service.rs index 7a21f7f28..e5ca2a917 100644 --- a/beacon_node/network/src/service.rs +++ b/beacon_node/network/src/service.rs @@ -14,13 +14,12 @@ use slog::{debug, info, o, trace}; use std::sync::Arc; use tokio::runtime::TaskExecutor; use tokio::sync::{mpsc, oneshot}; -use types::EthSpec; /// Service that handles communication between internal services and the eth2_libp2p network service. pub struct Service { - libp2p_service: Arc>>, + libp2p_service: Arc>, _libp2p_exit: oneshot::Sender<()>, - _network_send: mpsc::UnboundedSender>, + _network_send: mpsc::UnboundedSender, _phantom: PhantomData, //message_handler: MessageHandler, //message_handler_send: Sender } @@ -31,9 +30,9 @@ impl Service { config: &NetworkConfig, executor: &TaskExecutor, log: slog::Logger, - ) -> error::Result<(Arc, mpsc::UnboundedSender>)> { + ) -> error::Result<(Arc, mpsc::UnboundedSender)> { // build the network channel - let (network_send, network_recv) = mpsc::unbounded_channel::>(); + let (network_send, network_recv) = mpsc::unbounded_channel::(); // launch message handler thread let message_handler_log = log.new(o!("Service" => "MessageHandler")); let message_handler_send = MessageHandler::spawn( @@ -65,15 +64,15 @@ impl Service { Ok((Arc::new(network_service), network_send)) } - pub fn libp2p_service(&self) -> Arc>> { + pub fn libp2p_service(&self) -> Arc> { self.libp2p_service.clone() } } -fn spawn_service( - libp2p_service: Arc>>, - network_recv: mpsc::UnboundedReceiver>, - message_handler_send: mpsc::UnboundedSender>, +fn spawn_service( + 
libp2p_service: Arc>, + network_recv: mpsc::UnboundedReceiver, + message_handler_send: mpsc::UnboundedSender, executor: &TaskExecutor, log: slog::Logger, ) -> error::Result> { @@ -99,10 +98,10 @@ fn spawn_service( } //TODO: Potentially handle channel errors -fn network_service( - libp2p_service: Arc>>, - mut network_recv: mpsc::UnboundedReceiver>, - mut message_handler_send: mpsc::UnboundedSender>, +fn network_service( + libp2p_service: Arc>, + mut network_recv: mpsc::UnboundedReceiver, + mut message_handler_send: mpsc::UnboundedSender, log: slog::Logger, ) -> impl futures::Future { futures::future::poll_fn(move || -> Result<_, eth2_libp2p::error::Error> { @@ -119,7 +118,7 @@ fn network_service( }, NetworkMessage::Publish { topics, message } => { debug!(log, "Sending pubsub message"; "topics" => format!("{:?}",topics)); - libp2p_service.lock().swarm.publish(topics, *message); + libp2p_service.lock().swarm.publish(topics, message); } }, Ok(Async::NotReady) => break, @@ -176,14 +175,14 @@ fn network_service( /// Types of messages that the network service can receive. #[derive(Debug)] -pub enum NetworkMessage { +pub enum NetworkMessage { /// Send a message to libp2p service. //TODO: Define typing for messages across the wire Send(PeerId, OutgoingMessage), /// Publish a message to pubsub mechanism. Publish { topics: Vec, - message: Box>, + message: PubsubMessage, }, } diff --git a/beacon_node/network/src/sync/simple_sync.rs b/beacon_node/network/src/sync/simple_sync.rs index 9a9d15503..40a1881dd 100644 --- a/beacon_node/network/src/sync/simple_sync.rs +++ b/beacon_node/network/src/sync/simple_sync.rs @@ -123,7 +123,7 @@ impl SimpleSync { /// Handle the connection of a new peer. /// /// Sends a `Hello` message to the peer. 
- pub fn on_connect(&self, peer_id: PeerId, network: &mut NetworkContext) { + pub fn on_connect(&self, peer_id: PeerId, network: &mut NetworkContext) { info!(self.log, "PeerConnected"; "peer" => format!("{:?}", peer_id)); network.send_rpc_request(peer_id, RPCRequest::Hello(hello_message(&self.chain))); @@ -137,7 +137,7 @@ impl SimpleSync { peer_id: PeerId, request_id: RequestId, hello: HelloMessage, - network: &mut NetworkContext, + network: &mut NetworkContext, ) { debug!(self.log, "HelloRequest"; "peer" => format!("{:?}", peer_id)); @@ -156,7 +156,7 @@ impl SimpleSync { &mut self, peer_id: PeerId, hello: HelloMessage, - network: &mut NetworkContext, + network: &mut NetworkContext, ) { debug!(self.log, "HelloResponse"; "peer" => format!("{:?}", peer_id)); @@ -171,7 +171,7 @@ impl SimpleSync { &mut self, peer_id: PeerId, hello: HelloMessage, - network: &mut NetworkContext, + network: &mut NetworkContext, ) { let remote = PeerSyncInfo::from(hello); let local = PeerSyncInfo::from(&self.chain); @@ -278,7 +278,7 @@ impl SimpleSync { peer_id: PeerId, request_id: RequestId, req: BeaconBlockRootsRequest, - network: &mut NetworkContext, + network: &mut NetworkContext, ) { debug!( self.log, @@ -323,7 +323,7 @@ impl SimpleSync { &mut self, peer_id: PeerId, res: BeaconBlockRootsResponse, - network: &mut NetworkContext, + network: &mut NetworkContext, ) { debug!( self.log, @@ -387,7 +387,7 @@ impl SimpleSync { peer_id: PeerId, request_id: RequestId, req: BeaconBlockHeadersRequest, - network: &mut NetworkContext, + network: &mut NetworkContext, ) { debug!( self.log, @@ -440,7 +440,7 @@ impl SimpleSync { &mut self, peer_id: PeerId, headers: Vec, - network: &mut NetworkContext, + network: &mut NetworkContext, ) { debug!( self.log, @@ -472,7 +472,7 @@ impl SimpleSync { peer_id: PeerId, request_id: RequestId, req: BeaconBlockBodiesRequest, - network: &mut NetworkContext, + network: &mut NetworkContext, ) { let block_bodies: Vec> = req .block_roots @@ -518,7 +518,7 @@ impl 
SimpleSync { &mut self, peer_id: PeerId, res: DecodedBeaconBlockBodiesResponse, - network: &mut NetworkContext, + network: &mut NetworkContext, ) { debug!( self.log, @@ -557,7 +557,7 @@ impl SimpleSync { &mut self, peer_id: PeerId, block: BeaconBlock, - network: &mut NetworkContext, + network: &mut NetworkContext, ) -> bool { if let Some(outcome) = self.process_block(peer_id.clone(), block.clone(), network, &"gossip") @@ -627,7 +627,7 @@ impl SimpleSync { &mut self, _peer_id: PeerId, msg: Attestation, - _network: &mut NetworkContext, + _network: &mut NetworkContext, ) { match self.chain.process_attestation(msg) { Ok(()) => info!(self.log, "ImportedAttestation"; "source" => "gossip"), @@ -642,7 +642,7 @@ impl SimpleSync { &mut self, peer_id: PeerId, req: BeaconBlockRootsRequest, - network: &mut NetworkContext, + network: &mut NetworkContext, ) { // Potentially set state to sync. if self.state == SyncState::Idle && req.count > SLOT_IMPORT_TOLERANCE { @@ -666,7 +666,7 @@ impl SimpleSync { &mut self, peer_id: PeerId, req: BeaconBlockHeadersRequest, - network: &mut NetworkContext, + network: &mut NetworkContext, ) { debug!( self.log, @@ -683,7 +683,7 @@ impl SimpleSync { &mut self, peer_id: PeerId, req: BeaconBlockBodiesRequest, - network: &mut NetworkContext, + network: &mut NetworkContext, ) { debug!( self.log, @@ -719,7 +719,7 @@ impl SimpleSync { &mut self, peer_id: PeerId, block_root: Hash256, - network: &mut NetworkContext, + network: &mut NetworkContext, source: &str, ) -> Option { match self.import_queue.attempt_complete_block(block_root) { @@ -812,7 +812,7 @@ impl SimpleSync { &mut self, peer_id: PeerId, block: BeaconBlock, - network: &mut NetworkContext, + network: &mut NetworkContext, source: &str, ) -> Option { let processing_result = self.chain.process_block(block.clone()); @@ -917,8 +917,8 @@ fn hello_message(beacon_chain: &BeaconChain) -> HelloMes network_id: spec.network_id, //TODO: Correctly define the chain id chain_id: spec.network_id as u64, - 
latest_finalized_root: state.finalized_root, - latest_finalized_epoch: state.finalized_epoch, + latest_finalized_root: state.finalized_checkpoint.root, + latest_finalized_epoch: state.finalized_checkpoint.epoch, best_root: beacon_chain.head().beacon_block_root, best_slot: state.slot, } diff --git a/beacon_node/rpc/src/attestation.rs b/beacon_node/rpc/src/attestation.rs index cbbe4de6e..3de3639d8 100644 --- a/beacon_node/rpc/src/attestation.rs +++ b/beacon_node/rpc/src/attestation.rs @@ -11,7 +11,7 @@ use protos::services::{ }; use protos::services_grpc::AttestationService; use slog::{error, info, trace, warn}; -use ssz::{ssz_encode, Decode}; +use ssz::{ssz_encode, Decode, Encode}; use std::sync::Arc; use tokio::sync::mpsc; use types::Attestation; @@ -19,7 +19,7 @@ use types::Attestation; #[derive(Clone)] pub struct AttestationServiceInstance { pub chain: Arc>, - pub network_chan: mpsc::UnboundedSender>, + pub network_chan: mpsc::UnboundedSender, pub log: slog::Logger, } @@ -141,12 +141,12 @@ impl AttestationService for AttestationServiceInstance { // valid attestation, propagate to the network let topic = Topic::new(BEACON_ATTESTATION_TOPIC.into()); - let message = PubsubMessage::Attestation(attestation); + let message = PubsubMessage::Attestation(attestation.as_ssz_bytes()); self.network_chan .try_send(NetworkMessage::Publish { topics: vec![topic], - message: Box::new(message), + message: message, }) .unwrap_or_else(|e| { error!( diff --git a/beacon_node/rpc/src/beacon_block.rs b/beacon_node/rpc/src/beacon_block.rs index 2a8ae2c6b..b1a67399e 100644 --- a/beacon_node/rpc/src/beacon_block.rs +++ b/beacon_node/rpc/src/beacon_block.rs @@ -1,5 +1,5 @@ use beacon_chain::{BeaconChain, BeaconChainTypes, BlockProcessingOutcome}; -use eth2_libp2p::BEACON_PUBSUB_TOPIC; +use eth2_libp2p::BEACON_BLOCK_TOPIC; use eth2_libp2p::{PubsubMessage, Topic}; use futures::Future; use grpcio::{RpcContext, RpcStatus, RpcStatusCode, UnarySink}; @@ -11,7 +11,7 @@ use protos::services::{ use 
protos::services_grpc::BeaconBlockService; use slog::Logger; use slog::{error, info, trace, warn}; -use ssz::{ssz_encode, Decode}; +use ssz::{ssz_encode, Decode, Encode}; use std::sync::Arc; use tokio::sync::mpsc; use types::{BeaconBlock, Signature, Slot}; @@ -19,7 +19,7 @@ use types::{BeaconBlock, Signature, Slot}; #[derive(Clone)] pub struct BeaconBlockServiceInstance { pub chain: Arc>, - pub network_chan: mpsc::UnboundedSender>, + pub network_chan: mpsc::UnboundedSender, pub log: Logger, } @@ -106,14 +106,14 @@ impl BeaconBlockService for BeaconBlockServiceInstance { ); // get the network topic to send on - let topic = Topic::new(BEACON_PUBSUB_TOPIC.into()); - let message = PubsubMessage::Block(block); + let topic = Topic::new(BEACON_BLOCK_TOPIC.into()); + let message = PubsubMessage::Block(block.as_ssz_bytes()); // Publish the block to the p2p network via gossipsub. self.network_chan .try_send(NetworkMessage::Publish { topics: vec![topic], - message: Box::new(message), + message: message, }) .unwrap_or_else(|e| { error!( diff --git a/beacon_node/rpc/src/lib.rs b/beacon_node/rpc/src/lib.rs index de9039505..eef009292 100644 --- a/beacon_node/rpc/src/lib.rs +++ b/beacon_node/rpc/src/lib.rs @@ -25,7 +25,7 @@ use tokio::sync::mpsc; pub fn start_server( config: &RPCConfig, executor: &TaskExecutor, - network_chan: mpsc::UnboundedSender>, + network_chan: mpsc::UnboundedSender, beacon_chain: Arc>, log: &slog::Logger, ) -> exit_future::Signal { diff --git a/eth2/types/src/beacon_state/beacon_state_types.rs b/eth2/types/src/beacon_state/beacon_state_types.rs index dd6ca3272..0e76942dd 100644 --- a/eth2/types/src/beacon_state/beacon_state_types.rs +++ b/eth2/types/src/beacon_state/beacon_state_types.rs @@ -207,12 +207,26 @@ pub struct InteropEthSpec; impl EthSpec for InteropEthSpec { type ShardCount = U8; - type SlotsPerHistoricalRoot = U64; - type LatestRandaoMixesLength = U64; - type LatestActiveIndexRootsLength = U64; - type LatestSlashedExitLength = U64; type 
SlotsPerEpoch = U8; - type GenesisEpoch = U0; + type SlotsPerHistoricalRoot = U64; + type SlotsPerEth1VotingPeriod = U16; + type EpochsPerHistoricalVector = U64; + type EpochsPerSlashingsVector = U64; + type MaxPendingAttestations = U1024; // 128 max attestations * 8 slots per epoch + + params_from_eth_spec!(MainnetEthSpec { + JustificationBitsLength, + MaxValidatorsPerCommittee, + GenesisEpoch, + HistoricalRootsLimit, + ValidatorRegistryLimit, + MaxProposerSlashings, + MaxAttesterSlashings, + MaxAttestations, + MaxDeposits, + MaxVoluntaryExits, + MaxTransfers + }); fn default_spec() -> ChainSpec { ChainSpec::interop() diff --git a/validator_client/src/main.rs b/validator_client/src/main.rs index 756f82991..76acb2f1a 100644 --- a/validator_client/src/main.rs +++ b/validator_client/src/main.rs @@ -214,7 +214,7 @@ fn main() { eth2_config, log.clone(), ), - "interop" => ValidatorService::::start::( + "interop" => ValidatorService::::start( client_config, eth2_config, log.clone(), From edd99fafb6c42212bda9bcaa8f77d11c15515e23 Mon Sep 17 00:00:00 2001 From: Grant Wuerker Date: Thu, 25 Jul 2019 15:08:18 +0200 Subject: [PATCH 016/305] Getting attestation slot via helper method --- beacon_node/beacon_chain/src/beacon_chain.rs | 2 +- beacon_node/beacon_chain/src/fork_choice.rs | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/beacon_node/beacon_chain/src/beacon_chain.rs b/beacon_node/beacon_chain/src/beacon_chain.rs index 67d928127..8a9421a1b 100644 --- a/beacon_node/beacon_chain/src/beacon_chain.rs +++ b/beacon_node/beacon_chain/src/beacon_chain.rs @@ -10,7 +10,7 @@ use operation_pool::{OperationPool, PersistedOperationPool}; use parking_lot::{RwLock, RwLockReadGuard}; use slot_clock::SlotClock; use state_processing::per_block_processing::errors::{ - AttestationValidationError, AttesterSlashingValidationError, DepositValidationError, + AttesterSlashingValidationError, DepositValidationError, ExitValidationError, ProposerSlashingValidationError, 
TransferValidationError, }; use state_processing::{ diff --git a/beacon_node/beacon_chain/src/fork_choice.rs b/beacon_node/beacon_chain/src/fork_choice.rs index 92b683590..0f98ac9ce 100644 --- a/beacon_node/beacon_chain/src/fork_choice.rs +++ b/beacon_node/beacon_chain/src/fork_choice.rs @@ -174,13 +174,13 @@ impl ForkChoice { &attestation.aggregation_bitfield, )?; - let target_slot = attestation.data.target_epoch.start_slot(T::EthSpec::slots_per_epoch()); + let block_slot = state.get_attestation_slot(&attestation.data)?; Ok(validator_indices .iter() .find(|&&v| { match self.backend.latest_message(v) { - Some((_, slot)) => target_slot > slot, + Some((_, slot)) => block_slot > slot, None => true } }).is_some()) From 78f39115229e4bf5ace88193303443c50297e613 Mon Sep 17 00:00:00 2001 From: Grant Wuerker Date: Fri, 26 Jul 2019 12:48:17 +0200 Subject: [PATCH 017/305] Refactored attestation creation in test utils --- beacon_node/beacon_chain/src/test_utils.rs | 127 ++++++++++++--------- 1 file changed, 72 insertions(+), 55 deletions(-) diff --git a/beacon_node/beacon_chain/src/test_utils.rs b/beacon_node/beacon_chain/src/test_utils.rs index 991d29418..c43309cbf 100644 --- a/beacon_node/beacon_chain/src/test_utils.rs +++ b/beacon_node/beacon_chain/src/test_utils.rs @@ -8,11 +8,7 @@ use std::sync::Arc; use store::MemoryStore; use store::Store; use tree_hash::{SignedRoot, TreeHash}; -use types::{ - test_utils::TestingBeaconStateBuilder, AggregateSignature, Attestation, - AttestationDataAndCustodyBit, BeaconBlock, BeaconState, Bitfield, ChainSpec, Domain, EthSpec, - Hash256, Keypair, RelativeEpoch, SecretKey, Signature, Slot, -}; +use types::{test_utils::TestingBeaconStateBuilder, AggregateSignature, Attestation, AttestationDataAndCustodyBit, BeaconBlock, BeaconState, Bitfield, ChainSpec, Domain, EthSpec, Hash256, Keypair, RelativeEpoch, SecretKey, Signature, Slot, CrosslinkCommittee}; pub use crate::persisted_beacon_chain::{PersistedBeaconChain, BEACON_CHAIN_DB_KEY}; @@ 
-171,7 +167,7 @@ where if let BlockProcessingOutcome::Processed { block_root } = outcome { head_block_root = Some(block_root); - self.add_attestations_to_op_pool( + self.add_attestations_to_chain( &attestation_strategy, &new_state, block_root, @@ -256,18 +252,16 @@ where (block, state) } - /// Adds attestations to the `BeaconChain` operations pool to be included in future blocks. + /// Adds attestations to the `BeaconChain` operations pool and fork choice. /// /// The `attestation_strategy` dictates which validators should attest. - fn add_attestations_to_op_pool( + fn add_attestations_to_chain( &self, attestation_strategy: &AttestationStrategy, state: &BeaconState, head_block_root: Hash256, head_block_slot: Slot, ) { - let spec = &self.spec; - let fork = &state.fork; let attesting_validators: Vec = match attestation_strategy { AttestationStrategy::AllValidators => (0..self.keypairs.len()).collect(), @@ -279,55 +273,18 @@ where .expect("should get committees") .iter() .for_each(|cc| { - let committee_size = cc.committee.len(); - for (i, validator_index) in cc.committee.iter().enumerate() { // Note: searching this array is worst-case `O(n)`. A hashset could be a better // alternative. 
if attesting_validators.contains(validator_index) { - let data = self - .chain - .produce_attestation_data_for_block( - cc.shard, - head_block_root, - head_block_slot, - state, - ) - .expect("should produce attestation data"); - - let mut aggregation_bitfield = Bitfield::new(); - aggregation_bitfield.set(i, true); - aggregation_bitfield.set(committee_size, false); - - let mut custody_bitfield = Bitfield::new(); - custody_bitfield.set(committee_size, false); - - let signature = { - let message = AttestationDataAndCustodyBit { - data: data.clone(), - custody_bit: false, - } - .tree_hash_root(); - - let domain = - spec.get_domain(data.target_epoch, Domain::Attestation, fork); - - let mut agg_sig = AggregateSignature::new(); - agg_sig.add(&Signature::new( - &message, - domain, - self.get_sk(*validator_index), - )); - - agg_sig - }; - - let attestation = Attestation { - aggregation_bitfield, - data, - custody_bitfield, - signature, - }; + let attestation = self.create_attestation( + *validator_index, + cc, + head_block_root, + head_block_slot, + state, + i + ); self.chain .process_attestation(attestation) @@ -337,6 +294,66 @@ where }); } + /// Creates an attestation for a validator with the given data. 
+ pub fn create_attestation( + &self, + validator_index: usize, + crosslink_committee: &CrosslinkCommittee, + head_block_root: Hash256, + head_block_slot: Slot, + state: &BeaconState, + bitfield_index: usize + ) -> Attestation { + let committee_size = crosslink_committee.committee.len(); + let spec = &self.spec; + let fork = &state.fork; + + let data = self + .chain + .produce_attestation_data_for_block( + crosslink_committee.shard, + head_block_root, + head_block_slot, + state, + ) + .expect("should produce attestation data"); + + let mut aggregation_bitfield = Bitfield::new(); + aggregation_bitfield.set(bitfield_index, true); + aggregation_bitfield.set(committee_size, false); + + let mut custody_bitfield = Bitfield::new(); + custody_bitfield.set(committee_size, false); + + let signature = { + let message = AttestationDataAndCustodyBit { + data: data.clone(), + custody_bit: false, + } + .tree_hash_root(); + + let domain = + spec.get_domain(data.target_epoch, Domain::Attestation, fork); + + let mut agg_sig = AggregateSignature::new(); + agg_sig.add(&Signature::new( + &message, + domain, + self.get_sk(validator_index), + )); + + agg_sig + }; + + Attestation { + aggregation_bitfield, + data, + custody_bitfield, + signature, + } + } + + /// Returns the secret key for the given validator index. fn get_sk(&self, validator_index: usize) -> &SecretKey { &self.keypairs[validator_index].sk From dcac8d56bd163845c0b928abf1ca54a85e179fd2 Mon Sep 17 00:00:00 2001 From: Grant Wuerker Date: Sat, 27 Jul 2019 22:32:11 +0200 Subject: [PATCH 018/305] Revert "Refactored attestation creation in test utils" This reverts commit 4d277fe4239a7194758b18fb5c00dfe0b8231306. 
--- beacon_node/beacon_chain/src/test_utils.rs | 127 +++++++++------------ 1 file changed, 55 insertions(+), 72 deletions(-) diff --git a/beacon_node/beacon_chain/src/test_utils.rs b/beacon_node/beacon_chain/src/test_utils.rs index c43309cbf..991d29418 100644 --- a/beacon_node/beacon_chain/src/test_utils.rs +++ b/beacon_node/beacon_chain/src/test_utils.rs @@ -8,7 +8,11 @@ use std::sync::Arc; use store::MemoryStore; use store::Store; use tree_hash::{SignedRoot, TreeHash}; -use types::{test_utils::TestingBeaconStateBuilder, AggregateSignature, Attestation, AttestationDataAndCustodyBit, BeaconBlock, BeaconState, Bitfield, ChainSpec, Domain, EthSpec, Hash256, Keypair, RelativeEpoch, SecretKey, Signature, Slot, CrosslinkCommittee}; +use types::{ + test_utils::TestingBeaconStateBuilder, AggregateSignature, Attestation, + AttestationDataAndCustodyBit, BeaconBlock, BeaconState, Bitfield, ChainSpec, Domain, EthSpec, + Hash256, Keypair, RelativeEpoch, SecretKey, Signature, Slot, +}; pub use crate::persisted_beacon_chain::{PersistedBeaconChain, BEACON_CHAIN_DB_KEY}; @@ -167,7 +171,7 @@ where if let BlockProcessingOutcome::Processed { block_root } = outcome { head_block_root = Some(block_root); - self.add_attestations_to_chain( + self.add_attestations_to_op_pool( &attestation_strategy, &new_state, block_root, @@ -252,16 +256,18 @@ where (block, state) } - /// Adds attestations to the `BeaconChain` operations pool and fork choice. + /// Adds attestations to the `BeaconChain` operations pool to be included in future blocks. /// /// The `attestation_strategy` dictates which validators should attest. 
- fn add_attestations_to_chain( + fn add_attestations_to_op_pool( &self, attestation_strategy: &AttestationStrategy, state: &BeaconState, head_block_root: Hash256, head_block_slot: Slot, ) { + let spec = &self.spec; + let fork = &state.fork; let attesting_validators: Vec = match attestation_strategy { AttestationStrategy::AllValidators => (0..self.keypairs.len()).collect(), @@ -273,18 +279,55 @@ where .expect("should get committees") .iter() .for_each(|cc| { + let committee_size = cc.committee.len(); + for (i, validator_index) in cc.committee.iter().enumerate() { // Note: searching this array is worst-case `O(n)`. A hashset could be a better // alternative. if attesting_validators.contains(validator_index) { - let attestation = self.create_attestation( - *validator_index, - cc, - head_block_root, - head_block_slot, - state, - i - ); + let data = self + .chain + .produce_attestation_data_for_block( + cc.shard, + head_block_root, + head_block_slot, + state, + ) + .expect("should produce attestation data"); + + let mut aggregation_bitfield = Bitfield::new(); + aggregation_bitfield.set(i, true); + aggregation_bitfield.set(committee_size, false); + + let mut custody_bitfield = Bitfield::new(); + custody_bitfield.set(committee_size, false); + + let signature = { + let message = AttestationDataAndCustodyBit { + data: data.clone(), + custody_bit: false, + } + .tree_hash_root(); + + let domain = + spec.get_domain(data.target_epoch, Domain::Attestation, fork); + + let mut agg_sig = AggregateSignature::new(); + agg_sig.add(&Signature::new( + &message, + domain, + self.get_sk(*validator_index), + )); + + agg_sig + }; + + let attestation = Attestation { + aggregation_bitfield, + data, + custody_bitfield, + signature, + }; self.chain .process_attestation(attestation) @@ -294,66 +337,6 @@ where }); } - /// Creates an attestation for a validator with the given data. 
- pub fn create_attestation( - &self, - validator_index: usize, - crosslink_committee: &CrosslinkCommittee, - head_block_root: Hash256, - head_block_slot: Slot, - state: &BeaconState, - bitfield_index: usize - ) -> Attestation { - let committee_size = crosslink_committee.committee.len(); - let spec = &self.spec; - let fork = &state.fork; - - let data = self - .chain - .produce_attestation_data_for_block( - crosslink_committee.shard, - head_block_root, - head_block_slot, - state, - ) - .expect("should produce attestation data"); - - let mut aggregation_bitfield = Bitfield::new(); - aggregation_bitfield.set(bitfield_index, true); - aggregation_bitfield.set(committee_size, false); - - let mut custody_bitfield = Bitfield::new(); - custody_bitfield.set(committee_size, false); - - let signature = { - let message = AttestationDataAndCustodyBit { - data: data.clone(), - custody_bit: false, - } - .tree_hash_root(); - - let domain = - spec.get_domain(data.target_epoch, Domain::Attestation, fork); - - let mut agg_sig = AggregateSignature::new(); - agg_sig.add(&Signature::new( - &message, - domain, - self.get_sk(validator_index), - )); - - agg_sig - }; - - Attestation { - aggregation_bitfield, - data, - custody_bitfield, - signature, - } - } - - /// Returns the secret key for the given validator index. 
fn get_sk(&self, validator_index: usize) -> &SecretKey { &self.keypairs[validator_index].sk From f4b169ce80b8b5acbc1b32b9ab488acabdb0bc84 Mon Sep 17 00:00:00 2001 From: Grant Wuerker Date: Mon, 29 Jul 2019 22:51:42 +0200 Subject: [PATCH 019/305] Integration tests for free attestation processing --- beacon_node/beacon_chain/src/fork_choice.rs | 7 +- beacon_node/beacon_chain/src/test_utils.rs | 6 +- beacon_node/beacon_chain/tests/tests.rs | 92 ++++++++++++++++++++- 3 files changed, 99 insertions(+), 6 deletions(-) diff --git a/beacon_node/beacon_chain/src/fork_choice.rs b/beacon_node/beacon_chain/src/fork_choice.rs index 0f98ac9ce..7d1830afe 100644 --- a/beacon_node/beacon_chain/src/fork_choice.rs +++ b/beacon_node/beacon_chain/src/fork_choice.rs @@ -3,7 +3,7 @@ use lmd_ghost::LmdGhost; use state_processing::common::get_attesting_indices_unsorted; use std::sync::Arc; use store::{Error as StoreError, Store}; -use types::{Attestation, BeaconBlock, BeaconState, BeaconStateError, Epoch, EthSpec, Hash256}; +use types::{Attestation, BeaconBlock, BeaconState, BeaconStateError, Epoch, EthSpec, Hash256, Slot}; use state_processing::common; type Result = std::result::Result; @@ -186,6 +186,11 @@ impl ForkChoice { }).is_some()) } + // Returns the latest message for a given validator + pub fn latest_message(&self, validator_index: usize) -> Option<(Hash256, Slot)> { + self.backend.latest_message(validator_index) + } + /// Inform the fork choice that the given block (and corresponding root) have been finalized so /// it may prune it's storage. 
/// diff --git a/beacon_node/beacon_chain/src/test_utils.rs b/beacon_node/beacon_chain/src/test_utils.rs index 991d29418..9a440b887 100644 --- a/beacon_node/beacon_chain/src/test_utils.rs +++ b/beacon_node/beacon_chain/src/test_utils.rs @@ -171,7 +171,7 @@ where if let BlockProcessingOutcome::Processed { block_root } = outcome { head_block_root = Some(block_root); - self.add_attestations_to_op_pool( + self.add_free_attestations( &attestation_strategy, &new_state, block_root, @@ -256,10 +256,10 @@ where (block, state) } - /// Adds attestations to the `BeaconChain` operations pool to be included in future blocks. + /// Adds attestations to the `BeaconChain` operations pool and fork choice. /// /// The `attestation_strategy` dictates which validators should attest. - fn add_attestations_to_op_pool( + fn add_free_attestations( &self, attestation_strategy: &AttestationStrategy, state: &BeaconState, diff --git a/beacon_node/beacon_chain/tests/tests.rs b/beacon_node/beacon_chain/tests/tests.rs index 882d9f235..2f4e5bade 100644 --- a/beacon_node/beacon_chain/tests/tests.rs +++ b/beacon_node/beacon_chain/tests/tests.rs @@ -1,4 +1,3 @@ -#![cfg(not(debug_assertions))] use beacon_chain::test_utils::{ AttestationStrategy, BeaconChainHarness, BlockStrategy, CommonTypes, PersistedBeaconChain, @@ -8,7 +7,7 @@ use lmd_ghost::ThreadSafeReducedTree; use rand::Rng; use store::{MemoryStore, Store}; use types::test_utils::{SeedableRng, TestRandom, XorShiftRng}; -use types::{Deposit, EthSpec, Hash256, MinimalEthSpec, Slot}; +use types::{Deposit, EthSpec, Hash256, MinimalEthSpec, Slot, RelativeEpoch}; // Should ideally be divisible by 3. 
pub const VALIDATOR_COUNT: usize = 24; @@ -265,3 +264,92 @@ fn roundtrip_operation_pool() { assert_eq!(harness.chain.op_pool, restored_op_pool); } + +#[test] +fn free_attestations_added_to_fork_choice_some_none() { + let num_blocks_produced = MinimalEthSpec::slots_per_epoch() / 2; + + let harness = get_harness(VALIDATOR_COUNT); + + harness.extend_chain( + num_blocks_produced as usize, + BlockStrategy::OnCanonicalHead, + AttestationStrategy::AllValidators, + ); + + let state = &harness.chain.head().beacon_state; + let fork_choice = &harness.chain.fork_choice; + + let validators: Vec = (0..VALIDATOR_COUNT).collect(); + let slots: Vec = validators + .iter() + .map(|&v| + state.get_attestation_duties(v, RelativeEpoch::Current) + .expect("should get attester duties") + .unwrap() + .slot + ).collect(); + let validator_slots: Vec<(&usize, Slot)> = validators.iter().zip(slots).collect(); + + for (validator, slot) in validator_slots.clone() { + let latest_message = fork_choice.latest_message(*validator); + + if slot <= num_blocks_produced && slot != 0{ + assert_eq!( + latest_message.unwrap().1, slot, + "Latest message slot should be equal to attester duty." + ) + } else { + assert!( + latest_message.is_none(), + "Latest message slot should be None." 
+ ) + } + } +} + +#[test] +fn free_attestations_added_to_fork_choice_all_updated() { + let num_blocks_produced = MinimalEthSpec::slots_per_epoch() * 2 - 1; + + let harness = get_harness(VALIDATOR_COUNT); + + harness.extend_chain( + num_blocks_produced as usize, + BlockStrategy::OnCanonicalHead, + AttestationStrategy::AllValidators, + ); + + let state = &harness.chain.head().beacon_state; + let fork_choice = &harness.chain.fork_choice; + + let validators: Vec = (0..VALIDATOR_COUNT).collect(); + let slots: Vec = validators + .iter() + .map(|&v| + state.get_attestation_duties(v, RelativeEpoch::Current) + .expect("should get attester duties") + .unwrap() + .slot + ).collect(); + let validator_slots: Vec<(&usize, Slot)> = validators.iter().zip(slots).collect(); + + for (validator, slot) in validator_slots { + let latest_message = fork_choice.latest_message(*validator); + + assert_eq!( + latest_message.unwrap().1, slot, + "Latest message slot should be equal to attester duty." + ); + + if slot != num_blocks_produced { + let block_root = state.get_block_root(slot) + .expect("Should get block root at slot"); + + assert_eq!( + latest_message.unwrap().0, *block_root, + "Latest message block root should be equal to block at slot." + ); + } + } +} \ No newline at end of file From c431bd993e9ed0d7aeffd37574e30a416955ea9c Mon Sep 17 00:00:00 2001 From: Grant Wuerker Date: Tue, 6 Aug 2019 14:56:13 +0200 Subject: [PATCH 020/305] Implicit conflicts resolved. 
--- beacon_node/beacon_chain/src/beacon_chain.rs | 10 +++++----- beacon_node/beacon_chain/src/fork_choice.rs | 12 ++++-------- beacon_node/beacon_chain/tests/tests.rs | 1 + 3 files changed, 10 insertions(+), 13 deletions(-) diff --git a/beacon_node/beacon_chain/src/beacon_chain.rs b/beacon_node/beacon_chain/src/beacon_chain.rs index 3e8467a49..49e2cec83 100644 --- a/beacon_node/beacon_chain/src/beacon_chain.rs +++ b/beacon_node/beacon_chain/src/beacon_chain.rs @@ -520,8 +520,8 @@ impl BeaconChain { if let Some(state) = self.get_attestation_state(&attestation) { if self.fork_choice.should_process_attestation(&state, &attestation)? { - let indexed_attestation = common::convert_to_indexed(&state, &attestation)?; - per_block_processing::verify_indexed_attestation(&state, &indexed_attestation, &self.spec)?; + let indexed_attestation = common::get_indexed_attestation(&state, &attestation)?; + per_block_processing::is_valid_indexed_attestation(&state, &indexed_attestation, &self.spec)?; self.fork_choice.process_attestation(&state, &attestation)?; } } @@ -540,14 +540,14 @@ impl BeaconChain { } /// Retrieves the `BeaconState` used to create the attestation. - fn get_attestation_state(&self, attestation: &Attestation) -> Option> { + fn get_attestation_state(&self, attestation: &Attestation) -> Option> { // Current state is used if the attestation targets a historic block and a slot within an // equal or adjacent epoch. let slots_per_epoch = T::EthSpec::slots_per_epoch(); let min_slot = (self.state.read().slot.epoch(slots_per_epoch) - 1).start_slot(slots_per_epoch); let blocks = BestBlockRootsIterator::owned(self.store.clone(), self.state.read().clone(), self.state.read().slot.clone()); for (root, slot) in blocks { - if root == attestation.data.target_root { + if root == attestation.data.target.root { return Some(self.state.read().clone()); } @@ -557,7 +557,7 @@ impl BeaconChain { }; // A different state is retrieved from the database. 
- match self.store.get::(&attestation.data.target_root) { + match self.store.get::>(&attestation.data.target.root) { Ok(Some(block)) => match self.store.get::>(&block.state_root) { Ok(state) => state, _ => None diff --git a/beacon_node/beacon_chain/src/fork_choice.rs b/beacon_node/beacon_chain/src/fork_choice.rs index 29b3664f1..3900575ae 100644 --- a/beacon_node/beacon_chain/src/fork_choice.rs +++ b/beacon_node/beacon_chain/src/fork_choice.rs @@ -4,7 +4,6 @@ use state_processing::common::get_attesting_indices; use std::sync::Arc; use store::{Error as StoreError, Store}; use types::{Attestation, BeaconBlock, BeaconState, BeaconStateError, Epoch, EthSpec, Hash256, Slot}; -use state_processing::common; type Result = std::result::Result; @@ -172,14 +171,11 @@ impl ForkChoice { } /// Determines whether or not the given attestation contains a latest message. - pub fn should_process_attestation(&self, state: &BeaconState, attestation: &Attestation) -> Result { - let validator_indices = common::get_attesting_indices_unsorted( - state, - &attestation.data, - &attestation.aggregation_bitfield, - )?; + pub fn should_process_attestation(&self, state: &BeaconState, attestation: &Attestation) -> Result { + let validator_indices = + get_attesting_indices(state, &attestation.data, &attestation.aggregation_bits)?; - let block_slot = state.get_attestation_slot(&attestation.data)?; + let block_slot = state.get_attestation_data_slot(&attestation.data)?; Ok(validator_indices .iter() diff --git a/beacon_node/beacon_chain/tests/tests.rs b/beacon_node/beacon_chain/tests/tests.rs index 730b8ec67..cc1a84973 100644 --- a/beacon_node/beacon_chain/tests/tests.rs +++ b/beacon_node/beacon_chain/tests/tests.rs @@ -1,3 +1,4 @@ +#![cfg(not(debug_assertions))] use beacon_chain::test_utils::{ AttestationStrategy, BeaconChainHarness, BlockStrategy, CommonTypes, PersistedBeaconChain, From ce73705498f3c39504b2822f67f422da7a3bfdb2 Mon Sep 17 00:00:00 2001 From: Grant Wuerker Date: Tue, 6 Aug 2019 
19:17:15 +0200 Subject: [PATCH 021/305] formatting --- beacon_node/beacon_chain/src/beacon_chain.rs | 52 +++++++++++++------- beacon_node/beacon_chain/src/errors.rs | 6 ++- beacon_node/beacon_chain/src/fork_choice.rs | 22 +++++---- beacon_node/beacon_chain/src/test_utils.rs | 7 +-- beacon_node/beacon_chain/tests/tests.rs | 34 ++++++++----- beacon_node/rpc/src/attestation.rs | 14 ++++-- eth2/lmd_ghost/src/reduced_tree.rs | 6 +-- 7 files changed, 85 insertions(+), 56 deletions(-) diff --git a/beacon_node/beacon_chain/src/beacon_chain.rs b/beacon_node/beacon_chain/src/beacon_chain.rs index 49e2cec83..0becbf2c9 100644 --- a/beacon_node/beacon_chain/src/beacon_chain.rs +++ b/beacon_node/beacon_chain/src/beacon_chain.rs @@ -3,6 +3,7 @@ use crate::errors::{BeaconChainError as Error, BlockProductionError}; use crate::fork_choice::{Error as ForkChoiceError, ForkChoice}; use crate::metrics::Metrics; use crate::persisted_beacon_chain::{PersistedBeaconChain, BEACON_CHAIN_DB_KEY}; +use crate::BeaconChainError; use lmd_ghost::LmdGhost; use log::trace; use operation_pool::DepositInsertStatus; @@ -11,19 +12,18 @@ use parking_lot::{RwLock, RwLockReadGuard}; use slog::{error, info, warn, Logger}; use slot_clock::SlotClock; use state_processing::per_block_processing::errors::{ - AttesterSlashingValidationError, DepositValidationError, - ExitValidationError, ProposerSlashingValidationError, TransferValidationError, + AttesterSlashingValidationError, DepositValidationError, ExitValidationError, + ProposerSlashingValidationError, TransferValidationError, }; use state_processing::{ - per_block_processing, per_block_processing_without_verifying_block_signature, - per_slot_processing, BlockProcessingError, common + common, per_block_processing, per_block_processing_without_verifying_block_signature, + per_slot_processing, BlockProcessingError, }; use std::sync::Arc; use store::iter::{BestBlockRootsIterator, BlockIterator, BlockRootsIterator, StateRootsIterator}; use store::{Error as 
DBError, Store}; use tree_hash::TreeHash; use types::*; -use crate::BeaconChainError; // Text included in blocks. // Must be 32-bytes or panic. @@ -511,17 +511,21 @@ impl BeaconChain { /// /// If valid, the attestation is added to the `op_pool` and aggregated with another attestation /// if possible. - pub fn process_attestation( - &self, - attestation: Attestation, - ) -> Result<(), Error> { + pub fn process_attestation(&self, attestation: Attestation) -> Result<(), Error> { self.metrics.attestation_processing_requests.inc(); let timer = self.metrics.attestation_processing_times.start_timer(); if let Some(state) = self.get_attestation_state(&attestation) { - if self.fork_choice.should_process_attestation(&state, &attestation)? { + if self + .fork_choice + .should_process_attestation(&state, &attestation)? + { let indexed_attestation = common::get_indexed_attestation(&state, &attestation)?; - per_block_processing::is_valid_indexed_attestation(&state, &indexed_attestation, &self.spec)?; + per_block_processing::is_valid_indexed_attestation( + &state, + &indexed_attestation, + &self.spec, + )?; self.fork_choice.process_attestation(&state, &attestation)?; } } @@ -540,12 +544,20 @@ impl BeaconChain { } /// Retrieves the `BeaconState` used to create the attestation. - fn get_attestation_state(&self, attestation: &Attestation) -> Option> { + fn get_attestation_state( + &self, + attestation: &Attestation, + ) -> Option> { // Current state is used if the attestation targets a historic block and a slot within an // equal or adjacent epoch. 
let slots_per_epoch = T::EthSpec::slots_per_epoch(); - let min_slot = (self.state.read().slot.epoch(slots_per_epoch) - 1).start_slot(slots_per_epoch); - let blocks = BestBlockRootsIterator::owned(self.store.clone(), self.state.read().clone(), self.state.read().slot.clone()); + let min_slot = + (self.state.read().slot.epoch(slots_per_epoch) - 1).start_slot(slots_per_epoch); + let blocks = BestBlockRootsIterator::owned( + self.store.clone(), + self.state.read().clone(), + self.state.read().slot.clone(), + ); for (root, slot) in blocks { if root == attestation.data.target.root { return Some(self.state.read().clone()); @@ -554,15 +566,18 @@ impl BeaconChain { if slot == min_slot { break; } - }; + } // A different state is retrieved from the database. - match self.store.get::>(&attestation.data.target.root) { + match self + .store + .get::>(&attestation.data.target.root) + { Ok(Some(block)) => match self.store.get::>(&block.state_root) { Ok(state) => state, - _ => None + _ => None, }, - _ => None + _ => None, } } @@ -1031,4 +1046,3 @@ impl From for Error { Error::BeaconStateError(e) } } - diff --git a/beacon_node/beacon_chain/src/errors.rs b/beacon_node/beacon_chain/src/errors.rs index 4e2170ca8..266c598ac 100644 --- a/beacon_node/beacon_chain/src/errors.rs +++ b/beacon_node/beacon_chain/src/errors.rs @@ -1,9 +1,11 @@ use crate::fork_choice::Error as ForkChoiceError; use crate::metrics::Error as MetricsError; +use state_processing::per_block_processing::errors::{ + AttestationValidationError, IndexedAttestationValidationError, +}; use state_processing::BlockProcessingError; use state_processing::SlotProcessingError; use types::*; -use state_processing::per_block_processing::errors::{AttestationValidationError, IndexedAttestationValidationError}; macro_rules! 
easy_from_to { ($from: ident, $to: ident) => { @@ -33,7 +35,7 @@ pub enum BeaconChainError { SlotProcessingError(SlotProcessingError), MetricsError(String), AttestationValidationError(AttestationValidationError), - IndexedAttestationValidationError(IndexedAttestationValidationError) + IndexedAttestationValidationError(IndexedAttestationValidationError), } easy_from_to!(SlotProcessingError, BeaconChainError); diff --git a/beacon_node/beacon_chain/src/fork_choice.rs b/beacon_node/beacon_chain/src/fork_choice.rs index 3900575ae..d16a8f9a8 100644 --- a/beacon_node/beacon_chain/src/fork_choice.rs +++ b/beacon_node/beacon_chain/src/fork_choice.rs @@ -3,7 +3,9 @@ use lmd_ghost::LmdGhost; use state_processing::common::get_attesting_indices; use std::sync::Arc; use store::{Error as StoreError, Store}; -use types::{Attestation, BeaconBlock, BeaconState, BeaconStateError, Epoch, EthSpec, Hash256, Slot}; +use types::{ + Attestation, BeaconBlock, BeaconState, BeaconStateError, Epoch, EthSpec, Hash256, Slot, +}; type Result = std::result::Result; @@ -171,7 +173,11 @@ impl ForkChoice { } /// Determines whether or not the given attestation contains a latest message. 
- pub fn should_process_attestation(&self, state: &BeaconState, attestation: &Attestation) -> Result { + pub fn should_process_attestation( + &self, + state: &BeaconState, + attestation: &Attestation, + ) -> Result { let validator_indices = get_attesting_indices(state, &attestation.data, &attestation.aggregation_bits)?; @@ -179,12 +185,11 @@ impl ForkChoice { Ok(validator_indices .iter() - .find(|&&v| { - match self.backend.latest_message(v) { - Some((_, slot)) => block_slot > slot, - None => true - } - }).is_some()) + .find(|&&v| match self.backend.latest_message(v) { + Some((_, slot)) => block_slot > slot, + None => true, + }) + .is_some()) } // Returns the latest message for a given validator @@ -224,4 +229,3 @@ impl From for Error { Error::BackendError(e) } } - diff --git a/beacon_node/beacon_chain/src/test_utils.rs b/beacon_node/beacon_chain/src/test_utils.rs index 8f0d4c8ee..ab1a31690 100644 --- a/beacon_node/beacon_chain/src/test_utils.rs +++ b/beacon_node/beacon_chain/src/test_utils.rs @@ -178,12 +178,7 @@ where if let BlockProcessingOutcome::Processed { block_root } = outcome { head_block_root = Some(block_root); - self.add_free_attestations( - &attestation_strategy, - &new_state, - block_root, - slot, - ); + self.add_free_attestations(&attestation_strategy, &new_state, block_root, slot); } else { panic!("block should be successfully processed: {:?}", outcome); } diff --git a/beacon_node/beacon_chain/tests/tests.rs b/beacon_node/beacon_chain/tests/tests.rs index cc1a84973..5b8a09faf 100644 --- a/beacon_node/beacon_chain/tests/tests.rs +++ b/beacon_node/beacon_chain/tests/tests.rs @@ -8,7 +8,7 @@ use lmd_ghost::ThreadSafeReducedTree; use rand::Rng; use store::{MemoryStore, Store}; use types::test_utils::{SeedableRng, TestRandom, XorShiftRng}; -use types::{Deposit, EthSpec, Hash256, MinimalEthSpec, Slot, RelativeEpoch}; +use types::{Deposit, EthSpec, Hash256, MinimalEthSpec, RelativeEpoch, Slot}; // Should ideally be divisible by 3. 
pub const VALIDATOR_COUNT: usize = 24; @@ -270,20 +270,23 @@ fn free_attestations_added_to_fork_choice_some_none() { let validators: Vec = (0..VALIDATOR_COUNT).collect(); let slots: Vec = validators .iter() - .map(|&v| - state.get_attestation_duties(v, RelativeEpoch::Current) + .map(|&v| { + state + .get_attestation_duties(v, RelativeEpoch::Current) .expect("should get attester duties") .unwrap() .slot - ).collect(); + }) + .collect(); let validator_slots: Vec<(&usize, Slot)> = validators.iter().zip(slots).collect(); for (validator, slot) in validator_slots.clone() { let latest_message = fork_choice.latest_message(*validator); - if slot <= num_blocks_produced && slot != 0{ + if slot <= num_blocks_produced && slot != 0 { assert_eq!( - latest_message.unwrap().1, slot, + latest_message.unwrap().1, + slot, "Latest message slot should be equal to attester duty." ) } else { @@ -313,30 +316,35 @@ fn free_attestations_added_to_fork_choice_all_updated() { let validators: Vec = (0..VALIDATOR_COUNT).collect(); let slots: Vec = validators .iter() - .map(|&v| - state.get_attestation_duties(v, RelativeEpoch::Current) + .map(|&v| { + state + .get_attestation_duties(v, RelativeEpoch::Current) .expect("should get attester duties") .unwrap() .slot - ).collect(); + }) + .collect(); let validator_slots: Vec<(&usize, Slot)> = validators.iter().zip(slots).collect(); for (validator, slot) in validator_slots { let latest_message = fork_choice.latest_message(*validator); assert_eq!( - latest_message.unwrap().1, slot, + latest_message.unwrap().1, + slot, "Latest message slot should be equal to attester duty." ); if slot != num_blocks_produced { - let block_root = state.get_block_root(slot) + let block_root = state + .get_block_root(slot) .expect("Should get block root at slot"); assert_eq!( - latest_message.unwrap().0, *block_root, + latest_message.unwrap().0, + *block_root, "Latest message block root should be equal to block at slot." 
); } } -} \ No newline at end of file +} diff --git a/beacon_node/rpc/src/attestation.rs b/beacon_node/rpc/src/attestation.rs index c7b3a5711..00a643151 100644 --- a/beacon_node/rpc/src/attestation.rs +++ b/beacon_node/rpc/src/attestation.rs @@ -1,4 +1,4 @@ -use beacon_chain::{BeaconChain, BeaconChainTypes, BeaconChainError}; +use beacon_chain::{BeaconChain, BeaconChainError, BeaconChainTypes}; use eth2_libp2p::PubsubMessage; use eth2_libp2p::TopicBuilder; use eth2_libp2p::BEACON_ATTESTATION_TOPIC; @@ -179,7 +179,11 @@ impl AttestationService for AttestationServiceInstance { "error" => format!("{:?}", e), ); resp.set_success(false); - resp.set_msg(format!("InvalidIndexedAttestation: {:?}", e).as_bytes().to_vec()); + resp.set_msg( + format!("InvalidIndexedAttestation: {:?}", e) + .as_bytes() + .to_vec(), + ); } Err(e) => { // Some other error @@ -190,7 +194,11 @@ impl AttestationService for AttestationServiceInstance { "error" => format!("{:?}", e), ); resp.set_success(false); - resp.set_msg(format!("There was a beacon chain error: {:?}", e).as_bytes().to_vec()); + resp.set_msg( + format!("There was a beacon chain error: {:?}", e) + .as_bytes() + .to_vec(), + ); } }; diff --git a/eth2/lmd_ghost/src/reduced_tree.rs b/eth2/lmd_ghost/src/reduced_tree.rs index 0ef78c37e..5d7074804 100644 --- a/eth2/lmd_ghost/src/reduced_tree.rs +++ b/eth2/lmd_ghost/src/reduced_tree.rs @@ -111,9 +111,7 @@ where } fn latest_message(&self, validator_index: usize) -> Option<(Hash256, Slot)> { - self.core - .write() - .latest_message(validator_index) + self.core.write().latest_message(validator_index) } } @@ -263,7 +261,7 @@ where pub fn latest_message(&mut self, validator_index: usize) -> Option<(Hash256, Slot)> { match self.latest_votes.get(validator_index) { Some(v) => Some((v.hash.clone(), v.slot.clone())), - None => None + None => None, } } From 2c3fc318bafcd230ff6b9c3c44519d2f6197018e Mon Sep 17 00:00:00 2001 From: Paul Hauner Date: Wed, 7 Aug 2019 13:20:15 +1000 Subject: [PATCH 
022/305] Do first pass on Grants code --- beacon_node/beacon_chain/src/beacon_chain.rs | 143 ++++++++++++++++--- beacon_node/beacon_chain/src/errors.rs | 3 + beacon_node/network/src/sync/simple_sync.rs | 7 +- eth2/types/src/beacon_block.rs | 5 + 4 files changed, 141 insertions(+), 17 deletions(-) diff --git a/beacon_node/beacon_chain/src/beacon_chain.rs b/beacon_node/beacon_chain/src/beacon_chain.rs index 0becbf2c9..fed48036d 100644 --- a/beacon_node/beacon_chain/src/beacon_chain.rs +++ b/beacon_node/beacon_chain/src/beacon_chain.rs @@ -54,6 +54,12 @@ pub enum BlockProcessingOutcome { PerBlockProcessingError(BlockProcessingError), } +#[derive(Debug, PartialEq)] +pub enum AttestationProcessingOutcome { + Processed, + UnknownHeadBlock { beacon_block_root: Hash256 }, +} + pub trait BeaconChainTypes { type Store: store::Store; type SlotClock: slot_clock::SlotClock; @@ -511,28 +517,114 @@ impl BeaconChain { /// /// If valid, the attestation is added to the `op_pool` and aggregated with another attestation /// if possible. - pub fn process_attestation(&self, attestation: Attestation) -> Result<(), Error> { + pub fn process_attestation( + &self, + attestation: Attestation, + ) -> Result { + // From the store, load the attestation's "head block". + // + // An honest validator would have set this block to be the head of the chain (i.e., the + // result of running fork choice). + if let Some(attestation_head_block) = self + .store + .get::>(&attestation.data.beacon_block_root)? + { + // Attempt to process the attestation using the `self.head()` state. + // + // This is purely an effort to avoid loading a `BeaconState` unnecessarily from the DB. + let outcome: Option> = { + // Take a read lock on the head beacon state. + // + // The purpose of this whole `let processed ...` block is to ensure that the read + // lock is dropped if we don't end up using the head beacon state. 
+ let state = &self.head().beacon_state; + + // If it turns out that the attestation was made using the head state, then there + // is no need to load a state from the database to process the attestation. + if state.current_epoch() == attestation_head_block.epoch() + && state + .get_block_root(attestation_head_block.slot) + .map(|root| *root == attestation.data.beacon_block_root) + .unwrap_or_else(|_| false) + { + // The head state is able to be used to validate this attestation. No need to load + // anything from the database. + Some(self.process_attestation_for_state_and_block( + attestation.clone(), + state, + &attestation_head_block, + )) + } else { + None + } + }; + + // TODO: we could try and see if the "speculative state" (e.g., self.state) can support + // this, without needing to load it from the db. + + if let Some(result) = outcome { + result + } else { + // The state required to verify this attestation must be loaded from the database. + let mut state: BeaconState = self + .store + .get(&attestation_head_block.state_root)? + .ok_or_else(|| Error::MissingBeaconState(attestation_head_block.state_root))?; + + // Ensure the state loaded from the database matches the state of the attestation + // head block. + for _ in state.slot.as_u64()..attestation_head_block.slot.as_u64() { + per_slot_processing(&mut state, &self.spec)?; + } + + self.process_attestation_for_state_and_block( + attestation, + &state, + &attestation_head_block, + ) + } + } else { + // Reject any block where we have not processed `attestation.data.beacon_block_root`. + // + // This is likely overly restrictive, we could store the attestation for later + // processing. 
+ warn!( + self.log, + "Dropping attestation for unknown block"; + "block" => format!("{}", attestation.data.beacon_block_root) + ); + Ok(AttestationProcessingOutcome::UnknownHeadBlock { + beacon_block_root: attestation.data.beacon_block_root, + }) + } + } + + fn process_attestation_for_state_and_block( + &self, + attestation: Attestation, + state: &BeaconState, + head_block: &BeaconBlock, + ) -> Result { self.metrics.attestation_processing_requests.inc(); let timer = self.metrics.attestation_processing_times.start_timer(); - if let Some(state) = self.get_attestation_state(&attestation) { - if self - .fork_choice - .should_process_attestation(&state, &attestation)? - { - let indexed_attestation = common::get_indexed_attestation(&state, &attestation)?; - per_block_processing::is_valid_indexed_attestation( - &state, - &indexed_attestation, - &self.spec, - )?; - self.fork_choice.process_attestation(&state, &attestation)?; - } + if self + .fork_choice + .should_process_attestation(state, &attestation)? + { + // TODO: check validation. 
+ let indexed_attestation = common::get_indexed_attestation(state, &attestation)?; + per_block_processing::is_valid_indexed_attestation( + state, + &indexed_attestation, + &self.spec, + )?; + self.fork_choice.process_attestation(&state, &attestation)?; } let result = self .op_pool - .insert_attestation(attestation, &*self.state.read(), &self.spec); + .insert_attestation(attestation, state, &self.spec); timer.observe_duration(); @@ -540,14 +632,32 @@ impl BeaconChain { self.metrics.attestation_processing_successes.inc(); } - result.map_err(|e| BeaconChainError::AttestationValidationError(e)) + result + .map(|_| AttestationProcessingOutcome::Processed) + .map_err(|e| Error::AttestationValidationError(e)) } + fn state_can_process_attestation( + state: &BeaconState, + data: &AttestationData, + head_block: &BeaconBlock, + ) -> bool { + (state.current_epoch() - 1 <= data.target.epoch) + && (data.target.epoch <= state.current_epoch() + 1) + && state + .get_block_root(head_block.slot) + .map(|root| *root == data.beacon_block_root) + .unwrap_or_else(|_| false) + } + + /* /// Retrieves the `BeaconState` used to create the attestation. fn get_attestation_state( &self, attestation: &Attestation, ) -> Option> { + let state = &self.head().beacon_state; + // Current state is used if the attestation targets a historic block and a slot within an // equal or adjacent epoch. let slots_per_epoch = T::EthSpec::slots_per_epoch(); @@ -580,6 +690,7 @@ impl BeaconChain { _ => None, } } + */ /// Accept some deposit and queue it for inclusion in an appropriate block. 
pub fn process_deposit( diff --git a/beacon_node/beacon_chain/src/errors.rs b/beacon_node/beacon_chain/src/errors.rs index 266c598ac..0b8fae7bf 100644 --- a/beacon_node/beacon_chain/src/errors.rs +++ b/beacon_node/beacon_chain/src/errors.rs @@ -34,6 +34,9 @@ pub enum BeaconChainError { MissingBeaconState(Hash256), SlotProcessingError(SlotProcessingError), MetricsError(String), + NoStateForAttestation { + beacon_block_root: Hash256, + }, AttestationValidationError(AttestationValidationError), IndexedAttestationValidationError(IndexedAttestationValidationError), } diff --git a/beacon_node/network/src/sync/simple_sync.rs b/beacon_node/network/src/sync/simple_sync.rs index ac001415c..13e9203dd 100644 --- a/beacon_node/network/src/sync/simple_sync.rs +++ b/beacon_node/network/src/sync/simple_sync.rs @@ -630,7 +630,12 @@ impl SimpleSync { _network: &mut NetworkContext, ) { match self.chain.process_attestation(msg) { - Ok(()) => info!(self.log, "ImportedAttestation"; "source" => "gossip"), + Ok(outcome) => info!( + self.log, + "Processed attestation"; + "source" => "gossip", + "outcome" => format!("{:?}", outcome) + ), Err(e) => { warn!(self.log, "InvalidAttestation"; "source" => "gossip", "error" => format!("{:?}", e)) } diff --git a/eth2/types/src/beacon_block.rs b/eth2/types/src/beacon_block.rs index 772ef0c46..ecf879799 100644 --- a/eth2/types/src/beacon_block.rs +++ b/eth2/types/src/beacon_block.rs @@ -62,6 +62,11 @@ impl BeaconBlock { } } + /// Returns the epoch corresponding to `self.slot`. + pub fn epoch(&self) -> Epoch { + self.slot.epoch(T::slots_per_epoch()) + } + /// Returns the `signed_root` of the block. 
/// /// Spec v0.8.1 From 907a4e5a4b7f8e4a70a2c790d6f85daa48fbd45e Mon Sep 17 00:00:00 2001 From: Age Manning Date: Wed, 7 Aug 2019 14:54:08 +1000 Subject: [PATCH 023/305] Configuration updates allow for verbosity CLI flag and spec constants --- beacon_node/client/src/config.rs | 12 +++++- beacon_node/src/main.rs | 61 ++++++++++++++++++++----------- eth2/utils/eth2_config/src/lib.rs | 7 ++++ validator_client/eth2_config.toml | 47 ------------------------ validator_client/src/main.rs | 59 +++++++++++++++++++----------- 5 files changed, 94 insertions(+), 92 deletions(-) delete mode 100644 validator_client/eth2_config.toml diff --git a/beacon_node/client/src/config.rs b/beacon_node/client/src/config.rs index 1a27de406..176625d77 100644 --- a/beacon_node/client/src/config.rs +++ b/beacon_node/client/src/config.rs @@ -1,3 +1,4 @@ +use crate::Eth2Config; use clap::ArgMatches; use http_server::HttpServerConfig; use network::NetworkConfig; @@ -56,8 +57,6 @@ impl Default for Config { log_file: PathBuf::from(""), db_type: "disk".to_string(), db_name: "chain_db".to_string(), - // Note: there are no default bootnodes specified. - // Once bootnodes are established, add them here. 
network: NetworkConfig::new(), rpc: rpc::RPCConfig::default(), http: HttpServerConfig::default(), @@ -129,6 +128,15 @@ impl Config { self.data_dir = PathBuf::from(dir); }; + if let Some(default_spec) = args.value_of("default-spec") { + match default_spec { + "mainnet" => self.spec_constants = Eth2Config::mainnet().spec_constants, + "minimal" => self.spec_constants = Eth2Config::minimal().spec_constants, + "interop" => self.spec_constants = Eth2Config::interop().spec_constants, + _ => {} // not supported + } + } + if let Some(dir) = args.value_of("db") { self.db_type = dir.to_string(); }; diff --git a/beacon_node/src/main.rs b/beacon_node/src/main.rs index c85eeedac..be57c6c9d 100644 --- a/beacon_node/src/main.rs +++ b/beacon_node/src/main.rs @@ -193,12 +193,9 @@ fn main() { .long("default-spec") .value_name("TITLE") .short("default-spec") - .help("Specifies the default eth2 spec to be used. Overridden by any spec loaded - from disk. A spec will be written to disk after this flag is used, so it is - primarily used for creating eth2 spec files.") + .help("Specifies the default eth2 spec to be used. This will override any spec written to disk and will therefore be used by default in future instances.") .takes_value(true) .possible_values(&["mainnet", "minimal", "interop"]) - .default_value("minimal"), ) .arg( Arg::with_name("recent-genesis") @@ -217,7 +214,7 @@ fn main() { .help("The title of the spec constants for chain config.") .takes_value(true) .possible_values(&["info", "debug", "trace", "warn", "error", "crit"]) - .default_value("info"), + .default_value("trace"), ) .arg( Arg::with_name("verbosity") @@ -316,26 +313,42 @@ fn main() { let eth2_config_path = data_dir.join(ETH2_CONFIG_FILENAME); - // Attempt to load the `Eth2Config` from file. + // Initialise the `Eth2Config`. // - // If the file doesn't exist, create a default one depending on the CLI flags. 
- let mut eth2_config = match read_from_file::(eth2_config_path.clone()) { - Ok(Some(c)) => c, - Ok(None) => { - let default = match matches.value_of("default-spec") { - Some("mainnet") => Eth2Config::mainnet(), - Some("minimal") => Eth2Config::minimal(), - _ => unreachable!(), // Guarded by slog. - }; - if let Err(e) = write_to_file(eth2_config_path, &default) { + // If a CLI parameter is set, overwrite any config file present. + // If a parameter is not set, use either the config file present or default to minimal. + let cli_config = match matches.value_of("default-spec") { + Some("mainnet") => Some(Eth2Config::mainnet()), + Some("minimal") => Some(Eth2Config::minimal()), + Some("interop") => Some(Eth2Config::interop()), + _ => None, + }; + // if cli is specified, write the new config + let mut eth2_config = { + if let Some(cli_config) = cli_config { + if let Err(e) = write_to_file(eth2_config_path, &cli_config) { crit!(log, "Failed to write default Eth2Config to file"; "error" => format!("{:?}", e)); return; } - default - } - Err(e) => { - crit!(log, "Failed to load/generate an Eth2Config"; "error" => format!("{:?}", e)); - return; + cli_config + } else { + // config not specified, read from disk + match read_from_file::(eth2_config_path.clone()) { + Ok(Some(c)) => c, + Ok(None) => { + // set default to minimal + let eth2_config = Eth2Config::minimal(); + if let Err(e) = write_to_file(eth2_config_path, ð2_config) { + crit!(log, "Failed to write default Eth2Config to file"; "error" => format!("{:?}", e)); + return; + } + eth2_config + } + Err(e) => { + crit!(log, "Failed to instantiate an Eth2Config"; "error" => format!("{:?}", e)); + return; + } + } } }; @@ -348,6 +361,12 @@ fn main() { } }; + // check to ensure the spec constants between the client and eth2_config match + if eth2_config.spec_constants != client_config.spec_constants { + crit!(log, "Specification constants do not match."; "Client Config" => format!("{}", client_config.spec_constants), "Eth2 
Config" => format!("{}", eth2_config.spec_constants)); + return; + } + // Start the node using a `tokio` executor. match run::run_beacon_node(client_config, eth2_config, &log) { Ok(_) => {} diff --git a/eth2/utils/eth2_config/src/lib.rs b/eth2/utils/eth2_config/src/lib.rs index 17cbc4211..794a27e4e 100644 --- a/eth2/utils/eth2_config/src/lib.rs +++ b/eth2/utils/eth2_config/src/lib.rs @@ -37,6 +37,13 @@ impl Eth2Config { spec: ChainSpec::minimal(), } } + + pub fn interop() -> Self { + Self { + spec_constants: "interop".to_string(), + spec: ChainSpec::interop(), + } + } } impl Eth2Config { diff --git a/validator_client/eth2_config.toml b/validator_client/eth2_config.toml deleted file mode 100644 index 1e0781378..000000000 --- a/validator_client/eth2_config.toml +++ /dev/null @@ -1,47 +0,0 @@ -spec_constants = "minimal" - -[spec] -target_committee_size = 4 -max_indices_per_attestation = 4096 -min_per_epoch_churn_limit = 4 -churn_limit_quotient = 65536 -base_rewards_per_epoch = 5 -shuffle_round_count = 10 -deposit_contract_tree_depth = 32 -min_deposit_amount = 1000000000 -max_effective_balance = 32000000000 -ejection_balance = 16000000000 -effective_balance_increment = 1000000000 -genesis_slot = 0 -zero_hash = "0x0000000000000000000000000000000000000000000000000000000000000000" -bls_withdrawal_prefix_byte = "0x00" -genesis_time = 4294967295 -seconds_per_slot = 6 -min_attestation_inclusion_delay = 2 -min_seed_lookahead = 1 -activation_exit_delay = 4 -slots_per_eth1_voting_period = 16 -slots_per_historical_root = 8192 -min_validator_withdrawability_delay = 256 -persistent_committee_period = 2048 -max_crosslink_epochs = 64 -min_epochs_to_inactivity_penalty = 4 -base_reward_quotient = 32 -whistleblowing_reward_quotient = 512 -proposer_reward_quotient = 8 -inactivity_penalty_quotient = 33554432 -min_slashing_penalty_quotient = 32 -max_proposer_slashings = 16 -max_attester_slashings = 1 -max_attestations = 128 -max_deposits = 16 -max_voluntary_exits = 16 -max_transfers = 0 
-domain_beacon_proposer = 0 -domain_randao = 1 -domain_attestation = 2 -domain_deposit = 3 -domain_voluntary_exit = 4 -domain_transfer = 5 -boot_nodes = ["/ip4/127.0.0.1/tcp/9000"] -chain_id = 2 diff --git a/validator_client/src/main.rs b/validator_client/src/main.rs index 76acb2f1a..0782df323 100644 --- a/validator_client/src/main.rs +++ b/validator_client/src/main.rs @@ -64,14 +64,13 @@ fn main() { .takes_value(true), ) .arg( - Arg::with_name("spec-constants") - .long("spec-constants") + Arg::with_name("default-spec") + .long("default-spec") .value_name("TITLE") - .short("s") - .help("The title of the spec constants for chain config.") + .short("default-spec") + .help("Specifies the default eth2 spec to be used. This will override any spec written to disk and will therefore be used by default in future instances.") .takes_value(true) .possible_values(&["mainnet", "minimal", "interop"]) - .default_value("minimal"), ) .arg( Arg::with_name("debug-level") @@ -126,7 +125,7 @@ fn main() { let client_config_path = data_dir.join(CLIENT_CONFIG_FILENAME); - // Attempt to lead the `ClientConfig` from disk. + // Attempt to load the `ClientConfig` from disk. // // If file doesn't exist, create a new, default one. let mut client_config = match read_from_file::( @@ -164,26 +163,42 @@ fn main() { .and_then(|s| Some(PathBuf::from(s))) .unwrap_or_else(|| data_dir.join(ETH2_CONFIG_FILENAME)); - // Attempt to load the `Eth2Config` from file. + // Initialise the `Eth2Config`. // - // If the file doesn't exist, create a default one depending on the CLI flags. - let mut eth2_config = match read_from_file::(eth2_config_path.clone()) { - Ok(Some(c)) => c, - Ok(None) => { - let default = match matches.value_of("spec-constants") { - Some("mainnet") => Eth2Config::mainnet(), - Some("minimal") => Eth2Config::minimal(), - _ => unreachable!(), // Guarded by slog. 
- }; - if let Err(e) = write_to_file(eth2_config_path, &default) { + // If a CLI parameter is set, overwrite any config file present. + // If a parameter is not set, use either the config file present or default to minimal. + let cli_config = match matches.value_of("default-spec") { + Some("mainnet") => Some(Eth2Config::mainnet()), + Some("minimal") => Some(Eth2Config::minimal()), + Some("interop") => Some(Eth2Config::interop()), + _ => None, + }; + // if cli is specified, write the new config + let mut eth2_config = { + if let Some(cli_config) = cli_config { + if let Err(e) = write_to_file(eth2_config_path, &cli_config) { crit!(log, "Failed to write default Eth2Config to file"; "error" => format!("{:?}", e)); return; } - default - } - Err(e) => { - crit!(log, "Failed to instantiate an Eth2Config"; "error" => format!("{:?}", e)); - return; + cli_config + } else { + // config not specified, read from disk + match read_from_file::(eth2_config_path.clone()) { + Ok(Some(c)) => c, + Ok(None) => { + // set default to minimal + let eth2_config = Eth2Config::minimal(); + if let Err(e) = write_to_file(eth2_config_path, ð2_config) { + crit!(log, "Failed to write default Eth2Config to file"; "error" => format!("{:?}", e)); + return; + } + eth2_config + } + Err(e) => { + crit!(log, "Failed to instantiate an Eth2Config"; "error" => format!("{:?}", e)); + return; + } + } } }; From dba7bfc4e14d6bd57a7617d5464dfaa1d0f46581 Mon Sep 17 00:00:00 2001 From: Age Manning Date: Wed, 7 Aug 2019 15:17:21 +1000 Subject: [PATCH 024/305] Update submodules to master --- tests/ef_tests/eth2.0-spec-tests | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/ef_tests/eth2.0-spec-tests b/tests/ef_tests/eth2.0-spec-tests index d40578264..aaa1673f5 160000 --- a/tests/ef_tests/eth2.0-spec-tests +++ b/tests/ef_tests/eth2.0-spec-tests @@ -1 +1 @@ -Subproject commit d405782646190595927cc0a59f504f7b00a760f3 +Subproject commit aaa1673f508103e11304833e0456e4149f880065 From 
b3e0aad7bfa3a3ebfd69f61163b18048438924e8 Mon Sep 17 00:00:00 2001 From: Age Manning Date: Wed, 7 Aug 2019 15:55:09 +1000 Subject: [PATCH 025/305] Correct minimal chainspec modifications --- eth2/types/src/chain_spec.rs | 6 ------ 1 file changed, 6 deletions(-) diff --git a/eth2/types/src/chain_spec.rs b/eth2/types/src/chain_spec.rs index d6eaa123d..9dec626d4 100644 --- a/eth2/types/src/chain_spec.rs +++ b/eth2/types/src/chain_spec.rs @@ -202,15 +202,12 @@ impl ChainSpec { pub fn minimal() -> Self { // Note: bootnodes to be updated when static nodes exist. let boot_nodes = vec![]; - let genesis_slot = Slot::new(0); Self { target_committee_size: 4, shuffle_round_count: 10, min_genesis_active_validator_count: 64, max_epochs_per_crosslink: 4, - min_attestation_inclusion_delay: 2, - genesis_slot, network_id: 2, // lighthouse testnet network id boot_nodes, ..ChainSpec::mainnet() @@ -221,15 +218,12 @@ impl ChainSpec { /// /// This allows us to customize a chain spec for interop testing. pub fn interop() -> Self { - let genesis_slot = Slot::new(0); let boot_nodes = vec![]; Self { seconds_per_slot: 12, target_committee_size: 4, shuffle_round_count: 10, - min_attestation_inclusion_delay: 2, - genesis_slot, network_id: 13, boot_nodes, ..ChainSpec::mainnet() From fe2402b361b9e7d8bc6f23cb022da875c32c050c Mon Sep 17 00:00:00 2001 From: Paul Hauner Date: Wed, 7 Aug 2019 16:02:30 +1000 Subject: [PATCH 026/305] Add another attestation processing test --- beacon_node/beacon_chain/src/beacon_chain.rs | 9 ++--- beacon_node/beacon_chain/src/lib.rs | 4 ++- beacon_node/beacon_chain/src/test_utils.rs | 30 +++++++++++++++-- beacon_node/beacon_chain/tests/tests.rs | 35 ++++++++++++++++++++ 4 files changed, 70 insertions(+), 8 deletions(-) diff --git a/beacon_node/beacon_chain/src/beacon_chain.rs b/beacon_node/beacon_chain/src/beacon_chain.rs index c58e619bc..8d5922850 100644 --- a/beacon_node/beacon_chain/src/beacon_chain.rs +++ b/beacon_node/beacon_chain/src/beacon_chain.rs @@ -543,7 
+543,7 @@ impl BeaconChain { // Attempt to process the attestation using the `self.head()` state. // // This is purely an effort to avoid loading a `BeaconState` unnecessarily from the DB. - let outcome: Option> = { + let optional_outcome: Option> = { // Take a read lock on the head beacon state. // // The purpose of this whole `let processed ...` block is to ensure that the read @@ -553,10 +553,11 @@ impl BeaconChain { // If it turns out that the attestation was made using the head state, then there // is no need to load a state from the database to process the attestation. if state.current_epoch() == attestation_head_block.epoch() - && state + && (state .get_block_root(attestation_head_block.slot) .map(|root| *root == attestation.data.beacon_block_root) .unwrap_or_else(|_| false) + || attestation.data.beacon_block_root == self.head().beacon_block_root) { // The head state is able to be used to validate this attestation. No need to load // anything from the database. @@ -573,8 +574,8 @@ impl BeaconChain { // TODO: we could try and see if the "speculative state" (e.g., self.state) can support // this, without needing to load it from the db. - if let Some(result) = outcome { - result + if let Some(outcome) = optional_outcome { + outcome } else { // The state required to verify this attestation must be loaded from the database. 
let mut state: BeaconState = self diff --git a/beacon_node/beacon_chain/src/lib.rs b/beacon_node/beacon_chain/src/lib.rs index c2efcad13..3188760a4 100644 --- a/beacon_node/beacon_chain/src/lib.rs +++ b/beacon_node/beacon_chain/src/lib.rs @@ -7,7 +7,9 @@ mod metrics; mod persisted_beacon_chain; pub mod test_utils; -pub use self::beacon_chain::{BeaconChain, BeaconChainTypes, BlockProcessingOutcome}; +pub use self::beacon_chain::{ + AttestationProcessingOutcome, BeaconChain, BeaconChainTypes, BlockProcessingOutcome, +}; pub use self::checkpoint::CheckPoint; pub use self::errors::{BeaconChainError, BlockProductionError}; pub use lmd_ghost; diff --git a/beacon_node/beacon_chain/src/test_utils.rs b/beacon_node/beacon_chain/src/test_utils.rs index 1049c66ad..ce6d4d20c 100644 --- a/beacon_node/beacon_chain/src/test_utils.rs +++ b/beacon_node/beacon_chain/src/test_utils.rs @@ -268,6 +268,28 @@ where head_block_root: Hash256, head_block_slot: Slot, ) { + self.get_free_attestations( + attestation_strategy, + state, + head_block_root, + head_block_slot, + ) + .into_iter() + .for_each(|attestation| { + self.chain + .process_attestation(attestation) + .expect("should process attestation"); + }); + } + + /// Generates a `Vec` for some attestation strategy and head_block. 
+ pub fn get_free_attestations( + &self, + attestation_strategy: &AttestationStrategy, + state: &BeaconState, + head_block_root: Hash256, + head_block_slot: Slot, + ) -> Vec> { let spec = &self.spec; let fork = &state.fork; @@ -276,6 +298,8 @@ where AttestationStrategy::SomeValidators(vec) => vec.clone(), }; + let mut vec = vec![]; + state .get_crosslink_committees_at_slot(state.slot) .expect("should get committees") @@ -328,12 +352,12 @@ where signature, }; - self.chain - .process_attestation(attestation) - .expect("should process attestation"); + vec.push(attestation) } } }); + + vec } /// Creates two forks: diff --git a/beacon_node/beacon_chain/tests/tests.rs b/beacon_node/beacon_chain/tests/tests.rs index 5b8a09faf..1f8400849 100644 --- a/beacon_node/beacon_chain/tests/tests.rs +++ b/beacon_node/beacon_chain/tests/tests.rs @@ -4,6 +4,7 @@ use beacon_chain::test_utils::{ AttestationStrategy, BeaconChainHarness, BlockStrategy, CommonTypes, PersistedBeaconChain, BEACON_CHAIN_DB_KEY, }; +use beacon_chain::AttestationProcessingOutcome; use lmd_ghost::ThreadSafeReducedTree; use rand::Rng; use store::{MemoryStore, Store}; @@ -298,6 +299,40 @@ fn free_attestations_added_to_fork_choice_some_none() { } } +#[test] +fn free_attestations_over_slots() { + let num_blocks_produced = MinimalEthSpec::slots_per_epoch() * 5; + + let harness = get_harness(VALIDATOR_COUNT); + + let mut attestations = vec![]; + + for _ in 0..num_blocks_produced { + harness.extend_chain( + 2, + BlockStrategy::OnCanonicalHead, + // Don't produce & include any attestations (we'll collect them later). 
+ AttestationStrategy::SomeValidators(vec![]), + ); + + attestations.append(&mut harness.get_free_attestations( + &AttestationStrategy::AllValidators, + &harness.chain.head().beacon_state, + harness.chain.head().beacon_block_root, + harness.chain.head().beacon_block.slot, + )); + + harness.advance_slot(); + } + + for attestation in attestations { + assert_eq!( + harness.chain.process_attestation(attestation), + Ok(AttestationProcessingOutcome::Processed) + ) + } +} + #[test] fn free_attestations_added_to_fork_choice_all_updated() { let num_blocks_produced = MinimalEthSpec::slots_per_epoch() * 2 - 1; From 107f32642f2b82db7becce53bce7638f635834fa Mon Sep 17 00:00:00 2001 From: Age Manning Date: Wed, 7 Aug 2019 16:33:21 +1000 Subject: [PATCH 027/305] Duplication of validator polls are no longer fatal --- validator_client/src/service.rs | 22 +++++++++++++--------- 1 file changed, 13 insertions(+), 9 deletions(-) diff --git a/validator_client/src/service.rs b/validator_client/src/service.rs index c4ccbc204..3ddb96e4c 100644 --- a/validator_client/src/service.rs +++ b/validator_client/src/service.rs @@ -23,7 +23,7 @@ use protos::services_grpc::{ AttestationServiceClient, BeaconBlockServiceClient, BeaconNodeServiceClient, ValidatorServiceClient, }; -use slog::{error, info, warn}; +use slog::{crit, error, info, warn}; use slot_clock::{SlotClock, SystemTimeSlotClock}; use std::marker::PhantomData; use std::sync::Arc; @@ -37,7 +37,7 @@ use types::{ChainSpec, Epoch, EthSpec, Fork, Slot}; /// A fixed amount of time after a slot to perform operations. This gives the node time to complete /// per-slot processes. -const TIME_DELAY_FROM_SLOT: Duration = Duration::from_millis(200); +const TIME_DELAY_FROM_SLOT: Duration = Duration::from_millis(100); /// The validator service. This is the main thread that executes and maintains validator /// duties. @@ -106,7 +106,7 @@ impl Service Service self.current_slot, - "The Timer should poll a new slot" - ); + // this is a non-fatal error. 
If the slot clock repeats, the node could + // have been slow to process the previous slot and is now duplicating tasks. + // We ignore duplicated but raise a critical error. + if current_slot <= self.current_slot { + crit!( + self.log, + "The validator tried to duplicate a slot. Likely missed the previous slot" + ); + return Err("Duplicate slot".into()); + } self.current_slot = current_slot; info!(self.log, "Processing"; "slot" => current_slot.as_u64(), "epoch" => current_epoch.as_u64()); Ok(()) From 378fe05c895a19a960c51ec91d1f89084fc561ce Mon Sep 17 00:00:00 2001 From: Paul Hauner Date: Wed, 7 Aug 2019 16:40:49 +1000 Subject: [PATCH 028/305] Tidy attestation processing --- beacon_node/beacon_chain/Cargo.toml | 1 + beacon_node/beacon_chain/src/beacon_chain.rs | 16 +------------ beacon_node/beacon_chain/src/test_utils.rs | 24 ++++++++++++++++---- beacon_node/beacon_chain/tests/tests.rs | 15 ++++++++---- 4 files changed, 33 insertions(+), 23 deletions(-) diff --git a/beacon_node/beacon_chain/Cargo.toml b/beacon_node/beacon_chain/Cargo.toml index af6736ede..89260cf51 100644 --- a/beacon_node/beacon_chain/Cargo.toml +++ b/beacon_node/beacon_chain/Cargo.toml @@ -24,3 +24,4 @@ lmd_ghost = { path = "../../eth2/lmd_ghost" } [dev-dependencies] rand = "0.5.5" +lazy_static = "1.3.0" diff --git a/beacon_node/beacon_chain/src/beacon_chain.rs b/beacon_node/beacon_chain/src/beacon_chain.rs index 8d5922850..5fc59ba66 100644 --- a/beacon_node/beacon_chain/src/beacon_chain.rs +++ b/beacon_node/beacon_chain/src/beacon_chain.rs @@ -4,7 +4,6 @@ use crate::fork_choice::{Error as ForkChoiceError, ForkChoice}; use crate::iter::{ReverseBlockRootIterator, ReverseStateRootIterator}; use crate::metrics::Metrics; use crate::persisted_beacon_chain::{PersistedBeaconChain, BEACON_CHAIN_DB_KEY}; -use crate::BeaconChainError; use lmd_ghost::LmdGhost; use log::trace; use operation_pool::DepositInsertStatus; @@ -615,7 +614,7 @@ impl BeaconChain { &self, attestation: Attestation, state: 
&BeaconState, - head_block: &BeaconBlock, + _head_block: &BeaconBlock, ) -> Result { self.metrics.attestation_processing_requests.inc(); let timer = self.metrics.attestation_processing_times.start_timer(); @@ -649,19 +648,6 @@ impl BeaconChain { .map_err(|e| Error::AttestationValidationError(e)) } - fn state_can_process_attestation( - state: &BeaconState, - data: &AttestationData, - head_block: &BeaconBlock, - ) -> bool { - (state.current_epoch() - 1 <= data.target.epoch) - && (data.target.epoch <= state.current_epoch() + 1) - && state - .get_block_root(head_block.slot) - .map(|root| *root == data.beacon_block_root) - .unwrap_or_else(|_| false) - } - /* /// Retrieves the `BeaconState` used to create the attestation. fn get_attestation_state( diff --git a/beacon_node/beacon_chain/src/test_utils.rs b/beacon_node/beacon_chain/src/test_utils.rs index ce6d4d20c..293d3b9b9 100644 --- a/beacon_node/beacon_chain/src/test_utils.rs +++ b/beacon_node/beacon_chain/src/test_utils.rs @@ -84,14 +84,30 @@ where { /// Instantiate a new harness with `validator_count` initial validators. pub fn new(validator_count: usize) -> Self { + let state_builder = TestingBeaconStateBuilder::from_default_keypairs_file_if_exists( + validator_count, + &E::default_spec(), + ); + let (genesis_state, keypairs) = state_builder.build(); + + Self::from_state_and_keypairs(genesis_state, keypairs) + } + + /// Instantiate a new harness with an initial validator for each key supplied. + pub fn from_keypairs(keypairs: Vec) -> Self { + let state_builder = TestingBeaconStateBuilder::from_keypairs(keypairs, &E::default_spec()); + let (genesis_state, keypairs) = state_builder.build(); + + Self::from_state_and_keypairs(genesis_state, keypairs) + } + + /// Instantiate a new harness with the given genesis state and a keypair for each of the + /// initial validators in the given state. 
+ pub fn from_state_and_keypairs(genesis_state: BeaconState, keypairs: Vec) -> Self { let spec = E::default_spec(); let store = Arc::new(MemoryStore::open()); - let state_builder = - TestingBeaconStateBuilder::from_default_keypairs_file_if_exists(validator_count, &spec); - let (genesis_state, keypairs) = state_builder.build(); - let mut genesis_block = BeaconBlock::empty(&spec); genesis_block.state_root = Hash256::from_slice(&genesis_state.tree_hash_root()); diff --git a/beacon_node/beacon_chain/tests/tests.rs b/beacon_node/beacon_chain/tests/tests.rs index 1f8400849..d286aaec0 100644 --- a/beacon_node/beacon_chain/tests/tests.rs +++ b/beacon_node/beacon_chain/tests/tests.rs @@ -1,5 +1,8 @@ #![cfg(not(debug_assertions))] +#[macro_use] +extern crate lazy_static; + use beacon_chain::test_utils::{ AttestationStrategy, BeaconChainHarness, BlockStrategy, CommonTypes, PersistedBeaconChain, BEACON_CHAIN_DB_KEY, @@ -9,17 +12,21 @@ use lmd_ghost::ThreadSafeReducedTree; use rand::Rng; use store::{MemoryStore, Store}; use types::test_utils::{SeedableRng, TestRandom, XorShiftRng}; -use types::{Deposit, EthSpec, Hash256, MinimalEthSpec, RelativeEpoch, Slot}; +use types::{Deposit, EthSpec, Hash256, Keypair, MinimalEthSpec, RelativeEpoch, Slot}; // Should ideally be divisible by 3. pub const VALIDATOR_COUNT: usize = 24; +lazy_static! { + /// A cached set of keys. + static ref KEYPAIRS: Vec = types::test_utils::generate_deterministic_keypairs(VALIDATOR_COUNT); +} + type TestForkChoice = ThreadSafeReducedTree; fn get_harness(validator_count: usize) -> BeaconChainHarness { - let harness = BeaconChainHarness::new(validator_count); + let harness = BeaconChainHarness::from_keypairs(KEYPAIRS[0..validator_count].to_vec()); - // Move past the zero slot. 
harness.advance_slot(); harness @@ -300,7 +307,7 @@ fn free_attestations_added_to_fork_choice_some_none() { } #[test] -fn free_attestations_over_slots() { +fn attestations_with_increasing_slots() { let num_blocks_produced = MinimalEthSpec::slots_per_epoch() * 5; let harness = get_harness(VALIDATOR_COUNT); From 65ce94b2efc5acd97acfb742dd72380626fa210e Mon Sep 17 00:00:00 2001 From: Paul Hauner Date: Wed, 7 Aug 2019 16:54:35 +1000 Subject: [PATCH 029/305] Remove old code fragment --- beacon_node/beacon_chain/src/beacon_chain.rs | 42 -------------------- 1 file changed, 42 deletions(-) diff --git a/beacon_node/beacon_chain/src/beacon_chain.rs b/beacon_node/beacon_chain/src/beacon_chain.rs index 5fc59ba66..60b65c95b 100644 --- a/beacon_node/beacon_chain/src/beacon_chain.rs +++ b/beacon_node/beacon_chain/src/beacon_chain.rs @@ -648,48 +648,6 @@ impl BeaconChain { .map_err(|e| Error::AttestationValidationError(e)) } - /* - /// Retrieves the `BeaconState` used to create the attestation. - fn get_attestation_state( - &self, - attestation: &Attestation, - ) -> Option> { - let state = &self.head().beacon_state; - - // Current state is used if the attestation targets a historic block and a slot within an - // equal or adjacent epoch. - let slots_per_epoch = T::EthSpec::slots_per_epoch(); - let min_slot = - (self.state.read().slot.epoch(slots_per_epoch) - 1).start_slot(slots_per_epoch); - let blocks = BestBlockRootsIterator::owned( - self.store.clone(), - self.state.read().clone(), - self.state.read().slot.clone(), - ); - for (root, slot) in blocks { - if root == attestation.data.target.root { - return Some(self.state.read().clone()); - } - - if slot == min_slot { - break; - } - } - - // A different state is retrieved from the database. 
- match self - .store - .get::>(&attestation.data.target.root) - { - Ok(Some(block)) => match self.store.get::>(&block.state_root) { - Ok(state) => state, - _ => None, - }, - _ => None, - } - } - */ - /// Accept some deposit and queue it for inclusion in an appropriate block. pub fn process_deposit( &self, From 9f9af746eaa255d11bca18e17368cbacb3666d22 Mon Sep 17 00:00:00 2001 From: Paul Hauner Date: Thu, 8 Aug 2019 10:29:27 +1000 Subject: [PATCH 030/305] Add non-compiling half finished changes --- .../src/per_block_processing.rs | 14 ++- .../verify_attestation.rs | 113 +++++++----------- 2 files changed, 54 insertions(+), 73 deletions(-) diff --git a/eth2/state_processing/src/per_block_processing.rs b/eth2/state_processing/src/per_block_processing.rs index 3c8921555..3acadfde2 100644 --- a/eth2/state_processing/src/per_block_processing.rs +++ b/eth2/state_processing/src/per_block_processing.rs @@ -14,10 +14,7 @@ pub use self::verify_proposer_slashing::verify_proposer_slashing; pub use is_valid_indexed_attestation::{ is_valid_indexed_attestation, is_valid_indexed_attestation_without_signature, }; -pub use verify_attestation::{ - verify_attestation, verify_attestation_time_independent_only, - verify_attestation_without_signature, -}; +pub use verify_attestation::{verify_attestation_for_block, verify_attestation_for_state}; pub use verify_deposit::{ get_existing_validator_index, verify_deposit_merkle_proof, verify_deposit_signature, }; @@ -37,6 +34,12 @@ mod verify_exit; mod verify_proposer_slashing; mod verify_transfer; +#[derive(PartialEq)] +pub enum VerifySignatures { + True, + False, +} + /// Updates the state for a new block, whilst validating that the block is valid. /// /// Returns `Ok(())` if the block is valid and the state was successfully updated. 
Otherwise @@ -312,7 +315,8 @@ pub fn process_attestations( .par_iter() .enumerate() .try_for_each(|(i, attestation)| { - verify_attestation(state, attestation, spec).map_err(|e| e.into_with_index(i)) + verify_attestation_for_block(state, attestation, spec, VerifySignatures::True) + .map_err(|e| e.into_with_index(i)) })?; // Update the state in series. diff --git a/eth2/state_processing/src/per_block_processing/verify_attestation.rs b/eth2/state_processing/src/per_block_processing/verify_attestation.rs index af2530045..bca6a9085 100644 --- a/eth2/state_processing/src/per_block_processing/verify_attestation.rs +++ b/eth2/state_processing/src/per_block_processing/verify_attestation.rs @@ -1,4 +1,5 @@ use super::errors::{AttestationInvalid as Invalid, AttestationValidationError as Error}; +use super::VerifySignatures; use crate::common::get_indexed_attestation; use crate::per_block_processing::{ is_valid_indexed_attestation, is_valid_indexed_attestation_without_signature, @@ -6,67 +7,23 @@ use crate::per_block_processing::{ use tree_hash::TreeHash; use types::*; -/// Indicates if an `Attestation` is valid to be included in a block in the current epoch of the -/// given state. -/// -/// Returns `Ok(())` if the `Attestation` is valid, otherwise indicates the reason for invalidity. -/// -/// Spec v0.8.0 -pub fn verify_attestation( - state: &BeaconState, - attestation: &Attestation, - spec: &ChainSpec, -) -> Result<(), Error> { - verify_attestation_parametric(state, attestation, spec, true, false) -} - -/// Like `verify_attestation` but doesn't run checks which may become true in future states. -pub fn verify_attestation_time_independent_only( - state: &BeaconState, - attestation: &Attestation, - spec: &ChainSpec, -) -> Result<(), Error> { - verify_attestation_parametric(state, attestation, spec, true, true) -} - -/// Indicates if an `Attestation` is valid to be included in a block in the current epoch of the -/// given state, without validating the aggregate signature. 
-/// -/// Returns `Ok(())` if the `Attestation` is valid, otherwise indicates the reason for invalidity. -/// -/// Spec v0.8.0 -pub fn verify_attestation_without_signature( - state: &BeaconState, - attestation: &Attestation, - spec: &ChainSpec, -) -> Result<(), Error> { - verify_attestation_parametric(state, attestation, spec, false, false) -} - /// Indicates if an `Attestation` is valid to be included in a block in the current epoch of the /// given state, optionally validating the aggregate signature. /// -/// /// Spec v0.8.0 -fn verify_attestation_parametric( +pub fn verify_attestation_for_block( state: &BeaconState, attestation: &Attestation, spec: &ChainSpec, - verify_signature: bool, - time_independent_only: bool, + verify_signatures: VerifySignatures, ) -> Result<(), Error> { let data = &attestation.data; - verify!( - data.crosslink.shard < T::ShardCount::to_u64(), - Invalid::BadShard - ); // Check attestation slot. let attestation_slot = state.get_attestation_data_slot(&data)?; verify!( - time_independent_only - || attestation_slot + spec.min_attestation_inclusion_delay <= state.slot, + attestation_slot + spec.min_attestation_inclusion_delay <= state.slot, Invalid::IncludedTooEarly { state: state.slot, delay: spec.min_attestation_inclusion_delay, @@ -81,27 +38,47 @@ fn verify_attestation_parametric( } ); - // Verify the Casper FFG vote and crosslink data. 
- if !time_independent_only { - let parent_crosslink = verify_casper_ffg_vote(attestation, state)?; + verify_attestation_for_state(state, attestation, spec, verify_signatures) +} - verify!( - data.crosslink.parent_root == Hash256::from_slice(&parent_crosslink.tree_hash_root()), - Invalid::BadParentCrosslinkHash - ); - verify!( - data.crosslink.start_epoch == parent_crosslink.end_epoch, - Invalid::BadParentCrosslinkStartEpoch - ); - verify!( - data.crosslink.end_epoch - == std::cmp::min( - data.target.epoch, - parent_crosslink.end_epoch + spec.max_epochs_per_crosslink - ), - Invalid::BadParentCrosslinkEndEpoch - ); - } +/// Returns `Ok(())` if `attestation` is a valid attestation to the chain that preceeds the given +/// `state`. +/// +/// Returns a descriptive `Err` if the attestation is malformed or does not accurately reflect the +/// prior blocks in `state`. +/// +/// Spec v0.8.0 +pub fn verify_attestation_for_state( + state: &BeaconState, + attestation: &Attestation, + spec: &ChainSpec, + verify_signature: VerifySignatures, +) -> Result<(), Error> { + let data = &attestation.data; + verify!( + data.crosslink.shard < T::ShardCount::to_u64(), + Invalid::BadShard + ); + + // Verify the Casper FFG vote and crosslink data. + let parent_crosslink = verify_casper_ffg_vote(attestation, state)?; + + verify!( + data.crosslink.parent_root == Hash256::from_slice(&parent_crosslink.tree_hash_root()), + Invalid::BadParentCrosslinkHash + ); + verify!( + data.crosslink.start_epoch == parent_crosslink.end_epoch, + Invalid::BadParentCrosslinkStartEpoch + ); + verify!( + data.crosslink.end_epoch + == std::cmp::min( + data.target.epoch, + parent_crosslink.end_epoch + spec.max_epochs_per_crosslink + ), + Invalid::BadParentCrosslinkEndEpoch + ); // Crosslink data root is zero (to be removed in phase 1). 
verify!( @@ -111,7 +88,7 @@ fn verify_attestation_parametric( // Check signature and bitfields let indexed_attestation = get_indexed_attestation(state, attestation)?; - if verify_signature { + if verify_signature == VerifySignatures::True { is_valid_indexed_attestation(state, &indexed_attestation, spec)?; } else { is_valid_indexed_attestation_without_signature(state, &indexed_attestation, spec)?; From 7c134a7504d2e8ff8b8cdd7d20459f96abde04a9 Mon Sep 17 00:00:00 2001 From: Paul Hauner Date: Thu, 8 Aug 2019 16:47:24 +1000 Subject: [PATCH 031/305] Simplify, fix bugs, add tests for chain iters --- beacon_node/beacon_chain/src/beacon_chain.rs | 53 ++++----------- beacon_node/beacon_chain/src/test_utils.rs | 32 +++------ beacon_node/beacon_chain/tests/tests.rs | 68 +++++++++++++++++++- beacon_node/store/src/iter.rs | 30 ++++----- eth2/lmd_ghost/src/reduced_tree.rs | 6 +- 5 files changed, 106 insertions(+), 83 deletions(-) diff --git a/beacon_node/beacon_chain/src/beacon_chain.rs b/beacon_node/beacon_chain/src/beacon_chain.rs index 60b65c95b..e8dcd50ab 100644 --- a/beacon_node/beacon_chain/src/beacon_chain.rs +++ b/beacon_node/beacon_chain/src/beacon_chain.rs @@ -244,15 +244,12 @@ impl BeaconChain { /// /// Because this iterator starts at the `head` of the chain (viz., the best block), the first slot /// returned may be earlier than the wall-clock slot. 
- pub fn rev_iter_block_roots( - &self, - slot: Slot, - ) -> ReverseBlockRootIterator { + pub fn rev_iter_block_roots(&self) -> ReverseBlockRootIterator { let state = &self.head().beacon_state; let block_root = self.head().beacon_block_root; let block_slot = state.slot; - let iter = BlockRootsIterator::owned(self.store.clone(), state.clone(), slot); + let iter = BlockRootsIterator::owned(self.store.clone(), state.clone()); ReverseBlockRootIterator::new((block_root, block_slot), iter) } @@ -267,15 +264,12 @@ impl BeaconChain { /// /// Because this iterator starts at the `head` of the chain (viz., the best block), the first slot /// returned may be earlier than the wall-clock slot. - pub fn rev_iter_state_roots( - &self, - slot: Slot, - ) -> ReverseStateRootIterator { + pub fn rev_iter_state_roots(&self) -> ReverseStateRootIterator { let state = &self.head().beacon_state; let state_root = self.head().beacon_state_root; let state_slot = state.slot; - let iter = StateRootsIterator::owned(self.store.clone(), state.clone(), slot); + let iter = StateRootsIterator::owned(self.store.clone(), state.clone()); ReverseStateRootIterator::new((state_root, state_slot), iter) } @@ -448,9 +442,8 @@ impl BeaconChain { pub fn produce_attestation_data(&self, shard: u64) -> Result { let state = self.state.read(); let head_block_root = self.head().beacon_block_root; - let head_block_slot = self.head().beacon_block.slot; - self.produce_attestation_data_for_block(shard, head_block_root, head_block_slot, &*state) + self.produce_attestation_data_for_block(shard, head_block_root, &*state) } /// Produce an `AttestationData` that attests to the chain denoted by `block_root` and `state`. @@ -461,39 +454,19 @@ impl BeaconChain { &self, shard: u64, head_block_root: Hash256, - head_block_slot: Slot, state: &BeaconState, ) -> Result { // Collect some metrics. 
self.metrics.attestation_production_requests.inc(); let timer = self.metrics.attestation_production_times.start_timer(); - let slots_per_epoch = T::EthSpec::slots_per_epoch(); - let current_epoch_start_slot = state.current_epoch().start_slot(slots_per_epoch); - // The `target_root` is the root of the first block of the current epoch. - // - // The `state` does not know the root of the block for it's current slot (it only knows - // about blocks from prior slots). This creates an edge-case when the state is on the first - // slot of the epoch -- we're unable to obtain the `target_root` because it is not a prior - // root. - // - // This edge case is handled in two ways: - // - // - If the head block is on the same slot as the state, we use it's root. - // - Otherwise, assume the current slot has been skipped and use the block root from the - // prior slot. - // - // For all other cases, we simply read the `target_root` from `state.latest_block_roots`. - let target_root = if state.slot == current_epoch_start_slot { - if head_block_slot == current_epoch_start_slot { - head_block_root - } else { - *state.get_block_root(current_epoch_start_slot - 1)? - } - } else { - *state.get_block_root(current_epoch_start_slot)? - }; + let target_root = self + .rev_iter_block_roots() + .find(|(_root, slot)| *slot % T::EthSpec::slots_per_epoch() == 0) + .map(|(root, _slot)| root) + .ok_or_else(|| Error::UnableToFindTargetRoot(self.head().beacon_state.slot))?; + let target = Checkpoint { epoch: state.current_epoch(), root: target_root, @@ -523,7 +496,7 @@ impl BeaconChain { }) } - /// Accept a new attestation from the network. + /// Accept a new, potentially invalid attestation from the network. /// /// If valid, the attestation is added to the `op_pool` and aggregated with another attestation /// if possible. 
@@ -614,7 +587,7 @@ impl BeaconChain { &self, attestation: Attestation, state: &BeaconState, - _head_block: &BeaconBlock, + block: &BeaconBlock, ) -> Result { self.metrics.attestation_processing_requests.inc(); let timer = self.metrics.attestation_processing_times.start_timer(); diff --git a/beacon_node/beacon_chain/src/test_utils.rs b/beacon_node/beacon_chain/src/test_utils.rs index 293d3b9b9..f2ec5a0fd 100644 --- a/beacon_node/beacon_chain/src/test_utils.rs +++ b/beacon_node/beacon_chain/src/test_utils.rs @@ -194,7 +194,7 @@ where if let BlockProcessingOutcome::Processed { block_root } = outcome { head_block_root = Some(block_root); - self.add_free_attestations(&attestation_strategy, &new_state, block_root, slot); + self.add_free_attestations(&attestation_strategy, &new_state, block_root); } else { panic!("block should be successfully processed: {:?}", outcome); } @@ -209,7 +209,7 @@ where fn get_state_at_slot(&self, state_slot: Slot) -> BeaconState { let state_root = self .chain - .rev_iter_state_roots(self.chain.head().beacon_state.slot - 1) + .rev_iter_state_roots() .find(|(_hash, slot)| *slot == state_slot) .map(|(hash, _slot)| hash) .expect("could not find state root"); @@ -282,20 +282,14 @@ where attestation_strategy: &AttestationStrategy, state: &BeaconState, head_block_root: Hash256, - head_block_slot: Slot, ) { - self.get_free_attestations( - attestation_strategy, - state, - head_block_root, - head_block_slot, - ) - .into_iter() - .for_each(|attestation| { - self.chain - .process_attestation(attestation) - .expect("should process attestation"); - }); + self.get_free_attestations(attestation_strategy, state, head_block_root) + .into_iter() + .for_each(|attestation| { + self.chain + .process_attestation(attestation) + .expect("should process attestation"); + }); } /// Generates a `Vec` for some attestation strategy and head_block. 
@@ -304,7 +298,6 @@ where attestation_strategy: &AttestationStrategy, state: &BeaconState, head_block_root: Hash256, - head_block_slot: Slot, ) -> Vec> { let spec = &self.spec; let fork = &state.fork; @@ -329,12 +322,7 @@ where if attesting_validators.contains(validator_index) { let data = self .chain - .produce_attestation_data_for_block( - cc.shard, - head_block_root, - head_block_slot, - state, - ) + .produce_attestation_data_for_block(cc.shard, head_block_root, state) .expect("should produce attestation data"); let mut aggregation_bits = BitList::with_capacity(committee_size).unwrap(); diff --git a/beacon_node/beacon_chain/tests/tests.rs b/beacon_node/beacon_chain/tests/tests.rs index d286aaec0..8dc4ae6ec 100644 --- a/beacon_node/beacon_chain/tests/tests.rs +++ b/beacon_node/beacon_chain/tests/tests.rs @@ -32,6 +32,73 @@ fn get_harness(validator_count: usize) -> BeaconChainHarness = harness.chain.rev_iter_block_roots().collect(); + let state_roots: Vec<(Hash256, Slot)> = harness.chain.rev_iter_state_roots().collect(); + + assert_eq!( + block_roots.len(), + state_roots.len(), + "should be an equal amount of block and state roots" + ); + + assert!( + block_roots.iter().any(|(_root, slot)| *slot == 0), + "should contain genesis block root" + ); + assert!( + state_roots.iter().any(|(_root, slot)| *slot == 0), + "should contain genesis state root" + ); + + assert_eq!( + block_roots.len(), + num_blocks_produced as usize + 1, + "should contain all produced blocks, plus the genesis block" + ); + + block_roots.windows(2).for_each(|x| { + assert_eq!( + x[1].1, + x[0].1 - 1, + "block root slots should be decreasing by one" + ) + }); + state_roots.windows(2).for_each(|x| { + assert_eq!( + x[1].1, + x[0].1 - 1, + "state root slots should be decreasing by one" + ) + }); + + let head = &harness.chain.head(); + + assert_eq!( + *block_roots.first().expect("should have some block roots"), + (head.beacon_block_root, head.beacon_block.slot), + "first block root and slot should be 
for the head block" + ); + + assert_eq!( + *state_roots.first().expect("should have some state roots"), + (head.beacon_state_root, head.beacon_state.slot), + "first state root and slot should be for the head state" + ); +} + #[test] fn chooses_fork() { let harness = get_harness(VALIDATOR_COUNT); @@ -326,7 +393,6 @@ fn attestations_with_increasing_slots() { &AttestationStrategy::AllValidators, &harness.chain.head().beacon_state, harness.chain.head().beacon_block_root, - harness.chain.head().beacon_block.slot, )); harness.advance_slot(); diff --git a/beacon_node/store/src/iter.rs b/beacon_node/store/src/iter.rs index c4e557b2d..84bf3759f 100644 --- a/beacon_node/store/src/iter.rs +++ b/beacon_node/store/src/iter.rs @@ -20,7 +20,7 @@ impl<'a, U: Store, E: EthSpec> AncestorIter> for fn try_iter_ancestor_roots(&self, store: Arc) -> Option> { let state = store.get::>(&self.state_root).ok()??; - Some(BlockRootsIterator::owned(store, state, self.slot)) + Some(BlockRootsIterator::owned(store, state)) } } @@ -32,19 +32,19 @@ pub struct StateRootsIterator<'a, T: EthSpec, U> { } impl<'a, T: EthSpec, U: Store> StateRootsIterator<'a, T, U> { - pub fn new(store: Arc, beacon_state: &'a BeaconState, start_slot: Slot) -> Self { + pub fn new(store: Arc, beacon_state: &'a BeaconState) -> Self { Self { store, + slot: beacon_state.slot, beacon_state: Cow::Borrowed(beacon_state), - slot: start_slot + 1, } } - pub fn owned(store: Arc, beacon_state: BeaconState, start_slot: Slot) -> Self { + pub fn owned(store: Arc, beacon_state: BeaconState) -> Self { Self { store, + slot: beacon_state.slot, beacon_state: Cow::Owned(beacon_state), - slot: start_slot + 1, } } } @@ -88,16 +88,16 @@ pub struct BlockIterator<'a, T: EthSpec, U> { impl<'a, T: EthSpec, U: Store> BlockIterator<'a, T, U> { /// Create a new iterator over all blocks in the given `beacon_state` and prior states. 
- pub fn new(store: Arc, beacon_state: &'a BeaconState, start_slot: Slot) -> Self { + pub fn new(store: Arc, beacon_state: &'a BeaconState) -> Self { Self { - roots: BlockRootsIterator::new(store, beacon_state, start_slot), + roots: BlockRootsIterator::new(store, beacon_state), } } /// Create a new iterator over all blocks in the given `beacon_state` and prior states. - pub fn owned(store: Arc, beacon_state: BeaconState, start_slot: Slot) -> Self { + pub fn owned(store: Arc, beacon_state: BeaconState) -> Self { Self { - roots: BlockRootsIterator::owned(store, beacon_state, start_slot), + roots: BlockRootsIterator::owned(store, beacon_state), } } } @@ -128,20 +128,20 @@ pub struct BlockRootsIterator<'a, T: EthSpec, U> { impl<'a, T: EthSpec, U: Store> BlockRootsIterator<'a, T, U> { /// Create a new iterator over all block roots in the given `beacon_state` and prior states. - pub fn new(store: Arc, beacon_state: &'a BeaconState, start_slot: Slot) -> Self { + pub fn new(store: Arc, beacon_state: &'a BeaconState) -> Self { Self { store, + slot: beacon_state.slot, beacon_state: Cow::Borrowed(beacon_state), - slot: start_slot + 1, } } /// Create a new iterator over all block roots in the given `beacon_state` and prior states. 
- pub fn owned(store: Arc, beacon_state: BeaconState, start_slot: Slot) -> Self { + pub fn owned(store: Arc, beacon_state: BeaconState) -> Self { Self { store, + slot: beacon_state.slot, beacon_state: Cow::Owned(beacon_state), - slot: start_slot + 1, } } } @@ -218,7 +218,7 @@ mod test { state_b.state_roots[0] = state_a_root; store.put(&state_a_root, &state_a).unwrap(); - let iter = BlockRootsIterator::new(store.clone(), &state_b, state_b.slot - 1); + let iter = BlockRootsIterator::new(store.clone(), &state_b); assert!( iter.clone().find(|(_root, slot)| *slot == 0).is_some(), @@ -267,7 +267,7 @@ mod test { store.put(&state_a_root, &state_a).unwrap(); store.put(&state_b_root, &state_b).unwrap(); - let iter = StateRootsIterator::new(store.clone(), &state_b, state_b.slot - 1); + let iter = StateRootsIterator::new(store.clone(), &state_b); assert!( iter.clone().find(|(_root, slot)| *slot == 0).is_some(), diff --git a/eth2/lmd_ghost/src/reduced_tree.rs b/eth2/lmd_ghost/src/reduced_tree.rs index 5d7074804..9668620b7 100644 --- a/eth2/lmd_ghost/src/reduced_tree.rs +++ b/eth2/lmd_ghost/src/reduced_tree.rs @@ -611,11 +611,7 @@ where let block = self.get_block(child)?; let state = self.get_state(block.state_root)?; - Ok(BlockRootsIterator::owned( - self.store.clone(), - state, - block.slot - 1, - )) + Ok(BlockRootsIterator::owned(self.store.clone(), state)) } /// Verify the integrity of `self`. Returns `Ok(())` if the tree has integrity, otherwise returns `Err(description)`. 
From b1591c3c12d777377524fcd37809ad4508e4e7c9 Mon Sep 17 00:00:00 2001 From: Paul Hauner Date: Thu, 8 Aug 2019 16:49:27 +1000 Subject: [PATCH 032/305] Remove attestation processing from op pool --- beacon_node/beacon_chain/src/beacon_chain.rs | 68 +++++++++++++++++-- beacon_node/beacon_chain/src/errors.rs | 1 + beacon_node/beacon_chain/src/fork_choice.rs | 18 ++--- eth2/operation_pool/src/lib.rs | 20 ++++-- .../src/per_block_processing.rs | 6 +- .../verify_attestation.rs | 10 +-- 6 files changed, 92 insertions(+), 31 deletions(-) diff --git a/beacon_node/beacon_chain/src/beacon_chain.rs b/beacon_node/beacon_chain/src/beacon_chain.rs index e8dcd50ab..8982cdf79 100644 --- a/beacon_node/beacon_chain/src/beacon_chain.rs +++ b/beacon_node/beacon_chain/src/beacon_chain.rs @@ -11,12 +11,15 @@ use operation_pool::{OperationPool, PersistedOperationPool}; use parking_lot::{RwLock, RwLockReadGuard}; use slog::{error, info, warn, Logger}; use slot_clock::SlotClock; -use state_processing::per_block_processing::errors::{ - AttesterSlashingValidationError, DepositValidationError, ExitValidationError, - ProposerSlashingValidationError, TransferValidationError, +use state_processing::per_block_processing::{ + errors::{ + AttestationValidationError, AttesterSlashingValidationError, DepositValidationError, + ExitValidationError, ProposerSlashingValidationError, TransferValidationError, + }, + verify_attestation_for_state, VerifySignatures, }; use state_processing::{ - common, per_block_processing, per_block_processing_without_verifying_block_signature, + per_block_processing, per_block_processing_without_verifying_block_signature, per_slot_processing, BlockProcessingError, }; use std::sync::Arc; @@ -58,6 +61,7 @@ pub enum BlockProcessingOutcome { pub enum AttestationProcessingOutcome { Processed, UnknownHeadBlock { beacon_block_root: Hash256 }, + Invalid(AttestationValidationError), } pub trait BeaconChainTypes { @@ -543,9 +547,6 @@ impl BeaconChain { } }; - // TODO: we could try and 
see if the "speculative state" (e.g., self.state) can support - // this, without needing to load it from the db. - if let Some(outcome) = optional_outcome { outcome } else { @@ -583,6 +584,25 @@ impl BeaconChain { } } + /// Verifies the `attestation` against the `state` to which it is attesting. + /// + /// Updates fork choice with any new latest messages, but _does not_ find or update the head. + /// + /// ## Notes + /// + /// The given `state` must fulfil one of the following conditions: + /// + /// - `state` corresponds to the `block.state_root` identified by + /// `attestation.data.beacon_block_root`. (Viz., `attestation` was created using `state`. + /// - `state.slot` is in the same epoch as `block.slot` and + /// `attestation.data.beacon_block_root` is in `state.block_roots`. (Viz., the attestation was + /// attesting to an ancestor of `state` from the same epoch as `state`. + /// + /// Additionally, `attestation.data.beacon_block_root` **must** be available to read in + /// `self.store` _and_ be the root of the given `block`. + /// + /// If the given conditions are not fulfilled, the function may error or provide a false + /// negative (indicating that a given `attestation` is invalid when it is was validly formed). fn process_attestation_for_state_and_block( &self, attestation: Attestation, @@ -592,6 +612,39 @@ impl BeaconChain { self.metrics.attestation_processing_requests.inc(); let timer = self.metrics.attestation_processing_times.start_timer(); + let result = if let Err(e) = + verify_attestation_for_state(state, &attestation, &self.spec, VerifySignatures::True) + { + warn!( + self.log, + "Invalid attestation"; + "state_epoch" => state.current_epoch(), + "error" => format!("{:?}", e), + ); + + Ok(AttestationProcessingOutcome::Invalid(e)) + } else { + // Provide the attestation to fork choice, updating the validator latest messages but + // _without_ finding and updating the head. 
+ self.fork_choice + .process_attestation(&state, &attestation, block)?; + + // Provide the valid attestation to op pool, which may choose to retain the + // attestation for inclusion in a future block. + self.op_pool + .insert_attestation(attestation, state, &self.spec)?; + + // Update the metrics. + self.metrics.attestation_processing_successes.inc(); + + Ok(AttestationProcessingOutcome::Processed) + }; + + timer.observe_duration(); + + result + + /* if self .fork_choice .should_process_attestation(state, &attestation)? @@ -619,6 +672,7 @@ impl BeaconChain { result .map(|_| AttestationProcessingOutcome::Processed) .map_err(|e| Error::AttestationValidationError(e)) + */ } /// Accept some deposit and queue it for inclusion in an appropriate block. diff --git a/beacon_node/beacon_chain/src/errors.rs b/beacon_node/beacon_chain/src/errors.rs index 0b8fae7bf..7a51fc425 100644 --- a/beacon_node/beacon_chain/src/errors.rs +++ b/beacon_node/beacon_chain/src/errors.rs @@ -26,6 +26,7 @@ pub enum BeaconChainError { previous_epoch: Epoch, new_epoch: Epoch, }, + UnableToFindTargetRoot(Slot), BeaconStateError(BeaconStateError), DBInconsistent(String), DBError(store::Error), diff --git a/beacon_node/beacon_chain/src/fork_choice.rs b/beacon_node/beacon_chain/src/fork_choice.rs index 71415d191..83d6c335f 100644 --- a/beacon_node/beacon_chain/src/fork_choice.rs +++ b/beacon_node/beacon_chain/src/fork_choice.rs @@ -20,7 +20,6 @@ pub enum Error { pub struct ForkChoice { backend: T::LmdGhost, - store: Arc, /// Used for resolving the `0x00..00` alias back to genesis. 
/// /// Does not necessarily need to be the _actual_ genesis, it suffices to be the finalized root @@ -39,7 +38,6 @@ impl ForkChoice { genesis_block_root: Hash256, ) -> Self { Self { - store: store.clone(), backend: T::LmdGhost::new(store, genesis_block, genesis_block_root), genesis_block_root, } @@ -119,7 +117,7 @@ impl ForkChoice { // // https://github.com/ethereum/eth2.0-specs/blob/v0.7.0/specs/core/0_fork-choice.md for attestation in &block.body.attestations { - self.process_attestation(state, attestation)?; + self.process_attestation(state, attestation, block)?; } self.backend.process_block(block, block_root)?; @@ -127,13 +125,14 @@ impl ForkChoice { Ok(()) } - /// Process an attestation. + /// Process an attestation which references `block` in `attestation.data.beacon_block_root`. /// /// Assumes the attestation is valid. pub fn process_attestation( &self, state: &BeaconState, attestation: &Attestation, + block: &BeaconBlock, ) -> Result<()> { let block_hash = attestation.data.beacon_block_root; @@ -152,20 +151,13 @@ impl ForkChoice { // to genesis just by being present in the chain. // // Additionally, don't add any block hash to fork choice unless we have imported the block. 
- if block_hash != Hash256::zero() - && self - .store - .exists::>(&block_hash) - .unwrap_or(false) - { + if block_hash != Hash256::zero() { let validator_indices = get_attesting_indices(state, &attestation.data, &attestation.aggregation_bits)?; - let block_slot = state.get_attestation_data_slot(&attestation.data)?; - for validator_index in validator_indices { self.backend - .process_attestation(validator_index, block_hash, block_slot)?; + .process_attestation(validator_index, block_hash, block.slot)?; } } diff --git a/eth2/operation_pool/src/lib.rs b/eth2/operation_pool/src/lib.rs index 92d5fb168..ba9ca81c0 100644 --- a/eth2/operation_pool/src/lib.rs +++ b/eth2/operation_pool/src/lib.rs @@ -15,9 +15,10 @@ use state_processing::per_block_processing::errors::{ ExitValidationError, ProposerSlashingValidationError, TransferValidationError, }; use state_processing::per_block_processing::{ - get_slashable_indices_modular, verify_attestation, verify_attestation_time_independent_only, + get_slashable_indices_modular, verify_attestation_for_block_inclusion, verify_attester_slashing, verify_exit, verify_exit_time_independent_only, verify_proposer_slashing, verify_transfer, verify_transfer_time_independent_only, + VerifySignatures, }; use std::collections::{btree_map::Entry, hash_map, BTreeMap, HashMap, HashSet}; use std::marker::PhantomData; @@ -64,15 +65,16 @@ impl OperationPool { } /// Insert an attestation into the pool, aggregating it with existing attestations if possible. + /// + /// ## Note + /// + /// This function assumes the given `attestation` is valid. pub fn insert_attestation( &self, attestation: Attestation, state: &BeaconState, spec: &ChainSpec, ) -> Result<(), AttestationValidationError> { - // Check that attestation signatures are valid. - verify_attestation_time_independent_only(state, &attestation, spec)?; - let id = AttestationId::from_data(&attestation.data, state, spec); // Take a write lock on the attestations map. 
@@ -128,7 +130,15 @@ impl OperationPool { }) .flat_map(|(_, attestations)| attestations) // That are valid... - .filter(|attestation| verify_attestation(state, attestation, spec).is_ok()) + .filter(|attestation| { + verify_attestation_for_block_inclusion( + state, + attestation, + spec, + VerifySignatures::True, + ) + .is_ok() + }) .map(|att| AttMaxCover::new(att, earliest_attestation_validators(att, state))); maximum_cover(valid_attestations, T::MaxAttestations::to_usize()) diff --git a/eth2/state_processing/src/per_block_processing.rs b/eth2/state_processing/src/per_block_processing.rs index 3acadfde2..a64158ac9 100644 --- a/eth2/state_processing/src/per_block_processing.rs +++ b/eth2/state_processing/src/per_block_processing.rs @@ -14,7 +14,9 @@ pub use self::verify_proposer_slashing::verify_proposer_slashing; pub use is_valid_indexed_attestation::{ is_valid_indexed_attestation, is_valid_indexed_attestation_without_signature, }; -pub use verify_attestation::{verify_attestation_for_block, verify_attestation_for_state}; +pub use verify_attestation::{ + verify_attestation_for_block_inclusion, verify_attestation_for_state, +}; pub use verify_deposit::{ get_existing_validator_index, verify_deposit_merkle_proof, verify_deposit_signature, }; @@ -315,7 +317,7 @@ pub fn process_attestations( .par_iter() .enumerate() .try_for_each(|(i, attestation)| { - verify_attestation_for_block(state, attestation, spec, VerifySignatures::True) + verify_attestation_for_block_inclusion(state, attestation, spec, VerifySignatures::True) .map_err(|e| e.into_with_index(i)) })?; diff --git a/eth2/state_processing/src/per_block_processing/verify_attestation.rs b/eth2/state_processing/src/per_block_processing/verify_attestation.rs index bca6a9085..74dbefa23 100644 --- a/eth2/state_processing/src/per_block_processing/verify_attestation.rs +++ b/eth2/state_processing/src/per_block_processing/verify_attestation.rs @@ -7,11 +7,13 @@ use crate::per_block_processing::{ use tree_hash::TreeHash; use 
types::*; -/// Indicates if an `Attestation` is valid to be included in a block in the current epoch of the -/// given state, optionally validating the aggregate signature. +/// Returns `Ok(())` if the given `attestation` is valid to be included in a block that is applied +/// to `state`. Otherwise, returns a descriptive `Err`. +/// +/// Optionally verifies the aggregate signature, depending on `verify_signatures`. /// /// Spec v0.8.0 -pub fn verify_attestation_for_block( +pub fn verify_attestation_for_block_inclusion( state: &BeaconState, attestation: &Attestation, spec: &ChainSpec, @@ -41,7 +43,7 @@ pub fn verify_attestation_for_block( verify_attestation_for_state(state, attestation, spec, verify_signatures) } -/// Returns `Ok(())` if `attestation` is a valid attestation to the chain that preceeds the given +/// Returns `Ok(())` if `attestation` is a valid attestation to the chain that precedes the given /// `state`. /// /// Returns a descriptive `Err` if the attestation is malformed or does not accurately reflect the From 76bb6710844d4f683d1681ef738efe9e5880b137 Mon Sep 17 00:00:00 2001 From: Paul Hauner Date: Fri, 9 Aug 2019 11:54:35 +1000 Subject: [PATCH 033/305] Fix bug with fork choice, tidy --- beacon_node/beacon_chain/src/beacon_chain.rs | 63 +++++++++---------- beacon_node/beacon_chain/src/fork_choice.rs | 14 ++++- beacon_node/beacon_chain/src/test_utils.rs | 6 +- beacon_node/beacon_chain/tests/tests.rs | 22 ++++--- .../src/per_block_processing/errors.rs | 3 + .../verify_attestation.rs | 11 ++++ 6 files changed, 70 insertions(+), 49 deletions(-) diff --git a/beacon_node/beacon_chain/src/beacon_chain.rs b/beacon_node/beacon_chain/src/beacon_chain.rs index 3d50d701c..81e5bdd65 100644 --- a/beacon_node/beacon_chain/src/beacon_chain.rs +++ b/beacon_node/beacon_chain/src/beacon_chain.rs @@ -524,7 +524,13 @@ impl BeaconChain { // If it turns out that the attestation was made using the head state, then there // is no need to load a state from the database to 
process the attestation. - if state.current_epoch() == attestation_head_block.epoch() + // + // Note: use the epoch of the target because it indicates which epoch the + // attestation was created in. You cannot use the epoch of the head block, because + // the block doesn't necessarily need to be in the same epoch as the attestation + // (e.g., if there are skip slots between the epoch the block was created in and + // the epoch for the attestation). + if state.current_epoch() == attestation.data.target.epoch && (state .get_block_root(attestation_head_block.slot) .map(|root| *root == attestation.data.beacon_block_root) @@ -546,7 +552,11 @@ impl BeaconChain { if let Some(outcome) = optional_outcome { outcome } else { - // The state required to verify this attestation must be loaded from the database. + // Use the `data.beacon_block_root` to load the state from the latest non-skipped + // slot preceding the attestations creation. + // + // This state is guaranteed to be in the same chain as the attestation, but it's + // not guaranteed to be from the same slot or epoch as the attestation. let mut state: BeaconState = self .store .get(&attestation_head_block.state_root)? @@ -554,7 +564,21 @@ impl BeaconChain { // Ensure the state loaded from the database matches the state of the attestation // head block. - for _ in state.slot.as_u64()..attestation_head_block.slot.as_u64() { + // + // The state needs to be advanced from the current slot through to the epoch in + // which the attestation was created in. It would be an error to try and use + // `state.get_attestation_data_slot(..)` because the state matching the + // `data.beacon_block_root` isn't necessarily in a nearby epoch to the attestation + // (e.g., if there were lots of skip slots since the head of the chain and the + // epoch creation epoch). 
+ for _ in state.slot.as_u64() + ..attestation + .data + .target + .epoch + .start_slot(T::EthSpec::slots_per_epoch()) + .as_u64() + { per_slot_processing(&mut state, &self.spec)?; } @@ -639,36 +663,6 @@ impl BeaconChain { timer.observe_duration(); result - - /* - if self - .fork_choice - .should_process_attestation(state, &attestation)? - { - // TODO: check validation. - let indexed_attestation = common::get_indexed_attestation(state, &attestation)?; - per_block_processing::is_valid_indexed_attestation( - state, - &indexed_attestation, - &self.spec, - )?; - self.fork_choice.process_attestation(&state, &attestation)?; - } - - let result = self - .op_pool - .insert_attestation(attestation, state, &self.spec); - - timer.observe_duration(); - - if result.is_ok() { - self.metrics.attestation_processing_successes.inc(); - } - - result - .map(|_| AttestationProcessingOutcome::Processed) - .map_err(|e| Error::AttestationValidationError(e)) - */ } /// Accept some deposit and queue it for inclusion in an appropriate block. 
@@ -735,7 +729,7 @@ impl BeaconChain { return Ok(BlockProcessingOutcome::GenesisBlock); } - let block_root = block.block_header().canonical_root(); + let block_root = block.canonical_root(); if block_root == self.genesis_block_root { return Ok(BlockProcessingOutcome::GenesisBlock); @@ -781,6 +775,7 @@ impl BeaconChain { per_slot_processing(&mut state, &self.spec)?; } + state.build_committee_cache(RelativeEpoch::Previous, &self.spec)?; state.build_committee_cache(RelativeEpoch::Current, &self.spec)?; // Apply the received block to its parent state (which has been transitioned into this diff --git a/beacon_node/beacon_chain/src/fork_choice.rs b/beacon_node/beacon_chain/src/fork_choice.rs index 83d6c335f..6800f61d8 100644 --- a/beacon_node/beacon_chain/src/fork_choice.rs +++ b/beacon_node/beacon_chain/src/fork_choice.rs @@ -19,6 +19,7 @@ pub enum Error { } pub struct ForkChoice { + store: Arc, backend: T::LmdGhost, /// Used for resolving the `0x00..00` alias back to genesis. /// @@ -38,6 +39,7 @@ impl ForkChoice { genesis_block_root: Hash256, ) -> Self { Self { + store: store.clone(), backend: T::LmdGhost::new(store, genesis_block, genesis_block_root), genesis_block_root, } @@ -117,9 +119,19 @@ impl ForkChoice { // // https://github.com/ethereum/eth2.0-specs/blob/v0.7.0/specs/core/0_fork-choice.md for attestation in &block.body.attestations { - self.process_attestation(state, attestation, block)?; + let block = self + .store + .get::>(&attestation.data.beacon_block_root)? + .ok_or_else(|| Error::MissingBlock(attestation.data.beacon_block_root))?; + + self.process_attestation(state, attestation, &block)?; } + // This does not apply a vote to the block, it just makes fork choice aware of the block so + // it can still be identified as the head even if it doesn't have any votes. + // + // A case where a block without any votes can be the head is where it is the only child of + // a block that has the majority of votes applied to it. 
self.backend.process_block(block, block_root)?; Ok(()) diff --git a/beacon_node/beacon_chain/src/test_utils.rs b/beacon_node/beacon_chain/src/test_utils.rs index f2ec5a0fd..6997f52ae 100644 --- a/beacon_node/beacon_chain/src/test_utils.rs +++ b/beacon_node/beacon_chain/src/test_utils.rs @@ -349,14 +349,12 @@ where agg_sig }; - let attestation = Attestation { + vec.push(Attestation { aggregation_bits, data, custody_bits, signature, - }; - - vec.push(attestation) + }) } } }); diff --git a/beacon_node/beacon_chain/tests/tests.rs b/beacon_node/beacon_chain/tests/tests.rs index 8dc4ae6ec..c22f02563 100644 --- a/beacon_node/beacon_chain/tests/tests.rs +++ b/beacon_node/beacon_chain/tests/tests.rs @@ -342,27 +342,29 @@ fn free_attestations_added_to_fork_choice_some_none() { let state = &harness.chain.head().beacon_state; let fork_choice = &harness.chain.fork_choice; - let validators: Vec = (0..VALIDATOR_COUNT).collect(); - let slots: Vec = validators - .iter() - .map(|&v| { - state - .get_attestation_duties(v, RelativeEpoch::Current) + let validator_slots: Vec<(usize, Slot)> = (0..VALIDATOR_COUNT) + .into_iter() + .map(|validator_index| { + let slot = state + .get_attestation_duties(validator_index, RelativeEpoch::Current) .expect("should get attester duties") .unwrap() - .slot + .slot; + + (validator_index, slot) }) .collect(); - let validator_slots: Vec<(&usize, Slot)> = validators.iter().zip(slots).collect(); for (validator, slot) in validator_slots.clone() { - let latest_message = fork_choice.latest_message(*validator); + let latest_message = fork_choice.latest_message(validator); if slot <= num_blocks_produced && slot != 0 { assert_eq!( latest_message.unwrap().1, slot, - "Latest message slot should be equal to attester duty." 
+ "Latest message slot for {} should be equal to slot {}.", + validator, + slot ) } else { assert!( diff --git a/eth2/state_processing/src/per_block_processing/errors.rs b/eth2/state_processing/src/per_block_processing/errors.rs index 65179167c..436ec96ce 100644 --- a/eth2/state_processing/src/per_block_processing/errors.rs +++ b/eth2/state_processing/src/per_block_processing/errors.rs @@ -136,6 +136,9 @@ pub enum AttestationInvalid { delay: u64, attestation: Slot, }, + /// The attestation is attesting to a state that is later than itself. (Viz., attesting to the + /// future). + AttestsToFutureState { state: Slot, attestation: Slot }, /// Attestation slot is too far in the past to be included in a block. IncludedTooLate { state: Slot, attestation: Slot }, /// Attestation target epoch does not match the current or previous epoch. diff --git a/eth2/state_processing/src/per_block_processing/verify_attestation.rs b/eth2/state_processing/src/per_block_processing/verify_attestation.rs index 74dbefa23..127d251de 100644 --- a/eth2/state_processing/src/per_block_processing/verify_attestation.rs +++ b/eth2/state_processing/src/per_block_processing/verify_attestation.rs @@ -62,6 +62,17 @@ pub fn verify_attestation_for_state( Invalid::BadShard ); + let attestation_slot = state.get_attestation_data_slot(&data)?; + + // An attestation cannot attest to a state that is later than itself. + verify!( + attestation_slot <= state.slot, + Invalid::AttestsToFutureState { + state: state.slot, + attestation: attestation_slot + } + ); + // Verify the Casper FFG vote and crosslink data. let parent_crosslink = verify_casper_ffg_vote(attestation, state)?; From d191812d4bfbed364f8f9157a708ff516011a026 Mon Sep 17 00:00:00 2001 From: Paul Hauner Date: Fri, 9 Aug 2019 12:23:10 +1000 Subject: [PATCH 034/305] Fix overly restrictive check in fork choice. 
--- beacon_node/beacon_chain/src/beacon_chain.rs | 33 +++++++++++++++---- .../src/per_block_processing/errors.rs | 3 -- .../verify_attestation.rs | 11 ------- 3 files changed, 27 insertions(+), 20 deletions(-) diff --git a/beacon_node/beacon_chain/src/beacon_chain.rs b/beacon_node/beacon_chain/src/beacon_chain.rs index 81e5bdd65..7af64924e 100644 --- a/beacon_node/beacon_chain/src/beacon_chain.rs +++ b/beacon_node/beacon_chain/src/beacon_chain.rs @@ -60,7 +60,15 @@ pub enum BlockProcessingOutcome { #[derive(Debug, PartialEq)] pub enum AttestationProcessingOutcome { Processed, - UnknownHeadBlock { beacon_block_root: Hash256 }, + UnknownHeadBlock { + beacon_block_root: Hash256, + }, + /// The attestation is attesting to a state that is later than itself. (Viz., attesting to the + /// future). + AttestsToFutureState { + state: Slot, + attestation: Slot, + }, Invalid(AttestationValidationError), } @@ -582,11 +590,24 @@ impl BeaconChain { per_slot_processing(&mut state, &self.spec)?; } - self.process_attestation_for_state_and_block( - attestation, - &state, - &attestation_head_block, - ) + let attestation_slot = state.get_attestation_data_slot(&attestation.data)?; + + // Reject any attestation where the `state` loaded from `data.beacon_block_root` + // has a higher slot than the attestation. + // + // Permitting this would allow for attesters to vote on _future_ slots. + if attestation_slot > state.slot { + Ok(AttestationProcessingOutcome::AttestsToFutureState { + state: state.slot, + attestation: attestation_slot, + }) + } else { + self.process_attestation_for_state_and_block( + attestation, + &state, + &attestation_head_block, + ) + } } } else { // Reject any block where we have not processed `attestation.data.beacon_block_root`. 
diff --git a/eth2/state_processing/src/per_block_processing/errors.rs b/eth2/state_processing/src/per_block_processing/errors.rs index 436ec96ce..65179167c 100644 --- a/eth2/state_processing/src/per_block_processing/errors.rs +++ b/eth2/state_processing/src/per_block_processing/errors.rs @@ -136,9 +136,6 @@ pub enum AttestationInvalid { delay: u64, attestation: Slot, }, - /// The attestation is attesting to a state that is later than itself. (Viz., attesting to the - /// future). - AttestsToFutureState { state: Slot, attestation: Slot }, /// Attestation slot is too far in the past to be included in a block. IncludedTooLate { state: Slot, attestation: Slot }, /// Attestation target epoch does not match the current or previous epoch. diff --git a/eth2/state_processing/src/per_block_processing/verify_attestation.rs b/eth2/state_processing/src/per_block_processing/verify_attestation.rs index 127d251de..74dbefa23 100644 --- a/eth2/state_processing/src/per_block_processing/verify_attestation.rs +++ b/eth2/state_processing/src/per_block_processing/verify_attestation.rs @@ -62,17 +62,6 @@ pub fn verify_attestation_for_state( Invalid::BadShard ); - let attestation_slot = state.get_attestation_data_slot(&data)?; - - // An attestation cannot attest to a state that is later than itself. - verify!( - attestation_slot <= state.slot, - Invalid::AttestsToFutureState { - state: state.slot, - attestation: attestation_slot - } - ); - // Verify the Casper FFG vote and crosslink data. 
let parent_crosslink = verify_casper_ffg_vote(attestation, state)?; From 67fe21c1c03f550c74d6e0b190f05843770e6fcf Mon Sep 17 00:00:00 2001 From: Paul Hauner Date: Fri, 9 Aug 2019 12:32:32 +1000 Subject: [PATCH 035/305] Ensure committee cache is build during attn proc --- beacon_node/beacon_chain/src/beacon_chain.rs | 2 ++ 1 file changed, 2 insertions(+) diff --git a/beacon_node/beacon_chain/src/beacon_chain.rs b/beacon_node/beacon_chain/src/beacon_chain.rs index 7af64924e..834b04582 100644 --- a/beacon_node/beacon_chain/src/beacon_chain.rs +++ b/beacon_node/beacon_chain/src/beacon_chain.rs @@ -590,6 +590,8 @@ impl BeaconChain { per_slot_processing(&mut state, &self.spec)?; } + state.build_committee_cache(RelativeEpoch::Current, &self.spec)?; + let attestation_slot = state.get_attestation_data_slot(&attestation.data)?; // Reject any attestation where the `state` loaded from `data.beacon_block_root` From f4121d9debb4ff235d8d6c74236d00d373be4020 Mon Sep 17 00:00:00 2001 From: Paul Hauner Date: Fri, 9 Aug 2019 12:34:56 +1000 Subject: [PATCH 036/305] Ignore unknown blocks at fork choice --- beacon_node/beacon_chain/src/fork_choice.rs | 10 ++++++---- 1 file changed, 6 insertions(+), 4 deletions(-) diff --git a/beacon_node/beacon_chain/src/fork_choice.rs b/beacon_node/beacon_chain/src/fork_choice.rs index 6800f61d8..640f5223d 100644 --- a/beacon_node/beacon_chain/src/fork_choice.rs +++ b/beacon_node/beacon_chain/src/fork_choice.rs @@ -119,12 +119,14 @@ impl ForkChoice { // // https://github.com/ethereum/eth2.0-specs/blob/v0.7.0/specs/core/0_fork-choice.md for attestation in &block.body.attestations { - let block = self + // If the `data.beacon_block_root` block is not known to us, simply ignore the latest + // vote. + if let Some(block) = self .store .get::>(&attestation.data.beacon_block_root)? 
- .ok_or_else(|| Error::MissingBlock(attestation.data.beacon_block_root))?; - - self.process_attestation(state, attestation, &block)?; + { + self.process_attestation(state, attestation, &block)?; + } } // This does not apply a vote to the block, it just makes fork choice aware of the block so From 3210489a36892260799acfc2094b7d17e33c619a Mon Sep 17 00:00:00 2001 From: Age Manning Date: Fri, 9 Aug 2019 13:23:47 +1000 Subject: [PATCH 037/305] Apply PR suggestions --- beacon_node/eth2-libp2p/src/behaviour.rs | 58 ++-------------------- beacon_node/eth2-libp2p/src/rpc/handler.rs | 5 +- beacon_node/src/main.rs | 41 +++++++++------ validator_client/src/main.rs | 39 ++++++++++----- 4 files changed, 61 insertions(+), 82 deletions(-) diff --git a/beacon_node/eth2-libp2p/src/behaviour.rs b/beacon_node/eth2-libp2p/src/behaviour.rs index fc224e91a..b87f8a061 100644 --- a/beacon_node/eth2-libp2p/src/behaviour.rs +++ b/beacon_node/eth2-libp2p/src/behaviour.rs @@ -19,6 +19,8 @@ use ssz::{ssz_encode, Encode}; use std::num::NonZeroU32; use std::time::Duration; +const MAX_IDENTIFY_ADDRESSES: usize = 20; + /// Builds the network behaviour that manages the core protocols of eth2. /// This core behaviour is managed by `Behaviour` which adds peer management to all core /// behaviours. 
@@ -148,12 +150,12 @@ impl NetworkBehaviourEventProcess { - if info.listen_addrs.len() > 20 { + if info.listen_addrs.len() > MAX_IDENTIFY_ADDRESSES { debug!( self.log, "More than 20 addresses have been identified, truncating" ); - info.listen_addrs.truncate(20); + info.listen_addrs.truncate(MAX_IDENTIFY_ADDRESSES); } debug!(self.log, "Identified Peer"; "Peer" => format!("{}", peer_id), "Protocol Version" => info.protocol_version, @@ -264,55 +266,3 @@ impl Encode for PubsubMessage { } } } - -/* -impl Decode for PubsubMessage { - fn is_ssz_fixed_len() -> bool { - false - } - - fn from_ssz_bytes(bytes: &[u8]) -> Result { - let mut builder = ssz::SszDecoderBuilder::new(&bytes); - - builder.register_type::()?; - builder.register_type::>()?; - - let mut decoder = builder.build()?; - - let id: u32 = decoder.decode_next()?; - let body: Vec = decoder.decode_next()?; - - match id { - 0 => Ok(PubsubMessage::Block(BeaconBlock::from_ssz_bytes(&body)?)), - 1 => Ok(PubsubMessage::Attestation(Attestation::from_ssz_bytes( - &body, - )?)), - _ => Err(DecodeError::BytesInvalid( - "Invalid PubsubMessage id".to_string(), - )), - } - } -} -*/ - -/* -#[cfg(test)] -mod test { - use super::*; - use types::*; - - #[test] - fn ssz_encoding() { - let original = PubsubMessage::Block(BeaconBlock::::empty( - &MainnetEthSpec::default_spec(), - )); - - let encoded = ssz_encode(&original); - - let decoded = PubsubMessage::from_ssz_bytes(&encoded).unwrap(); - - assert_eq!(original, decoded); - } - -} -*/ diff --git a/beacon_node/eth2-libp2p/src/rpc/handler.rs b/beacon_node/eth2-libp2p/src/rpc/handler.rs index 355cc52ee..dbc32c5a4 100644 --- a/beacon_node/eth2-libp2p/src/rpc/handler.rs +++ b/beacon_node/eth2-libp2p/src/rpc/handler.rs @@ -268,8 +268,11 @@ where Self::Error, > { if let Some(err) = self.pending_error.take() { + // Returning an error here will result in dropping any peer that doesn't support any of + // the RPC protocols. 
For our immediate purposes we permit this and simply log that an + // upgrade was not supported. + // TODO: Add a logger to the handler for trace output. dbg!(&err); - //return Err(err); } // return any events that need to be reported diff --git a/beacon_node/src/main.rs b/beacon_node/src/main.rs index be57c6c9d..b34259f5a 100644 --- a/beacon_node/src/main.rs +++ b/beacon_node/src/main.rs @@ -4,7 +4,7 @@ use clap::{App, Arg}; use client::{ClientConfig, Eth2Config}; use env_logger::{Builder, Env}; use eth2_config::{read_from_file, write_to_file}; -use slog::{crit, o, Drain, Level}; +use slog::{crit, o, warn, Drain, Level}; use std::fs; use std::path::PathBuf; @@ -323,19 +323,36 @@ fn main() { Some("interop") => Some(Eth2Config::interop()), _ => None, }; - // if cli is specified, write the new config + // if a CLI flag is specified, write the new config if it doesn't exist, + // otherwise notify the user that the file will not be written. + let eth2_config_from_file = match read_from_file::(eth2_config_path.clone()) { + Ok(config) => config, + Err(e) => { + crit!(log, "Failed to read the Eth2Config from file"; "error" => format!("{:?}", e)); + return; + } + }; + let mut eth2_config = { if let Some(cli_config) = cli_config { - if let Err(e) = write_to_file(eth2_config_path, &cli_config) { - crit!(log, "Failed to write default Eth2Config to file"; "error" => format!("{:?}", e)); - return; + if eth2_config_from_file.is_none() { + // write to file if one doesn't exist + if let Err(e) = write_to_file(eth2_config_path, &cli_config) { + crit!(log, "Failed to write default Eth2Config to file"; "error" => format!("{:?}", e)); + return; + } + } else { + warn!( + log, + "Eth2Config file exists. 
Configuration file is ignored, using default" + ); } cli_config } else { - // config not specified, read from disk - match read_from_file::(eth2_config_path.clone()) { - Ok(Some(c)) => c, - Ok(None) => { + // CLI config not specified, read from disk + match eth2_config_from_file { + Some(config) => config, + None => { // set default to minimal let eth2_config = Eth2Config::minimal(); if let Err(e) = write_to_file(eth2_config_path, ð2_config) { @@ -344,10 +361,6 @@ fn main() { } eth2_config } - Err(e) => { - crit!(log, "Failed to instantiate an Eth2Config"; "error" => format!("{:?}", e)); - return; - } } } }; @@ -363,7 +376,7 @@ fn main() { // check to ensure the spec constants between the client and eth2_config match if eth2_config.spec_constants != client_config.spec_constants { - crit!(log, "Specification constants do not match."; "Client Config" => format!("{}", client_config.spec_constants), "Eth2 Config" => format!("{}", eth2_config.spec_constants)); + crit!(log, "Specification constants do not match."; "client_config" => format!("{}", client_config.spec_constants), "eth2_config" => format!("{}", eth2_config.spec_constants)); return; } diff --git a/validator_client/src/main.rs b/validator_client/src/main.rs index 0782df323..83a874df7 100644 --- a/validator_client/src/main.rs +++ b/validator_client/src/main.rs @@ -11,7 +11,7 @@ use crate::service::Service as ValidatorService; use clap::{App, Arg}; use eth2_config::{read_from_file, write_to_file, Eth2Config}; use protos::services_grpc::ValidatorServiceClient; -use slog::{crit, error, info, o, Drain, Level}; +use slog::{crit, error, info, o, warn, Drain, Level}; use std::fs; use std::path::PathBuf; use types::{InteropEthSpec, Keypair, MainnetEthSpec, MinimalEthSpec}; @@ -173,19 +173,36 @@ fn main() { Some("interop") => Some(Eth2Config::interop()), _ => None, }; - // if cli is specified, write the new config + // if a CLI flag is specified, write the new config if it doesn't exist, + // otherwise notify the user 
that the file will not be written. + let eth2_config_from_file = match read_from_file::(eth2_config_path.clone()) { + Ok(config) => config, + Err(e) => { + crit!(log, "Failed to read the Eth2Config from file"; "error" => format!("{:?}", e)); + return; + } + }; + let mut eth2_config = { if let Some(cli_config) = cli_config { - if let Err(e) = write_to_file(eth2_config_path, &cli_config) { - crit!(log, "Failed to write default Eth2Config to file"; "error" => format!("{:?}", e)); - return; + if eth2_config_from_file.is_none() { + // write to file if one doesn't exist + if let Err(e) = write_to_file(eth2_config_path, &cli_config) { + crit!(log, "Failed to write default Eth2Config to file"; "error" => format!("{:?}", e)); + return; + } + } else { + warn!( + log, + "Eth2Config file exists. Configuration file is ignored, using default" + ); } cli_config } else { - // config not specified, read from disk - match read_from_file::(eth2_config_path.clone()) { - Ok(Some(c)) => c, - Ok(None) => { + // CLI config not specified, read from disk + match eth2_config_from_file { + Some(config) => config, + None => { // set default to minimal let eth2_config = Eth2Config::minimal(); if let Err(e) = write_to_file(eth2_config_path, ð2_config) { @@ -194,10 +211,6 @@ fn main() { } eth2_config } - Err(e) => { - crit!(log, "Failed to instantiate an Eth2Config"; "error" => format!("{:?}", e)); - return; - } } } }; From ce5061603250b10f2e18a1090c5751f028460c32 Mon Sep 17 00:00:00 2001 From: Age Manning Date: Thu, 8 Aug 2019 11:31:36 +1000 Subject: [PATCH 038/305] Improve logging --- beacon_node/client/src/lib.rs | 11 ++------ beacon_node/client/src/notifier.rs | 2 +- beacon_node/eth2-libp2p/src/discovery.rs | 36 +++++++++++------------- beacon_node/eth2-libp2p/src/service.rs | 19 ++++++------- beacon_node/network/src/service.rs | 17 +++++------ 5 files changed, 38 insertions(+), 47 deletions(-) diff --git a/beacon_node/client/src/lib.rs b/beacon_node/client/src/lib.rs index 
65ba071fa..4b64c1070 100644 --- a/beacon_node/client/src/lib.rs +++ b/beacon_node/client/src/lib.rs @@ -100,16 +100,9 @@ where } do_state_catchup(&beacon_chain, &log); - // Start the network service, libp2p and syncing threads - // TODO: Add beacon_chain reference to network parameters let network_config = &client_config.network; - let network_logger = log.new(o!("Service" => "Network")); - let (network, network_send) = NetworkService::new( - beacon_chain.clone(), - network_config, - executor, - network_logger, - )?; + let (network, network_send) = + NetworkService::new(beacon_chain.clone(), network_config, executor, log.clone())?; // spawn the RPC server let rpc_exit_signal = if client_config.rpc.enabled { diff --git a/beacon_node/client/src/notifier.rs b/beacon_node/client/src/notifier.rs index 1c7cf3867..a763196c9 100644 --- a/beacon_node/client/src/notifier.rs +++ b/beacon_node/client/src/notifier.rs @@ -38,7 +38,7 @@ pub fn run( // Panics if libp2p is poisoned. let connected_peer_count = libp2p.lock().swarm.connected_peers(); - debug!(log, "libp2p"; "peer_count" => connected_peer_count); + debug!(log, "Libp2p connected peer status"; "peer_count" => connected_peer_count); if connected_peer_count <= WARN_PEER_COUNT { warn!(log, "Low libp2p peer count"; "peer_count" => connected_peer_count); diff --git a/beacon_node/eth2-libp2p/src/discovery.rs b/beacon_node/eth2-libp2p/src/discovery.rs index 4c1794945..3e34b9b03 100644 --- a/beacon_node/eth2-libp2p/src/discovery.rs +++ b/beacon_node/eth2-libp2p/src/discovery.rs @@ -9,7 +9,7 @@ use libp2p::discv5::{Discv5, Discv5Event}; use libp2p::enr::{Enr, EnrBuilder, NodeId}; use libp2p::multiaddr::Protocol; use libp2p::swarm::{NetworkBehaviour, NetworkBehaviourAction, PollParameters, ProtocolsHandler}; -use slog::{debug, info, o, warn}; +use slog::{debug, info, warn}; use std::collections::HashSet; use std::fs::File; use std::io::prelude::*; @@ -63,7 +63,7 @@ impl Discovery { config: &NetworkConfig, log: &slog::Logger, ) -> 
error::Result { - let log = log.new(o!("Service" => "Libp2p-Discovery")); + let log = log.clone(); // checks if current ENR matches that found on disk let local_enr = load_enr(local_key, config, &log)?; @@ -73,19 +73,19 @@ impl Discovery { None => String::from(""), }; - info!(log, "Local ENR: {}", local_enr.to_base64()); - debug!(log, "Local Node Id: {}", local_enr.node_id()); - debug!(log, "Local ENR seq: {}", local_enr.seq()); + info!(log, "ENR Initialised"; "ENR" => local_enr.to_base64(), "Seq" => local_enr.seq()); + debug!(log, "Discv5 Node ID Initialised"; "node_id" => format!("{}",local_enr.node_id())); let mut discovery = Discv5::new(local_enr, local_key.clone(), config.listen_address) - .map_err(|e| format!("Discv5 service failed: {:?}", e))?; + .map_err(|e| format!("Discv5 service failed. Error: {:?}", e))?; // Add bootnodes to routing table for bootnode_enr in config.boot_nodes.clone() { debug!( log, - "Adding node to routing table: {}", - bootnode_enr.node_id() + "Adding node to routing table"; + "Node ID" => format!("{}", + bootnode_enr.node_id()) ); discovery.add_enr(bootnode_enr); } @@ -123,7 +123,7 @@ impl Discovery { fn find_peers(&mut self) { // pick a random NodeId let random_node = NodeId::random(); - debug!(self.log, "Searching for peers..."); + debug!(self.log, "Searching for peers"); self.discovery.find_node(random_node); // update the time until next discovery @@ -201,7 +201,7 @@ where } Ok(Async::NotReady) => break, Err(e) => { - warn!(self.log, "Discovery peer search failed: {:?}", e); + warn!(self.log, "Discovery peer search failed"; "Error" => format!("{:?}", e)); } } } @@ -227,16 +227,16 @@ where }); } Discv5Event::FindNodeResult { closer_peers, .. 
} => { - debug!(self.log, "Discv5 query found {} peers", closer_peers.len()); + debug!(self.log, "Discovery query completed"; "peers_found" => closer_peers.len()); if closer_peers.is_empty() { - debug!(self.log, "Discv5 random query yielded empty results"); + debug!(self.log, "Discovery random query found no peers"); } for peer_id in closer_peers { // if we need more peers, attempt a connection if self.connected_peers.len() < self.max_peers && self.connected_peers.get(&peer_id).is_none() { - debug!(self.log, "Discv5: Peer discovered"; "Peer"=> format!("{:?}", peer_id)); + debug!(self.log, "Peer discovered"; "peer_id"=> format!("{:?}", peer_id)); return Async::Ready(NetworkBehaviourAction::DialPeer { peer_id, }); @@ -283,14 +283,12 @@ fn load_enr( Ok(_) => { match Enr::from_str(&enr_string) { Ok(enr) => { - debug!(log, "ENR found in file: {:?}", enr_f); - if enr.node_id() == local_enr.node_id() { if enr.ip() == config.discovery_address.into() && enr.tcp() == Some(config.libp2p_port) && enr.udp() == Some(config.discovery_port) { - debug!(log, "ENR loaded from file"); + debug!(log, "ENR loaded from file"; "File" => format!("{:?}", enr_f)); // the stored ENR has the same configuration, use it return Ok(enr); } @@ -300,11 +298,11 @@ fn load_enr( local_enr.set_seq(new_seq_no, local_key).map_err(|e| { format!("Could not update ENR sequence number: {:?}", e) })?; - debug!(log, "ENR sequence number increased to: {}", new_seq_no); + debug!(log, "ENR sequence number increased"; "Seq" => new_seq_no); } } Err(e) => { - warn!(log, "ENR from file could not be decoded: {:?}", e); + warn!(log, "ENR from file could not be decoded"; "Error" => format!("{:?}", e)); } } } @@ -327,7 +325,7 @@ fn save_enr_to_disc(dir: &Path, enr: &Enr, log: &slog::Logger) { Err(e) => { warn!( log, - "Could not write ENR to file: {:?}{:?}. 
Error: {}", dir, ENR_FILENAME, e + "Could not write ENR to file"; "File" => format!("{:?}{:?}",dir, ENR_FILENAME), "Error" => format!("{}", e) ); } } diff --git a/beacon_node/eth2-libp2p/src/service.rs b/beacon_node/eth2-libp2p/src/service.rs index 316aa0579..e0867e87f 100644 --- a/beacon_node/eth2-libp2p/src/service.rs +++ b/beacon_node/eth2-libp2p/src/service.rs @@ -40,13 +40,12 @@ pub struct Service { impl Service { pub fn new(config: NetworkConfig, log: slog::Logger) -> error::Result { - debug!(log, "Network-libp2p Service starting"); + trace!(log, "Libp2p Service starting"); // load the private key from CLI flag, disk or generate a new one let local_private_key = load_private_key(&config, &log); - let local_peer_id = PeerId::from(local_private_key.public()); - info!(log, "Local peer id: {:?}", local_peer_id); + info!(log, "Libp2p Service"; "peer_id" => format!("{:?}", local_peer_id)); let mut swarm = { // Set up the transport - tcp/ws with secio and mplex/yamux @@ -67,21 +66,21 @@ impl Service { Ok(_) => { let mut log_address = listen_multiaddr; log_address.push(Protocol::P2p(local_peer_id.clone().into())); - info!(log, "Listening on: {}", log_address); + info!(log, "Listening established"; "Address" => format!("{}", log_address)); } Err(err) => warn!( log, - "Cannot listen on: {} because: {:?}", listen_multiaddr, err + "Failed to listen on address"; "Address" => format!("{}", listen_multiaddr), "Error" => format!("{:?}", err) ), }; // attempt to connect to user-input libp2p nodes for multiaddr in config.libp2p_nodes { match Swarm::dial_addr(&mut swarm, multiaddr.clone()) { - Ok(()) => debug!(log, "Dialing libp2p node: {}", multiaddr), + Ok(()) => debug!(log, "Dialing libp2p peer"; "Address" => format!("{}", multiaddr)), Err(err) => debug!( log, - "Could not connect to node: {} error: {:?}", multiaddr, err + "Could not connect to peer"; "Address" => format!("{}", multiaddr), "Error" => format!("{:?}", err) ), }; } @@ -104,13 +103,13 @@ impl Service { let mut 
subscribed_topics = vec![]; for topic in topics { if swarm.subscribe(topic.clone()) { - trace!(log, "Subscribed to topic: {:?}", topic); + trace!(log, "Subscribed to topic"; "Topic" => format!("{}", topic)); subscribed_topics.push(topic); } else { - warn!(log, "Could not subscribe to topic: {:?}", topic) + warn!(log, "Could not subscribe to topic"; "Topic" => format!("{}", topic)); } } - info!(log, "Subscribed to topics: {:?}", subscribed_topics); + info!(log, "Subscribed to topics"; "Topics" => format!("{:?}", subscribed_topics.iter().map(|t| format!("{}", t)).collect::>())); Ok(Service { _local_peer_id: local_peer_id, diff --git a/beacon_node/network/src/service.rs b/beacon_node/network/src/service.rs index e5ca2a917..df0404cfa 100644 --- a/beacon_node/network/src/service.rs +++ b/beacon_node/network/src/service.rs @@ -20,8 +20,7 @@ pub struct Service { libp2p_service: Arc>, _libp2p_exit: oneshot::Sender<()>, _network_send: mpsc::UnboundedSender, - _phantom: PhantomData, //message_handler: MessageHandler, - //message_handler_send: Sender + _phantom: PhantomData, } impl Service { @@ -42,17 +41,19 @@ impl Service { message_handler_log, )?; + let network_log = log.new(o!("Service" => "Network")); // launch libp2p service - let libp2p_log = log.new(o!("Service" => "Libp2p")); - let libp2p_service = Arc::new(Mutex::new(LibP2PService::new(config.clone(), libp2p_log)?)); + let libp2p_service = Arc::new(Mutex::new(LibP2PService::new( + config.clone(), + network_log.clone(), + )?)); - // TODO: Spawn thread to handle libp2p messages and pass to message handler thread. 
let libp2p_exit = spawn_service( libp2p_service.clone(), network_recv, message_handler_send, executor, - log, + network_log, )?; let network_service = Service { libp2p_service, @@ -142,13 +143,13 @@ fn network_service( .map_err(|_| "Failed to send RPC to handler")?; } Libp2pEvent::PeerDialed(peer_id) => { - debug!(log, "Peer Dialed: {:?}", peer_id); + debug!(log, "Peer Dialed"; "PeerID" => format!("{:?}", peer_id)); message_handler_send .try_send(HandlerMessage::PeerDialed(peer_id)) .map_err(|_| "Failed to send PeerDialed to handler")?; } Libp2pEvent::PeerDisconnected(peer_id) => { - debug!(log, "Peer Disconnected: {:?}", peer_id); + debug!(log, "Peer Disconnected"; "PeerID" => format!("{:?}", peer_id)); message_handler_send .try_send(HandlerMessage::PeerDisconnected(peer_id)) .map_err(|_| "Failed to send PeerDisconnected to handler")?; From d83fa670681f96d705da89300e7c4ad126049bff Mon Sep 17 00:00:00 2001 From: Age Manning Date: Thu, 8 Aug 2019 12:06:46 +1000 Subject: [PATCH 039/305] Subscribe to all required gossipsub topics --- beacon_node/eth2-libp2p/src/config.rs | 13 +++++++++---- beacon_node/eth2-libp2p/src/lib.rs | 4 +--- beacon_node/eth2-libp2p/src/service.rs | 24 +++++++++++++++++++----- 3 files changed, 29 insertions(+), 12 deletions(-) diff --git a/beacon_node/eth2-libp2p/src/config.rs b/beacon_node/eth2-libp2p/src/config.rs index ddf14cc04..d7648ec3f 100644 --- a/beacon_node/eth2-libp2p/src/config.rs +++ b/beacon_node/eth2-libp2p/src/config.rs @@ -6,9 +6,14 @@ use serde_derive::{Deserialize, Serialize}; use std::path::PathBuf; use std::time::Duration; -/// The beacon node topic string to subscribe to. +/// The gossipsub topic names. 
+pub const TOPIC_PREFIX: &str = "eth2"; +pub const TOPIC_ENCODING_POSTFIX: &str = "ssz"; pub const BEACON_BLOCK_TOPIC: &str = "beacon_block"; pub const BEACON_ATTESTATION_TOPIC: &str = "beacon_attestation"; +pub const VOLUNTARY_EXIT_TOPIC: &str = "voluntary_exit"; +pub const PROPOSER_SLASHING_TOPIC: &str = "proposer_slashing"; +pub const ATTESTER_SLASHING_TOPIC: &str = "attester_slashing"; pub const SHARD_TOPIC_PREFIX: &str = "shard"; #[derive(Clone, Debug, Serialize, Deserialize)] @@ -63,10 +68,10 @@ impl Default for Config { discovery_address: "127.0.0.1".parse().expect("valid ip address"), discovery_port: 9000, max_peers: 10, - //TODO: Set realistic values for production - // Note: This defaults topics to plain strings. Not hashes + // Note: The topics by default are sent as plain strings. Hashes are an optional + // parameter. gs_config: GossipsubConfigBuilder::new() - .max_transmit_size(1_000_000) + .max_transmit_size(1_048_576) .heartbeat_interval(Duration::from_secs(20)) .build(), boot_nodes: vec![], diff --git a/beacon_node/eth2-libp2p/src/lib.rs b/beacon_node/eth2-libp2p/src/lib.rs index 54a4f2a99..7c3a93d61 100644 --- a/beacon_node/eth2-libp2p/src/lib.rs +++ b/beacon_node/eth2-libp2p/src/lib.rs @@ -10,9 +10,7 @@ pub mod rpc; mod service; pub use behaviour::PubsubMessage; -pub use config::{ - Config as NetworkConfig, BEACON_ATTESTATION_TOPIC, BEACON_BLOCK_TOPIC, SHARD_TOPIC_PREFIX, -}; +pub use config::{Config as NetworkConfig, *}; pub use libp2p::gossipsub::{Topic, TopicHash}; pub use libp2p::multiaddr; pub use libp2p::Multiaddr; diff --git a/beacon_node/eth2-libp2p/src/service.rs b/beacon_node/eth2-libp2p/src/service.rs index e0867e87f..98718445b 100644 --- a/beacon_node/eth2-libp2p/src/service.rs +++ b/beacon_node/eth2-libp2p/src/service.rs @@ -1,10 +1,10 @@ use crate::behaviour::{Behaviour, BehaviourEvent, PubsubMessage}; +use crate::config::*; use crate::error; use crate::multiaddr::Protocol; use crate::rpc::RPCEvent; use crate::NetworkConfig; use 
crate::{Topic, TopicHash}; -use crate::{BEACON_ATTESTATION_TOPIC, BEACON_BLOCK_TOPIC}; use futures::prelude::*; use futures::Stream; use libp2p::core::{ @@ -87,10 +87,24 @@ impl Service { // subscribe to default gossipsub topics let mut topics = vec![]; - //TODO: Handle multiple shard attestations. For now we simply use a separate topic for - // attestations - topics.push(Topic::new(BEACON_ATTESTATION_TOPIC.into())); - topics.push(Topic::new(BEACON_BLOCK_TOPIC.into())); + + /* Here we subscribe to all the required gossipsub topics required for interop. + * The topic builder adds the required prefix and postfix to the hardcoded topics that we + * must subscribe to. + */ + let topic_builder = |topic| { + Topic::new(format!( + "/{}/{}/{}", + TOPIC_PREFIX, topic, TOPIC_ENCODING_POSTFIX, + )) + }; + topics.push(topic_builder(BEACON_BLOCK_TOPIC)); + topics.push(topic_builder(BEACON_ATTESTATION_TOPIC)); + topics.push(topic_builder(VOLUNTARY_EXIT_TOPIC)); + topics.push(topic_builder(PROPOSER_SLASHING_TOPIC)); + topics.push(topic_builder(ATTESTER_SLASHING_TOPIC)); + + // Add any topics specified by the user topics.append( &mut config .topics From 80f15f5d700693520ed7ca722e1cd9b0227147c2 Mon Sep 17 00:00:00 2001 From: Age Manning Date: Thu, 8 Aug 2019 12:38:54 +1000 Subject: [PATCH 040/305] Correct gossipsub message encoding. 
Add extended topics --- beacon_node/eth2-libp2p/src/behaviour.rs | 54 ++++++++++++++---------- 1 file changed, 32 insertions(+), 22 deletions(-) diff --git a/beacon_node/eth2-libp2p/src/behaviour.rs b/beacon_node/eth2-libp2p/src/behaviour.rs index b87f8a061..749d2e5b4 100644 --- a/beacon_node/eth2-libp2p/src/behaviour.rs +++ b/beacon_node/eth2-libp2p/src/behaviour.rs @@ -1,8 +1,8 @@ +use crate::config::*; use crate::discovery::Discovery; use crate::rpc::{RPCEvent, RPCMessage, RPC}; use crate::{error, NetworkConfig}; use crate::{Topic, TopicHash}; -use crate::{BEACON_ATTESTATION_TOPIC, BEACON_BLOCK_TOPIC}; use futures::prelude::*; use libp2p::{ core::identity::Keypair, @@ -15,7 +15,6 @@ use libp2p::{ NetworkBehaviour, PeerId, }; use slog::{debug, o, trace}; -use ssz::{ssz_encode, Encode}; use std::num::NonZeroU32; use std::time::Duration; @@ -189,9 +188,9 @@ impl Behaviour { /// Publishes a message on the pubsub (gossipsub) behaviour. pub fn publish(&mut self, topics: Vec, message: PubsubMessage) { - let message_bytes = ssz_encode(&message); + let message_data = message.to_data(); for topic in topics { - self.gossipsub.publish(topic, message_bytes.clone()); + self.gossipsub.publish(topic, message_data.clone()); } } @@ -220,13 +219,20 @@ pub enum BehaviourEvent { }, } -/// Messages that are passed to and from the pubsub (Gossipsub) behaviour. +/// Messages that are passed to and from the pubsub (Gossipsub) behaviour. These are encoded and +/// decoded upstream. #[derive(Debug, Clone, PartialEq)] pub enum PubsubMessage { /// Gossipsub message providing notification of a new block. Block(Vec), /// Gossipsub message providing notification of a new attestation. Attestation(Vec), + /// Gossipsub message providing notification of a voluntary exit. + VoluntaryExit(Vec), + /// Gossipsub message providing notification of a new proposer slashing. + ProposerSlashing(Vec), + /// Gossipsub message providing notification of a new attester slashing. 
+ AttesterSlashing(Vec), /// Gossipsub message from an unknown topic. Unknown(Vec), } @@ -240,29 +246,33 @@ impl PubsubMessage { */ fn from_topics(topics: &Vec, data: Vec) -> Self { for topic in topics { - match topic.as_str() { - BEACON_BLOCK_TOPIC => return PubsubMessage::Block(data), - BEACON_ATTESTATION_TOPIC => return PubsubMessage::Attestation(data), - _ => {} + // compare the prefix and postfix, then match on the topic + let topic_parts: Vec<&str> = topic.as_str().split('/').collect(); + if topic_parts.len() == 4 + && topic_parts[1] == TOPIC_PREFIX + && topic_parts[3] == TOPIC_ENCODING_POSTFIX + { + match topic_parts[2] { + BEACON_BLOCK_TOPIC => return PubsubMessage::Block(data), + BEACON_ATTESTATION_TOPIC => return PubsubMessage::Attestation(data), + VOLUNTARY_EXIT_TOPIC => return PubsubMessage::VoluntaryExit(data), + PROPOSER_SLASHING_TOPIC => return PubsubMessage::ProposerSlashing(data), + ATTESTER_SLASHING_TOPIC => return PubsubMessage::AttesterSlashing(data), + _ => {} + } } } PubsubMessage::Unknown(data) } -} -impl Encode for PubsubMessage { - fn is_ssz_fixed_len() -> bool { - false - } - - fn ssz_append(&self, buf: &mut Vec) { + fn to_data(self) -> Vec { match self { - PubsubMessage::Block(inner) - | PubsubMessage::Attestation(inner) - | PubsubMessage::Unknown(inner) => { - // Encode the gossip as a Vec; - buf.append(&mut inner.as_ssz_bytes()); - } + PubsubMessage::Block(data) + | PubsubMessage::Attestation(data) + | PubsubMessage::VoluntaryExit(data) + | PubsubMessage::ProposerSlashing(data) + | PubsubMessage::AttesterSlashing(data) + | PubsubMessage::Unknown(data) => data, } } } From 5a74239ebcf0473120cdfc1acb4bf31fcc338f24 Mon Sep 17 00:00:00 2001 From: Age Manning Date: Thu, 8 Aug 2019 14:58:33 +1000 Subject: [PATCH 041/305] Add decoding/encoding for extended gossip topics. 
Correct logging CLI --- beacon_node/Cargo.toml | 4 +- beacon_node/eth2-libp2p/src/config.rs | 2 + beacon_node/network/src/message_handler.rs | 86 ++++++++++++++++++---- beacon_node/src/main.rs | 17 +---- 4 files changed, 79 insertions(+), 30 deletions(-) diff --git a/beacon_node/Cargo.toml b/beacon_node/Cargo.toml index 9124047e4..cba73b8a4 100644 --- a/beacon_node/Cargo.toml +++ b/beacon_node/Cargo.toml @@ -11,7 +11,7 @@ store = { path = "./store" } client = { path = "client" } version = { path = "version" } clap = "2.32.0" -slog = { version = "^2.2.3" , features = ["max_level_trace"] } +slog = { version = "^2.2.3" , features = ["max_level_trace", "release_max_level_trace"] } slog-term = "^2.4.0" slog-async = "^2.3.0" ctrlc = { version = "3.1.1", features = ["termination"] } @@ -22,3 +22,5 @@ exit-future = "0.1.3" env_logger = "0.6.1" dirs = "2.0.1" logging = { path = "../eth2/utils/logging" } +slog-scope = "4.1.2" +slog-stdlog = "3.0.5" diff --git a/beacon_node/eth2-libp2p/src/config.rs b/beacon_node/eth2-libp2p/src/config.rs index d7648ec3f..7cb501c1f 100644 --- a/beacon_node/eth2-libp2p/src/config.rs +++ b/beacon_node/eth2-libp2p/src/config.rs @@ -7,6 +7,8 @@ use std::path::PathBuf; use std::time::Duration; /// The gossipsub topic names. 
+// These constants form a topic name of the form /TOPIC_PREFIX/TOPIC/ENCODING_POSTFIX +// For example /eth2/beacon_block/ssz pub const TOPIC_PREFIX: &str = "eth2"; pub const TOPIC_ENCODING_POSTFIX: &str = "ssz"; pub const BEACON_BLOCK_TOPIC: &str = "beacon_block"; diff --git a/beacon_node/network/src/message_handler.rs b/beacon_node/network/src/message_handler.rs index 72a507ad7..b86dcb969 100644 --- a/beacon_node/network/src/message_handler.rs +++ b/beacon_node/network/src/message_handler.rs @@ -10,11 +10,13 @@ use eth2_libp2p::{ }; use futures::future::Future; use futures::stream::Stream; -use slog::{debug, warn}; +use slog::{debug, trace, warn}; use ssz::{Decode, DecodeError}; use std::sync::Arc; use tokio::sync::mpsc; -use types::{Attestation, BeaconBlock, BeaconBlockHeader}; +use types::{ + Attestation, AttesterSlashing, BeaconBlock, BeaconBlockHeader, ProposerSlashing, VoluntaryExit, +}; /// Handles messages received from the network and client and organises syncing. pub struct MessageHandler { @@ -49,7 +51,7 @@ impl MessageHandler { executor: &tokio::runtime::TaskExecutor, log: slog::Logger, ) -> error::Result> { - debug!(log, "Service starting"); + trace!(log, "Service starting"); let (handler_send, handler_recv) = mpsc::unbounded_channel(); @@ -65,7 +67,6 @@ impl MessageHandler { }; // spawn handler task - // TODO: Handle manual termination of thread executor.spawn( handler_recv .for_each(move |msg| Ok(handler.handle_message(msg))) @@ -221,43 +222,79 @@ impl MessageHandler { /// Handle various RPC errors fn handle_rpc_error(&mut self, peer_id: PeerId, request_id: RequestId, error: RPCError) { //TODO: Handle error correctly - warn!(self.log, "RPC Error"; "Peer" => format!("{:?}", peer_id), "Request Id" => format!("{}", request_id), "Error" => format!("{:?}", error)); + warn!(self.log, "RPC Error"; "Peer" => format!("{:?}", peer_id), "request_id" => format!("{}", request_id), "Error" => format!("{:?}", error)); } /// Handle RPC messages fn 
handle_gossip(&mut self, peer_id: PeerId, gossip_message: PubsubMessage) { match gossip_message { PubsubMessage::Block(message) => match self.decode_gossip_block(message) { - Err(e) => { - debug!(self.log, "Invalid Gossiped Beacon Block"; "Peer" => format!("{}", peer_id), "Error" => format!("{:?}", e)); - } Ok(block) => { let _should_forward_on = self.sync .on_block_gossip(peer_id, block, &mut self.network_context); } + Err(e) => { + debug!(self.log, "Invalid gossiped beacon block"; "peer_id" => format!("{}", peer_id), "Error" => format!("{:?}", e)); + } }, PubsubMessage::Attestation(message) => match self.decode_gossip_attestation(message) { - Err(e) => { - debug!(self.log, "Invalid Gossiped Attestation"; "Peer" => format!("{}", peer_id), "Error" => format!("{:?}", e)); - } Ok(attestation) => { self.sync .on_attestation_gossip(peer_id, attestation, &mut self.network_context) } + Err(e) => { + debug!(self.log, "Invalid gossiped attestation"; "peer_id" => format!("{}", peer_id), "Error" => format!("{:?}", e)); + } }, + PubsubMessage::VoluntaryExit(message) => match self.decode_gossip_exit(message) { + Ok(_exit) => { + // TODO: Handle exits + debug!(self.log, "Received a voluntary exit"; "peer_id" => format!("{}", peer_id) ); + } + Err(e) => { + debug!(self.log, "Invalid gossiped exit"; "peer_id" => format!("{}", peer_id), "Error" => format!("{:?}", e)); + } + }, + PubsubMessage::ProposerSlashing(message) => { + match self.decode_gossip_proposer_slashing(message) { + Ok(_slashing) => { + // TODO: Handle proposer slashings + debug!(self.log, "Received a proposer slashing"; "peer_id" => format!("{}", peer_id) ); + } + Err(e) => { + debug!(self.log, "Invalid gossiped proposer slashing"; "peer_id" => format!("{}", peer_id), "Error" => format!("{:?}", e)); + } + } + } + PubsubMessage::AttesterSlashing(message) => { + match self.decode_gossip_attestation_slashing(message) { + Ok(_slashing) => { + // TODO: Handle attester slashings + debug!(self.log, "Received an attester 
slashing"; "peer_id" => format!("{}", peer_id) ); + } + Err(e) => { + debug!(self.log, "Invalid gossiped attester slashing"; "peer_id" => format!("{}", peer_id), "Error" => format!("{:?}", e)); + } + } + } PubsubMessage::Unknown(message) => { // Received a message from an unknown topic. Ignore for now - debug!(self.log, "Unknown Gossip Message"; "Peer" => format!("{}", peer_id), "Message" => format!("{:?}", message)); + debug!(self.log, "Unknown Gossip Message"; "peer_id" => format!("{}", peer_id), "Message" => format!("{:?}", message)); } } } - /* Decoding of blocks and attestations from the network. + /* Decoding of gossipsub objects from the network. + * + * The decoding is done in the message handler as it has access to to a `BeaconChain` and can + * therefore apply more efficient logic in decoding and verification. * * TODO: Apply efficient decoding/verification of these objects */ + /* Gossipsub Domain Decoding */ + // Note: These are not generics as type-specific verification will need to be applied. fn decode_gossip_block( &self, beacon_block: Vec, @@ -274,6 +311,29 @@ impl MessageHandler { Attestation::from_ssz_bytes(&beacon_block) } + fn decode_gossip_exit(&self, voluntary_exit: Vec) -> Result { + //TODO: Apply verification before decoding. + VoluntaryExit::from_ssz_bytes(&voluntary_exit) + } + + fn decode_gossip_proposer_slashing( + &self, + proposer_slashing: Vec, + ) -> Result { + //TODO: Apply verification before decoding. + ProposerSlashing::from_ssz_bytes(&proposer_slashing) + } + + fn decode_gossip_attestation_slashing( + &self, + attester_slashing: Vec, + ) -> Result, DecodeError> { + //TODO: Apply verification before decoding. + AttesterSlashing::from_ssz_bytes(&attester_slashing) + } + + /* Req/Resp Domain Decoding */ + /// Verifies and decodes the ssz-encoded block bodies received from peers. 
fn decode_block_bodies( &self, diff --git a/beacon_node/src/main.rs b/beacon_node/src/main.rs index b34259f5a..086ccc5be 100644 --- a/beacon_node/src/main.rs +++ b/beacon_node/src/main.rs @@ -9,7 +9,6 @@ use std::fs; use std::path::PathBuf; pub const DEFAULT_DATA_DIR: &str = ".lighthouse"; - pub const CLIENT_CONFIG_FILENAME: &str = "beacon-node.toml"; pub const ETH2_CONFIG_FILENAME: &str = "eth2-spec.toml"; pub const TESTNET_CONFIG_FILENAME: &str = "testnet.toml"; @@ -214,14 +213,7 @@ fn main() { .help("The title of the spec constants for chain config.") .takes_value(true) .possible_values(&["info", "debug", "trace", "warn", "error", "crit"]) - .default_value("trace"), - ) - .arg( - Arg::with_name("verbosity") - .short("v") - .multiple(true) - .help("Sets the verbosity level") - .takes_value(true), + .default_value("info"), ) .get_matches(); @@ -241,13 +233,6 @@ fn main() { _ => unreachable!("guarded by clap"), }; - let drain = match matches.occurrences_of("verbosity") { - 0 => drain.filter_level(Level::Info), - 1 => drain.filter_level(Level::Debug), - 2 => drain.filter_level(Level::Trace), - _ => drain.filter_level(Level::Trace), - }; - let mut log = slog::Logger::root(drain.fuse(), o!()); let data_dir = match matches From ec73dfe90b0568fcbc22a775b2e2bf509fde6370 Mon Sep 17 00:00:00 2001 From: Age Manning Date: Thu, 8 Aug 2019 17:46:39 +1000 Subject: [PATCH 042/305] Starting of req/resp overhaul --- beacon_node/eth2-libp2p/Cargo.toml | 1 + beacon_node/eth2-libp2p/src/rpc/codec/ssz.rs | 114 ++++---------- beacon_node/eth2-libp2p/src/rpc/methods.rs | 153 +++++-------------- beacon_node/eth2-libp2p/src/rpc/protocol.rs | 55 +++---- 4 files changed, 96 insertions(+), 227 deletions(-) diff --git a/beacon_node/eth2-libp2p/Cargo.toml b/beacon_node/eth2-libp2p/Cargo.toml index 794b09712..55081aed5 100644 --- a/beacon_node/eth2-libp2p/Cargo.toml +++ b/beacon_node/eth2-libp2p/Cargo.toml @@ -26,3 +26,4 @@ smallvec = "0.6.10" fnv = "1.0.6" unsigned-varint = "0.2.2" bytes = 
"0.4.12" +tokio-io-timeout = "0.3.1" diff --git a/beacon_node/eth2-libp2p/src/rpc/codec/ssz.rs b/beacon_node/eth2-libp2p/src/rpc/codec/ssz.rs index 8e2bdaa64..f7262118d 100644 --- a/beacon_node/eth2-libp2p/src/rpc/codec/ssz.rs +++ b/beacon_node/eth2-libp2p/src/rpc/codec/ssz.rs @@ -41,10 +41,8 @@ impl Encoder for SSZInboundCodec { RPCErrorResponse::Success(resp) => { match resp { RPCResponse::Hello(res) => res.as_ssz_bytes(), - RPCResponse::BeaconBlockRoots(res) => res.as_ssz_bytes(), - RPCResponse::BeaconBlockHeaders(res) => res.headers, // already raw bytes - RPCResponse::BeaconBlockBodies(res) => res.block_bodies, // already raw bytes - RPCResponse::BeaconChainState(res) => res.as_ssz_bytes(), + RPCResponse::BeaconBlocks(res) => res, // already raw bytes + RPCResponse::RecentBeaconBlocks(res) => res, // already raw bytes } } RPCErrorResponse::InvalidRequest(err) => err.as_ssz_bytes(), @@ -72,52 +70,30 @@ impl Decoder for SSZInboundCodec { match self.inner.decode(src).map_err(RPCError::from) { Ok(Some(packet)) => match self.protocol.message_name.as_str() { "hello" => match self.protocol.version.as_str() { - "1.0.0" => Ok(Some(RPCRequest::Hello(HelloMessage::from_ssz_bytes( + "1" => Ok(Some(RPCRequest::Hello(HelloMessage::from_ssz_bytes( &packet, )?))), - _ => Err(RPCError::InvalidProtocol("Unknown HELLO version")), + _ => unreachable!("Cannot negotiate an unknown version"), }, "goodbye" => match self.protocol.version.as_str() { - "1.0.0" => Ok(Some(RPCRequest::Goodbye(GoodbyeReason::from_ssz_bytes( + "1" => Ok(Some(RPCRequest::Goodbye(GoodbyeReason::from_ssz_bytes( &packet, )?))), - _ => Err(RPCError::InvalidProtocol( - "Unknown GOODBYE version.as_str()", - )), + _ => unreachable!("Cannot negotiate an unknown version"), }, - "beacon_block_roots" => match self.protocol.version.as_str() { - "1.0.0" => Ok(Some(RPCRequest::BeaconBlockRoots( - BeaconBlockRootsRequest::from_ssz_bytes(&packet)?, + "beacon_blocks" => match self.protocol.version.as_str() { + "1" => 
Ok(Some(RPCRequest::BeaconBlocks( + BeaconBlocksRequest::from_ssz_bytes(&packet)?, ))), - _ => Err(RPCError::InvalidProtocol( - "Unknown BEACON_BLOCK_ROOTS version.", - )), + _ => unreachable!("Cannot negotiate an unknown version"), }, - "beacon_block_headers" => match self.protocol.version.as_str() { - "1.0.0" => Ok(Some(RPCRequest::BeaconBlockHeaders( - BeaconBlockHeadersRequest::from_ssz_bytes(&packet)?, + "recent_beacon_blocks" => match self.protocol.version.as_str() { + "1" => Ok(Some(RPCRequest::RecentBeaconBlocks( + RecentBeaconBlocksRequest::from_ssz_bytes(&packet)?, ))), - _ => Err(RPCError::InvalidProtocol( - "Unknown BEACON_BLOCK_HEADERS version.", - )), + _ => unreachable!("Cannot negotiate an unknown version"), }, - "beacon_block_bodies" => match self.protocol.version.as_str() { - "1.0.0" => Ok(Some(RPCRequest::BeaconBlockBodies( - BeaconBlockBodiesRequest::from_ssz_bytes(&packet)?, - ))), - _ => Err(RPCError::InvalidProtocol( - "Unknown BEACON_BLOCK_BODIES version.", - )), - }, - "beacon_chain_state" => match self.protocol.version.as_str() { - "1.0.0" => Ok(Some(RPCRequest::BeaconChainState( - BeaconChainStateRequest::from_ssz_bytes(&packet)?, - ))), - _ => Err(RPCError::InvalidProtocol( - "Unknown BEACON_CHAIN_STATE version.", - )), - }, - _ => Err(RPCError::InvalidProtocol("Unknown message name.")), + _ => unreachable!("Cannot negotiate an unknown protocol"), }, Ok(None) => Ok(None), Err(e) => Err(e), @@ -156,10 +132,8 @@ impl Encoder for SSZOutboundCodec { let bytes = match item { RPCRequest::Hello(req) => req.as_ssz_bytes(), RPCRequest::Goodbye(req) => req.as_ssz_bytes(), - RPCRequest::BeaconBlockRoots(req) => req.as_ssz_bytes(), - RPCRequest::BeaconBlockHeaders(req) => req.as_ssz_bytes(), - RPCRequest::BeaconBlockBodies(req) => req.as_ssz_bytes(), - RPCRequest::BeaconChainState(req) => req.as_ssz_bytes(), + RPCRequest::BeaconBlocks(req) => req.as_ssz_bytes(), + RPCRequest::RecentBeaconBlocks(req) => req.as_ssz_bytes(), }; // length-prefix 
self.inner @@ -168,7 +142,11 @@ impl Encoder for SSZOutboundCodec { } } -// Decoder for outbound +// Decoder for outbound streams +// +// The majority of the decoding has now been pushed upstream due to the changing specification. +// We prefer to decode blocks and attestations with extra knowledge about the chain to perform +// faster verification checks before decoding entire blocks/attestations. impl Decoder for SSZOutboundCodec { type Item = RPCResponse; type Error = RPCError; @@ -177,51 +155,21 @@ impl Decoder for SSZOutboundCodec { match self.inner.decode(src).map_err(RPCError::from) { Ok(Some(packet)) => match self.protocol.message_name.as_str() { "hello" => match self.protocol.version.as_str() { - "1.0.0" => Ok(Some(RPCResponse::Hello(HelloMessage::from_ssz_bytes( + "1" => Ok(Some(RPCResponse::Hello(HelloMessage::from_ssz_bytes( &packet, )?))), - _ => Err(RPCError::InvalidProtocol("Unknown HELLO version.")), + _ => unreachable!("Cannot negotiate an unknown version"), }, "goodbye" => Err(RPCError::InvalidProtocol("GOODBYE doesn't have a response")), - "beacon_block_roots" => match self.protocol.version.as_str() { - "1.0.0" => Ok(Some(RPCResponse::BeaconBlockRoots( - BeaconBlockRootsResponse::from_ssz_bytes(&packet)?, - ))), - _ => Err(RPCError::InvalidProtocol( - "Unknown BEACON_BLOCK_ROOTS version.", - )), + "beacon_blocks" => match self.protocol.version.as_str() { + "1" => Ok(Some(RPCResponse::BeaconBlocks(packet.to_vec()))), + _ => unreachable!("Cannot negotiate an unknown version"), }, - "beacon_block_headers" => match self.protocol.version.as_str() { - "1.0.0" => Ok(Some(RPCResponse::BeaconBlockHeaders( - BeaconBlockHeadersResponse { - headers: packet.to_vec(), - }, - ))), - _ => Err(RPCError::InvalidProtocol( - "Unknown BEACON_BLOCK_HEADERS version.", - )), + "recent_beacon_blocks" => match self.protocol.version.as_str() { + "1" => Ok(Some(RPCResponse::RecentBeaconBlocks(packet.to_vec()))), + _ => unreachable!("Cannot negotiate an unknown version"), }, 
- "beacon_block_bodies" => match self.protocol.version.as_str() { - "1.0.0" => Ok(Some(RPCResponse::BeaconBlockBodies( - BeaconBlockBodiesResponse { - block_bodies: packet.to_vec(), - // this gets filled in the protocol handler - block_roots: None, - }, - ))), - _ => Err(RPCError::InvalidProtocol( - "Unknown BEACON_BLOCK_BODIES version.", - )), - }, - "beacon_chain_state" => match self.protocol.version.as_str() { - "1.0.0" => Ok(Some(RPCResponse::BeaconChainState( - BeaconChainStateResponse::from_ssz_bytes(&packet)?, - ))), - _ => Err(RPCError::InvalidProtocol( - "Unknown BEACON_CHAIN_STATE version.", - )), - }, - _ => Err(RPCError::InvalidProtocol("Unknown method")), + _ => unreachable!("Cannot negotiate an unknown protocol"), }, Ok(None) => Ok(None), Err(e) => Err(e), diff --git a/beacon_node/eth2-libp2p/src/rpc/methods.rs b/beacon_node/eth2-libp2p/src/rpc/methods.rs index 2e5a9a7ff..8fef1a75a 100644 --- a/beacon_node/eth2-libp2p/src/rpc/methods.rs +++ b/beacon_node/eth2-libp2p/src/rpc/methods.rs @@ -2,7 +2,7 @@ use ssz::{impl_decode_via_from, impl_encode_via_from}; use ssz_derive::{Decode, Encode}; -use types::{BeaconBlockBody, Epoch, EthSpec, Hash256, Slot}; +use types::{Epoch, Hash256, Slot}; /* Request/Response data structures for RPC methods */ @@ -13,23 +13,20 @@ pub type RequestId = usize; /// The HELLO request/response handshake message. #[derive(Encode, Decode, Clone, Debug)] pub struct HelloMessage { - /// The network ID of the peer. - pub network_id: u8, + /// The fork version of the chain we are broadcasting. + pub fork_version: [u8; 4], - /// The chain id for the HELLO request. - pub chain_id: u64, + /// Latest finalized root. + pub finalized_root: Hash256, - /// The peers last finalized root. - pub latest_finalized_root: Hash256, + /// Latest finalized epoch. + pub finalized_epoch: Epoch, - /// The peers last finalized epoch. - pub latest_finalized_epoch: Epoch, + /// The latest block root. + pub head_root: Hash256, - /// The peers last block root. 
- pub best_root: Hash256, - - /// The peers last slot. - pub best_slot: Slot, + /// The slot associated with the latest block root. + pub head_slot: Slot, } /// The reason given for a `Goodbye` message. @@ -74,108 +71,42 @@ impl_decode_via_from!(GoodbyeReason, u64); /// Request a number of beacon block roots from a peer. #[derive(Encode, Decode, Clone, Debug, PartialEq)] -pub struct BeaconBlockRootsRequest { - /// The starting slot of the requested blocks. - pub start_slot: Slot, +pub struct BeaconBlocksRequest { + /// The hash tree root of a block on the requested chain. + pub head_block_root: Hash256, + + /// The starting slot to request blocks. + pub start_slot: u64, /// The number of blocks from the start slot. - pub count: u64, // this must be less than 32768. //TODO: Enforce this in the lower layers + pub count: u64, + + /// The step increment to receive blocks. + /// + /// A value of 1 returns every block. + /// A value of 2 returns every second block. + /// A value of 3 returns every third block and so on. + pub step: u64, } +// TODO: Currently handle encoding/decoding of blocks in the message handler. Leave this struct +// here in case encoding/decoding of ssz requires an object. +/* /// Response containing a number of beacon block roots from a peer. #[derive(Encode, Decode, Clone, Debug, PartialEq)] -pub struct BeaconBlockRootsResponse { +pub struct BeaconBlocksResponse { /// List of requested blocks and associated slots. - pub roots: Vec, -} - -/// Contains a block root and associated slot. -#[derive(Encode, Decode, Clone, Debug, PartialEq)] -pub struct BlockRootSlot { - /// The block root. - pub block_root: Hash256, - - /// The block slot. - pub slot: Slot, -} - -/// The response of a beacon block roots request. -impl BeaconBlockRootsResponse { - /// Returns `true` if each `self.roots.slot[i]` is higher than the preceding `i`. 
- pub fn slots_are_ascending(&self) -> bool { - for window in self.roots.windows(2) { - if window[0].slot >= window[1].slot { - return false; - } - } - - true - } -} - -/// Request a number of beacon block headers from a peer. -#[derive(Encode, Decode, Clone, Debug, PartialEq)] -pub struct BeaconBlockHeadersRequest { - /// The starting header hash of the requested headers. - pub start_root: Hash256, - - /// The starting slot of the requested headers. - pub start_slot: Slot, - - /// The maximum number of headers than can be returned. - pub max_headers: u64, - - /// The maximum number of slots to skip between blocks. - pub skip_slots: u64, -} - -/// Response containing requested block headers. -#[derive(Clone, Debug, PartialEq)] -pub struct BeaconBlockHeadersResponse { - /// The list of ssz-encoded requested beacon block headers. - pub headers: Vec, + pub beacon_blocks: Vec, } +*/ /// Request a number of beacon block bodies from a peer. #[derive(Encode, Decode, Clone, Debug, PartialEq)] -pub struct BeaconBlockBodiesRequest { +pub struct RecentBeaconBlocksRequest { /// The list of beacon block bodies being requested. pub block_roots: Vec, } -/// Response containing the list of requested beacon block bodies. -#[derive(Clone, Debug, PartialEq)] -pub struct BeaconBlockBodiesResponse { - /// The list of hashes that were sent in the request and match these roots response. None when - /// sending outbound. - pub block_roots: Option>, - /// The list of ssz-encoded beacon block bodies being requested. - pub block_bodies: Vec, -} - -/// The decoded version of `BeaconBlockBodiesResponse` which is expected in `SimpleSync`. -pub struct DecodedBeaconBlockBodiesResponse { - /// The list of hashes sent in the request to get this response. - pub block_roots: Vec, - /// The valid decoded block bodies. - pub block_bodies: Vec>, -} - -/// Request values for tree hashes which yield a blocks `state_root`. 
-#[derive(Encode, Decode, Clone, Debug, PartialEq)] -pub struct BeaconChainStateRequest { - /// The tree hashes that a value is requested for. - pub hashes: Vec, -} - -/// Request values for tree hashes which yield a blocks `state_root`. -// Note: TBD -#[derive(Encode, Decode, Clone, Debug, PartialEq)] -pub struct BeaconChainStateResponse { - /// The values corresponding the to the requested tree hashes. - pub values: bool, //TBD - stubbed with encodable bool -} - /* RPC Handling and Grouping */ // Collection of enums and structs used by the Codecs to encode/decode RPC messages @@ -183,14 +114,10 @@ pub struct BeaconChainStateResponse { pub enum RPCResponse { /// A HELLO message. Hello(HelloMessage), - /// A response to a get BEACON_BLOCK_ROOTS request. - BeaconBlockRoots(BeaconBlockRootsResponse), - /// A response to a get BEACON_BLOCK_HEADERS request. - BeaconBlockHeaders(BeaconBlockHeadersResponse), - /// A response to a get BEACON_BLOCK_BODIES request. - BeaconBlockBodies(BeaconBlockBodiesResponse), - /// A response to a get BEACON_CHAIN_STATE request. - BeaconChainState(BeaconChainStateResponse), + /// A response to a get BEACON_BLOCKS request. + BeaconBlocks(Vec), + /// A response to a get RECENT_BEACON_BLOCKS request. 
+ RecentBeaconBlocks(Vec), } #[derive(Debug)] @@ -206,8 +133,8 @@ impl RPCErrorResponse { pub fn as_u8(&self) -> u8 { match self { RPCErrorResponse::Success(_) => 0, - RPCErrorResponse::InvalidRequest(_) => 2, - RPCErrorResponse::ServerError(_) => 3, + RPCErrorResponse::InvalidRequest(_) => 1, + RPCErrorResponse::ServerError(_) => 2, RPCErrorResponse::Unknown(_) => 255, } } @@ -223,8 +150,8 @@ impl RPCErrorResponse { /// Builds an RPCErrorResponse from a response code and an ErrorMessage pub fn from_error(response_code: u8, err: ErrorMessage) -> Self { match response_code { - 2 => RPCErrorResponse::InvalidRequest(err), - 3 => RPCErrorResponse::ServerError(err), + 1 => RPCErrorResponse::InvalidRequest(err), + 2 => RPCErrorResponse::ServerError(err), _ => RPCErrorResponse::Unknown(err), } } diff --git a/beacon_node/eth2-libp2p/src/rpc/protocol.rs b/beacon_node/eth2-libp2p/src/rpc/protocol.rs index b606fc743..be1efdf5d 100644 --- a/beacon_node/eth2-libp2p/src/rpc/protocol.rs +++ b/beacon_node/eth2-libp2p/src/rpc/protocol.rs @@ -16,13 +16,17 @@ use tokio::io::{AsyncRead, AsyncWrite}; use tokio::prelude::*; use tokio::timer::timeout; use tokio::util::FutureExt; +use tokio_io_timeout::TimeoutStream; /// The maximum bytes that can be sent across the RPC. const MAX_RPC_SIZE: usize = 4_194_304; // 4M /// The protocol prefix the RPC protocol id. -const PROTOCOL_PREFIX: &str = "/eth2/beacon_node/rpc"; -/// The number of seconds to wait for a request once a protocol has been established before the stream is terminated. -const REQUEST_TIMEOUT: u64 = 3; +const PROTOCOL_PREFIX: &str = "/eth2/beacon_chain/req"; +/// Time allowed for the first byte of a request to arrive before we time out (Time To First Byte). +const TTFB_TIMEOUT: u64 = 5; +/// The number of seconds to wait for the first bytes of a request once a protocol has been +/// established before the stream is terminated. 
+const REQUEST_TIMEOUT: u64 = 15; #[derive(Debug, Clone)] pub struct RPCProtocol; @@ -33,11 +37,10 @@ impl UpgradeInfo for RPCProtocol { fn protocol_info(&self) -> Self::InfoIter { vec![ - ProtocolId::new("hello", "1.0.0", "ssz"), - ProtocolId::new("goodbye", "1.0.0", "ssz"), - ProtocolId::new("beacon_block_roots", "1.0.0", "ssz"), - ProtocolId::new("beacon_block_headers", "1.0.0", "ssz"), - ProtocolId::new("beacon_block_bodies", "1.0.0", "ssz"), + ProtocolId::new("hello", "1", "ssz"), + ProtocolId::new("goodbye", "1", "ssz"), + ProtocolId::new("beacon_blocks", "1", "ssz"), + ProtocolId::new("recent_beacon_blocks", "1", "ssz"), ] } } @@ -87,7 +90,7 @@ impl ProtocolName for ProtocolId { // handler to respond to once ready. pub type InboundOutput = (RPCRequest, InboundFramed); -pub type InboundFramed = Framed, InboundCodec>; +pub type InboundFramed = Framed>, InboundCodec>; type FnAndThen = fn( (Option, InboundFramed), ) -> FutureResult, RPCError>; @@ -118,7 +121,9 @@ where "ssz" | _ => { let ssz_codec = BaseInboundCodec::new(SSZInboundCodec::new(protocol, MAX_RPC_SIZE)); let codec = InboundCodec::SSZ(ssz_codec); - Framed::new(socket, codec) + let mut timed_socket = TimeoutStream::new(socket); + timed_socket.set_read_timeout(Some(Duration::from_secs(TTFB_TIMEOUT))); + Framed::new(timed_socket, codec) .into_future() .timeout(Duration::from_secs(REQUEST_TIMEOUT)) .map_err(RPCError::from as FnMapErr) @@ -144,10 +149,8 @@ where pub enum RPCRequest { Hello(HelloMessage), Goodbye(GoodbyeReason), - BeaconBlockRoots(BeaconBlockRootsRequest), - BeaconBlockHeaders(BeaconBlockHeadersRequest), - BeaconBlockBodies(BeaconBlockBodiesRequest), - BeaconChainState(BeaconChainStateRequest), + BeaconBlocks(BeaconBlocksRequest), + RecentBeaconBlocks(RecentBeaconBlocksRequest), } impl UpgradeInfo for RPCRequest { @@ -165,22 +168,11 @@ impl RPCRequest { pub fn supported_protocols(&self) -> Vec { match self { // add more protocols when versions/encodings are supported - RPCRequest::Hello(_) 
=> vec![ - ProtocolId::new("hello", "1.0.0", "ssz"), - ProtocolId::new("goodbye", "1.0.0", "ssz"), - ], - RPCRequest::Goodbye(_) => vec![ProtocolId::new("goodbye", "1.0.0", "ssz")], - RPCRequest::BeaconBlockRoots(_) => { - vec![ProtocolId::new("beacon_block_roots", "1.0.0", "ssz")] - } - RPCRequest::BeaconBlockHeaders(_) => { - vec![ProtocolId::new("beacon_block_headers", "1.0.0", "ssz")] - } - RPCRequest::BeaconBlockBodies(_) => { - vec![ProtocolId::new("beacon_block_bodies", "1.0.0", "ssz")] - } - RPCRequest::BeaconChainState(_) => { - vec![ProtocolId::new("beacon_block_state", "1.0.0", "ssz")] + RPCRequest::Hello(_) => vec![ProtocolId::new("hello", "1", "ssz")], + RPCRequest::Goodbye(_) => vec![ProtocolId::new("goodbye", "1", "ssz")], + RPCRequest::BeaconBlocks(_) => vec![ProtocolId::new("beacon_blocks", "1", "ssz")], + RPCRequest::RecentBeaconBlocks(_) => { + vec![ProtocolId::new("recent_beacon_blocks", "1", "ssz")] } } } @@ -215,7 +207,8 @@ where ) -> Self::Future { match protocol.encoding.as_str() { "ssz" | _ => { - let ssz_codec = BaseOutboundCodec::new(SSZOutboundCodec::new(protocol, 4096)); + let ssz_codec = + BaseOutboundCodec::new(SSZOutboundCodec::new(protocol, MAX_RPC_SIZE)); let codec = OutboundCodec::SSZ(ssz_codec); Framed::new(socket, codec).send(self) } From 66419d00eadc4068243364c93d651e473954f34c Mon Sep 17 00:00:00 2001 From: Age Manning Date: Fri, 9 Aug 2019 10:39:32 +1000 Subject: [PATCH 043/305] Remove redundant slog dependencies --- beacon_node/Cargo.toml | 2 -- 1 file changed, 2 deletions(-) diff --git a/beacon_node/Cargo.toml b/beacon_node/Cargo.toml index cba73b8a4..32b7e9211 100644 --- a/beacon_node/Cargo.toml +++ b/beacon_node/Cargo.toml @@ -22,5 +22,3 @@ exit-future = "0.1.3" env_logger = "0.6.1" dirs = "2.0.1" logging = { path = "../eth2/utils/logging" } -slog-scope = "4.1.2" -slog-stdlog = "3.0.5" From 64a6e1475c567d9dd137033a93f6ab27291a0dd8 Mon Sep 17 00:00:00 2001 From: Paul Hauner Date: Sat, 10 Aug 2019 11:31:31 +1000 Subject: 
[PATCH 044/305] Various minor fixes --- beacon_node/beacon_chain/src/beacon_chain.rs | 15 ++++++++------- 1 file changed, 8 insertions(+), 7 deletions(-) diff --git a/beacon_node/beacon_chain/src/beacon_chain.rs b/beacon_node/beacon_chain/src/beacon_chain.rs index 834b04582..7fefb7690 100644 --- a/beacon_node/beacon_chain/src/beacon_chain.rs +++ b/beacon_node/beacon_chain/src/beacon_chain.rs @@ -539,11 +539,11 @@ impl BeaconChain { // (e.g., if there are skip slots between the epoch the block was created in and // the epoch for the attestation). if state.current_epoch() == attestation.data.target.epoch - && (state - .get_block_root(attestation_head_block.slot) - .map(|root| *root == attestation.data.beacon_block_root) - .unwrap_or_else(|_| false) - || attestation.data.beacon_block_root == self.head().beacon_block_root) + && (attestation.data.beacon_block_root == self.head().beacon_block_root + || state + .get_block_root(attestation_head_block.slot) + .map(|root| *root == attestation.data.beacon_block_root) + .unwrap_or_else(|_| false)) { // The head state is able to be used to validate this attestation. No need to load // anything from the database. @@ -558,6 +558,7 @@ impl BeaconChain { }; if let Some(outcome) = optional_outcome { + // Verification was already completed with an in-memory state. Return that result. outcome } else { // Use the `data.beacon_block_root` to load the state from the latest non-skipped @@ -612,13 +613,13 @@ impl BeaconChain { } } } else { - // Reject any block where we have not processed `attestation.data.beacon_block_root`. + // Drop any attestation where we have not processed `attestation.data.beacon_block_root`. // // This is likely overly restrictive, we could store the attestation for later // processing. 
warn!( self.log, - "Dropping attestation for unknown block"; + "Dropped attestation for unknown block"; "block" => format!("{}", attestation.data.beacon_block_root) ); Ok(AttestationProcessingOutcome::UnknownHeadBlock { From 0d4b58978ccd5423f8026a8436215c14279abff4 Mon Sep 17 00:00:00 2001 From: Paul Hauner Date: Sat, 10 Aug 2019 17:19:27 +1000 Subject: [PATCH 045/305] Make fork choice write lock in to read lock --- eth2/lmd_ghost/src/reduced_tree.rs | 18 +++++++++++++----- 1 file changed, 13 insertions(+), 5 deletions(-) diff --git a/eth2/lmd_ghost/src/reduced_tree.rs b/eth2/lmd_ghost/src/reduced_tree.rs index 9668620b7..822c388f6 100644 --- a/eth2/lmd_ghost/src/reduced_tree.rs +++ b/eth2/lmd_ghost/src/reduced_tree.rs @@ -111,7 +111,7 @@ where } fn latest_message(&self, validator_index: usize) -> Option<(Hash256, Slot)> { - self.core.write().latest_message(validator_index) + self.core.read().latest_message(validator_index) } } @@ -258,10 +258,10 @@ where Ok(head_node.block_hash) } - pub fn latest_message(&mut self, validator_index: usize) -> Option<(Hash256, Slot)> { - match self.latest_votes.get(validator_index) { - Some(v) => Some((v.hash.clone(), v.slot.clone())), - None => None, + pub fn latest_message(&self, validator_index: usize) -> Option<(Hash256, Slot)> { + match self.latest_votes.get_ref(validator_index) { + Some(Some(v)) => Some((v.hash.clone(), v.slot.clone())), + _ => None, } } @@ -776,6 +776,14 @@ where &self.0[i] } + pub fn get_ref(&self, i: usize) -> Option<&T> { + if i < self.0.len() { + Some(&self.0[i]) + } else { + None + } + } + pub fn insert(&mut self, i: usize, element: T) { self.ensure(i); self.0[i] = element; From 1beab66078e41a7ae46b9c304d1dd478de1716e5 Mon Sep 17 00:00:00 2001 From: Paul Hauner Date: Sat, 10 Aug 2019 17:21:54 +1000 Subject: [PATCH 046/305] Remove unused method --- beacon_node/beacon_chain/src/fork_choice.rs | 24 +++------------------ 1 file changed, 3 insertions(+), 21 deletions(-) diff --git 
a/beacon_node/beacon_chain/src/fork_choice.rs b/beacon_node/beacon_chain/src/fork_choice.rs index 640f5223d..edd426f29 100644 --- a/beacon_node/beacon_chain/src/fork_choice.rs +++ b/beacon_node/beacon_chain/src/fork_choice.rs @@ -178,27 +178,9 @@ impl ForkChoice { Ok(()) } - /// Determines whether or not the given attestation contains a latest message. - pub fn should_process_attestation( - &self, - state: &BeaconState, - attestation: &Attestation, - ) -> Result { - let validator_indices = - get_attesting_indices(state, &attestation.data, &attestation.aggregation_bits)?; - - let block_slot = state.get_attestation_data_slot(&attestation.data)?; - - Ok(validator_indices - .iter() - .find(|&&v| match self.backend.latest_message(v) { - Some((_, slot)) => block_slot > slot, - None => true, - }) - .is_some()) - } - - // Returns the latest message for a given validator + /// Returns the latest message for a given validator, if any. + /// + /// Returns `(block_root, block_slot)`. pub fn latest_message(&self, validator_index: usize) -> Option<(Hash256, Slot)> { self.backend.latest_message(validator_index) } From 963fb7bc87901a00a1bfcdb1c3120cdfd6985f14 Mon Sep 17 00:00:00 2001 From: Paul Hauner Date: Sat, 10 Aug 2019 17:36:53 +1000 Subject: [PATCH 047/305] Tidy comments --- beacon_node/beacon_chain/src/beacon_chain.rs | 20 +++++++++++++++----- 1 file changed, 15 insertions(+), 5 deletions(-) diff --git a/beacon_node/beacon_chain/src/beacon_chain.rs b/beacon_node/beacon_chain/src/beacon_chain.rs index 7fefb7690..c92a05a72 100644 --- a/beacon_node/beacon_chain/src/beacon_chain.rs +++ b/beacon_node/beacon_chain/src/beacon_chain.rs @@ -506,8 +506,16 @@ impl BeaconChain { /// Accept a new, potentially invalid attestation from the network. /// - /// If valid, the attestation is added to the `op_pool` and aggregated with another attestation - /// if possible. + /// If valid, the attestation is added to `self.op_pool` and `self.fork_choice`. 
+ /// + /// Returns an `Ok(AttestationProcessingOutcome)` if the chain was able to make a determination + /// about the `attestation` (wether it was invalid or not). Returns an `Err` if the was an + /// error during this process and no determination was able to be made. + /// + /// ## Notes + /// + /// - Whilst the `attestation` is added to fork choice, the head is not updated. That must be + /// done separately. pub fn process_attestation( &self, attestation: Attestation, @@ -538,6 +546,9 @@ impl BeaconChain { // the block doesn't necessarily need to be in the same epoch as the attestation // (e.g., if there are skip slots between the epoch the block was created in and // the epoch for the attestation). + // + // This check also ensures that the slot for `data.beacon_block_root` is not higher + // than `state.root` by ensuring that the block is in the history of `state`. if state.current_epoch() == attestation.data.target.epoch && (attestation.data.beacon_block_root == self.head().beacon_block_root || state @@ -638,9 +649,8 @@ impl BeaconChain { /// /// - `state` corresponds to the `block.state_root` identified by /// `attestation.data.beacon_block_root`. (Viz., `attestation` was created using `state`. - /// - `state.slot` is in the same epoch as `block.slot` and - /// `attestation.data.beacon_block_root` is in `state.block_roots`. (Viz., the attestation was - /// attesting to an ancestor of `state` from the same epoch as `state`. + /// - `state.slot` is in the same epoch as `data.target.epoch` and + /// `attestation.data.beacon_block_root` is in the history of `state`. /// /// Additionally, `attestation.data.beacon_block_root` **must** be available to read in /// `self.store` _and_ be the root of the given `block`. From 04bef689e33c0f3f4288a96d90bd06f91e0eacea Mon Sep 17 00:00:00 2001 From: Paul Hauner Date: Sat, 10 Aug 2019 17:47:34 +1000 Subject: [PATCH 048/305] Fix attestation prod. 
target roots change --- beacon_node/beacon_chain/src/beacon_chain.rs | 34 ++++++++++++++++---- beacon_node/beacon_chain/src/test_utils.rs | 30 +++++++++++------ beacon_node/beacon_chain/tests/tests.rs | 1 + 3 files changed, 50 insertions(+), 15 deletions(-) diff --git a/beacon_node/beacon_chain/src/beacon_chain.rs b/beacon_node/beacon_chain/src/beacon_chain.rs index c92a05a72..7488a7795 100644 --- a/beacon_node/beacon_chain/src/beacon_chain.rs +++ b/beacon_node/beacon_chain/src/beacon_chain.rs @@ -450,8 +450,9 @@ impl BeaconChain { pub fn produce_attestation_data(&self, shard: u64) -> Result { let state = self.state.read(); let head_block_root = self.head().beacon_block_root; + let head_block_slot = self.head().beacon_block.slot; - self.produce_attestation_data_for_block(shard, head_block_root, &*state) + self.produce_attestation_data_for_block(shard, head_block_root, head_block_slot, &*state) } /// Produce an `AttestationData` that attests to the chain denoted by `block_root` and `state`. @@ -462,18 +463,39 @@ impl BeaconChain { &self, shard: u64, head_block_root: Hash256, + head_block_slot: Slot, state: &BeaconState, ) -> Result { // Collect some metrics. self.metrics.attestation_production_requests.inc(); let timer = self.metrics.attestation_production_times.start_timer(); + let slots_per_epoch = T::EthSpec::slots_per_epoch(); + let current_epoch_start_slot = state.current_epoch().start_slot(slots_per_epoch); + // The `target_root` is the root of the first block of the current epoch. - let target_root = self - .rev_iter_block_roots() - .find(|(_root, slot)| *slot % T::EthSpec::slots_per_epoch() == 0) - .map(|(root, _slot)| root) - .ok_or_else(|| Error::UnableToFindTargetRoot(self.head().beacon_state.slot))?; + // + // The `state` does not know the root of the block for it's current slot (it only knows + // about blocks from prior slots). 
This creates an edge-case when the state is on the first + // slot of the epoch -- we're unable to obtain the `target_root` because it is not a prior + // root. + // + // This edge case is handled in two ways: + // + // - If the head block is on the same slot as the state, we use it's root. + // - Otherwise, assume the current slot has been skipped and use the block root from the + // prior slot. + // + // For all other cases, we simply read the `target_root` from `state.latest_block_roots`. + let target_root = if state.slot == current_epoch_start_slot { + if head_block_slot == current_epoch_start_slot { + head_block_root + } else { + *state.get_block_root(current_epoch_start_slot - 1)? + } + } else { + *state.get_block_root(current_epoch_start_slot)? + }; let target = Checkpoint { epoch: state.current_epoch(), diff --git a/beacon_node/beacon_chain/src/test_utils.rs b/beacon_node/beacon_chain/src/test_utils.rs index 6997f52ae..298c637db 100644 --- a/beacon_node/beacon_chain/src/test_utils.rs +++ b/beacon_node/beacon_chain/src/test_utils.rs @@ -194,7 +194,7 @@ where if let BlockProcessingOutcome::Processed { block_root } = outcome { head_block_root = Some(block_root); - self.add_free_attestations(&attestation_strategy, &new_state, block_root); + self.add_free_attestations(&attestation_strategy, &new_state, block_root, slot); } else { panic!("block should be successfully processed: {:?}", outcome); } @@ -282,14 +282,20 @@ where attestation_strategy: &AttestationStrategy, state: &BeaconState, head_block_root: Hash256, + head_block_slot: Slot, ) { - self.get_free_attestations(attestation_strategy, state, head_block_root) - .into_iter() - .for_each(|attestation| { - self.chain - .process_attestation(attestation) - .expect("should process attestation"); - }); + self.get_free_attestations( + attestation_strategy, + state, + head_block_root, + head_block_slot, + ) + .into_iter() + .for_each(|attestation| { + self.chain + .process_attestation(attestation) + .expect("should 
process attestation"); + }); } /// Generates a `Vec` for some attestation strategy and head_block. @@ -298,6 +304,7 @@ where attestation_strategy: &AttestationStrategy, state: &BeaconState, head_block_root: Hash256, + head_block_slot: Slot, ) -> Vec> { let spec = &self.spec; let fork = &state.fork; @@ -322,7 +329,12 @@ where if attesting_validators.contains(validator_index) { let data = self .chain - .produce_attestation_data_for_block(cc.shard, head_block_root, state) + .produce_attestation_data_for_block( + cc.shard, + head_block_root, + head_block_slot, + state, + ) .expect("should produce attestation data"); let mut aggregation_bits = BitList::with_capacity(committee_size).unwrap(); diff --git a/beacon_node/beacon_chain/tests/tests.rs b/beacon_node/beacon_chain/tests/tests.rs index c22f02563..22b667f15 100644 --- a/beacon_node/beacon_chain/tests/tests.rs +++ b/beacon_node/beacon_chain/tests/tests.rs @@ -395,6 +395,7 @@ fn attestations_with_increasing_slots() { &AttestationStrategy::AllValidators, &harness.chain.head().beacon_state, harness.chain.head().beacon_block_root, + harness.chain.head().beacon_block.slot, )); harness.advance_slot(); From 6c9ebf4b9647932379b64baee681ddf16e4dc144 Mon Sep 17 00:00:00 2001 From: Paul Hauner Date: Sun, 11 Aug 2019 09:15:39 +1000 Subject: [PATCH 049/305] Fix compile error in store iters --- beacon_node/store/src/iter.rs | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/beacon_node/store/src/iter.rs b/beacon_node/store/src/iter.rs index 3d01b7015..c97241903 100644 --- a/beacon_node/store/src/iter.rs +++ b/beacon_node/store/src/iter.rs @@ -15,7 +15,7 @@ pub trait AncestorIter { } impl<'a, U: Store, E: EthSpec> AncestorIter> for BeaconBlock { - /// Iterates across all the prior block roots of `self`, starting at the most recent and ending + /// Iterates across all available prior block roots of `self`, starting at the most recent and ending /// at genesis. 
fn try_iter_ancestor_roots(&self, store: Arc) -> Option> { let state = store.get::>(&self.state_root).ok()??; @@ -25,11 +25,11 @@ impl<'a, U: Store, E: EthSpec> AncestorIter> for } impl<'a, U: Store, E: EthSpec> AncestorIter> for BeaconState { - /// Iterates across all the prior state roots of `self`, starting at the most recent and ending + /// Iterates across all available prior state roots of `self`, starting at the most recent and ending /// at genesis. fn try_iter_ancestor_roots(&self, store: Arc) -> Option> { // The `self.clone()` here is wasteful. - Some(StateRootsIterator::owned(store, self.clone(), self.slot)) + Some(StateRootsIterator::owned(store, self.clone())) } } From 4020d13064fb6e6085e90aad23d2b1a5891f03df Mon Sep 17 00:00:00 2001 From: Paul Hauner Date: Sun, 11 Aug 2019 09:34:49 +1000 Subject: [PATCH 050/305] Reject any attestation prior to finalization --- beacon_node/beacon_chain/src/beacon_chain.rs | 44 +++++++++++++++++++- 1 file changed, 43 insertions(+), 1 deletion(-) diff --git a/beacon_node/beacon_chain/src/beacon_chain.rs b/beacon_node/beacon_chain/src/beacon_chain.rs index d30b12c98..9ccf59589 100644 --- a/beacon_node/beacon_chain/src/beacon_chain.rs +++ b/beacon_node/beacon_chain/src/beacon_chain.rs @@ -69,6 +69,11 @@ pub enum AttestationProcessingOutcome { state: Slot, attestation: Slot, }, + /// The slot is finalized, no need to import. + FinalizedSlot { + attestation: Epoch, + finalized: Epoch, + }, Invalid(AttestationValidationError), } @@ -550,6 +555,23 @@ impl BeaconChain { .store .get::>(&attestation.data.beacon_block_root)? { + let finalized_epoch = self.head().beacon_state.finalized_checkpoint.epoch; + + if attestation_head_block.slot + <= finalized_epoch.start_slot(T::EthSpec::slots_per_epoch()) + { + // Ignore any attestation where the slot of `data.beacon_block_root` is equal to or + // prior to the finalized epoch. 
+ // + // For any valid attestation if the `beacon_block_root` is prior to finalization, then + // all other parameters (source, target, etc) must all be prior to finalization and + // therefore no longer interesting. + return Ok(AttestationProcessingOutcome::FinalizedSlot { + attestation: attestation_head_block.epoch(), + finalized: finalized_epoch, + }); + } + // Attempt to process the attestation using the `self.head()` state. // // This is purely an effort to avoid loading a `BeaconState` unnecessarily from the DB. @@ -688,7 +710,27 @@ impl BeaconChain { self.metrics.attestation_processing_requests.inc(); let timer = self.metrics.attestation_processing_times.start_timer(); - let result = if let Err(e) = + // Find the highest between: + // + // - The highest valid finalized epoch we've ever seen (i.e., the head). + // - The finalized epoch that this attestation was created against. + let finalized_epoch = std::cmp::max( + self.head().beacon_state.finalized_checkpoint.epoch, + state.finalized_checkpoint.epoch, + ); + + let result = if block.slot <= finalized_epoch.start_slot(T::EthSpec::slots_per_epoch()) { + // Ignore any attestation where the slot of `data.beacon_block_root` is equal to or + // prior to the finalized epoch. + // + // For any valid attestation if the `beacon_block_root` is prior to finalization, then + // all other parameters (source, target, etc) must all be prior to finalization and + // therefore no longer interesting. 
+ Ok(AttestationProcessingOutcome::FinalizedSlot { + attestation: block.slot.epoch(T::EthSpec::slots_per_epoch()), + finalized: finalized_epoch, + }) + } else if let Err(e) = verify_attestation_for_state(state, &attestation, &self.spec, VerifySignatures::True) { warn!( From 48733917be2a59ba87b01a0bc4678347ebb96f4f Mon Sep 17 00:00:00 2001 From: Paul Hauner Date: Sun, 11 Aug 2019 12:12:19 +1000 Subject: [PATCH 051/305] Begin metrics refactor --- beacon_node/beacon_chain/Cargo.toml | 2 +- beacon_node/beacon_chain/src/beacon_chain.rs | 7 +++++++ beacon_node/beacon_chain/src/lib.rs | 6 ++++++ beacon_node/beacon_chain/src/metrics.rs | 12 ++++++++++++ beacon_node/rest_api/Cargo.toml | 1 + beacon_node/rest_api/src/lib.rs | 2 ++ beacon_node/rest_api/src/metrics.rs | 17 +++++++++++++++++ 7 files changed, 46 insertions(+), 1 deletion(-) create mode 100644 beacon_node/rest_api/src/metrics.rs diff --git a/beacon_node/beacon_chain/Cargo.toml b/beacon_node/beacon_chain/Cargo.toml index 778224a3d..43e7614b6 100644 --- a/beacon_node/beacon_chain/Cargo.toml +++ b/beacon_node/beacon_chain/Cargo.toml @@ -17,6 +17,7 @@ sloggers = { version = "^0.3" } slot_clock = { path = "../../eth2/utils/slot_clock" } eth2_ssz = "0.1" eth2_ssz_derive = "0.1" +lazy_static = "1.3.0" state_processing = { path = "../../eth2/state_processing" } tree_hash = "0.1" types = { path = "../../eth2/types" } @@ -24,4 +25,3 @@ lmd_ghost = { path = "../../eth2/lmd_ghost" } [dev-dependencies] rand = "0.5.5" -lazy_static = "1.3.0" diff --git a/beacon_node/beacon_chain/src/beacon_chain.rs b/beacon_node/beacon_chain/src/beacon_chain.rs index 9ccf59589..e31844d58 100644 --- a/beacon_node/beacon_chain/src/beacon_chain.rs +++ b/beacon_node/beacon_chain/src/beacon_chain.rs @@ -2,6 +2,7 @@ use crate::checkpoint::CheckPoint; use crate::errors::{BeaconChainError as Error, BlockProductionError}; use crate::fork_choice::{Error as ForkChoiceError, ForkChoice}; use crate::iter::{ReverseBlockRootIterator, 
ReverseStateRootIterator}; +use crate::metrics; use crate::metrics::Metrics; use crate::persisted_beacon_chain::{PersistedBeaconChain, BEACON_CHAIN_DB_KEY}; use lmd_ghost::LmdGhost; @@ -848,6 +849,10 @@ impl BeaconChain { return Ok(BlockProcessingOutcome::BlockIsAlreadyKnown); } + // Records the time taken to load the block and state from the database during block + // processing. + let db_read_timer = metrics::BLOCK_PROCESSING_DB_READ.start_timer(); + // Load the blocks parent block from the database, returning invalid if that block is not // found. let parent_block: BeaconBlock = match self.store.get(&block.parent_root)? { @@ -867,6 +872,8 @@ impl BeaconChain { .get(&parent_state_root)? .ok_or_else(|| Error::DBInconsistent(format!("Missing state {}", parent_state_root)))?; + db_read_timer.observe_duration(); + // Transition the parent state to the block slot. let mut state: BeaconState = parent_state; for _ in state.slot.as_u64()..block.slot.as_u64() { diff --git a/beacon_node/beacon_chain/src/lib.rs b/beacon_node/beacon_chain/src/lib.rs index 3188760a4..e24534a2e 100644 --- a/beacon_node/beacon_chain/src/lib.rs +++ b/beacon_node/beacon_chain/src/lib.rs @@ -1,3 +1,8 @@ +#[macro_use] +extern crate prometheus; +#[macro_use] +extern crate lazy_static; + mod beacon_chain; mod checkpoint; mod errors; @@ -13,6 +18,7 @@ pub use self::beacon_chain::{ pub use self::checkpoint::CheckPoint; pub use self::errors::{BeaconChainError, BlockProductionError}; pub use lmd_ghost; +pub use metrics::gather_metrics; pub use parking_lot; pub use slot_clock; pub use state_processing::per_block_processing::errors::{ diff --git a/beacon_node/beacon_chain/src/metrics.rs b/beacon_node/beacon_chain/src/metrics.rs index fa1718ebf..fcb564e32 100644 --- a/beacon_node/beacon_chain/src/metrics.rs +++ b/beacon_node/beacon_chain/src/metrics.rs @@ -1,6 +1,18 @@ pub use prometheus::Error; use prometheus::{Histogram, HistogramOpts, IntCounter, Opts, Registry}; +lazy_static! 
{ + pub static ref BLOCK_PROCESSING_DB_READ: Histogram = register_histogram!( + "block_processing_db_read_times", + "Time spent loading block and state from DB" + ) + .unwrap(); +} + +pub fn gather_metrics() -> Vec { + prometheus::gather() +} + pub struct Metrics { pub block_processing_requests: IntCounter, pub block_processing_successes: IntCounter, diff --git a/beacon_node/rest_api/Cargo.toml b/beacon_node/rest_api/Cargo.toml index fb6cb8413..821d6c0ea 100644 --- a/beacon_node/rest_api/Cargo.toml +++ b/beacon_node/rest_api/Cargo.toml @@ -18,6 +18,7 @@ state_processing = { path = "../../eth2/state_processing" } types = { path = "../../eth2/types" } clap = "2.32.0" http = "^0.1.17" +prometheus = { version = "^0.6", features = ["process"] } hyper = "0.12.32" futures = "0.1" exit-future = "0.1.3" diff --git a/beacon_node/rest_api/src/lib.rs b/beacon_node/rest_api/src/lib.rs index a94a8cdf4..7dc0df578 100644 --- a/beacon_node/rest_api/src/lib.rs +++ b/beacon_node/rest_api/src/lib.rs @@ -3,6 +3,7 @@ extern crate hyper; mod beacon; mod config; mod helpers; +mod metrics; mod node; mod url_query; @@ -103,6 +104,7 @@ pub fn start_server( let result = match (req.method(), path.as_ref()) { (&Method::GET, "/beacon/state") => beacon::get_state::(req), (&Method::GET, "/beacon/state_root") => beacon::get_state_root::(req), + (&Method::GET, "/metrics") => metrics::get_prometheus(req), (&Method::GET, "/node/version") => node::get_version(req), (&Method::GET, "/node/genesis_time") => node::get_genesis_time::(req), _ => Err(ApiError::MethodNotAllowed(path.clone())), diff --git a/beacon_node/rest_api/src/metrics.rs b/beacon_node/rest_api/src/metrics.rs new file mode 100644 index 000000000..1ecdf8b68 --- /dev/null +++ b/beacon_node/rest_api/src/metrics.rs @@ -0,0 +1,17 @@ +use crate::{success_response, ApiError, ApiResult}; +use hyper::{Body, Request}; +use prometheus::{Encoder, TextEncoder}; + +/// Returns the full set of Prometheus metrics for the Beacon Node application. 
+pub fn get_prometheus(_req: Request) -> ApiResult { + let mut buffer = vec![]; + let encoder = TextEncoder::new(); + + encoder + .encode(&beacon_chain::gather_metrics(), &mut buffer) + .unwrap(); + + String::from_utf8(buffer) + .map(|string| success_response(Body::from(string))) + .map_err(|e| ApiError::ServerError(format!("Failed to encode prometheus info: {:?}", e))) +} From 9995b390b5077ec8c8f92e3fe741590357bad05d Mon Sep 17 00:00:00 2001 From: Paul Hauner Date: Sun, 11 Aug 2019 14:11:13 +1000 Subject: [PATCH 052/305] Move beacon_chain to new metrics structure. --- beacon_node/beacon_chain/src/beacon_chain.rs | 41 ++-- beacon_node/beacon_chain/src/metrics.rs | 242 ++++++++----------- beacon_node/client/src/lib.rs | 5 - 3 files changed, 117 insertions(+), 171 deletions(-) diff --git a/beacon_node/beacon_chain/src/beacon_chain.rs b/beacon_node/beacon_chain/src/beacon_chain.rs index e31844d58..df9523624 100644 --- a/beacon_node/beacon_chain/src/beacon_chain.rs +++ b/beacon_node/beacon_chain/src/beacon_chain.rs @@ -3,7 +3,6 @@ use crate::errors::{BeaconChainError as Error, BlockProductionError}; use crate::fork_choice::{Error as ForkChoiceError, ForkChoice}; use crate::iter::{ReverseBlockRootIterator, ReverseStateRootIterator}; use crate::metrics; -use crate::metrics::Metrics; use crate::persisted_beacon_chain::{PersistedBeaconChain, BEACON_CHAIN_DB_KEY}; use lmd_ghost::LmdGhost; use log::trace; @@ -107,8 +106,6 @@ pub struct BeaconChain { /// A state-machine that is updated with information from the network and chooses a canonical /// head block. pub fork_choice: ForkChoice, - /// Stores metrics about this `BeaconChain`. - pub metrics: Metrics, /// Logging to CLI, etc. 
log: Logger, } @@ -158,7 +155,6 @@ impl BeaconChain { canonical_head, genesis_block_root, fork_choice: ForkChoice::new(store.clone(), &genesis_block, genesis_block_root), - metrics: Metrics::new()?, store, log, }) @@ -196,7 +192,6 @@ impl BeaconChain { canonical_head: RwLock::new(p.canonical_head), state: RwLock::new(p.state), genesis_block_root: p.genesis_block_root, - metrics: Metrics::new()?, store, log, })) @@ -473,8 +468,8 @@ impl BeaconChain { state: &BeaconState, ) -> Result { // Collect some metrics. - self.metrics.attestation_production_requests.inc(); - let timer = self.metrics.attestation_production_times.start_timer(); + metrics::ATTESTATION_PRODUCTION_REQUESTS.inc(); + let timer = metrics::ATTESTATION_PRODUCTION_TIMES.start_timer(); let slots_per_epoch = T::EthSpec::slots_per_epoch(); let current_epoch_start_slot = state.current_epoch().start_slot(slots_per_epoch); @@ -521,7 +516,7 @@ impl BeaconChain { }; // Collect some metrics. - self.metrics.attestation_production_successes.inc(); + metrics::ATTESTATION_PRODUCTION_SUCCESSES.inc(); timer.observe_duration(); Ok(AttestationData { @@ -708,8 +703,8 @@ impl BeaconChain { state: &BeaconState, block: &BeaconBlock, ) -> Result { - self.metrics.attestation_processing_requests.inc(); - let timer = self.metrics.attestation_processing_times.start_timer(); + metrics::ATTESTATION_PROCESSING_REQUESTS.inc(); + let timer = metrics::ATTESTATION_PROCESSING_TIMES.start_timer(); // Find the highest between: // @@ -754,7 +749,7 @@ impl BeaconChain { .insert_attestation(attestation, state, &self.spec)?; // Update the metrics. 
- self.metrics.attestation_processing_successes.inc(); + metrics::ATTESTATION_PROCESSING_SUCCESSES.inc(); Ok(AttestationProcessingOutcome::Processed) }; @@ -810,8 +805,8 @@ impl BeaconChain { &self, block: BeaconBlock, ) -> Result { - self.metrics.block_processing_requests.inc(); - let timer = self.metrics.block_processing_times.start_timer(); + metrics::BLOCK_PROCESSING_REQUESTS.inc(); + let timer = metrics::BLOCK_PROCESSING_TIMES.start_timer(); let finalized_slot = self .state @@ -926,10 +921,8 @@ impl BeaconChain { ) }; - self.metrics.block_processing_successes.inc(); - self.metrics - .operations_per_block_attestation - .observe(block.body.attestations.len() as f64); + metrics::BLOCK_PROCESSING_SUCCESSES.inc(); + metrics::OPERATIONS_PER_BLOCK_ATTESTATION.observe(block.body.attestations.len() as f64); timer.observe_duration(); Ok(BlockProcessingOutcome::Processed { block_root }) @@ -965,8 +958,8 @@ impl BeaconChain { produce_at_slot: Slot, randao_reveal: Signature, ) -> Result<(BeaconBlock, BeaconState), BlockProductionError> { - self.metrics.block_production_requests.inc(); - let timer = self.metrics.block_production_times.start_timer(); + metrics::BLOCK_PRODUCTION_REQUESTS.inc(); + let timer = metrics::BLOCK_PRODUCTION_TIMES.start_timer(); // If required, transition the new state to the present slot. while state.slot < produce_at_slot { @@ -1018,7 +1011,7 @@ impl BeaconChain { block.state_root = state_root; - self.metrics.block_production_successes.inc(); + metrics::BLOCK_PRODUCTION_SUCCESSES.inc(); timer.observe_duration(); Ok((block, state)) @@ -1026,10 +1019,10 @@ impl BeaconChain { /// Execute the fork choice algorithm and enthrone the result as the canonical head. pub fn fork_choice(&self) -> Result<(), Error> { - self.metrics.fork_choice_requests.inc(); + metrics::FORK_CHOICE_REQUESTS.inc(); // Start fork choice metrics timer. 
- let timer = self.metrics.fork_choice_times.start_timer(); + let timer = metrics::FORK_CHOICE_TIMES.start_timer(); // Determine the root of the block that is the head of the chain. let beacon_block_root = self.fork_choice.find_head(&self)?; @@ -1039,7 +1032,7 @@ impl BeaconChain { // If a new head was chosen. if beacon_block_root != self.head().beacon_block_root { - self.metrics.fork_choice_changed_head.inc(); + metrics::FORK_CHOICE_CHANGED_HEAD.inc(); let beacon_block: BeaconBlock = self .store @@ -1057,7 +1050,7 @@ impl BeaconChain { // If we switched to a new chain (instead of building atop the present chain). if self.head().beacon_block_root != beacon_block.parent_root { - self.metrics.fork_choice_reorg_count.inc(); + metrics::FORK_CHOICE_REORG_COUNT.inc(); warn!( self.log, "Beacon chain re-org"; diff --git a/beacon_node/beacon_chain/src/metrics.rs b/beacon_node/beacon_chain/src/metrics.rs index fcb564e32..8b8307e93 100644 --- a/beacon_node/beacon_chain/src/metrics.rs +++ b/beacon_node/beacon_chain/src/metrics.rs @@ -1,155 +1,113 @@ pub use prometheus::Error; -use prometheus::{Histogram, HistogramOpts, IntCounter, Opts, Registry}; +use prometheus::{Histogram, IntCounter}; lazy_static! 
{ + /* + * Block Processing + */ pub static ref BLOCK_PROCESSING_DB_READ: Histogram = register_histogram!( "block_processing_db_read_times", "Time spent loading block and state from DB" ) .unwrap(); + pub static ref BLOCK_PROCESSING_REQUESTS: IntCounter = register_int_counter!( + "block_processing_requests", + "Count of blocks sumbitted for processing" + ) + .unwrap(); + pub static ref BLOCK_PROCESSING_SUCCESSES: IntCounter = register_int_counter!( + "block_processing_successes", + "Count of blocks processed without error" + ) + .unwrap(); + pub static ref BLOCK_PROCESSING_TIMES: Histogram = + register_histogram!("block_processing_times", "Full runtime of block processing") + .unwrap(); + + /* + * Block Production + */ + pub static ref BLOCK_PRODUCTION_REQUESTS: IntCounter = register_int_counter!( + "block_production_requests", + "Count of all block production requests" + ) + .unwrap(); + pub static ref BLOCK_PRODUCTION_SUCCESSES: IntCounter = register_int_counter!( + "block_production_successes", + "Count of blocks sucessfully produced." 
+ ) + .unwrap(); + pub static ref BLOCK_PRODUCTION_TIMES: Histogram = + register_histogram!("block_production_times", "Full runtime of block production").unwrap(); + + /* + * Block Statistics + */ + pub static ref OPERATIONS_PER_BLOCK_ATTESTATION: Histogram = register_histogram!( + "operations_per_block_attestation", + "Number of attestations in a block" + ) + .unwrap(); + + /* + * Attestation Processing + */ + pub static ref ATTESTATION_PROCESSING_REQUESTS: IntCounter = register_int_counter!( + "attestation_processing_requests", + "Count of all attestations submitted for processing" + ) + .unwrap(); + pub static ref ATTESTATION_PROCESSING_SUCCESSES: IntCounter = register_int_counter!( + "attestation_processing_successes", + "total_attestation_processing_successes" + ) + .unwrap(); + pub static ref ATTESTATION_PROCESSING_TIMES: Histogram = register_histogram!( + "attestation_processing_times", + "Full runtime of attestation processing" + ) + .unwrap(); + + /* + * Attestation Production + */ + pub static ref ATTESTATION_PRODUCTION_REQUESTS: IntCounter = register_int_counter!( + "attestation_production_requests", + "Count of all attestation production requests" + ) + .unwrap(); + pub static ref ATTESTATION_PRODUCTION_SUCCESSES: IntCounter = register_int_counter!( + "attestation_production_successes", + "Count of attestations processed without error" + ) + .unwrap(); + pub static ref ATTESTATION_PRODUCTION_TIMES: Histogram = register_histogram!( + "attestation_production_times", + "Full runtime of attestation production" + ).unwrap(); + + /* + * Fork Choice + */ + pub static ref FORK_CHOICE_REQUESTS: IntCounter = register_int_counter!( + "fork_choice_requests", + "Count of occasions where fork choice has tried to find a head" + ) + .unwrap(); + pub static ref FORK_CHOICE_CHANGED_HEAD: IntCounter = register_int_counter!( + "fork_choice_changed_head", + "Count of occasions fork choice has found a new head" + ) + .unwrap(); + pub static ref FORK_CHOICE_REORG_COUNT: 
IntCounter = register_int_counter!( + "fork_choice_reorg_count", + "Count of occasions fork choice has switched to a different chain" + ) + .unwrap(); + pub static ref FORK_CHOICE_TIMES: Histogram = + register_histogram!("fork_choice_time", "Full runtime of fork choice").unwrap(); } pub fn gather_metrics() -> Vec { prometheus::gather() } - -pub struct Metrics { - pub block_processing_requests: IntCounter, - pub block_processing_successes: IntCounter, - pub block_processing_times: Histogram, - pub block_production_requests: IntCounter, - pub block_production_successes: IntCounter, - pub block_production_times: Histogram, - pub attestation_production_requests: IntCounter, - pub attestation_production_successes: IntCounter, - pub attestation_production_times: Histogram, - pub attestation_processing_requests: IntCounter, - pub attestation_processing_successes: IntCounter, - pub attestation_processing_times: Histogram, - pub fork_choice_requests: IntCounter, - pub fork_choice_changed_head: IntCounter, - pub fork_choice_reorg_count: IntCounter, - pub fork_choice_times: Histogram, - pub operations_per_block_attestation: Histogram, -} - -impl Metrics { - pub fn new() -> Result { - Ok(Self { - block_processing_requests: { - let opts = Opts::new("block_processing_requests", "total_blocks_processed"); - IntCounter::with_opts(opts)? - }, - block_processing_successes: { - let opts = Opts::new("block_processing_successes", "total_valid_blocks_processed"); - IntCounter::with_opts(opts)? - }, - block_processing_times: { - let opts = HistogramOpts::new("block_processing_times", "block_processing_time"); - Histogram::with_opts(opts)? - }, - block_production_requests: { - let opts = Opts::new("block_production_requests", "attempts_to_produce_new_block"); - IntCounter::with_opts(opts)? - }, - block_production_successes: { - let opts = Opts::new("block_production_successes", "blocks_successfully_produced"); - IntCounter::with_opts(opts)? 
- }, - block_production_times: { - let opts = HistogramOpts::new("block_production_times", "block_production_time"); - Histogram::with_opts(opts)? - }, - attestation_production_requests: { - let opts = Opts::new( - "attestation_production_requests", - "total_attestation_production_requests", - ); - IntCounter::with_opts(opts)? - }, - attestation_production_successes: { - let opts = Opts::new( - "attestation_production_successes", - "total_attestation_production_successes", - ); - IntCounter::with_opts(opts)? - }, - attestation_production_times: { - let opts = HistogramOpts::new( - "attestation_production_times", - "attestation_production_time", - ); - Histogram::with_opts(opts)? - }, - attestation_processing_requests: { - let opts = Opts::new( - "attestation_processing_requests", - "total_attestation_processing_requests", - ); - IntCounter::with_opts(opts)? - }, - attestation_processing_successes: { - let opts = Opts::new( - "attestation_processing_successes", - "total_attestation_processing_successes", - ); - IntCounter::with_opts(opts)? - }, - attestation_processing_times: { - let opts = HistogramOpts::new( - "attestation_processing_times", - "attestation_processing_time", - ); - Histogram::with_opts(opts)? - }, - fork_choice_requests: { - let opts = Opts::new("fork_choice_requests", "total_times_fork_choice_called"); - IntCounter::with_opts(opts)? - }, - fork_choice_changed_head: { - let opts = Opts::new( - "fork_choice_changed_head", - "total_times_fork_choice_chose_a_new_head", - ); - IntCounter::with_opts(opts)? - }, - fork_choice_reorg_count: { - let opts = Opts::new("fork_choice_reorg_count", "number_of_reorgs"); - IntCounter::with_opts(opts)? - }, - fork_choice_times: { - let opts = HistogramOpts::new("fork_choice_time", "total_time_to_run_fork_choice"); - Histogram::with_opts(opts)? 
- }, - operations_per_block_attestation: { - let opts = HistogramOpts::new( - "operations_per_block_attestation", - "count_of_attestations_per_block", - ); - Histogram::with_opts(opts)? - }, - }) - } - - pub fn register(&self, registry: &Registry) -> Result<(), Error> { - registry.register(Box::new(self.block_processing_requests.clone()))?; - registry.register(Box::new(self.block_processing_successes.clone()))?; - registry.register(Box::new(self.block_processing_times.clone()))?; - registry.register(Box::new(self.block_production_requests.clone()))?; - registry.register(Box::new(self.block_production_successes.clone()))?; - registry.register(Box::new(self.block_production_times.clone()))?; - registry.register(Box::new(self.attestation_production_requests.clone()))?; - registry.register(Box::new(self.attestation_production_successes.clone()))?; - registry.register(Box::new(self.attestation_production_times.clone()))?; - registry.register(Box::new(self.attestation_processing_requests.clone()))?; - registry.register(Box::new(self.attestation_processing_successes.clone()))?; - registry.register(Box::new(self.attestation_processing_times.clone()))?; - registry.register(Box::new(self.fork_choice_requests.clone()))?; - registry.register(Box::new(self.fork_choice_changed_head.clone()))?; - registry.register(Box::new(self.fork_choice_reorg_count.clone()))?; - registry.register(Box::new(self.fork_choice_times.clone()))?; - registry.register(Box::new(self.operations_per_block_attestation.clone()))?; - - Ok(()) - } -} diff --git a/beacon_node/client/src/lib.rs b/beacon_node/client/src/lib.rs index 65ba071fa..e06c5b60e 100644 --- a/beacon_node/client/src/lib.rs +++ b/beacon_node/client/src/lib.rs @@ -71,11 +71,6 @@ where eth2_config.spec.clone(), log.clone(), )?); - // Registry all beacon chain metrics with the global registry. 
- beacon_chain - .metrics - .register(&metrics_registry) - .expect("Failed to registry metrics"); if beacon_chain.read_slot_clock().is_none() { panic!("Cannot start client before genesis!") From e33d0703efcff8d37936968d3b7d591b4ab07b2a Mon Sep 17 00:00:00 2001 From: Paul Hauner Date: Sun, 11 Aug 2019 14:43:31 +1000 Subject: [PATCH 053/305] Make metrics not panic if already defined --- beacon_node/beacon_chain/src/beacon_chain.rs | 51 +++++---- beacon_node/beacon_chain/src/metrics.rs | 113 ++++++++++--------- 2 files changed, 88 insertions(+), 76 deletions(-) diff --git a/beacon_node/beacon_chain/src/beacon_chain.rs b/beacon_node/beacon_chain/src/beacon_chain.rs index df9523624..b0bb6a159 100644 --- a/beacon_node/beacon_chain/src/beacon_chain.rs +++ b/beacon_node/beacon_chain/src/beacon_chain.rs @@ -468,8 +468,8 @@ impl BeaconChain { state: &BeaconState, ) -> Result { // Collect some metrics. - metrics::ATTESTATION_PRODUCTION_REQUESTS.inc(); - let timer = metrics::ATTESTATION_PRODUCTION_TIMES.start_timer(); + metrics::inc_counter(&metrics::ATTESTATION_PRODUCTION_REQUESTS); + let timer = metrics::start_timer(&metrics::ATTESTATION_PRODUCTION_TIMES); let slots_per_epoch = T::EthSpec::slots_per_epoch(); let current_epoch_start_slot = state.current_epoch().start_slot(slots_per_epoch); @@ -516,8 +516,8 @@ impl BeaconChain { }; // Collect some metrics. 
- metrics::ATTESTATION_PRODUCTION_SUCCESSES.inc(); - timer.observe_duration(); + metrics::inc_counter(&metrics::ATTESTATION_PRODUCTION_SUCCESSES); + metrics::stop_timer(timer); Ok(AttestationData { beacon_block_root: head_block_root, @@ -703,8 +703,8 @@ impl BeaconChain { state: &BeaconState, block: &BeaconBlock, ) -> Result { - metrics::ATTESTATION_PROCESSING_REQUESTS.inc(); - let timer = metrics::ATTESTATION_PROCESSING_TIMES.start_timer(); + metrics::inc_counter(&metrics::ATTESTATION_PROCESSING_REQUESTS); + let timer = metrics::start_timer(&metrics::ATTESTATION_PROCESSING_TIMES); // Find the highest between: // @@ -749,12 +749,12 @@ impl BeaconChain { .insert_attestation(attestation, state, &self.spec)?; // Update the metrics. - metrics::ATTESTATION_PROCESSING_SUCCESSES.inc(); + metrics::inc_counter(&metrics::ATTESTATION_PROCESSING_SUCCESSES); Ok(AttestationProcessingOutcome::Processed) }; - timer.observe_duration(); + timer.map(|t| t.observe_duration()); result } @@ -805,8 +805,8 @@ impl BeaconChain { &self, block: BeaconBlock, ) -> Result { - metrics::BLOCK_PROCESSING_REQUESTS.inc(); - let timer = metrics::BLOCK_PROCESSING_TIMES.start_timer(); + metrics::inc_counter(&metrics::BLOCK_PROCESSING_REQUESTS); + let timer = metrics::start_timer(&metrics::BLOCK_PROCESSING_TIMES); let finalized_slot = self .state @@ -846,7 +846,7 @@ impl BeaconChain { // Records the time taken to load the block and state from the database during block // processing. - let db_read_timer = metrics::BLOCK_PROCESSING_DB_READ.start_timer(); + let db_read_timer = metrics::start_timer(&metrics::BLOCK_PROCESSING_DB_READ); // Load the blocks parent block from the database, returning invalid if that block is not // found. @@ -867,7 +867,7 @@ impl BeaconChain { .get(&parent_state_root)? .ok_or_else(|| Error::DBInconsistent(format!("Missing state {}", parent_state_root)))?; - db_read_timer.observe_duration(); + metrics::stop_timer(db_read_timer); // Transition the parent state to the block slot. 
let mut state: BeaconState = parent_state; @@ -921,9 +921,12 @@ impl BeaconChain { ) }; - metrics::BLOCK_PROCESSING_SUCCESSES.inc(); - metrics::OPERATIONS_PER_BLOCK_ATTESTATION.observe(block.body.attestations.len() as f64); - timer.observe_duration(); + metrics::inc_counter(&metrics::BLOCK_PROCESSING_SUCCESSES); + metrics::observe( + &metrics::OPERATIONS_PER_BLOCK_ATTESTATION, + block.body.attestations.len() as f64, + ); + metrics::stop_timer(timer); Ok(BlockProcessingOutcome::Processed { block_root }) } @@ -958,8 +961,8 @@ impl BeaconChain { produce_at_slot: Slot, randao_reveal: Signature, ) -> Result<(BeaconBlock, BeaconState), BlockProductionError> { - metrics::BLOCK_PRODUCTION_REQUESTS.inc(); - let timer = metrics::BLOCK_PRODUCTION_TIMES.start_timer(); + metrics::inc_counter(&metrics::BLOCK_PRODUCTION_REQUESTS); + let timer = metrics::start_timer(&metrics::BLOCK_PRODUCTION_TIMES); // If required, transition the new state to the present slot. while state.slot < produce_at_slot { @@ -1011,28 +1014,28 @@ impl BeaconChain { block.state_root = state_root; - metrics::BLOCK_PRODUCTION_SUCCESSES.inc(); - timer.observe_duration(); + metrics::inc_counter(&metrics::BLOCK_PRODUCTION_SUCCESSES); + metrics::stop_timer(timer); Ok((block, state)) } /// Execute the fork choice algorithm and enthrone the result as the canonical head. pub fn fork_choice(&self) -> Result<(), Error> { - metrics::FORK_CHOICE_REQUESTS.inc(); + metrics::inc_counter(&metrics::FORK_CHOICE_REQUESTS); // Start fork choice metrics timer. - let timer = metrics::FORK_CHOICE_TIMES.start_timer(); + let timer = metrics::start_timer(&metrics::FORK_CHOICE_TIMES); // Determine the root of the block that is the head of the chain. let beacon_block_root = self.fork_choice.find_head(&self)?; // End fork choice metrics timer. - timer.observe_duration(); + metrics::stop_timer(timer); // If a new head was chosen. 
if beacon_block_root != self.head().beacon_block_root { - metrics::FORK_CHOICE_CHANGED_HEAD.inc(); + metrics::inc_counter(&metrics::FORK_CHOICE_CHANGED_HEAD); let beacon_block: BeaconBlock = self .store @@ -1050,7 +1053,7 @@ impl BeaconChain { // If we switched to a new chain (instead of building atop the present chain). if self.head().beacon_block_root != beacon_block.parent_root { - metrics::FORK_CHOICE_REORG_COUNT.inc(); + metrics::inc_counter(&metrics::FORK_CHOICE_REORG_COUNT); warn!( self.log, "Beacon chain re-org"; diff --git a/beacon_node/beacon_chain/src/metrics.rs b/beacon_node/beacon_chain/src/metrics.rs index 8b8307e93..417c2904a 100644 --- a/beacon_node/beacon_chain/src/metrics.rs +++ b/beacon_node/beacon_chain/src/metrics.rs @@ -1,111 +1,120 @@ pub use prometheus::Error; -use prometheus::{Histogram, IntCounter}; +use prometheus::{Histogram, HistogramTimer, IntCounter, Result}; + +pub fn start_timer(histogram: &Result) -> Option { + if let Ok(histogram) = histogram { + Some(histogram.start_timer()) + } else { + None + } +} + +pub fn stop_timer(timer: Option) { + timer.map(|t| t.observe_duration()); +} + +pub fn inc_counter(counter: &Result) { + if let Ok(counter) = counter { + counter.inc(); + } +} + +pub fn observe(histogram: &Result, value: f64) { + if let Ok(histogram) = histogram { + histogram.observe(value); + } +} lazy_static! 
{ /* * Block Processing */ - pub static ref BLOCK_PROCESSING_DB_READ: Histogram = register_histogram!( + pub static ref BLOCK_PROCESSING_DB_READ: Result = register_histogram!( "block_processing_db_read_times", "Time spent loading block and state from DB" - ) - .unwrap(); - pub static ref BLOCK_PROCESSING_REQUESTS: IntCounter = register_int_counter!( + ); + pub static ref BLOCK_PROCESSING_REQUESTS: Result = register_int_counter!( "block_processing_requests", "Count of blocks sumbitted for processing" - ) - .unwrap(); - pub static ref BLOCK_PROCESSING_SUCCESSES: IntCounter = register_int_counter!( + ); + pub static ref BLOCK_PROCESSING_SUCCESSES: Result = register_int_counter!( "block_processing_successes", "Count of blocks processed without error" - ) - .unwrap(); - pub static ref BLOCK_PROCESSING_TIMES: Histogram = - register_histogram!("block_processing_times", "Full runtime of block processing") - .unwrap(); + ); + pub static ref BLOCK_PROCESSING_TIMES: Result = + register_histogram!("block_processing_times", "Full runtime of block processing"); /* * Block Production */ - pub static ref BLOCK_PRODUCTION_REQUESTS: IntCounter = register_int_counter!( + pub static ref BLOCK_PRODUCTION_REQUESTS: Result = register_int_counter!( "block_production_requests", "Count of all block production requests" - ) - .unwrap(); - pub static ref BLOCK_PRODUCTION_SUCCESSES: IntCounter = register_int_counter!( + ); + pub static ref BLOCK_PRODUCTION_SUCCESSES: Result = register_int_counter!( "block_production_successes", "Count of blocks sucessfully produced." 
- ) - .unwrap(); - pub static ref BLOCK_PRODUCTION_TIMES: Histogram = - register_histogram!("block_production_times", "Full runtime of block production").unwrap(); + ); + pub static ref BLOCK_PRODUCTION_TIMES: Result = + register_histogram!("block_production_times", "Full runtime of block production"); /* * Block Statistics */ - pub static ref OPERATIONS_PER_BLOCK_ATTESTATION: Histogram = register_histogram!( + pub static ref OPERATIONS_PER_BLOCK_ATTESTATION: Result = register_histogram!( "operations_per_block_attestation", "Number of attestations in a block" - ) - .unwrap(); + ); /* * Attestation Processing */ - pub static ref ATTESTATION_PROCESSING_REQUESTS: IntCounter = register_int_counter!( + pub static ref ATTESTATION_PROCESSING_REQUESTS: Result = register_int_counter!( "attestation_processing_requests", "Count of all attestations submitted for processing" - ) - .unwrap(); - pub static ref ATTESTATION_PROCESSING_SUCCESSES: IntCounter = register_int_counter!( + ); + pub static ref ATTESTATION_PROCESSING_SUCCESSES: Result = register_int_counter!( "attestation_processing_successes", "total_attestation_processing_successes" - ) - .unwrap(); - pub static ref ATTESTATION_PROCESSING_TIMES: Histogram = register_histogram!( + ); + pub static ref ATTESTATION_PROCESSING_TIMES: Result = register_histogram!( "attestation_processing_times", "Full runtime of attestation processing" - ) - .unwrap(); + ); /* * Attestation Production */ - pub static ref ATTESTATION_PRODUCTION_REQUESTS: IntCounter = register_int_counter!( + pub static ref ATTESTATION_PRODUCTION_REQUESTS: Result = register_int_counter!( "attestation_production_requests", "Count of all attestation production requests" - ) - .unwrap(); - pub static ref ATTESTATION_PRODUCTION_SUCCESSES: IntCounter = register_int_counter!( + ); + pub static ref ATTESTATION_PRODUCTION_SUCCESSES: Result = register_int_counter!( "attestation_production_successes", "Count of attestations processed without error" - ) - .unwrap(); - pub 
static ref ATTESTATION_PRODUCTION_TIMES: Histogram = register_histogram!( + ); + pub static ref ATTESTATION_PRODUCTION_TIMES: Result = register_histogram!( "attestation_production_times", "Full runtime of attestation production" - ).unwrap(); + ); /* * Fork Choice */ - pub static ref FORK_CHOICE_REQUESTS: IntCounter = register_int_counter!( + pub static ref FORK_CHOICE_REQUESTS: Result = register_int_counter!( "fork_choice_requests", "Count of occasions where fork choice has tried to find a head" - ) - .unwrap(); - pub static ref FORK_CHOICE_CHANGED_HEAD: IntCounter = register_int_counter!( + ); + pub static ref FORK_CHOICE_CHANGED_HEAD: Result = register_int_counter!( "fork_choice_changed_head", "Count of occasions fork choice has found a new head" - ) - .unwrap(); - pub static ref FORK_CHOICE_REORG_COUNT: IntCounter = register_int_counter!( + ); + pub static ref FORK_CHOICE_REORG_COUNT: Result = register_int_counter!( "fork_choice_reorg_count", "Count of occasions fork choice has switched to a different chain" - ) - .unwrap(); - pub static ref FORK_CHOICE_TIMES: Histogram = - register_histogram!("fork_choice_time", "Full runtime of fork choice").unwrap(); + ); + pub static ref FORK_CHOICE_TIMES: Result = + register_histogram!("fork_choice_time", "Full runtime of fork choice"); } pub fn gather_metrics() -> Vec { From 36ff115b04a90f767e08d5b52754a643aa2c950d Mon Sep 17 00:00:00 2001 From: Paul Hauner Date: Sun, 11 Aug 2019 14:46:20 +1000 Subject: [PATCH 054/305] Use global prometheus gather at rest api --- beacon_node/beacon_chain/src/metrics.rs | 4 ---- beacon_node/rest_api/src/metrics.rs | 4 +--- 2 files changed, 1 insertion(+), 7 deletions(-) diff --git a/beacon_node/beacon_chain/src/metrics.rs b/beacon_node/beacon_chain/src/metrics.rs index 417c2904a..dc2919cc4 100644 --- a/beacon_node/beacon_chain/src/metrics.rs +++ b/beacon_node/beacon_chain/src/metrics.rs @@ -116,7 +116,3 @@ lazy_static! 
{ pub static ref FORK_CHOICE_TIMES: Result = register_histogram!("fork_choice_time", "Full runtime of fork choice"); } - -pub fn gather_metrics() -> Vec { - prometheus::gather() -} diff --git a/beacon_node/rest_api/src/metrics.rs b/beacon_node/rest_api/src/metrics.rs index 1ecdf8b68..b0f5b8605 100644 --- a/beacon_node/rest_api/src/metrics.rs +++ b/beacon_node/rest_api/src/metrics.rs @@ -7,9 +7,7 @@ pub fn get_prometheus(_req: Request) -> ApiResult { let mut buffer = vec![]; let encoder = TextEncoder::new(); - encoder - .encode(&beacon_chain::gather_metrics(), &mut buffer) - .unwrap(); + encoder.encode(&prometheus::gather(), &mut buffer).unwrap(); String::from_utf8(buffer) .map(|string| success_response(Body::from(string))) From 2108895fca7c34928b7d0540d8ea0c84740d56ac Mon Sep 17 00:00:00 2001 From: Paul Hauner Date: Sun, 11 Aug 2019 15:34:10 +1000 Subject: [PATCH 055/305] Unify common metric fns into a crate --- Cargo.toml | 1 + beacon_node/beacon_chain/Cargo.toml | 2 +- beacon_node/beacon_chain/src/errors.rs | 8 --- beacon_node/beacon_chain/src/lib.rs | 3 -- beacon_node/beacon_chain/src/metrics.rs | 67 ++++++++---------------- eth2/utils/lighthouse_metrics/Cargo.toml | 11 ++++ eth2/utils/lighthouse_metrics/src/lib.rs | 49 +++++++++++++++++ 7 files changed, 83 insertions(+), 58 deletions(-) create mode 100644 eth2/utils/lighthouse_metrics/Cargo.toml create mode 100644 eth2/utils/lighthouse_metrics/src/lib.rs diff --git a/Cargo.toml b/Cargo.toml index f5ee02a17..9b7b87a0d 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -11,6 +11,7 @@ members = [ "eth2/utils/eth2_interop_keypairs", "eth2/utils/logging", "eth2/utils/eth2_hashing", + "eth2/utils/lighthouse_metrics", "eth2/utils/merkle_proof", "eth2/utils/int_to_bytes", "eth2/utils/serde_hex", diff --git a/beacon_node/beacon_chain/Cargo.toml b/beacon_node/beacon_chain/Cargo.toml index 43e7614b6..850aa2e94 100644 --- a/beacon_node/beacon_chain/Cargo.toml +++ b/beacon_node/beacon_chain/Cargo.toml @@ -7,7 +7,7 @@ edition = 
"2018" [dependencies] store = { path = "../store" } parking_lot = "0.7" -prometheus = "^0.6" +lighthouse_metrics = { path = "../../eth2/utils/lighthouse_metrics" } log = "0.4" operation_pool = { path = "../../eth2/operation_pool" } serde = "1.0" diff --git a/beacon_node/beacon_chain/src/errors.rs b/beacon_node/beacon_chain/src/errors.rs index 7a51fc425..22df90397 100644 --- a/beacon_node/beacon_chain/src/errors.rs +++ b/beacon_node/beacon_chain/src/errors.rs @@ -1,5 +1,4 @@ use crate::fork_choice::Error as ForkChoiceError; -use crate::metrics::Error as MetricsError; use state_processing::per_block_processing::errors::{ AttestationValidationError, IndexedAttestationValidationError, }; @@ -34,7 +33,6 @@ pub enum BeaconChainError { MissingBeaconBlock(Hash256), MissingBeaconState(Hash256), SlotProcessingError(SlotProcessingError), - MetricsError(String), NoStateForAttestation { beacon_block_root: Hash256, }, @@ -44,12 +42,6 @@ pub enum BeaconChainError { easy_from_to!(SlotProcessingError, BeaconChainError); -impl From for BeaconChainError { - fn from(e: MetricsError) -> BeaconChainError { - BeaconChainError::MetricsError(format!("{:?}", e)) - } -} - #[derive(Debug, PartialEq)] pub enum BlockProductionError { UnableToGetBlockRootFromState, diff --git a/beacon_node/beacon_chain/src/lib.rs b/beacon_node/beacon_chain/src/lib.rs index e24534a2e..98bd60a35 100644 --- a/beacon_node/beacon_chain/src/lib.rs +++ b/beacon_node/beacon_chain/src/lib.rs @@ -1,6 +1,4 @@ #[macro_use] -extern crate prometheus; -#[macro_use] extern crate lazy_static; mod beacon_chain; @@ -18,7 +16,6 @@ pub use self::beacon_chain::{ pub use self::checkpoint::CheckPoint; pub use self::errors::{BeaconChainError, BlockProductionError}; pub use lmd_ghost; -pub use metrics::gather_metrics; pub use parking_lot; pub use slot_clock; pub use state_processing::per_block_processing::errors::{ diff --git a/beacon_node/beacon_chain/src/metrics.rs b/beacon_node/beacon_chain/src/metrics.rs index dc2919cc4..03f4783ff 
100644 --- a/beacon_node/beacon_chain/src/metrics.rs +++ b/beacon_node/beacon_chain/src/metrics.rs @@ -1,67 +1,42 @@ -pub use prometheus::Error; -use prometheus::{Histogram, HistogramTimer, IntCounter, Result}; - -pub fn start_timer(histogram: &Result) -> Option { - if let Ok(histogram) = histogram { - Some(histogram.start_timer()) - } else { - None - } -} - -pub fn stop_timer(timer: Option) { - timer.map(|t| t.observe_duration()); -} - -pub fn inc_counter(counter: &Result) { - if let Ok(counter) = counter { - counter.inc(); - } -} - -pub fn observe(histogram: &Result, value: f64) { - if let Ok(histogram) = histogram { - histogram.observe(value); - } -} +pub use lighthouse_metrics::*; lazy_static! { /* * Block Processing */ - pub static ref BLOCK_PROCESSING_DB_READ: Result = register_histogram!( + pub static ref BLOCK_PROCESSING_DB_READ: Result = try_create_histogram( "block_processing_db_read_times", "Time spent loading block and state from DB" ); - pub static ref BLOCK_PROCESSING_REQUESTS: Result = register_int_counter!( + pub static ref BLOCK_PROCESSING_REQUESTS: Result = try_create_int_counter( "block_processing_requests", - "Count of blocks sumbitted for processing" + "Count of blocks submitted for processing" ); - pub static ref BLOCK_PROCESSING_SUCCESSES: Result = register_int_counter!( + pub static ref BLOCK_PROCESSING_SUCCESSES: Result = try_create_int_counter( "block_processing_successes", "Count of blocks processed without error" ); pub static ref BLOCK_PROCESSING_TIMES: Result = - register_histogram!("block_processing_times", "Full runtime of block processing"); + try_create_histogram("block_processing_times", "Full runtime of block processing"); /* * Block Production */ - pub static ref BLOCK_PRODUCTION_REQUESTS: Result = register_int_counter!( + pub static ref BLOCK_PRODUCTION_REQUESTS: Result = try_create_int_counter( "block_production_requests", "Count of all block production requests" ); - pub static ref BLOCK_PRODUCTION_SUCCESSES: Result = 
register_int_counter!( + pub static ref BLOCK_PRODUCTION_SUCCESSES: Result = try_create_int_counter( "block_production_successes", - "Count of blocks sucessfully produced." + "Count of blocks successfully produced." ); pub static ref BLOCK_PRODUCTION_TIMES: Result = - register_histogram!("block_production_times", "Full runtime of block production"); + try_create_histogram("block_production_times", "Full runtime of block production"); /* * Block Statistics */ - pub static ref OPERATIONS_PER_BLOCK_ATTESTATION: Result = register_histogram!( + pub static ref OPERATIONS_PER_BLOCK_ATTESTATION: Result = try_create_histogram( "operations_per_block_attestation", "Number of attestations in a block" ); @@ -69,15 +44,15 @@ lazy_static! { /* * Attestation Processing */ - pub static ref ATTESTATION_PROCESSING_REQUESTS: Result = register_int_counter!( + pub static ref ATTESTATION_PROCESSING_REQUESTS: Result = try_create_int_counter( "attestation_processing_requests", "Count of all attestations submitted for processing" ); - pub static ref ATTESTATION_PROCESSING_SUCCESSES: Result = register_int_counter!( + pub static ref ATTESTATION_PROCESSING_SUCCESSES: Result = try_create_int_counter( "attestation_processing_successes", "total_attestation_processing_successes" ); - pub static ref ATTESTATION_PROCESSING_TIMES: Result = register_histogram!( + pub static ref ATTESTATION_PROCESSING_TIMES: Result = try_create_histogram( "attestation_processing_times", "Full runtime of attestation processing" ); @@ -85,15 +60,15 @@ lazy_static! 
{ /* * Attestation Production */ - pub static ref ATTESTATION_PRODUCTION_REQUESTS: Result = register_int_counter!( + pub static ref ATTESTATION_PRODUCTION_REQUESTS: Result = try_create_int_counter( "attestation_production_requests", "Count of all attestation production requests" ); - pub static ref ATTESTATION_PRODUCTION_SUCCESSES: Result = register_int_counter!( + pub static ref ATTESTATION_PRODUCTION_SUCCESSES: Result = try_create_int_counter( "attestation_production_successes", "Count of attestations processed without error" ); - pub static ref ATTESTATION_PRODUCTION_TIMES: Result = register_histogram!( + pub static ref ATTESTATION_PRODUCTION_TIMES: Result = try_create_histogram( "attestation_production_times", "Full runtime of attestation production" ); @@ -101,18 +76,18 @@ lazy_static! { /* * Fork Choice */ - pub static ref FORK_CHOICE_REQUESTS: Result = register_int_counter!( + pub static ref FORK_CHOICE_REQUESTS: Result = try_create_int_counter( "fork_choice_requests", "Count of occasions where fork choice has tried to find a head" ); - pub static ref FORK_CHOICE_CHANGED_HEAD: Result = register_int_counter!( + pub static ref FORK_CHOICE_CHANGED_HEAD: Result = try_create_int_counter( "fork_choice_changed_head", "Count of occasions fork choice has found a new head" ); - pub static ref FORK_CHOICE_REORG_COUNT: Result = register_int_counter!( + pub static ref FORK_CHOICE_REORG_COUNT: Result = try_create_int_counter( "fork_choice_reorg_count", "Count of occasions fork choice has switched to a different chain" ); pub static ref FORK_CHOICE_TIMES: Result = - register_histogram!("fork_choice_time", "Full runtime of fork choice"); + try_create_histogram("fork_choice_time", "Full runtime of fork choice"); } diff --git a/eth2/utils/lighthouse_metrics/Cargo.toml b/eth2/utils/lighthouse_metrics/Cargo.toml new file mode 100644 index 000000000..0a24a96fb --- /dev/null +++ b/eth2/utils/lighthouse_metrics/Cargo.toml @@ -0,0 +1,11 @@ +[package] +name = "lighthouse_metrics" 
+version = "0.1.0" +authors = ["Paul Hauner "] +edition = "2018" + +# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html + +[dependencies] +lazy_static = "1.3.0" +prometheus = "^0.6" diff --git a/eth2/utils/lighthouse_metrics/src/lib.rs b/eth2/utils/lighthouse_metrics/src/lib.rs new file mode 100644 index 000000000..e6e30f6bb --- /dev/null +++ b/eth2/utils/lighthouse_metrics/src/lib.rs @@ -0,0 +1,49 @@ +use prometheus::{HistogramOpts, HistogramTimer, Opts}; + +pub use prometheus::{Histogram, IntCounter, Result}; + +pub fn try_create_int_counter(name: &str, help: &str) -> Result { + let opts = Opts::new(name, help); + let counter = IntCounter::with_opts(opts)?; + prometheus::register(Box::new(counter.clone()))?; + Ok(counter) +} + +pub fn try_create_histogram(name: &str, help: &str) -> Result { + let opts = HistogramOpts::new(name, help); + let histogram = Histogram::with_opts(opts)?; + prometheus::register(Box::new(histogram.clone()))?; + Ok(histogram) +} + +pub fn start_timer(histogram: &Result) -> Option { + if let Ok(histogram) = histogram { + Some(histogram.start_timer()) + } else { + None + } +} + +pub fn stop_timer(timer: Option) { + timer.map(|t| t.observe_duration()); +} + +pub fn inc_counter(counter: &Result) { + if let Ok(counter) = counter { + counter.inc(); + } +} + +pub fn observe(histogram: &Result, value: f64) { + if let Ok(histogram) = histogram { + histogram.observe(value); + } +} + +#[cfg(test)] +mod tests { + #[test] + fn it_works() { + assert_eq!(2 + 2, 4); + } +} From 441eb41b6bd3a36d5f673c23d392ef5a9796706d Mon Sep 17 00:00:00 2001 From: Paul Hauner Date: Sun, 11 Aug 2019 15:53:34 +1000 Subject: [PATCH 056/305] Add heavy metering to block processing --- beacon_node/beacon_chain/src/beacon_chain.rs | 34 ++++++++++++++++-- beacon_node/beacon_chain/src/metrics.rs | 36 +++++++++++++++++--- 2 files changed, 64 insertions(+), 6 deletions(-) diff --git a/beacon_node/beacon_chain/src/beacon_chain.rs 
b/beacon_node/beacon_chain/src/beacon_chain.rs index b0bb6a159..f5fb954b9 100644 --- a/beacon_node/beacon_chain/src/beacon_chain.rs +++ b/beacon_node/beacon_chain/src/beacon_chain.rs @@ -806,7 +806,7 @@ impl BeaconChain { block: BeaconBlock, ) -> Result { metrics::inc_counter(&metrics::BLOCK_PROCESSING_REQUESTS); - let timer = metrics::start_timer(&metrics::BLOCK_PROCESSING_TIMES); + let full_timer = metrics::start_timer(&metrics::BLOCK_PROCESSING_TIMES); let finalized_slot = self .state @@ -869,15 +869,25 @@ impl BeaconChain { metrics::stop_timer(db_read_timer); + let catchup_timer = metrics::start_timer(&metrics::BLOCK_PROCESSING_CATCHUP_STATE); + // Transition the parent state to the block slot. let mut state: BeaconState = parent_state; for _ in state.slot.as_u64()..block.slot.as_u64() { per_slot_processing(&mut state, &self.spec)?; } + metrics::stop_timer(catchup_timer); + + let commitee_timer = metrics::start_timer(&metrics::BLOCK_PROCESSING_COMMITTEE); + state.build_committee_cache(RelativeEpoch::Previous, &self.spec)?; state.build_committee_cache(RelativeEpoch::Current, &self.spec)?; + metrics::stop_timer(commitee_timer); + + let core_timer = metrics::start_timer(&metrics::BLOCK_PROCESSING_CORE); + // Apply the received block to its parent state (which has been transitioned into this // slot). match per_block_processing(&mut state, &block, &self.spec) { @@ -888,16 +898,29 @@ impl BeaconChain { _ => {} } + metrics::stop_timer(core_timer); + + let state_root_timer = metrics::start_timer(&metrics::BLOCK_PROCESSING_STATE_ROOT); + let state_root = state.canonical_root(); if block.state_root != state_root { return Ok(BlockProcessingOutcome::StateRootMismatch); } + metrics::stop_timer(state_root_timer); + + let db_write_timer = metrics::start_timer(&metrics::BLOCK_PROCESSING_DB_WRITE); + // Store the block and state. 
self.store.put(&block_root, &block)?; self.store.put(&state_root, &state)?; + metrics::stop_timer(db_write_timer); + + let fork_choice_register_timer = + metrics::start_timer(&metrics::BLOCK_PROCESSING_FORK_CHOICE_REGISTER); + // Register the new block with the fork choice service. if let Err(e) = self.fork_choice.process_block(&state, &block, block_root) { error!( @@ -909,6 +932,11 @@ impl BeaconChain { ) } + metrics::stop_timer(fork_choice_register_timer); + + let find_head_timer = + metrics::start_timer(&metrics::BLOCK_PROCESSING_FORK_CHOICE_FIND_HEAD); + // Execute the fork choice algorithm, enthroning a new head if discovered. // // Note: in the future we may choose to run fork-choice less often, potentially based upon @@ -921,12 +949,14 @@ impl BeaconChain { ) }; + metrics::stop_timer(find_head_timer); + metrics::inc_counter(&metrics::BLOCK_PROCESSING_SUCCESSES); metrics::observe( &metrics::OPERATIONS_PER_BLOCK_ATTESTATION, block.body.attestations.len() as f64, ); - metrics::stop_timer(timer); + metrics::stop_timer(full_timer); Ok(BlockProcessingOutcome::Processed { block_root }) } diff --git a/beacon_node/beacon_chain/src/metrics.rs b/beacon_node/beacon_chain/src/metrics.rs index 03f4783ff..38a7af9e1 100644 --- a/beacon_node/beacon_chain/src/metrics.rs +++ b/beacon_node/beacon_chain/src/metrics.rs @@ -4,10 +4,6 @@ lazy_static! { /* * Block Processing */ - pub static ref BLOCK_PROCESSING_DB_READ: Result = try_create_histogram( - "block_processing_db_read_times", - "Time spent loading block and state from DB" - ); pub static ref BLOCK_PROCESSING_REQUESTS: Result = try_create_int_counter( "block_processing_requests", "Count of blocks submitted for processing" @@ -18,6 +14,38 @@ lazy_static! 
{ ); pub static ref BLOCK_PROCESSING_TIMES: Result = try_create_histogram("block_processing_times", "Full runtime of block processing"); + pub static ref BLOCK_PROCESSING_DB_READ: Result = try_create_histogram( + "block_processing_db_read_times", + "Time spent loading block and state from DB for block processing" + ); + pub static ref BLOCK_PROCESSING_CATCHUP_STATE: Result = try_create_histogram( + "block_processing_catch-up_state_times", + "Time spent skipping slots on a state before processing a block." + ); + pub static ref BLOCK_PROCESSING_COMMITTEE: Result = try_create_histogram( + "block_processing_committee_building_times", + "Time spent building/obtaining committees for block processing." + ); + pub static ref BLOCK_PROCESSING_CORE: Result = try_create_histogram( + "block_processing_core_times", + "Time spent doing the core per_block_processing state processing." + ); + pub static ref BLOCK_PROCESSING_STATE_ROOT: Result = try_create_histogram( + "block_processing_state_root_times", + "Time spent calculating the state root when processing a block." 
+ ); + pub static ref BLOCK_PROCESSING_DB_WRITE: Result = try_create_histogram( + "block_processing_db_write_times", + "Time spent writing a newly processed block and state to DB" + ); + pub static ref BLOCK_PROCESSING_FORK_CHOICE_REGISTER: Result = try_create_histogram( + "block_processing_fork_choice_register_times", + "Time spent registering the new block with fork choice (but not finding head)" + ); + pub static ref BLOCK_PROCESSING_FORK_CHOICE_FIND_HEAD: Result = try_create_histogram( + "block_processing_fork_choice_find_head_times", + "Time spent finding the new head after processing a new block" + ); /* * Block Production From 76f42ac7ffd7d25e4c92393370b5b4717cacab49 Mon Sep 17 00:00:00 2001 From: Paul Hauner Date: Sun, 11 Aug 2019 16:15:26 +1000 Subject: [PATCH 057/305] Remove hypen from prometheus metric name --- beacon_node/beacon_chain/src/metrics.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/beacon_node/beacon_chain/src/metrics.rs b/beacon_node/beacon_chain/src/metrics.rs index 38a7af9e1..d0b6e27fc 100644 --- a/beacon_node/beacon_chain/src/metrics.rs +++ b/beacon_node/beacon_chain/src/metrics.rs @@ -19,7 +19,7 @@ lazy_static! { "Time spent loading block and state from DB for block processing" ); pub static ref BLOCK_PROCESSING_CATCHUP_STATE: Result = try_create_histogram( - "block_processing_catch-up_state_times", + "block_processing_catch_up_state_times", "Time spent skipping slots on a state before processing a block." 
); pub static ref BLOCK_PROCESSING_COMMITTEE: Result = try_create_histogram( From 42d300bdc35df563598fcd65488b5fb21342a60b Mon Sep 17 00:00:00 2001 From: Paul Hauner Date: Sun, 11 Aug 2019 17:49:32 +1000 Subject: [PATCH 058/305] Add more beacon chain metrics --- beacon_node/beacon_chain/src/beacon_chain.rs | 18 ++++++++++++++---- beacon_node/beacon_chain/src/fork_choice.rs | 20 +++++++++++++++++--- beacon_node/beacon_chain/src/metrics.rs | 20 ++++++++++++++++++++ 3 files changed, 51 insertions(+), 7 deletions(-) diff --git a/beacon_node/beacon_chain/src/beacon_chain.rs b/beacon_node/beacon_chain/src/beacon_chain.rs index f5fb954b9..6f9a2b414 100644 --- a/beacon_node/beacon_chain/src/beacon_chain.rs +++ b/beacon_node/beacon_chain/src/beacon_chain.rs @@ -1060,11 +1060,8 @@ impl BeaconChain { // Determine the root of the block that is the head of the chain. let beacon_block_root = self.fork_choice.find_head(&self)?; - // End fork choice metrics timer. - metrics::stop_timer(timer); - // If a new head was chosen. - if beacon_block_root != self.head().beacon_block_root { + let result = if beacon_block_root != self.head().beacon_block_root { metrics::inc_counter(&metrics::FORK_CHOICE_CHANGED_HEAD); let beacon_block: BeaconBlock = self @@ -1127,11 +1124,22 @@ impl BeaconChain { } } else { Ok(()) + }; + + // End fork choice metrics timer. + metrics::stop_timer(timer); + + if let Err(_) = result { + metrics::inc_counter(&metrics::FORK_CHOICE_ERRORS); } + + result } /// Update the canonical head to `new_head`. fn update_canonical_head(&self, new_head: CheckPoint) -> Result<(), Error> { + let timer = metrics::start_timer(&metrics::UPDATE_HEAD_TIMES); + // Update the checkpoint that stores the head of the chain at the time it received the // block. *self.canonical_head.write() = new_head; @@ -1158,6 +1166,8 @@ impl BeaconChain { // Save `self` to `self.store`. 
self.persist()?; + metrics::stop_timer(timer); + Ok(()) } diff --git a/beacon_node/beacon_chain/src/fork_choice.rs b/beacon_node/beacon_chain/src/fork_choice.rs index edd426f29..77fdaacdc 100644 --- a/beacon_node/beacon_chain/src/fork_choice.rs +++ b/beacon_node/beacon_chain/src/fork_choice.rs @@ -1,4 +1,4 @@ -use crate::{BeaconChain, BeaconChainTypes}; +use crate::{metrics, BeaconChain, BeaconChainTypes}; use lmd_ghost::LmdGhost; use state_processing::common::get_attesting_indices; use std::sync::Arc; @@ -46,6 +46,8 @@ impl ForkChoice { } pub fn find_head(&self, chain: &BeaconChain) -> Result { + let timer = metrics::start_timer(&metrics::FORK_CHOICE_FIND_HEAD_TIMES); + let start_slot = |epoch: Epoch| epoch.start_slot(T::EthSpec::slots_per_epoch()); // From the specification: @@ -97,9 +99,14 @@ impl ForkChoice { .map(|v| v.effective_balance) }; - self.backend + let result = self + .backend .find_head(start_block_slot, start_block_root, weight) - .map_err(Into::into) + .map_err(Into::into); + + metrics::stop_timer(timer); + + result } /// Process all attestations in the given `block`. @@ -112,6 +119,7 @@ impl ForkChoice { block: &BeaconBlock, block_root: Hash256, ) -> Result<()> { + let timer = metrics::start_timer(&metrics::FORK_CHOICE_PROCESS_BLOCK_TIMES); // Note: we never count the block as a latest message, only attestations. // // I (Paul H) do not have an explicit reference to this, but I derive it from this @@ -136,6 +144,8 @@ impl ForkChoice { // a block that has the majority of votes applied to it. self.backend.process_block(block, block_root)?; + metrics::stop_timer(timer); + Ok(()) } @@ -148,6 +158,8 @@ impl ForkChoice { attestation: &Attestation, block: &BeaconBlock, ) -> Result<()> { + let timer = metrics::start_timer(&metrics::FORK_CHOICE_PROCESS_ATTESTATION_TIMES); + let block_hash = attestation.data.beacon_block_root; // Ignore any attestations to the zero hash. 
@@ -175,6 +187,8 @@ impl ForkChoice { } } + metrics::stop_timer(timer); + Ok(()) } diff --git a/beacon_node/beacon_chain/src/metrics.rs b/beacon_node/beacon_chain/src/metrics.rs index d0b6e27fc..34f359ad8 100644 --- a/beacon_node/beacon_chain/src/metrics.rs +++ b/beacon_node/beacon_chain/src/metrics.rs @@ -108,6 +108,10 @@ lazy_static! { "fork_choice_requests", "Count of occasions where fork choice has tried to find a head" ); + pub static ref FORK_CHOICE_ERRORS: Result = try_create_int_counter( + "fork_choice_errors", + "Count of occasions where fork choice has returned an error when trying to find a head" + ); pub static ref FORK_CHOICE_CHANGED_HEAD: Result = try_create_int_counter( "fork_choice_changed_head", "Count of occasions fork choice has found a new head" @@ -118,4 +122,20 @@ lazy_static! { ); pub static ref FORK_CHOICE_TIMES: Result = try_create_histogram("fork_choice_time", "Full runtime of fork choice"); + pub static ref FORK_CHOICE_FIND_HEAD_TIMES: Result = + try_create_histogram("fork_choice_find_head_time", "Full runtime of fork choice find_head function"); + pub static ref FORK_CHOICE_PROCESS_BLOCK_TIMES: Result = try_create_histogram( + "fork_choice_process_block_time", + "Time taken to add a block and all attestations to fork choice" + ); + pub static ref FORK_CHOICE_PROCESS_ATTESTATION_TIMES: Result = try_create_histogram( + "fork_choice_process_attestation_time", + "Time taken to add an attestation to fork choice" + ); + + /* + * Head Updating + */ + pub static ref UPDATE_HEAD_TIMES: Result = + try_create_histogram("update_head_times", "Time taken to update the canonical head"); } From 78db947e6e65f4d0960ca5b9340305c663856244 Mon Sep 17 00:00:00 2001 From: Paul Hauner Date: Sun, 11 Aug 2019 18:28:57 +1000 Subject: [PATCH 059/305] Add beacon chain persistence metric --- beacon_node/beacon_chain/src/beacon_chain.rs | 4 ++++ beacon_node/beacon_chain/src/metrics.rs | 6 ++++++ 2 files changed, 10 insertions(+) diff --git 
a/beacon_node/beacon_chain/src/beacon_chain.rs b/beacon_node/beacon_chain/src/beacon_chain.rs index 6f9a2b414..96ff339a6 100644 --- a/beacon_node/beacon_chain/src/beacon_chain.rs +++ b/beacon_node/beacon_chain/src/beacon_chain.rs @@ -199,6 +199,8 @@ impl BeaconChain { /// Attempt to save this instance to `self.store`. pub fn persist(&self) -> Result<(), Error> { + let timer = metrics::start_timer(&metrics::PERSIST_CHAIN); + let p: PersistedBeaconChain = PersistedBeaconChain { canonical_head: self.canonical_head.read().clone(), op_pool: PersistedOperationPool::from_operation_pool(&self.op_pool), @@ -209,6 +211,8 @@ impl BeaconChain { let key = Hash256::from_slice(&BEACON_CHAIN_DB_KEY.as_bytes()); self.store.put(&key, &p)?; + metrics::stop_timer(timer); + Ok(()) } diff --git a/beacon_node/beacon_chain/src/metrics.rs b/beacon_node/beacon_chain/src/metrics.rs index 34f359ad8..b91125463 100644 --- a/beacon_node/beacon_chain/src/metrics.rs +++ b/beacon_node/beacon_chain/src/metrics.rs @@ -138,4 +138,10 @@ lazy_static! 
{ */ pub static ref UPDATE_HEAD_TIMES: Result = try_create_histogram("update_head_times", "Time taken to update the canonical head"); + + /* + * Persisting BeaconChain to disk + */ + pub static ref PERSIST_CHAIN: Result = + try_create_histogram("persist_chain", "Time taken to update the canonical head"); } From 6150f0ae1a549dcc1d76c831c4ca5cae03300dd7 Mon Sep 17 00:00:00 2001 From: Paul Hauner Date: Sun, 11 Aug 2019 18:29:11 +1000 Subject: [PATCH 060/305] Prune op pool on finalization --- beacon_node/beacon_chain/src/beacon_chain.rs | 3 +++ 1 file changed, 3 insertions(+) diff --git a/beacon_node/beacon_chain/src/beacon_chain.rs b/beacon_node/beacon_chain/src/beacon_chain.rs index 96ff339a6..0e0583309 100644 --- a/beacon_node/beacon_chain/src/beacon_chain.rs +++ b/beacon_node/beacon_chain/src/beacon_chain.rs @@ -1199,6 +1199,9 @@ impl BeaconChain { self.fork_choice .process_finalization(&finalized_block, finalized_block_root)?; + self.op_pool + .prune_all(&self.head().beacon_state, &self.spec); + Ok(()) } } From 7140dbc45da4a8895a155450f1777fa1655991ac Mon Sep 17 00:00:00 2001 From: Paul Hauner Date: Mon, 12 Aug 2019 13:26:58 +1000 Subject: [PATCH 061/305] Add extra prom beacon chain metrics --- beacon_node/beacon_chain/Cargo.toml | 2 +- beacon_node/beacon_chain/src/lib.rs | 1 + beacon_node/beacon_chain/src/metrics.rs | 156 ++++++++++++++++++++++- beacon_node/client/src/lib.rs | 1 + beacon_node/rest_api/src/lib.rs | 20 ++- beacon_node/rest_api/src/metrics.rs | 18 ++- beacon_node/store/Cargo.toml | 2 + beacon_node/store/src/lib.rs | 4 + beacon_node/store/src/metrics.rs | 25 ++++ eth2/utils/lighthouse_metrics/src/lib.rs | 15 ++- 10 files changed, 233 insertions(+), 11 deletions(-) create mode 100644 beacon_node/store/src/metrics.rs diff --git a/beacon_node/beacon_chain/Cargo.toml b/beacon_node/beacon_chain/Cargo.toml index 850aa2e94..462d44e92 100644 --- a/beacon_node/beacon_chain/Cargo.toml +++ b/beacon_node/beacon_chain/Cargo.toml @@ -7,6 +7,7 @@ edition = "2018" 
[dependencies] store = { path = "../store" } parking_lot = "0.7" +lazy_static = "1.3.0" lighthouse_metrics = { path = "../../eth2/utils/lighthouse_metrics" } log = "0.4" operation_pool = { path = "../../eth2/operation_pool" } @@ -17,7 +18,6 @@ sloggers = { version = "^0.3" } slot_clock = { path = "../../eth2/utils/slot_clock" } eth2_ssz = "0.1" eth2_ssz_derive = "0.1" -lazy_static = "1.3.0" state_processing = { path = "../../eth2/state_processing" } tree_hash = "0.1" types = { path = "../../eth2/types" } diff --git a/beacon_node/beacon_chain/src/lib.rs b/beacon_node/beacon_chain/src/lib.rs index 98bd60a35..1262bc537 100644 --- a/beacon_node/beacon_chain/src/lib.rs +++ b/beacon_node/beacon_chain/src/lib.rs @@ -16,6 +16,7 @@ pub use self::beacon_chain::{ pub use self::checkpoint::CheckPoint; pub use self::errors::{BeaconChainError, BlockProductionError}; pub use lmd_ghost; +pub use metrics::scrape_for_metrics; pub use parking_lot; pub use slot_clock; pub use state_processing::per_block_processing::errors::{ diff --git a/beacon_node/beacon_chain/src/metrics.rs b/beacon_node/beacon_chain/src/metrics.rs index b91125463..6ed8218f0 100644 --- a/beacon_node/beacon_chain/src/metrics.rs +++ b/beacon_node/beacon_chain/src/metrics.rs @@ -1,4 +1,6 @@ +use crate::{BeaconChain, BeaconChainTypes}; pub use lighthouse_metrics::*; +use types::{BeaconState, Epoch, EthSpec, Hash256, Slot}; lazy_static! { /* @@ -133,15 +135,157 @@ lazy_static! { "Time taken to add an attestation to fork choice" ); - /* - * Head Updating - */ - pub static ref UPDATE_HEAD_TIMES: Result = - try_create_histogram("update_head_times", "Time taken to update the canonical head"); - /* * Persisting BeaconChain to disk */ pub static ref PERSIST_CHAIN: Result = try_create_histogram("persist_chain", "Time taken to update the canonical head"); } + +// Lazy-static is split so we don't reach the crate-level recursion limit. +lazy_static! 
{ + /* + * Slot Clock + */ + pub static ref PRESENT_SLOT: Result = + try_create_int_gauge("present_slot", "The present slot, according to system time"); + pub static ref PRESENT_EPOCH: Result = + try_create_int_gauge("present_epoch", "The present epoch, according to system time"); + + /* + * Chain Head + */ + pub static ref UPDATE_HEAD_TIMES: Result = + try_create_histogram("update_head_times", "Time taken to update the canonical head"); + pub static ref HEAD_STATE_SLOT: Result = + try_create_int_gauge("head_state_slot", "Slot of the block at the head of the chain"); + pub static ref HEAD_STATE_ROOT: Result = + try_create_int_gauge("head_state_root", "Root of the block at the head of the chain"); + pub static ref HEAD_STATE_LATEST_BLOCK_SLOT: Result = + try_create_int_gauge("head_state_latest_block_slot", "Latest block slot at the head of the chain"); + pub static ref HEAD_STATE_CURRENT_JUSTIFIED_ROOT: Result = + try_create_int_gauge("head_state_current_justified_root", "Current justified root at the head of the chain"); + pub static ref HEAD_STATE_CURRENT_JUSTIFIED_EPOCH: Result = + try_create_int_gauge("head_state_current_justified_epoch", "Current justified epoch at the head of the chain"); + pub static ref HEAD_STATE_PREVIOUS_JUSTIFIED_ROOT: Result = + try_create_int_gauge("head_state_previous_justified_root", "Previous justified root at the head of the chain"); + pub static ref HEAD_STATE_PREVIOUS_JUSTIFIED_EPOCH: Result = + try_create_int_gauge("head_state_previous_justified_epoch", "Previous justified epoch at the head of the chain"); + pub static ref HEAD_STATE_FINALIZED_ROOT: Result = + try_create_int_gauge("head_state_finalized_root", "Finalized root at the head of the chain"); + pub static ref HEAD_STATE_FINALIZED_EPOCH: Result = + try_create_int_gauge("head_state_finalized_epoch", "Finalized epoch at the head of the chain"); + pub static ref HEAD_STATE_TOTAL_VALIDATORS: Result = + try_create_int_gauge("head_state_total_validators", "Count of validators 
at the head of the chain"); + pub static ref HEAD_STATE_ACTIVE_VALIDATORS: Result = + try_create_int_gauge("head_state_active_validators", "Count of active validators at the head of the chain"); + pub static ref HEAD_STATE_VALIDATOR_BALANCES: Result = + try_create_int_gauge("head_state_validator_balances", "Sum of all validator balances at the head of the chain"); + pub static ref HEAD_STATE_SLASHED_VALIDATORS: Result = + try_create_int_gauge("head_state_slashed_validators", "Count of all slashed validators at the head of the chain"); + pub static ref HEAD_STATE_WITHDRAWN_VALIDATORS: Result = + try_create_int_gauge("head_state_withdrawn_validators", "Sum of all validator balances at the head of the chain"); + pub static ref HEAD_STATE_ETH1_DEPOSIT_INDEX: Result = + try_create_int_gauge("head_state_eth1_deposit_index", "Eth1 deposit index at the head of the chain"); +} + +/// Scrape the `beacon_chain` for metrics that are not constantly updated (e.g., the present slot, +/// head state info, etc) and update the Prometheus `DEFAULT_REGISTRY`. +pub fn scrape_for_metrics(beacon_chain: &BeaconChain) { + set_gauge_by_slot( + &PRESENT_SLOT, + beacon_chain + .read_slot_clock() + .unwrap_or_else(|| Slot::new(0)), + ); + + set_gauge_by_epoch( + &PRESENT_EPOCH, + beacon_chain + .read_slot_clock() + .map(|s| s.epoch(T::EthSpec::slots_per_epoch())) + .unwrap_or_else(|| Epoch::new(0)), + ); + + scrape_head_state::( + &beacon_chain.head().beacon_state, + beacon_chain.head().beacon_state_root, + ); +} + +/// Scrape the given `state` assuming it's the head state, updating the `DEFAULT_REGISTRY`. 
+fn scrape_head_state(state: &BeaconState, state_root: Hash256) { + set_gauge_by_slot(&HEAD_STATE_SLOT, state.slot); + set_gauge_by_hash(&HEAD_STATE_ROOT, state_root); + set_gauge_by_slot( + &HEAD_STATE_LATEST_BLOCK_SLOT, + state.latest_block_header.slot, + ); + set_gauge_by_hash( + &HEAD_STATE_CURRENT_JUSTIFIED_ROOT, + state.current_justified_checkpoint.root, + ); + set_gauge_by_epoch( + &HEAD_STATE_CURRENT_JUSTIFIED_EPOCH, + state.current_justified_checkpoint.epoch, + ); + set_gauge_by_hash( + &HEAD_STATE_PREVIOUS_JUSTIFIED_ROOT, + state.previous_justified_checkpoint.root, + ); + set_gauge_by_epoch( + &HEAD_STATE_PREVIOUS_JUSTIFIED_EPOCH, + state.previous_justified_checkpoint.epoch, + ); + set_gauge_by_hash(&HEAD_STATE_FINALIZED_ROOT, state.finalized_checkpoint.root); + set_gauge_by_epoch( + &HEAD_STATE_FINALIZED_EPOCH, + state.finalized_checkpoint.epoch, + ); + set_gauge_by_usize(&HEAD_STATE_TOTAL_VALIDATORS, state.validators.len()); + set_gauge_by_u64( + &HEAD_STATE_VALIDATOR_BALANCES, + state.balances.iter().fold(0_u64, |acc, i| acc + i), + ); + set_gauge_by_usize( + &HEAD_STATE_ACTIVE_VALIDATORS, + state + .validators + .iter() + .filter(|v| v.is_active_at(state.current_epoch())) + .count(), + ); + set_gauge_by_usize( + &HEAD_STATE_SLASHED_VALIDATORS, + state.validators.iter().filter(|v| v.slashed).count(), + ); + set_gauge_by_usize( + &HEAD_STATE_WITHDRAWN_VALIDATORS, + state + .validators + .iter() + .filter(|v| v.is_withdrawable_at(state.current_epoch())) + .count(), + ); + set_gauge_by_u64(&HEAD_STATE_ETH1_DEPOSIT_INDEX, state.eth1_deposit_index); +} + +fn set_gauge_by_slot(gauge: &Result, value: Slot) { + set_gauge(gauge, value.as_u64() as i64); +} + +fn set_gauge_by_epoch(gauge: &Result, value: Epoch) { + set_gauge(gauge, value.as_u64() as i64); +} + +fn set_gauge_by_hash(gauge: &Result, value: Hash256) { + set_gauge(gauge, value.to_low_u64_le() as i64); +} + +fn set_gauge_by_usize(gauge: &Result, value: usize) { + set_gauge(gauge, value as i64); +} + 
+fn set_gauge_by_u64(gauge: &Result, value: u64) { + set_gauge(gauge, value as i64); +} diff --git a/beacon_node/client/src/lib.rs b/beacon_node/client/src/lib.rs index e06c5b60e..c74787f60 100644 --- a/beacon_node/client/src/lib.rs +++ b/beacon_node/client/src/lib.rs @@ -142,6 +142,7 @@ where &client_config.rest_api, executor, beacon_chain.clone(), + client_config.db_path().expect("unable to read datadir"), &log, ) { Ok(s) => Some(s), diff --git a/beacon_node/rest_api/src/lib.rs b/beacon_node/rest_api/src/lib.rs index 7dc0df578..fea67618b 100644 --- a/beacon_node/rest_api/src/lib.rs +++ b/beacon_node/rest_api/src/lib.rs @@ -13,6 +13,8 @@ use hyper::rt::Future; use hyper::service::service_fn_ok; use hyper::{Body, Method, Response, Server, StatusCode}; use slog::{info, o, warn}; +use std::ops::Deref; +use std::path::PathBuf; use std::sync::Arc; use tokio::runtime::TaskExecutor; use url_query::UrlQuery; @@ -68,6 +70,7 @@ pub fn start_server( config: &ApiConfig, executor: &TaskExecutor, beacon_chain: Arc>, + db_path: PathBuf, log: &slog::Logger, ) -> Result { let log = log.new(o!("Service" => "Api")); @@ -81,6 +84,8 @@ pub fn start_server( Ok(()) }); + let db_path = DBPath(db_path); + // Get the address to bind to let bind_addr = (config.listen_address, config.port).into(); @@ -91,12 +96,14 @@ pub fn start_server( let service = move || { let log = server_log.clone(); let beacon_chain = server_bc.clone(); + let db_path = db_path.clone(); // Create a simple handler for the router, inject our stateful objects into the request. 
service_fn_ok(move |mut req| { req.extensions_mut().insert::(log.clone()); req.extensions_mut() .insert::>>(beacon_chain.clone()); + req.extensions_mut().insert::(db_path.clone()); let path = req.uri().path().to_string(); @@ -104,7 +111,7 @@ pub fn start_server( let result = match (req.method(), path.as_ref()) { (&Method::GET, "/beacon/state") => beacon::get_state::(req), (&Method::GET, "/beacon/state_root") => beacon::get_state_root::(req), - (&Method::GET, "/metrics") => metrics::get_prometheus(req), + (&Method::GET, "/metrics") => metrics::get_prometheus::(req), (&Method::GET, "/node/version") => node::get_version(req), (&Method::GET, "/node/genesis_time") => node::get_genesis_time::(req), _ => Err(ApiError::MethodNotAllowed(path.clone())), @@ -154,3 +161,14 @@ fn success_response(body: Body) -> Response { .body(body) .expect("We should always be able to make response from the success body.") } + +#[derive(Clone)] +pub struct DBPath(PathBuf); + +impl Deref for DBPath { + type Target = PathBuf; + + fn deref(&self) -> &Self::Target { + &self.0 + } +} diff --git a/beacon_node/rest_api/src/metrics.rs b/beacon_node/rest_api/src/metrics.rs index b0f5b8605..0cd700c44 100644 --- a/beacon_node/rest_api/src/metrics.rs +++ b/beacon_node/rest_api/src/metrics.rs @@ -1,12 +1,26 @@ -use crate::{success_response, ApiError, ApiResult}; +use crate::{success_response, ApiError, ApiResult, DBPath}; +use beacon_chain::{BeaconChain, BeaconChainTypes}; use hyper::{Body, Request}; use prometheus::{Encoder, TextEncoder}; +use std::sync::Arc; /// Returns the full set of Prometheus metrics for the Beacon Node application. 
-pub fn get_prometheus(_req: Request) -> ApiResult { +pub fn get_prometheus(req: Request) -> ApiResult { let mut buffer = vec![]; let encoder = TextEncoder::new(); + let beacon_chain = req + .extensions() + .get::>>() + .ok_or_else(|| ApiError::ServerError("Beacon chain extension missing".to_string()))?; + let db_path = req + .extensions() + .get::() + .ok_or_else(|| ApiError::ServerError("DBPath extension missing".to_string()))?; + + store::scrape_for_metrics(&db_path); + beacon_chain::scrape_for_metrics(&beacon_chain); + encoder.encode(&prometheus::gather(), &mut buffer).unwrap(); String::from_utf8(buffer) diff --git a/beacon_node/store/Cargo.toml b/beacon_node/store/Cargo.toml index 9607e8b8e..cd9711253 100644 --- a/beacon_node/store/Cargo.toml +++ b/beacon_node/store/Cargo.toml @@ -15,3 +15,5 @@ eth2_ssz = "0.1" eth2_ssz_derive = "0.1" tree_hash = "0.1" types = { path = "../../eth2/types" } +lazy_static = "1.3.0" +lighthouse_metrics = { path = "../../eth2/utils/lighthouse_metrics" } diff --git a/beacon_node/store/src/lib.rs b/beacon_node/store/src/lib.rs index 5b8d58320..9c0e3cbae 100644 --- a/beacon_node/store/src/lib.rs +++ b/beacon_node/store/src/lib.rs @@ -7,18 +7,22 @@ //! //! Provides a simple API for storing/retrieving all types that sometimes needs type-hints. See //! tests for implementation examples. +#[macro_use] +extern crate lazy_static; mod block_at_slot; mod errors; mod impls; mod leveldb_store; mod memory_store; +mod metrics; pub mod iter; pub use self::leveldb_store::LevelDB as DiskStore; pub use self::memory_store::MemoryStore; pub use errors::Error; +pub use metrics::scrape_for_metrics; pub use types::*; /// An object capable of storing and retrieving objects implementing `StoreItem`. 
diff --git a/beacon_node/store/src/metrics.rs b/beacon_node/store/src/metrics.rs new file mode 100644 index 000000000..b6a055f10 --- /dev/null +++ b/beacon_node/store/src/metrics.rs @@ -0,0 +1,25 @@ +pub use lighthouse_metrics::{set_gauge, try_create_int_gauge, *}; + +use std::fs; +use std::path::PathBuf; + +lazy_static! { + pub static ref DISK_DB_SIZE: Result = + try_create_int_gauge("database_size", "Size of the on-disk database (bytes)"); +} + +/// Updates the global metrics registry with store-related information. +pub fn scrape_for_metrics(db_path: &PathBuf) { + let db_size = if let Ok(iter) = fs::read_dir(db_path) { + iter.filter_map(std::result::Result::ok) + .map(size_of_dir_entry) + .fold(0_u64, |sum, val| sum + val) + } else { + 0 + }; + set_gauge(&DISK_DB_SIZE, db_size as i64); +} + +fn size_of_dir_entry(dir: fs::DirEntry) -> u64 { + dir.metadata().map(|m| m.len()).unwrap_or(0) +} diff --git a/eth2/utils/lighthouse_metrics/src/lib.rs b/eth2/utils/lighthouse_metrics/src/lib.rs index e6e30f6bb..d55fcd3e2 100644 --- a/eth2/utils/lighthouse_metrics/src/lib.rs +++ b/eth2/utils/lighthouse_metrics/src/lib.rs @@ -1,6 +1,6 @@ use prometheus::{HistogramOpts, HistogramTimer, Opts}; -pub use prometheus::{Histogram, IntCounter, Result}; +pub use prometheus::{Histogram, IntCounter, IntGauge, Result}; pub fn try_create_int_counter(name: &str, help: &str) -> Result { let opts = Opts::new(name, help); @@ -9,6 +9,13 @@ pub fn try_create_int_counter(name: &str, help: &str) -> Result { Ok(counter) } +pub fn try_create_int_gauge(name: &str, help: &str) -> Result { + let opts = Opts::new(name, help); + let gauge = IntGauge::with_opts(opts)?; + prometheus::register(Box::new(gauge.clone()))?; + Ok(gauge) +} + pub fn try_create_histogram(name: &str, help: &str) -> Result { let opts = HistogramOpts::new(name, help); let histogram = Histogram::with_opts(opts)?; @@ -34,6 +41,12 @@ pub fn inc_counter(counter: &Result) { } } +pub fn set_gauge(gauge: &Result, value: i64) { + if let 
Ok(gauge) = gauge { + gauge.set(value); + } +} + pub fn observe(histogram: &Result, value: f64) { if let Ok(histogram) = histogram { histogram.observe(value); From 913ee4694eb4310b08def5feef4f111233b6c3e5 Mon Sep 17 00:00:00 2001 From: Paul Hauner Date: Mon, 12 Aug 2019 13:35:16 +1000 Subject: [PATCH 062/305] Prefix BeaconChain metrics with "beacon_" --- beacon_node/beacon_chain/src/metrics.rs | 94 ++++++++++++------------- 1 file changed, 47 insertions(+), 47 deletions(-) diff --git a/beacon_node/beacon_chain/src/metrics.rs b/beacon_node/beacon_chain/src/metrics.rs index 6ed8218f0..227f1090f 100644 --- a/beacon_node/beacon_chain/src/metrics.rs +++ b/beacon_node/beacon_chain/src/metrics.rs @@ -7,45 +7,45 @@ lazy_static! { * Block Processing */ pub static ref BLOCK_PROCESSING_REQUESTS: Result = try_create_int_counter( - "block_processing_requests", + "beacon_block_processing_requests", "Count of blocks submitted for processing" ); pub static ref BLOCK_PROCESSING_SUCCESSES: Result = try_create_int_counter( - "block_processing_successes", + "beacon_block_processing_successes", "Count of blocks processed without error" ); pub static ref BLOCK_PROCESSING_TIMES: Result = try_create_histogram("block_processing_times", "Full runtime of block processing"); pub static ref BLOCK_PROCESSING_DB_READ: Result = try_create_histogram( - "block_processing_db_read_times", + "beacon_block_processing_db_read_times", "Time spent loading block and state from DB for block processing" ); pub static ref BLOCK_PROCESSING_CATCHUP_STATE: Result = try_create_histogram( - "block_processing_catch_up_state_times", + "beacon_block_processing_catch_up_state_times", "Time spent skipping slots on a state before processing a block." ); pub static ref BLOCK_PROCESSING_COMMITTEE: Result = try_create_histogram( - "block_processing_committee_building_times", + "beacon_block_processing_committee_building_times", "Time spent building/obtaining committees for block processing." 
); pub static ref BLOCK_PROCESSING_CORE: Result = try_create_histogram( - "block_processing_core_times", + "beacon_block_processing_core_times", "Time spent doing the core per_block_processing state processing." ); pub static ref BLOCK_PROCESSING_STATE_ROOT: Result = try_create_histogram( - "block_processing_state_root_times", + "beacon_block_processing_state_root_times", "Time spent calculating the state root when processing a block." ); pub static ref BLOCK_PROCESSING_DB_WRITE: Result = try_create_histogram( - "block_processing_db_write_times", + "beacon_block_processing_db_write_times", "Time spent writing a newly processed block and state to DB" ); pub static ref BLOCK_PROCESSING_FORK_CHOICE_REGISTER: Result = try_create_histogram( - "block_processing_fork_choice_register_times", + "beacon_block_processing_fork_choice_register_times", "Time spent registering the new block with fork choice (but not finding head)" ); pub static ref BLOCK_PROCESSING_FORK_CHOICE_FIND_HEAD: Result = try_create_histogram( - "block_processing_fork_choice_find_head_times", + "beacon_block_processing_fork_choice_find_head_times", "Time spent finding the new head after processing a new block" ); @@ -53,21 +53,21 @@ lazy_static! { * Block Production */ pub static ref BLOCK_PRODUCTION_REQUESTS: Result = try_create_int_counter( - "block_production_requests", + "beacon_block_production_requests", "Count of all block production requests" ); pub static ref BLOCK_PRODUCTION_SUCCESSES: Result = try_create_int_counter( - "block_production_successes", + "beacon_block_production_successes", "Count of blocks successfully produced." 
); pub static ref BLOCK_PRODUCTION_TIMES: Result = - try_create_histogram("block_production_times", "Full runtime of block production"); + try_create_histogram("beacon_block_production_times", "Full runtime of block production"); /* * Block Statistics */ pub static ref OPERATIONS_PER_BLOCK_ATTESTATION: Result = try_create_histogram( - "operations_per_block_attestation", + "beacon_operations_per_block_attestation", "Number of attestations in a block" ); @@ -75,15 +75,15 @@ lazy_static! { * Attestation Processing */ pub static ref ATTESTATION_PROCESSING_REQUESTS: Result = try_create_int_counter( - "attestation_processing_requests", + "beacon_attestation_processing_requests", "Count of all attestations submitted for processing" ); pub static ref ATTESTATION_PROCESSING_SUCCESSES: Result = try_create_int_counter( - "attestation_processing_successes", + "beacon_attestation_processing_successes", "total_attestation_processing_successes" ); pub static ref ATTESTATION_PROCESSING_TIMES: Result = try_create_histogram( - "attestation_processing_times", + "beacon_attestation_processing_times", "Full runtime of attestation processing" ); @@ -91,15 +91,15 @@ lazy_static! { * Attestation Production */ pub static ref ATTESTATION_PRODUCTION_REQUESTS: Result = try_create_int_counter( - "attestation_production_requests", + "beacon_attestation_production_requests", "Count of all attestation production requests" ); pub static ref ATTESTATION_PRODUCTION_SUCCESSES: Result = try_create_int_counter( - "attestation_production_successes", + "beacon_attestation_production_successes", "Count of attestations processed without error" ); pub static ref ATTESTATION_PRODUCTION_TIMES: Result = try_create_histogram( - "attestation_production_times", + "beacon_attestation_production_times", "Full runtime of attestation production" ); @@ -107,31 +107,31 @@ lazy_static! 
{ * Fork Choice */ pub static ref FORK_CHOICE_REQUESTS: Result = try_create_int_counter( - "fork_choice_requests", + "beacon_fork_choice_requests", "Count of occasions where fork choice has tried to find a head" ); pub static ref FORK_CHOICE_ERRORS: Result = try_create_int_counter( - "fork_choice_errors", + "beacon_fork_choice_errors", "Count of occasions where fork choice has returned an error when trying to find a head" ); pub static ref FORK_CHOICE_CHANGED_HEAD: Result = try_create_int_counter( - "fork_choice_changed_head", + "beacon_fork_choice_changed_head", "Count of occasions fork choice has found a new head" ); pub static ref FORK_CHOICE_REORG_COUNT: Result = try_create_int_counter( - "fork_choice_reorg_count", + "beacon_fork_choice_reorg_count", "Count of occasions fork choice has switched to a different chain" ); pub static ref FORK_CHOICE_TIMES: Result = - try_create_histogram("fork_choice_time", "Full runtime of fork choice"); + try_create_histogram("beacon_fork_choice_time", "Full runtime of fork choice"); pub static ref FORK_CHOICE_FIND_HEAD_TIMES: Result = - try_create_histogram("fork_choice_find_head_time", "Full runtime of fork choice find_head function"); + try_create_histogram("beacon_fork_choice_find_head_time", "Full runtime of fork choice find_head function"); pub static ref FORK_CHOICE_PROCESS_BLOCK_TIMES: Result = try_create_histogram( - "fork_choice_process_block_time", + "beacon_fork_choice_process_block_time", "Time taken to add a block and all attestations to fork choice" ); pub static ref FORK_CHOICE_PROCESS_ATTESTATION_TIMES: Result = try_create_histogram( - "fork_choice_process_attestation_time", + "beacon_fork_choice_process_attestation_time", "Time taken to add an attestation to fork choice" ); @@ -139,7 +139,7 @@ lazy_static! 
{ * Persisting BeaconChain to disk */ pub static ref PERSIST_CHAIN: Result = - try_create_histogram("persist_chain", "Time taken to update the canonical head"); + try_create_histogram("beacon_persist_chain", "Time taken to update the canonical head"); } // Lazy-static is split so we don't reach the crate-level recursion limit. @@ -148,45 +148,45 @@ lazy_static! { * Slot Clock */ pub static ref PRESENT_SLOT: Result = - try_create_int_gauge("present_slot", "The present slot, according to system time"); + try_create_int_gauge("beacon_present_slot", "The present slot, according to system time"); pub static ref PRESENT_EPOCH: Result = - try_create_int_gauge("present_epoch", "The present epoch, according to system time"); + try_create_int_gauge("beacon_present_epoch", "The present epoch, according to system time"); /* * Chain Head */ pub static ref UPDATE_HEAD_TIMES: Result = - try_create_histogram("update_head_times", "Time taken to update the canonical head"); + try_create_histogram("beacon_update_head_times", "Time taken to update the canonical head"); pub static ref HEAD_STATE_SLOT: Result = - try_create_int_gauge("head_state_slot", "Slot of the block at the head of the chain"); + try_create_int_gauge("beacon_head_state_slot", "Slot of the block at the head of the chain"); pub static ref HEAD_STATE_ROOT: Result = - try_create_int_gauge("head_state_root", "Root of the block at the head of the chain"); + try_create_int_gauge("beacon_head_state_root", "Root of the block at the head of the chain"); pub static ref HEAD_STATE_LATEST_BLOCK_SLOT: Result = - try_create_int_gauge("head_state_latest_block_slot", "Latest block slot at the head of the chain"); + try_create_int_gauge("beacon_head_state_latest_block_slot", "Latest block slot at the head of the chain"); pub static ref HEAD_STATE_CURRENT_JUSTIFIED_ROOT: Result = - try_create_int_gauge("head_state_current_justified_root", "Current justified root at the head of the chain"); + 
try_create_int_gauge("beacon_head_state_current_justified_root", "Current justified root at the head of the chain"); pub static ref HEAD_STATE_CURRENT_JUSTIFIED_EPOCH: Result = - try_create_int_gauge("head_state_current_justified_epoch", "Current justified epoch at the head of the chain"); + try_create_int_gauge("beacon_head_state_current_justified_epoch", "Current justified epoch at the head of the chain"); pub static ref HEAD_STATE_PREVIOUS_JUSTIFIED_ROOT: Result = - try_create_int_gauge("head_state_previous_justified_root", "Previous justified root at the head of the chain"); + try_create_int_gauge("beacon_head_state_previous_justified_root", "Previous justified root at the head of the chain"); pub static ref HEAD_STATE_PREVIOUS_JUSTIFIED_EPOCH: Result = - try_create_int_gauge("head_state_previous_justified_epoch", "Previous justified epoch at the head of the chain"); + try_create_int_gauge("beacon_head_state_previous_justified_epoch", "Previous justified epoch at the head of the chain"); pub static ref HEAD_STATE_FINALIZED_ROOT: Result = - try_create_int_gauge("head_state_finalized_root", "Finalized root at the head of the chain"); + try_create_int_gauge("beacon_head_state_finalized_root", "Finalized root at the head of the chain"); pub static ref HEAD_STATE_FINALIZED_EPOCH: Result = - try_create_int_gauge("head_state_finalized_epoch", "Finalized epoch at the head of the chain"); + try_create_int_gauge("beacon_head_state_finalized_epoch", "Finalized epoch at the head of the chain"); pub static ref HEAD_STATE_TOTAL_VALIDATORS: Result = - try_create_int_gauge("head_state_total_validators", "Count of validators at the head of the chain"); + try_create_int_gauge("beacon_head_state_total_validators", "Count of validators at the head of the chain"); pub static ref HEAD_STATE_ACTIVE_VALIDATORS: Result = - try_create_int_gauge("head_state_active_validators", "Count of active validators at the head of the chain"); + 
try_create_int_gauge("beacon_head_state_active_validators", "Count of active validators at the head of the chain"); pub static ref HEAD_STATE_VALIDATOR_BALANCES: Result = - try_create_int_gauge("head_state_validator_balances", "Sum of all validator balances at the head of the chain"); + try_create_int_gauge("beacon_head_state_validator_balances", "Sum of all validator balances at the head of the chain"); pub static ref HEAD_STATE_SLASHED_VALIDATORS: Result = - try_create_int_gauge("head_state_slashed_validators", "Count of all slashed validators at the head of the chain"); + try_create_int_gauge("beacon_head_state_slashed_validators", "Count of all slashed validators at the head of the chain"); pub static ref HEAD_STATE_WITHDRAWN_VALIDATORS: Result = - try_create_int_gauge("head_state_withdrawn_validators", "Sum of all validator balances at the head of the chain"); + try_create_int_gauge("beacon_head_state_withdrawn_validators", "Sum of all validator balances at the head of the chain"); pub static ref HEAD_STATE_ETH1_DEPOSIT_INDEX: Result = - try_create_int_gauge("head_state_eth1_deposit_index", "Eth1 deposit index at the head of the chain"); + try_create_int_gauge("beacon_head_state_eth1_deposit_index", "Eth1 deposit index at the head of the chain"); } /// Scrape the `beacon_chain` for metrics that are not constantly updated (e.g., the present slot, From 0b4a8893a4a94826b723e39b29ea8aaf64bb8912 Mon Sep 17 00:00:00 2001 From: Paul Hauner Date: Mon, 12 Aug 2019 13:49:09 +1000 Subject: [PATCH 063/305] Add more store metrics --- beacon_node/store/src/leveldb_store.rs | 22 ++++++++++++++++++-- beacon_node/store/src/metrics.rs | 26 +++++++++++++++++++++++- eth2/utils/lighthouse_metrics/src/lib.rs | 6 ++++++ 3 files changed, 51 insertions(+), 3 deletions(-) diff --git a/beacon_node/store/src/leveldb_store.rs b/beacon_node/store/src/leveldb_store.rs index 699861e3a..a085d845a 100644 --- a/beacon_node/store/src/leveldb_store.rs +++ b/beacon_node/store/src/leveldb_store.rs 
@@ -1,4 +1,5 @@ use super::*; +use crate::metrics; use db_key::Key; use leveldb::database::kv::KV; use leveldb::database::Database; @@ -62,15 +63,27 @@ impl Store for LevelDB { fn get_bytes(&self, col: &str, key: &[u8]) -> Result>, Error> { let column_key = Self::get_key_for_col(col, key); - self.db + metrics::inc_counter(&metrics::DISK_DB_READ_COUNT); + + let result = self + .db .get(self.read_options(), column_key) - .map_err(Into::into) + .map_err(Into::into); + + if let Ok(Some(bytes)) = &result { + metrics::inc_counter_by(&metrics::DISK_DB_READ_BYTES, bytes.len() as i64) + } + + result } /// Store some `value` in `column`, indexed with `key`. fn put_bytes(&self, col: &str, key: &[u8], val: &[u8]) -> Result<(), Error> { let column_key = Self::get_key_for_col(col, key); + metrics::inc_counter(&metrics::DISK_DB_WRITE_COUNT); + metrics::inc_counter_by(&metrics::DISK_DB_WRITE_BYTES, val.len() as i64); + self.db .put(self.write_options(), column_key, val) .map_err(Into::into) @@ -80,6 +93,8 @@ impl Store for LevelDB { fn key_exists(&self, col: &str, key: &[u8]) -> Result { let column_key = Self::get_key_for_col(col, key); + metrics::inc_counter(&metrics::DISK_DB_EXISTS_COUNT); + self.db .get(self.read_options(), column_key) .map_err(Into::into) @@ -89,6 +104,9 @@ impl Store for LevelDB { /// Removes `key` from `column`. fn key_delete(&self, col: &str, key: &[u8]) -> Result<(), Error> { let column_key = Self::get_key_for_col(col, key); + + metrics::inc_counter(&metrics::DISK_DB_DELETE_COUNT); + self.db .delete(self.write_options(), column_key) .map_err(Into::into) diff --git a/beacon_node/store/src/metrics.rs b/beacon_node/store/src/metrics.rs index b6a055f10..430e9c38e 100644 --- a/beacon_node/store/src/metrics.rs +++ b/beacon_node/store/src/metrics.rs @@ -5,7 +5,31 @@ use std::path::PathBuf; lazy_static! 
{ pub static ref DISK_DB_SIZE: Result = - try_create_int_gauge("database_size", "Size of the on-disk database (bytes)"); + try_create_int_gauge("store_disk_db_size", "Size of the on-disk database (bytes)"); + pub static ref DISK_DB_WRITE_BYTES: Result = try_create_int_counter( + "store_disk_db_write_bytes", + "Number of bytes attempted to be written to the on-disk DB" + ); + pub static ref DISK_DB_READ_BYTES: Result = try_create_int_counter( + "store_disk_db_read_bytes", + "Number of bytes read from the on-disk DB" + ); + pub static ref DISK_DB_READ_COUNT: Result = try_create_int_counter( + "store_disk_db_read_count", + "Total number of reads to the on-disk DB" + ); + pub static ref DISK_DB_WRITE_COUNT: Result = try_create_int_counter( + "store_disk_db_write_count", + "Total number of writes to the on-disk DB" + ); + pub static ref DISK_DB_EXISTS_COUNT: Result = try_create_int_counter( + "store_disk_db_exists_count", + "Total number of checks if a key is in the on-disk DB" + ); + pub static ref DISK_DB_DELETE_COUNT: Result = try_create_int_counter( + "store_disk_db_delete_count", + "Total number of deletions from the on-disk DB" + ); } /// Updates the global metrics registry with store-related information. 
diff --git a/eth2/utils/lighthouse_metrics/src/lib.rs b/eth2/utils/lighthouse_metrics/src/lib.rs index d55fcd3e2..a8656d017 100644 --- a/eth2/utils/lighthouse_metrics/src/lib.rs +++ b/eth2/utils/lighthouse_metrics/src/lib.rs @@ -41,6 +41,12 @@ pub fn inc_counter(counter: &Result) { } } +pub fn inc_counter_by(counter: &Result, value: i64) { + if let Ok(counter) = counter { + counter.inc_by(value); + } +} + pub fn set_gauge(gauge: &Result, value: i64) { if let Ok(gauge) = gauge { gauge.set(value); From cac0e5c83284fb05cf9d465cb8e2fc8dc0f3e4aa Mon Sep 17 00:00:00 2001 From: Paul Hauner Date: Mon, 12 Aug 2019 14:16:20 +1000 Subject: [PATCH 064/305] Add basic metrics to libp2p --- beacon_node/eth2-libp2p/Cargo.toml | 2 ++ beacon_node/eth2-libp2p/src/discovery.rs | 4 ++++ beacon_node/eth2-libp2p/src/lib.rs | 4 ++++ beacon_node/eth2-libp2p/src/metrics.rs | 16 ++++++++++++++++ 4 files changed, 26 insertions(+) create mode 100644 beacon_node/eth2-libp2p/src/metrics.rs diff --git a/beacon_node/eth2-libp2p/Cargo.toml b/beacon_node/eth2-libp2p/Cargo.toml index 794b09712..006b895a1 100644 --- a/beacon_node/eth2-libp2p/Cargo.toml +++ b/beacon_node/eth2-libp2p/Cargo.toml @@ -26,3 +26,5 @@ smallvec = "0.6.10" fnv = "1.0.6" unsigned-varint = "0.2.2" bytes = "0.4.12" +lazy_static = "1.3.0" +lighthouse_metrics = { path = "../../eth2/utils/lighthouse_metrics" } diff --git a/beacon_node/eth2-libp2p/src/discovery.rs b/beacon_node/eth2-libp2p/src/discovery.rs index 4c1794945..d9f2f7465 100644 --- a/beacon_node/eth2-libp2p/src/discovery.rs +++ b/beacon_node/eth2-libp2p/src/discovery.rs @@ -1,3 +1,4 @@ +use crate::metrics; use crate::{error, NetworkConfig}; /// This manages the discovery and management of peers. 
/// @@ -158,10 +159,12 @@ where } fn inject_connected(&mut self, peer_id: PeerId, _endpoint: ConnectedPoint) { + metrics::inc_counter(&metrics::PEER_CONNECT_COUNT); self.connected_peers.insert(peer_id); } fn inject_disconnected(&mut self, peer_id: &PeerId, _endpoint: ConnectedPoint) { + metrics::inc_counter(&metrics::PEER_DISCONNECT_COUNT); self.connected_peers.remove(peer_id); } @@ -217,6 +220,7 @@ where } Discv5Event::SocketUpdated(socket) => { info!(self.log, "Address updated"; "IP" => format!("{}",socket.ip())); + metrics::inc_counter(&metrics::ADDRESS_UPDATE_COUNT); let mut address = Multiaddr::from(socket.ip()); address.push(Protocol::Tcp(self.tcp_port)); let enr = self.discovery.local_enr(); diff --git a/beacon_node/eth2-libp2p/src/lib.rs b/beacon_node/eth2-libp2p/src/lib.rs index 54a4f2a99..33d5ba9ed 100644 --- a/beacon_node/eth2-libp2p/src/lib.rs +++ b/beacon_node/eth2-libp2p/src/lib.rs @@ -2,10 +2,14 @@ /// all required libp2p functionality. /// /// This crate builds and manages the libp2p services required by the beacon node. +#[macro_use] +extern crate lazy_static; + pub mod behaviour; mod config; mod discovery; pub mod error; +mod metrics; pub mod rpc; mod service; diff --git a/beacon_node/eth2-libp2p/src/metrics.rs b/beacon_node/eth2-libp2p/src/metrics.rs new file mode 100644 index 000000000..a47037669 --- /dev/null +++ b/beacon_node/eth2-libp2p/src/metrics.rs @@ -0,0 +1,16 @@ +pub use lighthouse_metrics::*; + +lazy_static! 
{ + pub static ref ADDRESS_UPDATE_COUNT: Result = try_create_int_counter( + "libp2p_address_update_count", + "Count of libp2p socked updated events (when our view of our IP address has changed)" + ); + pub static ref PEER_CONNECT_COUNT: Result = try_create_int_counter( + "libp2p_peer_connect_count", + "Count of libp2p peer connect events (not the current number of connected peers)" + ); + pub static ref PEER_DISCONNECT_COUNT: Result = try_create_int_counter( + "libp2p_peer_disconnect_count", + "Count of libp2p peer disconnect events" + ); +} From af334b2cf0a6278c576b23d93d1748fdb4a51960 Mon Sep 17 00:00:00 2001 From: Paul Hauner Date: Mon, 12 Aug 2019 14:30:46 +1000 Subject: [PATCH 065/305] Add metrics to HTTP server --- beacon_node/rest_api/Cargo.toml | 2 ++ beacon_node/rest_api/src/lib.rs | 17 +++++++++++++---- beacon_node/rest_api/src/metrics.rs | 21 +++++++++++++++++++++ 3 files changed, 36 insertions(+), 4 deletions(-) diff --git a/beacon_node/rest_api/Cargo.toml b/beacon_node/rest_api/Cargo.toml index 821d6c0ea..100e680de 100644 --- a/beacon_node/rest_api/Cargo.toml +++ b/beacon_node/rest_api/Cargo.toml @@ -24,3 +24,5 @@ futures = "0.1" exit-future = "0.1.3" tokio = "0.1.17" url = "2.0" +lazy_static = "1.3.0" +lighthouse_metrics = { path = "../../eth2/utils/lighthouse_metrics" } diff --git a/beacon_node/rest_api/src/lib.rs b/beacon_node/rest_api/src/lib.rs index fea67618b..57019deea 100644 --- a/beacon_node/rest_api/src/lib.rs +++ b/beacon_node/rest_api/src/lib.rs @@ -1,5 +1,6 @@ -extern crate futures; -extern crate hyper; +#[macro_use] +extern crate lazy_static; + mod beacon; mod config; mod helpers; @@ -100,6 +101,9 @@ pub fn start_server( // Create a simple handler for the router, inject our stateful objects into the request. 
service_fn_ok(move |mut req| { + metrics::inc_counter(&metrics::REQUEST_COUNT); + let timer = metrics::start_timer(&metrics::REQUEST_RESPONSE_TIME); + req.extensions_mut().insert::(log.clone()); req.extensions_mut() .insert::>>(beacon_chain.clone()); @@ -117,9 +121,10 @@ pub fn start_server( _ => Err(ApiError::MethodNotAllowed(path.clone())), }; - match result { + let response = match result { // Return the `hyper::Response`. Ok(response) => { + metrics::inc_counter(&metrics::SUCCESS_COUNT); slog::debug!(log, "Request successful: {:?}", path); response } @@ -128,7 +133,11 @@ pub fn start_server( slog::debug!(log, "Request failure: {:?}", path); e.into() } - } + }; + + metrics::stop_timer(timer); + + response }) }; diff --git a/beacon_node/rest_api/src/metrics.rs b/beacon_node/rest_api/src/metrics.rs index 0cd700c44..c0db810b6 100644 --- a/beacon_node/rest_api/src/metrics.rs +++ b/beacon_node/rest_api/src/metrics.rs @@ -4,7 +4,28 @@ use hyper::{Body, Request}; use prometheus::{Encoder, TextEncoder}; use std::sync::Arc; +pub use lighthouse_metrics::*; + +lazy_static! { + pub static ref REQUEST_RESPONSE_TIME: Result = try_create_histogram( + "http_server_request_response_time", + "Time taken to build a response to a HTTP request" + ); + pub static ref REQUEST_COUNT: Result = try_create_int_counter( + "http_server_request_count", + "Total count of HTTP requests received" + ); + pub static ref SUCCESS_COUNT: Result = try_create_int_counter( + "http_server_success_count", + "Total count of HTTP 200 responses sent" + ); +} + /// Returns the full set of Prometheus metrics for the Beacon Node application. +/// +/// # Note +/// +/// This is a HTTP handler method. 
pub fn get_prometheus(req: Request) -> ApiResult { let mut buffer = vec![]; let encoder = TextEncoder::new(); From 6a1e5f6d26c4dffd126faaedd970b4d8446a1ce3 Mon Sep 17 00:00:00 2001 From: Paul Hauner Date: Mon, 12 Aug 2019 15:19:39 +1000 Subject: [PATCH 066/305] Remove old `http_server` crate --- Cargo.toml | 1 - beacon_node/client/Cargo.toml | 1 - beacon_node/client/src/config.rs | 4 - beacon_node/client/src/lib.rs | 22 --- beacon_node/http_server/Cargo.toml | 23 --- beacon_node/http_server/src/api.rs | 71 -------- beacon_node/http_server/src/key.rs | 33 ---- beacon_node/http_server/src/lib.rs | 145 ----------------- beacon_node/http_server/src/metrics.rs | 72 -------- .../http_server/src/metrics/local_metrics.rs | 154 ------------------ beacon_node/rest_api/src/config.rs | 2 +- docs/config_examples/beacon-node.toml | 10 +- 12 files changed, 2 insertions(+), 536 deletions(-) delete mode 100644 beacon_node/http_server/Cargo.toml delete mode 100644 beacon_node/http_server/src/api.rs delete mode 100644 beacon_node/http_server/src/key.rs delete mode 100644 beacon_node/http_server/src/lib.rs delete mode 100644 beacon_node/http_server/src/metrics.rs delete mode 100644 beacon_node/http_server/src/metrics/local_metrics.rs diff --git a/Cargo.toml b/Cargo.toml index 9b7b87a0d..f087539e6 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -26,7 +26,6 @@ members = [ "beacon_node", "beacon_node/store", "beacon_node/client", - "beacon_node/http_server", "beacon_node/rest_api", "beacon_node/network", "beacon_node/eth2-libp2p", diff --git a/beacon_node/client/Cargo.toml b/beacon_node/client/Cargo.toml index 8c72fa417..b13f175a9 100644 --- a/beacon_node/client/Cargo.toml +++ b/beacon_node/client/Cargo.toml @@ -7,7 +7,6 @@ edition = "2018" [dependencies] beacon_chain = { path = "../beacon_chain" } network = { path = "../network" } -http_server = { path = "../http_server" } rpc = { path = "../rpc" } rest_api = { path = "../rest_api" } prometheus = "^0.6" diff --git 
a/beacon_node/client/src/config.rs b/beacon_node/client/src/config.rs index ee62b6281..fcc2cc7da 100644 --- a/beacon_node/client/src/config.rs +++ b/beacon_node/client/src/config.rs @@ -1,6 +1,5 @@ use crate::Eth2Config; use clap::ArgMatches; -use http_server::HttpServerConfig; use network::NetworkConfig; use serde_derive::{Deserialize, Serialize}; use slog::{info, o, Drain}; @@ -25,7 +24,6 @@ pub struct Config { pub genesis_state: GenesisState, pub network: network::NetworkConfig, pub rpc: rpc::RPCConfig, - pub http: HttpServerConfig, pub rest_api: rest_api::ApiConfig, } @@ -59,7 +57,6 @@ impl Default for Config { db_name: "chain_db".to_string(), network: NetworkConfig::new(), rpc: rpc::RPCConfig::default(), - http: HttpServerConfig::default(), rest_api: rest_api::ApiConfig::default(), spec_constants: TESTNET_SPEC_CONSTANTS.into(), genesis_state: GenesisState::RecentGenesis { @@ -143,7 +140,6 @@ impl Config { self.network.apply_cli_args(args)?; self.rpc.apply_cli_args(args)?; - self.http.apply_cli_args(args)?; self.rest_api.apply_cli_args(args)?; if let Some(log_file) = args.value_of("logfile") { diff --git a/beacon_node/client/src/lib.rs b/beacon_node/client/src/lib.rs index c74787f60..5c37ac3e9 100644 --- a/beacon_node/client/src/lib.rs +++ b/beacon_node/client/src/lib.rs @@ -10,7 +10,6 @@ use beacon_chain::BeaconChain; use exit_future::Signal; use futures::{future::Future, Stream}; use network::Service as NetworkService; -use prometheus::Registry; use slog::{error, info, o}; use slot_clock::SlotClock; use std::marker::PhantomData; @@ -36,8 +35,6 @@ pub struct Client { pub network: Arc>, /// Signal to terminate the RPC server. pub rpc_exit_signal: Option, - /// Signal to terminate the HTTP server. - pub http_exit_signal: Option, /// Signal to terminate the slot timer. 
pub slot_timer_exit_signal: Option, /// Signal to terminate the API @@ -60,7 +57,6 @@ where log: slog::Logger, executor: &TaskExecutor, ) -> error::Result { - let metrics_registry = Registry::new(); let store = Arc::new(store); let seconds_per_slot = eth2_config.spec.seconds_per_slot; @@ -119,23 +115,6 @@ where None }; - // Start the `http_server` service. - // - // Note: presently we are ignoring the config and _always_ starting a HTTP server. - let http_exit_signal = if client_config.http.enabled { - Some(http_server::start_service( - &client_config.http, - executor, - network_send, - beacon_chain.clone(), - client_config.db_path().expect("unable to read datadir"), - metrics_registry, - &log, - )) - } else { - None - }; - // Start the `rest_api` service let api_exit_signal = if client_config.rest_api.enabled { match rest_api::start_server( @@ -184,7 +163,6 @@ where Ok(Client { _client_config: client_config, beacon_chain, - http_exit_signal, rpc_exit_signal, slot_timer_exit_signal: Some(slot_timer_exit_signal), api_exit_signal, diff --git a/beacon_node/http_server/Cargo.toml b/beacon_node/http_server/Cargo.toml deleted file mode 100644 index e87ff2997..000000000 --- a/beacon_node/http_server/Cargo.toml +++ /dev/null @@ -1,23 +0,0 @@ -[package] -name = "http_server" -version = "0.1.0" -authors = ["Paul Hauner "] -edition = "2018" - -[dependencies] -beacon_chain = { path = "../beacon_chain" } -iron = "^0.6" -router = "^0.6" -network = { path = "../network" } -types = { path = "../../eth2/types" } -slot_clock = { path = "../../eth2/utils/slot_clock" } -persistent = "^0.4" -prometheus = { version = "^0.6", features = ["process"] } -clap = "2.32.0" -futures = "0.1.23" -serde = "1.0" -serde_derive = "1.0" -serde_json = "1.0" -slog = { version = "^2.2.3" , features = ["max_level_trace"] } -tokio = "0.1.17" -exit-future = "0.1.4" diff --git a/beacon_node/http_server/src/api.rs b/beacon_node/http_server/src/api.rs deleted file mode 100644 index 8cb023b02..000000000 --- 
a/beacon_node/http_server/src/api.rs +++ /dev/null @@ -1,71 +0,0 @@ -use crate::{key::BeaconChainKey, map_persistent_err_to_500}; -use beacon_chain::{BeaconChain, BeaconChainTypes}; -use iron::prelude::*; -use iron::{ - headers::{CacheControl, CacheDirective, ContentType}, - status::Status, - AfterMiddleware, Handler, IronResult, Request, Response, -}; -use persistent::Read; -use router::Router; -use serde_json::json; -use std::sync::Arc; - -/// Yields a handler for the HTTP API. -pub fn build_handler( - beacon_chain: Arc>, -) -> impl Handler { - let mut router = Router::new(); - - router.get("/node/fork", handle_fork::, "fork"); - - let mut chain = Chain::new(router); - - // Insert `BeaconChain` so it may be accessed in a request. - chain.link(Read::>::both(beacon_chain.clone())); - // Set the content-type headers. - chain.link_after(SetJsonContentType); - // Set the cache headers. - chain.link_after(SetCacheDirectives); - - chain -} - -/// Sets the `cache-control` headers on _all_ responses, unless they are already set. -struct SetCacheDirectives; -impl AfterMiddleware for SetCacheDirectives { - fn after(&self, _req: &mut Request, mut resp: Response) -> IronResult { - // This is run for every requests, AFTER all handlers have been executed - if resp.headers.get::() == None { - resp.headers.set(CacheControl(vec![ - CacheDirective::NoCache, - CacheDirective::NoStore, - ])); - } - Ok(resp) - } -} - -/// Sets the `content-type` headers on _all_ responses, unless they are already set. 
-struct SetJsonContentType; -impl AfterMiddleware for SetJsonContentType { - fn after(&self, _req: &mut Request, mut resp: Response) -> IronResult { - if resp.headers.get::() == None { - resp.headers.set(ContentType::json()); - } - Ok(resp) - } -} - -fn handle_fork(req: &mut Request) -> IronResult { - let beacon_chain = req - .get::>>() - .map_err(map_persistent_err_to_500)?; - - let response = json!({ - "fork": beacon_chain.head().beacon_state.fork, - "network_id": beacon_chain.spec.network_id - }); - - Ok(Response::with((Status::Ok, response.to_string()))) -} diff --git a/beacon_node/http_server/src/key.rs b/beacon_node/http_server/src/key.rs deleted file mode 100644 index a69da6747..000000000 --- a/beacon_node/http_server/src/key.rs +++ /dev/null @@ -1,33 +0,0 @@ -use crate::metrics::LocalMetrics; -use beacon_chain::{BeaconChain, BeaconChainTypes}; -use iron::typemap::Key; -use prometheus::Registry; -use std::marker::PhantomData; -use std::path::PathBuf; -use std::sync::Arc; - -pub struct BeaconChainKey { - _phantom: PhantomData, -} - -impl Key for BeaconChainKey { - type Value = Arc>; -} - -pub struct MetricsRegistryKey; - -impl Key for MetricsRegistryKey { - type Value = Registry; -} - -pub struct LocalMetricsKey; - -impl Key for LocalMetricsKey { - type Value = LocalMetrics; -} - -pub struct DBPathKey; - -impl Key for DBPathKey { - type Value = PathBuf; -} diff --git a/beacon_node/http_server/src/lib.rs b/beacon_node/http_server/src/lib.rs deleted file mode 100644 index f1d006a5b..000000000 --- a/beacon_node/http_server/src/lib.rs +++ /dev/null @@ -1,145 +0,0 @@ -mod api; -mod key; -mod metrics; - -use beacon_chain::{BeaconChain, BeaconChainTypes}; -use clap::ArgMatches; -use futures::Future; -use iron::prelude::*; -use network::NetworkMessage; -use prometheus::Registry; -use router::Router; -use serde_derive::{Deserialize, Serialize}; -use slog::{info, o, warn}; -use std::path::PathBuf; -use std::sync::Arc; -use tokio::runtime::TaskExecutor; -use 
tokio::sync::mpsc; - -#[derive(PartialEq, Clone, Debug, Serialize, Deserialize)] -pub struct HttpServerConfig { - pub enabled: bool, - pub listen_address: String, - pub listen_port: String, -} - -impl Default for HttpServerConfig { - fn default() -> Self { - Self { - enabled: false, - listen_address: "127.0.0.1".to_string(), - listen_port: "5052".to_string(), - } - } -} - -impl HttpServerConfig { - pub fn apply_cli_args(&mut self, args: &ArgMatches) -> Result<(), &'static str> { - if args.is_present("http") { - self.enabled = true; - } - - if let Some(listen_address) = args.value_of("http-address") { - self.listen_address = listen_address.to_string(); - } - - if let Some(listen_port) = args.value_of("http-port") { - self.listen_port = listen_port.to_string(); - } - - Ok(()) - } -} - -/// Build the `iron` HTTP server, defining the core routes. -pub fn create_iron_http_server( - beacon_chain: Arc>, - db_path: PathBuf, - metrics_registry: Registry, -) -> Iron { - let mut router = Router::new(); - - // A `GET` request to `/metrics` is handled by the `metrics` module. - router.get( - "/metrics", - metrics::build_handler(beacon_chain.clone(), db_path, metrics_registry), - "metrics", - ); - - // Any request to all other endpoints is handled by the `api` module. - router.any("/*", api::build_handler(beacon_chain.clone()), "api"); - - Iron::new(router) -} - -/// Start the HTTP service on the tokio `TaskExecutor`. -pub fn start_service( - config: &HttpServerConfig, - executor: &TaskExecutor, - _network_chan: mpsc::UnboundedSender, - beacon_chain: Arc>, - db_path: PathBuf, - metrics_registry: Registry, - log: &slog::Logger, -) -> exit_future::Signal { - let log = log.new(o!("Service"=>"HTTP")); - - // Create: - // - `shutdown_trigger` a one-shot to shut down this service. - // - `wait_for_shutdown` a future that will wait until someone calls shutdown. - let (shutdown_trigger, wait_for_shutdown) = exit_future::signal(); - - // Create an `iron` http, without starting it yet. 
- let iron = create_iron_http_server(beacon_chain, db_path, metrics_registry); - - // Create a HTTP server future. - // - // 1. Start the HTTP server - // 2. Build an exit future that will shutdown the server when requested. - // 3. Return the exit future, so the caller may shutdown the service when desired. - let http_service = { - let listen_address = format!("{}:{}", config.listen_address, config.listen_port); - // Start the HTTP server - let server_start_result = iron.http(listen_address.clone()); - - if server_start_result.is_ok() { - info!(log, "HTTP server running on {}", listen_address); - } else { - warn!(log, "HTTP server failed to start on {}", listen_address); - } - - // Build a future that will shutdown the HTTP server when the `shutdown_trigger` is - // triggered. - wait_for_shutdown.and_then(move |_| { - info!(log, "HTTP server shutting down"); - - if let Ok(mut server) = server_start_result { - // According to the documentation, `server.close()` "doesn't work" and the server - // keeps listening. - // - // It is being called anyway, because it seems like the right thing to do. If you - // know this has negative side-effects, please create an issue to discuss. - // - // See: https://docs.rs/iron/0.6.0/iron/struct.Listening.html#impl - match server.close() { - _ => (), - }; - } - info!(log, "HTTP server shutdown complete."); - Ok(()) - }) - }; - - // Attach the HTTP server to the executor. - executor.spawn(http_service); - - shutdown_trigger -} - -/// Helper function for mapping a failure to read state to a 500 server error. 
-fn map_persistent_err_to_500(e: persistent::PersistentError) -> iron::error::IronError { - iron::error::IronError { - error: Box::new(e), - response: iron::Response::with(iron::status::Status::InternalServerError), - } -} diff --git a/beacon_node/http_server/src/metrics.rs b/beacon_node/http_server/src/metrics.rs deleted file mode 100644 index 1b1ed1f3d..000000000 --- a/beacon_node/http_server/src/metrics.rs +++ /dev/null @@ -1,72 +0,0 @@ -use crate::{ - key::{BeaconChainKey, DBPathKey, LocalMetricsKey, MetricsRegistryKey}, - map_persistent_err_to_500, -}; -use beacon_chain::{BeaconChain, BeaconChainTypes}; -use iron::prelude::*; -use iron::{status::Status, Handler, IronResult, Request, Response}; -use persistent::Read; -use prometheus::{Encoder, Registry, TextEncoder}; -use std::path::PathBuf; -use std::sync::Arc; - -pub use local_metrics::LocalMetrics; - -mod local_metrics; - -/// Yields a handler for the metrics endpoint. -pub fn build_handler( - beacon_chain: Arc>, - db_path: PathBuf, - metrics_registry: Registry, -) -> impl Handler { - let mut chain = Chain::new(handle_metrics::); - - let local_metrics = LocalMetrics::new().unwrap(); - local_metrics.register(&metrics_registry).unwrap(); - - chain.link(Read::>::both(beacon_chain)); - chain.link(Read::::both(metrics_registry)); - chain.link(Read::::both(local_metrics)); - chain.link(Read::::both(db_path)); - - chain -} - -/// Handle a request for Prometheus metrics. -/// -/// Returns a text string containing all metrics. -fn handle_metrics(req: &mut Request) -> IronResult { - let beacon_chain = req - .get::>>() - .map_err(map_persistent_err_to_500)?; - - let r = req - .get::>() - .map_err(map_persistent_err_to_500)?; - - let local_metrics = req - .get::>() - .map_err(map_persistent_err_to_500)?; - - let db_path = req - .get::>() - .map_err(map_persistent_err_to_500)?; - - // Update metrics that are calculated on each scrape. 
- local_metrics.update(&beacon_chain, &db_path); - - let mut buffer = vec![]; - let encoder = TextEncoder::new(); - - // Gather `DEFAULT_REGISTRY` metrics. - encoder.encode(&prometheus::gather(), &mut buffer).unwrap(); - - // Gather metrics from our registry. - let metric_families = r.gather(); - encoder.encode(&metric_families, &mut buffer).unwrap(); - - let prom_string = String::from_utf8(buffer).unwrap(); - - Ok(Response::with((Status::Ok, prom_string))) -} diff --git a/beacon_node/http_server/src/metrics/local_metrics.rs b/beacon_node/http_server/src/metrics/local_metrics.rs deleted file mode 100644 index b342cca81..000000000 --- a/beacon_node/http_server/src/metrics/local_metrics.rs +++ /dev/null @@ -1,154 +0,0 @@ -use beacon_chain::{BeaconChain, BeaconChainTypes}; -use prometheus::{IntGauge, Opts, Registry}; -use slot_clock::SlotClock; -use std::fs; -use std::path::PathBuf; -use types::{EthSpec, Slot}; - -// If set to `true` will iterate and sum the balances of all validators in the state for each -// scrape. -const SHOULD_SUM_VALIDATOR_BALANCES: bool = true; - -pub struct LocalMetrics { - present_slot: IntGauge, - present_epoch: IntGauge, - best_slot: IntGauge, - best_beacon_block_root: IntGauge, - justified_beacon_block_root: IntGauge, - finalized_beacon_block_root: IntGauge, - validator_count: IntGauge, - justified_epoch: IntGauge, - finalized_epoch: IntGauge, - validator_balances_sum: IntGauge, - database_size: IntGauge, -} - -impl LocalMetrics { - /// Create a new instance. - pub fn new() -> Result { - Ok(Self { - present_slot: { - let opts = Opts::new("present_slot", "slot_at_time_of_scrape"); - IntGauge::with_opts(opts)? - }, - present_epoch: { - let opts = Opts::new("present_epoch", "epoch_at_time_of_scrape"); - IntGauge::with_opts(opts)? - }, - best_slot: { - let opts = Opts::new("best_slot", "slot_of_block_at_chain_head"); - IntGauge::with_opts(opts)? 
- }, - best_beacon_block_root: { - let opts = Opts::new("best_beacon_block_root", "root_of_block_at_chain_head"); - IntGauge::with_opts(opts)? - }, - justified_beacon_block_root: { - let opts = Opts::new( - "justified_beacon_block_root", - "root_of_block_at_justified_head", - ); - IntGauge::with_opts(opts)? - }, - finalized_beacon_block_root: { - let opts = Opts::new( - "finalized_beacon_block_root", - "root_of_block_at_finalized_head", - ); - IntGauge::with_opts(opts)? - }, - validator_count: { - let opts = Opts::new("validator_count", "number_of_validators"); - IntGauge::with_opts(opts)? - }, - justified_epoch: { - let opts = Opts::new("justified_epoch", "state_justified_epoch"); - IntGauge::with_opts(opts)? - }, - finalized_epoch: { - let opts = Opts::new("finalized_epoch", "state_finalized_epoch"); - IntGauge::with_opts(opts)? - }, - validator_balances_sum: { - let opts = Opts::new("validator_balances_sum", "sum_of_all_validator_balances"); - IntGauge::with_opts(opts)? - }, - database_size: { - let opts = Opts::new("database_size", "size_of_on_disk_db_in_mb"); - IntGauge::with_opts(opts)? - }, - }) - } - - /// Registry this instance with the `registry`. 
- pub fn register(&self, registry: &Registry) -> Result<(), prometheus::Error> { - registry.register(Box::new(self.present_slot.clone()))?; - registry.register(Box::new(self.present_epoch.clone()))?; - registry.register(Box::new(self.best_slot.clone()))?; - registry.register(Box::new(self.best_beacon_block_root.clone()))?; - registry.register(Box::new(self.justified_beacon_block_root.clone()))?; - registry.register(Box::new(self.finalized_beacon_block_root.clone()))?; - registry.register(Box::new(self.validator_count.clone()))?; - registry.register(Box::new(self.finalized_epoch.clone()))?; - registry.register(Box::new(self.justified_epoch.clone()))?; - registry.register(Box::new(self.validator_balances_sum.clone()))?; - registry.register(Box::new(self.database_size.clone()))?; - - Ok(()) - } - - /// Update the metrics in `self` to the latest values. - pub fn update(&self, beacon_chain: &BeaconChain, db_path: &PathBuf) { - let state = &beacon_chain.head().beacon_state; - - let present_slot = beacon_chain - .slot_clock - .present_slot() - .unwrap_or_else(|_| None) - .unwrap_or_else(|| Slot::new(0)); - self.present_slot.set(present_slot.as_u64() as i64); - self.present_epoch - .set(present_slot.epoch(T::EthSpec::slots_per_epoch()).as_u64() as i64); - - self.best_slot.set(state.slot.as_u64() as i64); - self.best_beacon_block_root - .set(beacon_chain.head().beacon_block_root.to_low_u64_le() as i64); - self.justified_beacon_block_root.set( - beacon_chain - .head() - .beacon_state - .current_justified_checkpoint - .root - .to_low_u64_le() as i64, - ); - self.finalized_beacon_block_root.set( - beacon_chain - .head() - .beacon_state - .finalized_checkpoint - .root - .to_low_u64_le() as i64, - ); - self.validator_count.set(state.validators.len() as i64); - self.justified_epoch - .set(state.current_justified_checkpoint.epoch.as_u64() as i64); - self.finalized_epoch - .set(state.finalized_checkpoint.epoch.as_u64() as i64); - if SHOULD_SUM_VALIDATOR_BALANCES { - 
self.validator_balances_sum - .set(state.balances.iter().sum::() as i64); - } - let db_size = if let Ok(iter) = fs::read_dir(db_path) { - iter.filter_map(Result::ok) - .map(size_of_dir_entry) - .fold(0_u64, |sum, val| sum + val) - } else { - 0 - }; - self.database_size.set(db_size as i64); - } -} - -fn size_of_dir_entry(dir: fs::DirEntry) -> u64 { - dir.metadata().map(|m| m.len()).unwrap_or(0) -} diff --git a/beacon_node/rest_api/src/config.rs b/beacon_node/rest_api/src/config.rs index c4a9c738a..90ac0821b 100644 --- a/beacon_node/rest_api/src/config.rs +++ b/beacon_node/rest_api/src/config.rs @@ -18,7 +18,7 @@ impl Default for Config { Config { enabled: true, // rest_api enabled by default listen_address: Ipv4Addr::new(127, 0, 0, 1), - port: 1248, + port: 5052, } } } diff --git a/docs/config_examples/beacon-node.toml b/docs/config_examples/beacon-node.toml index 3c9f8b613..f0863934e 100644 --- a/docs/config_examples/beacon-node.toml +++ b/docs/config_examples/beacon-node.toml @@ -78,14 +78,6 @@ enabled = false listen_address = "127.0.0.1" port = 5051 -# -# Legacy HTTP server configuration. To be removed. -# -[http] -enabled = false -listen_address = "127.0.0.1" -listen_port = "5052" - # # RESTful HTTP API server configuration. # @@ -95,4 +87,4 @@ enabled = true # The listen port for the HTTP server. listen_address = "127.0.0.1" # The listen port for the HTTP server. 
-port = 1248 +port = 5052 From 95a320817e0c724b6d4ed64b9bf2fefacc918aa6 Mon Sep 17 00:00:00 2001 From: Paul Hauner Date: Mon, 12 Aug 2019 15:40:51 +1000 Subject: [PATCH 067/305] Update metrics names to be more like standard --- beacon_node/beacon_chain/src/metrics.rs | 65 +++++++++++++------------ beacon_node/rest_api/src/metrics.rs | 6 +-- beacon_node/store/src/metrics.rs | 12 ++--- 3 files changed, 43 insertions(+), 40 deletions(-) diff --git a/beacon_node/beacon_chain/src/metrics.rs b/beacon_node/beacon_chain/src/metrics.rs index 227f1090f..00a3e5eb2 100644 --- a/beacon_node/beacon_chain/src/metrics.rs +++ b/beacon_node/beacon_chain/src/metrics.rs @@ -7,45 +7,45 @@ lazy_static! { * Block Processing */ pub static ref BLOCK_PROCESSING_REQUESTS: Result = try_create_int_counter( - "beacon_block_processing_requests", + "beacon_block_processing_requests_total", "Count of blocks submitted for processing" ); pub static ref BLOCK_PROCESSING_SUCCESSES: Result = try_create_int_counter( - "beacon_block_processing_successes", + "beacon_block_processing_successes_total", "Count of blocks processed without error" ); pub static ref BLOCK_PROCESSING_TIMES: Result = - try_create_histogram("block_processing_times", "Full runtime of block processing"); + try_create_histogram("block_processing_seconds", "Full runtime of block processing"); pub static ref BLOCK_PROCESSING_DB_READ: Result = try_create_histogram( - "beacon_block_processing_db_read_times", + "beacon_block_processing_db_read_seconds", "Time spent loading block and state from DB for block processing" ); pub static ref BLOCK_PROCESSING_CATCHUP_STATE: Result = try_create_histogram( - "beacon_block_processing_catch_up_state_times", + "beacon_block_processing_catch_up_state_seconds", "Time spent skipping slots on a state before processing a block." 
); pub static ref BLOCK_PROCESSING_COMMITTEE: Result = try_create_histogram( - "beacon_block_processing_committee_building_times", + "beacon_block_processing_committee_building_seconds", "Time spent building/obtaining committees for block processing." ); pub static ref BLOCK_PROCESSING_CORE: Result = try_create_histogram( - "beacon_block_processing_core_times", + "beacon_block_processing_core_seconds", "Time spent doing the core per_block_processing state processing." ); pub static ref BLOCK_PROCESSING_STATE_ROOT: Result = try_create_histogram( - "beacon_block_processing_state_root_times", + "beacon_block_processing_state_root_seconds", "Time spent calculating the state root when processing a block." ); pub static ref BLOCK_PROCESSING_DB_WRITE: Result = try_create_histogram( - "beacon_block_processing_db_write_times", + "beacon_block_processing_db_write_seconds", "Time spent writing a newly processed block and state to DB" ); pub static ref BLOCK_PROCESSING_FORK_CHOICE_REGISTER: Result = try_create_histogram( - "beacon_block_processing_fork_choice_register_times", + "beacon_block_processing_fork_choice_register_seconds", "Time spent registering the new block with fork choice (but not finding head)" ); pub static ref BLOCK_PROCESSING_FORK_CHOICE_FIND_HEAD: Result = try_create_histogram( - "beacon_block_processing_fork_choice_find_head_times", + "beacon_block_processing_fork_choice_find_head_seconds", "Time spent finding the new head after processing a new block" ); @@ -53,21 +53,21 @@ lazy_static! { * Block Production */ pub static ref BLOCK_PRODUCTION_REQUESTS: Result = try_create_int_counter( - "beacon_block_production_requests", + "beacon_block_production_requests_total", "Count of all block production requests" ); pub static ref BLOCK_PRODUCTION_SUCCESSES: Result = try_create_int_counter( - "beacon_block_production_successes", + "beacon_block_production_successes_total", "Count of blocks successfully produced." 
); pub static ref BLOCK_PRODUCTION_TIMES: Result = - try_create_histogram("beacon_block_production_times", "Full runtime of block production"); + try_create_histogram("beacon_block_production_seconds", "Full runtime of block production"); /* * Block Statistics */ pub static ref OPERATIONS_PER_BLOCK_ATTESTATION: Result = try_create_histogram( - "beacon_operations_per_block_attestation", + "beacon_operations_per_block_attestation_total", "Number of attestations in a block" ); @@ -75,15 +75,15 @@ lazy_static! { * Attestation Processing */ pub static ref ATTESTATION_PROCESSING_REQUESTS: Result = try_create_int_counter( - "beacon_attestation_processing_requests", + "beacon_attestation_processing_requests_total", "Count of all attestations submitted for processing" ); pub static ref ATTESTATION_PROCESSING_SUCCESSES: Result = try_create_int_counter( - "beacon_attestation_processing_successes", + "beacon_attestation_processing_successes_total", "total_attestation_processing_successes" ); pub static ref ATTESTATION_PROCESSING_TIMES: Result = try_create_histogram( - "beacon_attestation_processing_times", + "beacon_attestation_processing_seconds", "Full runtime of attestation processing" ); @@ -91,15 +91,15 @@ lazy_static! { * Attestation Production */ pub static ref ATTESTATION_PRODUCTION_REQUESTS: Result = try_create_int_counter( - "beacon_attestation_production_requests", + "beacon_attestation_production_requests_total", "Count of all attestation production requests" ); pub static ref ATTESTATION_PRODUCTION_SUCCESSES: Result = try_create_int_counter( - "beacon_attestation_production_successes", + "beacon_attestation_production_successes_total", "Count of attestations processed without error" ); pub static ref ATTESTATION_PRODUCTION_TIMES: Result = try_create_histogram( - "beacon_attestation_production_times", + "beacon_attestation_production_seconds", "Full runtime of attestation production" ); @@ -107,19 +107,19 @@ lazy_static! 
{ * Fork Choice */ pub static ref FORK_CHOICE_REQUESTS: Result = try_create_int_counter( - "beacon_fork_choice_requests", + "beacon_fork_choice_requests_total", "Count of occasions where fork choice has tried to find a head" ); pub static ref FORK_CHOICE_ERRORS: Result = try_create_int_counter( - "beacon_fork_choice_errors", + "beacon_fork_choice_errors_total", "Count of occasions where fork choice has returned an error when trying to find a head" ); pub static ref FORK_CHOICE_CHANGED_HEAD: Result = try_create_int_counter( - "beacon_fork_choice_changed_head", + "beacon_fork_choice_changed_head_total", "Count of occasions fork choice has found a new head" ); pub static ref FORK_CHOICE_REORG_COUNT: Result = try_create_int_counter( - "beacon_fork_choice_reorg_count", + "beacon_fork_choice_reorg_total", "Count of occasions fork choice has switched to a different chain" ); pub static ref FORK_CHOICE_TIMES: Result = @@ -156,7 +156,7 @@ lazy_static! { * Chain Head */ pub static ref UPDATE_HEAD_TIMES: Result = - try_create_histogram("beacon_update_head_times", "Time taken to update the canonical head"); + try_create_histogram("beacon_update_head_seconds", "Time taken to update the canonical head"); pub static ref HEAD_STATE_SLOT: Result = try_create_int_gauge("beacon_head_state_slot", "Slot of the block at the head of the chain"); pub static ref HEAD_STATE_ROOT: Result = @@ -175,16 +175,18 @@ lazy_static! 
{ try_create_int_gauge("beacon_head_state_finalized_root", "Finalized root at the head of the chain"); pub static ref HEAD_STATE_FINALIZED_EPOCH: Result = try_create_int_gauge("beacon_head_state_finalized_epoch", "Finalized epoch at the head of the chain"); + pub static ref HEAD_STATE_SHARDS: Result = + try_create_int_gauge("beacon_head_state_shard_total", "Count of shards in the beacon chain"); pub static ref HEAD_STATE_TOTAL_VALIDATORS: Result = - try_create_int_gauge("beacon_head_state_total_validators", "Count of validators at the head of the chain"); + try_create_int_gauge("beacon_head_state_total_validators_total", "Count of validators at the head of the chain"); pub static ref HEAD_STATE_ACTIVE_VALIDATORS: Result = - try_create_int_gauge("beacon_head_state_active_validators", "Count of active validators at the head of the chain"); + try_create_int_gauge("beacon_head_state_active_validators_total", "Count of active validators at the head of the chain"); pub static ref HEAD_STATE_VALIDATOR_BALANCES: Result = - try_create_int_gauge("beacon_head_state_validator_balances", "Sum of all validator balances at the head of the chain"); + try_create_int_gauge("beacon_head_state_validator_balances_total", "Sum of all validator balances at the head of the chain"); pub static ref HEAD_STATE_SLASHED_VALIDATORS: Result = - try_create_int_gauge("beacon_head_state_slashed_validators", "Count of all slashed validators at the head of the chain"); + try_create_int_gauge("beacon_head_state_slashed_validators_total", "Count of all slashed validators at the head of the chain"); pub static ref HEAD_STATE_WITHDRAWN_VALIDATORS: Result = - try_create_int_gauge("beacon_head_state_withdrawn_validators", "Sum of all validator balances at the head of the chain"); + try_create_int_gauge("beacon_head_state_withdrawn_validators_total", "Sum of all validator balances at the head of the chain"); pub static ref HEAD_STATE_ETH1_DEPOSIT_INDEX: Result = 
try_create_int_gauge("beacon_head_state_eth1_deposit_index", "Eth1 deposit index at the head of the chain"); } @@ -242,6 +244,7 @@ fn scrape_head_state(state: &BeaconState, state &HEAD_STATE_FINALIZED_EPOCH, state.finalized_checkpoint.epoch, ); + set_gauge_by_usize(&HEAD_STATE_SHARDS, state.previous_crosslinks.len()); set_gauge_by_usize(&HEAD_STATE_TOTAL_VALIDATORS, state.validators.len()); set_gauge_by_u64( &HEAD_STATE_VALIDATOR_BALANCES, diff --git a/beacon_node/rest_api/src/metrics.rs b/beacon_node/rest_api/src/metrics.rs index c0db810b6..b0f1c1b98 100644 --- a/beacon_node/rest_api/src/metrics.rs +++ b/beacon_node/rest_api/src/metrics.rs @@ -8,15 +8,15 @@ pub use lighthouse_metrics::*; lazy_static! { pub static ref REQUEST_RESPONSE_TIME: Result = try_create_histogram( - "http_server_request_response_time", + "http_server_request_duration_seconds", "Time taken to build a response to a HTTP request" ); pub static ref REQUEST_COUNT: Result = try_create_int_counter( - "http_server_request_count", + "http_server_request_total", "Total count of HTTP requests received" ); pub static ref SUCCESS_COUNT: Result = try_create_int_counter( - "http_server_success_count", + "http_server_success_total", "Total count of HTTP 200 responses sent" ); } diff --git a/beacon_node/store/src/metrics.rs b/beacon_node/store/src/metrics.rs index 430e9c38e..30cbb878b 100644 --- a/beacon_node/store/src/metrics.rs +++ b/beacon_node/store/src/metrics.rs @@ -7,27 +7,27 @@ lazy_static! 
{ pub static ref DISK_DB_SIZE: Result = try_create_int_gauge("store_disk_db_size", "Size of the on-disk database (bytes)"); pub static ref DISK_DB_WRITE_BYTES: Result = try_create_int_counter( - "store_disk_db_write_bytes", + "store_disk_db_write_bytes_total", "Number of bytes attempted to be written to the on-disk DB" ); pub static ref DISK_DB_READ_BYTES: Result = try_create_int_counter( - "store_disk_db_read_bytes", + "store_disk_db_read_bytes_total", "Number of bytes read from the on-disk DB" ); pub static ref DISK_DB_READ_COUNT: Result = try_create_int_counter( - "store_disk_db_read_count", + "store_disk_db_read_count_total", "Total number of reads to the on-disk DB" ); pub static ref DISK_DB_WRITE_COUNT: Result = try_create_int_counter( - "store_disk_db_write_count", + "store_disk_db_write_count_total", "Total number of writes to the on-disk DB" ); pub static ref DISK_DB_EXISTS_COUNT: Result = try_create_int_counter( - "store_disk_db_exists_count", + "store_disk_db_exists_count_total", "Total number of checks if a key is in the on-disk DB" ); pub static ref DISK_DB_DELETE_COUNT: Result = try_create_int_counter( - "store_disk_db_delete_count", + "store_disk_db_delete_count_total", "Total number of deletions from the on-disk DB" ); } From d7c546844cfaf58ab63739a181fbf73c924fb4d5 Mon Sep 17 00:00:00 2001 From: Paul Hauner Date: Mon, 12 Aug 2019 17:44:47 +1000 Subject: [PATCH 068/305] Fix broken beacon chain metrics, add slot clock metrics --- beacon_node/beacon_chain/src/lib.rs | 1 + beacon_node/beacon_chain/src/metrics.rs | 28 +----------------- beacon_node/rest_api/Cargo.toml | 1 + beacon_node/rest_api/src/metrics.rs | 17 +++++++++++ eth2/utils/slot_clock/Cargo.toml | 2 ++ eth2/utils/slot_clock/src/lib.rs | 10 ++++++- eth2/utils/slot_clock/src/metrics.rs | 29 +++++++++++++++++++ .../slot_clock/src/system_time_slot_clock.rs | 4 +++ .../slot_clock/src/testing_slot_clock.rs | 4 +++ 9 files changed, 68 insertions(+), 28 deletions(-) create mode 100644 
eth2/utils/slot_clock/src/metrics.rs diff --git a/beacon_node/beacon_chain/src/lib.rs b/beacon_node/beacon_chain/src/lib.rs index 1262bc537..cc7725dd8 100644 --- a/beacon_node/beacon_chain/src/lib.rs +++ b/beacon_node/beacon_chain/src/lib.rs @@ -1,3 +1,4 @@ +#![recursion_limit = "128"] // For lazy-static #[macro_use] extern crate lazy_static; diff --git a/beacon_node/beacon_chain/src/metrics.rs b/beacon_node/beacon_chain/src/metrics.rs index 00a3e5eb2..a4b36cd37 100644 --- a/beacon_node/beacon_chain/src/metrics.rs +++ b/beacon_node/beacon_chain/src/metrics.rs @@ -1,6 +1,6 @@ use crate::{BeaconChain, BeaconChainTypes}; pub use lighthouse_metrics::*; -use types::{BeaconState, Epoch, EthSpec, Hash256, Slot}; +use types::{BeaconState, Epoch, Hash256, Slot}; lazy_static! { /* @@ -140,17 +140,6 @@ lazy_static! { */ pub static ref PERSIST_CHAIN: Result = try_create_histogram("beacon_persist_chain", "Time taken to update the canonical head"); -} - -// Lazy-static is split so we don't reach the crate-level recursion limit. -lazy_static! { - /* - * Slot Clock - */ - pub static ref PRESENT_SLOT: Result = - try_create_int_gauge("beacon_present_slot", "The present slot, according to system time"); - pub static ref PRESENT_EPOCH: Result = - try_create_int_gauge("beacon_present_epoch", "The present epoch, according to system time"); /* * Chain Head @@ -194,21 +183,6 @@ lazy_static! { /// Scrape the `beacon_chain` for metrics that are not constantly updated (e.g., the present slot, /// head state info, etc) and update the Prometheus `DEFAULT_REGISTRY`. 
pub fn scrape_for_metrics(beacon_chain: &BeaconChain) { - set_gauge_by_slot( - &PRESENT_SLOT, - beacon_chain - .read_slot_clock() - .unwrap_or_else(|| Slot::new(0)), - ); - - set_gauge_by_epoch( - &PRESENT_EPOCH, - beacon_chain - .read_slot_clock() - .map(|s| s.epoch(T::EthSpec::slots_per_epoch())) - .unwrap_or_else(|| Epoch::new(0)), - ); - scrape_head_state::( &beacon_chain.head().beacon_state, beacon_chain.head().beacon_state_root, diff --git a/beacon_node/rest_api/Cargo.toml b/beacon_node/rest_api/Cargo.toml index 100e680de..c7026014c 100644 --- a/beacon_node/rest_api/Cargo.toml +++ b/beacon_node/rest_api/Cargo.toml @@ -26,3 +26,4 @@ tokio = "0.1.17" url = "2.0" lazy_static = "1.3.0" lighthouse_metrics = { path = "../../eth2/utils/lighthouse_metrics" } +slot_clock = { path = "../../eth2/utils/slot_clock" } diff --git a/beacon_node/rest_api/src/metrics.rs b/beacon_node/rest_api/src/metrics.rs index b0f1c1b98..f0ccef5f8 100644 --- a/beacon_node/rest_api/src/metrics.rs +++ b/beacon_node/rest_api/src/metrics.rs @@ -39,6 +39,23 @@ pub fn get_prometheus(req: Request) -> ApiR .get::() .ok_or_else(|| ApiError::ServerError("DBPath extension missing".to_string()))?; + // There are two categories of metrics: + // + // - Dynamically updated: things like histograms and event counters that are updated on the + // fly. + // - Statically updated: things which are only updated at the time of the scrape (used where we + // can avoid cluttering up code with metrics calls). + // + // The `prometheus` crate has a `DEFAULT_REGISTRY` global singleton (via `lazy_static`) which + // keeps the state of all the metrics. Dynamically updated things will already be up-to-date in + // the registry (because they update themselves) however statically updated things need to be + // "scraped". + // + // We proceed by, first updating all the static metrics using `scrape_for_metrics(..)`. 
Then, + // using `prometheus::gather(..)` to collect the global `DEFAULT_REGISTRY` metrics into a + // string that can be returned via HTTP. + + slot_clock::scrape_for_metrics::(&beacon_chain.slot_clock); store::scrape_for_metrics(&db_path); beacon_chain::scrape_for_metrics(&beacon_chain); diff --git a/eth2/utils/slot_clock/Cargo.toml b/eth2/utils/slot_clock/Cargo.toml index 31a435725..c4b9df5ed 100644 --- a/eth2/utils/slot_clock/Cargo.toml +++ b/eth2/utils/slot_clock/Cargo.toml @@ -6,3 +6,5 @@ edition = "2018" [dependencies] types = { path = "../../types" } +lazy_static = "1.3.0" +lighthouse_metrics = { path = "../lighthouse_metrics" } diff --git a/eth2/utils/slot_clock/src/lib.rs b/eth2/utils/slot_clock/src/lib.rs index 7b86684fa..871743c9e 100644 --- a/eth2/utils/slot_clock/src/lib.rs +++ b/eth2/utils/slot_clock/src/lib.rs @@ -1,9 +1,15 @@ +#[macro_use] +extern crate lazy_static; + +mod metrics; mod system_time_slot_clock; mod testing_slot_clock; +use std::time::Duration; + pub use crate::system_time_slot_clock::{Error as SystemTimeSlotClockError, SystemTimeSlotClock}; pub use crate::testing_slot_clock::{Error as TestingSlotClockError, TestingSlotClock}; -use std::time::Duration; +pub use metrics::scrape_for_metrics; pub use types::Slot; pub trait SlotClock: Send + Sync + Sized { @@ -17,4 +23,6 @@ pub trait SlotClock: Send + Sync + Sized { fn present_slot(&self) -> Result, Self::Error>; fn duration_to_next_slot(&self) -> Result, Self::Error>; + + fn slot_duration_millis(&self) -> u64; } diff --git a/eth2/utils/slot_clock/src/metrics.rs b/eth2/utils/slot_clock/src/metrics.rs new file mode 100644 index 000000000..a9153a10c --- /dev/null +++ b/eth2/utils/slot_clock/src/metrics.rs @@ -0,0 +1,29 @@ +use crate::SlotClock; +pub use lighthouse_metrics::*; +use types::{EthSpec, Slot}; + +lazy_static! 
{ + pub static ref PRESENT_SLOT: Result = + try_create_int_gauge("slotclock_present_slot", "The present wall-clock slot"); + pub static ref PRESENT_EPOCH: Result = + try_create_int_gauge("slotclock_present_epoch", "The present wall-clock epoch"); + pub static ref MILLISECONDS_PER_SLOT: Result = try_create_int_gauge( + "slotclock_slot_time_milliseconds", + "The duration in milliseconds between each slot" + ); +} + +/// Update the global metrics `DEFAULT_REGISTRY` with info from the slot clock. +pub fn scrape_for_metrics(clock: &U) { + let present_slot = match clock.present_slot() { + Ok(Some(slot)) => slot, + _ => Slot::new(0), + }; + + set_gauge(&PRESENT_SLOT, present_slot.as_u64() as i64); + set_gauge( + &PRESENT_EPOCH, + present_slot.epoch(T::slots_per_epoch()).as_u64() as i64, + ); + set_gauge(&MILLISECONDS_PER_SLOT, clock.slot_duration_millis() as i64); +} diff --git a/eth2/utils/slot_clock/src/system_time_slot_clock.rs b/eth2/utils/slot_clock/src/system_time_slot_clock.rs index 7c184b02b..c493a8be8 100644 --- a/eth2/utils/slot_clock/src/system_time_slot_clock.rs +++ b/eth2/utils/slot_clock/src/system_time_slot_clock.rs @@ -52,6 +52,10 @@ impl SlotClock for SystemTimeSlotClock { fn duration_to_next_slot(&self) -> Result, Error> { duration_to_next_slot(self.genesis_seconds, self.slot_duration_seconds) } + + fn slot_duration_millis(&self) -> u64 { + self.slot_duration_seconds * 1000 + } } impl From for Error { diff --git a/eth2/utils/slot_clock/src/testing_slot_clock.rs b/eth2/utils/slot_clock/src/testing_slot_clock.rs index ab00d2baa..f741d3b87 100644 --- a/eth2/utils/slot_clock/src/testing_slot_clock.rs +++ b/eth2/utils/slot_clock/src/testing_slot_clock.rs @@ -40,6 +40,10 @@ impl SlotClock for TestingSlotClock { fn duration_to_next_slot(&self) -> Result, Error> { Ok(Some(Duration::from_secs(1))) } + + fn slot_duration_millis(&self) -> u64 { + 0 + } } #[cfg(test)] From 7165598b7fe3346ece3420bf808d14391106295a Mon Sep 17 00:00:00 2001 From: Paul Hauner Date: Mon, 
12 Aug 2019 18:19:50 +1000 Subject: [PATCH 069/305] Add lighthouse_metrics gather fn --- beacon_node/rest_api/src/metrics.rs | 16 +++++++++------- eth2/utils/lighthouse_metrics/src/lib.rs | 4 ++++ 2 files changed, 13 insertions(+), 7 deletions(-) diff --git a/beacon_node/rest_api/src/metrics.rs b/beacon_node/rest_api/src/metrics.rs index f0ccef5f8..064359337 100644 --- a/beacon_node/rest_api/src/metrics.rs +++ b/beacon_node/rest_api/src/metrics.rs @@ -46,20 +46,22 @@ pub fn get_prometheus(req: Request) -> ApiR // - Statically updated: things which are only updated at the time of the scrape (used where we // can avoid cluttering up code with metrics calls). // - // The `prometheus` crate has a `DEFAULT_REGISTRY` global singleton (via `lazy_static`) which - // keeps the state of all the metrics. Dynamically updated things will already be up-to-date in - // the registry (because they update themselves) however statically updated things need to be - // "scraped". + // The `lighthouse_metrics` crate has a `DEFAULT_REGISTRY` global singleton (via `lazy_static`) + // which keeps the state of all the metrics. Dynamically updated things will already be + // up-to-date in the registry (because they update themselves) however statically updated + // things need to be "scraped". // // We proceed by, first updating all the static metrics using `scrape_for_metrics(..)`. Then, - // using `prometheus::gather(..)` to collect the global `DEFAULT_REGISTRY` metrics into a - // string that can be returned via HTTP. + // using `lighthouse_metrics::gather(..)` to collect the global `DEFAULT_REGISTRY` metrics into + // a string that can be returned via HTTP. 
slot_clock::scrape_for_metrics::(&beacon_chain.slot_clock); store::scrape_for_metrics(&db_path); beacon_chain::scrape_for_metrics(&beacon_chain); - encoder.encode(&prometheus::gather(), &mut buffer).unwrap(); + encoder + .encode(&lighthouse_metrics::gather(), &mut buffer) + .unwrap(); String::from_utf8(buffer) .map(|string| success_response(Body::from(string))) diff --git a/eth2/utils/lighthouse_metrics/src/lib.rs b/eth2/utils/lighthouse_metrics/src/lib.rs index a8656d017..c9e66e971 100644 --- a/eth2/utils/lighthouse_metrics/src/lib.rs +++ b/eth2/utils/lighthouse_metrics/src/lib.rs @@ -2,6 +2,10 @@ use prometheus::{HistogramOpts, HistogramTimer, Opts}; pub use prometheus::{Histogram, IntCounter, IntGauge, Result}; +pub fn gather() -> Vec { + prometheus::gather() +} + pub fn try_create_int_counter(name: &str, help: &str) -> Result { let opts = Opts::new(name, help); let counter = IntCounter::with_opts(opts)?; From d5d60874e5e38368e8e538f5d4f4a89b30c7423a Mon Sep 17 00:00:00 2001 From: Paul Hauner Date: Mon, 12 Aug 2019 18:20:05 +1000 Subject: [PATCH 070/305] Remove http args --- beacon_node/src/main.rs | 22 ---------------------- 1 file changed, 22 deletions(-) diff --git a/beacon_node/src/main.rs b/beacon_node/src/main.rs index 2e3ad0691..9a52f2638 100644 --- a/beacon_node/src/main.rs +++ b/beacon_node/src/main.rs @@ -128,28 +128,6 @@ fn main() { .help("Listen port for RPC endpoint.") .takes_value(true), ) - /* - * HTTP server parameters. 
- */ - .arg( - Arg::with_name("http") - .long("http") - .help("Enable the HTTP server.") - .takes_value(false), - ) - .arg( - Arg::with_name("http-address") - .long("http-address") - .value_name("Address") - .help("Listen address for the HTTP server.") - .takes_value(true), - ) - .arg( - Arg::with_name("http-port") - .long("http-port") - .help("Listen port for the HTTP server.") - .takes_value(true), - ) /* Client related arguments */ .arg( Arg::with_name("api") From 5d4d2f35e1dc59b2566af5b547a1b6132b940454 Mon Sep 17 00:00:00 2001 From: Age Manning Date: Mon, 12 Aug 2019 22:07:59 +1000 Subject: [PATCH 071/305] Initial sync re-write. WIP --- beacon_node/eth2-libp2p/src/rpc/handler.rs | 36 +- beacon_node/network/src/message_handler.rs | 121 +++---- beacon_node/network/src/sync/manager.rs | 283 ++++++++++++++++ beacon_node/network/src/sync/simple_sync.rs | 349 +++++--------------- 4 files changed, 405 insertions(+), 384 deletions(-) create mode 100644 beacon_node/network/src/sync/manager.rs diff --git a/beacon_node/eth2-libp2p/src/rpc/handler.rs b/beacon_node/eth2-libp2p/src/rpc/handler.rs index dbc32c5a4..a69cd0cda 100644 --- a/beacon_node/eth2-libp2p/src/rpc/handler.rs +++ b/beacon_node/eth2-libp2p/src/rpc/handler.rs @@ -1,4 +1,4 @@ -use super::methods::{RPCErrorResponse, RPCResponse, RequestId}; +use super::methods::RequestId; use super::protocol::{RPCError, RPCProtocol, RPCRequest}; use super::RPCEvent; use crate::rpc::protocol::{InboundFramed, OutboundFramed}; @@ -13,8 +13,8 @@ use smallvec::SmallVec; use std::time::{Duration, Instant}; use tokio_io::{AsyncRead, AsyncWrite}; -/// The time (in seconds) before a substream that is awaiting a response times out. -pub const RESPONSE_TIMEOUT: u64 = 9; +/// The time (in seconds) before a substream that is awaiting a response from the user times out. +pub const RESPONSE_TIMEOUT: u64 = 10; /// Implementation of `ProtocolsHandler` for the RPC protocol. 
pub struct RPCHandler @@ -314,7 +314,7 @@ where Ok(Async::Ready(response)) => { if let Some(response) = response { return Ok(Async::Ready(ProtocolsHandlerEvent::Custom( - build_response(rpc_event, response), + RPCEvent::Response(rpc_event.id(), response), ))); } else { // stream closed early @@ -365,31 +365,3 @@ where Ok(Async::NotReady) } } - -/// Given a response back from a peer and the request that sent it, construct a response to send -/// back to the user. This allows for some data manipulation of responses given requests. -fn build_response(rpc_event: RPCEvent, rpc_response: RPCErrorResponse) -> RPCEvent { - let id = rpc_event.id(); - - // handle the types of responses - match rpc_response { - RPCErrorResponse::Success(response) => { - match response { - // if the response is block roots, tag on the extra request data - RPCResponse::BeaconBlockBodies(mut resp) => { - if let RPCEvent::Request(_id, RPCRequest::BeaconBlockBodies(bodies_req)) = - rpc_event - { - resp.block_roots = Some(bodies_req.block_roots); - } - RPCEvent::Response( - id, - RPCErrorResponse::Success(RPCResponse::BeaconBlockBodies(resp)), - ) - } - _ => RPCEvent::Response(id, RPCErrorResponse::Success(response)), - } - } - _ => RPCEvent::Response(id, rpc_response), - } -} diff --git a/beacon_node/network/src/message_handler.rs b/beacon_node/network/src/message_handler.rs index b86dcb969..6a9a40369 100644 --- a/beacon_node/network/src/message_handler.rs +++ b/beacon_node/network/src/message_handler.rs @@ -14,9 +14,7 @@ use slog::{debug, trace, warn}; use ssz::{Decode, DecodeError}; use std::sync::Arc; use tokio::sync::mpsc; -use types::{ - Attestation, AttesterSlashing, BeaconBlock, BeaconBlockHeader, ProposerSlashing, VoluntaryExit, -}; +use types::{Attestation, AttesterSlashing, BeaconBlock, ProposerSlashing, VoluntaryExit}; /// Handles messages received from the network and client and organises syncing. 
pub struct MessageHandler { @@ -56,9 +54,9 @@ impl MessageHandler { let (handler_send, handler_recv) = mpsc::unbounded_channel(); // Initialise sync and begin processing in thread - // generate the Message handler let sync = SimpleSync::new(beacon_chain.clone(), &log); + // generate the Message handler let mut handler = MessageHandler { _chain: beacon_chain.clone(), sync, @@ -66,7 +64,7 @@ impl MessageHandler { log: log.clone(), }; - // spawn handler task + // spawn handler task and move the message handler instance into the spawned thread executor.spawn( handler_recv .for_each(move |msg| Ok(handler.handle_message(msg))) @@ -89,11 +87,11 @@ impl MessageHandler { HandlerMessage::PeerDisconnected(peer_id) => { self.sync.on_disconnect(peer_id); } - // we have received an RPC message request/response + // An RPC message request/response has been received HandlerMessage::RPC(peer_id, rpc_event) => { self.handle_rpc_message(peer_id, rpc_event); } - // we have received an RPC message request/response + // An RPC message request/response has been received HandlerMessage::PubsubMessage(peer_id, gossip) => { self.handle_gossip(peer_id, gossip); } @@ -106,7 +104,7 @@ impl MessageHandler { fn handle_rpc_message(&mut self, peer_id: PeerId, rpc_message: RPCEvent) { match rpc_message { RPCEvent::Request(id, req) => self.handle_rpc_request(peer_id, id, req), - RPCEvent::Response(_id, resp) => self.handle_rpc_response(peer_id, resp), + RPCEvent::Response(id, resp) => self.handle_rpc_response(peer_id, id, resp), RPCEvent::Error(id, error) => self.handle_rpc_error(peer_id, id, error), } } @@ -121,46 +119,39 @@ impl MessageHandler { &mut self.network_context, ), RPCRequest::Goodbye(goodbye_reason) => self.sync.on_goodbye(peer_id, goodbye_reason), - RPCRequest::BeaconBlockRoots(request) => self.sync.on_beacon_block_roots_request( + RPCRequest::BeaconBlocks(request) => self.sync.on_beacon_blocks_request( peer_id, request_id, request, &mut self.network_context, ), - 
RPCRequest::BeaconBlockHeaders(request) => self.sync.on_beacon_block_headers_request( + RPCRequest::RecentBeaconBlocks(request) => self.sync.on_recent_beacon_blocks_request( peer_id, request_id, request, &mut self.network_context, ), - RPCRequest::BeaconBlockBodies(request) => self.sync.on_beacon_block_bodies_request( - peer_id, - request_id, - request, - &mut self.network_context, - ), - RPCRequest::BeaconChainState(_) => { - // We do not implement this endpoint, it is not required and will only likely be - // useful for light-client support in later phases. - warn!(self.log, "BeaconChainState RPC call is not supported."); - } } } /// An RPC response has been received from the network. // we match on id and ignore responses past the timeout. - fn handle_rpc_response(&mut self, peer_id: PeerId, error_response: RPCErrorResponse) { + fn handle_rpc_response( + &mut self, + peer_id: PeerId, + request_id: RequestId, + error_response: RPCErrorResponse, + ) { // an error could have occurred. - // TODO: Handle Error gracefully match error_response { RPCErrorResponse::InvalidRequest(error) => { - warn!(self.log, "";"peer" => format!("{:?}", peer_id), "Invalid Request" => error.as_string()) + warn!(self.log, "Peer indicated invalid request";"peer_id" => format!("{:?}", peer_id), "error" => error.as_string()) } RPCErrorResponse::ServerError(error) => { - warn!(self.log, "";"peer" => format!("{:?}", peer_id), "Server Error" => error.as_string()) + warn!(self.log, "Peer internal server error";"peer_id" => format!("{:?}", peer_id), "error" => error.as_string()) } RPCErrorResponse::Unknown(error) => { - warn!(self.log, "";"peer" => format!("{:?}", peer_id), "Unknown Error" => error.as_string()) + warn!(self.log, "Unknown peer error";"peer" => format!("{:?}", peer_id), "error" => error.as_string()) } RPCErrorResponse::Success(response) => { match response { @@ -171,49 +162,37 @@ impl MessageHandler { &mut self.network_context, ); } - RPCResponse::BeaconBlockRoots(response) => { - 
self.sync.on_beacon_block_roots_response( - peer_id, - response, - &mut self.network_context, - ); - } - RPCResponse::BeaconBlockHeaders(response) => { - match self.decode_block_headers(response) { - Ok(decoded_block_headers) => { - self.sync.on_beacon_block_headers_response( + RPCResponse::BeaconBlocks(response) => { + match self.decode_beacon_blocks(response) { + Ok(beacon_blocks) => { + self.sync.on_beacon_blocks_response( peer_id, - decoded_block_headers, + beacon_blocks, &mut self.network_context, ); } - Err(_e) => { - warn!(self.log, "Peer sent invalid block headers";"peer" => format!("{:?}", peer_id)) + Err(e) => { + // TODO: Down-vote Peer + warn!(self.log, "Peer sent invalid BEACON_BLOCKS response";"peer" => format!("{:?}", peer_id), "error" => format!("{:?}", e)); } } } - RPCResponse::BeaconBlockBodies(response) => { - match self.decode_block_bodies(response) { - Ok(decoded_block_bodies) => { - self.sync.on_beacon_block_bodies_response( + RPCResponse::RecentBeaconBlocks(response) => { + match self.decode_beacon_blocks(response) { + Ok(beacon_blocks) => { + self.sync.on_recent_beacon_blocks_response( + request_id, peer_id, - decoded_block_bodies, + beacon_blocks, &mut self.network_context, ); } - Err(_e) => { - warn!(self.log, "Peer sent invalid block bodies";"peer" => format!("{:?}", peer_id)) + Err(e) => { + // TODO: Down-vote Peer + warn!(self.log, "Peer sent invalid BEACON_BLOCKS response";"peer" => format!("{:?}", peer_id), "error" => format!("{:?}", e)); } } } - RPCResponse::BeaconChainState(_) => { - // We do not implement this endpoint, it is not required and will only likely be - // useful for light-client support in later phases. - // - // Theoretically, we shouldn't reach this code because we should never send a - // beacon state RPC request. 
- warn!(self.log, "BeaconChainState RPC call is not supported."); - } } } } @@ -334,36 +313,22 @@ impl MessageHandler { /* Req/Resp Domain Decoding */ - /// Verifies and decodes the ssz-encoded block bodies received from peers. - fn decode_block_bodies( + /// Verifies and decodes an ssz-encoded list of `BeaconBlock`s. This list may contain empty + /// entries encoded with an SSZ NULL. + fn decode_beacon_blocks( &self, - bodies_response: BeaconBlockBodiesResponse, - ) -> Result, DecodeError> { + beacon_blocks: &[u8], + ) -> Result>, DecodeError> { //TODO: Implement faster block verification before decoding entirely - let block_bodies = Vec::from_ssz_bytes(&bodies_response.block_bodies)?; - Ok(DecodedBeaconBlockBodiesResponse { - block_roots: bodies_response - .block_roots - .expect("Responses must have associated roots"), - block_bodies, - }) - } - - /// Verifies and decodes the ssz-encoded block headers received from peers. - fn decode_block_headers( - &self, - headers_response: BeaconBlockHeadersResponse, - ) -> Result, DecodeError> { - //TODO: Implement faster header verification before decoding entirely - Vec::from_ssz_bytes(&headers_response.headers) + Vec::from_ssz_bytes(&beacon_blocks) } } -// TODO: RPC Rewrite makes this struct fairly pointless +/// Wraps a Network Channel to employ various RPC/Sync related network functionality. pub struct NetworkContext { /// The network channel to relay messages to the Network service. network_send: mpsc::UnboundedSender, - /// The `MessageHandler` logger. + /// Logger for the `NetworkContext`. 
log: slog::Logger, } @@ -388,7 +353,7 @@ impl NetworkContext { &mut self, peer_id: PeerId, request_id: RequestId, - rpc_response: RPCResponse, + rpc_response: RPCErrorResponse, ) { self.send_rpc_event( peer_id, diff --git a/beacon_node/network/src/sync/manager.rs b/beacon_node/network/src/sync/manager.rs new file mode 100644 index 000000000..52c1a72c6 --- /dev/null +++ b/beacon_node/network/src/sync/manager.rs @@ -0,0 +1,283 @@ + +const MAXIMUM_BLOCKS_PER_REQUEST: usize = 10; +const SIMULTANEOUS_REQUESTS: usize = 10; +use super::simple_sync::FUTURE_SLOT_TOLERANCE; + +struct Chunk { + id: usize, + start_slot: Slot, + end_slot: Slot, + } + + +struct CompletedChunk { + peer_id: PeerId, + chunk: Chunk, + blocks: Vec, +} + +struct ProcessedChunk { + peer_id: PeerId, + chunk: Chunk, +} + +#[derive(PartialEq)] +pub enum SyncState { + Idle, + Downloading, + ColdSync { + max_wanted_slot: Slot, + max_wanted_hash: Hash256, + } +} + +pub enum SyncManagerState { + RequestBlocks(peer_id, BeaconBlockRequest), + Stalled, + Idle, +} + +pub struct PeerSyncInfo { + peer_id: PeerId, + fork_version: [u8,4], + finalized_root: Hash256, + finalized_epoch: Epoch, + head_root: Hash256, + head_slot: Slot, + requested_slot_skip: Option<(Slot, usize)>, +} + +pub(crate) struct SyncManager { + /// A reference to the underlying beacon chain. + chain: Arc>, + /// A mapping of Peers to their respective PeerSyncInfo. + available_peers: HashMap, + wanted_chunks: Vec, + pending_chunks: HashMap, + completed_chunks: Vec, + processed_chunks: Vec, // ordered + multi_peer_sections: HashMap + + current_requests: usize, + latest_wanted_slot: Option, + sync_status: SyncStatus, + to_process_chunk_id: usize, + log: Logger, + +} + +impl SyncManager { + /// Adds a sync-able peer and determines which blocks to download given the current state of + /// the chain, known peers and currently requested blocks. 
+ fn add_sync_peer(&mut self, peer_id: PeerId, remote: PeerSyncInfo, network &mut NetworkContext) { + + let local = PeerSyncInfo::from(&self.chain); + let remote_finalized_slot = remote.finalized_epoch.start_slot(T::EthSpec::slots_per_epoch()); + let local_finalized_slot = local.finalized_epoch.start_slot(T::EthSpec::slots_per_epoch()); + + // cold sync + if remote_finalized_slot > local.head_slot { + if let SyncState::Idle || SyncState::Downloading = self.sync_state { + info!(self.log, "Cold Sync Started", "start_slot" => local.head_slot, "latest_known_finalized" => remote_finalized_slot); + self.sync_state = SyncState::ColdSync{Slot::from(0), remote.finalized_hash} + } + + if let SyncState::ColdSync{max_wanted_slot, max_wanted_hjash } = self.sync_state { + + // We don't assume that our current head is the canonical chain. So we request blocks from + // our last finalized slot to ensure we are on the finalized chain. + if max_wanted_slot < remote_finalized_slot { + let remaining_blocks = remote_finalized_slot - max_wanted_slot; + for chunk in (0..remaining_blocks/MAXIMUM_BLOCKS_PER_REQUEST) { + self.wanted_chunks.push( + Chunk { + id: self.current_chunk_id, + previous_chunk: self.curent_chunk_id.saturating_sub(1), + start_slot: chunk*MAXIMUM_BLOCKS_PER_REQUEST + self.last_wanted_slot, + end_slot: (section+1)*MAXIMUM_BLOCKS_PER_REQUEST +self.last_wanted_slot, + }) + self.current_chunk_id +=1; + } + + // add any extra partial chunks + self.pending_section.push( Section { + start_slot: (remaining_blocks/MAXIMUM_BLOCKS_PER_REQUEST) + 1, + end_slot: remote_finalized_slot, + }) + self.current_chunk_id +=1; + + info!(self.log, "Cold Sync Updated", "start_slot" => local.head_slot, "latest_known_finalized" => remote_finalized_slot); + + self.sync_state = SyncState::ColdSync{remote_finalized_slot, remote.finalized_hash} + } + } + + else { // hot sync + if remote_head_slot > self.chain.head().beacon_state.slot { + if let SyncState::Idle = self.sync_state { + self.sync_state 
= SyncState::Downloading + info!(self.log, "Sync Started", "start_slot" => local.head_slot, "latest_known_head" => remote.head_slot.as_u64()); + } + self.latest_known_slot = remote_head_slot; + //TODO Build requests. + } + } + + available_peers.push(remote); + + } + + pub fn add_blocks(&mut self, chunk_id: RequestId, peer_id: PeerId, blocks: Vec) { + + if SyncState::ColdSync{max_wanted_slot, max_wanted_hash} = self.sync_state { + + let chunk = match self.pending_chunks.remove(&peer_id) { + Some(chunks) => { + match chunks.find(|chunk| chunk.id == chunk_id) { + Some(chunk) => chunk, + None => { + warn!(self.log, "Received blocks for an unknown chunk"; + "peer"=> peer_id); + return; + } + } + }, + None => { + warn!(self.log, "Received blocks without a request"; + "peer"=> peer_id); + return; + } + }; + + // add to completed + self.current_requests -= 1; + self.completed_chunks.push(CompletedChunk(peer_id, Chunk)); + } + } + + pub fn inject_error(id: RequestId, peer_id) { + if let SyncState::ColdSync{ _max_wanted_slot, _max_wanted_hash } { + match self.pending_chunks.get(&peer_id) { + Some(chunks) => { + if let Some(pos) = chunks.iter().position(|c| c.id == id) { + chunks.remove(pos); + } + }, + None => { + debug!(self.log, + "Received an error for an unknown request"; + "request_id" => id, + "peer" => peer_id + ); + } + } + } + } + + pub fn poll(&mut self) -> SyncManagerState { + + // if cold sync + if let SyncState::ColdSync(waiting_slot, max_wanted_slot, max_wanted_hash) = self.sync_state { + + // Try to process completed chunks + for completed_chunk in self.completed_chunks { + let chunk = completed_chunk.1; + let last_chunk_id = { + let no_processed_chunks = self.processed_chunks.len(); + if elements == 0 { 0 } else { self.processed_chunks[no_processed_chunks].id } + }; + if chunk.id == last_chunk_id + 1 { + // try and process the chunk + for block in chunk.blocks { + let processing_result = self.chain.process_block(block.clone()); + + if let Ok(outcome) = 
processing_result { + match outcome { + BlockProcessingOutCome::Processed { block_root} => { + // block successfully processed + }, + BlockProcessingOutcome::BlockIsAlreadyKnown => { + warn!( + self.log, "Block Already Known"; + "source" => source, + "sync" => "Cold Sync", + "parent_root" => format!("{}", parent), + "baby_block_slot" => block.slot, + "peer" => format!("{:?}", chunk.0), + ); + }, + _ => { + // An error has occurred + // This could be due to the previous chunk or the current chunk. + // Re-issue both. + warn!( + self.log, "Faulty Chunk"; + "source" => source, + "sync" => "Cold Sync", + "parent_root" => format!("{}", parent), + "baby_block_slot" => block.slot, + "peer" => format!("{:?}", chunk.0), + "outcome" => format!("{:?}", outcome), + ); + + // re-issue both chunks + // if both are the same peer. Downgrade the peer. + let past_chunk = self.processed_chunks.pop() + self.wanted_chunks.insert(0, chunk.clone()); + self.wanted_chunks.insert(0, past_chunk.clone()); + if chunk.0 == past_chunk.peer_id { + // downgrade peer + return SyncManagerState::DowngradePeer(chunk.0); + } + break; + } + } + } + } + // chunk successfully processed + debug!(self.log, + "Chunk Processed"; + "id" => chunk.id + "start_slot" => chunk.start_slot, + "end_slot" => chunk.end_slot, + ); + self.processed_chunks.push(chunk); + } + } + + // chunks completed, update the state + self.sync_state = SyncState::ColdSync{waiting_slot, max_wanted_slot, max_wanted_hash}; + + // Remove stales + + // Spawn requests + if self.current_requests <= SIMULTANEOUS_REQUESTS { + if !self.wanted_chunks.is_empty() { + let chunk = self.wanted_chunks.remove(0); + for n in (0..self.peers.len()).rev() { + let peer = self.peers.swap_remove(n); + let peer_finalized_slot = peer.finalized_epoch.start_slot(T::EthSpec::slots_per_epoch()); + if peer_finalized_slot >= chunk.end_slot { + *self.pending.chunks.entry(&peer_id).or_insert_with(|| Vec::new).push(chunk); + self.active_peers.push(peer); + 
self.current_requests +=1; + let block_request = BeaconBlockRequest { + head_block_root, + start_slot: chunk.start_slot, + count: chunk.end_slot - chunk.start_slot + step: 1 + } + return SyncManagerState::BlockRequest(peer, block_request); + } + } + // no peers for this chunk + self.wanted_chunks.push(chunk); + return SyncManagerState::Stalled + } + } + } + + // if hot sync + return SyncManagerState::Idle + + } diff --git a/beacon_node/network/src/sync/simple_sync.rs b/beacon_node/network/src/sync/simple_sync.rs index c3271888a..e3d3d7cef 100644 --- a/beacon_node/network/src/sync/simple_sync.rs +++ b/beacon_node/network/src/sync/simple_sync.rs @@ -17,7 +17,7 @@ use types::{ /// The number of slots that we can import blocks ahead of us, before going into full Sync mode. const SLOT_IMPORT_TOLERANCE: u64 = 100; -/// The amount of seconds a block (or partial block) may exist in the import queue. +/// The amount of seconds a block may exist in the import queue. const QUEUE_STALE_SECS: u64 = 100; /// If a block is more than `FUTURE_SLOT_TOLERANCE` slots ahead of our slot clock, we drop it. @@ -30,23 +30,23 @@ const SHOULD_NOT_FORWARD_GOSSIP_BLOCK: bool = false; /// Keeps track of syncing information for known connected peers. 
#[derive(Clone, Copy, Debug)] pub struct PeerSyncInfo { - network_id: u8, - chain_id: u64, - latest_finalized_root: Hash256, - latest_finalized_epoch: Epoch, - best_root: Hash256, - best_slot: Slot, + fork_version: [u8,4], + finalized_root: Hash256, + finalized_epoch: Epoch, + head_root: Hash256, + head_slot: Slot, + requested_slot_skip: Option<(Slot, usize)>, } impl From for PeerSyncInfo { fn from(hello: HelloMessage) -> PeerSyncInfo { PeerSyncInfo { - network_id: hello.network_id, - chain_id: hello.chain_id, - latest_finalized_root: hello.latest_finalized_root, - latest_finalized_epoch: hello.latest_finalized_epoch, - best_root: hello.best_root, - best_slot: hello.best_slot, + fork_version: hello.fork_version, + finalized_root: hello.finalized_root, + finalized_epoch: hello.finalized_epoch, + head_root: hello.head_root, + head_slot: hello.head_slot, + requested_slot_skip: None, } } } @@ -71,8 +71,6 @@ pub struct SimpleSync { chain: Arc>, /// A mapping of Peers to their respective PeerSyncInfo. known_peers: HashMap, - /// A queue to allow importing of blocks - import_queue: ImportQueue, /// The current state of the syncing protocol. state: SyncState, log: slog::Logger, @@ -178,8 +176,8 @@ impl SimpleSync { let start_slot = |epoch: Epoch| epoch.start_slot(T::EthSpec::slots_per_epoch()); - if local.network_id != remote.network_id { - // The node is on a different network, disconnect them. + if local.fork_version != remote.fork_version { + // The node is on a different network/fork, disconnect them. 
info!( self.log, "HandshakeFailure"; "peer" => format!("{:?}", peer_id), @@ -187,9 +185,9 @@ impl SimpleSync { ); network.disconnect(peer_id.clone(), GoodbyeReason::IrrelevantNetwork); - } else if remote.latest_finalized_epoch <= local.latest_finalized_epoch - && remote.latest_finalized_root != Hash256::zero() - && local.latest_finalized_root != Hash256::zero() + } else if remote.finalized_epoch <= local.finalized_epoch + && remote.finalized_root != Hash256::zero() + && local.finalized_root != Hash256::zero() && (self.root_at_slot(start_slot(remote.latest_finalized_epoch)) != Some(remote.latest_finalized_root)) { @@ -248,22 +246,37 @@ impl SimpleSync { "remote_latest_finalized_epoch" => remote.latest_finalized_epoch, ); - let start_slot = local - .latest_finalized_epoch - .start_slot(T::EthSpec::slots_per_epoch()); - let required_slots = remote.best_slot - start_slot; - self.request_block_roots( - peer_id, - BeaconBlockRootsRequest { - start_slot, - count: required_slots.as_u64(), - }, - network, - ); + self.process_sync(); } } + self.proess_sync(&mut self) { + loop { + match self.sync_manager.poll() { + SyncManagerState::RequestBlocks(peer_id, req) { + debug!( + self.log, + "RPCRequest(BeaconBlockBodies)"; + "count" => req.block_roots.len(), + "peer" => format!("{:?}", peer_id) + ); + network.send_rpc_request(peer_id.clone(), RPCRequest::BeaconBlocks(req)); + }, + SyncManagerState::Stalled { + // need more peers to continue sync + warn!(self.log, "No useable peers for sync"); + break; + }, + SyncManagerState::Idle { + // nothing to do + break; + } + } + } + } + + fn root_at_slot(&self, target_slot: Slot) -> Option { self.chain .rev_iter_block_roots(target_slot) @@ -272,213 +285,27 @@ impl SimpleSync { .map(|(root, _slot)| root) } - /// Handle a `BeaconBlockRoots` request from the peer. - pub fn on_beacon_block_roots_request( + /// Handle a `BeaconBlocks` request from the peer. 
+ pub fn on_beacon_blocks_request( &mut self, peer_id: PeerId, request_id: RequestId, - req: BeaconBlockRootsRequest, + req: BeaconBlocksRequest, network: &mut NetworkContext, ) { let state = &self.chain.head().beacon_state; debug!( self.log, - "BlockRootsRequest"; + "BeaconBlocksRequest"; "peer" => format!("{:?}", peer_id), "count" => req.count, "start_slot" => req.start_slot, ); - let mut roots: Vec = self - .chain - .rev_iter_block_roots(std::cmp::min(req.start_slot + req.count, state.slot)) - .take_while(|(_root, slot)| req.start_slot <= *slot) - .map(|(block_root, slot)| BlockRootSlot { slot, block_root }) - .collect(); - - if roots.len() as u64 != req.count { - debug!( - self.log, - "BlockRootsRequest"; - "peer" => format!("{:?}", peer_id), - "msg" => "Failed to return all requested hashes", - "start_slot" => req.start_slot, - "current_slot" => self.chain.present_slot(), - "requested" => req.count, - "returned" => roots.len(), - ); - } - - roots.reverse(); - roots.dedup_by_key(|brs| brs.block_root); - - network.send_rpc_response( - peer_id, - request_id, - RPCResponse::BeaconBlockRoots(BeaconBlockRootsResponse { roots }), - ) - } - - /// Handle a `BeaconBlockRoots` response from the peer. - pub fn on_beacon_block_roots_response( - &mut self, - peer_id: PeerId, - res: BeaconBlockRootsResponse, - network: &mut NetworkContext, - ) { - debug!( - self.log, - "BlockRootsResponse"; - "peer" => format!("{:?}", peer_id), - "count" => res.roots.len(), - ); - - if res.roots.is_empty() { - warn!( - self.log, - "Peer returned empty block roots response"; - "peer_id" => format!("{:?}", peer_id) - ); - return; - } - - // The wire protocol specifies that slots must be in ascending order. 
- if !res.slots_are_ascending() { - warn!( - self.log, - "Peer returned block roots response with bad slot ordering"; - "peer_id" => format!("{:?}", peer_id) - ); - return; - } - - let new_roots = self - .import_queue - .enqueue_block_roots(&res.roots, peer_id.clone()); - - // No new roots means nothing to do. - // - // This check protects against future panics. - if new_roots.is_empty() { - return; - } - - // Determine the first (earliest) and last (latest) `BlockRootSlot` items. - // - // This logic relies upon slots to be in ascending order, which is enforced earlier. - let first = new_roots.first().expect("Non-empty list must have first"); - let last = new_roots.last().expect("Non-empty list must have last"); - - // Request all headers between the earliest and latest new `BlockRootSlot` items. - self.request_block_headers( - peer_id, - BeaconBlockHeadersRequest { - start_root: first.block_root, - start_slot: first.slot, - max_headers: (last.slot - first.slot + 1).as_u64(), - skip_slots: 0, - }, - network, - ) - } - - /// Handle a `BeaconBlockHeaders` request from the peer. - pub fn on_beacon_block_headers_request( - &mut self, - peer_id: PeerId, - request_id: RequestId, - req: BeaconBlockHeadersRequest, - network: &mut NetworkContext, - ) { - let state = &self.chain.head().beacon_state; - - debug!( - self.log, - "BlockHeadersRequest"; - "peer" => format!("{:?}", peer_id), - "count" => req.max_headers, - ); - - let count = req.max_headers; - - // Collect the block roots. 
- let mut roots: Vec = self - .chain - .rev_iter_block_roots(std::cmp::min(req.start_slot + count, state.slot)) - .take_while(|(_root, slot)| req.start_slot <= *slot) - .map(|(root, _slot)| root) - .collect(); - - roots.reverse(); - roots.dedup(); - - let headers: Vec = roots - .into_iter() - .step_by(req.skip_slots as usize + 1) - .filter_map(|root| { - let block = self - .chain - .store - .get::>(&root) - .ok()?; - Some(block?.block_header()) - }) - .collect(); - - // ssz-encode the headers - let headers = headers.as_ssz_bytes(); - - network.send_rpc_response( - peer_id, - request_id, - RPCResponse::BeaconBlockHeaders(BeaconBlockHeadersResponse { headers }), - ) - } - - /// Handle a `BeaconBlockHeaders` response from the peer. - pub fn on_beacon_block_headers_response( - &mut self, - peer_id: PeerId, - headers: Vec, - network: &mut NetworkContext, - ) { - debug!( - self.log, - "BlockHeadersResponse"; - "peer" => format!("{:?}", peer_id), - "count" => headers.len(), - ); - - if headers.is_empty() { - warn!( - self.log, - "Peer returned empty block headers response. PeerId: {:?}", peer_id - ); - return; - } - - // Enqueue the headers, obtaining a list of the roots of the headers which were newly added - // to the queue. - let block_roots = self.import_queue.enqueue_headers(headers, peer_id.clone()); - - if !block_roots.is_empty() { - self.request_block_bodies(peer_id, BeaconBlockBodiesRequest { block_roots }, network); - } - } - - /// Handle a `BeaconBlockBodies` request from the peer. 
- pub fn on_beacon_block_bodies_request( - &mut self, - peer_id: PeerId, - request_id: RequestId, - req: BeaconBlockBodiesRequest, - network: &mut NetworkContext, - ) { - let block_bodies: Vec> = req - .block_roots - .iter() - .filter_map(|root| { + let blocks = Vec> = self + .chain.rev_iter_block_roots().filter(|(_root, slot) req.start_slot <= slot && req.start_slot + req.count >= slot).take_while(|(_root, slot) req.start_slot <= *slot) + .filter_map(|root, slot| { if let Ok(Some(block)) = self.chain.store.get::>(root) { Some(block.body) } else { @@ -494,59 +321,49 @@ impl SimpleSync { }) .collect(); - debug!( - self.log, - "BlockBodiesRequest"; - "peer" => format!("{:?}", peer_id), - "requested" => req.block_roots.len(), - "returned" => block_bodies.len(), - ); + roots.reverse(); + roots.dedup_by_key(|brs| brs.block_root); - let bytes = block_bodies.as_ssz_bytes(); + if roots.len() as u64 != req.count { + debug!( + self.log, + "BeaconBlocksRequest"; + "peer" => format!("{:?}", peer_id), + "msg" => "Failed to return all requested hashes", + "start_slot" => req.start_slot, + "current_slot" => self.chain.present_slot(), + "requested" => req.count, + "returned" => roots.len(), + ); + } network.send_rpc_response( peer_id, request_id, - RPCResponse::BeaconBlockBodies(BeaconBlockBodiesResponse { - block_bodies: bytes, - block_roots: None, - }), + RPCResponse::BeaconBlocks(blocks.as_ssz_bytes()), ) } - /// Handle a `BeaconBlockBodies` response from the peer. - pub fn on_beacon_block_bodies_response( + + /// Handle a `BeaconBlocks` response from the peer. 
+ pub fn on_beacon_blocks_response( &mut self, peer_id: PeerId, - res: DecodedBeaconBlockBodiesResponse, + res: Vec>, network: &mut NetworkContext, ) { debug!( self.log, - "BlockBodiesResponse"; + "BeaconBlocksResponse"; "peer" => format!("{:?}", peer_id), "count" => res.block_bodies.len(), ); - if !res.block_bodies.is_empty() { - // Import all blocks to queue - let last_root = self - .import_queue - .enqueue_bodies(res.block_bodies, peer_id.clone()); - - // Attempt to process all received bodies by recursively processing the latest block - if let Some(root) = last_root { - if let Some(BlockProcessingOutcome::Processed { .. }) = - self.attempt_process_partial_block(peer_id, root, network, &"rpc") - { - // If processing is successful remove from `import_queue` - self.import_queue.remove(root); - } - } + if !res.is_empty() { + self.sync_manager.add_blocks(peer_id, blocks); } - // Clear out old entries - self.import_queue.remove_stale(); + self.process_sync(); } /// Process a gossip message declaring a new block. @@ -679,22 +496,6 @@ impl SimpleSync { network.send_rpc_request(peer_id.clone(), RPCRequest::BeaconBlockHeaders(req)); } - /// Request some `BeaconBlockBodies` from the remote peer. - fn request_block_bodies( - &mut self, - peer_id: PeerId, - req: BeaconBlockBodiesRequest, - network: &mut NetworkContext, - ) { - debug!( - self.log, - "RPCRequest(BeaconBlockBodies)"; - "count" => req.block_roots.len(), - "peer" => format!("{:?}", peer_id) - ); - - network.send_rpc_request(peer_id.clone(), RPCRequest::BeaconBlockBodies(req)); - } /// Returns `true` if `self.chain` has not yet processed this block. 
pub fn chain_has_seen_block(&self, block_root: &Hash256) -> bool { From 24b2f83713f5e3fd5147e99be44a5f842a6332fb Mon Sep 17 00:00:00 2001 From: Paul Hauner Date: Tue, 13 Aug 2019 07:35:52 +1000 Subject: [PATCH 072/305] Fix wrong state given to op pool prune --- beacon_node/beacon_chain/src/beacon_chain.rs | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) diff --git a/beacon_node/beacon_chain/src/beacon_chain.rs b/beacon_node/beacon_chain/src/beacon_chain.rs index 0e0583309..bed50202d 100644 --- a/beacon_node/beacon_chain/src/beacon_chain.rs +++ b/beacon_node/beacon_chain/src/beacon_chain.rs @@ -1199,8 +1199,12 @@ impl BeaconChain { self.fork_choice .process_finalization(&finalized_block, finalized_block_root)?; - self.op_pool - .prune_all(&self.head().beacon_state, &self.spec); + let finalized_state = self + .store + .get::>(&finalized_block.state_root)? + .ok_or_else(|| Error::MissingBeaconState(finalized_block.state_root))?; + + self.op_pool.prune_all(&finalized_state, &self.spec); Ok(()) } From e369e293a507c602be1901b462001bfd8e4e825c Mon Sep 17 00:00:00 2001 From: Paul Hauner Date: Tue, 13 Aug 2019 09:20:39 +1000 Subject: [PATCH 073/305] Make prom metric names more consistent --- beacon_node/beacon_chain/src/metrics.rs | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/beacon_node/beacon_chain/src/metrics.rs b/beacon_node/beacon_chain/src/metrics.rs index a4b36cd37..574fbb4a4 100644 --- a/beacon_node/beacon_chain/src/metrics.rs +++ b/beacon_node/beacon_chain/src/metrics.rs @@ -15,7 +15,7 @@ lazy_static! 
{ "Count of blocks processed without error" ); pub static ref BLOCK_PROCESSING_TIMES: Result = - try_create_histogram("block_processing_seconds", "Full runtime of block processing"); + try_create_histogram("beacon_block_processing_seconds", "Full runtime of block processing"); pub static ref BLOCK_PROCESSING_DB_READ: Result = try_create_histogram( "beacon_block_processing_db_read_seconds", "Time spent loading block and state from DB for block processing" @@ -123,15 +123,15 @@ lazy_static! { "Count of occasions fork choice has switched to a different chain" ); pub static ref FORK_CHOICE_TIMES: Result = - try_create_histogram("beacon_fork_choice_time", "Full runtime of fork choice"); + try_create_histogram("beacon_fork_choice_seconds", "Full runtime of fork choice"); pub static ref FORK_CHOICE_FIND_HEAD_TIMES: Result = - try_create_histogram("beacon_fork_choice_find_head_time", "Full runtime of fork choice find_head function"); + try_create_histogram("beacon_fork_choice_find_head_seconds", "Full runtime of fork choice find_head function"); pub static ref FORK_CHOICE_PROCESS_BLOCK_TIMES: Result = try_create_histogram( - "beacon_fork_choice_process_block_time", + "beacon_fork_choice_process_block_seconds", "Time taken to add a block and all attestations to fork choice" ); pub static ref FORK_CHOICE_PROCESS_ATTESTATION_TIMES: Result = try_create_histogram( - "beacon_fork_choice_process_attestation_time", + "beacon_fork_choice_process_attestation_seconds", "Time taken to add an attestation to fork choice" ); From b076b07022c9f359315b52700de301d23530e1f0 Mon Sep 17 00:00:00 2001 From: Paul Hauner Date: Tue, 13 Aug 2019 12:11:18 +1000 Subject: [PATCH 074/305] Add more metrics, tidy existing metrics --- beacon_node/beacon_chain/src/beacon_chain.rs | 4 ++ beacon_node/beacon_chain/src/metrics.rs | 4 ++ beacon_node/eth2-libp2p/src/discovery.rs | 8 ++- beacon_node/eth2-libp2p/src/metrics.rs | 14 +++-- beacon_node/store/src/impls.rs | 20 ++++++- 
beacon_node/store/src/impls/beacon_state.rs | 21 +++++++- beacon_node/store/src/metrics.rs | 57 ++++++++++++++++++++ eth2/utils/slot_clock/src/metrics.rs | 3 ++ 8 files changed, 120 insertions(+), 11 deletions(-) diff --git a/beacon_node/beacon_chain/src/beacon_chain.rs b/beacon_node/beacon_chain/src/beacon_chain.rs index 0e0583309..faffa46f5 100644 --- a/beacon_node/beacon_chain/src/beacon_chain.rs +++ b/beacon_node/beacon_chain/src/beacon_chain.rs @@ -827,8 +827,12 @@ impl BeaconChain { return Ok(BlockProcessingOutcome::GenesisBlock); } + let block_root_timer = metrics::start_timer(&metrics::BLOCK_PROCESSING_BLOCK_ROOT); + let block_root = block.canonical_root(); + metrics::stop_timer(block_root_timer); + if block_root == self.genesis_block_root { return Ok(BlockProcessingOutcome::GenesisBlock); } diff --git a/beacon_node/beacon_chain/src/metrics.rs b/beacon_node/beacon_chain/src/metrics.rs index 574fbb4a4..db213a0cf 100644 --- a/beacon_node/beacon_chain/src/metrics.rs +++ b/beacon_node/beacon_chain/src/metrics.rs @@ -16,6 +16,10 @@ lazy_static! { ); pub static ref BLOCK_PROCESSING_TIMES: Result = try_create_histogram("beacon_block_processing_seconds", "Full runtime of block processing"); + pub static ref BLOCK_PROCESSING_BLOCK_ROOT: Result = try_create_histogram( + "beacon_block_processing_block_root_seconds", + "Time spent calculating the block root when processing a block." 
+ ); pub static ref BLOCK_PROCESSING_DB_READ: Result = try_create_histogram( "beacon_block_processing_db_read_seconds", "Time spent loading block and state from DB for block processing" diff --git a/beacon_node/eth2-libp2p/src/discovery.rs b/beacon_node/eth2-libp2p/src/discovery.rs index d9f2f7465..ca98db324 100644 --- a/beacon_node/eth2-libp2p/src/discovery.rs +++ b/beacon_node/eth2-libp2p/src/discovery.rs @@ -159,13 +159,17 @@ where } fn inject_connected(&mut self, peer_id: PeerId, _endpoint: ConnectedPoint) { - metrics::inc_counter(&metrics::PEER_CONNECT_COUNT); self.connected_peers.insert(peer_id); + + metrics::inc_counter(&metrics::PEER_CONNECT_EVENT_COUNT); + metrics::set_gauge(&metrics::PEERS_CONNECTED, self.connected_peers() as i64); } fn inject_disconnected(&mut self, peer_id: &PeerId, _endpoint: ConnectedPoint) { - metrics::inc_counter(&metrics::PEER_DISCONNECT_COUNT); self.connected_peers.remove(peer_id); + + metrics::inc_counter(&metrics::PEER_DISCONNECT_EVENT_COUNT); + metrics::set_gauge(&metrics::PEERS_CONNECTED, self.connected_peers() as i64); } fn inject_replaced( diff --git a/beacon_node/eth2-libp2p/src/metrics.rs b/beacon_node/eth2-libp2p/src/metrics.rs index a47037669..b678ef6b4 100644 --- a/beacon_node/eth2-libp2p/src/metrics.rs +++ b/beacon_node/eth2-libp2p/src/metrics.rs @@ -2,15 +2,19 @@ pub use lighthouse_metrics::*; lazy_static! 
{ pub static ref ADDRESS_UPDATE_COUNT: Result = try_create_int_counter( - "libp2p_address_update_count", + "libp2p_address_update_total", "Count of libp2p socked updated events (when our view of our IP address has changed)" ); - pub static ref PEER_CONNECT_COUNT: Result = try_create_int_counter( - "libp2p_peer_connect_count", + pub static ref PEERS_CONNECTED: Result = try_create_int_gauge( + "libp2p_peer_connected_peers_total", + "Count of libp2p peers currently connected" + ); + pub static ref PEER_CONNECT_EVENT_COUNT: Result = try_create_int_counter( + "libp2p_peer_connect_event_total", "Count of libp2p peer connect events (not the current number of connected peers)" ); - pub static ref PEER_DISCONNECT_COUNT: Result = try_create_int_counter( - "libp2p_peer_disconnect_count", + pub static ref PEER_DISCONNECT_EVENT_COUNT: Result = try_create_int_counter( + "libp2p_peer_disconnect_event_total", "Count of libp2p peer disconnect events" ); } diff --git a/beacon_node/store/src/impls.rs b/beacon_node/store/src/impls.rs index e88b70f39..1c29c245b 100644 --- a/beacon_node/store/src/impls.rs +++ b/beacon_node/store/src/impls.rs @@ -9,10 +9,26 @@ impl StoreItem for BeaconBlock { } fn as_store_bytes(&self) -> Vec { - self.as_ssz_bytes() + let timer = metrics::start_timer(&metrics::BEACON_STATE_WRITE_TIMES); + let bytes = self.as_ssz_bytes(); + + metrics::stop_timer(timer); + metrics::inc_counter(&metrics::BEACON_STATE_WRITE_COUNT); + metrics::inc_counter_by(&metrics::BEACON_STATE_WRITE_BYTES, bytes.len() as i64); + + bytes } fn from_store_bytes(bytes: &mut [u8]) -> Result { - Self::from_ssz_bytes(bytes).map_err(Into::into) + let timer = metrics::start_timer(&metrics::BEACON_STATE_READ_TIMES); + + let len = bytes.len(); + let result = Self::from_ssz_bytes(bytes).map_err(Into::into); + + metrics::stop_timer(timer); + metrics::inc_counter(&metrics::BEACON_STATE_READ_COUNT); + metrics::inc_counter_by(&metrics::BEACON_STATE_READ_BYTES, len as i64); + + result } } diff --git 
a/beacon_node/store/src/impls/beacon_state.rs b/beacon_node/store/src/impls/beacon_state.rs index 591663fe0..69e83cd63 100644 --- a/beacon_node/store/src/impls/beacon_state.rs +++ b/beacon_node/store/src/impls/beacon_state.rs @@ -53,12 +53,29 @@ impl StoreItem for BeaconState { } fn as_store_bytes(&self) -> Vec { + let timer = metrics::start_timer(&metrics::BEACON_STATE_WRITE_TIMES); + let container = StorageContainer::new(self); - container.as_ssz_bytes() + let bytes = container.as_ssz_bytes(); + + metrics::stop_timer(timer); + metrics::inc_counter(&metrics::BEACON_STATE_WRITE_COUNT); + metrics::inc_counter_by(&metrics::BEACON_STATE_WRITE_BYTES, bytes.len() as i64); + + bytes } fn from_store_bytes(bytes: &mut [u8]) -> Result { + let timer = metrics::start_timer(&metrics::BEACON_STATE_READ_TIMES); + + let len = bytes.len(); let container = StorageContainer::from_ssz_bytes(bytes)?; - container.try_into() + let result = container.try_into(); + + metrics::stop_timer(timer); + metrics::inc_counter(&metrics::BEACON_STATE_READ_COUNT); + metrics::inc_counter_by(&metrics::BEACON_STATE_READ_BYTES, len as i64); + + result } } diff --git a/beacon_node/store/src/metrics.rs b/beacon_node/store/src/metrics.rs index 30cbb878b..90237824d 100644 --- a/beacon_node/store/src/metrics.rs +++ b/beacon_node/store/src/metrics.rs @@ -4,6 +4,9 @@ use std::fs; use std::path::PathBuf; lazy_static! { + /* + * General + */ pub static ref DISK_DB_SIZE: Result = try_create_int_gauge("store_disk_db_size", "Size of the on-disk database (bytes)"); pub static ref DISK_DB_WRITE_BYTES: Result = try_create_int_counter( @@ -30,6 +33,60 @@ lazy_static! 
{ "store_disk_db_delete_count_total", "Total number of deletions from the on-disk DB" ); + /* + * Beacon State + */ + pub static ref BEACON_STATE_READ_TIMES: Result = try_create_histogram( + "store_beacon_state_read_overhead_seconds", + "Overhead on reading a beacon state from the DB (e.g., decoding)" + ); + pub static ref BEACON_STATE_READ_COUNT: Result = try_create_int_counter( + "store_beacon_state_read_total", + "Total number of beacon state reads from the DB" + ); + pub static ref BEACON_STATE_READ_BYTES: Result = try_create_int_counter( + "store_beacon_state_read_bytes_total", + "Total number of beacon state bytes read from the DB" + ); + pub static ref BEACON_STATE_WRITE_TIMES: Result = try_create_histogram( + "store_beacon_state_write_overhead_seconds", + "Overhead on writing a beacon state to the DB (e.g., encoding)" + ); + pub static ref BEACON_STATE_WRITE_COUNT: Result = try_create_int_counter( + "store_beacon_state_write_total", + "Total number of beacon state writes the DB" + ); + pub static ref BEACON_STATE_WRITE_BYTES: Result = try_create_int_counter( + "store_beacon_state_write_bytes_total", + "Total number of beacon state bytes written to the DB" + ); + /* + * Beacon Block + */ + pub static ref BEACON_BLOCK_READ_TIMES: Result = try_create_histogram( + "store_beacon_block_read_overhead_seconds", + "Overhead on reading a beacon block from the DB (e.g., decoding)" + ); + pub static ref BEACON_BLOCK_READ_COUNT: Result = try_create_int_counter( + "store_beacon_block_read_total", + "Total number of beacon block reads from the DB" + ); + pub static ref BEACON_BLOCK_READ_BYTES: Result = try_create_int_counter( + "store_beacon_block_read_bytes_total", + "Total number of beacon block bytes read from the DB" + ); + pub static ref BEACON_BLOCK_WRITE_TIMES: Result = try_create_histogram( + "store_beacon_block_write_overhead_seconds", + "Overhead on writing a beacon block to the DB (e.g., encoding)" + ); + pub static ref BEACON_BLOCK_WRITE_COUNT: Result = 
try_create_int_counter( + "store_beacon_block_write_total", + "Total number of beacon block writes the DB" + ); + pub static ref BEACON_BLOCK_WRITE_BYTES: Result = try_create_int_counter( + "store_beacon_block_write_bytes_total", + "Total number of beacon block bytes written to the DB" + ); } /// Updates the global metrics registry with store-related information. diff --git a/eth2/utils/slot_clock/src/metrics.rs b/eth2/utils/slot_clock/src/metrics.rs index a9153a10c..e0d3923e0 100644 --- a/eth2/utils/slot_clock/src/metrics.rs +++ b/eth2/utils/slot_clock/src/metrics.rs @@ -7,6 +7,8 @@ lazy_static! { try_create_int_gauge("slotclock_present_slot", "The present wall-clock slot"); pub static ref PRESENT_EPOCH: Result = try_create_int_gauge("slotclock_present_epoch", "The present wall-clock epoch"); + pub static ref SLOTS_PER_EPOCH: Result = + try_create_int_gauge("slotclock_slots_per_epoch", "Slots per epoch (constant)"); pub static ref MILLISECONDS_PER_SLOT: Result = try_create_int_gauge( "slotclock_slot_time_milliseconds", "The duration in milliseconds between each slot" @@ -25,5 +27,6 @@ pub fn scrape_for_metrics(clock: &U) { &PRESENT_EPOCH, present_slot.epoch(T::slots_per_epoch()).as_u64() as i64, ); + set_gauge(&SLOTS_PER_EPOCH, T::slots_per_epoch() as i64); set_gauge(&MILLISECONDS_PER_SLOT, clock.slot_duration_millis() as i64); } From a3e464078af39e10132bac3d1ac37dbebae8b41a Mon Sep 17 00:00:00 2001 From: Paul Hauner Date: Tue, 13 Aug 2019 13:00:01 +1000 Subject: [PATCH 075/305] Fix store block read metrics --- beacon_node/store/src/impls.rs | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/beacon_node/store/src/impls.rs b/beacon_node/store/src/impls.rs index 1c29c245b..ed724480c 100644 --- a/beacon_node/store/src/impls.rs +++ b/beacon_node/store/src/impls.rs @@ -9,25 +9,25 @@ impl StoreItem for BeaconBlock { } fn as_store_bytes(&self) -> Vec { - let timer = metrics::start_timer(&metrics::BEACON_STATE_WRITE_TIMES); + let timer = 
metrics::start_timer(&metrics::BEACON_BLOCK_WRITE_TIMES); let bytes = self.as_ssz_bytes(); metrics::stop_timer(timer); - metrics::inc_counter(&metrics::BEACON_STATE_WRITE_COUNT); - metrics::inc_counter_by(&metrics::BEACON_STATE_WRITE_BYTES, bytes.len() as i64); + metrics::inc_counter(&metrics::BEACON_BLOCK_WRITE_COUNT); + metrics::inc_counter_by(&metrics::BEACON_BLOCK_WRITE_BYTES, bytes.len() as i64); bytes } fn from_store_bytes(bytes: &mut [u8]) -> Result { - let timer = metrics::start_timer(&metrics::BEACON_STATE_READ_TIMES); + let timer = metrics::start_timer(&metrics::BEACON_BLOCK_READ_TIMES); let len = bytes.len(); let result = Self::from_ssz_bytes(bytes).map_err(Into::into); metrics::stop_timer(timer); - metrics::inc_counter(&metrics::BEACON_STATE_READ_COUNT); - metrics::inc_counter_by(&metrics::BEACON_STATE_READ_BYTES, len as i64); + metrics::inc_counter(&metrics::BEACON_BLOCK_READ_COUNT); + metrics::inc_counter_by(&metrics::BEACON_BLOCK_READ_BYTES, len as i64); result } From 341a83b9e8d5f3733b09ac9dae2e8aa6d5602ef5 Mon Sep 17 00:00:00 2001 From: Paul Hauner Date: Tue, 13 Aug 2019 16:17:11 +1000 Subject: [PATCH 076/305] Tidy attestation metrics --- beacon_node/beacon_chain/src/beacon_chain.rs | 28 +++++++++++++++----- beacon_node/beacon_chain/src/metrics.rs | 4 +++ 2 files changed, 26 insertions(+), 6 deletions(-) diff --git a/beacon_node/beacon_chain/src/beacon_chain.rs b/beacon_node/beacon_chain/src/beacon_chain.rs index faffa46f5..0cb6d5f98 100644 --- a/beacon_node/beacon_chain/src/beacon_chain.rs +++ b/beacon_node/beacon_chain/src/beacon_chain.rs @@ -547,11 +547,14 @@ impl BeaconChain { &self, attestation: Attestation, ) -> Result { + metrics::inc_counter(&metrics::ATTESTATION_PROCESSING_REQUESTS); + let timer = metrics::start_timer(&metrics::ATTESTATION_PROCESSING_TIMES); + // From the store, load the attestation's "head block". // // An honest validator would have set this block to be the head of the chain (i.e., the // result of running fork choice). 
- if let Some(attestation_head_block) = self + let result = if let Some(attestation_head_block) = self .store .get::>(&attestation.data.beacon_block_root)? { @@ -680,7 +683,15 @@ impl BeaconChain { Ok(AttestationProcessingOutcome::UnknownHeadBlock { beacon_block_root: attestation.data.beacon_block_root, }) + }; + + metrics::stop_timer(timer); + + if let Ok(AttestationProcessingOutcome::Processed) = &result { + metrics::inc_counter(&metrics::ATTESTATION_PROCESSING_SUCCESSES); } + + result } /// Verifies the `attestation` against the `state` to which it is attesting. @@ -707,9 +718,6 @@ impl BeaconChain { state: &BeaconState, block: &BeaconBlock, ) -> Result { - metrics::inc_counter(&metrics::ATTESTATION_PROCESSING_REQUESTS); - let timer = metrics::start_timer(&metrics::ATTESTATION_PROCESSING_TIMES); - // Find the highest between: // // - The highest valid finalized epoch we've ever seen (i.e., the head). @@ -719,6 +727,16 @@ impl BeaconChain { state.finalized_checkpoint.epoch, ); + // A helper function to allow attestation processing to be metered. + let verify_attestation_for_state = |state, attestation, spec, verify_signatures| { + let timer = metrics::start_timer(&metrics::ATTESTATION_PROCESSING_CORE); + + let result = verify_attestation_for_state(state, attestation, spec, verify_signatures); + + metrics::stop_timer(timer); + result + }; + let result = if block.slot <= finalized_epoch.start_slot(T::EthSpec::slots_per_epoch()) { // Ignore any attestation where the slot of `data.beacon_block_root` is equal to or // prior to the finalized epoch. @@ -758,8 +776,6 @@ impl BeaconChain { Ok(AttestationProcessingOutcome::Processed) }; - timer.map(|t| t.observe_duration()); - result } diff --git a/beacon_node/beacon_chain/src/metrics.rs b/beacon_node/beacon_chain/src/metrics.rs index db213a0cf..6efa4b3f2 100644 --- a/beacon_node/beacon_chain/src/metrics.rs +++ b/beacon_node/beacon_chain/src/metrics.rs @@ -90,6 +90,10 @@ lazy_static! 
{ "beacon_attestation_processing_seconds", "Full runtime of attestation processing" ); + pub static ref ATTESTATION_PROCESSING_CORE: Result = try_create_histogram( + "beacon_attestation_processing_core_seconds", + "Time spent on the core spec processing of attestation processing" + ); /* * Attestation Production From b7e43b56f9dc4167414c61d6b52238782e0caf47 Mon Sep 17 00:00:00 2001 From: Paul Hauner Date: Tue, 13 Aug 2019 19:37:14 +1000 Subject: [PATCH 077/305] Fix minor PR comments --- beacon_node/beacon_chain/src/beacon_chain.rs | 6 +++--- eth2/lmd_ghost/src/reduced_tree.rs | 6 +----- 2 files changed, 4 insertions(+), 8 deletions(-) diff --git a/beacon_node/beacon_chain/src/beacon_chain.rs b/beacon_node/beacon_chain/src/beacon_chain.rs index 73ebb7007..76442fb8d 100644 --- a/beacon_node/beacon_chain/src/beacon_chain.rs +++ b/beacon_node/beacon_chain/src/beacon_chain.rs @@ -536,7 +536,7 @@ impl BeaconChain { /// If valid, the attestation is added to `self.op_pool` and `self.fork_choice`. /// /// Returns an `Ok(AttestationProcessingOutcome)` if the chain was able to make a determination - /// about the `attestation` (wether it was invalid or not). Returns an `Err` if the was an + /// about the `attestation` (whether it was invalid or not). Returns an `Err` if there was an /// error during this process and no determination was able to be made. /// /// ## Notes @@ -620,7 +620,7 @@ impl BeaconChain { outcome } else { // Use the `data.beacon_block_root` to load the state from the latest non-skipped - // slot preceding the attestations creation. + // slot preceding the attestation's creation. // // This state is guaranteed to be in the same chain as the attestation, but it's // not guaranteed to be from the same slot or epoch as the attestation. @@ -703,7 +703,7 @@ impl BeaconChain { /// The given `state` must fulfil one of the following conditions: /// /// - `state` corresponds to the `block.state_root` identified by - /// `attestation.data.beacon_block_root`. 
(Viz., `attestation` was created using `state`. + /// `attestation.data.beacon_block_root`. (Viz., `attestation` was created using `state`). /// - `state.slot` is in the same epoch as `data.target.epoch` and /// `attestation.data.beacon_block_root` is in the history of `state`. /// diff --git a/eth2/lmd_ghost/src/reduced_tree.rs b/eth2/lmd_ghost/src/reduced_tree.rs index 822c388f6..deda02e1f 100644 --- a/eth2/lmd_ghost/src/reduced_tree.rs +++ b/eth2/lmd_ghost/src/reduced_tree.rs @@ -777,11 +777,7 @@ where } pub fn get_ref(&self, i: usize) -> Option<&T> { - if i < self.0.len() { - Some(&self.0[i]) - } else { - None - } + self.0.get(i) } pub fn insert(&mut self, i: usize, element: T) { From 6cd0af766e0ef97c258545fec7369169801cb9a5 Mon Sep 17 00:00:00 2001 From: Paul Hauner Date: Tue, 13 Aug 2019 19:37:14 +1000 Subject: [PATCH 078/305] Fix minor PR comments --- beacon_node/beacon_chain/src/beacon_chain.rs | 6 +++--- eth2/lmd_ghost/src/reduced_tree.rs | 6 +----- 2 files changed, 4 insertions(+), 8 deletions(-) diff --git a/beacon_node/beacon_chain/src/beacon_chain.rs b/beacon_node/beacon_chain/src/beacon_chain.rs index 9ccf59589..61998b5de 100644 --- a/beacon_node/beacon_chain/src/beacon_chain.rs +++ b/beacon_node/beacon_chain/src/beacon_chain.rs @@ -536,7 +536,7 @@ impl BeaconChain { /// If valid, the attestation is added to `self.op_pool` and `self.fork_choice`. /// /// Returns an `Ok(AttestationProcessingOutcome)` if the chain was able to make a determination - /// about the `attestation` (wether it was invalid or not). Returns an `Err` if the was an + /// about the `attestation` (whether it was invalid or not). Returns an `Err` if there was an /// error during this process and no determination was able to be made. /// /// ## Notes @@ -617,7 +617,7 @@ impl BeaconChain { outcome } else { // Use the `data.beacon_block_root` to load the state from the latest non-skipped - // slot preceding the attestations creation. + // slot preceding the attestation's creation. 
// // This state is guaranteed to be in the same chain as the attestation, but it's // not guaranteed to be from the same slot or epoch as the attestation. @@ -692,7 +692,7 @@ impl BeaconChain { /// The given `state` must fulfil one of the following conditions: /// /// - `state` corresponds to the `block.state_root` identified by - /// `attestation.data.beacon_block_root`. (Viz., `attestation` was created using `state`. + /// `attestation.data.beacon_block_root`. (Viz., `attestation` was created using `state`). /// - `state.slot` is in the same epoch as `data.target.epoch` and /// `attestation.data.beacon_block_root` is in the history of `state`. /// diff --git a/eth2/lmd_ghost/src/reduced_tree.rs b/eth2/lmd_ghost/src/reduced_tree.rs index 822c388f6..deda02e1f 100644 --- a/eth2/lmd_ghost/src/reduced_tree.rs +++ b/eth2/lmd_ghost/src/reduced_tree.rs @@ -777,11 +777,7 @@ where } pub fn get_ref(&self, i: usize) -> Option<&T> { - if i < self.0.len() { - Some(&self.0[i]) - } else { - None - } + self.0.get(i) } pub fn insert(&mut self, i: usize, element: T) { From 8fb9e1f648b75b488f798a25cf2bce487ff8206e Mon Sep 17 00:00:00 2001 From: Paul Hauner Date: Tue, 13 Aug 2019 19:48:03 +1000 Subject: [PATCH 079/305] Remove duplicated attestation finalization check --- beacon_node/beacon_chain/src/beacon_chain.rs | 17 ----------------- 1 file changed, 17 deletions(-) diff --git a/beacon_node/beacon_chain/src/beacon_chain.rs b/beacon_node/beacon_chain/src/beacon_chain.rs index 61998b5de..9ee51c162 100644 --- a/beacon_node/beacon_chain/src/beacon_chain.rs +++ b/beacon_node/beacon_chain/src/beacon_chain.rs @@ -555,23 +555,6 @@ impl BeaconChain { .store .get::>(&attestation.data.beacon_block_root)? 
{ - let finalized_epoch = self.head().beacon_state.finalized_checkpoint.epoch; - - if attestation_head_block.slot - <= finalized_epoch.start_slot(T::EthSpec::slots_per_epoch()) - { - // Ignore any attestation where the slot of `data.beacon_block_root` is equal to or - // prior to the finalized epoch. - // - // For any valid attestation if the `beacon_block_root` is prior to finalization, then - // all other parameters (source, target, etc) must all be prior to finalization and - // therefore no longer interesting. - return Ok(AttestationProcessingOutcome::FinalizedSlot { - attestation: attestation_head_block.epoch(), - finalized: finalized_epoch, - }); - } - // Attempt to process the attestation using the `self.head()` state. // // This is purely an effort to avoid loading a `BeaconState` unnecessarily from the DB. From 82e8aafb014484e72926dc476184634fd8b9afdf Mon Sep 17 00:00:00 2001 From: Paul Hauner Date: Tue, 13 Aug 2019 19:59:29 +1000 Subject: [PATCH 080/305] Remove awkward `let` statement --- beacon_node/beacon_chain/src/beacon_chain.rs | 170 +++++++++---------- 1 file changed, 82 insertions(+), 88 deletions(-) diff --git a/beacon_node/beacon_chain/src/beacon_chain.rs b/beacon_node/beacon_chain/src/beacon_chain.rs index 9ee51c162..96d306530 100644 --- a/beacon_node/beacon_chain/src/beacon_chain.rs +++ b/beacon_node/beacon_chain/src/beacon_chain.rs @@ -558,97 +558,91 @@ impl BeaconChain { // Attempt to process the attestation using the `self.head()` state. // // This is purely an effort to avoid loading a `BeaconState` unnecessarily from the DB. - let optional_outcome: Option> = { - // Take a read lock on the head beacon state. - // - // The purpose of this whole `let processed ...` block is to ensure that the read - // lock is dropped if we don't end up using the head beacon state. - let state = &self.head().beacon_state; + // Take a read lock on the head beacon state. 
+ let state = &self.head().beacon_state; - // If it turns out that the attestation was made using the head state, then there - // is no need to load a state from the database to process the attestation. - // - // Note: use the epoch of the target because it indicates which epoch the - // attestation was created in. You cannot use the epoch of the head block, because - // the block doesn't necessarily need to be in the same epoch as the attestation - // (e.g., if there are skip slots between the epoch the block was created in and - // the epoch for the attestation). - // - // This check also ensures that the slot for `data.beacon_block_root` is not higher - // than `state.root` by ensuring that the block is in the history of `state`. - if state.current_epoch() == attestation.data.target.epoch - && (attestation.data.beacon_block_root == self.head().beacon_block_root - || state - .get_block_root(attestation_head_block.slot) - .map(|root| *root == attestation.data.beacon_block_root) - .unwrap_or_else(|_| false)) - { - // The head state is able to be used to validate this attestation. No need to load - // anything from the database. - Some(self.process_attestation_for_state_and_block( - attestation.clone(), - state, - &attestation_head_block, - )) - } else { - None - } - }; + // If it turns out that the attestation was made using the head state, then there + // is no need to load a state from the database to process the attestation. + // + // Note: use the epoch of the target because it indicates which epoch the + // attestation was created in. You cannot use the epoch of the head block, because + // the block doesn't necessarily need to be in the same epoch as the attestation + // (e.g., if there are skip slots between the epoch the block was created in and + // the epoch for the attestation). + // + // This check also ensures that the slot for `data.beacon_block_root` is not higher + // than `state.root` by ensuring that the block is in the history of `state`. 
+ if state.current_epoch() == attestation.data.target.epoch + && (attestation.data.beacon_block_root == self.head().beacon_block_root + || state + .get_block_root(attestation_head_block.slot) + .map(|root| *root == attestation.data.beacon_block_root) + .unwrap_or_else(|_| false)) + { + // The head state is able to be used to validate this attestation. No need to load + // anything from the database. + return self.process_attestation_for_state_and_block( + attestation.clone(), + state, + &attestation_head_block, + ); + } - if let Some(outcome) = optional_outcome { - // Verification was already completed with an in-memory state. Return that result. - outcome + // Ensure the read-lock from `self.head()` is dropped. + // + // This is likely unnecessary, however it remains as a reminder to ensure this lock + // isn't hogged. + std::mem::drop(state); + + // Use the `data.beacon_block_root` to load the state from the latest non-skipped + // slot preceding the attestation's creation. + // + // This state is guaranteed to be in the same chain as the attestation, but it's + // not guaranteed to be from the same slot or epoch as the attestation. + let mut state: BeaconState = self + .store + .get(&attestation_head_block.state_root)? + .ok_or_else(|| Error::MissingBeaconState(attestation_head_block.state_root))?; + + // Ensure the state loaded from the database matches the state of the attestation + // head block. + // + // The state needs to be advanced from the current slot through to the epoch in + // which the attestation was created in. It would be an error to try and use + // `state.get_attestation_data_slot(..)` because the state matching the + // `data.beacon_block_root` isn't necessarily in a nearby epoch to the attestation + // (e.g., if there were lots of skip slots since the head of the chain and the + // epoch creation epoch). 
+ for _ in state.slot.as_u64() + ..attestation + .data + .target + .epoch + .start_slot(T::EthSpec::slots_per_epoch()) + .as_u64() + { + per_slot_processing(&mut state, &self.spec)?; + } + + state.build_committee_cache(RelativeEpoch::Current, &self.spec)?; + + let attestation_slot = state.get_attestation_data_slot(&attestation.data)?; + + // Reject any attestation where the `state` loaded from `data.beacon_block_root` + // has a higher slot than the attestation. + // + // Permitting this would allow for attesters to vote on _future_ slots. + if attestation_slot > state.slot { + Ok(AttestationProcessingOutcome::AttestsToFutureState { + state: state.slot, + attestation: attestation_slot, + }) } else { - // Use the `data.beacon_block_root` to load the state from the latest non-skipped - // slot preceding the attestation's creation. - // - // This state is guaranteed to be in the same chain as the attestation, but it's - // not guaranteed to be from the same slot or epoch as the attestation. - let mut state: BeaconState = self - .store - .get(&attestation_head_block.state_root)? - .ok_or_else(|| Error::MissingBeaconState(attestation_head_block.state_root))?; - - // Ensure the state loaded from the database matches the state of the attestation - // head block. - // - // The state needs to be advanced from the current slot through to the epoch in - // which the attestation was created in. It would be an error to try and use - // `state.get_attestation_data_slot(..)` because the state matching the - // `data.beacon_block_root` isn't necessarily in a nearby epoch to the attestation - // (e.g., if there were lots of skip slots since the head of the chain and the - // epoch creation epoch). 
- for _ in state.slot.as_u64() - ..attestation - .data - .target - .epoch - .start_slot(T::EthSpec::slots_per_epoch()) - .as_u64() - { - per_slot_processing(&mut state, &self.spec)?; - } - - state.build_committee_cache(RelativeEpoch::Current, &self.spec)?; - - let attestation_slot = state.get_attestation_data_slot(&attestation.data)?; - - // Reject any attestation where the `state` loaded from `data.beacon_block_root` - // has a higher slot than the attestation. - // - // Permitting this would allow for attesters to vote on _future_ slots. - if attestation_slot > state.slot { - Ok(AttestationProcessingOutcome::AttestsToFutureState { - state: state.slot, - attestation: attestation_slot, - }) - } else { - self.process_attestation_for_state_and_block( - attestation, - &state, - &attestation_head_block, - ) - } + self.process_attestation_for_state_and_block( + attestation, + &state, + &attestation_head_block, + ) } } else { // Drop any attestation where we have not processed `attestation.data.beacon_block_root`. 
From 4f98a3985fc1714799ac5897d002fa26ea74bb96 Mon Sep 17 00:00:00 2001 From: Paul Hauner Date: Wed, 14 Aug 2019 10:36:55 +1000 Subject: [PATCH 081/305] Add first attempts at HTTP bootstrap --- beacon_node/client/Cargo.toml | 1 + beacon_node/client/src/beacon_chain_types.rs | 11 +++ beacon_node/client/src/config.rs | 2 + beacon_node/client/src/lib.rs | 1 + beacon_node/client/src/local_bootstrap.rs | 93 ++++++++++++++++++++ beacon_node/rest_api/src/beacon.rs | 21 +++++ beacon_node/rest_api/src/lib.rs | 6 ++ beacon_node/rest_api/src/spec.rs | 27 ++++++ 8 files changed, 162 insertions(+) create mode 100644 beacon_node/client/src/local_bootstrap.rs create mode 100644 beacon_node/rest_api/src/spec.rs diff --git a/beacon_node/client/Cargo.toml b/beacon_node/client/Cargo.toml index 8c72fa417..b0524b17d 100644 --- a/beacon_node/client/Cargo.toml +++ b/beacon_node/client/Cargo.toml @@ -27,3 +27,4 @@ clap = "2.32.0" dirs = "1.0.3" exit-future = "0.1.3" futures = "0.1.25" +reqwest = "0.9" diff --git a/beacon_node/client/src/beacon_chain_types.rs b/beacon_node/client/src/beacon_chain_types.rs index 0b86c9583..a5b89b86a 100644 --- a/beacon_node/client/src/beacon_chain_types.rs +++ b/beacon_node/client/src/beacon_chain_types.rs @@ -1,4 +1,5 @@ use crate::error::Result; +use crate::local_bootstrap::BootstrapParams; use crate::{config::GenesisState, ClientConfig}; use beacon_chain::{ lmd_ghost::{LmdGhost, ThreadSafeReducedTree}, @@ -6,6 +7,7 @@ use beacon_chain::{ store::Store, BeaconChain, BeaconChainTypes, }; +use reqwest::Url; use slog::{crit, info, Logger}; use slot_clock::SlotClock; use std::fs::File; @@ -74,6 +76,15 @@ where serde_yaml::from_reader(file) .map_err(|e| format!("Unable to parse YAML genesis state file: {:?}", e))? 
} + GenesisState::HttpBootstrap { server } => { + let url: Url = + Url::parse(&server).map_err(|e| format!("Invalid bootstrap server url: {}", e))?; + + let params = BootstrapParams::from_http_api(url) + .map_err(|e| format!("Failed to bootstrap from HTTP server: {:?}", e))?; + + params.genesis_state + } }; let mut genesis_block = BeaconBlock::empty(&spec); diff --git a/beacon_node/client/src/config.rs b/beacon_node/client/src/config.rs index ee62b6281..2b410312b 100644 --- a/beacon_node/client/src/config.rs +++ b/beacon_node/client/src/config.rs @@ -48,6 +48,8 @@ pub enum GenesisState { }, /// Load a YAML-encoded genesis state from a file. Yaml { file: PathBuf }, + /// Use a HTTP server (running our REST-API) to load genesis and finalized states and blocks. + HttpBootstrap { server: String }, } impl Default for Config { diff --git a/beacon_node/client/src/lib.rs b/beacon_node/client/src/lib.rs index 65ba071fa..7a9152ee0 100644 --- a/beacon_node/client/src/lib.rs +++ b/beacon_node/client/src/lib.rs @@ -2,6 +2,7 @@ extern crate slog; mod beacon_chain_types; mod config; +mod local_bootstrap; pub mod error; pub mod notifier; diff --git a/beacon_node/client/src/local_bootstrap.rs b/beacon_node/client/src/local_bootstrap.rs new file mode 100644 index 000000000..f38762b3b --- /dev/null +++ b/beacon_node/client/src/local_bootstrap.rs @@ -0,0 +1,93 @@ +use reqwest::{Error as HttpError, Url}; +use types::{BeaconBlock, BeaconState, Checkpoint, EthSpec, Slot}; + +#[derive(Debug)] +pub enum Error { + UrlCannotBeBase, + HttpError(HttpError), +} + +impl From for Error { + fn from(e: HttpError) -> Error { + Error::HttpError(e) + } +} + +pub struct BootstrapParams { + pub finalized_block: BeaconBlock, + pub finalized_state: BeaconState, + pub genesis_block: BeaconBlock, + pub genesis_state: BeaconState, +} + +impl BootstrapParams { + pub fn from_http_api(url: Url) -> Result { + let slots_per_epoch = get_slots_per_epoch(url.clone())?; + let genesis_slot = Slot::new(0); + let 
finalized_slot = get_finalized_slot(url.clone(), slots_per_epoch.as_u64())?; + + Ok(Self { + finalized_block: get_block(url.clone(), finalized_slot)?, + finalized_state: get_state(url.clone(), finalized_slot)?, + genesis_block: get_block(url.clone(), genesis_slot)?, + genesis_state: get_state(url.clone(), genesis_slot)?, + }) + } +} + +fn get_slots_per_epoch(mut url: Url) -> Result { + url.path_segments_mut() + .map(|mut url| { + url.push("spec").push("slots_per_epoch"); + }) + .map_err(|_| Error::UrlCannotBeBase)?; + + reqwest::get(url)? + .error_for_status()? + .json() + .map_err(Into::into) +} + +fn get_finalized_slot(mut url: Url, slots_per_epoch: u64) -> Result { + url.path_segments_mut() + .map(|mut url| { + url.push("beacon").push("latest_finalized_checkpoint"); + }) + .map_err(|_| Error::UrlCannotBeBase)?; + + let checkpoint: Checkpoint = reqwest::get(url)?.error_for_status()?.json()?; + + Ok(checkpoint.epoch.start_slot(slots_per_epoch)) +} + +fn get_state(mut url: Url, slot: Slot) -> Result, Error> { + url.path_segments_mut() + .map(|mut url| { + url.push("beacon").push("state"); + }) + .map_err(|_| Error::UrlCannotBeBase)?; + + url.query_pairs_mut() + .append_pair("slot", &format!("{}", slot.as_u64())); + + reqwest::get(url)? + .error_for_status()? + .json() + .map_err(Into::into) +} + +fn get_block(mut url: Url, slot: Slot) -> Result, Error> { + url.path_segments_mut() + .map(|mut url| { + url.push("beacon").push("block"); + }) + .map_err(|_| Error::UrlCannotBeBase)?; + + url.query_pairs_mut() + .append_pair("slot", &format!("{}", slot.as_u64())); + + reqwest::get(url)? + .error_for_status()? 
+ .json() + .map_err(Into::into) +} diff --git a/beacon_node/rest_api/src/beacon.rs b/beacon_node/rest_api/src/beacon.rs index cef23abe8..8b089f542 100644 --- a/beacon_node/rest_api/src/beacon.rs +++ b/beacon_node/rest_api/src/beacon.rs @@ -58,3 +58,24 @@ pub fn get_state_root(req: Request) -> ApiR Ok(success_response(Body::from(json))) } + +/// HTTP handler to return the highest finalized slot. +pub fn get_latest_finalized_checkpoint( + req: Request, +) -> ApiResult { + let beacon_chain = req + .extensions() + .get::>>() + .ok_or_else(|| ApiError::ServerError("Beacon chain extension missing".to_string()))?; + + let checkpoint = beacon_chain + .head() + .beacon_state + .finalized_checkpoint + .clone(); + + let json: String = serde_json::to_string(&checkpoint) + .map_err(|e| ApiError::ServerError(format!("Unable to serialize checkpoint: {:?}", e)))?; + + Ok(success_response(Body::from(json))) +} diff --git a/beacon_node/rest_api/src/lib.rs b/beacon_node/rest_api/src/lib.rs index a94a8cdf4..57c5482cd 100644 --- a/beacon_node/rest_api/src/lib.rs +++ b/beacon_node/rest_api/src/lib.rs @@ -4,6 +4,7 @@ mod beacon; mod config; mod helpers; mod node; +mod spec; mod url_query; use beacon_chain::{BeaconChain, BeaconChainTypes}; @@ -101,10 +102,15 @@ pub fn start_server( // Route the request to the correct handler. 
let result = match (req.method(), path.as_ref()) { + (&Method::GET, "/beacon/latest_finalized_checkpoint") => { + beacon::get_latest_finalized_checkpoint::(req) + } (&Method::GET, "/beacon/state") => beacon::get_state::(req), (&Method::GET, "/beacon/state_root") => beacon::get_state_root::(req), (&Method::GET, "/node/version") => node::get_version(req), (&Method::GET, "/node/genesis_time") => node::get_genesis_time::(req), + (&Method::GET, "/spec") => spec::get_spec::(req), + (&Method::GET, "/spec/slots_per_epoch") => spec::get_slots_per_epoch::(req), _ => Err(ApiError::MethodNotAllowed(path.clone())), }; diff --git a/beacon_node/rest_api/src/spec.rs b/beacon_node/rest_api/src/spec.rs new file mode 100644 index 000000000..d0c8e4368 --- /dev/null +++ b/beacon_node/rest_api/src/spec.rs @@ -0,0 +1,27 @@ +use super::{success_response, ApiResult}; +use crate::ApiError; +use beacon_chain::{BeaconChain, BeaconChainTypes}; +use hyper::{Body, Request}; +use std::sync::Arc; +use types::EthSpec; + +/// HTTP handler to return the full spec object. +pub fn get_spec(req: Request) -> ApiResult { + let beacon_chain = req + .extensions() + .get::>>() + .ok_or_else(|| ApiError::ServerError("Beacon chain extension missing".to_string()))?; + + let json: String = serde_json::to_string(&beacon_chain.spec) + .map_err(|e| ApiError::ServerError(format!("Unable to serialize spec: {:?}", e)))?; + + Ok(success_response(Body::from(json))) +} + +/// HTTP handler to return the full spec object. 
+pub fn get_slots_per_epoch(_req: Request) -> ApiResult { + let json: String = serde_json::to_string(&T::EthSpec::slots_per_epoch()) + .map_err(|e| ApiError::ServerError(format!("Unable to serialize epoch: {:?}", e)))?; + + Ok(success_response(Body::from(json))) +} From 2bf0d5c071efee2f24bda10afe5f21ec6a9c4884 Mon Sep 17 00:00:00 2001 From: Paul Hauner Date: Wed, 14 Aug 2019 11:22:43 +1000 Subject: [PATCH 082/305] Add beacon_block methods to rest api --- beacon_node/client/src/local_bootstrap.rs | 22 ++++--- beacon_node/rest_api/src/beacon.rs | 74 ++++++++++++++++++++++- beacon_node/rest_api/src/lib.rs | 2 + 3 files changed, 87 insertions(+), 11 deletions(-) diff --git a/beacon_node/client/src/local_bootstrap.rs b/beacon_node/client/src/local_bootstrap.rs index f38762b3b..79fad7ec2 100644 --- a/beacon_node/client/src/local_bootstrap.rs +++ b/beacon_node/client/src/local_bootstrap.rs @@ -2,7 +2,7 @@ use reqwest::{Error as HttpError, Url}; use types::{BeaconBlock, BeaconState, Checkpoint, EthSpec, Slot}; #[derive(Debug)] -pub enum Error { +enum Error { UrlCannotBeBase, HttpError(HttpError), } @@ -21,16 +21,22 @@ pub struct BootstrapParams { } impl BootstrapParams { - pub fn from_http_api(url: Url) -> Result { - let slots_per_epoch = get_slots_per_epoch(url.clone())?; + pub fn from_http_api(url: Url) -> Result { + let slots_per_epoch = get_slots_per_epoch(url.clone()) + .map_err(|e| format!("Unable to get slots per epoch: {:?}", e))?; let genesis_slot = Slot::new(0); - let finalized_slot = get_finalized_slot(url.clone(), slots_per_epoch.as_u64())?; + let finalized_slot = get_finalized_slot(url.clone(), slots_per_epoch.as_u64()) + .map_err(|e| format!("Unable to get finalized slot: {:?}", e))?; Ok(Self { - finalized_block: get_block(url.clone(), finalized_slot)?, - finalized_state: get_state(url.clone(), finalized_slot)?, - genesis_block: get_block(url.clone(), genesis_slot)?, - genesis_state: get_state(url.clone(), genesis_slot)?, + finalized_block: 
get_block(url.clone(), finalized_slot) + .map_err(|e| format!("Unable to get finalized block: {:?}", e))?, + finalized_state: get_state(url.clone(), finalized_slot) + .map_err(|e| format!("Unable to get finalized state: {:?}", e))?, + genesis_block: get_block(url.clone(), genesis_slot) + .map_err(|e| format!("Unable to get genesis block: {:?}", e))?, + genesis_state: get_state(url.clone(), genesis_slot) + .map_err(|e| format!("Unable to get genesis state: {:?}", e))?, }) } } diff --git a/beacon_node/rest_api/src/beacon.rs b/beacon_node/rest_api/src/beacon.rs index 8b089f542..a2afb1001 100644 --- a/beacon_node/rest_api/src/beacon.rs +++ b/beacon_node/rest_api/src/beacon.rs @@ -4,7 +4,75 @@ use beacon_chain::{BeaconChain, BeaconChainTypes}; use hyper::{Body, Request}; use std::sync::Arc; use store::Store; -use types::BeaconState; +use types::{BeaconBlock, BeaconState}; + +/// HTTP handler to return a `BeaconBlock` at a given `root` or `slot`. +pub fn get_block(req: Request) -> ApiResult { + let beacon_chain = req + .extensions() + .get::>>() + .ok_or_else(|| ApiError::ServerError("Beacon chain extension missing".to_string()))?; + + let query_params = ["root", "slot"]; + let (key, value) = UrlQuery::from_request(&req)?.first_of(&query_params)?; + + let block_root = match (key.as_ref(), value) { + ("slot", value) => { + let target = parse_slot(&value)?; + + beacon_chain + .rev_iter_block_roots() + .take_while(|(_root, slot)| *slot >= target) + .find(|(_root, slot)| *slot == target) + .map(|(root, _slot)| root) + .ok_or_else(|| { + ApiError::NotFound(format!("Unable to find BeaconBlock for slot {}", target)) + })? + } + ("root", value) => parse_root(&value)?, + _ => return Err(ApiError::ServerError("Unexpected query parameter".into())), + }; + + let block = beacon_chain + .store + .get::>(&block_root)? 
+ .ok_or_else(|| { + ApiError::NotFound(format!( + "Unable to find BeaconBlock for root {}", + block_root + )) + })?; + + let json: String = serde_json::to_string(&block) + .map_err(|e| ApiError::ServerError(format!("Unable to serialize BeaconBlock: {:?}", e)))?; + + Ok(success_response(Body::from(json))) +} + +/// HTTP handler to return a `BeaconBlock` root at a given `slot`. +pub fn get_block_root(req: Request) -> ApiResult { + let beacon_chain = req + .extensions() + .get::>>() + .ok_or_else(|| ApiError::ServerError("Beacon chain extension missing".to_string()))?; + + let slot_string = UrlQuery::from_request(&req)?.only_one("slot")?; + let target = parse_slot(&slot_string)?; + + let root = beacon_chain + .rev_iter_block_roots() + .take_while(|(_root, slot)| *slot >= target) + .find(|(_root, slot)| *slot == target) + .map(|(root, _slot)| root) + .ok_or_else(|| { + ApiError::NotFound(format!("Unable to find BeaconBlock for slot {}", target)) + })?; + + let json: String = serde_json::to_string(&root) + .map_err(|e| ApiError::ServerError(format!("Unable to serialize root: {:?}", e)))?; + + Ok(success_response(Body::from(json))) +} /// HTTP handler to return a `BeaconState` at a given `root` or `slot`. /// @@ -29,7 +97,7 @@ pub fn get_state(req: Request) -> ApiResult .get(root)? .ok_or_else(|| ApiError::NotFound(format!("No state for root: {}", root)))? } - _ => unreachable!("Guarded by UrlQuery::from_request()"), + _ => return Err(ApiError::ServerError("Unexpected query parameter".into())), }; let json: String = serde_json::to_string(&state) @@ -38,7 +106,7 @@ pub fn get_state(req: Request) -> ApiResult Ok(success_response(Body::from(json))) } -/// HTTP handler to return a `BeaconState` root at a given or `slot`. +/// HTTP handler to return a `BeaconState` root at a given `slot`. /// /// Will not return a state if the request slot is in the future. Will return states higher than /// the current head by skipping slots. 
diff --git a/beacon_node/rest_api/src/lib.rs b/beacon_node/rest_api/src/lib.rs index 57c5482cd..4f07b482a 100644 --- a/beacon_node/rest_api/src/lib.rs +++ b/beacon_node/rest_api/src/lib.rs @@ -102,6 +102,8 @@ pub fn start_server( // Route the request to the correct handler. let result = match (req.method(), path.as_ref()) { + (&Method::GET, "/beacon/block") => beacon::get_block::(req), + (&Method::GET, "/beacon/block_root") => beacon::get_block_root::(req), (&Method::GET, "/beacon/latest_finalized_checkpoint") => { beacon::get_latest_finalized_checkpoint::(req) } From 980f533b3b1156c89cc9f46d396a216b13af9205 Mon Sep 17 00:00:00 2001 From: Paul Hauner Date: Wed, 14 Aug 2019 11:55:12 +1000 Subject: [PATCH 083/305] Fix serde for block.body.grafitti --- eth2/types/src/beacon_block_body.rs | 7 +++++-- eth2/types/src/utils/serde_utils.rs | 16 ++++++++++++++-- 2 files changed, 19 insertions(+), 4 deletions(-) diff --git a/eth2/types/src/beacon_block_body.rs b/eth2/types/src/beacon_block_body.rs index 64dc229ed..c1f66b816 100644 --- a/eth2/types/src/beacon_block_body.rs +++ b/eth2/types/src/beacon_block_body.rs @@ -1,5 +1,5 @@ use crate::test_utils::TestRandom; -use crate::utils::graffiti_from_hex_str; +use crate::utils::{graffiti_from_hex_str, graffiti_to_hex_str}; use crate::*; use serde_derive::{Deserialize, Serialize}; @@ -16,7 +16,10 @@ use tree_hash_derive::TreeHash; pub struct BeaconBlockBody { pub randao_reveal: Signature, pub eth1_data: Eth1Data, - #[serde(deserialize_with = "graffiti_from_hex_str")] + #[serde( + serialize_with = "graffiti_to_hex_str", + deserialize_with = "graffiti_from_hex_str" + )] pub graffiti: [u8; 32], pub proposer_slashings: VariableList, pub attester_slashings: VariableList, T::MaxAttesterSlashings>, diff --git a/eth2/types/src/utils/serde_utils.rs b/eth2/types/src/utils/serde_utils.rs index 4b46fc0dc..a9b27d75b 100644 --- a/eth2/types/src/utils/serde_utils.rs +++ b/eth2/types/src/utils/serde_utils.rs @@ -46,8 +46,20 @@ where Ok(array) } 
-// #[allow(clippy::trivially_copy_pass_by_ref)] // Serde requires the `byte` to be a ref. -pub fn fork_to_hex_str(bytes: &[u8; 4], serializer: S) -> Result +pub fn fork_to_hex_str(bytes: &[u8; FORK_BYTES_LEN], serializer: S) -> Result +where + S: Serializer, +{ + let mut hex_string: String = "0x".to_string(); + hex_string.push_str(&hex::encode(&bytes)); + + serializer.serialize_str(&hex_string) +} + +pub fn graffiti_to_hex_str( + bytes: &[u8; GRAFFITI_BYTES_LEN], + serializer: S, +) -> Result where S: Serializer, { From 9b3c9f8c0fe1908b37ca7c6b8f98b68cc07adfac Mon Sep 17 00:00:00 2001 From: Paul Hauner Date: Wed, 14 Aug 2019 12:03:03 +1000 Subject: [PATCH 084/305] Allow travis failures on beta (see desc) There's a non-backward compatible change in `cargo fmt`. Stable and beta do not agree. --- .travis.yml | 1 + 1 file changed, 1 insertion(+) diff --git a/.travis.yml b/.travis.yml index def7435a1..b9754eb1e 100644 --- a/.travis.yml +++ b/.travis.yml @@ -17,6 +17,7 @@ rust: - nightly matrix: allow_failures: + - rust: beta - rust: nightly fast_finish: true install: From c93d2baa912a3ff41fba711ae5b2ae387298c265 Mon Sep 17 00:00:00 2001 From: Paul Hauner Date: Wed, 14 Aug 2019 18:23:26 +1000 Subject: [PATCH 085/305] Add network routes to API --- beacon_node/client/Cargo.toml | 1 + beacon_node/client/src/lib.rs | 3 +- beacon_node/client/src/local_bootstrap.rs | 16 ++++++ beacon_node/eth2-libp2p/src/behaviour.rs | 5 ++ beacon_node/eth2-libp2p/src/discovery.rs | 9 ++++ beacon_node/eth2-libp2p/src/lib.rs | 1 + beacon_node/eth2-libp2p/src/service.rs | 2 +- beacon_node/network/src/service.rs | 26 +++++++++- beacon_node/rest_api/Cargo.toml | 2 + beacon_node/rest_api/src/lib.rs | 12 ++++- beacon_node/rest_api/src/network.rs | 61 +++++++++++++++++++++++ 11 files changed, 134 insertions(+), 4 deletions(-) create mode 100644 beacon_node/rest_api/src/network.rs diff --git a/beacon_node/client/Cargo.toml b/beacon_node/client/Cargo.toml index 9aa3557a9..9d5d49e17 100644 --- 
a/beacon_node/client/Cargo.toml +++ b/beacon_node/client/Cargo.toml @@ -7,6 +7,7 @@ edition = "2018" [dependencies] beacon_chain = { path = "../beacon_chain" } network = { path = "../network" } +eth2-libp2p = { path = "../eth2-libp2p" } rpc = { path = "../rpc" } rest_api = { path = "../rest_api" } prometheus = "^0.6" diff --git a/beacon_node/client/src/lib.rs b/beacon_node/client/src/lib.rs index e7c3d2d8a..93e80df42 100644 --- a/beacon_node/client/src/lib.rs +++ b/beacon_node/client/src/lib.rs @@ -48,7 +48,7 @@ pub struct Client { impl Client where - T: BeaconChainTypes + InitialiseBeaconChain + Clone + 'static, + T: BeaconChainTypes + InitialiseBeaconChain + Clone + Send + Sync + 'static, { /// Generate an instance of the client. Spawn and link all internal sub-processes. pub fn new( @@ -122,6 +122,7 @@ where &client_config.rest_api, executor, beacon_chain.clone(), + network.clone(), client_config.db_path().expect("unable to read datadir"), &log, ) { diff --git a/beacon_node/client/src/local_bootstrap.rs b/beacon_node/client/src/local_bootstrap.rs index 79fad7ec2..5fe5e1b4f 100644 --- a/beacon_node/client/src/local_bootstrap.rs +++ b/beacon_node/client/src/local_bootstrap.rs @@ -1,3 +1,4 @@ +use eth2_libp2p::Enr; use reqwest::{Error as HttpError, Url}; use types::{BeaconBlock, BeaconState, Checkpoint, EthSpec, Slot}; @@ -18,6 +19,7 @@ pub struct BootstrapParams { pub finalized_state: BeaconState, pub genesis_block: BeaconBlock, pub genesis_state: BeaconState, + pub enr: Enr, } impl BootstrapParams { @@ -37,6 +39,7 @@ impl BootstrapParams { .map_err(|e| format!("Unable to get genesis block: {:?}", e))?, genesis_state: get_state(url.clone(), genesis_slot) .map_err(|e| format!("Unable to get genesis state: {:?}", e))?, + enr: get_enr(url.clone()).map_err(|e| format!("Unable to get ENR: {:?}", e))?, }) } } @@ -97,3 +100,16 @@ fn get_block(mut url: Url, slot: Slot) -> Result, Err .json() .map_err(Into::into) } + +fn get_enr(mut url: Url) -> Result { + 
url.path_segments_mut() + .map(|mut url| { + url.push("node").push("network").push("enr"); + }) + .map_err(|_| Error::UrlCannotBeBase)?; + + reqwest::get(url)? + .error_for_status()? + .json() + .map_err(Into::into) +} diff --git a/beacon_node/eth2-libp2p/src/behaviour.rs b/beacon_node/eth2-libp2p/src/behaviour.rs index b87f8a061..24aacbfa1 100644 --- a/beacon_node/eth2-libp2p/src/behaviour.rs +++ b/beacon_node/eth2-libp2p/src/behaviour.rs @@ -7,6 +7,7 @@ use futures::prelude::*; use libp2p::{ core::identity::Keypair, discv5::Discv5Event, + enr::Enr, gossipsub::{Gossipsub, GossipsubEvent}, identify::{Identify, IdentifyEvent}, ping::{Ping, PingConfig, PingEvent}, @@ -78,6 +79,10 @@ impl Behaviour { log: behaviour_log, }) } + + pub fn discovery(&self) -> &Discovery { + &self.discovery + } } // Implement the NetworkBehaviourEventProcess trait so that we can derive NetworkBehaviour for Behaviour diff --git a/beacon_node/eth2-libp2p/src/discovery.rs b/beacon_node/eth2-libp2p/src/discovery.rs index ca98db324..87d5dd558 100644 --- a/beacon_node/eth2-libp2p/src/discovery.rs +++ b/beacon_node/eth2-libp2p/src/discovery.rs @@ -103,6 +103,10 @@ impl Discovery { }) } + pub fn local_enr(&self) -> &Enr { + self.discovery.local_enr() + } + /// Manually search for peers. This restarts the discovery round, sparking multiple rapid /// queries. pub fn discover_peers(&mut self) { @@ -120,6 +124,11 @@ impl Discovery { self.connected_peers.len() } + /// The current number of connected libp2p peers. + pub fn connected_peer_set(&self) -> &HashSet { + &self.connected_peers + } + /// Search for new peers using the underlying discovery mechanism. 
fn find_peers(&mut self) { // pick a random NodeId diff --git a/beacon_node/eth2-libp2p/src/lib.rs b/beacon_node/eth2-libp2p/src/lib.rs index 33d5ba9ed..8c2644fbb 100644 --- a/beacon_node/eth2-libp2p/src/lib.rs +++ b/beacon_node/eth2-libp2p/src/lib.rs @@ -17,6 +17,7 @@ pub use behaviour::PubsubMessage; pub use config::{ Config as NetworkConfig, BEACON_ATTESTATION_TOPIC, BEACON_BLOCK_TOPIC, SHARD_TOPIC_PREFIX, }; +pub use libp2p::enr::Enr; pub use libp2p::gossipsub::{Topic, TopicHash}; pub use libp2p::multiaddr; pub use libp2p::Multiaddr; diff --git a/beacon_node/eth2-libp2p/src/service.rs b/beacon_node/eth2-libp2p/src/service.rs index 316aa0579..4c343fa26 100644 --- a/beacon_node/eth2-libp2p/src/service.rs +++ b/beacon_node/eth2-libp2p/src/service.rs @@ -15,7 +15,7 @@ use libp2p::core::{ transport::boxed::Boxed, upgrade::{InboundUpgradeExt, OutboundUpgradeExt}, }; -use libp2p::{core, secio, PeerId, Swarm, Transport}; +use libp2p::{core, enr::Enr, secio, PeerId, Swarm, Transport}; use slog::{debug, info, trace, warn}; use std::fs::File; use std::io::prelude::*; diff --git a/beacon_node/network/src/service.rs b/beacon_node/network/src/service.rs index e5ca2a917..ed3c9da0b 100644 --- a/beacon_node/network/src/service.rs +++ b/beacon_node/network/src/service.rs @@ -5,7 +5,7 @@ use beacon_chain::{BeaconChain, BeaconChainTypes}; use core::marker::PhantomData; use eth2_libp2p::Service as LibP2PService; use eth2_libp2p::Topic; -use eth2_libp2p::{Libp2pEvent, PeerId}; +use eth2_libp2p::{Enr, Libp2pEvent, PeerId}; use eth2_libp2p::{PubsubMessage, RPCEvent}; use futures::prelude::*; use futures::Stream; @@ -64,6 +64,30 @@ impl Service { Ok((Arc::new(network_service), network_send)) } + pub fn local_enr(&self) -> Enr { + self.libp2p_service + .lock() + .swarm + .discovery() + .local_enr() + .clone() + } + + pub fn connected_peers(&self) -> usize { + self.libp2p_service.lock().swarm.connected_peers() + } + + pub fn connected_peer_set(&self) -> Vec { + self.libp2p_service + 
.lock() + .swarm + .discovery() + .connected_peer_set() + .iter() + .cloned() + .collect() + } + pub fn libp2p_service(&self) -> Arc> { self.libp2p_service.clone() } diff --git a/beacon_node/rest_api/Cargo.toml b/beacon_node/rest_api/Cargo.toml index c7026014c..cac196d9c 100644 --- a/beacon_node/rest_api/Cargo.toml +++ b/beacon_node/rest_api/Cargo.toml @@ -7,6 +7,8 @@ edition = "2018" # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html [dependencies] beacon_chain = { path = "../beacon_chain" } +network = { path = "../network" } +eth2-libp2p = { path = "../eth2-libp2p" } store = { path = "../store" } version = { path = "../version" } serde = { version = "1.0", features = ["derive"] } diff --git a/beacon_node/rest_api/src/lib.rs b/beacon_node/rest_api/src/lib.rs index e267ce313..86b5b35db 100644 --- a/beacon_node/rest_api/src/lib.rs +++ b/beacon_node/rest_api/src/lib.rs @@ -1,15 +1,18 @@ #[macro_use] extern crate lazy_static; +extern crate network as client_network; mod beacon; mod config; mod helpers; mod metrics; +mod network; mod node; mod spec; mod url_query; use beacon_chain::{BeaconChain, BeaconChainTypes}; +use client_network::Service as NetworkService; pub use config::Config as ApiConfig; use hyper::rt::Future; use hyper::service::service_fn_ok; @@ -68,10 +71,11 @@ impl From for ApiError { } } -pub fn start_server( +pub fn start_server( config: &ApiConfig, executor: &TaskExecutor, beacon_chain: Arc>, + network_service: Arc>, db_path: PathBuf, log: &slog::Logger, ) -> Result { @@ -99,6 +103,7 @@ pub fn start_server( let log = server_log.clone(); let beacon_chain = server_bc.clone(); let db_path = db_path.clone(); + let network_service = network_service.clone(); // Create a simple handler for the router, inject our stateful objects into the request. 
service_fn_ok(move |mut req| { @@ -109,6 +114,8 @@ pub fn start_server( req.extensions_mut() .insert::>>(beacon_chain.clone()); req.extensions_mut().insert::(db_path.clone()); + req.extensions_mut() + .insert::>>(network_service.clone()); let path = req.uri().path().to_string(); @@ -124,6 +131,9 @@ pub fn start_server( (&Method::GET, "/metrics") => metrics::get_prometheus::(req), (&Method::GET, "/node/version") => node::get_version(req), (&Method::GET, "/node/genesis_time") => node::get_genesis_time::(req), + (&Method::GET, "/node/network/enr") => network::get_enr::(req), + (&Method::GET, "/node/network/peer_count") => network::get_peer_count::(req), + (&Method::GET, "/node/network/peers") => network::get_peer_list::(req), (&Method::GET, "/spec") => spec::get_spec::(req), (&Method::GET, "/spec/slots_per_epoch") => spec::get_slots_per_epoch::(req), _ => Err(ApiError::MethodNotAllowed(path.clone())), diff --git a/beacon_node/rest_api/src/network.rs b/beacon_node/rest_api/src/network.rs new file mode 100644 index 000000000..2fd88f498 --- /dev/null +++ b/beacon_node/rest_api/src/network.rs @@ -0,0 +1,61 @@ +use crate::{success_response, ApiError, ApiResult, NetworkService}; +use beacon_chain::BeaconChainTypes; +use eth2_libp2p::{Enr, PeerId}; +use hyper::{Body, Request}; +use std::sync::Arc; + +/// HTTP handle to return the Discv5 ENR from the client's libp2p service. +/// +/// ENR is encoded as base64 string. +pub fn get_enr(req: Request) -> ApiResult { + let network = req + .extensions() + .get::>>() + .ok_or_else(|| ApiError::ServerError("NetworkService extension missing".to_string()))?; + + let enr: Enr = network.local_enr(); + + Ok(success_response(Body::from( + serde_json::to_string(&enr.to_base64()) + .map_err(|e| ApiError::ServerError(format!("Unable to serialize Enr: {:?}", e)))?, + ))) +} + +/// HTTP handle to return the number of peers connected in the client's libp2p service. 
+pub fn get_peer_count( + req: Request, +) -> ApiResult { + let network = req + .extensions() + .get::>>() + .ok_or_else(|| ApiError::ServerError("NetworkService extension missing".to_string()))?; + + let connected_peers: usize = network.connected_peers(); + + Ok(success_response(Body::from( + serde_json::to_string(&connected_peers) + .map_err(|e| ApiError::ServerError(format!("Unable to serialize Enr: {:?}", e)))?, + ))) +} + +/// HTTP handle to return the list of peers connected to the client's libp2p service. +/// +/// Peers are presented as a list of `PeerId::to_string()`. +pub fn get_peer_list(req: Request) -> ApiResult { + let network = req + .extensions() + .get::>>() + .ok_or_else(|| ApiError::ServerError("NetworkService extension missing".to_string()))?; + + let connected_peers: Vec = network + .connected_peer_set() + .iter() + .map(PeerId::to_string) + .collect(); + + Ok(success_response(Body::from( + serde_json::to_string(&connected_peers).map_err(|e| { + ApiError::ServerError(format!("Unable to serialize Vec: {:?}", e)) + })?, + ))) +} From bb166a25992535460aecdec2fe94403b1521254a Mon Sep 17 00:00:00 2001 From: Paul Hauner Date: Wed, 14 Aug 2019 18:58:01 +1000 Subject: [PATCH 086/305] Fix rustc warnings --- beacon_node/eth2-libp2p/src/behaviour.rs | 1 - beacon_node/eth2-libp2p/src/service.rs | 2 +- 2 files changed, 1 insertion(+), 2 deletions(-) diff --git a/beacon_node/eth2-libp2p/src/behaviour.rs b/beacon_node/eth2-libp2p/src/behaviour.rs index 24aacbfa1..9158fe485 100644 --- a/beacon_node/eth2-libp2p/src/behaviour.rs +++ b/beacon_node/eth2-libp2p/src/behaviour.rs @@ -7,7 +7,6 @@ use futures::prelude::*; use libp2p::{ core::identity::Keypair, discv5::Discv5Event, - enr::Enr, gossipsub::{Gossipsub, GossipsubEvent}, identify::{Identify, IdentifyEvent}, ping::{Ping, PingConfig, PingEvent}, diff --git a/beacon_node/eth2-libp2p/src/service.rs b/beacon_node/eth2-libp2p/src/service.rs index 4c343fa26..316aa0579 100644 --- 
a/beacon_node/eth2-libp2p/src/service.rs +++ b/beacon_node/eth2-libp2p/src/service.rs @@ -15,7 +15,7 @@ use libp2p::core::{ transport::boxed::Boxed, upgrade::{InboundUpgradeExt, OutboundUpgradeExt}, }; -use libp2p::{core, enr::Enr, secio, PeerId, Swarm, Transport}; +use libp2p::{core, secio, PeerId, Swarm, Transport}; use slog::{debug, info, trace, warn}; use std::fs::File; use std::io::prelude::*; From c97b3b20cb1cfa6ae6ac5e9658b5f5a27f2bf4af Mon Sep 17 00:00:00 2001 From: Paul Hauner Date: Wed, 14 Aug 2019 20:58:51 +1000 Subject: [PATCH 087/305] Add best_slot method --- beacon_node/rest_api/src/beacon.rs | 15 +++++++++++++++ beacon_node/rest_api/src/lib.rs | 1 + 2 files changed, 16 insertions(+) diff --git a/beacon_node/rest_api/src/beacon.rs b/beacon_node/rest_api/src/beacon.rs index a2afb1001..66e31ae41 100644 --- a/beacon_node/rest_api/src/beacon.rs +++ b/beacon_node/rest_api/src/beacon.rs @@ -6,6 +6,21 @@ use std::sync::Arc; use store::Store; use types::{BeaconBlock, BeaconState}; +/// HTTP handler to return a `BeaconBlock` at a given `root` or `slot`. +pub fn get_best_slot(req: Request) -> ApiResult { + let beacon_chain = req + .extensions() + .get::>>() + .ok_or_else(|| ApiError::ServerError("Beacon chain extension missing".to_string()))?; + + let slot = beacon_chain.head().beacon_state.slot; + + let json: String = serde_json::to_string(&slot) + .map_err(|e| ApiError::ServerError(format!("Unable to serialize Slot: {:?}", e)))?; + + Ok(success_response(Body::from(json))) +} + /// HTTP handler to return a `BeaconBlock` at a given `root` or `slot`. pub fn get_block(req: Request) -> ApiResult { let beacon_chain = req diff --git a/beacon_node/rest_api/src/lib.rs b/beacon_node/rest_api/src/lib.rs index 86b5b35db..349a62c3f 100644 --- a/beacon_node/rest_api/src/lib.rs +++ b/beacon_node/rest_api/src/lib.rs @@ -121,6 +121,7 @@ pub fn start_server( // Route the request to the correct handler. 
let result = match (req.method(), path.as_ref()) { + (&Method::GET, "/beacon/best_slot") => beacon::get_best_slot::(req), (&Method::GET, "/beacon/block") => beacon::get_block::(req), (&Method::GET, "/beacon/block_root") => beacon::get_block_root::(req), (&Method::GET, "/beacon/latest_finalized_checkpoint") => { From fda208b103284a156d801f2cea0e556642b10fe5 Mon Sep 17 00:00:00 2001 From: Paul Hauner Date: Thu, 15 Aug 2019 12:48:34 +1000 Subject: [PATCH 088/305] Add --bootstrap arg to beacon node --- beacon_node/client/src/lib.rs | 2 +- beacon_node/src/main.rs | 19 ++++++++++++++++++- 2 files changed, 19 insertions(+), 2 deletions(-) diff --git a/beacon_node/client/src/lib.rs b/beacon_node/client/src/lib.rs index 93e80df42..44b5c0ce3 100644 --- a/beacon_node/client/src/lib.rs +++ b/beacon_node/client/src/lib.rs @@ -22,7 +22,7 @@ use tokio::timer::Interval; pub use beacon_chain::BeaconChainTypes; pub use beacon_chain_types::ClientType; pub use beacon_chain_types::InitialiseBeaconChain; -pub use config::Config as ClientConfig; +pub use config::{Config as ClientConfig, GenesisState}; pub use eth2_config::Eth2Config; /// Main beacon node client service. This provides the connection and initialisation of the clients diff --git a/beacon_node/src/main.rs b/beacon_node/src/main.rs index 9a52f2638..862ca4a90 100644 --- a/beacon_node/src/main.rs +++ b/beacon_node/src/main.rs @@ -1,7 +1,7 @@ mod run; use clap::{App, Arg}; -use client::{ClientConfig, Eth2Config}; +use client::{ClientConfig, Eth2Config, GenesisState}; use env_logger::{Builder, Env}; use eth2_config::{read_from_file, write_to_file}; use slog::{crit, o, warn, Drain, Level}; @@ -200,6 +200,16 @@ fn main() { .help("Sets the verbosity level") .takes_value(true), ) + /* + * Bootstrap. 
+ */ + .arg( + Arg::with_name("bootstrap") + .long("bootstrap") + .value_name("HTTP_SERVER") + .help("Load the genesis state and libp2p address from the HTTP API of another Lighthouse node.") + .takes_value(true) + ) .get_matches(); // build the initial logger @@ -288,6 +298,13 @@ fn main() { } }; + // If the `--bootstrap` flag is provided, overwrite the default configuration. + if let Some(server) = matches.value_of("bootstrap") { + client_config.genesis_state = GenesisState::HttpBootstrap { + server: server.to_string(), + }; + } + let eth2_config_path = data_dir.join(ETH2_CONFIG_FILENAME); // Initialise the `Eth2Config`. From b24482674933406460b40f83fee00a98c6c84135 Mon Sep 17 00:00:00 2001 From: Paul Hauner Date: Thu, 15 Aug 2019 13:58:04 +1000 Subject: [PATCH 089/305] Get bootstrapper working for ENR address --- beacon_node/client/src/beacon_chain_types.rs | 14 ++--- .../{local_bootstrap.rs => bootstrapper.rs} | 55 +++++++++++-------- beacon_node/client/src/lib.rs | 3 +- beacon_node/src/main.rs | 21 ++++++- 4 files changed, 62 insertions(+), 31 deletions(-) rename beacon_node/client/src/{local_bootstrap.rs => bootstrapper.rs} (64%) diff --git a/beacon_node/client/src/beacon_chain_types.rs b/beacon_node/client/src/beacon_chain_types.rs index a5b89b86a..f2f95226a 100644 --- a/beacon_node/client/src/beacon_chain_types.rs +++ b/beacon_node/client/src/beacon_chain_types.rs @@ -1,5 +1,5 @@ +use crate::bootstrapper::Bootstrapper; use crate::error::Result; -use crate::local_bootstrap::BootstrapParams; use crate::{config::GenesisState, ClientConfig}; use beacon_chain::{ lmd_ghost::{LmdGhost, ThreadSafeReducedTree}, @@ -7,7 +7,6 @@ use beacon_chain::{ store::Store, BeaconChain, BeaconChainTypes, }; -use reqwest::Url; use slog::{crit, info, Logger}; use slot_clock::SlotClock; use std::fs::File; @@ -77,13 +76,14 @@ where .map_err(|e| format!("Unable to parse YAML genesis state file: {:?}", e))? 
} GenesisState::HttpBootstrap { server } => { - let url: Url = - Url::parse(&server).map_err(|e| format!("Invalid bootstrap server url: {}", e))?; + let bootstrapper = Bootstrapper::from_server_string(server.to_string()) + .map_err(|e| format!("Failed to initialize bootstrap client: {}", e))?; - let params = BootstrapParams::from_http_api(url) - .map_err(|e| format!("Failed to bootstrap from HTTP server: {:?}", e))?; + let (state, _block) = bootstrapper + .genesis() + .map_err(|e| format!("Failed to bootstrap genesis state: {}", e))?; - params.genesis_state + state } }; diff --git a/beacon_node/client/src/local_bootstrap.rs b/beacon_node/client/src/bootstrapper.rs similarity index 64% rename from beacon_node/client/src/local_bootstrap.rs rename to beacon_node/client/src/bootstrapper.rs index 5fe5e1b4f..9537f6f90 100644 --- a/beacon_node/client/src/local_bootstrap.rs +++ b/beacon_node/client/src/bootstrapper.rs @@ -14,33 +14,44 @@ impl From for Error { } } -pub struct BootstrapParams { - pub finalized_block: BeaconBlock, - pub finalized_state: BeaconState, - pub genesis_block: BeaconBlock, - pub genesis_state: BeaconState, - pub enr: Enr, +pub struct Bootstrapper { + url: Url, } -impl BootstrapParams { - pub fn from_http_api(url: Url) -> Result { - let slots_per_epoch = get_slots_per_epoch(url.clone()) - .map_err(|e| format!("Unable to get slots per epoch: {:?}", e))?; +impl Bootstrapper { + pub fn from_server_string(server: String) -> Result { + Ok(Self { + url: Url::parse(&server).map_err(|e| format!("Invalid bootstrap server url: {}", e))?, + }) + } + + pub fn enr(&self) -> Result { + get_enr(self.url.clone()).map_err(|e| format!("Unable to get ENR: {:?}", e)) + } + + pub fn genesis(&self) -> Result<(BeaconState, BeaconBlock), String> { let genesis_slot = Slot::new(0); - let finalized_slot = get_finalized_slot(url.clone(), slots_per_epoch.as_u64()) + + let block = get_block(self.url.clone(), genesis_slot) + .map_err(|e| format!("Unable to get genesis block: 
{:?}", e))?; + let state = get_state(self.url.clone(), genesis_slot) + .map_err(|e| format!("Unable to get genesis state: {:?}", e))?; + + Ok((state, block)) + } + + pub fn finalized(&self) -> Result<(BeaconState, BeaconBlock), String> { + let slots_per_epoch = get_slots_per_epoch(self.url.clone()) + .map_err(|e| format!("Unable to get slots per epoch: {:?}", e))?; + let finalized_slot = get_finalized_slot(self.url.clone(), slots_per_epoch.as_u64()) .map_err(|e| format!("Unable to get finalized slot: {:?}", e))?; - Ok(Self { - finalized_block: get_block(url.clone(), finalized_slot) - .map_err(|e| format!("Unable to get finalized block: {:?}", e))?, - finalized_state: get_state(url.clone(), finalized_slot) - .map_err(|e| format!("Unable to get finalized state: {:?}", e))?, - genesis_block: get_block(url.clone(), genesis_slot) - .map_err(|e| format!("Unable to get genesis block: {:?}", e))?, - genesis_state: get_state(url.clone(), genesis_slot) - .map_err(|e| format!("Unable to get genesis state: {:?}", e))?, - enr: get_enr(url.clone()).map_err(|e| format!("Unable to get ENR: {:?}", e))?, - }) + let block = get_block(self.url.clone(), finalized_slot) + .map_err(|e| format!("Unable to get finalized block: {:?}", e))?; + let state = get_state(self.url.clone(), finalized_slot) + .map_err(|e| format!("Unable to get finalized state: {:?}", e))?; + + Ok((state, block)) } } diff --git a/beacon_node/client/src/lib.rs b/beacon_node/client/src/lib.rs index 44b5c0ce3..798aedec9 100644 --- a/beacon_node/client/src/lib.rs +++ b/beacon_node/client/src/lib.rs @@ -1,8 +1,8 @@ extern crate slog; mod beacon_chain_types; +mod bootstrapper; mod config; -mod local_bootstrap; pub mod error; pub mod notifier; @@ -22,6 +22,7 @@ use tokio::timer::Interval; pub use beacon_chain::BeaconChainTypes; pub use beacon_chain_types::ClientType; pub use beacon_chain_types::InitialiseBeaconChain; +pub use bootstrapper::Bootstrapper; pub use config::{Config as ClientConfig, GenesisState}; pub use 
eth2_config::Eth2Config; diff --git a/beacon_node/src/main.rs b/beacon_node/src/main.rs index 862ca4a90..5199bddb6 100644 --- a/beacon_node/src/main.rs +++ b/beacon_node/src/main.rs @@ -1,7 +1,7 @@ mod run; use clap::{App, Arg}; -use client::{ClientConfig, Eth2Config, GenesisState}; +use client::{Bootstrapper, ClientConfig, Eth2Config, GenesisState}; use env_logger::{Builder, Env}; use eth2_config::{read_from_file, write_to_file}; use slog::{crit, o, warn, Drain, Level}; @@ -300,9 +300,28 @@ fn main() { // If the `--bootstrap` flag is provided, overwrite the default configuration. if let Some(server) = matches.value_of("bootstrap") { + // Set the genesis state source. client_config.genesis_state = GenesisState::HttpBootstrap { server: server.to_string(), }; + + let bootstrapper = match Bootstrapper::from_server_string(server.to_string()) { + Ok(b) => b, + Err(e) => { + crit!(log, "Failed to load bootstrapper"; "error" => format!("{:?}", e)); + return; + } + }; + + let enr = match bootstrapper.enr() { + Ok(b) => b, + Err(e) => { + crit!(log, "Failed to read ENR from bootstrap server"; "error" => format!("{:?}", e)); + return; + } + }; + + client_config.network.boot_nodes.push(enr); } let eth2_config_path = data_dir.join(ETH2_CONFIG_FILENAME); From 4678524659f4915037b7d64b9ce8f52498a7bb54 Mon Sep 17 00:00:00 2001 From: Paul Hauner Date: Thu, 15 Aug 2019 14:52:00 +1000 Subject: [PATCH 090/305] Store intermediate states during block processing --- beacon_node/beacon_chain/src/beacon_chain.rs | 25 +++++++++++++++++++- 1 file changed, 24 insertions(+), 1 deletion(-) diff --git a/beacon_node/beacon_chain/src/beacon_chain.rs b/beacon_node/beacon_chain/src/beacon_chain.rs index 891f76d37..7faca0dfd 100644 --- a/beacon_node/beacon_chain/src/beacon_chain.rs +++ b/beacon_node/beacon_chain/src/beacon_chain.rs @@ -870,9 +870,16 @@ impl BeaconChain { let catchup_timer = metrics::start_timer(&metrics::BLOCK_PROCESSING_CATCHUP_STATE); + // Keep a list of any states that were 
"skipped" (block-less) in between the parent state + // slot and the block slot. These will need to be stored in the database. + let mut intermediate_states = vec![]; + // Transition the parent state to the block slot. let mut state: BeaconState = parent_state; - for _ in state.slot.as_u64()..block.slot.as_u64() { + for i in state.slot.as_u64()..block.slot.as_u64() { + if i > 0 { + intermediate_states.push(state.clone()); + } per_slot_processing(&mut state, &self.spec)?; } @@ -911,6 +918,22 @@ impl BeaconChain { let db_write_timer = metrics::start_timer(&metrics::BLOCK_PROCESSING_DB_WRITE); + // Store all the states between the parent block state and this blocks slot before storing + // the final state. + for (i, intermediate_state) in intermediate_states.iter().enumerate() { + // To avoid doing an unnecessary tree hash, use the following (slot + 1) state's + // state_roots field to find the root. + let following_state = match intermediate_states.get(i + 1) { + Some(following_state) => following_state, + None => &state, + }; + let intermediate_state_root = + following_state.get_state_root(intermediate_state.slot)?; + + self.store + .put(&intermediate_state_root, intermediate_state)?; + } + // Store the block and state. 
self.store.put(&block_root, &block)?; self.store.put(&state_root, &state)?; From ce37f958612229370791ae170e85780a07362656 Mon Sep 17 00:00:00 2001 From: Paul Hauner Date: Thu, 15 Aug 2019 16:41:02 +1000 Subject: [PATCH 091/305] Allow bootstrapper to scrape libp2p address --- beacon_node/client/Cargo.toml | 1 + beacon_node/client/src/bootstrapper.rs | 29 +++++++++++++++++- beacon_node/client/src/config.rs | 42 ++++++++++++++++++++++++-- beacon_node/eth2-libp2p/src/lib.rs | 2 +- beacon_node/network/src/service.rs | 14 ++++++++- beacon_node/rest_api/src/lib.rs | 3 ++ beacon_node/rest_api/src/network.rs | 21 ++++++++++++- beacon_node/src/main.rs | 28 +---------------- 8 files changed, 107 insertions(+), 33 deletions(-) diff --git a/beacon_node/client/Cargo.toml b/beacon_node/client/Cargo.toml index 9d5d49e17..9b5a9cf42 100644 --- a/beacon_node/client/Cargo.toml +++ b/beacon_node/client/Cargo.toml @@ -28,3 +28,4 @@ dirs = "1.0.3" exit-future = "0.1.3" futures = "0.1.25" reqwest = "0.9" +url = "1.2" diff --git a/beacon_node/client/src/bootstrapper.rs b/beacon_node/client/src/bootstrapper.rs index 9537f6f90..1fd8f1659 100644 --- a/beacon_node/client/src/bootstrapper.rs +++ b/beacon_node/client/src/bootstrapper.rs @@ -1,6 +1,8 @@ -use eth2_libp2p::Enr; +use eth2_libp2p::{Enr, Multiaddr}; use reqwest::{Error as HttpError, Url}; +use std::net::Ipv4Addr; use types::{BeaconBlock, BeaconState, Checkpoint, EthSpec, Slot}; +use url::Host; #[derive(Debug)] enum Error { @@ -25,10 +27,22 @@ impl Bootstrapper { }) } + pub fn server_ipv4_addr(&self) -> Option { + match self.url.host()? 
{ + Host::Ipv4(addr) => Some(addr), + _ => None, + } + } + pub fn enr(&self) -> Result { get_enr(self.url.clone()).map_err(|e| format!("Unable to get ENR: {:?}", e)) } + pub fn listen_addresses(&self) -> Result, String> { + get_listen_addresses(self.url.clone()) + .map_err(|e| format!("Unable to get listen addresses: {:?}", e)) + } + pub fn genesis(&self) -> Result<(BeaconState, BeaconBlock), String> { let genesis_slot = Slot::new(0); @@ -124,3 +138,16 @@ fn get_enr(mut url: Url) -> Result { .json() .map_err(Into::into) } + +fn get_listen_addresses(mut url: Url) -> Result, Error> { + url.path_segments_mut() + .map(|mut url| { + url.push("node").push("network").push("listen_addresses"); + }) + .map_err(|_| Error::UrlCannotBeBase)?; + + reqwest::get(url)? + .error_for_status()? + .json() + .map_err(Into::into) +} diff --git a/beacon_node/client/src/config.rs b/beacon_node/client/src/config.rs index 0d5d5f81d..5dd0eef52 100644 --- a/beacon_node/client/src/config.rs +++ b/beacon_node/client/src/config.rs @@ -1,8 +1,9 @@ -use crate::Eth2Config; +use crate::{Bootstrapper, Eth2Config}; use clap::ArgMatches; +use eth2_libp2p::multiaddr::{Multiaddr, Protocol}; use network::NetworkConfig; use serde_derive::{Deserialize, Serialize}; -use slog::{info, o, Drain}; +use slog::{info, o, warn, Drain}; use std::fs::{self, OpenOptions}; use std::path::PathBuf; use std::sync::Mutex; @@ -149,6 +150,43 @@ impl Config { self.update_logger(log)?; }; + // If the `--bootstrap` flag is provided, overwrite the default configuration. + if let Some(server) = args.value_of("bootstrap") { + do_bootstrapping(self, server.to_string(), &log)?; + } + Ok(()) } } + +fn do_bootstrapping(config: &mut Config, server: String, log: &slog::Logger) -> Result<(), String> { + // Set the genesis state source. 
+ config.genesis_state = GenesisState::HttpBootstrap { + server: server.to_string(), + }; + + let bootstrapper = Bootstrapper::from_server_string(server.to_string())?; + + config.network.boot_nodes.push(bootstrapper.enr()?); + + if let Some(server_ip) = bootstrapper.server_ipv4_addr() { + let server_multiaddr: Multiaddr = bootstrapper + .listen_addresses()? + .first() + .ok_or_else(|| "Bootstrap peer returned an empty list of listen addresses")? + // Iterate through the components of the Multiaddr, replacing any Ipv4 address with the + // server address. + .iter() + .map(|protocol| match protocol { + Protocol::Ip4(_) => Protocol::Ip4(server_ip), + _ => protocol, + }) + .collect::(); + + config.network.libp2p_nodes.push(server_multiaddr); + } else { + warn!(log, "Unable to determine bootstrap server Ipv4 address. Unable to add server as libp2p peer."); + } + + Ok(()) +} diff --git a/beacon_node/eth2-libp2p/src/lib.rs b/beacon_node/eth2-libp2p/src/lib.rs index 8c2644fbb..4c84469ce 100644 --- a/beacon_node/eth2-libp2p/src/lib.rs +++ b/beacon_node/eth2-libp2p/src/lib.rs @@ -23,7 +23,7 @@ pub use libp2p::multiaddr; pub use libp2p::Multiaddr; pub use libp2p::{ gossipsub::{GossipsubConfig, GossipsubConfigBuilder}, - PeerId, + PeerId, Swarm, }; pub use rpc::RPCEvent; pub use service::Libp2pEvent; diff --git a/beacon_node/network/src/service.rs b/beacon_node/network/src/service.rs index ed3c9da0b..4bec03830 100644 --- a/beacon_node/network/src/service.rs +++ b/beacon_node/network/src/service.rs @@ -5,7 +5,7 @@ use beacon_chain::{BeaconChain, BeaconChainTypes}; use core::marker::PhantomData; use eth2_libp2p::Service as LibP2PService; use eth2_libp2p::Topic; -use eth2_libp2p::{Enr, Libp2pEvent, PeerId}; +use eth2_libp2p::{Enr, Libp2pEvent, Multiaddr, PeerId, Swarm}; use eth2_libp2p::{PubsubMessage, RPCEvent}; use futures::prelude::*; use futures::Stream; @@ -64,6 +64,8 @@ impl Service { Ok((Arc::new(network_service), network_send)) } + /// Returns the local ENR from the 
underlying Discv5 behaviour that external peers may connect + /// to. pub fn local_enr(&self) -> Enr { self.libp2p_service .lock() @@ -73,10 +75,19 @@ impl Service { .clone() } + /// Returns the list of `Multiaddr` that the underlying libp2p instance is listening on. + pub fn listen_multiaddrs(&self) -> Vec { + Swarm::listeners(&self.libp2p_service.lock().swarm) + .cloned() + .collect() + } + + /// Returns the number of libp2p connected peers. pub fn connected_peers(&self) -> usize { self.libp2p_service.lock().swarm.connected_peers() } + /// Returns the set of `PeerId` that are connected via libp2p. pub fn connected_peer_set(&self) -> Vec { self.libp2p_service .lock() @@ -88,6 +99,7 @@ impl Service { .collect() } + /// Provides a reference to the underlying libp2p service. pub fn libp2p_service(&self) -> Arc> { self.libp2p_service.clone() } diff --git a/beacon_node/rest_api/src/lib.rs b/beacon_node/rest_api/src/lib.rs index 349a62c3f..8ef48ad72 100644 --- a/beacon_node/rest_api/src/lib.rs +++ b/beacon_node/rest_api/src/lib.rs @@ -135,6 +135,9 @@ pub fn start_server( (&Method::GET, "/node/network/enr") => network::get_enr::(req), (&Method::GET, "/node/network/peer_count") => network::get_peer_count::(req), (&Method::GET, "/node/network/peers") => network::get_peer_list::(req), + (&Method::GET, "/node/network/listen_addresses") => { + network::get_listen_addresses::(req) + } (&Method::GET, "/spec") => spec::get_spec::(req), (&Method::GET, "/spec/slots_per_epoch") => spec::get_slots_per_epoch::(req), _ => Err(ApiError::MethodNotAllowed(path.clone())), diff --git a/beacon_node/rest_api/src/network.rs b/beacon_node/rest_api/src/network.rs index 2fd88f498..0e2448270 100644 --- a/beacon_node/rest_api/src/network.rs +++ b/beacon_node/rest_api/src/network.rs @@ -1,9 +1,28 @@ use crate::{success_response, ApiError, ApiResult, NetworkService}; use beacon_chain::BeaconChainTypes; -use eth2_libp2p::{Enr, PeerId}; +use eth2_libp2p::{Enr, Multiaddr, PeerId}; use hyper::{Body, 
Request}; use std::sync::Arc; +/// HTTP handle to return the list of libp2p multiaddr the client is listening on. +/// +/// Returns a list of `Multiaddr`, serialized according to their `serde` impl. +pub fn get_listen_addresses( + req: Request, +) -> ApiResult { + let network = req + .extensions() + .get::>>() + .ok_or_else(|| ApiError::ServerError("NetworkService extension missing".to_string()))?; + + let multiaddresses: Vec = network.listen_multiaddrs(); + + Ok(success_response(Body::from( + serde_json::to_string(&multiaddresses) + .map_err(|e| ApiError::ServerError(format!("Unable to serialize Enr: {:?}", e)))?, + ))) +} + /// HTTP handle to return the Discv5 ENR from the client's libp2p service. /// /// ENR is encoded as base64 string. diff --git a/beacon_node/src/main.rs b/beacon_node/src/main.rs index 5199bddb6..ae48f692b 100644 --- a/beacon_node/src/main.rs +++ b/beacon_node/src/main.rs @@ -1,7 +1,7 @@ mod run; use clap::{App, Arg}; -use client::{Bootstrapper, ClientConfig, Eth2Config, GenesisState}; +use client::{ClientConfig, Eth2Config}; use env_logger::{Builder, Env}; use eth2_config::{read_from_file, write_to_file}; use slog::{crit, o, warn, Drain, Level}; @@ -298,32 +298,6 @@ fn main() { } }; - // If the `--bootstrap` flag is provided, overwrite the default configuration. - if let Some(server) = matches.value_of("bootstrap") { - // Set the genesis state source. 
- client_config.genesis_state = GenesisState::HttpBootstrap { - server: server.to_string(), - }; - - let bootstrapper = match Bootstrapper::from_server_string(server.to_string()) { - Ok(b) => b, - Err(e) => { - crit!(log, "Failed to load bootstrapper"; "error" => format!("{:?}", e)); - return; - } - }; - - let enr = match bootstrapper.enr() { - Ok(b) => b, - Err(e) => { - crit!(log, "Failed to read ENR from bootstrap server"; "error" => format!("{:?}", e)); - return; - } - }; - - client_config.network.boot_nodes.push(enr); - } - let eth2_config_path = data_dir.join(ETH2_CONFIG_FILENAME); // Initialise the `Eth2Config`. From 7cd963e6bb7ad35458defc94f3c6a24eb24f249c Mon Sep 17 00:00:00 2001 From: Paul Hauner Date: Thu, 15 Aug 2019 18:48:39 +1000 Subject: [PATCH 092/305] Update bootstrapper libp2p address finding --- beacon_node/client/src/bootstrapper.rs | 31 +++++++++++++++++++++++++- beacon_node/client/src/config.rs | 26 +++++++++------------ beacon_node/src/main.rs | 5 +++++ beacon_node/src/run.rs | 7 +----- 4 files changed, 46 insertions(+), 23 deletions(-) diff --git a/beacon_node/client/src/bootstrapper.rs b/beacon_node/client/src/bootstrapper.rs index 1fd8f1659..2c8cf6afc 100644 --- a/beacon_node/client/src/bootstrapper.rs +++ b/beacon_node/client/src/bootstrapper.rs @@ -1,5 +1,9 @@ -use eth2_libp2p::{Enr, Multiaddr}; +use eth2_libp2p::{ + multiaddr::{Multiaddr, Protocol}, + Enr, +}; use reqwest::{Error as HttpError, Url}; +use std::borrow::Cow; use std::net::Ipv4Addr; use types::{BeaconBlock, BeaconState, Checkpoint, EthSpec, Slot}; use url::Host; @@ -27,6 +31,31 @@ impl Bootstrapper { }) } + pub fn best_effort_multiaddr(&self) -> Option { + let tcp_port = self.first_listening_tcp_port()?; + + let mut multiaddr = Multiaddr::with_capacity(2); + + match self.url.host()? 
{ + Host::Ipv4(addr) => multiaddr.push(Protocol::Ip4(addr)), + Host::Domain(s) => multiaddr.push(Protocol::Dns4(Cow::Borrowed(s))), + _ => return None, + }; + + multiaddr.push(Protocol::Tcp(tcp_port)); + + Some(multiaddr) + } + + fn first_listening_tcp_port(&self) -> Option { + self.listen_addresses().ok()?.iter().find_map(|multiaddr| { + multiaddr.iter().find_map(|protocol| match protocol { + Protocol::Tcp(port) => Some(port), + _ => None, + }) + }) + } + pub fn server_ipv4_addr(&self) -> Option { match self.url.host()? { Host::Ipv4(addr) => Some(addr), diff --git a/beacon_node/client/src/config.rs b/beacon_node/client/src/config.rs index 5dd0eef52..1a985fb4a 100644 --- a/beacon_node/client/src/config.rs +++ b/beacon_node/client/src/config.rs @@ -1,6 +1,5 @@ use crate::{Bootstrapper, Eth2Config}; use clap::ArgMatches; -use eth2_libp2p::multiaddr::{Multiaddr, Protocol}; use network::NetworkConfig; use serde_derive::{Deserialize, Serialize}; use slog::{info, o, warn, Drain}; @@ -169,23 +168,18 @@ fn do_bootstrapping(config: &mut Config, server: String, log: &slog::Logger) -> config.network.boot_nodes.push(bootstrapper.enr()?); - if let Some(server_ip) = bootstrapper.server_ipv4_addr() { - let server_multiaddr: Multiaddr = bootstrapper - .listen_addresses()? - .first() - .ok_or_else(|| "Bootstrap peer returned an empty list of listen addresses")? - // Iterate through the components of the Multiaddr, replacing any Ipv4 address with the - // server address. - .iter() - .map(|protocol| match protocol { - Protocol::Ip4(_) => Protocol::Ip4(server_ip), - _ => protocol, - }) - .collect::(); - + if let Some(server_multiaddr) = bootstrapper.best_effort_multiaddr() { + info!( + log, + "Estimated bootstrapper libp2p address"; + "multiaddr" => format!("{:?}", server_multiaddr) + ); config.network.libp2p_nodes.push(server_multiaddr); } else { - warn!(log, "Unable to determine bootstrap server Ipv4 address. 
Unable to add server as libp2p peer."); + warn!( + log, + "Unable to estimate a bootstrapper libp2p address, this node may not find any peers." + ); } Ok(()) diff --git a/beacon_node/src/main.rs b/beacon_node/src/main.rs index ae48f692b..04366baa7 100644 --- a/beacon_node/src/main.rs +++ b/beacon_node/src/main.rs @@ -237,6 +237,11 @@ fn main() { let mut log = slog::Logger::root(drain.fuse(), o!()); + warn!( + log, + "Ethereum 2.0 is pre-release. This software is experimental." + ); + let data_dir = match matches .value_of("datadir") .and_then(|v| Some(PathBuf::from(v))) diff --git a/beacon_node/src/run.rs b/beacon_node/src/run.rs index c16d23e5f..5066231d5 100644 --- a/beacon_node/src/run.rs +++ b/beacon_node/src/run.rs @@ -4,7 +4,7 @@ use client::{ }; use futures::sync::oneshot; use futures::Future; -use slog::{error, info, warn}; +use slog::{error, info}; use std::cell::RefCell; use std::path::Path; use std::path::PathBuf; @@ -42,11 +42,6 @@ pub fn run_beacon_node( let other_client_config = client_config.clone(); - warn!( - log, - "Ethereum 2.0 is pre-release. This software is experimental." - ); - info!( log, "BeaconNode init"; From c259d6c00637e6372cc75afd1c6cd2debe009424 Mon Sep 17 00:00:00 2001 From: Age Manning Date: Sun, 18 Aug 2019 03:36:13 +1000 Subject: [PATCH 093/305] First draft sync re-write. 
WIP --- beacon_node/network/src/message_handler.rs | 10 +- beacon_node/network/src/sync/import_queue.rs | 307 ------- beacon_node/network/src/sync/manager.rs | 810 +++++++++++++------ beacon_node/network/src/sync/simple_sync.rs | 409 ++-------- 4 files changed, 661 insertions(+), 875 deletions(-) delete mode 100644 beacon_node/network/src/sync/import_queue.rs diff --git a/beacon_node/network/src/message_handler.rs b/beacon_node/network/src/message_handler.rs index 6a9a40369..fd10c5aea 100644 --- a/beacon_node/network/src/message_handler.rs +++ b/beacon_node/network/src/message_handler.rs @@ -118,7 +118,14 @@ impl MessageHandler { hello_message, &mut self.network_context, ), - RPCRequest::Goodbye(goodbye_reason) => self.sync.on_goodbye(peer_id, goodbye_reason), + RPCRequest::Goodbye(goodbye_reason) => { + debug!( + self.log, "PeerGoodbye"; + "peer" => format!("{:?}", peer_id), + "reason" => format!("{:?}", reason), + ); + self.sync.on_disconnect(peer_id), + }, RPCRequest::BeaconBlocks(request) => self.sync.on_beacon_blocks_request( peer_id, request_id, @@ -167,6 +174,7 @@ impl MessageHandler { Ok(beacon_blocks) => { self.sync.on_beacon_blocks_response( peer_id, + request_id, beacon_blocks, &mut self.network_context, ); diff --git a/beacon_node/network/src/sync/import_queue.rs b/beacon_node/network/src/sync/import_queue.rs deleted file mode 100644 index 5503ed64f..000000000 --- a/beacon_node/network/src/sync/import_queue.rs +++ /dev/null @@ -1,307 +0,0 @@ -use beacon_chain::{BeaconChain, BeaconChainTypes}; -use eth2_libp2p::rpc::methods::*; -use eth2_libp2p::PeerId; -use slog::error; -use std::collections::HashMap; -use std::sync::Arc; -use std::time::{Duration, Instant}; -use tree_hash::TreeHash; -use types::{BeaconBlock, BeaconBlockBody, BeaconBlockHeader, EthSpec, Hash256, Slot}; - -/// Provides a queue for fully and partially built `BeaconBlock`s. -/// -/// The queue is fundamentally a `Vec` where no two items have the same -/// `item.block_root`. 
This struct it backed by a `Vec` not a `HashMap` for the following two -/// reasons: -/// -/// - When we receive a `BeaconBlockBody`, the only way we can find it's matching -/// `BeaconBlockHeader` is to find a header such that `header.beacon_block_body == -/// tree_hash_root(body)`. Therefore, if we used a `HashMap` we would need to use the root of -/// `BeaconBlockBody` as the key. -/// - It is possible for multiple distinct blocks to have identical `BeaconBlockBodies`. Therefore -/// we cannot use a `HashMap` keyed by the root of `BeaconBlockBody`. -pub struct ImportQueue { - pub chain: Arc>, - /// Partially imported blocks, keyed by the root of `BeaconBlockBody`. - partials: HashMap>, - /// Time before a queue entry is considered state. - pub stale_time: Duration, - /// Logging - log: slog::Logger, -} - -impl ImportQueue { - /// Return a new, empty queue. - pub fn new(chain: Arc>, stale_time: Duration, log: slog::Logger) -> Self { - Self { - chain, - partials: HashMap::new(), - stale_time, - log, - } - } - - /// Returns true of the if the `BlockRoot` is found in the `import_queue`. - pub fn contains_block_root(&self, block_root: Hash256) -> bool { - self.partials.contains_key(&block_root) - } - - /// Attempts to complete the `BlockRoot` if it is found in the `import_queue`. - /// - /// Returns an Enum with a `PartialBeaconBlockCompletion`. - /// Does not remove the `block_root` from the `import_queue`. - pub fn attempt_complete_block( - &self, - block_root: Hash256, - ) -> PartialBeaconBlockCompletion { - if let Some(partial) = self.partials.get(&block_root) { - partial.attempt_complete() - } else { - PartialBeaconBlockCompletion::MissingRoot - } - } - - /// Removes the first `PartialBeaconBlock` with a matching `block_root`, returning the partial - /// if it exists. - pub fn remove(&mut self, block_root: Hash256) -> Option> { - self.partials.remove(&block_root) - } - - /// Flushes all stale entries from the queue. 
- /// - /// An entry is stale if it has as a `inserted` time that is more than `self.stale_time` in the - /// past. - pub fn remove_stale(&mut self) { - let stale_time = self.stale_time; - - self.partials - .retain(|_, partial| partial.inserted + stale_time > Instant::now()) - } - - /// Returns `true` if `self.chain` has not yet processed this block. - pub fn chain_has_not_seen_block(&self, block_root: &Hash256) -> bool { - self.chain - .is_new_block_root(&block_root) - .unwrap_or_else(|_| { - error!(self.log, "Unable to determine if block is new."); - true - }) - } - - /// Adds the `block_roots` to the partials queue. - /// - /// If a `block_root` is not in the queue and has not been processed by the chain it is added - /// to the queue and it's block root is included in the output. - pub fn enqueue_block_roots( - &mut self, - block_roots: &[BlockRootSlot], - sender: PeerId, - ) -> Vec { - // TODO: This will currently not return a `BlockRootSlot` if this root exists but there is no header. - // It would be more robust if it did. - let new_block_root_slots: Vec = block_roots - .iter() - // Ignore any roots already stored in the queue. - .filter(|brs| !self.contains_block_root(brs.block_root)) - // Ignore any roots already processed by the chain. - .filter(|brs| self.chain_has_not_seen_block(&brs.block_root)) - .cloned() - .collect(); - - self.partials.extend( - new_block_root_slots - .iter() - .map(|brs| PartialBeaconBlock { - slot: brs.slot, - block_root: brs.block_root, - sender: sender.clone(), - header: None, - body: None, - inserted: Instant::now(), - }) - .map(|partial| (partial.block_root, partial)), - ); - - new_block_root_slots - } - - /// Adds the `headers` to the `partials` queue. Returns a list of `Hash256` block roots for - /// which we should use to request `BeaconBlockBodies`. - /// - /// If a `header` is not in the queue and has not been processed by the chain it is added to - /// the queue and it's block root is included in the output. 
- /// - /// If a `header` is already in the queue, but not yet processed by the chain the block root is - /// not included in the output and the `inserted` time for the partial record is set to - /// `Instant::now()`. Updating the `inserted` time stops the partial from becoming stale. - pub fn enqueue_headers( - &mut self, - headers: Vec, - sender: PeerId, - ) -> Vec { - let mut required_bodies: Vec = vec![]; - - for header in headers { - let block_root = Hash256::from_slice(&header.canonical_root()[..]); - - if self.chain_has_not_seen_block(&block_root) - && !self.insert_header(block_root, header, sender.clone()) - { - // If a body is empty - required_bodies.push(block_root); - } - } - - required_bodies - } - - /// If there is a matching `header` for this `body`, adds it to the queue. - /// - /// If there is no `header` for the `body`, the body is simply discarded. - pub fn enqueue_bodies( - &mut self, - bodies: Vec>, - sender: PeerId, - ) -> Option { - let mut last_block_hash = None; - for body in bodies { - last_block_hash = self.insert_body(body, sender.clone()); - } - - last_block_hash - } - - pub fn enqueue_full_blocks(&mut self, blocks: Vec>, sender: PeerId) { - for block in blocks { - self.insert_full_block(block, sender.clone()); - } - } - - /// Inserts a header to the queue. - /// - /// If the header already exists, the `inserted` time is set to `now` and not other - /// modifications are made. - /// Returns true is `body` exists. 
- fn insert_header( - &mut self, - block_root: Hash256, - header: BeaconBlockHeader, - sender: PeerId, - ) -> bool { - let mut exists = false; - self.partials - .entry(block_root) - .and_modify(|partial| { - partial.header = Some(header.clone()); - partial.inserted = Instant::now(); - if partial.body.is_some() { - exists = true; - } - }) - .or_insert_with(|| PartialBeaconBlock { - slot: header.slot, - block_root, - header: Some(header), - body: None, - inserted: Instant::now(), - sender, - }); - exists - } - - /// Updates an existing partial with the `body`. - /// - /// If the body already existed, the `inserted` time is set to `now`. - /// - /// Returns the block hash of the inserted body - fn insert_body( - &mut self, - body: BeaconBlockBody, - sender: PeerId, - ) -> Option { - let body_root = Hash256::from_slice(&body.tree_hash_root()[..]); - let mut last_root = None; - - self.partials.iter_mut().for_each(|(root, mut p)| { - if let Some(header) = &mut p.header { - if body_root == header.body_root { - p.inserted = Instant::now(); - p.body = Some(body.clone()); - p.sender = sender.clone(); - last_root = Some(*root); - } - } - }); - - last_root - } - - /// Updates an existing `partial` with the completed block, or adds a new (complete) partial. - /// - /// If the partial already existed, the `inserted` time is set to `now`. - fn insert_full_block(&mut self, block: BeaconBlock, sender: PeerId) { - let block_root = Hash256::from_slice(&block.canonical_root()[..]); - - let partial = PartialBeaconBlock { - slot: block.slot, - block_root, - header: Some(block.block_header()), - body: Some(block.body), - inserted: Instant::now(), - sender, - }; - - self.partials - .entry(block_root) - .and_modify(|existing_partial| *existing_partial = partial.clone()) - .or_insert(partial); - } -} - -/// Individual components of a `BeaconBlock`, potentially all that are required to form a full -/// `BeaconBlock`. 
-#[derive(Clone, Debug)] -pub struct PartialBeaconBlock { - pub slot: Slot, - /// `BeaconBlock` root. - pub block_root: Hash256, - pub header: Option, - pub body: Option>, - /// The instant at which this record was created or last meaningfully modified. Used to - /// determine if an entry is stale and should be removed. - pub inserted: Instant, - /// The `PeerId` that last meaningfully contributed to this item. - pub sender: PeerId, -} - -impl PartialBeaconBlock { - /// Attempts to build a block. - /// - /// Does not comsume the `PartialBeaconBlock`. - pub fn attempt_complete(&self) -> PartialBeaconBlockCompletion { - if self.header.is_none() { - PartialBeaconBlockCompletion::MissingHeader(self.slot) - } else if self.body.is_none() { - PartialBeaconBlockCompletion::MissingBody - } else { - PartialBeaconBlockCompletion::Complete( - self.header - .clone() - .unwrap() - .into_block(self.body.clone().unwrap()), - ) - } - } -} - -/// The result of trying to convert a `BeaconBlock` into a `PartialBeaconBlock`. -pub enum PartialBeaconBlockCompletion { - /// The partial contains a valid BeaconBlock. - Complete(BeaconBlock), - /// The partial does not exist. - MissingRoot, - /// The partial contains a `BeaconBlockRoot` but no `BeaconBlockHeader`. - MissingHeader(Slot), - /// The partial contains a `BeaconBlockRoot` and `BeaconBlockHeader` but no `BeaconBlockBody`. - MissingBody, -} diff --git a/beacon_node/network/src/sync/manager.rs b/beacon_node/network/src/sync/manager.rs index 52c1a72c6..a4ce544ec 100644 --- a/beacon_node/network/src/sync/manager.rs +++ b/beacon_node/network/src/sync/manager.rs @@ -1,283 +1,639 @@ +const MAX_BLOCKS_PER_REQUEST: usize = 10; -const MAXIMUM_BLOCKS_PER_REQUEST: usize = 10; -const SIMULTANEOUS_REQUESTS: usize = 10; -use super::simple_sync::FUTURE_SLOT_TOLERANCE; +/// The number of slots that we can import blocks ahead of us, before going into full Sync mode. 
+const SLOT_IMPORT_TOLERANCE: u64 = 10; -struct Chunk { - id: usize, - start_slot: Slot, - end_slot: Slot, - } +const PARENT_FAIL_TOLERANCE: usize = 3; +const PARENT_DEPTH_TOLERANCE: usize = SLOT_IMPORT_TOLERANCE*2; - -struct CompletedChunk { - peer_id: PeerId, - chunk: Chunk, - blocks: Vec, +enum BlockRequestsState { + QueuedForward, + QueuedBackward, + Pending(RequestId), + Complete, } -struct ProcessedChunk { - peer_id: PeerId, - chunk: Chunk, +struct BlockRequests { + target_head_slot: Slot + target_head_root: Hash256, + downloaded_blocks: Vec, + state: State, } -#[derive(PartialEq)] -pub enum SyncState { - Idle, - Downloading, - ColdSync { - max_wanted_slot: Slot, - max_wanted_hash: Hash256, +struct ParentRequests { + downloaded_blocks: Vec, + attempts: usize, + last_submitted_peer: PeerId, // to downvote the submitting peer. + state: BlockRequestsState, +} + +impl BlockRequests { + + // gets the start slot for next batch + // last block slot downloaded plus 1 + fn next_start_slot(&self) -> Option { + if !self.downloaded_blocks.is_empty() { + match self.state { + BlockRequestsState::QueuedForward => { + let last_element_index = self.downloaded_blocks.len() -1; + Some(downloaded_blocks[last_element_index].slot.add(1)) + } + BlockRequestsState::QueuedBackward => { + let earliest_known_slot = self.downloaded_blocks[0].slot; + Some(earliest_known_slot.add(1).sub(MAX_BLOCKS_PER_REQUEST)) + } + } + } + else { + None + } } } -pub enum SyncManagerState { - RequestBlocks(peer_id, BeaconBlockRequest), +enum ManagerState { + Syncing, + Regular, Stalled, - Idle, } -pub struct PeerSyncInfo { - peer_id: PeerId, - fork_version: [u8,4], - finalized_root: Hash256, - finalized_epoch: Epoch, - head_root: Hash256, - head_slot: Slot, - requested_slot_skip: Option<(Slot, usize)>, +enum ImportManagerOutcome { + Idle, + RequestBlocks{ + peer_id: PeerId, + request_id: RequestId, + request: BeaconBlocksRequest, + }, + RecentRequest(PeerId, RecentBeaconBlocksRequest), + 
DownvotePeer(PeerId), } -pub(crate) struct SyncManager { + +pub struct ImportManager { /// A reference to the underlying beacon chain. chain: Arc>, - /// A mapping of Peers to their respective PeerSyncInfo. - available_peers: HashMap, - wanted_chunks: Vec, - pending_chunks: HashMap, - completed_chunks: Vec, - processed_chunks: Vec, // ordered - multi_peer_sections: HashMap - - current_requests: usize, - latest_wanted_slot: Option, - sync_status: SyncStatus, - to_process_chunk_id: usize, + state: MangerState, + import_queue: HashMap, + parent_queue: Vec, + full_peers: Hashset, + current_req_id: usize, log: Logger, - } -impl SyncManager { - /// Adds a sync-able peer and determines which blocks to download given the current state of - /// the chain, known peers and currently requested blocks. - fn add_sync_peer(&mut self, peer_id: PeerId, remote: PeerSyncInfo, network &mut NetworkContext) { +impl ImportManager { + pub fn add_peer(&mut self, peer_id, remote: PeerSyncInfo) { + // TODO: Improve comments. + // initially try to download blocks from our current head + // then backwards search all the way back to our finalized epoch until we match on a chain + // has to be done sequentially to find next slot to start the batch from + let local = PeerSyncInfo::from(&self.chain); - let remote_finalized_slot = remote.finalized_epoch.start_slot(T::EthSpec::slots_per_epoch()); - let local_finalized_slot = local.finalized_epoch.start_slot(T::EthSpec::slots_per_epoch()); - // cold sync - if remote_finalized_slot > local.head_slot { - if let SyncState::Idle || SyncState::Downloading = self.sync_state { - info!(self.log, "Cold Sync Started", "start_slot" => local.head_slot, "latest_known_finalized" => remote_finalized_slot); - self.sync_state = SyncState::ColdSync{Slot::from(0), remote.finalized_hash} - } - - if let SyncState::ColdSync{max_wanted_slot, max_wanted_hjash } = self.sync_state { - - // We don't assume that our current head is the canonical chain. 
So we request blocks from - // our last finalized slot to ensure we are on the finalized chain. - if max_wanted_slot < remote_finalized_slot { - let remaining_blocks = remote_finalized_slot - max_wanted_slot; - for chunk in (0..remaining_blocks/MAXIMUM_BLOCKS_PER_REQUEST) { - self.wanted_chunks.push( - Chunk { - id: self.current_chunk_id, - previous_chunk: self.curent_chunk_id.saturating_sub(1), - start_slot: chunk*MAXIMUM_BLOCKS_PER_REQUEST + self.last_wanted_slot, - end_slot: (section+1)*MAXIMUM_BLOCKS_PER_REQUEST +self.last_wanted_slot, - }) - self.current_chunk_id +=1; - } - - // add any extra partial chunks - self.pending_section.push( Section { - start_slot: (remaining_blocks/MAXIMUM_BLOCKS_PER_REQUEST) + 1, - end_slot: remote_finalized_slot, - }) - self.current_chunk_id +=1; - - info!(self.log, "Cold Sync Updated", "start_slot" => local.head_slot, "latest_known_finalized" => remote_finalized_slot); - - self.sync_state = SyncState::ColdSync{remote_finalized_slot, remote.finalized_hash} - } + // If a peer is within SLOT_IMPORT_TOLERANCE from out head slot, ignore a batch sync + if remote.head_slot.sub(local.head_slot) < SLOT_IMPORT_TOLERANCE { + trace!(self.log, "Ignoring full sync with peer"; + "peer" => peer_id, + "peer_head_slot" => remote.head_slot, + "local_head_slot" => local.head_slot, + ); + // remove the peer from the queue if it exists + self.import_queue.remove(&peer_id); + return; } - else { // hot sync - if remote_head_slot > self.chain.head().beacon_state.slot { - if let SyncState::Idle = self.sync_state { - self.sync_state = SyncState::Downloading - info!(self.log, "Sync Started", "start_slot" => local.head_slot, "latest_known_head" => remote.head_slot.as_u64()); + if let Some(block_requests) = self.import_queue.get_mut(&peer_id) { + // update the target head slot + if remote.head_slot > requested_block.target_head_slot { + block_requests.target_head_slot = remote.head_slot; } - self.latest_known_slot = remote_head_slot; - //TODO Build requests. 
+ } else { + let block_requests = BlockRequests { + target_head_slot: remote.head_slot, // this should be larger than the current head. It is checked in the SyncManager before add_peer is called + target_head_root: remote.head_root, + downloaded_blocks: Vec::new(), + state: RequestedBlockState::Queued } + self.import_queue.insert(peer_id, block_requests); } - available_peers.push(remote); - } - pub fn add_blocks(&mut self, chunk_id: RequestId, peer_id: PeerId, blocks: Vec) { - - if SyncState::ColdSync{max_wanted_slot, max_wanted_hash} = self.sync_state { - - let chunk = match self.pending_chunks.remove(&peer_id) { - Some(chunks) => { - match chunks.find(|chunk| chunk.id == chunk_id) { - Some(chunk) => chunk, - None => { - warn!(self.log, "Received blocks for an unknown chunk"; - "peer"=> peer_id); - return; - } - } - }, - None => { - warn!(self.log, "Received blocks without a request"; - "peer"=> peer_id); + pub fn beacon_blocks_response(peer_id: PeerId, request_id: RequestId, blocks: Vec) { + + // find the request + let block_requests = match self.import_queue.get_mut(&peer_id) { + Some(req) if req.state = RequestedBlockState::Pending(request_id) => req, + None => { + // No pending request, invalid request_id or coding error + warn!(self.log, "BeaconBlocks response unknown"; "request_id" => request_id); return; - } - }; + } + }; - // add to completed - self.current_requests -= 1; - self.completed_chunks.push(CompletedChunk(peer_id, Chunk)); + // The response should contain at least one block. + // + // If we are syncing up to a target head block, at least the target head block should be + // returned. If we are syncing back to our last finalized block the request should return + // at least the last block we received (last known block). 
In diagram form: + // + // unknown blocks requested blocks downloaded blocks + // |-------------------|------------------------|------------------------| + // ^finalized slot ^ requested start slot ^ last known block ^ remote head + + if blocks.is_empty() { + warn!(self.log, "BeaconBlocks response was empty"; "request_id" => request_id); + block_requests.state = RequestedBlockState::Failed; + return; + } + + // Add the newly downloaded blocks to the current list of downloaded blocks. This also + // determines if we are syncing forward or backward. + let syncing_forwards = { + if block_requests.blocks.is_empty() { + block_requests.blocks.push(blocks); + true + } + else if block_requests.blocks[0].slot < blocks[0].slot { // syncing forwards + // verify the peer hasn't sent overlapping blocks - ensuring the strictly + // increasing blocks in a batch will be verified during the processing + if block_requests.next_slot() > blocks[0].slot { + warn!(self.log, "BeaconBlocks response returned duplicate blocks", "request_id" => request_id, "response_initial_slot" => blocks[0].slot, "requested_initial_slot" => block_requests.next_slot()); + block_requests.state = RequestedBlockState::Failed; + return; + } + + block_requests.blocks.push(blocks); + true + } + else { false } + }; + + + // Determine if more blocks need to be downloaded. There are a few cases: + // - We have downloaded a batch from our head_slot, which has not reached the remotes head + // (target head). Therefore we need to download another sequential batch. + // - The latest batch includes blocks that greater than or equal to the target_head slot, + // which means we have caught up to their head. We then check to see if the first + // block downloaded matches our head. If so, we are on the same chain and can process + // the blocks. If not we need to sync back further until we are on the same chain. So + // request more blocks. 
+ // - We are syncing backwards (from our head slot) and need to check if we are on the same + // chain. If so, process the blocks, if not, request more blocks all the way up to + // our last finalized slot. + + if syncing_forwards { + // does the batch contain the target_head_slot + let last_element_index = block_requests.blocks.len()-1; + if block_requests[last_element_index].slot >= block_requests.target_slot { + // if the batch is on our chain, this is complete and we can then process. + // Otherwise start backwards syncing until we reach a common chain. + let earliest_slot = block_requests_blocks[0].slot + if block_requests.blocks[0] == self.chain.get_block_by_slot(earliest_slot) { + block_requests.state = RequestedBlockState::Complete; + return; + } + + // not on the same chain, request blocks backwards + // binary search, request half the distance between the earliest block and our + // finalized slot + let state = &beacon_chain.head().beacon_state; + let local_finalized_slot = state.finalized_checkpoint.epoch; //TODO: Convert to slot + // check that the request hasn't failed by having no common chain + if local_finalized_slot >= block_requests.blocks[0] { + warn!(self.log, "Peer returned an unknown chain."; "request_id" => request_id); + block_requests.state = RequestedBlockState::Failed; + return; + } + + // Start a backwards sync by requesting earlier blocks + // There can be duplication in downloaded blocks here if there are a large number + // of skip slots. In all cases we at least re-download the earliest known block. + // It is unlikely that a backwards sync in required, so we accept this duplication + // for now. + block_requests.state = RequestedBlockState::QueuedBackward; + } + else { + // batch doesn't contain the head slot, request the next batch + block_requests.state = RequestedBlockState::QueuedForward; + } + } + else { + // syncing backwards + // if the batch is on our chain, this is complete and we can then process. 
+ // Otherwise continue backwards + let earliest_slot = block_requests_blocks[0].slot + if block_requests.blocks[0] == self.chain.get_block_by_slot(earliest_slot) { + block_requests.state = RequestedBlockState::Complete; + return; + } + block_requests.state = RequestedBlockState::QueuedBackward; + } } - pub fn inject_error(id: RequestId, peer_id) { - if let SyncState::ColdSync{ _max_wanted_slot, _max_wanted_hash } { - match self.pending_chunks.get(&peer_id) { - Some(chunks) => { - if let Some(pos) = chunks.iter().position(|c| c.id == id) { - chunks.remove(pos); - } - }, - None => { - debug!(self.log, - "Received an error for an unknown request"; - "request_id" => id, - "peer" => peer_id - ); + pub fn recent_blocks_response(peer_id: PeerId, request_id: RequestId, blocks: Vec) { + + // find the request + let parent_request = match self.parent_queue.get_mut(&peer_id) { + Some(req) if req.state = RequestedBlockState::Pending(request_id) => req, + None => { + // No pending request, invalid request_id or coding error + warn!(self.log, "RecentBeaconBlocks response unknown"; "request_id" => request_id); + return; + } + }; + + // if an empty response is given, the peer didn't have the requested block, try again + if blocks.is_empty() { + parent_request.attempts += 1; + parent_request.state = RequestedBlockState::QueuedForward; + parent_request.last_submitted_peer = peer_id; + return; + } + + // currently only support a single block lookup. Reject any response that has more than 1 + // block + if blocks.len() != 1 { + //TODO: Potentially downvote the peer + debug!(self.log, "Peer sent more than 1 parent. 
Ignoring"; + "peer_id" => peer_id, + "no_parents" => blocks.len() + ); + return; + } + + + // queue for processing + parent_request.state = RequestedBlockState::Complete; + } + + + pub fn inject_error(peer_id: PeerId, id: RequestId) { + //TODO: Remove block state from pending + } + + pub fn peer_disconnect(peer_id: PeerId) { + self.import_queue.remove(&peer_id); + self.full_peers.remove(&peer_id); + self.update_state(); + } + + pub fn add_full_peer(peer_id: PeerId) { + debug!( + self.log, "Fully synced peer added"; + "peer" => format!("{:?}", peer_id), + ); + self.full_peers.insert(peer_id); + self.update_state(); + } + + pub fn add_unknown_block(&mut self,block: BeaconBlock) { + // if we are not in regular sync mode, ignore this block + if self.state == ManagerState::Regular { + return; + } + + // make sure this block is not already being searched for + // TODO: Potentially store a hashset of blocks for O(1) lookups + for parent_req in self.parent_queue.iter() { + if let Some(_) = parent_req.downloaded_blocks.iter().find(|d_block| d_block == block) { + // we are already searching for this block, ignore it + return; + } + } + + let req = ParentRequests { + downloaded_blocks: vec![block], + failed_attempts: 0, + state: RequestedBlockState::QueuedBackward + } + + self.parent_queue.push(req); + } + + pub fn poll() -> ImportManagerOutcome { + + loop { + // update the state of the manager + self.update_state(); + + // process potential block requests + if let Some(outcome) = self.process_potential_block_requests() { + return outcome; + } + + // process any complete long-range batches + if let Some(outcome) = self.process_complete_batches() { + return outcome; + } + + // process any parent block lookup-requests + if let Some(outcome) = self.process_parent_requests() { + return outcome; + } + + // process any complete parent lookups + if let (re_run, outcome) = self.process_complete_parent_requests() { + if let Some(outcome) = outcome { + return outcome; + } + else if 
!re_run { + break; } } } + + return ImportManagerOutcome::Idle; + } - pub fn poll(&mut self) -> SyncManagerState { - // if cold sync - if let SyncState::ColdSync(waiting_slot, max_wanted_slot, max_wanted_hash) = self.sync_state { + fn update_state(&mut self) { + let previous_state = self.state; + self.state = { + if !self.import_queue.is_empty() { + ManagerState::Syncing + } + else if !self.full_peers.is_empty() { + ManagerState::Regualar + } + else { + ManagerState::Stalled } + }; + if self.state != previous_state { + info!(self.log, "Syncing state updated", + "old_state" => format!("{:?}", previous_state) + "new_state" => format!("{:?}", self.state) + ); + } + } - // Try to process completed chunks - for completed_chunk in self.completed_chunks { - let chunk = completed_chunk.1; - let last_chunk_id = { - let no_processed_chunks = self.processed_chunks.len(); - if elements == 0 { 0 } else { self.processed_chunks[no_processed_chunks].id } - }; - if chunk.id == last_chunk_id + 1 { - // try and process the chunk - for block in chunk.blocks { - let processing_result = self.chain.process_block(block.clone()); - if let Ok(outcome) = processing_result { - match outcome { - BlockProcessingOutCome::Processed { block_root} => { - // block successfully processed - }, - BlockProcessingOutcome::BlockIsAlreadyKnown => { - warn!( - self.log, "Block Already Known"; - "source" => source, - "sync" => "Cold Sync", - "parent_root" => format!("{}", parent), - "baby_block_slot" => block.slot, - "peer" => format!("{:?}", chunk.0), - ); - }, - _ => { - // An error has occurred - // This could be due to the previous chunk or the current chunk. - // Re-issue both. - warn!( - self.log, "Faulty Chunk"; - "source" => source, - "sync" => "Cold Sync", - "parent_root" => format!("{}", parent), - "baby_block_slot" => block.slot, - "peer" => format!("{:?}", chunk.0), - "outcome" => format!("{:?}", outcome), - ); - // re-issue both chunks - // if both are the same peer. Downgrade the peer. 
- let past_chunk = self.processed_chunks.pop() - self.wanted_chunks.insert(0, chunk.clone()); - self.wanted_chunks.insert(0, past_chunk.clone()); - if chunk.0 == past_chunk.peer_id { - // downgrade peer - return SyncManagerState::DowngradePeer(chunk.0); - } - break; - } - } - } - } - // chunk successfully processed - debug!(self.log, - "Chunk Processed"; - "id" => chunk.id - "start_slot" => chunk.start_slot, - "end_slot" => chunk.end_slot, + fn process_potential_block_requests(&mut self) -> Option { + // check if an outbound request is required + // Managing a fixed number of outbound requests is maintained at the RPC protocol libp2p + // layer and not needed here. + // If any in queued state we submit a request. + + + // remove any failed batches + self.import_queue.retain(|peer_id, block_request| { + if block_request.state == RequestedBlockState::Failed { + debug!(self.log, "Block import from peer failed", + "peer_id" => peer_id, + "downloaded_blocks" => block_request.downloaded.blocks.len() ); - self.processed_chunks.push(chunk); - } + false } + else { true } + }); - // chunks completed, update the state - self.sync_state = SyncState::ColdSync{waiting_slot, max_wanted_slot, max_wanted_hash}; - // Remove stales + for (peer_id, block_requests) in self.import_queue.iter_mut() { + if let Some(request) = requests.iter().find(|req| req.state == RequestedBlockState::QueuedForward || req.state == RequestedBlockState::QueuedBackward) { - // Spawn requests - if self.current_requests <= SIMULTANEOUS_REQUESTS { - if !self.wanted_chunks.is_empty() { - let chunk = self.wanted_chunks.remove(0); - for n in (0..self.peers.len()).rev() { - let peer = self.peers.swap_remove(n); - let peer_finalized_slot = peer.finalized_epoch.start_slot(T::EthSpec::slots_per_epoch()); - if peer_finalized_slot >= chunk.end_slot { - *self.pending.chunks.entry(&peer_id).or_insert_with(|| Vec::new).push(chunk); - self.active_peers.push(peer); - self.current_requests +=1; - let block_request = 
BeaconBlockRequest { - head_block_root, - start_slot: chunk.start_slot, - count: chunk.end_slot - chunk.start_slot - step: 1 - } - return SyncManagerState::BlockRequest(peer, block_request); - } - } - // no peers for this chunk - self.wanted_chunks.push(chunk); - return SyncManagerState::Stalled + let request.state = RequestedBlockState::Pending(self.current_req_id); + self.current_req_id +=1; + + let req = BeaconBlocksRequest { + head_block_root: request.target_root, + start_slot: request.next_start_slot().unwrap_or_else(|| self.chain.head().slot), + count: MAX_BLOCKS_PER_REQUEST, + step: 0 } + return Some(ImportManagerOutCome::RequestBlocks{ peer_id, req }); } } - // if hot sync - return SyncManagerState::Idle + None + } + + fn process_complete_batches(&mut self) -> Option { + + let completed_batches = self.import_queue.iter().filter(|_peer, block_requests| block_requests.state == RequestedState::Complete).map(|peer, _| peer).collect::>(); + for peer_id in completed_batches { + let block_requests = self.import_queue.remove(&peer_id).unwrap("key exists"); + match self.process_blocks(block_requests.downloaded_blocks) { + Ok(()) => { + //TODO: Verify it's impossible to have empty downloaded_blocks + last_element = block_requests.downloaded_blocks.len() -1 + debug!(self.log, "Blocks processed successfully"; + "peer" => peer_id, + "start_slot" => block_requests.downloaded_blocks[0].slot, + "end_slot" => block_requests.downloaded_blocks[last_element].slot, + "no_blocks" => last_element + 1, + ); + // Re-HELLO to ensure we are up to the latest head + return Some(ImportManagerOutcome::Hello(peer_id)); + } + Err(e) => { + last_element = block_requests.downloaded_blocks.len() -1 + warn!(self.log, "Block processing failed"; + "peer" => peer_id, + "start_slot" => block_requests.downloaded_blocks[0].slot, + "end_slot" => block_requests.downloaded_blocks[last_element].slot, + "no_blocks" => last_element + 1, + "error" => format!("{:?}", e), + ); + return 
Some(ImportManagerOutcome::DownvotePeer(peer_id)); + } + } + } + None + } + + + fn process_parent_requests(&mut self) -> Option { + + // remove any failed requests + self.parent_queue.retain(|parent_request| { + if parent_request.state == RequestedBlockState::Failed { + debug!(self.log, "Parent import failed", + "block" => parent_request.downloaded_blocks[0].hash, + "siblings found" => parent_request.len() + ); + false + } + else { true } + }); + + // check to make sure there are peers to search for the parent from + if self.full_peers.is_empty() { + return; + } + + // check if parents need to be searched for + for parent_request in self.parent_queue.iter_mut() { + if parent_request.failed_attempts >= PARENT_FAIL_TOLERANCE { + parent_request.state == BlockRequestsState::Failed + continue; + } + else if parent_request.state == BlockRequestsState::QueuedForward { + parent_request.state = BlockRequestsState::Pending(self.current_req_id); + self.current_req_id +=1; + let parent_hash = + let req = RecentBeaconBlocksRequest { + block_roots: vec![parent_hash], + }; + + // select a random fully synced peer to attempt to download the parent block + let peer_id = self.full_peers.iter().next().expect("List is not empty"); + + return Some(ImportManagerOutcome::RecentRequest(peer_id, req); + } + } + + None + } + + + fn process_complete_parent_requests(&mut self) => (bool, Option) { + + // flag to determine if there is more process to drive or if the manager can be switched to + // an idle state + let mut re_run = false; + + // verify the last added block is the parent of the last requested block + let last_index = parent_requests.downloaded_blocks.len() -1; + let expected_hash = parent_requests.downloaded_blocks[last_index].parent ; + let block_hash = parent_requests.downloaded_blocks[0].tree_hash_root(); + if block_hash != expected_hash { + //TODO: Potentially downvote the peer + debug!(self.log, "Peer sent invalid parent. 
Ignoring"; + "peer_id" => peer_id, + "received_block" => block_hash, + "expected_parent" => expected_hash, + ); + return; + } + + // Find any parent_requests ready to be processed + for completed_request in self.parent_queue.iter_mut().filter(|req| req.state == BlockRequestsState::Complete) { + // try and process the list of blocks up to the requested block + while !completed_request.downloaded_blocks.is_empty() { + let block = completed_request.downloaded_blocks.pop(); + match self.chain_process_block(block.clone()) { + Ok(BlockProcessingOutcome::ParentUnknown { parent } => { + // need to keep looking for parents + completed_request.downloaded_blocks.push(block); + completed_request.state == BlockRequestsState::QueuedForward; + re_run = true; + break; + } + Ok(BlockProcessingOutcome::Processed { _ } => { } + Ok(outcome) => { // it's a future slot or an invalid block, remove it and try again + completed_request.failed_attempts +=1; + trace!( + self.log, "Invalid parent block"; + "outcome" => format!("{:?}", outcome); + "peer" => format!("{:?}", completed_request.last_submitted_peer), + ); + completed_request.state == BlockRequestsState::QueuedForward; + re_run = true; + return (re_run, Some(ImportManagerOutcome::DownvotePeer(completed_request.last_submitted_peer))); + } + Err(e) => { + completed_request.failed_attempts +=1; + warn!( + self.log, "Parent processing error"; + "error" => format!("{:?}", e); + ); + completed_request.state == BlockRequestsState::QueuedForward; + re_run = true; + return (re_run, Some(ImportManagerOutcome::DownvotePeer(completed_request.last_submitted_peer))); + } + } + } + } + + // remove any full completed and processed parent chains + self.parent_queue.retain(|req| if req.state == BlockRequestsState::Complete { false } else { true }); + (re_run, None) } + + + fn process_blocks( + &mut self, + blocks: Vec>, + ) -> Result<(), String> { + + for block in blocks { + let processing_result = self.chain.process_block(block.clone()); + + if let 
Ok(outcome) = processing_result { + match outcome { + BlockProcessingOutcome::Processed { block_root } => { + // The block was valid and we processed it successfully. + trace!( + self.log, "Imported block from network"; + "source" => source, + "slot" => block.slot, + "block_root" => format!("{}", block_root), + "peer" => format!("{:?}", peer_id), + ); + } + BlockProcessingOutcome::ParentUnknown { parent } => { + // blocks should be sequential and all parents should exist + trace!( + self.log, "ParentBlockUnknown"; + "source" => source, + "parent_root" => format!("{}", parent), + "baby_block_slot" => block.slot, + ); + return Err(format!("Block at slot {} has an unknown parent.", block.slot)); + } + BlockProcessingOutcome::FutureSlot { + present_slot, + block_slot, + } => { + if present_slot + FUTURE_SLOT_TOLERANCE >= block_slot { + // The block is too far in the future, drop it. + trace!( + self.log, "FutureBlock"; + "source" => source, + "msg" => "block for future slot rejected, check your time", + "present_slot" => present_slot, + "block_slot" => block_slot, + "FUTURE_SLOT_TOLERANCE" => FUTURE_SLOT_TOLERANCE, + "peer" => format!("{:?}", peer_id), + ); + return Err(format!("Block at slot {} is too far in the future", block.slot)); + } else { + // The block is in the future, but not too far. 
+ trace!( + self.log, "QueuedFutureBlock"; + "source" => source, + "msg" => "queuing future block, check your time", + "present_slot" => present_slot, + "block_slot" => block_slot, + "FUTURE_SLOT_TOLERANCE" => FUTURE_SLOT_TOLERANCE, + "peer" => format!("{:?}", peer_id), + ); + } + } + _ => { + trace!( + self.log, "InvalidBlock"; + "source" => source, + "msg" => "peer sent invalid block", + "outcome" => format!("{:?}", outcome), + "peer" => format!("{:?}", peer_id), + ); + return Err(format!("Invalid block at slot {}", block.slot)); + } + } + Ok(()) + } else { + trace!( + self.log, "BlockProcessingFailure"; + "source" => source, + "msg" => "unexpected condition in processing block.", + "outcome" => format!("{:?}", processing_result) + ); + return Err(format!("Unexpected block processing error: {:?}", processing_result)); + } + } + } +} diff --git a/beacon_node/network/src/sync/simple_sync.rs b/beacon_node/network/src/sync/simple_sync.rs index 6e5cada23..a7f5ced40 100644 --- a/beacon_node/network/src/sync/simple_sync.rs +++ b/beacon_node/network/src/sync/simple_sync.rs @@ -14,11 +14,6 @@ use types::{ Attestation, BeaconBlock, BeaconBlockBody, BeaconBlockHeader, Epoch, EthSpec, Hash256, Slot, }; -/// The number of slots that we can import blocks ahead of us, before going into full Sync mode. -const SLOT_IMPORT_TOLERANCE: u64 = 100; - -/// The amount of seconds a block may exist in the import queue. -const QUEUE_STALE_SECS: u64 = 100; /// If a block is more than `FUTURE_SLOT_TOLERANCE` slots ahead of our slot clock, we drop it. /// Otherwise we queue it. @@ -35,9 +30,11 @@ pub struct PeerSyncInfo { finalized_epoch: Epoch, head_root: Hash256, head_slot: Slot, - requested_slot_skip: Option<(Slot, usize)>, } + + + impl From for PeerSyncInfo { fn from(hello: HelloMessage) -> PeerSyncInfo { PeerSyncInfo { @@ -69,10 +66,7 @@ pub enum SyncState { pub struct SimpleSync { /// A reference to the underlying beacon chain. 
chain: Arc>, - /// A mapping of Peers to their respective PeerSyncInfo. - known_peers: HashMap, - /// The current state of the syncing protocol. - state: SyncState, + manager: ImportManager, log: slog::Logger, } @@ -81,49 +75,24 @@ impl SimpleSync { pub fn new(beacon_chain: Arc>, log: &slog::Logger) -> Self { let sync_logger = log.new(o!("Service"=> "Sync")); - let queue_item_stale_time = Duration::from_secs(QUEUE_STALE_SECS); - - let import_queue = - ImportQueue::new(beacon_chain.clone(), queue_item_stale_time, log.clone()); SimpleSync { chain: beacon_chain.clone(), - known_peers: HashMap::new(), - import_queue, - state: SyncState::Idle, + manager: ImportManager::new(), log: sync_logger, } } - /// Handle a `Goodbye` message from a peer. - /// - /// Removes the peer from `known_peers`. - pub fn on_goodbye(&mut self, peer_id: PeerId, reason: GoodbyeReason) { - info!( - self.log, "PeerGoodbye"; - "peer" => format!("{:?}", peer_id), - "reason" => format!("{:?}", reason), - ); - - self.known_peers.remove(&peer_id); - } - /// Handle a peer disconnect. /// - /// Removes the peer from `known_peers`. + /// Removes the peer from the manager. pub fn on_disconnect(&mut self, peer_id: PeerId) { - info!( - self.log, "Peer Disconnected"; - "peer" => format!("{:?}", peer_id), - ); - self.known_peers.remove(&peer_id); + self.manager.peer_disconnect(&peer_id); } /// Handle the connection of a new peer. /// /// Sends a `Hello` message to the peer. pub fn on_connect(&self, peer_id: PeerId, network: &mut NetworkContext) { - info!(self.log, "PeerConnected"; "peer" => format!("{:?}", peer_id)); - network.send_rpc_request(peer_id, RPCRequest::Hello(hello_message(&self.chain))); } @@ -137,7 +106,7 @@ impl SimpleSync { hello: HelloMessage, network: &mut NetworkContext, ) { - debug!(self.log, "HelloRequest"; "peer" => format!("{:?}", peer_id)); + trace!(self.log, "HelloRequest"; "peer" => format!("{:?}", peer_id)); // Say hello back. 
network.send_rpc_response( @@ -156,7 +125,7 @@ impl SimpleSync { hello: HelloMessage, network: &mut NetworkContext, ) { - debug!(self.log, "HelloResponse"; "peer" => format!("{:?}", peer_id)); + trace!(self.log, "HelloResponse"; "peer" => format!("{:?}", peer_id)); // Process the hello message, without sending back another hello. self.process_hello(peer_id, hello, network); @@ -178,7 +147,7 @@ impl SimpleSync { if local.fork_version != remote.fork_version { // The node is on a different network/fork, disconnect them. - info!( + debug!( self.log, "HandshakeFailure"; "peer" => format!("{:?}", peer_id), "reason" => "network_id" @@ -195,7 +164,7 @@ impl SimpleSync { // different to the one in our chain. // // Therefore, the node is on a different chain and we should not communicate with them. - info!( + debug!( self.log, "HandshakeFailure"; "peer" => format!("{:?}", peer_id), "reason" => "different finalized chain" @@ -227,13 +196,10 @@ impl SimpleSync { .exists::>(&remote.best_root) .unwrap_or_else(|_| false) { - // If the node's best-block is already known to us, we have nothing to request. - debug!( - self.log, - "NaivePeer"; - "peer" => format!("{:?}", peer_id), - "reason" => "best block is known" - ); + // If the node's best-block is already known to us and they are close to our current + // head, treat them as a fully sync'd peer. + self.import_manager.add_full_peer(peer_id); + self.process_sync(); } else { // The remote node has an equal or great finalized epoch and we don't know it's head. 
// @@ -246,43 +212,60 @@ impl SimpleSync { "remote_latest_finalized_epoch" => remote.latest_finalized_epoch, ); - + self.import_manager.add_peer(peer_id, remote); self.process_sync(); } } self.proess_sync(&mut self) { loop { - match self.sync_manager.poll() { - SyncManagerState::RequestBlocks(peer_id, req) { - debug!( + match self.import_manager.poll() { + ImportManagerOutcome::RequestBlocks(peer_id, req) { + trace!( self.log, - "RPCRequest(BeaconBlockBodies)"; - "count" => req.block_roots.len(), + "RPC Request"; + "method" => "BeaconBlocks", + "count" => req.count, "peer" => format!("{:?}", peer_id) ); network.send_rpc_request(peer_id.clone(), RPCRequest::BeaconBlocks(req)); }, - SyncManagerState::Stalled { - // need more peers to continue sync - warn!(self.log, "No useable peers for sync"); - break; + ImportManagerOutcome::RecentRequest(peer_id, req) { + trace!( + self.log, + "RPC Request"; + "method" => "RecentBeaconBlocks", + "count" => req.block_roots.len(), + "peer" => format!("{:?}", peer_id) + ); + network.send_rpc_request(peer_id.clone(), RPCRequest::RecentBeaconBlocks(req)); + }, + ImportManagerOutcome::DownvotePeer(peer_id) { + trace!( + self.log, + "Peer downvoted"; + "peer" => format!("{:?}", peer_id) + ); + // TODO: Implement reputation + network.disconnect(peer_id.clone(), GoodbyeReason::Fault); }, SyncManagerState::Idle { // nothing to do - break; + return; } } } } + /* fn root_at_slot(&self, target_slot: Slot) -> Option { self.chain .rev_iter_block_roots() .find(|(_root, slot)| *slot == target_slot) .map(|(root, _slot)| root) } + */ /// Handle a `BeaconBlocks` request from the peer. 
pub fn on_beacon_blocks_request( @@ -346,8 +329,8 @@ impl SimpleSync { pub fn on_beacon_blocks_response( &mut self, peer_id: PeerId, + request_id: RequestId, res: Vec>, - network: &mut NetworkContext, ) { debug!( self.log, @@ -356,9 +339,26 @@ impl SimpleSync { "count" => res.block_bodies.len(), ); - if !res.is_empty() { - self.sync_manager.add_blocks(peer_id, blocks); - } + self.import_manager.beacon_blocks_response(peer_id, request_id, blocks); + + self.process_sync(); + } + + /// Handle a `RecentBeaconBlocks` response from the peer. + pub fn on_recent_beacon_blocks_response( + &mut self, + peer_id: PeerId, + request_id: RequestId, + res: Vec>, + ) { + debug!( + self.log, + "BeaconBlocksResponse"; + "peer" => format!("{:?}", peer_id), + "count" => res.block_bodies.len(), + ); + + self.import_manager.recent_blocks_response(peer_id, request_id, blocks); self.process_sync(); } @@ -372,7 +372,6 @@ impl SimpleSync { &mut self, peer_id: PeerId, block: BeaconBlock, - network: &mut NetworkContext, ) -> bool { if let Some(outcome) = self.process_block(peer_id.clone(), block.clone(), network, &"gossip") @@ -380,53 +379,17 @@ impl SimpleSync { match outcome { BlockProcessingOutcome::Processed { .. } => SHOULD_FORWARD_GOSSIP_BLOCK, BlockProcessingOutcome::ParentUnknown { parent } => { - // Add this block to the queue - self.import_queue - .enqueue_full_blocks(vec![block.clone()], peer_id.clone()); - debug!( - self.log, "RequestParentBlock"; - "parent_root" => format!("{}", parent), - "parent_slot" => block.slot - 1, - "peer" => format!("{:?}", peer_id), - ); - - // Request roots between parent and start of finality from peer. 
- let start_slot = self - .chain - .head() - .beacon_state - .finalized_checkpoint - .epoch - .start_slot(T::EthSpec::slots_per_epoch()); - self.request_block_roots( - peer_id, - BeaconBlockRootsRequest { - // Request blocks between `latest_finalized_slot` and the `block` - start_slot, - count: block.slot.as_u64() - start_slot.as_u64(), - }, - network, - ); - - // Clean the stale entries from the queue. - self.import_queue.remove_stale(); - + // Inform the sync manager to find parents for this block + self.import_manager.add_unknown_block(block.clone()); SHOULD_FORWARD_GOSSIP_BLOCK } - BlockProcessingOutcome::FutureSlot { present_slot, block_slot, } if present_slot + FUTURE_SLOT_TOLERANCE >= block_slot => { - self.import_queue - .enqueue_full_blocks(vec![block], peer_id.clone()); - + //TODO: Decide the logic here SHOULD_FORWARD_GOSSIP_BLOCK } - // Note: known blocks are forwarded on the gossip network. - // - // We rely upon the lower layers (libp2p) to stop loops occurring from re-gossiped - // blocks. BlockProcessingOutcome::BlockIsAlreadyKnown => SHOULD_FORWARD_GOSSIP_BLOCK, _ => SHOULD_NOT_FORWARD_GOSSIP_BLOCK, } @@ -457,48 +420,8 @@ impl SimpleSync { } } - /// Request some `BeaconBlockRoots` from the remote peer. - fn request_block_roots( - &mut self, - peer_id: PeerId, - req: BeaconBlockRootsRequest, - network: &mut NetworkContext, - ) { - // Potentially set state to sync. - if self.state == SyncState::Idle && req.count > SLOT_IMPORT_TOLERANCE { - debug!(self.log, "Entering downloading sync state."); - self.state = SyncState::Downloading; - } - - debug!( - self.log, - "RPCRequest(BeaconBlockRoots)"; - "count" => req.count, - "peer" => format!("{:?}", peer_id) - ); - - // TODO: handle count > max count. - network.send_rpc_request(peer_id.clone(), RPCRequest::BeaconBlockRoots(req)); - } - - /// Request some `BeaconBlockHeaders` from the remote peer. 
- fn request_block_headers( - &mut self, - peer_id: PeerId, - req: BeaconBlockHeadersRequest, - network: &mut NetworkContext, - ) { - debug!( - self.log, - "RPCRequest(BeaconBlockHeaders)"; - "max_headers" => req.max_headers, - "peer" => format!("{:?}", peer_id) - ); - - network.send_rpc_request(peer_id.clone(), RPCRequest::BeaconBlockHeaders(req)); - } - +/* /// Returns `true` if `self.chain` has not yet processed this block. pub fn chain_has_seen_block(&self, block_root: &Hash256) -> bool { !self @@ -509,207 +432,13 @@ impl SimpleSync { false }) } + */ /// Generates our current state in the form of a HELLO RPC message. pub fn generate_hello(&self) -> HelloMessage { hello_message(&self.chain) } - /// Helper function to attempt to process a partial block. - /// - /// If the block can be completed recursively call `process_block` - /// else request missing parts. - fn attempt_process_partial_block( - &mut self, - peer_id: PeerId, - block_root: Hash256, - network: &mut NetworkContext, - source: &str, - ) -> Option { - match self.import_queue.attempt_complete_block(block_root) { - PartialBeaconBlockCompletion::MissingBody => { - // Unable to complete the block because the block body is missing. - debug!( - self.log, "RequestParentBody"; - "source" => source, - "block_root" => format!("{}", block_root), - "peer" => format!("{:?}", peer_id), - ); - - // Request the block body from the peer. - self.request_block_bodies( - peer_id, - BeaconBlockBodiesRequest { - block_roots: vec![block_root], - }, - network, - ); - - None - } - PartialBeaconBlockCompletion::MissingHeader(slot) => { - // Unable to complete the block because the block header is missing. - debug!( - self.log, "RequestParentHeader"; - "source" => source, - "block_root" => format!("{}", block_root), - "peer" => format!("{:?}", peer_id), - ); - - // Request the block header from the peer. 
- self.request_block_headers( - peer_id, - BeaconBlockHeadersRequest { - start_root: block_root, - start_slot: slot, - max_headers: 1, - skip_slots: 0, - }, - network, - ); - - None - } - PartialBeaconBlockCompletion::MissingRoot => { - // The `block_root` is not known to the queue. - debug!( - self.log, "MissingParentRoot"; - "source" => source, - "block_root" => format!("{}", block_root), - "peer" => format!("{:?}", peer_id), - ); - - // Do nothing. - - None - } - PartialBeaconBlockCompletion::Complete(block) => { - // The block exists in the queue, attempt to process it - trace!( - self.log, "AttemptProcessParent"; - "source" => source, - "block_root" => format!("{}", block_root), - "parent_slot" => block.slot, - "peer" => format!("{:?}", peer_id), - ); - - self.process_block(peer_id.clone(), block, network, source) - } - } - } - - /// Processes the `block` that was received from `peer_id`. - /// - /// If the block was submitted to the beacon chain without internal error, `Some(outcome)` is - /// returned, otherwise `None` is returned. Note: `Some(_)` does not necessarily indicate that - /// the block was successfully processed or valid. - /// - /// This function performs the following duties: - /// - /// - Attempting to import the block into the beacon chain. - /// - Logging - /// - Requesting unavailable blocks (e.g., if parent is unknown). - /// - Disconnecting faulty nodes. - /// - /// This function does not remove processed blocks from the import queue. - fn process_block( - &mut self, - peer_id: PeerId, - block: BeaconBlock, - network: &mut NetworkContext, - source: &str, - ) -> Option { - let processing_result = self.chain.process_block(block.clone()); - - if let Ok(outcome) = processing_result { - match outcome { - BlockProcessingOutcome::Processed { block_root } => { - // The block was valid and we processed it successfully. 
- debug!( - self.log, "Imported block from network"; - "source" => source, - "slot" => block.slot, - "block_root" => format!("{}", block_root), - "peer" => format!("{:?}", peer_id), - ); - } - BlockProcessingOutcome::ParentUnknown { parent } => { - // The parent has not been processed - trace!( - self.log, "ParentBlockUnknown"; - "source" => source, - "parent_root" => format!("{}", parent), - "baby_block_slot" => block.slot, - "peer" => format!("{:?}", peer_id), - ); - - // If the parent is in the `import_queue` attempt to complete it then process it. - // All other cases leave `parent` in `import_queue` and return original outcome. - if let Some(BlockProcessingOutcome::Processed { .. }) = - self.attempt_process_partial_block(peer_id, parent, network, source) - { - // If processing parent is successful, re-process block and remove parent from queue - self.import_queue.remove(parent); - - // Attempt to process `block` again - match self.chain.process_block(block) { - Ok(outcome) => return Some(outcome), - Err(_) => return None, - } - } - } - BlockProcessingOutcome::FutureSlot { - present_slot, - block_slot, - } => { - if present_slot + FUTURE_SLOT_TOLERANCE >= block_slot { - // The block is too far in the future, drop it. - warn!( - self.log, "FutureBlock"; - "source" => source, - "msg" => "block for future slot rejected, check your time", - "present_slot" => present_slot, - "block_slot" => block_slot, - "FUTURE_SLOT_TOLERANCE" => FUTURE_SLOT_TOLERANCE, - "peer" => format!("{:?}", peer_id), - ); - network.disconnect(peer_id, GoodbyeReason::Fault); - } else { - // The block is in the future, but not too far. 
- debug!( - self.log, "QueuedFutureBlock"; - "source" => source, - "msg" => "queuing future block, check your time", - "present_slot" => present_slot, - "block_slot" => block_slot, - "FUTURE_SLOT_TOLERANCE" => FUTURE_SLOT_TOLERANCE, - "peer" => format!("{:?}", peer_id), - ); - } - } - _ => { - debug!( - self.log, "InvalidBlock"; - "source" => source, - "msg" => "peer sent invalid block", - "outcome" => format!("{:?}", outcome), - "peer" => format!("{:?}", peer_id), - ); - } - } - - Some(outcome) - } else { - error!( - self.log, "BlockProcessingFailure"; - "source" => source, - "msg" => "unexpected condition in processing block.", - "outcome" => format!("{:?}", processing_result) - ); - - None - } - } } /// Build a `HelloMessage` representing the state of the given `beacon_chain`. From a8daf46d5f557d45d1add6c974d654d366e31a6f Mon Sep 17 00:00:00 2001 From: Paul Hauner Date: Wed, 21 Aug 2019 14:48:49 +1000 Subject: [PATCH 094/305] Add comments --- beacon_node/client/src/bootstrapper.rs | 21 +++++++++++++++++++++ beacon_node/client/src/config.rs | 2 ++ 2 files changed, 23 insertions(+) diff --git a/beacon_node/client/src/bootstrapper.rs b/beacon_node/client/src/bootstrapper.rs index 2c8cf6afc..9843ceec7 100644 --- a/beacon_node/client/src/bootstrapper.rs +++ b/beacon_node/client/src/bootstrapper.rs @@ -20,17 +20,31 @@ impl From for Error { } } +/// Used to load "bootstrap" information from the HTTP API of another Lighthouse beacon node. +/// +/// Bootstrapping information includes things like genesis and finalized states and blocks, and +/// libp2p connection details. pub struct Bootstrapper { url: Url, } impl Bootstrapper { + /// Parses the given `server` as a URL, instantiating `Self`. pub fn from_server_string(server: String) -> Result { Ok(Self { url: Url::parse(&server).map_err(|e| format!("Invalid bootstrap server url: {}", e))?, }) } + /// Build a multiaddr using the HTTP server URL that is not guaranteed to be correct. 
+ /// + /// The address is created by querying the HTTP server for it's listening libp2p addresses. + /// Then, we find the first TCP port in those addresses and combine the port with the URL of + /// the server. + /// + /// For example, the server `http://192.168.0.1` might end up with a `best_effort_multiaddr` of + /// `/ipv4/192.168.0.1/tcp/9000` if the server advertises a listening address of + /// `/ipv4/172.0.0.1/tcp/9000`. pub fn best_effort_multiaddr(&self) -> Option { let tcp_port = self.first_listening_tcp_port()?; @@ -47,6 +61,8 @@ impl Bootstrapper { Some(multiaddr) } + /// Reads the server's listening libp2p addresses and returns the first TCP port protocol it + /// finds, if any. fn first_listening_tcp_port(&self) -> Option { self.listen_addresses().ok()?.iter().find_map(|multiaddr| { multiaddr.iter().find_map(|protocol| match protocol { @@ -56,6 +72,7 @@ impl Bootstrapper { }) } + /// Returns the IPv4 address of the server URL, unless it contains a FQDN. pub fn server_ipv4_addr(&self) -> Option { match self.url.host()? { Host::Ipv4(addr) => Some(addr), @@ -63,15 +80,18 @@ impl Bootstrapper { } } + /// Returns the servers ENR address. pub fn enr(&self) -> Result { get_enr(self.url.clone()).map_err(|e| format!("Unable to get ENR: {:?}", e)) } + /// Returns the servers listening libp2p addresses. pub fn listen_addresses(&self) -> Result, String> { get_listen_addresses(self.url.clone()) .map_err(|e| format!("Unable to get listen addresses: {:?}", e)) } + /// Returns the genesis block and state. pub fn genesis(&self) -> Result<(BeaconState, BeaconBlock), String> { let genesis_slot = Slot::new(0); @@ -83,6 +103,7 @@ impl Bootstrapper { Ok((state, block)) } + /// Returns the most recent finalized state and block. 
pub fn finalized(&self) -> Result<(BeaconState, BeaconBlock), String> { let slots_per_epoch = get_slots_per_epoch(self.url.clone()) .map_err(|e| format!("Unable to get slots per epoch: {:?}", e))?; diff --git a/beacon_node/client/src/config.rs b/beacon_node/client/src/config.rs index 1a985fb4a..ea8186dbc 100644 --- a/beacon_node/client/src/config.rs +++ b/beacon_node/client/src/config.rs @@ -158,6 +158,8 @@ impl Config { } } +/// Perform the HTTP bootstrapping procedure, reading an ENR and multiaddr from the HTTP server and +/// adding them to the `config`. fn do_bootstrapping(config: &mut Config, server: String, log: &slog::Logger) -> Result<(), String> { // Set the genesis state source. config.genesis_state = GenesisState::HttpBootstrap { From b912e26b7938270392e251f213cc50278aa0cc99 Mon Sep 17 00:00:00 2001 From: Paul Hauner Date: Thu, 22 Aug 2019 14:37:47 +1000 Subject: [PATCH 095/305] Tidy API to be more consistent with recent decisions --- beacon_node/eth2-libp2p/src/service.rs | 4 +- beacon_node/network/src/service.rs | 5 ++ beacon_node/rest_api/src/beacon.rs | 64 +++++++++++++++++++++----- beacon_node/rest_api/src/helpers.rs | 13 ++++-- beacon_node/rest_api/src/lib.rs | 15 +++--- beacon_node/rest_api/src/network.rs | 17 +++++++ 6 files changed, 92 insertions(+), 26 deletions(-) diff --git a/beacon_node/eth2-libp2p/src/service.rs b/beacon_node/eth2-libp2p/src/service.rs index 316aa0579..e1e112e2d 100644 --- a/beacon_node/eth2-libp2p/src/service.rs +++ b/beacon_node/eth2-libp2p/src/service.rs @@ -33,7 +33,7 @@ pub struct Service { //TODO: Make this private pub swarm: Swarm, /// This node's PeerId. - _local_peer_id: PeerId, + pub local_peer_id: PeerId, /// The libp2p logger handle. 
pub log: slog::Logger, } @@ -113,7 +113,7 @@ impl Service { info!(log, "Subscribed to topics: {:?}", subscribed_topics); Ok(Service { - _local_peer_id: local_peer_id, + local_peer_id, swarm, log, }) diff --git a/beacon_node/network/src/service.rs b/beacon_node/network/src/service.rs index 4bec03830..dc7e94140 100644 --- a/beacon_node/network/src/service.rs +++ b/beacon_node/network/src/service.rs @@ -75,6 +75,11 @@ impl Service { .clone() } + /// Returns the local libp2p PeerID. + pub fn local_peer_id(&self) -> PeerId { + self.libp2p_service.lock().local_peer_id.clone() + } + /// Returns the list of `Multiaddr` that the underlying libp2p instance is listening on. pub fn listen_multiaddrs(&self) -> Vec { Swarm::listeners(&self.libp2p_service.lock().swarm) diff --git a/beacon_node/rest_api/src/beacon.rs b/beacon_node/rest_api/src/beacon.rs index 66e31ae41..88427c9a4 100644 --- a/beacon_node/rest_api/src/beacon.rs +++ b/beacon_node/rest_api/src/beacon.rs @@ -2,25 +2,44 @@ use super::{success_response, ApiResult}; use crate::{helpers::*, ApiError, UrlQuery}; use beacon_chain::{BeaconChain, BeaconChainTypes}; use hyper::{Body, Request}; +use serde::Serialize; use std::sync::Arc; use store::Store; -use types::{BeaconBlock, BeaconState}; +use types::{BeaconBlock, BeaconState, EthSpec, Hash256, Slot}; + +#[derive(Serialize)] +struct HeadResponse { + pub slot: Slot, + pub block_root: Hash256, + pub state_root: Hash256, +} /// HTTP handler to return a `BeaconBlock` at a given `root` or `slot`. 
-pub fn get_best_slot(req: Request) -> ApiResult { +pub fn get_head(req: Request) -> ApiResult { let beacon_chain = req .extensions() .get::>>() .ok_or_else(|| ApiError::ServerError("Beacon chain extension missing".to_string()))?; - let slot = beacon_chain.head().beacon_state.slot; + let head = HeadResponse { + slot: beacon_chain.head().beacon_state.slot, + block_root: beacon_chain.head().beacon_block_root, + state_root: beacon_chain.head().beacon_state_root, + }; - let json: String = serde_json::to_string(&slot) - .map_err(|e| ApiError::ServerError(format!("Unable to serialize Slot: {:?}", e)))?; + let json: String = serde_json::to_string(&head) + .map_err(|e| ApiError::ServerError(format!("Unable to serialize HeadResponse: {:?}", e)))?; Ok(success_response(Body::from(json))) } +#[derive(Serialize)] +#[serde(bound = "T: EthSpec")] +struct BlockResponse { + pub root: Hash256, + pub beacon_block: BeaconBlock, +} + /// HTTP handler to return a `BeaconBlock` at a given `root` or `slot`. pub fn get_block(req: Request) -> ApiResult { let beacon_chain = req @@ -58,8 +77,14 @@ pub fn get_block(req: Request) -> ApiResult )) })?; - let json: String = serde_json::to_string(&block) - .map_err(|e| ApiError::ServerError(format!("Unable to serialize BeaconBlock: {:?}", e)))?; + let response = BlockResponse { + root: block_root, + beacon_block: block, + }; + + let json: String = serde_json::to_string(&response).map_err(|e| { + ApiError::ServerError(format!("Unable to serialize BlockResponse: {:?}", e)) + })?; Ok(success_response(Body::from(json))) } @@ -89,6 +114,13 @@ pub fn get_block_root(req: Request) -> ApiR Ok(success_response(Body::from(json))) } +#[derive(Serialize)] +#[serde(bound = "T: EthSpec")] +struct StateResponse { + pub root: Hash256, + pub beacon_state: BeaconState, +} + /// HTTP handler to return a `BeaconState` at a given `root` or `slot`. /// /// Will not return a state if the request slot is in the future. 
Will return states higher than @@ -102,21 +134,29 @@ pub fn get_state(req: Request) -> ApiResult let query_params = ["root", "slot"]; let (key, value) = UrlQuery::from_request(&req)?.first_of(&query_params)?; - let state: BeaconState = match (key.as_ref(), value) { + let (root, state): (Hash256, BeaconState) = match (key.as_ref(), value) { ("slot", value) => state_at_slot(&beacon_chain, parse_slot(&value)?)?, ("root", value) => { let root = &parse_root(&value)?; - beacon_chain + let state = beacon_chain .store .get(root)? - .ok_or_else(|| ApiError::NotFound(format!("No state for root: {}", root)))? + .ok_or_else(|| ApiError::NotFound(format!("No state for root: {}", root)))?; + + (*root, state) } _ => return Err(ApiError::ServerError("Unexpected query parameter".into())), }; - let json: String = serde_json::to_string(&state) - .map_err(|e| ApiError::ServerError(format!("Unable to serialize BeaconState: {:?}", e)))?; + let response = StateResponse { + root, + beacon_state: state, + }; + + let json: String = serde_json::to_string(&response).map_err(|e| { + ApiError::ServerError(format!("Unable to serialize StateResponse: {:?}", e)) + })?; Ok(success_response(Body::from(json))) } diff --git a/beacon_node/rest_api/src/helpers.rs b/beacon_node/rest_api/src/helpers.rs index 2a429076c..a65c7c1ac 100644 --- a/beacon_node/rest_api/src/helpers.rs +++ b/beacon_node/rest_api/src/helpers.rs @@ -31,22 +31,25 @@ pub fn parse_root(string: &str) -> Result { } } -/// Returns a `BeaconState` in the canonical chain of `beacon_chain` at the given `slot`, if -/// possible. +/// Returns a `BeaconState` and it's root in the canonical chain of `beacon_chain` at the given +/// `slot`, if possible. /// /// Will not return a state if the request slot is in the future. Will return states higher than /// the current head by skipping slots. 
pub fn state_at_slot( beacon_chain: &BeaconChain, slot: Slot, -) -> Result, ApiError> { +) -> Result<(Hash256, BeaconState), ApiError> { let head_state = &beacon_chain.head().beacon_state; if head_state.slot == slot { // The request slot is the same as the best block (head) slot. // I'm not sure if this `.clone()` will be optimized out. If not, it seems unnecessary. - Ok(beacon_chain.head().beacon_state.clone()) + Ok(( + beacon_chain.head().beacon_state_root, + beacon_chain.head().beacon_state.clone(), + )) } else { let root = state_root_at_slot(beacon_chain, slot)?; @@ -55,7 +58,7 @@ pub fn state_at_slot( .get(&root)? .ok_or_else(|| ApiError::NotFound(format!("Unable to find state at root {}", root)))?; - Ok(state) + Ok((root, state)) } } diff --git a/beacon_node/rest_api/src/lib.rs b/beacon_node/rest_api/src/lib.rs index 8ef48ad72..839aa7abc 100644 --- a/beacon_node/rest_api/src/lib.rs +++ b/beacon_node/rest_api/src/lib.rs @@ -121,7 +121,7 @@ pub fn start_server( // Route the request to the correct handler. 
let result = match (req.method(), path.as_ref()) { - (&Method::GET, "/beacon/best_slot") => beacon::get_best_slot::(req), + (&Method::GET, "/beacon/head") => beacon::get_head::(req), (&Method::GET, "/beacon/block") => beacon::get_block::(req), (&Method::GET, "/beacon/block_root") => beacon::get_block_root::(req), (&Method::GET, "/beacon/latest_finalized_checkpoint") => { @@ -130,14 +130,15 @@ pub fn start_server( (&Method::GET, "/beacon/state") => beacon::get_state::(req), (&Method::GET, "/beacon/state_root") => beacon::get_state_root::(req), (&Method::GET, "/metrics") => metrics::get_prometheus::(req), - (&Method::GET, "/node/version") => node::get_version(req), - (&Method::GET, "/node/genesis_time") => node::get_genesis_time::(req), - (&Method::GET, "/node/network/enr") => network::get_enr::(req), - (&Method::GET, "/node/network/peer_count") => network::get_peer_count::(req), - (&Method::GET, "/node/network/peers") => network::get_peer_list::(req), - (&Method::GET, "/node/network/listen_addresses") => { + (&Method::GET, "/network/enr") => network::get_enr::(req), + (&Method::GET, "/network/peer_count") => network::get_peer_count::(req), + (&Method::GET, "/network/peer_id") => network::get_peer_id::(req), + (&Method::GET, "/network/peers") => network::get_peer_list::(req), + (&Method::GET, "/network/listen_addresses") => { network::get_listen_addresses::(req) } + (&Method::GET, "/node/version") => node::get_version(req), + (&Method::GET, "/node/genesis_time") => node::get_genesis_time::(req), (&Method::GET, "/spec") => spec::get_spec::(req), (&Method::GET, "/spec/slots_per_epoch") => spec::get_slots_per_epoch::(req), _ => Err(ApiError::MethodNotAllowed(path.clone())), diff --git a/beacon_node/rest_api/src/network.rs b/beacon_node/rest_api/src/network.rs index 0e2448270..154cd142d 100644 --- a/beacon_node/rest_api/src/network.rs +++ b/beacon_node/rest_api/src/network.rs @@ -40,6 +40,23 @@ pub fn get_enr(req: Request) ))) } +/// HTTP handle to return the `PeerId` 
from the client's libp2p service. +/// +/// PeerId is encoded as base58 string. +pub fn get_peer_id(req: Request) -> ApiResult { + let network = req + .extensions() + .get::>>() + .ok_or_else(|| ApiError::ServerError("NetworkService extension missing".to_string()))?; + + let peer_id: PeerId = network.local_peer_id(); + + Ok(success_response(Body::from( + serde_json::to_string(&peer_id.to_base58()) + .map_err(|e| ApiError::ServerError(format!("Unable to serialize Enr: {:?}", e)))?, + ))) +} + /// HTTP handle to return the number of peers connected in the client's libp2p service. pub fn get_peer_count( req: Request, From 5a34f86e770dedae20d4c383293bdb8cce722000 Mon Sep 17 00:00:00 2001 From: Paul Hauner Date: Thu, 22 Aug 2019 16:14:51 +1000 Subject: [PATCH 096/305] Address some review comments --- beacon_node/client/src/bootstrapper.rs | 16 ++++++++-------- beacon_node/rest_api/src/beacon.rs | 22 ++++++---------------- beacon_node/rest_api/src/helpers.rs | 15 +++++++++++++++ 3 files changed, 29 insertions(+), 24 deletions(-) diff --git a/beacon_node/client/src/bootstrapper.rs b/beacon_node/client/src/bootstrapper.rs index 9843ceec7..19f13e2da 100644 --- a/beacon_node/client/src/bootstrapper.rs +++ b/beacon_node/client/src/bootstrapper.rs @@ -10,7 +10,7 @@ use url::Host; #[derive(Debug)] enum Error { - UrlCannotBeBase, + InvalidUrl, HttpError(HttpError), } @@ -38,7 +38,7 @@ impl Bootstrapper { /// Build a multiaddr using the HTTP server URL that is not guaranteed to be correct. /// - /// The address is created by querying the HTTP server for it's listening libp2p addresses. + /// The address is created by querying the HTTP server for its listening libp2p addresses. /// Then, we find the first TCP port in those addresses and combine the port with the URL of /// the server. 
/// @@ -124,7 +124,7 @@ fn get_slots_per_epoch(mut url: Url) -> Result { .map(|mut url| { url.push("spec").push("slots_per_epoch"); }) - .map_err(|_| Error::UrlCannotBeBase)?; + .map_err(|_| Error::InvalidUrl)?; reqwest::get(url)? .error_for_status()? @@ -137,7 +137,7 @@ fn get_finalized_slot(mut url: Url, slots_per_epoch: u64) -> Result .map(|mut url| { url.push("beacon").push("latest_finalized_checkpoint"); }) - .map_err(|_| Error::UrlCannotBeBase)?; + .map_err(|_| Error::InvalidUrl)?; let checkpoint: Checkpoint = reqwest::get(url)?.error_for_status()?.json()?; @@ -149,7 +149,7 @@ fn get_state(mut url: Url, slot: Slot) -> Result, Err .map(|mut url| { url.push("beacon").push("state"); }) - .map_err(|_| Error::UrlCannotBeBase)?; + .map_err(|_| Error::InvalidUrl)?; url.query_pairs_mut() .append_pair("slot", &format!("{}", slot.as_u64())); @@ -165,7 +165,7 @@ fn get_block(mut url: Url, slot: Slot) -> Result, Err .map(|mut url| { url.push("beacon").push("block"); }) - .map_err(|_| Error::UrlCannotBeBase)?; + .map_err(|_| Error::InvalidUrl)?; url.query_pairs_mut() .append_pair("slot", &format!("{}", slot.as_u64())); @@ -181,7 +181,7 @@ fn get_enr(mut url: Url) -> Result { .map(|mut url| { url.push("node").push("network").push("enr"); }) - .map_err(|_| Error::UrlCannotBeBase)?; + .map_err(|_| Error::InvalidUrl)?; reqwest::get(url)? .error_for_status()? @@ -194,7 +194,7 @@ fn get_listen_addresses(mut url: Url) -> Result, Error> { .map(|mut url| { url.push("node").push("network").push("listen_addresses"); }) - .map_err(|_| Error::UrlCannotBeBase)?; + .map_err(|_| Error::InvalidUrl)?; reqwest::get(url)? .error_for_status()? 
diff --git a/beacon_node/rest_api/src/beacon.rs b/beacon_node/rest_api/src/beacon.rs index 88427c9a4..4e3cc02fd 100644 --- a/beacon_node/rest_api/src/beacon.rs +++ b/beacon_node/rest_api/src/beacon.rs @@ -54,14 +54,9 @@ pub fn get_block(req: Request) -> ApiResult ("slot", value) => { let target = parse_slot(&value)?; - beacon_chain - .rev_iter_block_roots() - .take_while(|(_root, slot)| *slot >= target) - .find(|(_root, slot)| *slot == target) - .map(|(root, _slot)| root) - .ok_or_else(|| { - ApiError::NotFound(format!("Unable to find BeaconBlock for slot {}", target)) - })? + block_root_at_slot(&beacon_chain, target).ok_or_else(|| { + ApiError::NotFound(format!("Unable to find BeaconBlock for slot {}", target)) + })? } ("root", value) => parse_root(&value)?, _ => return Err(ApiError::ServerError("Unexpected query parameter".into())), @@ -99,14 +94,9 @@ pub fn get_block_root(req: Request) -> ApiR let slot_string = UrlQuery::from_request(&req)?.only_one("slot")?; let target = parse_slot(&slot_string)?; - let root = beacon_chain - .rev_iter_block_roots() - .take_while(|(_root, slot)| *slot >= target) - .find(|(_root, slot)| *slot == target) - .map(|(root, _slot)| root) - .ok_or_else(|| { - ApiError::NotFound(format!("Unable to find BeaconBlock for slot {}", target)) - })?; + let root = block_root_at_slot(&beacon_chain, target).ok_or_else(|| { + ApiError::NotFound(format!("Unable to find BeaconBlock for slot {}", target)) + })?; let json: String = serde_json::to_string(&root) .map_err(|e| ApiError::ServerError(format!("Unable to serialize root: {:?}", e)))?; diff --git a/beacon_node/rest_api/src/helpers.rs b/beacon_node/rest_api/src/helpers.rs index a65c7c1ac..5365086df 100644 --- a/beacon_node/rest_api/src/helpers.rs +++ b/beacon_node/rest_api/src/helpers.rs @@ -31,6 +31,21 @@ pub fn parse_root(string: &str) -> Result { } } +/// Returns the root of the `BeaconBlock` in the canonical chain of `beacon_chain` at the given +/// `slot`, if possible. 
+/// +/// May return a root for a previous slot, in the case of skip slots. +pub fn block_root_at_slot( + beacon_chain: &BeaconChain, + target: Slot, +) -> Option { + beacon_chain + .rev_iter_block_roots() + .take_while(|(_root, slot)| *slot >= target) + .find(|(_root, slot)| *slot == target) + .map(|(root, _slot)| root) +} + /// Returns a `BeaconState` and it's root in the canonical chain of `beacon_chain` at the given /// `slot`, if possible. /// From 853344af8a6127a70df2207402a317fc7282b8cd Mon Sep 17 00:00:00 2001 From: Paul Hauner Date: Thu, 22 Aug 2019 16:34:21 +1000 Subject: [PATCH 097/305] Make BeaconChainTypes Send + Sync + 'static --- beacon_node/beacon_chain/src/beacon_chain.rs | 2 +- beacon_node/beacon_chain/src/test_utils.rs | 8 ++++---- beacon_node/client/src/beacon_chain_types.rs | 6 +++++- beacon_node/client/src/lib.rs | 2 +- beacon_node/client/src/notifier.rs | 6 +----- beacon_node/rest_api/src/lib.rs | 2 +- beacon_node/rest_api/src/network.rs | 14 +++++--------- beacon_node/src/run.rs | 2 +- 8 files changed, 19 insertions(+), 23 deletions(-) diff --git a/beacon_node/beacon_chain/src/beacon_chain.rs b/beacon_node/beacon_chain/src/beacon_chain.rs index bd7f37fba..5feefd841 100644 --- a/beacon_node/beacon_chain/src/beacon_chain.rs +++ b/beacon_node/beacon_chain/src/beacon_chain.rs @@ -77,7 +77,7 @@ pub enum AttestationProcessingOutcome { Invalid(AttestationValidationError), } -pub trait BeaconChainTypes { +pub trait BeaconChainTypes: Send + Sync + 'static { type Store: store::Store; type SlotClock: slot_clock::SlotClock; type LmdGhost: LmdGhost; diff --git a/beacon_node/beacon_chain/src/test_utils.rs b/beacon_node/beacon_chain/src/test_utils.rs index 298c637db..bd51f8620 100644 --- a/beacon_node/beacon_chain/src/test_utils.rs +++ b/beacon_node/beacon_chain/src/test_utils.rs @@ -54,8 +54,8 @@ where impl BeaconChainTypes for CommonTypes where - L: LmdGhost, - E: EthSpec, + L: LmdGhost + 'static, + E: EthSpec + 'static, { type Store = MemoryStore; type 
SlotClock = TestingSlotClock; @@ -69,8 +69,8 @@ where /// Used for testing. pub struct BeaconChainHarness where - L: LmdGhost, - E: EthSpec, + L: LmdGhost + 'static, + E: EthSpec + 'static, { pub chain: BeaconChain>, pub keypairs: Vec, diff --git a/beacon_node/client/src/beacon_chain_types.rs b/beacon_node/client/src/beacon_chain_types.rs index f2f95226a..adea8c7b5 100644 --- a/beacon_node/client/src/beacon_chain_types.rs +++ b/beacon_node/client/src/beacon_chain_types.rs @@ -36,7 +36,11 @@ pub struct ClientType { _phantom_u: PhantomData, } -impl BeaconChainTypes for ClientType { +impl BeaconChainTypes for ClientType +where + S: Store + 'static, + E: EthSpec + 'static + Clone, +{ type Store = S; type SlotClock = SystemTimeSlotClock; type LmdGhost = ThreadSafeReducedTree; diff --git a/beacon_node/client/src/lib.rs b/beacon_node/client/src/lib.rs index 798aedec9..6405e05e7 100644 --- a/beacon_node/client/src/lib.rs +++ b/beacon_node/client/src/lib.rs @@ -49,7 +49,7 @@ pub struct Client { impl Client where - T: BeaconChainTypes + InitialiseBeaconChain + Clone + Send + Sync + 'static, + T: BeaconChainTypes + InitialiseBeaconChain + Clone, { /// Generate an instance of the client. Spawn and link all internal sub-processes. pub fn new( diff --git a/beacon_node/client/src/notifier.rs b/beacon_node/client/src/notifier.rs index 1c7cf3867..78e50ac79 100644 --- a/beacon_node/client/src/notifier.rs +++ b/beacon_node/client/src/notifier.rs @@ -17,11 +17,7 @@ pub const WARN_PEER_COUNT: usize = 1; /// durations. /// /// Presently unused, but remains for future use. 
-pub fn run( - client: &Client, - executor: TaskExecutor, - exit: Exit, -) { +pub fn run(client: &Client, executor: TaskExecutor, exit: Exit) { // notification heartbeat let interval = Interval::new( Instant::now(), diff --git a/beacon_node/rest_api/src/lib.rs b/beacon_node/rest_api/src/lib.rs index 839aa7abc..354b23403 100644 --- a/beacon_node/rest_api/src/lib.rs +++ b/beacon_node/rest_api/src/lib.rs @@ -71,7 +71,7 @@ impl From for ApiError { } } -pub fn start_server( +pub fn start_server( config: &ApiConfig, executor: &TaskExecutor, beacon_chain: Arc>, diff --git a/beacon_node/rest_api/src/network.rs b/beacon_node/rest_api/src/network.rs index 154cd142d..daded9d3d 100644 --- a/beacon_node/rest_api/src/network.rs +++ b/beacon_node/rest_api/src/network.rs @@ -7,9 +7,7 @@ use std::sync::Arc; /// HTTP handle to return the list of libp2p multiaddr the client is listening on. /// /// Returns a list of `Multiaddr`, serialized according to their `serde` impl. -pub fn get_listen_addresses( - req: Request, -) -> ApiResult { +pub fn get_listen_addresses(req: Request) -> ApiResult { let network = req .extensions() .get::>>() @@ -26,7 +24,7 @@ pub fn get_listen_addresses( /// HTTP handle to return the Discv5 ENR from the client's libp2p service. /// /// ENR is encoded as base64 string. -pub fn get_enr(req: Request) -> ApiResult { +pub fn get_enr(req: Request) -> ApiResult { let network = req .extensions() .get::>>() @@ -43,7 +41,7 @@ pub fn get_enr(req: Request) /// HTTP handle to return the `PeerId` from the client's libp2p service. /// /// PeerId is encoded as base58 string. 
-pub fn get_peer_id(req: Request) -> ApiResult { +pub fn get_peer_id(req: Request) -> ApiResult { let network = req .extensions() .get::>>() @@ -58,9 +56,7 @@ pub fn get_peer_id(req: Request( - req: Request, -) -> ApiResult { +pub fn get_peer_count(req: Request) -> ApiResult { let network = req .extensions() .get::>>() @@ -77,7 +73,7 @@ pub fn get_peer_count( /// HTTP handle to return the list of peers connected to the client's libp2p service. /// /// Peers are presented as a list of `PeerId::to_string()`. -pub fn get_peer_list(req: Request) -> ApiResult { +pub fn get_peer_list(req: Request) -> ApiResult { let network = req .extensions() .get::>>() diff --git a/beacon_node/src/run.rs b/beacon_node/src/run.rs index 5066231d5..f88cb7460 100644 --- a/beacon_node/src/run.rs +++ b/beacon_node/src/run.rs @@ -118,7 +118,7 @@ fn run( log: &slog::Logger, ) -> error::Result<()> where - T: BeaconChainTypes + InitialiseBeaconChain + Clone + Send + Sync + 'static, + T: BeaconChainTypes + InitialiseBeaconChain + Clone, T::Store: OpenDatabase, { let store = T::Store::open_database(&db_path)?; From 11dc72a4422e7c164c2d79619b6c92d12ae2ab4b Mon Sep 17 00:00:00 2001 From: Paul Hauner Date: Thu, 22 Aug 2019 17:48:13 +1000 Subject: [PATCH 098/305] Start implementing BeaconChainBuilder --- beacon_node/beacon_chain/Cargo.toml | 1 + .../beacon_chain/src/beacon_chain_builder.rs | 68 +++++++++++++++++++ beacon_node/beacon_chain/src/lib.rs | 2 + 3 files changed, 71 insertions(+) create mode 100644 beacon_node/beacon_chain/src/beacon_chain_builder.rs diff --git a/beacon_node/beacon_chain/Cargo.toml b/beacon_node/beacon_chain/Cargo.toml index 1d3fc03b8..31f341286 100644 --- a/beacon_node/beacon_chain/Cargo.toml +++ b/beacon_node/beacon_chain/Cargo.toml @@ -13,6 +13,7 @@ log = "0.4" operation_pool = { path = "../../eth2/operation_pool" } serde = "1.0" serde_derive = "1.0" +serde_yaml = "0.8" slog = { version = "^2.2.3" , features = ["max_level_trace"] } sloggers = { version = "^0.3" } 
slot_clock = { path = "../../eth2/utils/slot_clock" } diff --git a/beacon_node/beacon_chain/src/beacon_chain_builder.rs b/beacon_node/beacon_chain/src/beacon_chain_builder.rs new file mode 100644 index 000000000..a6c77cb63 --- /dev/null +++ b/beacon_node/beacon_chain/src/beacon_chain_builder.rs @@ -0,0 +1,68 @@ +use crate::BeaconChainTypes; +use std::fs::File; +use std::path::PathBuf; +use std::time::SystemTime; +use types::{ + test_utils::TestingBeaconStateBuilder, BeaconBlock, BeaconState, ChainSpec, EthSpec, Hash256, +}; + +pub struct BeaconChainBuilder { + genesis_state: BeaconState, + genesis_block: BeaconBlock, + spec: ChainSpec, +} + +impl BeaconChainBuilder { + pub fn recent_genesis(validator_count: usize, spec: ChainSpec) -> Self { + Self::quick_start(recent_genesis_time(), validator_count, spec) + } + + pub fn quick_start(genesis_time: u64, validator_count: usize, spec: ChainSpec) -> Self { + let (mut genesis_state, _keypairs) = + TestingBeaconStateBuilder::from_default_keypairs_file_if_exists(validator_count, &spec) + .build(); + + genesis_state.genesis_time = genesis_time; + + Self::from_genesis_state(genesis_state, spec) + } + + pub fn yaml_state(file: PathBuf, spec: ChainSpec) -> Result { + let file = File::open(file.clone()) + .map_err(|e| format!("Unable to open YAML genesis state file {:?}: {:?}", file, e))?; + + let genesis_state = serde_yaml::from_reader(file) + .map_err(|e| format!("Unable to parse YAML genesis state file: {:?}", e))?; + + Ok(Self::from_genesis_state(genesis_state, spec)) + } + + pub fn from_genesis_state(genesis_state: BeaconState, spec: ChainSpec) -> Self { + Self { + genesis_block: genesis_block(&genesis_state, &spec), + genesis_state, + spec, + } + } +} + +fn genesis_block(genesis_state: &BeaconState, spec: &ChainSpec) -> BeaconBlock { + let mut genesis_block = BeaconBlock::empty(&spec); + + genesis_block.state_root = genesis_state.canonical_root(); + + genesis_block +} + +/// Returns the system time, mod 30 minutes. 
+/// +/// Used for easily creating testnets. +fn recent_genesis_time() -> u64 { + let now = SystemTime::now() + .duration_since(SystemTime::UNIX_EPOCH) + .unwrap() + .as_secs(); + let secs_after_last_period = now.checked_rem(30 * 60).unwrap_or(0); + // genesis is now the last 30 minute block. + now - secs_after_last_period +} diff --git a/beacon_node/beacon_chain/src/lib.rs b/beacon_node/beacon_chain/src/lib.rs index cc7725dd8..9c833f778 100644 --- a/beacon_node/beacon_chain/src/lib.rs +++ b/beacon_node/beacon_chain/src/lib.rs @@ -3,6 +3,7 @@ extern crate lazy_static; mod beacon_chain; +mod beacon_chain_builder; mod checkpoint; mod errors; mod fork_choice; @@ -16,6 +17,7 @@ pub use self::beacon_chain::{ }; pub use self::checkpoint::CheckPoint; pub use self::errors::{BeaconChainError, BlockProductionError}; +pub use beacon_chain_builder::BeaconChainBuilder; pub use lmd_ghost; pub use metrics::scrape_for_metrics; pub use parking_lot; From 94d987cb6aaa6fcd7920803c494a00a870f1ffae Mon Sep 17 00:00:00 2001 From: Paul Hauner Date: Fri, 23 Aug 2019 12:12:29 +1000 Subject: [PATCH 099/305] Add `/network/listen_port` API endpoint --- beacon_node/client/src/bootstrapper.rs | 24 ++++++------------------ beacon_node/network/src/service.rs | 7 +++++++ beacon_node/rest_api/src/lib.rs | 1 + beacon_node/rest_api/src/network.rs | 15 +++++++++++++++ 4 files changed, 29 insertions(+), 18 deletions(-) diff --git a/beacon_node/client/src/bootstrapper.rs b/beacon_node/client/src/bootstrapper.rs index 19f13e2da..eaaee4aa1 100644 --- a/beacon_node/client/src/bootstrapper.rs +++ b/beacon_node/client/src/bootstrapper.rs @@ -46,7 +46,7 @@ impl Bootstrapper { /// `/ipv4/192.168.0.1/tcp/9000` if the server advertises a listening address of /// `/ipv4/172.0.0.1/tcp/9000`. 
pub fn best_effort_multiaddr(&self) -> Option { - let tcp_port = self.first_listening_tcp_port()?; + let tcp_port = self.listen_port().ok()?; let mut multiaddr = Multiaddr::with_capacity(2); @@ -61,17 +61,6 @@ impl Bootstrapper { Some(multiaddr) } - /// Reads the server's listening libp2p addresses and returns the first TCP port protocol it - /// finds, if any. - fn first_listening_tcp_port(&self) -> Option { - self.listen_addresses().ok()?.iter().find_map(|multiaddr| { - multiaddr.iter().find_map(|protocol| match protocol { - Protocol::Tcp(port) => Some(port), - _ => None, - }) - }) - } - /// Returns the IPv4 address of the server URL, unless it contains a FQDN. pub fn server_ipv4_addr(&self) -> Option { match self.url.host()? { @@ -86,9 +75,8 @@ impl Bootstrapper { } /// Returns the servers listening libp2p addresses. - pub fn listen_addresses(&self) -> Result, String> { - get_listen_addresses(self.url.clone()) - .map_err(|e| format!("Unable to get listen addresses: {:?}", e)) + pub fn listen_port(&self) -> Result { + get_listen_port(self.url.clone()).map_err(|e| format!("Unable to get listen port: {:?}", e)) } /// Returns the genesis block and state. 
@@ -179,7 +167,7 @@ fn get_block(mut url: Url, slot: Slot) -> Result, Err fn get_enr(mut url: Url) -> Result { url.path_segments_mut() .map(|mut url| { - url.push("node").push("network").push("enr"); + url.push("network").push("enr"); }) .map_err(|_| Error::InvalidUrl)?; @@ -189,10 +177,10 @@ fn get_enr(mut url: Url) -> Result { .map_err(Into::into) } -fn get_listen_addresses(mut url: Url) -> Result, Error> { +fn get_listen_port(mut url: Url) -> Result { url.path_segments_mut() .map(|mut url| { - url.push("node").push("network").push("listen_addresses"); + url.push("network").push("listen_port"); }) .map_err(|_| Error::InvalidUrl)?; diff --git a/beacon_node/network/src/service.rs b/beacon_node/network/src/service.rs index dc7e94140..152f4dc77 100644 --- a/beacon_node/network/src/service.rs +++ b/beacon_node/network/src/service.rs @@ -18,6 +18,7 @@ use tokio::sync::{mpsc, oneshot}; /// Service that handles communication between internal services and the eth2_libp2p network service. pub struct Service { libp2p_service: Arc>, + libp2p_port: u16, _libp2p_exit: oneshot::Sender<()>, _network_send: mpsc::UnboundedSender, _phantom: PhantomData, //message_handler: MessageHandler, @@ -56,6 +57,7 @@ impl Service { )?; let network_service = Service { libp2p_service, + libp2p_port: config.libp2p_port, _libp2p_exit: libp2p_exit, _network_send: network_send.clone(), _phantom: PhantomData, @@ -87,6 +89,11 @@ impl Service { .collect() } + /// Returns the libp2p port that this node has been configured to listen using. + pub fn listen_port(&self) -> u16 { + self.libp2p_port + } + /// Returns the number of libp2p connected peers. 
pub fn connected_peers(&self) -> usize { self.libp2p_service.lock().swarm.connected_peers() diff --git a/beacon_node/rest_api/src/lib.rs b/beacon_node/rest_api/src/lib.rs index 354b23403..a382c49e3 100644 --- a/beacon_node/rest_api/src/lib.rs +++ b/beacon_node/rest_api/src/lib.rs @@ -134,6 +134,7 @@ pub fn start_server( (&Method::GET, "/network/peer_count") => network::get_peer_count::(req), (&Method::GET, "/network/peer_id") => network::get_peer_id::(req), (&Method::GET, "/network/peers") => network::get_peer_list::(req), + (&Method::GET, "/network/listen_port") => network::get_listen_port::(req), (&Method::GET, "/network/listen_addresses") => { network::get_listen_addresses::(req) } diff --git a/beacon_node/rest_api/src/network.rs b/beacon_node/rest_api/src/network.rs index daded9d3d..a3e4c5ee7 100644 --- a/beacon_node/rest_api/src/network.rs +++ b/beacon_node/rest_api/src/network.rs @@ -21,6 +21,21 @@ pub fn get_listen_addresses(req: Request) -> ApiResul ))) } +/// HTTP handle to return the list of libp2p multiaddr the client is listening on. +/// +/// Returns a list of `Multiaddr`, serialized according to their `serde` impl. +pub fn get_listen_port(req: Request) -> ApiResult { + let network = req + .extensions() + .get::>>() + .ok_or_else(|| ApiError::ServerError("NetworkService extension missing".to_string()))?; + + Ok(success_response(Body::from( + serde_json::to_string(&network.listen_port()) + .map_err(|e| ApiError::ServerError(format!("Unable to serialize port: {:?}", e)))?, + ))) +} + /// HTTP handle to return the Discv5 ENR from the client's libp2p service. /// /// ENR is encoded as base64 string. 
From 7d11d782992fc8a8780026860a97be56bb0325b6 Mon Sep 17 00:00:00 2001 From: Paul Hauner Date: Fri, 23 Aug 2019 12:43:34 +1000 Subject: [PATCH 100/305] Abandon starting the node if libp2p doesn't start --- beacon_node/eth2-libp2p/src/service.rs | 15 ++++++++++----- 1 file changed, 10 insertions(+), 5 deletions(-) diff --git a/beacon_node/eth2-libp2p/src/service.rs b/beacon_node/eth2-libp2p/src/service.rs index e1e112e2d..e208dbeca 100644 --- a/beacon_node/eth2-libp2p/src/service.rs +++ b/beacon_node/eth2-libp2p/src/service.rs @@ -16,7 +16,7 @@ use libp2p::core::{ upgrade::{InboundUpgradeExt, OutboundUpgradeExt}, }; use libp2p::{core, secio, PeerId, Swarm, Transport}; -use slog::{debug, info, trace, warn}; +use slog::{crit, debug, info, trace, warn}; use std::fs::File; use std::io::prelude::*; use std::io::{Error, ErrorKind}; @@ -69,10 +69,15 @@ impl Service { log_address.push(Protocol::P2p(local_peer_id.clone().into())); info!(log, "Listening on: {}", log_address); } - Err(err) => warn!( - log, - "Cannot listen on: {} because: {:?}", listen_multiaddr, err - ), + Err(err) => { + crit!( + log, + "Unable to listen on libp2p address"; + "error" => format!("{:?}", err), + "listen_multiaddr" => format!("{}", listen_multiaddr), + ); + return Err("Libp2p was unable to listen on the given listen address.".into()); + } }; // attempt to connect to user-input libp2p nodes From a358bbc1b1bc04a852c792fa33f3ca85f77aabbc Mon Sep 17 00:00:00 2001 From: Paul Hauner Date: Fri, 23 Aug 2019 12:45:31 +1000 Subject: [PATCH 101/305] Update bootstrapper for API changes --- beacon_node/client/src/bootstrapper.rs | 33 ++++++++++++++++++++------ beacon_node/rest_api/src/beacon.rs | 6 ++--- beacon_node/rest_api/src/lib.rs | 4 +++- 3 files changed, 32 insertions(+), 11 deletions(-) diff --git a/beacon_node/client/src/bootstrapper.rs b/beacon_node/client/src/bootstrapper.rs index eaaee4aa1..c94d9a51d 100644 --- a/beacon_node/client/src/bootstrapper.rs +++ b/beacon_node/client/src/bootstrapper.rs 
@@ -3,9 +3,10 @@ use eth2_libp2p::{ Enr, }; use reqwest::{Error as HttpError, Url}; +use serde::Deserialize; use std::borrow::Cow; use std::net::Ipv4Addr; -use types::{BeaconBlock, BeaconState, Checkpoint, EthSpec, Slot}; +use types::{BeaconBlock, BeaconState, Checkpoint, EthSpec, Hash256, Slot}; use url::Host; #[derive(Debug)] @@ -84,9 +85,11 @@ impl Bootstrapper { let genesis_slot = Slot::new(0); let block = get_block(self.url.clone(), genesis_slot) - .map_err(|e| format!("Unable to get genesis block: {:?}", e))?; + .map_err(|e| format!("Unable to get genesis block: {:?}", e))? + .beacon_block; let state = get_state(self.url.clone(), genesis_slot) - .map_err(|e| format!("Unable to get genesis state: {:?}", e))?; + .map_err(|e| format!("Unable to get genesis state: {:?}", e))? + .beacon_state; Ok((state, block)) } @@ -99,9 +102,11 @@ impl Bootstrapper { .map_err(|e| format!("Unable to get finalized slot: {:?}", e))?; let block = get_block(self.url.clone(), finalized_slot) - .map_err(|e| format!("Unable to get finalized block: {:?}", e))?; + .map_err(|e| format!("Unable to get finalized block: {:?}", e))? + .beacon_block; let state = get_state(self.url.clone(), finalized_slot) - .map_err(|e| format!("Unable to get finalized state: {:?}", e))?; + .map_err(|e| format!("Unable to get finalized state: {:?}", e))? 
+ .beacon_state; Ok((state, block)) } @@ -132,7 +137,14 @@ fn get_finalized_slot(mut url: Url, slots_per_epoch: u64) -> Result Ok(checkpoint.epoch.start_slot(slots_per_epoch)) } -fn get_state(mut url: Url, slot: Slot) -> Result, Error> { +#[derive(Deserialize)] +#[serde(bound = "T: EthSpec")] +pub struct StateResponse { + pub root: Hash256, + pub beacon_state: BeaconState, +} + +fn get_state(mut url: Url, slot: Slot) -> Result, Error> { url.path_segments_mut() .map(|mut url| { url.push("beacon").push("state"); @@ -148,7 +160,14 @@ fn get_state(mut url: Url, slot: Slot) -> Result, Err .map_err(Into::into) } -fn get_block(mut url: Url, slot: Slot) -> Result, Error> { +#[derive(Deserialize)] +#[serde(bound = "T: EthSpec")] +pub struct BlockResponse { + pub root: Hash256, + pub beacon_block: BeaconBlock, +} + +fn get_block(mut url: Url, slot: Slot) -> Result, Error> { url.path_segments_mut() .map(|mut url| { url.push("beacon").push("block"); diff --git a/beacon_node/rest_api/src/beacon.rs b/beacon_node/rest_api/src/beacon.rs index 4e3cc02fd..1c66a2819 100644 --- a/beacon_node/rest_api/src/beacon.rs +++ b/beacon_node/rest_api/src/beacon.rs @@ -8,7 +8,7 @@ use store::Store; use types::{BeaconBlock, BeaconState, EthSpec, Hash256, Slot}; #[derive(Serialize)] -struct HeadResponse { +pub struct HeadResponse { pub slot: Slot, pub block_root: Hash256, pub state_root: Hash256, @@ -35,7 +35,7 @@ pub fn get_head(req: Request) -> ApiResult #[derive(Serialize)] #[serde(bound = "T: EthSpec")] -struct BlockResponse { +pub struct BlockResponse { pub root: Hash256, pub beacon_block: BeaconBlock, } @@ -106,7 +106,7 @@ pub fn get_block_root(req: Request) -> ApiR #[derive(Serialize)] #[serde(bound = "T: EthSpec")] -struct StateResponse { +pub struct StateResponse { pub root: Hash256, pub beacon_state: BeaconState, } diff --git a/beacon_node/rest_api/src/lib.rs b/beacon_node/rest_api/src/lib.rs index a382c49e3..964dd7998 100644 --- a/beacon_node/rest_api/src/lib.rs +++ 
b/beacon_node/rest_api/src/lib.rs @@ -13,7 +13,6 @@ mod url_query; use beacon_chain::{BeaconChain, BeaconChainTypes}; use client_network::Service as NetworkService; -pub use config::Config as ApiConfig; use hyper::rt::Future; use hyper::service::service_fn_ok; use hyper::{Body, Method, Response, Server, StatusCode}; @@ -24,6 +23,9 @@ use std::sync::Arc; use tokio::runtime::TaskExecutor; use url_query::UrlQuery; +pub use beacon::{BlockResponse, HeadResponse, StateResponse}; +pub use config::Config as ApiConfig; + #[derive(PartialEq, Debug)] pub enum ApiError { MethodNotAllowed(String), From a8de94ca133ddfb63ced7d02d3432f3166b8bcbb Mon Sep 17 00:00:00 2001 From: Paul Hauner Date: Fri, 23 Aug 2019 13:02:17 +1000 Subject: [PATCH 102/305] Remove unnecessary trait bounds --- beacon_node/beacon_chain/src/test_utils.rs | 4 ++-- beacon_node/client/src/beacon_chain_types.rs | 2 +- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/beacon_node/beacon_chain/src/test_utils.rs b/beacon_node/beacon_chain/src/test_utils.rs index bd51f8620..09f4749ea 100644 --- a/beacon_node/beacon_chain/src/test_utils.rs +++ b/beacon_node/beacon_chain/src/test_utils.rs @@ -55,7 +55,7 @@ where impl BeaconChainTypes for CommonTypes where L: LmdGhost + 'static, - E: EthSpec + 'static, + E: EthSpec, { type Store = MemoryStore; type SlotClock = TestingSlotClock; @@ -70,7 +70,7 @@ where pub struct BeaconChainHarness where L: LmdGhost + 'static, - E: EthSpec + 'static, + E: EthSpec, { pub chain: BeaconChain>, pub keypairs: Vec, diff --git a/beacon_node/client/src/beacon_chain_types.rs b/beacon_node/client/src/beacon_chain_types.rs index adea8c7b5..5168c067a 100644 --- a/beacon_node/client/src/beacon_chain_types.rs +++ b/beacon_node/client/src/beacon_chain_types.rs @@ -39,7 +39,7 @@ pub struct ClientType { impl BeaconChainTypes for ClientType where S: Store + 'static, - E: EthSpec + 'static + Clone, + E: EthSpec, { type Store = S; type SlotClock = SystemTimeSlotClock; From 
453c8e2255263b5116b8fb7f94a29254e7836e4a Mon Sep 17 00:00:00 2001 From: Paul Hauner Date: Fri, 23 Aug 2019 16:39:32 +1000 Subject: [PATCH 103/305] Re-arrange CLI to suit new "testnet" pattern --- beacon_node/Cargo.toml | 1 + beacon_node/client/src/config.rs | 11 +- beacon_node/src/config.rs | 206 ++++++++++++++++++++++++++++ beacon_node/src/main.rs | 227 ++++++++++--------------------- beacon_node/src/run.rs | 1 - 5 files changed, 280 insertions(+), 166 deletions(-) create mode 100644 beacon_node/src/config.rs diff --git a/beacon_node/Cargo.toml b/beacon_node/Cargo.toml index 9124047e4..9ce724c14 100644 --- a/beacon_node/Cargo.toml +++ b/beacon_node/Cargo.toml @@ -11,6 +11,7 @@ store = { path = "./store" } client = { path = "client" } version = { path = "version" } clap = "2.32.0" +rand = "0.7" slog = { version = "^2.2.3" , features = ["max_level_trace"] } slog-term = "^2.4.0" slog-async = "^2.3.0" diff --git a/beacon_node/client/src/config.rs b/beacon_node/client/src/config.rs index ea8186dbc..e1464e5b4 100644 --- a/beacon_node/client/src/config.rs +++ b/beacon_node/client/src/config.rs @@ -1,4 +1,4 @@ -use crate::{Bootstrapper, Eth2Config}; +use crate::Bootstrapper; use clap::ArgMatches; use network::NetworkConfig; use serde_derive::{Deserialize, Serialize}; @@ -127,15 +127,6 @@ impl Config { self.data_dir = PathBuf::from(dir); }; - if let Some(default_spec) = args.value_of("default-spec") { - match default_spec { - "mainnet" => self.spec_constants = Eth2Config::mainnet().spec_constants, - "minimal" => self.spec_constants = Eth2Config::minimal().spec_constants, - "interop" => self.spec_constants = Eth2Config::interop().spec_constants, - _ => {} // not supported - } - } - if let Some(dir) = args.value_of("db") { self.db_type = dir.to_string(); }; diff --git a/beacon_node/src/config.rs b/beacon_node/src/config.rs new file mode 100644 index 000000000..959edbd60 --- /dev/null +++ b/beacon_node/src/config.rs @@ -0,0 +1,206 @@ +use clap::ArgMatches; +use 
client::{ClientConfig, Eth2Config}; +use eth2_config::{read_from_file, write_to_file}; +use rand::{distributions::Alphanumeric, Rng}; +use slog::{crit, info, Logger}; +use std::fs; +use std::path::PathBuf; + +pub const DEFAULT_DATA_DIR: &str = ".lighthouse"; +pub const CLIENT_CONFIG_FILENAME: &str = "beacon-node.toml"; +pub const ETH2_CONFIG_FILENAME: &str = "eth2-spec.toml"; + +type Result = std::result::Result; +type Config = (ClientConfig, Eth2Config); + +/// Gets the fully-initialized global client and eth2 configuration objects. +pub fn get_configs(matches: &ArgMatches, log: &Logger) -> Result { + let mut builder = ConfigBuilder::new(matches, log)?; + + match matches.subcommand() { + ("testnet", Some(sub_matches)) => { + if sub_matches.is_present("random-datadir") { + builder.set_random_datadir()?; + } + + info!( + log, + "Creating new datadir"; + "path" => format!("{:?}", builder.data_dir) + ); + + builder.update_spec_from_subcommand(&sub_matches)?; + builder.write_configs_to_new_datadir()?; + } + _ => { + info!( + log, + "Resuming from existing datadir"; + "path" => format!("{:?}", builder.data_dir) + ); + + // If the `testnet` command was not provided, attempt to load an existing datadir and + // continue with an existing chain. + builder.load_from_datadir()?; + } + }; + + builder.build() +} + +/// Allows for building a set of configurations based upon `clap` arguments. +struct ConfigBuilder<'a> { + matches: &'a ArgMatches<'a>, + log: &'a Logger, + pub data_dir: PathBuf, + eth2_config: Eth2Config, + client_config: ClientConfig, +} + +impl<'a> ConfigBuilder<'a> { + /// Create a new builder with default settings. + pub fn new(matches: &'a ArgMatches, log: &'a Logger) -> Result { + // Read the `--datadir` flag. + // + // If it's not present, try and find the home directory (`~`) and push the default data + // directory onto it. 
+ let data_dir: PathBuf = matches + .value_of("datadir") + .map(|string| PathBuf::from(string)) + .or_else(|| { + dirs::home_dir().map(|mut home| { + home.push(DEFAULT_DATA_DIR); + home + }) + }) + .ok_or_else(|| "Unable to find a home directory for the datadir".to_string())?; + + Ok(Self { + matches, + log, + data_dir, + eth2_config: Eth2Config::minimal(), + client_config: ClientConfig::default(), + }) + } + + /// Consumes self, returning the configs. + pub fn build(mut self) -> Result { + self.eth2_config.apply_cli_args(&self.matches)?; + self.client_config + .apply_cli_args(&self.matches, &mut self.log.clone())?; + + if self.eth2_config.spec_constants != self.client_config.spec_constants { + crit!(self.log, "Specification constants do not match."; + "client_config" => format!("{}", self.client_config.spec_constants), + "eth2_config" => format!("{}", self.eth2_config.spec_constants) + ); + return Err("Specification constant mismatch".into()); + } + + self.client_config.data_dir = self.data_dir; + + Ok((self.client_config, self.eth2_config)) + } + + /// Set the config data_dir to be an random directory. + /// + /// Useful for easily spinning up ephemeral testnets. + pub fn set_random_datadir(&mut self) -> Result<()> { + let random = rand::thread_rng() + .sample_iter(&Alphanumeric) + .take(10) + .collect::(); + + let mut s = DEFAULT_DATA_DIR.to_string(); + s.push_str("_random_"); + s.push_str(&random); + + self.data_dir.pop(); + self.data_dir.push(s); + + Ok(()) + } + + /// Reads the subcommand and tries to update `self.eth2_config` based up on the `--spec` flag. + /// + /// Returns an error if the `--spec` flag is not present. + pub fn update_spec_from_subcommand(&mut self, sub_matches: &ArgMatches) -> Result<()> { + // Re-initialise the `Eth2Config`. + // + // If a CLI parameter is set, overwrite any config file present. + // If a parameter is not set, use either the config file present or default to minimal. 
+ let eth2_config = match sub_matches.value_of("spec") { + Some("mainnet") => Eth2Config::mainnet(), + Some("minimal") => Eth2Config::minimal(), + Some("interop") => Eth2Config::interop(), + _ => return Err("Unable to determine specification type.".into()), + }; + + self.client_config.spec_constants = sub_matches + .value_of("spec") + .expect("Guarded by prior match statement") + .to_string(); + self.eth2_config = eth2_config; + + Ok(()) + } + + /// Writes the configs in `self` to `self.data_dir`. + /// + /// Returns an error if `self.data_dir` already exists. + pub fn write_configs_to_new_datadir(&mut self) -> Result<()> { + // Do not permit creating a new config when the datadir exists. + if self.data_dir.exists() { + return Err( + "Datadir already exists, will not overwrite. Remove the directory or use --datadir." + .into(), + ); + } + + // Create `datadir` and any non-existing parent directories. + fs::create_dir_all(&self.data_dir).map_err(|e| { + crit!(self.log, "Failed to initialize data dir"; "error" => format!("{}", e)); + format!("{}", e) + })?; + + // Write the client config to a TOML file in the datadir. + write_to_file( + self.data_dir.join(CLIENT_CONFIG_FILENAME), + &self.client_config, + ) + .map_err(|e| format!("Unable to write {} file: {:?}", CLIENT_CONFIG_FILENAME, e))?; + + // Write the eth2 config to a TOML file in the datadir. + write_to_file(self.data_dir.join(ETH2_CONFIG_FILENAME), &self.eth2_config) + .map_err(|e| format!("Unable to write {} file: {:?}", ETH2_CONFIG_FILENAME, e))?; + + Ok(()) + } + + /// Attempts to load the client and eth2 configs from `self.data_dir`. + /// + /// Returns an error if any files are not found or are invalid. + pub fn load_from_datadir(&mut self) -> Result<()> { + // Check to ensure the datadir exists. + // + // For now we return an error. In the future we may decide to boot a default (e.g., + // public testnet or mainnet). + if !self.data_dir.exists() { + return Err( + "No datadir found. 
Use the 'testnet' sub-command to select a testnet type.".into(), + ); + } + + self.eth2_config = read_from_file::(self.data_dir.join(ETH2_CONFIG_FILENAME)) + .map_err(|e| format!("Unable to parse {} file: {:?}", ETH2_CONFIG_FILENAME, e))? + .ok_or_else(|| format!("{} file does not exist", ETH2_CONFIG_FILENAME))?; + + self.client_config = + read_from_file::(self.data_dir.join(CLIENT_CONFIG_FILENAME)) + .map_err(|e| format!("Unable to parse {} file: {:?}", CLIENT_CONFIG_FILENAME, e))? + .ok_or_else(|| format!("{} file does not exist", ETH2_CONFIG_FILENAME))?; + + Ok(()) + } +} diff --git a/beacon_node/src/main.rs b/beacon_node/src/main.rs index 04366baa7..12c9b8a01 100644 --- a/beacon_node/src/main.rs +++ b/beacon_node/src/main.rs @@ -1,12 +1,10 @@ +mod config; mod run; -use clap::{App, Arg}; -use client::{ClientConfig, Eth2Config}; +use clap::{App, Arg, SubCommand}; +use config::get_configs; use env_logger::{Builder, Env}; -use eth2_config::{read_from_file, write_to_file}; use slog::{crit, o, warn, Drain, Level}; -use std::fs; -use std::path::PathBuf; pub const DEFAULT_DATA_DIR: &str = ".lighthouse"; @@ -31,6 +29,7 @@ fn main() { .value_name("DIR") .help("Data directory for keys and databases.") .takes_value(true) + .global(true) ) .arg( Arg::with_name("logfile") @@ -45,6 +44,7 @@ fn main() { .value_name("NETWORK-DIR") .help("Data directory for network keys.") .takes_value(true) + .global(true) ) /* * Network parameters. @@ -163,24 +163,6 @@ fn main() { .possible_values(&["disk", "memory"]) .default_value("memory"), ) - /* - * Specification/testnet params. - */ - .arg( - Arg::with_name("default-spec") - .long("default-spec") - .value_name("TITLE") - .short("default-spec") - .help("Specifies the default eth2 spec to be used. 
This will override any spec written to disk and will therefore be used by default in future instances.") - .takes_value(true) - .possible_values(&["mainnet", "minimal", "interop"]) - ) - .arg( - Arg::with_name("recent-genesis") - .long("recent-genesis") - .short("r") - .help("When present, genesis will be within 30 minutes prior. Only for testing"), - ) /* * Logging. */ @@ -201,14 +183,68 @@ fn main() { .takes_value(true), ) /* - * Bootstrap. + * The "testnet" sub-command. + * + * Allows for creating a new datadir with testnet-specific configs. */ - .arg( - Arg::with_name("bootstrap") - .long("bootstrap") - .value_name("HTTP_SERVER") - .help("Load the genesis state and libp2p address from the HTTP API of another Lighthouse node.") - .takes_value(true) + .subcommand(SubCommand::with_name("testnet") + .about("Create a new Lighthouse datadir using a testnet strategy.") + .arg( + Arg::with_name("spec") + .short("s") + .long("spec") + .value_name("TITLE") + .help("Specifies the default eth2 spec type. Only effective when creating a new datadir.") + .takes_value(true) + .required(true) + .possible_values(&["mainnet", "minimal", "interop"]) + ) + .arg( + Arg::with_name("random-datadir") + .long("random-datadir") + .short("r") + .help("If present, append a random string to the datadir path. Useful for fast development \ + iteration.") + ) + .arg( + Arg::with_name("force-create") + .long("force-create") + .short("f") + .help("If present, will delete any existing datadir before creating a new one. Cannot be \ + used when specifying --random-datadir (logic error).") + .conflicts_with("random-datadir") + ) + /* + * Testnet sub-commands. 
+ */ + .subcommand(SubCommand::with_name("bootstrap") + .about("Connects to the given HTTP server, downloads a genesis state and attempts to peer with it.") + .arg(Arg::with_name("server") + .value_name("HTTP_SERVER") + .required(true) + .help("A HTTP server, with a http:// prefix")) + .arg(Arg::with_name("libp2p-port") + .short("p") + .long("port") + .value_name("TCP_PORT") + .help("A libp2p listen port used to peer with the bootstrap server")) + ) + .subcommand(SubCommand::with_name("recent") + .about("Creates a new genesis state where the genesis time was at the previous \ + 30-minute boundary (e.g., 12:00, 12:30, 13:00, etc.)") + .arg(Arg::with_name("validator_count") + .value_name("VALIDATOR_COUNT") + .required(true) + .help("The number of validators in the genesis state")) + ) + .subcommand(SubCommand::with_name("yaml-genesis-state") + .about("Creates a new datadir where the genesis state is read from YAML. Will fail to parse \ + a YAML state that was generated to a different spec than that specified by --spec.") + .arg(Arg::with_name("file") + .value_name("YAML_FILE") + .required(true) + .help("A YAML file from which to read the state")) + ) ) .get_matches(); @@ -235,143 +271,24 @@ fn main() { _ => drain.filter_level(Level::Trace), }; - let mut log = slog::Logger::root(drain.fuse(), o!()); + let log = slog::Logger::root(drain.fuse(), o!()); warn!( log, "Ethereum 2.0 is pre-release. This software is experimental." 
); - let data_dir = match matches - .value_of("datadir") - .and_then(|v| Some(PathBuf::from(v))) - { - Some(v) => v, - None => { - // use the default - let mut default_dir = match dirs::home_dir() { - Some(v) => v, - None => { - crit!(log, "Failed to find a home directory"); - return; - } - }; - default_dir.push(DEFAULT_DATA_DIR); - default_dir - } - }; - - // create the directory if needed - match fs::create_dir_all(&data_dir) { - Ok(_) => {} - Err(e) => { - crit!(log, "Failed to initialize data dir"; "error" => format!("{}", e)); - return; - } - } - - let client_config_path = data_dir.join(CLIENT_CONFIG_FILENAME); - - // Attempt to load the `ClientConfig` from disk. + // Load the process-wide configuration. // - // If file doesn't exist, create a new, default one. - let mut client_config = match read_from_file::(client_config_path.clone()) { - Ok(Some(c)) => c, - Ok(None) => { - let default = ClientConfig::default(); - if let Err(e) = write_to_file(client_config_path, &default) { - crit!(log, "Failed to write default ClientConfig to file"; "error" => format!("{:?}", e)); - return; - } - default - } + // May load this from disk or create a new configuration, depending on the CLI flags supplied. + let (client_config, eth2_config) = match get_configs(&matches, &log) { + Ok(configs) => configs, Err(e) => { - crit!(log, "Failed to load a ChainConfig file"; "error" => format!("{:?}", e)); + crit!(log, "Failed to load configuration"; "error" => e); return; } }; - // Ensure the `data_dir` in the config matches that supplied to the CLI. - client_config.data_dir = data_dir.clone(); - - // Update the client config with any CLI args. - match client_config.apply_cli_args(&matches, &mut log) { - Ok(()) => (), - Err(s) => { - crit!(log, "Failed to parse ClientConfig CLI arguments"; "error" => s); - return; - } - }; - - let eth2_config_path = data_dir.join(ETH2_CONFIG_FILENAME); - - // Initialise the `Eth2Config`. 
- // - // If a CLI parameter is set, overwrite any config file present. - // If a parameter is not set, use either the config file present or default to minimal. - let cli_config = match matches.value_of("default-spec") { - Some("mainnet") => Some(Eth2Config::mainnet()), - Some("minimal") => Some(Eth2Config::minimal()), - Some("interop") => Some(Eth2Config::interop()), - _ => None, - }; - // if a CLI flag is specified, write the new config if it doesn't exist, - // otherwise notify the user that the file will not be written. - let eth2_config_from_file = match read_from_file::(eth2_config_path.clone()) { - Ok(config) => config, - Err(e) => { - crit!(log, "Failed to read the Eth2Config from file"; "error" => format!("{:?}", e)); - return; - } - }; - - let mut eth2_config = { - if let Some(cli_config) = cli_config { - if eth2_config_from_file.is_none() { - // write to file if one doesn't exist - if let Err(e) = write_to_file(eth2_config_path, &cli_config) { - crit!(log, "Failed to write default Eth2Config to file"; "error" => format!("{:?}", e)); - return; - } - } else { - warn!( - log, - "Eth2Config file exists. Configuration file is ignored, using default" - ); - } - cli_config - } else { - // CLI config not specified, read from disk - match eth2_config_from_file { - Some(config) => config, - None => { - // set default to minimal - let eth2_config = Eth2Config::minimal(); - if let Err(e) = write_to_file(eth2_config_path, ð2_config) { - crit!(log, "Failed to write default Eth2Config to file"; "error" => format!("{:?}", e)); - return; - } - eth2_config - } - } - } - }; - - // Update the eth2 config with any CLI flags. 
- match eth2_config.apply_cli_args(&matches) { - Ok(()) => (), - Err(s) => { - crit!(log, "Failed to parse Eth2Config CLI arguments"; "error" => s); - return; - } - }; - - // check to ensure the spec constants between the client and eth2_config match - if eth2_config.spec_constants != client_config.spec_constants { - crit!(log, "Specification constants do not match."; "client_config" => format!("{}", client_config.spec_constants), "eth2_config" => format!("{}", eth2_config.spec_constants)); - return; - } - // Start the node using a `tokio` executor. match run::run_beacon_node(client_config, eth2_config, &log) { Ok(_) => {} diff --git a/beacon_node/src/run.rs b/beacon_node/src/run.rs index f88cb7460..e23b5bc72 100644 --- a/beacon_node/src/run.rs +++ b/beacon_node/src/run.rs @@ -46,7 +46,6 @@ pub fn run_beacon_node( log, "BeaconNode init"; "p2p_listen_address" => format!("{:?}", &other_client_config.network.listen_address), - "data_dir" => format!("{:?}", other_client_config.data_dir()), "network_dir" => format!("{:?}", other_client_config.network.network_dir), "spec_constants" => &spec_constants, "db_type" => &other_client_config.db_type, From cdf3ade63fd32bea919c8e7fa847855352569148 Mon Sep 17 00:00:00 2001 From: Paul Hauner Date: Fri, 23 Aug 2019 18:23:58 +1000 Subject: [PATCH 104/305] Add further CLI progress --- beacon_node/client/src/bootstrapper.rs | 8 ++- beacon_node/client/src/config.rs | 72 ++++++++++++-------------- beacon_node/src/config.rs | 41 +++++++++++++-- 3 files changed, 76 insertions(+), 45 deletions(-) diff --git a/beacon_node/client/src/bootstrapper.rs b/beacon_node/client/src/bootstrapper.rs index c94d9a51d..9baf1dc7e 100644 --- a/beacon_node/client/src/bootstrapper.rs +++ b/beacon_node/client/src/bootstrapper.rs @@ -46,8 +46,12 @@ impl Bootstrapper { /// For example, the server `http://192.168.0.1` might end up with a `best_effort_multiaddr` of /// `/ipv4/192.168.0.1/tcp/9000` if the server advertises a listening address of /// 
`/ipv4/172.0.0.1/tcp/9000`. - pub fn best_effort_multiaddr(&self) -> Option { - let tcp_port = self.listen_port().ok()?; + pub fn best_effort_multiaddr(&self, port: Option) -> Option { + let tcp_port = if let Some(port) = port { + port + } else { + self.listen_port().ok()? + }; let mut multiaddr = Multiaddr::with_capacity(2); diff --git a/beacon_node/client/src/config.rs b/beacon_node/client/src/config.rs index e1464e5b4..e802a93a3 100644 --- a/beacon_node/client/src/config.rs +++ b/beacon_node/client/src/config.rs @@ -21,14 +21,42 @@ pub struct Config { db_name: String, pub log_file: PathBuf, pub spec_constants: String, - pub genesis_state: GenesisState, + #[serde(skip)] + pub boot_method: BootMethod, pub network: network::NetworkConfig, pub rpc: rpc::RPCConfig, pub rest_api: rest_api::ApiConfig, } -#[derive(Debug, Clone, Serialize, Deserialize)] -#[serde(tag = "type")] +#[derive(Debug, Clone)] +pub enum BootMethod { + /// Resume from an existing database. + Resume, + /// Generate a state with `validator_count` validators, all with well-known secret keys. + /// + /// Set the genesis time to be the start of the previous 30-minute window. + RecentGenesis { validator_count: usize }, + /// Generate a state with `genesis_time` and `validator_count` validators, all with well-known + /// secret keys. + Generated { + validator_count: usize, + genesis_time: u64, + }, + /// Load a YAML-encoded genesis state from a file. + Yaml { file: PathBuf }, + /// Use a HTTP server (running our REST-API) to load genesis and finalized states and blocks. + HttpBootstrap { + server: String, + port: Option, + }, +} + +impl Default for BootMethod { + fn default() -> Self { + BootMethod::Resume + } +} + pub enum GenesisState { /// Use the mainnet genesis state. 
/// @@ -61,9 +89,7 @@ impl Default for Config { rpc: rpc::RPCConfig::default(), rest_api: rest_api::ApiConfig::default(), spec_constants: TESTNET_SPEC_CONSTANTS.into(), - genesis_state: GenesisState::RecentGenesis { - validator_count: TESTNET_VALIDATOR_COUNT, - }, + boot_method: BootMethod::default(), } } } @@ -140,40 +166,6 @@ impl Config { self.update_logger(log)?; }; - // If the `--bootstrap` flag is provided, overwrite the default configuration. - if let Some(server) = args.value_of("bootstrap") { - do_bootstrapping(self, server.to_string(), &log)?; - } - Ok(()) } } - -/// Perform the HTTP bootstrapping procedure, reading an ENR and multiaddr from the HTTP server and -/// adding them to the `config`. -fn do_bootstrapping(config: &mut Config, server: String, log: &slog::Logger) -> Result<(), String> { - // Set the genesis state source. - config.genesis_state = GenesisState::HttpBootstrap { - server: server.to_string(), - }; - - let bootstrapper = Bootstrapper::from_server_string(server.to_string())?; - - config.network.boot_nodes.push(bootstrapper.enr()?); - - if let Some(server_multiaddr) = bootstrapper.best_effort_multiaddr() { - info!( - log, - "Estimated bootstrapper libp2p address"; - "multiaddr" => format!("{:?}", server_multiaddr) - ); - config.network.libp2p_nodes.push(server_multiaddr); - } else { - warn!( - log, - "Unable to estimate a bootstrapper libp2p address, this node may not find any peers." 
- ); - } - - Ok(()) -} diff --git a/beacon_node/src/config.rs b/beacon_node/src/config.rs index 959edbd60..b66a00abb 100644 --- a/beacon_node/src/config.rs +++ b/beacon_node/src/config.rs @@ -1,5 +1,5 @@ use clap::ArgMatches; -use client::{ClientConfig, Eth2Config}; +use client::{Bootstrapper, ClientConfig, Eth2Config}; use eth2_config::{read_from_file, write_to_file}; use rand::{distributions::Alphanumeric, Rng}; use slog::{crit, info, Logger}; @@ -30,6 +30,41 @@ pub fn get_configs(matches: &ArgMatches, log: &Logger) -> Result { ); builder.update_spec_from_subcommand(&sub_matches)?; + + match sub_matches.subcommand() { + // The bootstrap testnet method requires inserting a libp2p address into the + // network config. + ("bootstrap", Some(sub_matches)) => { + let server = sub_matches + .value_of("server") + .ok_or_else(|| "No bootstrap server specified".into())?; + + let bootstrapper = Bootstrapper::from_server_string(server.to_string())?; + + if let Some(server_multiaddr) = + bootstrapper.best_effort_multiaddr(sub_matches.value_of("libp2p_port")) + { + info!( + log, + "Estimated bootstrapper libp2p address"; + "multiaddr" => format!("{:?}", server_multiaddr) + ); + + builder + .client_config + .network + .libp2p_nodes + .push(server_multiaddr); + } else { + warn!( + log, + "Unable to estimate a bootstrapper libp2p address, this node may not find any peers." 
+ ); + }; + } + _ => (), + }; + builder.write_configs_to_new_datadir()?; } _ => { @@ -53,8 +88,8 @@ struct ConfigBuilder<'a> { matches: &'a ArgMatches<'a>, log: &'a Logger, pub data_dir: PathBuf, - eth2_config: Eth2Config, - client_config: ClientConfig, + pub eth2_config: Eth2Config, + pub client_config: ClientConfig, } impl<'a> ConfigBuilder<'a> { From b078385362293fda872ee4dc62d0e1f8888005a8 Mon Sep 17 00:00:00 2001 From: Age Manning Date: Sat, 24 Aug 2019 01:09:29 +1000 Subject: [PATCH 105/305] Improved syncing compilation issues --- beacon_node/network/src/message_handler.rs | 118 +--- beacon_node/network/src/sync/manager.rs | 696 +++++++++++--------- beacon_node/network/src/sync/mod.rs | 2 +- beacon_node/network/src/sync/simple_sync.rs | 336 ++++++---- 4 files changed, 622 insertions(+), 530 deletions(-) diff --git a/beacon_node/network/src/message_handler.rs b/beacon_node/network/src/message_handler.rs index fd10c5aea..7a1a4ad31 100644 --- a/beacon_node/network/src/message_handler.rs +++ b/beacon_node/network/src/message_handler.rs @@ -22,8 +22,6 @@ pub struct MessageHandler { _chain: Arc>, /// The syncing framework. sync: SimpleSync, - /// The context required to send messages to, and process messages from peers. - network_context: NetworkContext, /// The `MessageHandler` logger. 
log: slog::Logger, } @@ -52,15 +50,13 @@ impl MessageHandler { trace!(log, "Service starting"); let (handler_send, handler_recv) = mpsc::unbounded_channel(); - // Initialise sync and begin processing in thread - let sync = SimpleSync::new(beacon_chain.clone(), &log); + let sync = SimpleSync::new(beacon_chain.clone(), network_send, &log); // generate the Message handler let mut handler = MessageHandler { _chain: beacon_chain.clone(), sync, - network_context: NetworkContext::new(network_send, log.clone()), log: log.clone(), }; @@ -81,7 +77,7 @@ impl MessageHandler { match message { // we have initiated a connection to a peer HandlerMessage::PeerDialed(peer_id) => { - self.sync.on_connect(peer_id, &mut self.network_context); + self.sync.on_connect(peer_id); } // A peer has disconnected HandlerMessage::PeerDisconnected(peer_id) => { @@ -112,32 +108,24 @@ impl MessageHandler { /// A new RPC request has been received from the network. fn handle_rpc_request(&mut self, peer_id: PeerId, request_id: RequestId, request: RPCRequest) { match request { - RPCRequest::Hello(hello_message) => self.sync.on_hello_request( - peer_id, - request_id, - hello_message, - &mut self.network_context, - ), + RPCRequest::Hello(hello_message) => { + self.sync + .on_hello_request(peer_id, request_id, hello_message) + } RPCRequest::Goodbye(goodbye_reason) => { debug!( self.log, "PeerGoodbye"; "peer" => format!("{:?}", peer_id), - "reason" => format!("{:?}", reason), + "reason" => format!("{:?}", goodbye_reason), ); - self.sync.on_disconnect(peer_id), - }, - RPCRequest::BeaconBlocks(request) => self.sync.on_beacon_blocks_request( - peer_id, - request_id, - request, - &mut self.network_context, - ), - RPCRequest::RecentBeaconBlocks(request) => self.sync.on_recent_beacon_blocks_request( - peer_id, - request_id, - request, - &mut self.network_context, - ), + self.sync.on_disconnect(peer_id); + } + RPCRequest::BeaconBlocks(request) => self + .sync + .on_beacon_blocks_request(peer_id, request_id, 
request), + RPCRequest::RecentBeaconBlocks(request) => self + .sync + .on_recent_beacon_blocks_request(peer_id, request_id, request), } } @@ -163,20 +151,15 @@ impl MessageHandler { RPCErrorResponse::Success(response) => { match response { RPCResponse::Hello(hello_message) => { - self.sync.on_hello_response( - peer_id, - hello_message, - &mut self.network_context, - ); + self.sync.on_hello_response(peer_id, hello_message); } RPCResponse::BeaconBlocks(response) => { - match self.decode_beacon_blocks(response) { + match self.decode_beacon_blocks(&response) { Ok(beacon_blocks) => { self.sync.on_beacon_blocks_response( peer_id, request_id, beacon_blocks, - &mut self.network_context, ); } Err(e) => { @@ -186,13 +169,12 @@ impl MessageHandler { } } RPCResponse::RecentBeaconBlocks(response) => { - match self.decode_beacon_blocks(response) { + match self.decode_beacon_blocks(&response) { Ok(beacon_blocks) => { self.sync.on_recent_beacon_blocks_response( - request_id, peer_id, + request_id, beacon_blocks, - &mut self.network_context, ); } Err(e) => { @@ -217,19 +199,14 @@ impl MessageHandler { match gossip_message { PubsubMessage::Block(message) => match self.decode_gossip_block(message) { Ok(block) => { - let _should_forward_on = - self.sync - .on_block_gossip(peer_id, block, &mut self.network_context); + let _should_forward_on = self.sync.on_block_gossip(peer_id, block); } Err(e) => { debug!(self.log, "Invalid gossiped beacon block"; "peer_id" => format!("{}", peer_id), "Error" => format!("{:?}", e)); } }, PubsubMessage::Attestation(message) => match self.decode_gossip_attestation(message) { - Ok(attestation) => { - self.sync - .on_attestation_gossip(peer_id, attestation, &mut self.network_context) - } + Ok(attestation) => self.sync.on_attestation_gossip(peer_id, attestation), Err(e) => { debug!(self.log, "Invalid gossiped attestation"; "peer_id" => format!("{}", peer_id), "Error" => format!("{:?}", e)); } @@ -331,56 +308,3 @@ impl MessageHandler { 
Vec::from_ssz_bytes(&beacon_blocks) } } - -/// Wraps a Network Channel to employ various RPC/Sync related network functionality. -pub struct NetworkContext { - /// The network channel to relay messages to the Network service. - network_send: mpsc::UnboundedSender, - /// Logger for the `NetworkContext`. - log: slog::Logger, -} - -impl NetworkContext { - pub fn new(network_send: mpsc::UnboundedSender, log: slog::Logger) -> Self { - Self { network_send, log } - } - - pub fn disconnect(&mut self, peer_id: PeerId, reason: GoodbyeReason) { - self.send_rpc_request(peer_id, RPCRequest::Goodbye(reason)) - // TODO: disconnect peers. - } - - pub fn send_rpc_request(&mut self, peer_id: PeerId, rpc_request: RPCRequest) { - // Note: There is currently no use of keeping track of requests. However the functionality - // is left here for future revisions. - self.send_rpc_event(peer_id, RPCEvent::Request(0, rpc_request)); - } - - //TODO: Handle Error responses - pub fn send_rpc_response( - &mut self, - peer_id: PeerId, - request_id: RequestId, - rpc_response: RPCErrorResponse, - ) { - self.send_rpc_event( - peer_id, - RPCEvent::Response(request_id, RPCErrorResponse::Success(rpc_response)), - ); - } - - fn send_rpc_event(&mut self, peer_id: PeerId, rpc_event: RPCEvent) { - self.send(peer_id, OutgoingMessage::RPC(rpc_event)) - } - - fn send(&mut self, peer_id: PeerId, outgoing_message: OutgoingMessage) { - self.network_send - .try_send(NetworkMessage::Send(peer_id, outgoing_message)) - .unwrap_or_else(|_| { - warn!( - self.log, - "Could not send RPC message to the network service" - ) - }); - } -} diff --git a/beacon_node/network/src/sync/manager.rs b/beacon_node/network/src/sync/manager.rs index a4ce544ec..f5c669455 100644 --- a/beacon_node/network/src/sync/manager.rs +++ b/beacon_node/network/src/sync/manager.rs @@ -1,129 +1,164 @@ -const MAX_BLOCKS_PER_REQUEST: usize = 10; +use super::simple_sync::{PeerSyncInfo, FUTURE_SLOT_TOLERANCE}; +use beacon_chain::{BeaconChain, 
BeaconChainTypes, BlockProcessingOutcome}; +use eth2_libp2p::rpc::methods::*; +use eth2_libp2p::rpc::RequestId; +use eth2_libp2p::PeerId; +use slog::{debug, info, trace, warn, Logger}; +use std::collections::{HashMap, HashSet}; +use std::ops::{Add, Sub}; +use std::sync::Arc; +use types::{BeaconBlock, EthSpec, Hash256, Slot}; + +const MAX_BLOCKS_PER_REQUEST: u64 = 10; /// The number of slots that we can import blocks ahead of us, before going into full Sync mode. -const SLOT_IMPORT_TOLERANCE: u64 = 10; +const SLOT_IMPORT_TOLERANCE: usize = 10; const PARENT_FAIL_TOLERANCE: usize = 3; -const PARENT_DEPTH_TOLERANCE: usize = SLOT_IMPORT_TOLERANCE*2; +const PARENT_DEPTH_TOLERANCE: usize = SLOT_IMPORT_TOLERANCE * 2; +#[derive(PartialEq)] enum BlockRequestsState { QueuedForward, QueuedBackward, Pending(RequestId), Complete, + Failed, } -struct BlockRequests { - target_head_slot: Slot +struct BlockRequests { + target_head_slot: Slot, target_head_root: Hash256, - downloaded_blocks: Vec, - state: State, + downloaded_blocks: Vec>, + state: BlockRequestsState, } -struct ParentRequests { - downloaded_blocks: Vec, - attempts: usize, +struct ParentRequests { + downloaded_blocks: Vec>, + failed_attempts: usize, last_submitted_peer: PeerId, // to downvote the submitting peer. 
state: BlockRequestsState, } -impl BlockRequests { - +impl BlockRequests { // gets the start slot for next batch // last block slot downloaded plus 1 fn next_start_slot(&self) -> Option { if !self.downloaded_blocks.is_empty() { match self.state { BlockRequestsState::QueuedForward => { - let last_element_index = self.downloaded_blocks.len() -1; - Some(downloaded_blocks[last_element_index].slot.add(1)) + let last_element_index = self.downloaded_blocks.len() - 1; + Some(self.downloaded_blocks[last_element_index].slot.add(1)) } BlockRequestsState::QueuedBackward => { let earliest_known_slot = self.downloaded_blocks[0].slot; Some(earliest_known_slot.add(1).sub(MAX_BLOCKS_PER_REQUEST)) } + _ => { + // pending/complete/failed + None + } } - } - else { + } else { None } } } +#[derive(PartialEq, Debug, Clone)] enum ManagerState { Syncing, Regular, Stalled, } -enum ImportManagerOutcome { +pub(crate) enum ImportManagerOutcome { Idle, - RequestBlocks{ + RequestBlocks { peer_id: PeerId, request_id: RequestId, request: BeaconBlocksRequest, }, + /// Updates information with peer via requesting another HELLO handshake. + Hello(PeerId), RecentRequest(PeerId, RecentBeaconBlocksRequest), DownvotePeer(PeerId), } - -pub struct ImportManager { +pub struct ImportManager { /// A reference to the underlying beacon chain. 
chain: Arc>, - state: MangerState, - import_queue: HashMap, - parent_queue: Vec, - full_peers: Hashset, + state: ManagerState, + import_queue: HashMap>, + parent_queue: Vec>, + full_peers: HashSet, current_req_id: usize, log: Logger, } -impl ImportManager { +impl ImportManager { + pub fn new(beacon_chain: Arc>, log: &slog::Logger) -> Self { + ImportManager { + chain: beacon_chain.clone(), + state: ManagerState::Regular, + import_queue: HashMap::new(), + parent_queue: Vec::new(), + full_peers: HashSet::new(), + current_req_id: 0, + log: log.clone(), + } + } - pub fn add_peer(&mut self, peer_id, remote: PeerSyncInfo) { + pub fn add_peer(&mut self, peer_id: PeerId, remote: PeerSyncInfo) { // TODO: Improve comments. // initially try to download blocks from our current head // then backwards search all the way back to our finalized epoch until we match on a chain // has to be done sequentially to find next slot to start the batch from - + let local = PeerSyncInfo::from(&self.chain); // If a peer is within SLOT_IMPORT_TOLERANCE from out head slot, ignore a batch sync - if remote.head_slot.sub(local.head_slot) < SLOT_IMPORT_TOLERANCE { + if remote.head_slot.sub(local.head_slot).as_usize() < SLOT_IMPORT_TOLERANCE { trace!(self.log, "Ignoring full sync with peer"; - "peer" => peer_id, - "peer_head_slot" => remote.head_slot, - "local_head_slot" => local.head_slot, - ); + "peer" => format!("{:?}", peer_id), + "peer_head_slot" => remote.head_slot, + "local_head_slot" => local.head_slot, + ); // remove the peer from the queue if it exists - self.import_queue.remove(&peer_id); + self.import_queue.remove(&peer_id); return; } if let Some(block_requests) = self.import_queue.get_mut(&peer_id) { // update the target head slot - if remote.head_slot > requested_block.target_head_slot { + if remote.head_slot > block_requests.target_head_slot { block_requests.target_head_slot = remote.head_slot; } - } else { + } else { let block_requests = BlockRequests { target_head_slot: 
remote.head_slot, // this should be larger than the current head. It is checked in the SyncManager before add_peer is called target_head_root: remote.head_root, downloaded_blocks: Vec::new(), - state: RequestedBlockState::Queued - } + state: BlockRequestsState::QueuedForward, + }; self.import_queue.insert(peer_id, block_requests); } - } - pub fn beacon_blocks_response(peer_id: PeerId, request_id: RequestId, blocks: Vec) { - + pub fn beacon_blocks_response( + &mut self, + peer_id: PeerId, + request_id: RequestId, + mut blocks: Vec>, + ) { // find the request - let block_requests = match self.import_queue.get_mut(&peer_id) { - Some(req) if req.state = RequestedBlockState::Pending(request_id) => req, - None => { + let block_requests = match self + .import_queue + .get_mut(&peer_id) + .filter(|r| r.state == BlockRequestsState::Pending(request_id)) + { + Some(req) => req, + _ => { // No pending request, invalid request_id or coding error warn!(self.log, "BeaconBlocks response unknown"; "request_id" => request_id); return; @@ -142,100 +177,115 @@ impl ImportManager { if blocks.is_empty() { warn!(self.log, "BeaconBlocks response was empty"; "request_id" => request_id); - block_requests.state = RequestedBlockState::Failed; + block_requests.state = BlockRequestsState::Failed; return; } // Add the newly downloaded blocks to the current list of downloaded blocks. This also // determines if we are syncing forward or backward. 
let syncing_forwards = { - if block_requests.blocks.is_empty() { - block_requests.blocks.push(blocks); + if block_requests.downloaded_blocks.is_empty() { + block_requests.downloaded_blocks.append(&mut blocks); true - } - else if block_requests.blocks[0].slot < blocks[0].slot { // syncing forwards - // verify the peer hasn't sent overlapping blocks - ensuring the strictly - // increasing blocks in a batch will be verified during the processing - if block_requests.next_slot() > blocks[0].slot { - warn!(self.log, "BeaconBlocks response returned duplicate blocks", "request_id" => request_id, "response_initial_slot" => blocks[0].slot, "requested_initial_slot" => block_requests.next_slot()); - block_requests.state = RequestedBlockState::Failed; - return; - } - - block_requests.blocks.push(blocks); - true + } else if block_requests.downloaded_blocks[0].slot < blocks[0].slot { + // syncing forwards + // verify the peer hasn't sent overlapping blocks - ensuring the strictly + // increasing blocks in a batch will be verified during the processing + if block_requests.next_start_slot() > Some(blocks[0].slot) { + warn!(self.log, "BeaconBlocks response returned duplicate blocks"; "request_id" => request_id, "response_initial_slot" => blocks[0].slot, "requested_initial_slot" => block_requests.next_start_slot()); + block_requests.state = BlockRequestsState::Failed; + return; } - else { false } + + block_requests.downloaded_blocks.append(&mut blocks); + true + } else { + false + } }; - // Determine if more blocks need to be downloaded. There are a few cases: // - We have downloaded a batch from our head_slot, which has not reached the remotes head // (target head). Therefore we need to download another sequential batch. // - The latest batch includes blocks that greater than or equal to the target_head slot, - // which means we have caught up to their head. We then check to see if the first + // which means we have caught up to their head. 
We then check to see if the first // block downloaded matches our head. If so, we are on the same chain and can process // the blocks. If not we need to sync back further until we are on the same chain. So // request more blocks. // - We are syncing backwards (from our head slot) and need to check if we are on the same // chain. If so, process the blocks, if not, request more blocks all the way up to // our last finalized slot. - + if syncing_forwards { // does the batch contain the target_head_slot - let last_element_index = block_requests.blocks.len()-1; - if block_requests[last_element_index].slot >= block_requests.target_slot { + let last_element_index = block_requests.downloaded_blocks.len() - 1; + if block_requests.downloaded_blocks[last_element_index].slot + >= block_requests.target_head_slot + { // if the batch is on our chain, this is complete and we can then process. // Otherwise start backwards syncing until we reach a common chain. - let earliest_slot = block_requests_blocks[0].slot - if block_requests.blocks[0] == self.chain.get_block_by_slot(earliest_slot) { - block_requests.state = RequestedBlockState::Complete; + let earliest_slot = block_requests.downloaded_blocks[0].slot; + //TODO: Decide which is faster. Reading block from db and comparing or calculating + //the hash tree root and comparing. 
+ if Some(block_requests.downloaded_blocks[0].canonical_root()) + == root_at_slot(self.chain, earliest_slot) + { + block_requests.state = BlockRequestsState::Complete; return; } // not on the same chain, request blocks backwards - // binary search, request half the distance between the earliest block and our - // finalized slot - let state = &beacon_chain.head().beacon_state; - let local_finalized_slot = state.finalized_checkpoint.epoch; //TODO: Convert to slot - // check that the request hasn't failed by having no common chain - if local_finalized_slot >= block_requests.blocks[0] { + let state = &self.chain.head().beacon_state; + let local_finalized_slot = state + .finalized_checkpoint + .epoch + .start_slot(T::EthSpec::slots_per_epoch()); + + // check that the request hasn't failed by having no common chain + if local_finalized_slot >= block_requests.downloaded_blocks[0].slot { warn!(self.log, "Peer returned an unknown chain."; "request_id" => request_id); - block_requests.state = RequestedBlockState::Failed; + block_requests.state = BlockRequestsState::Failed; return; } - // Start a backwards sync by requesting earlier blocks + // Start a backwards sync by requesting earlier blocks // There can be duplication in downloaded blocks here if there are a large number // of skip slots. In all cases we at least re-download the earliest known block. // It is unlikely that a backwards sync in required, so we accept this duplication // for now. - block_requests.state = RequestedBlockState::QueuedBackward; + block_requests.state = BlockRequestsState::QueuedBackward; + } else { + // batch doesn't contain the head slot, request the next batch + block_requests.state = BlockRequestsState::QueuedForward; } - else { - // batch doesn't contain the head slot, request the next batch - block_requests.state = RequestedBlockState::QueuedForward; - } - } - else { + } else { // syncing backwards // if the batch is on our chain, this is complete and we can then process. 
// Otherwise continue backwards - let earliest_slot = block_requests_blocks[0].slot - if block_requests.blocks[0] == self.chain.get_block_by_slot(earliest_slot) { - block_requests.state = RequestedBlockState::Complete; + let earliest_slot = block_requests.downloaded_blocks[0].slot; + if Some(block_requests.downloaded_blocks[0].canonical_root()) + == root_at_slot(self.chain, earliest_slot) + { + block_requests.state = BlockRequestsState::Complete; return; } - block_requests.state = RequestedBlockState::QueuedBackward; - + block_requests.state = BlockRequestsState::QueuedBackward; } } - pub fn recent_blocks_response(peer_id: PeerId, request_id: RequestId, blocks: Vec) { - + pub fn recent_blocks_response( + &mut self, + peer_id: PeerId, + request_id: RequestId, + blocks: Vec>, + ) { // find the request - let parent_request = match self.parent_queue.get_mut(&peer_id) { - Some(req) if req.state = RequestedBlockState::Pending(request_id) => req, + let parent_request = match self + .parent_queue + .iter_mut() + .find(|request| request.state == BlockRequestsState::Pending(request_id)) + { + Some(req) => req, None => { // No pending request, invalid request_id or coding error warn!(self.log, "RecentBeaconBlocks response unknown"; "request_id" => request_id); @@ -245,8 +295,8 @@ impl ImportManager { // if an empty response is given, the peer didn't have the requested block, try again if blocks.is_empty() { - parent_request.attempts += 1; - parent_request.state = RequestedBlockState::QueuedForward; + parent_request.failed_attempts += 1; + parent_request.state = BlockRequestsState::QueuedForward; parent_request.last_submitted_peer = peer_id; return; } @@ -256,29 +306,27 @@ impl ImportManager { if blocks.len() != 1 { //TODO: Potentially downvote the peer debug!(self.log, "Peer sent more than 1 parent. 
Ignoring"; - "peer_id" => peer_id, - "no_parents" => blocks.len() - ); + "peer_id" => format!("{:?}", peer_id), + "no_parents" => blocks.len() + ); return; } - // queue for processing - parent_request.state = RequestedBlockState::Complete; + parent_request.state = BlockRequestsState::Complete; } - pub fn inject_error(peer_id: PeerId, id: RequestId) { //TODO: Remove block state from pending } - pub fn peer_disconnect(peer_id: PeerId) { - self.import_queue.remove(&peer_id); - self.full_peers.remove(&peer_id); + pub fn peer_disconnect(&mut self, peer_id: &PeerId) { + self.import_queue.remove(peer_id); + self.full_peers.remove(peer_id); self.update_state(); } - pub fn add_full_peer(peer_id: PeerId) { + pub fn add_full_peer(&mut self, peer_id: PeerId) { debug!( self.log, "Fully synced peer added"; "peer" => format!("{:?}", peer_id), @@ -287,32 +335,36 @@ impl ImportManager { self.update_state(); } - pub fn add_unknown_block(&mut self,block: BeaconBlock) { + pub fn add_unknown_block(&mut self, block: BeaconBlock, peer_id: PeerId) { // if we are not in regular sync mode, ignore this block - if self.state == ManagerState::Regular { + if let ManagerState::Regular = self.state { return; } // make sure this block is not already being searched for // TODO: Potentially store a hashset of blocks for O(1) lookups for parent_req in self.parent_queue.iter() { - if let Some(_) = parent_req.downloaded_blocks.iter().find(|d_block| d_block == block) { + if let Some(_) = parent_req + .downloaded_blocks + .iter() + .find(|d_block| d_block == &&block) + { // we are already searching for this block, ignore it return; } } - let req = ParentRequests { + let req = ParentRequests { downloaded_blocks: vec![block], failed_attempts: 0, - state: RequestedBlockState::QueuedBackward - } + last_submitted_peer: peer_id, + state: BlockRequestsState::QueuedBackward, + }; self.parent_queue.push(req); } - pub fn poll() -> ImportManagerOutcome { - + pub fn poll(&mut self) -> ImportManagerOutcome { loop { 
// update the state of the manager self.update_state(); @@ -336,304 +388,340 @@ impl ImportManager { if let (re_run, outcome) = self.process_complete_parent_requests() { if let Some(outcome) = outcome { return outcome; - } - else if !re_run { + } else if !re_run { break; } } } - - return ImportManagerOutcome::Idle; + return ImportManagerOutcome::Idle; } - fn update_state(&mut self) { - let previous_state = self.state; + let previous_state = self.state.clone(); self.state = { if !self.import_queue.is_empty() { ManagerState::Syncing + } else if !self.full_peers.is_empty() { + ManagerState::Regular + } else { + ManagerState::Stalled } - else if !self.full_peers.is_empty() { - ManagerState::Regualar - } - else { - ManagerState::Stalled } }; if self.state != previous_state { - info!(self.log, "Syncing state updated", - "old_state" => format!("{:?}", previous_state) - "new_state" => format!("{:?}", self.state) - ); + info!(self.log, "Syncing state updated"; + "old_state" => format!("{:?}", previous_state), + "new_state" => format!("{:?}", self.state), + ); } } - - - fn process_potential_block_requests(&mut self) -> Option { + fn process_potential_block_requests(&mut self) -> Option { // check if an outbound request is required // Managing a fixed number of outbound requests is maintained at the RPC protocol libp2p // layer and not needed here. - // If any in queued state we submit a request. - + // If any in queued state we submit a request. 
// remove any failed batches self.import_queue.retain(|peer_id, block_request| { - if block_request.state == RequestedBlockState::Failed { - debug!(self.log, "Block import from peer failed", - "peer_id" => peer_id, - "downloaded_blocks" => block_request.downloaded.blocks.len() - ); + if let BlockRequestsState::Failed = block_request.state { + debug!(self.log, "Block import from peer failed"; + "peer_id" => format!("{:?}", peer_id), + "downloaded_blocks" => block_request.downloaded_blocks.len() + ); false + } else { + true } - else { true } }); + // process queued block requests + for (peer_id, block_requests) in self.import_queue.iter_mut().find(|(_peer_id, req)| { + req.state == BlockRequestsState::QueuedForward + || req.state == BlockRequestsState::QueuedBackward + }) { + let request_id = self.current_req_id; + block_requests.state = BlockRequestsState::Pending(request_id); + self.current_req_id += 1; - for (peer_id, block_requests) in self.import_queue.iter_mut() { - if let Some(request) = requests.iter().find(|req| req.state == RequestedBlockState::QueuedForward || req.state == RequestedBlockState::QueuedBackward) { - - let request.state = RequestedBlockState::Pending(self.current_req_id); - self.current_req_id +=1; - - let req = BeaconBlocksRequest { - head_block_root: request.target_root, - start_slot: request.next_start_slot().unwrap_or_else(|| self.chain.head().slot), - count: MAX_BLOCKS_PER_REQUEST, - step: 0 - } - return Some(ImportManagerOutCome::RequestBlocks{ peer_id, req }); - } + let request = BeaconBlocksRequest { + head_block_root: block_requests.target_head_root, + start_slot: block_requests + .next_start_slot() + .unwrap_or_else(|| self.chain.best_slot()) + .as_u64(), + count: MAX_BLOCKS_PER_REQUEST, + step: 0, + }; + return Some(ImportManagerOutcome::RequestBlocks { + peer_id: peer_id.clone(), + request, + request_id, + }); } None } fn process_complete_batches(&mut self) -> Option { - - let completed_batches = 
self.import_queue.iter().filter(|_peer, block_requests| block_requests.state == RequestedState::Complete).map(|peer, _| peer).collect::>(); + let completed_batches = self + .import_queue + .iter() + .filter(|(_peer, block_requests)| block_requests.state == BlockRequestsState::Complete) + .map(|(peer, _)| peer) + .cloned() + .collect::>(); for peer_id in completed_batches { - let block_requests = self.import_queue.remove(&peer_id).unwrap("key exists"); - match self.process_blocks(block_requests.downloaded_blocks) { - Ok(()) => { - //TODO: Verify it's impossible to have empty downloaded_blocks - last_element = block_requests.downloaded_blocks.len() -1 - debug!(self.log, "Blocks processed successfully"; - "peer" => peer_id, - "start_slot" => block_requests.downloaded_blocks[0].slot, - "end_slot" => block_requests.downloaded_blocks[last_element].slot, - "no_blocks" => last_element + 1, - ); - // Re-HELLO to ensure we are up to the latest head - return Some(ImportManagerOutcome::Hello(peer_id)); - } - Err(e) => { - last_element = block_requests.downloaded_blocks.len() -1 - warn!(self.log, "Block processing failed"; - "peer" => peer_id, - "start_slot" => block_requests.downloaded_blocks[0].slot, - "end_slot" => block_requests.downloaded_blocks[last_element].slot, - "no_blocks" => last_element + 1, - "error" => format!("{:?}", e), - ); - return Some(ImportManagerOutcome::DownvotePeer(peer_id)); - } + let block_requests = self.import_queue.remove(&peer_id).expect("key exists"); + match self.process_blocks(block_requests.downloaded_blocks.clone()) { + Ok(()) => { + //TODO: Verify it's impossible to have empty downloaded_blocks + let last_element = block_requests.downloaded_blocks.len() - 1; + debug!(self.log, "Blocks processed successfully"; + "peer" => format!("{:?}", peer_id), + "start_slot" => block_requests.downloaded_blocks[0].slot, + "end_slot" => block_requests.downloaded_blocks[last_element].slot, + "no_blocks" => last_element + 1, + ); + // Re-HELLO to ensure we 
are up to the latest head + return Some(ImportManagerOutcome::Hello(peer_id)); } + Err(e) => { + let last_element = block_requests.downloaded_blocks.len() - 1; + warn!(self.log, "Block processing failed"; + "peer" => format!("{:?}", peer_id), + "start_slot" => block_requests.downloaded_blocks[0].slot, + "end_slot" => block_requests.downloaded_blocks[last_element].slot, + "no_blocks" => last_element + 1, + "error" => format!("{:?}", e), + ); + return Some(ImportManagerOutcome::DownvotePeer(peer_id)); + } + } } None } - fn process_parent_requests(&mut self) -> Option { - // remove any failed requests self.parent_queue.retain(|parent_request| { - if parent_request.state == RequestedBlockState::Failed { - debug!(self.log, "Parent import failed", - "block" => parent_request.downloaded_blocks[0].hash, - "siblings found" => parent_request.len() - ); + if parent_request.state == BlockRequestsState::Failed { + debug!(self.log, "Parent import failed"; + "block" => format!("{:?}",parent_request.downloaded_blocks[0].canonical_root()), + "ancestors_found" => parent_request.downloaded_blocks.len() + ); false + } else { + true } - else { true } }); // check to make sure there are peers to search for the parent from if self.full_peers.is_empty() { - return; + return None; } // check if parents need to be searched for for parent_request in self.parent_queue.iter_mut() { if parent_request.failed_attempts >= PARENT_FAIL_TOLERANCE { - parent_request.state == BlockRequestsState::Failed - continue; - } - else if parent_request.state == BlockRequestsState::QueuedForward { + parent_request.state == BlockRequestsState::Failed; + continue; + } else if parent_request.state == BlockRequestsState::QueuedForward { parent_request.state = BlockRequestsState::Pending(self.current_req_id); - self.current_req_id +=1; - let parent_hash = + self.current_req_id += 1; + let last_element_index = parent_request.downloaded_blocks.len() - 1; + let parent_hash = 
parent_request.downloaded_blocks[last_element_index].parent_root; let req = RecentBeaconBlocksRequest { block_roots: vec![parent_hash], }; // select a random fully synced peer to attempt to download the parent block - let peer_id = self.full_peers.iter().next().expect("List is not empty"); + let peer_id = self.full_peers.iter().next().expect("List is not empty"); - return Some(ImportManagerOutcome::RecentRequest(peer_id, req); + return Some(ImportManagerOutcome::RecentRequest(peer_id.clone(), req)); } } None - } - - - fn process_complete_parent_requests(&mut self) => (bool, Option) { + } + fn process_complete_parent_requests(&mut self) -> (bool, Option) { // flag to determine if there is more process to drive or if the manager can be switched to // an idle state - let mut re_run = false; - - // verify the last added block is the parent of the last requested block - let last_index = parent_requests.downloaded_blocks.len() -1; - let expected_hash = parent_requests.downloaded_blocks[last_index].parent ; - let block_hash = parent_requests.downloaded_blocks[0].tree_hash_root(); - if block_hash != expected_hash { - //TODO: Potentially downvote the peer - debug!(self.log, "Peer sent invalid parent. Ignoring"; - "peer_id" => peer_id, - "received_block" => block_hash, - "expected_parent" => expected_hash, - ); - return; - } + let mut re_run = false; // Find any parent_requests ready to be processed - for completed_request in self.parent_queue.iter_mut().filter(|req| req.state == BlockRequestsState::Complete) { + for completed_request in self + .parent_queue + .iter_mut() + .filter(|req| req.state == BlockRequestsState::Complete) + { + // verify the last added block is the parent of the last requested block + let last_index = completed_request.downloaded_blocks.len() - 1; + let expected_hash = completed_request.downloaded_blocks[last_index].parent_root; + // Note: the length must be greater than 1 so this cannot panic. 
+ let block_hash = completed_request.downloaded_blocks[last_index - 1].canonical_root(); + if block_hash != expected_hash { + // remove the head block + let _ = completed_request.downloaded_blocks.pop(); + completed_request.state = BlockRequestsState::QueuedForward; + //TODO: Potentially downvote the peer + let peer = completed_request.last_submitted_peer.clone(); + debug!(self.log, "Peer sent invalid parent. Ignoring"; + "peer_id" => format!("{:?}",peer), + "received_block" => format!("{}", block_hash), + "expected_parent" => format!("{}", expected_hash), + ); + return (true, Some(ImportManagerOutcome::DownvotePeer(peer))); + } + // try and process the list of blocks up to the requested block while !completed_request.downloaded_blocks.is_empty() { - let block = completed_request.downloaded_blocks.pop(); - match self.chain_process_block(block.clone()) { - Ok(BlockProcessingOutcome::ParentUnknown { parent } => { + let block = completed_request + .downloaded_blocks + .pop() + .expect("Block must exist exist"); + match self.chain.process_block(block.clone()) { + Ok(BlockProcessingOutcome::ParentUnknown { parent: _ }) => { // need to keep looking for parents completed_request.downloaded_blocks.push(block); completed_request.state == BlockRequestsState::QueuedForward; re_run = true; break; } - Ok(BlockProcessingOutcome::Processed { _ } => { } - Ok(outcome) => { // it's a future slot or an invalid block, remove it and try again - completed_request.failed_attempts +=1; + Ok(BlockProcessingOutcome::Processed { block_root: _ }) => {} + Ok(outcome) => { + // it's a future slot or an invalid block, remove it and try again + completed_request.failed_attempts += 1; trace!( self.log, "Invalid parent block"; - "outcome" => format!("{:?}", outcome); + "outcome" => format!("{:?}", outcome), "peer" => format!("{:?}", completed_request.last_submitted_peer), ); completed_request.state == BlockRequestsState::QueuedForward; re_run = true; - return (re_run, 
Some(ImportManagerOutcome::DownvotePeer(completed_request.last_submitted_peer))); + return ( + re_run, + Some(ImportManagerOutcome::DownvotePeer( + completed_request.last_submitted_peer.clone(), + )), + ); } - Err(e) => { - completed_request.failed_attempts +=1; + Err(e) => { + completed_request.failed_attempts += 1; warn!( self.log, "Parent processing error"; - "error" => format!("{:?}", e); + "error" => format!("{:?}", e) ); completed_request.state == BlockRequestsState::QueuedForward; re_run = true; - return (re_run, Some(ImportManagerOutcome::DownvotePeer(completed_request.last_submitted_peer))); - } + return ( + re_run, + Some(ImportManagerOutcome::DownvotePeer( + completed_request.last_submitted_peer.clone(), + )), + ); } + } } } // remove any full completed and processed parent chains - self.parent_queue.retain(|req| if req.state == BlockRequestsState::Complete { false } else { true }); + self.parent_queue.retain(|req| { + if req.state == BlockRequestsState::Complete { + false + } else { + true + } + }); (re_run, None) - } - - fn process_blocks( - &mut self, - blocks: Vec>, - ) -> Result<(), String> { - + fn process_blocks(&mut self, blocks: Vec>) -> Result<(), String> { for block in blocks { - let processing_result = self.chain.process_block(block.clone()); + let processing_result = self.chain.process_block(block.clone()); - if let Ok(outcome) = processing_result { - match outcome { - BlockProcessingOutcome::Processed { block_root } => { - // The block was valid and we processed it successfully. 
- trace!( - self.log, "Imported block from network"; - "source" => source, - "slot" => block.slot, - "block_root" => format!("{}", block_root), - "peer" => format!("{:?}", peer_id), - ); - } - BlockProcessingOutcome::ParentUnknown { parent } => { - // blocks should be sequential and all parents should exist - trace!( - self.log, "ParentBlockUnknown"; - "source" => source, - "parent_root" => format!("{}", parent), - "baby_block_slot" => block.slot, - ); - return Err(format!("Block at slot {} has an unknown parent.", block.slot)); - } - BlockProcessingOutcome::FutureSlot { - present_slot, - block_slot, - } => { - if present_slot + FUTURE_SLOT_TOLERANCE >= block_slot { - // The block is too far in the future, drop it. + if let Ok(outcome) = processing_result { + match outcome { + BlockProcessingOutcome::Processed { block_root } => { + // The block was valid and we processed it successfully. trace!( - self.log, "FutureBlock"; - "source" => source, - "msg" => "block for future slot rejected, check your time", - "present_slot" => present_slot, - "block_slot" => block_slot, - "FUTURE_SLOT_TOLERANCE" => FUTURE_SLOT_TOLERANCE, - "peer" => format!("{:?}", peer_id), - ); - return Err(format!("Block at slot {} is too far in the future", block.slot)); - } else { - // The block is in the future, but not too far. 
- trace!( - self.log, "QueuedFutureBlock"; - "source" => source, - "msg" => "queuing future block, check your time", - "present_slot" => present_slot, - "block_slot" => block_slot, - "FUTURE_SLOT_TOLERANCE" => FUTURE_SLOT_TOLERANCE, - "peer" => format!("{:?}", peer_id), + self.log, "Imported block from network"; + "slot" => block.slot, + "block_root" => format!("{}", block_root), ); } + BlockProcessingOutcome::ParentUnknown { parent } => { + // blocks should be sequential and all parents should exist + trace!( + self.log, "ParentBlockUnknown"; + "parent_root" => format!("{}", parent), + "baby_block_slot" => block.slot, + ); + return Err(format!( + "Block at slot {} has an unknown parent.", + block.slot + )); + } + BlockProcessingOutcome::FutureSlot { + present_slot, + block_slot, + } => { + if present_slot + FUTURE_SLOT_TOLERANCE >= block_slot { + // The block is too far in the future, drop it. + trace!( + self.log, "FutureBlock"; + "msg" => "block for future slot rejected, check your time", + "present_slot" => present_slot, + "block_slot" => block_slot, + "FUTURE_SLOT_TOLERANCE" => FUTURE_SLOT_TOLERANCE, + ); + return Err(format!( + "Block at slot {} is too far in the future", + block.slot + )); + } else { + // The block is in the future, but not too far. 
+ trace!( + self.log, "QueuedFutureBlock"; + "msg" => "queuing future block, check your time", + "present_slot" => present_slot, + "block_slot" => block_slot, + "FUTURE_SLOT_TOLERANCE" => FUTURE_SLOT_TOLERANCE, + ); + } + } + _ => { + trace!( + self.log, "InvalidBlock"; + "msg" => "peer sent invalid block", + "outcome" => format!("{:?}", outcome), + ); + return Err(format!("Invalid block at slot {}", block.slot)); + } } - _ => { - trace!( - self.log, "InvalidBlock"; - "source" => source, - "msg" => "peer sent invalid block", - "outcome" => format!("{:?}", outcome), - "peer" => format!("{:?}", peer_id), - ); - return Err(format!("Invalid block at slot {}", block.slot)); - } + } else { + trace!( + self.log, "BlockProcessingFailure"; + "msg" => "unexpected condition in processing block.", + "outcome" => format!("{:?}", processing_result) + ); + return Err(format!( + "Unexpected block processing error: {:?}", + processing_result + )); } - Ok(()) - } else { - trace!( - self.log, "BlockProcessingFailure"; - "source" => source, - "msg" => "unexpected condition in processing block.", - "outcome" => format!("{:?}", processing_result) - ); - return Err(format!("Unexpected block processing error: {:?}", processing_result)); } - } + Ok(()) } } + +fn root_at_slot( + chain: Arc>, + target_slot: Slot, +) -> Option { + chain + .rev_iter_block_roots() + .find(|(_root, slot)| *slot == target_slot) + .map(|(root, _slot)| root) +} diff --git a/beacon_node/network/src/sync/mod.rs b/beacon_node/network/src/sync/mod.rs index fac1b46eb..b26d78c14 100644 --- a/beacon_node/network/src/sync/mod.rs +++ b/beacon_node/network/src/sync/mod.rs @@ -1,4 +1,4 @@ -mod import_queue; +mod manager; /// Syncing for lighthouse. /// /// Stores the various syncing methods for the beacon chain. 
diff --git a/beacon_node/network/src/sync/simple_sync.rs b/beacon_node/network/src/sync/simple_sync.rs index a7f5ced40..deadf214d 100644 --- a/beacon_node/network/src/sync/simple_sync.rs +++ b/beacon_node/network/src/sync/simple_sync.rs @@ -1,8 +1,9 @@ -use super::import_queue::{ImportQueue, PartialBeaconBlockCompletion}; -use crate::message_handler::NetworkContext; +use super::manager::{ImportManager, ImportManagerOutcome}; +use crate::service::{NetworkMessage, OutgoingMessage}; use beacon_chain::{BeaconChain, BeaconChainTypes, BlockProcessingOutcome}; use eth2_libp2p::rpc::methods::*; -use eth2_libp2p::rpc::{RPCRequest, RPCResponse, RequestId}; +use eth2_libp2p::rpc::methods::*; +use eth2_libp2p::rpc::{RPCEvent, RPCRequest, RPCResponse, RequestId}; use eth2_libp2p::PeerId; use slog::{debug, error, info, o, trace, warn}; use ssz::Encode; @@ -10,14 +11,14 @@ use std::collections::HashMap; use std::sync::Arc; use std::time::Duration; use store::Store; +use tokio::sync::mpsc; use types::{ Attestation, BeaconBlock, BeaconBlockBody, BeaconBlockHeader, Epoch, EthSpec, Hash256, Slot, }; - /// If a block is more than `FUTURE_SLOT_TOLERANCE` slots ahead of our slot clock, we drop it. /// Otherwise we queue it. -const FUTURE_SLOT_TOLERANCE: u64 = 1; +pub(crate) const FUTURE_SLOT_TOLERANCE: u64 = 1; const SHOULD_FORWARD_GOSSIP_BLOCK: bool = true; const SHOULD_NOT_FORWARD_GOSSIP_BLOCK: bool = false; @@ -25,16 +26,13 @@ const SHOULD_NOT_FORWARD_GOSSIP_BLOCK: bool = false; /// Keeps track of syncing information for known connected peers. 
#[derive(Clone, Copy, Debug)] pub struct PeerSyncInfo { - fork_version: [u8,4], - finalized_root: Hash256, - finalized_epoch: Epoch, - head_root: Hash256, - head_slot: Slot, + fork_version: [u8; 4], + pub finalized_root: Hash256, + pub finalized_epoch: Epoch, + pub head_root: Hash256, + pub head_slot: Slot, } - - - impl From for PeerSyncInfo { fn from(hello: HelloMessage) -> PeerSyncInfo { PeerSyncInfo { @@ -43,7 +41,6 @@ impl From for PeerSyncInfo { finalized_epoch: hello.finalized_epoch, head_root: hello.head_root, head_slot: hello.head_slot, - requested_slot_skip: None, } } } @@ -66,18 +63,24 @@ pub enum SyncState { pub struct SimpleSync { /// A reference to the underlying beacon chain. chain: Arc>, - manager: ImportManager, + manager: ImportManager, + network: NetworkContext, log: slog::Logger, } impl SimpleSync { /// Instantiate a `SimpleSync` instance, with no peers and an empty queue. - pub fn new(beacon_chain: Arc>, log: &slog::Logger) -> Self { + pub fn new( + beacon_chain: Arc>, + network_send: mpsc::UnboundedSender, + log: &slog::Logger, + ) -> Self { let sync_logger = log.new(o!("Service"=> "Sync")); SimpleSync { chain: beacon_chain.clone(), - manager: ImportManager::new(), + manager: ImportManager::new(beacon_chain, log), + network: NetworkContext::new(network_send, log.clone()), log: sync_logger, } } @@ -92,8 +95,9 @@ impl SimpleSync { /// Handle the connection of a new peer. /// /// Sends a `Hello` message to the peer. - pub fn on_connect(&self, peer_id: PeerId, network: &mut NetworkContext) { - network.send_rpc_request(peer_id, RPCRequest::Hello(hello_message(&self.chain))); + pub fn on_connect(&mut self, peer_id: PeerId) { + self.network + .send_rpc_request(peer_id, RPCRequest::Hello(hello_message(&self.chain))); } /// Handle a `Hello` request. 
@@ -104,42 +108,31 @@ impl SimpleSync { peer_id: PeerId, request_id: RequestId, hello: HelloMessage, - network: &mut NetworkContext, ) { trace!(self.log, "HelloRequest"; "peer" => format!("{:?}", peer_id)); // Say hello back. - network.send_rpc_response( + self.network.send_rpc_response( peer_id.clone(), request_id, RPCResponse::Hello(hello_message(&self.chain)), ); - self.process_hello(peer_id, hello, network); + self.process_hello(peer_id, hello); } /// Process a `Hello` response from a peer. - pub fn on_hello_response( - &mut self, - peer_id: PeerId, - hello: HelloMessage, - network: &mut NetworkContext, - ) { + pub fn on_hello_response(&mut self, peer_id: PeerId, hello: HelloMessage) { trace!(self.log, "HelloResponse"; "peer" => format!("{:?}", peer_id)); // Process the hello message, without sending back another hello. - self.process_hello(peer_id, hello, network); + self.process_hello(peer_id, hello); } /// Process a `Hello` message, requesting new blocks if appropriate. /// /// Disconnects the peer if required. - fn process_hello( - &mut self, - peer_id: PeerId, - hello: HelloMessage, - network: &mut NetworkContext, - ) { + fn process_hello(&mut self, peer_id: PeerId, hello: HelloMessage) { let remote = PeerSyncInfo::from(hello); let local = PeerSyncInfo::from(&self.chain); @@ -153,12 +146,13 @@ impl SimpleSync { "reason" => "network_id" ); - network.disconnect(peer_id.clone(), GoodbyeReason::IrrelevantNetwork); + self.network + .disconnect(peer_id.clone(), GoodbyeReason::IrrelevantNetwork); } else if remote.finalized_epoch <= local.finalized_epoch && remote.finalized_root != Hash256::zero() && local.finalized_root != Hash256::zero() - && (self.root_at_slot(start_slot(remote.latest_finalized_epoch)) - != Some(remote.latest_finalized_root)) + && (self.root_at_slot(start_slot(remote.finalized_epoch)) + != Some(remote.finalized_root)) { // The remotes finalized epoch is less than or greater than ours, but the block root is // different to the one in our chain. 
@@ -169,8 +163,9 @@ impl SimpleSync { "peer" => format!("{:?}", peer_id), "reason" => "different finalized chain" ); - network.disconnect(peer_id.clone(), GoodbyeReason::IrrelevantNetwork); - } else if remote.latest_finalized_epoch < local.latest_finalized_epoch { + self.network + .disconnect(peer_id.clone(), GoodbyeReason::IrrelevantNetwork); + } else if remote.finalized_epoch < local.finalized_epoch { // The node has a lower finalized epoch, their chain is not useful to us. There are two // cases where a node can have a lower finalized epoch: // @@ -193,12 +188,12 @@ impl SimpleSync { } else if self .chain .store - .exists::>(&remote.best_root) + .exists::>(&remote.head_root) .unwrap_or_else(|_| false) { // If the node's best-block is already known to us and they are close to our current // head, treat them as a fully sync'd peer. - self.import_manager.add_full_peer(peer_id); + self.manager.add_full_peer(peer_id); self.process_sync(); } else { // The remote node has an equal or great finalized epoch and we don't know it's head. 
@@ -208,29 +203,45 @@ impl SimpleSync { debug!( self.log, "UsefulPeer"; "peer" => format!("{:?}", peer_id), - "local_finalized_epoch" => local.latest_finalized_epoch, - "remote_latest_finalized_epoch" => remote.latest_finalized_epoch, + "local_finalized_epoch" => local.finalized_epoch, + "remote_latest_finalized_epoch" => remote.finalized_epoch, ); - self.import_manager.add_peer(peer_id, remote); + self.manager.add_peer(peer_id, remote); self.process_sync(); } } - self.proess_sync(&mut self) { + fn process_sync(&mut self) { loop { - match self.import_manager.poll() { - ImportManagerOutcome::RequestBlocks(peer_id, req) { + match self.manager.poll() { + ImportManagerOutcome::Hello(peer_id) => { + trace!( + self.log, + "RPC Request"; + "method" => "HELLO", + "peer" => format!("{:?}", peer_id) + ); + self.network + .send_rpc_request(peer_id, RPCRequest::Hello(hello_message(&self.chain))); + } + ImportManagerOutcome::RequestBlocks { + peer_id, + request_id, + request, + } => { trace!( self.log, "RPC Request"; "method" => "BeaconBlocks", - "count" => req.count, + "id" => request_id, + "count" => request.count, "peer" => format!("{:?}", peer_id) ); - network.send_rpc_request(peer_id.clone(), RPCRequest::BeaconBlocks(req)); - }, - ImportManagerOutcome::RecentRequest(peer_id, req) { + self.network + .send_rpc_request(peer_id.clone(), RPCRequest::BeaconBlocks(request)); + } + ImportManagerOutcome::RecentRequest(peer_id, req) => { trace!( self.log, "RPC Request"; @@ -238,18 +249,20 @@ impl SimpleSync { "count" => req.block_roots.len(), "peer" => format!("{:?}", peer_id) ); - network.send_rpc_request(peer_id.clone(), RPCRequest::RecentBeaconBlocks(req)); - }, - ImportManagerOutcome::DownvotePeer(peer_id) { + self.network + .send_rpc_request(peer_id.clone(), RPCRequest::RecentBeaconBlocks(req)); + } + ImportManagerOutcome::DownvotePeer(peer_id) => { trace!( self.log, "Peer downvoted"; "peer" => format!("{:?}", peer_id) ); // TODO: Implement reputation - 
network.disconnect(peer_id.clone(), GoodbyeReason::Fault); - }, - SyncManagerState::Idle { + self.network + .disconnect(peer_id.clone(), GoodbyeReason::Fault); + } + ImportManagerOutcome::Idle => { // nothing to do return; } @@ -257,37 +270,26 @@ impl SimpleSync { } } - - /* fn root_at_slot(&self, target_slot: Slot) -> Option { self.chain .rev_iter_block_roots() .find(|(_root, slot)| *slot == target_slot) .map(|(root, _slot)| root) } - */ - /// Handle a `BeaconBlocks` request from the peer. - pub fn on_beacon_blocks_request( + /// Handle a `RecentBeaconBlocks` request from the peer. + pub fn on_recent_beacon_blocks_request( &mut self, peer_id: PeerId, request_id: RequestId, - req: BeaconBlocksRequest, - network: &mut NetworkContext, + request: RecentBeaconBlocksRequest, ) { - debug!( - self.log, - "BeaconBlocksRequest"; - "peer" => format!("{:?}", peer_id), - "count" => req.count, - "start_slot" => req.start_slot, - ); - - let blocks = Vec> = self - .chain.rev_iter_block_roots().filter(|(_root, slot) req.start_slot <= slot && req.start_slot + req.count >= slot).take_while(|(_root, slot) req.start_slot <= *slot) - .filter_map(|root, slot| { + let blocks: Vec> = request + .block_roots + .iter() + .filter_map(|root| { if let Ok(Some(block)) = self.chain.store.get::>(root) { - Some(block.body) + Some(block) } else { debug!( self.log, @@ -301,10 +303,63 @@ impl SimpleSync { }) .collect(); - roots.reverse(); - roots.dedup_by_key(|brs| brs.block_root); + debug!( + self.log, + "BlockBodiesRequest"; + "peer" => format!("{:?}", peer_id), + "requested" => request.block_roots.len(), + "returned" => blocks.len(), + ); - if roots.len() as u64 != req.count { + self.network.send_rpc_response( + peer_id, + request_id, + RPCResponse::BeaconBlocks(blocks.as_ssz_bytes()), + ) + } + + /// Handle a `BeaconBlocks` request from the peer. 
+ pub fn on_beacon_blocks_request( + &mut self, + peer_id: PeerId, + request_id: RequestId, + req: BeaconBlocksRequest, + ) { + debug!( + self.log, + "BeaconBlocksRequest"; + "peer" => format!("{:?}", peer_id), + "count" => req.count, + "start_slot" => req.start_slot, + ); + + let mut blocks: Vec> = self + .chain + .rev_iter_block_roots() + .filter(|(_root, slot)| { + req.start_slot <= slot.as_u64() && req.start_slot + req.count >= slot.as_u64() + }) + .take_while(|(_root, slot)| req.start_slot <= slot.as_u64()) + .filter_map(|(root, _slot)| { + if let Ok(Some(block)) = self.chain.store.get::>(&root) { + Some(block) + } else { + debug!( + self.log, + "Peer requested unknown block"; + "peer" => format!("{:?}", peer_id), + "request_root" => format!("{:}", root), + ); + + None + } + }) + .collect(); + + blocks.reverse(); + blocks.dedup_by_key(|brs| brs.slot); + + if blocks.len() as u64 != req.count { debug!( self.log, "BeaconBlocksRequest"; @@ -313,33 +368,33 @@ impl SimpleSync { "start_slot" => req.start_slot, "current_slot" => self.chain.present_slot(), "requested" => req.count, - "returned" => roots.len(), + "returned" => blocks.len(), ); } - network.send_rpc_response( + self.network.send_rpc_response( peer_id, request_id, RPCResponse::BeaconBlocks(blocks.as_ssz_bytes()), ) } - /// Handle a `BeaconBlocks` response from the peer. 
pub fn on_beacon_blocks_response( &mut self, peer_id: PeerId, request_id: RequestId, - res: Vec>, + beacon_blocks: Vec>, ) { debug!( self.log, "BeaconBlocksResponse"; "peer" => format!("{:?}", peer_id), - "count" => res.block_bodies.len(), + "count" => beacon_blocks.len(), ); - self.import_manager.beacon_blocks_response(peer_id, request_id, blocks); + self.manager + .beacon_blocks_response(peer_id, request_id, beacon_blocks); self.process_sync(); } @@ -349,16 +404,17 @@ impl SimpleSync { &mut self, peer_id: PeerId, request_id: RequestId, - res: Vec>, + beacon_blocks: Vec>, ) { debug!( self.log, "BeaconBlocksResponse"; "peer" => format!("{:?}", peer_id), - "count" => res.block_bodies.len(), + "count" => beacon_blocks.len(), ); - self.import_manager.recent_blocks_response(peer_id, request_id, blocks); + self.manager + .recent_blocks_response(peer_id, request_id, beacon_blocks); self.process_sync(); } @@ -368,19 +424,13 @@ impl SimpleSync { /// Attempts to apply to block to the beacon chain. May queue the block for later processing. /// /// Returns a `bool` which, if `true`, indicates we should forward the block to our peers. - pub fn on_block_gossip( - &mut self, - peer_id: PeerId, - block: BeaconBlock, - ) -> bool { - if let Some(outcome) = - self.process_block(peer_id.clone(), block.clone(), network, &"gossip") - { + pub fn on_block_gossip(&mut self, peer_id: PeerId, block: BeaconBlock) -> bool { + if let Ok(outcome) = self.chain.process_block(block.clone()) { match outcome { BlockProcessingOutcome::Processed { .. 
} => SHOULD_FORWARD_GOSSIP_BLOCK, - BlockProcessingOutcome::ParentUnknown { parent } => { + BlockProcessingOutcome::ParentUnknown { parent: _ } => { // Inform the sync manager to find parents for this block - self.import_manager.add_unknown_block(block.clone()); + self.manager.add_unknown_block(block.clone(), peer_id); SHOULD_FORWARD_GOSSIP_BLOCK } BlockProcessingOutcome::FutureSlot { @@ -401,12 +451,7 @@ impl SimpleSync { /// Process a gossip message declaring a new attestation. /// /// Not currently implemented. - pub fn on_attestation_gossip( - &mut self, - _peer_id: PeerId, - msg: Attestation, - _network: &mut NetworkContext, - ) { + pub fn on_attestation_gossip(&mut self, _peer_id: PeerId, msg: Attestation) { match self.chain.process_attestation(msg) { Ok(outcome) => info!( self.log, @@ -420,39 +465,74 @@ impl SimpleSync { } } - -/* - /// Returns `true` if `self.chain` has not yet processed this block. - pub fn chain_has_seen_block(&self, block_root: &Hash256) -> bool { - !self - .chain - .is_new_block_root(&block_root) - .unwrap_or_else(|_| { - error!(self.log, "Unable to determine if block is new."); - false - }) - } - */ - /// Generates our current state in the form of a HELLO RPC message. pub fn generate_hello(&self) -> HelloMessage { hello_message(&self.chain) } - } /// Build a `HelloMessage` representing the state of the given `beacon_chain`. 
fn hello_message(beacon_chain: &BeaconChain) -> HelloMessage { - let spec = &beacon_chain.spec; let state = &beacon_chain.head().beacon_state; HelloMessage { - network_id: spec.network_id, - //TODO: Correctly define the chain id - chain_id: spec.network_id as u64, - latest_finalized_root: state.finalized_checkpoint.root, - latest_finalized_epoch: state.finalized_checkpoint.epoch, - best_root: beacon_chain.head().beacon_block_root, - best_slot: state.slot, + fork_version: state.fork.current_version, + finalized_root: state.finalized_checkpoint.root, + finalized_epoch: state.finalized_checkpoint.epoch, + head_root: beacon_chain.head().beacon_block_root, + head_slot: state.slot, + } +} + +/// Wraps a Network Channel to employ various RPC/Sync related network functionality. +pub struct NetworkContext { + /// The network channel to relay messages to the Network service. + network_send: mpsc::UnboundedSender, + /// Logger for the `NetworkContext`. + log: slog::Logger, +} + +impl NetworkContext { + pub fn new(network_send: mpsc::UnboundedSender, log: slog::Logger) -> Self { + Self { network_send, log } + } + + pub fn disconnect(&mut self, peer_id: PeerId, reason: GoodbyeReason) { + self.send_rpc_request(peer_id, RPCRequest::Goodbye(reason)) + // TODO: disconnect peers. + } + + pub fn send_rpc_request(&mut self, peer_id: PeerId, rpc_request: RPCRequest) { + // Note: There is currently no use of keeping track of requests. However the functionality + // is left here for future revisions. 
+ self.send_rpc_event(peer_id, RPCEvent::Request(0, rpc_request)); + } + + //TODO: Handle Error responses + pub fn send_rpc_response( + &mut self, + peer_id: PeerId, + request_id: RequestId, + rpc_response: RPCResponse, + ) { + self.send_rpc_event( + peer_id, + RPCEvent::Response(request_id, RPCErrorResponse::Success(rpc_response)), + ); + } + + fn send_rpc_event(&mut self, peer_id: PeerId, rpc_event: RPCEvent) { + self.send(peer_id, OutgoingMessage::RPC(rpc_event)) + } + + fn send(&mut self, peer_id: PeerId, outgoing_message: OutgoingMessage) { + self.network_send + .try_send(NetworkMessage::Send(peer_id, outgoing_message)) + .unwrap_or_else(|_| { + warn!( + self.log, + "Could not send RPC message to the network service" + ) + }); } } From 0d56df474a6df70353a89970329f3c08068eef23 Mon Sep 17 00:00:00 2001 From: Age Manning Date: Sun, 25 Aug 2019 00:27:47 +1000 Subject: [PATCH 106/305] Main batch sync debugging --- beacon_node/client/src/lib.rs | 6 +- beacon_node/eth2-libp2p/src/rpc/codec/ssz.rs | 20 +- beacon_node/eth2-libp2p/src/rpc/handler.rs | 4 +- beacon_node/network/src/message_handler.rs | 6 +- beacon_node/network/src/sync/manager.rs | 240 +++++++++---------- beacon_node/network/src/sync/simple_sync.rs | 99 +++++--- 6 files changed, 219 insertions(+), 156 deletions(-) diff --git a/beacon_node/client/src/lib.rs b/beacon_node/client/src/lib.rs index 4b64c1070..7e6449a98 100644 --- a/beacon_node/client/src/lib.rs +++ b/beacon_node/client/src/lib.rs @@ -41,7 +41,7 @@ pub struct Client { /// Signal to terminate the slot timer. pub slot_timer_exit_signal: Option, /// Signal to terminate the API - pub api_exit_signal: Option, + // pub api_exit_signal: Option, /// The clients logger. log: slog::Logger, /// Marker to pin the beacon chain generics. 
@@ -134,6 +134,7 @@ where None }; + /* // Start the `rest_api` service let api_exit_signal = if client_config.rest_api.enabled { match rest_api::start_server( @@ -151,6 +152,7 @@ where } else { None }; + */ let (slot_timer_exit_signal, exit) = exit_future::signal(); if let Ok(Some(duration_to_next_slot)) = beacon_chain.slot_clock.duration_to_next_slot() { @@ -184,7 +186,7 @@ where http_exit_signal, rpc_exit_signal, slot_timer_exit_signal: Some(slot_timer_exit_signal), - api_exit_signal, + //api_exit_signal, log, network, phantom: PhantomData, diff --git a/beacon_node/eth2-libp2p/src/rpc/codec/ssz.rs b/beacon_node/eth2-libp2p/src/rpc/codec/ssz.rs index f7262118d..260a00346 100644 --- a/beacon_node/eth2-libp2p/src/rpc/codec/ssz.rs +++ b/beacon_node/eth2-libp2p/src/rpc/codec/ssz.rs @@ -171,7 +171,25 @@ impl Decoder for SSZOutboundCodec { }, _ => unreachable!("Cannot negotiate an unknown protocol"), }, - Ok(None) => Ok(None), + Ok(None) => { + // the object sent could be a empty. We return the empty object if this is the case + match self.protocol.message_name.as_str() { + "hello" => match self.protocol.version.as_str() { + "1" => Ok(None), // cannot have an empty HELLO message. 
The stream has terminated unexpectedly + _ => unreachable!("Cannot negotiate an unknown version"), + }, + "goodbye" => Err(RPCError::InvalidProtocol("GOODBYE doesn't have a response")), + "beacon_blocks" => match self.protocol.version.as_str() { + "1" => Ok(Some(RPCResponse::BeaconBlocks(Vec::new()))), + _ => unreachable!("Cannot negotiate an unknown version"), + }, + "recent_beacon_blocks" => match self.protocol.version.as_str() { + "1" => Ok(Some(RPCResponse::RecentBeaconBlocks(Vec::new()))), + _ => unreachable!("Cannot negotiate an unknown version"), + }, + _ => unreachable!("Cannot negotiate an unknown protocol"), + } + } Err(e) => Err(e), } } diff --git a/beacon_node/eth2-libp2p/src/rpc/handler.rs b/beacon_node/eth2-libp2p/src/rpc/handler.rs index a69cd0cda..07322875f 100644 --- a/beacon_node/eth2-libp2p/src/rpc/handler.rs +++ b/beacon_node/eth2-libp2p/src/rpc/handler.rs @@ -317,11 +317,11 @@ where RPCEvent::Response(rpc_event.id(), response), ))); } else { - // stream closed early + // stream closed early or nothing was sent return Ok(Async::Ready(ProtocolsHandlerEvent::Custom( RPCEvent::Error( rpc_event.id(), - RPCError::Custom("Stream Closed Early".into()), + RPCError::Custom("Stream closed early. 
Empty response".into()), ), ))); } diff --git a/beacon_node/network/src/message_handler.rs b/beacon_node/network/src/message_handler.rs index 7a1a4ad31..c14fc970d 100644 --- a/beacon_node/network/src/message_handler.rs +++ b/beacon_node/network/src/message_handler.rs @@ -1,8 +1,7 @@ use crate::error; -use crate::service::{NetworkMessage, OutgoingMessage}; +use crate::service::NetworkMessage; use crate::sync::SimpleSync; use beacon_chain::{BeaconChain, BeaconChainTypes}; -use eth2_libp2p::rpc::methods::*; use eth2_libp2p::{ behaviour::PubsubMessage, rpc::{RPCError, RPCErrorResponse, RPCRequest, RPCResponse, RequestId}, @@ -304,6 +303,9 @@ impl MessageHandler { &self, beacon_blocks: &[u8], ) -> Result>, DecodeError> { + if beacon_blocks.is_empty() { + return Ok(Vec::new()); + } //TODO: Implement faster block verification before decoding entirely Vec::from_ssz_bytes(&beacon_blocks) } diff --git a/beacon_node/network/src/sync/manager.rs b/beacon_node/network/src/sync/manager.rs index f5c669455..b81da0991 100644 --- a/beacon_node/network/src/sync/manager.rs +++ b/beacon_node/network/src/sync/manager.rs @@ -13,14 +13,12 @@ const MAX_BLOCKS_PER_REQUEST: u64 = 10; /// The number of slots that we can import blocks ahead of us, before going into full Sync mode. const SLOT_IMPORT_TOLERANCE: usize = 10; - const PARENT_FAIL_TOLERANCE: usize = 3; const PARENT_DEPTH_TOLERANCE: usize = SLOT_IMPORT_TOLERANCE * 2; #[derive(PartialEq)] enum BlockRequestsState { - QueuedForward, - QueuedBackward, + Queued, Pending(RequestId), Complete, Failed, @@ -31,6 +29,10 @@ struct BlockRequests { target_head_root: Hash256, downloaded_blocks: Vec>, state: BlockRequestsState, + /// Specifies whether the current state is syncing forwards or backwards. + forward_sync: bool, + /// The current `start_slot` of the batched block request. 
+ current_start_slot: Slot, } struct ParentRequests { @@ -43,25 +45,13 @@ struct ParentRequests { impl BlockRequests { // gets the start slot for next batch // last block slot downloaded plus 1 - fn next_start_slot(&self) -> Option { - if !self.downloaded_blocks.is_empty() { - match self.state { - BlockRequestsState::QueuedForward => { - let last_element_index = self.downloaded_blocks.len() - 1; - Some(self.downloaded_blocks[last_element_index].slot.add(1)) - } - BlockRequestsState::QueuedBackward => { - let earliest_known_slot = self.downloaded_blocks[0].slot; - Some(earliest_known_slot.add(1).sub(MAX_BLOCKS_PER_REQUEST)) - } - _ => { - // pending/complete/failed - None - } - } + fn update_start_slot(&mut self) { + if self.forward_sync { + self.current_start_slot += Slot::from(MAX_BLOCKS_PER_REQUEST); } else { - None + self.current_start_slot -= Slot::from(MAX_BLOCKS_PER_REQUEST); } + self.state = BlockRequestsState::Queued; } } @@ -117,7 +107,7 @@ impl ImportManager { let local = PeerSyncInfo::from(&self.chain); - // If a peer is within SLOT_IMPORT_TOLERANCE from out head slot, ignore a batch sync + // If a peer is within SLOT_IMPORT_TOLERANCE from our head slot, ignore a batch sync if remote.head_slot.sub(local.head_slot).as_usize() < SLOT_IMPORT_TOLERANCE { trace!(self.log, "Ignoring full sync with peer"; "peer" => format!("{:?}", peer_id), @@ -139,7 +129,9 @@ impl ImportManager { target_head_slot: remote.head_slot, // this should be larger than the current head. It is checked in the SyncManager before add_peer is called target_head_root: remote.head_root, downloaded_blocks: Vec::new(), - state: BlockRequestsState::QueuedForward, + state: BlockRequestsState::Queued, + forward_sync: true, + current_start_slot: self.chain.best_slot(), }; self.import_queue.insert(peer_id, block_requests); } @@ -165,8 +157,6 @@ impl ImportManager { } }; - // The response should contain at least one block. 
- // // If we are syncing up to a target head block, at least the target head block should be // returned. If we are syncing back to our last finalized block the request should return // at least the last block we received (last known block). In diagram form: @@ -176,33 +166,30 @@ impl ImportManager { // ^finalized slot ^ requested start slot ^ last known block ^ remote head if blocks.is_empty() { - warn!(self.log, "BeaconBlocks response was empty"; "request_id" => request_id); - block_requests.state = BlockRequestsState::Failed; + debug!(self.log, "BeaconBlocks response was empty"; "request_id" => request_id); + block_requests.update_start_slot(); return; } - // Add the newly downloaded blocks to the current list of downloaded blocks. This also - // determines if we are syncing forward or backward. - let syncing_forwards = { - if block_requests.downloaded_blocks.is_empty() { - block_requests.downloaded_blocks.append(&mut blocks); - true - } else if block_requests.downloaded_blocks[0].slot < blocks[0].slot { - // syncing forwards - // verify the peer hasn't sent overlapping blocks - ensuring the strictly - // increasing blocks in a batch will be verified during the processing - if block_requests.next_start_slot() > Some(blocks[0].slot) { - warn!(self.log, "BeaconBlocks response returned duplicate blocks"; "request_id" => request_id, "response_initial_slot" => blocks[0].slot, "requested_initial_slot" => block_requests.next_start_slot()); - block_requests.state = BlockRequestsState::Failed; - return; - } - - block_requests.downloaded_blocks.append(&mut blocks); - true - } else { - false - } - }; + // verify the range of received blocks + // Note that the order of blocks is verified in block processing + let last_sent_slot = blocks[blocks.len() - 1].slot; + if block_requests.current_start_slot > blocks[0].slot + || block_requests + .current_start_slot + .add(MAX_BLOCKS_PER_REQUEST) + < last_sent_slot + { + //TODO: Downvote peer - add a reason to failed + 
dbg!(&blocks); + warn!(self.log, "BeaconBlocks response returned out of range blocks"; + "request_id" => request_id, + "response_initial_slot" => blocks[0].slot, + "requested_initial_slot" => block_requests.current_start_slot); + // consider this sync failed + block_requests.state = BlockRequestsState::Failed; + return; + } // Determine if more blocks need to be downloaded. There are a few cases: // - We have downloaded a batch from our head_slot, which has not reached the remotes head @@ -216,61 +203,60 @@ impl ImportManager { // chain. If so, process the blocks, if not, request more blocks all the way up to // our last finalized slot. - if syncing_forwards { - // does the batch contain the target_head_slot - let last_element_index = block_requests.downloaded_blocks.len() - 1; - if block_requests.downloaded_blocks[last_element_index].slot - >= block_requests.target_head_slot - { - // if the batch is on our chain, this is complete and we can then process. - // Otherwise start backwards syncing until we reach a common chain. - let earliest_slot = block_requests.downloaded_blocks[0].slot; - //TODO: Decide which is faster. Reading block from db and comparing or calculating - //the hash tree root and comparing. 
- if Some(block_requests.downloaded_blocks[0].canonical_root()) - == root_at_slot(self.chain, earliest_slot) - { - block_requests.state = BlockRequestsState::Complete; - return; - } - - // not on the same chain, request blocks backwards - let state = &self.chain.head().beacon_state; - let local_finalized_slot = state - .finalized_checkpoint - .epoch - .start_slot(T::EthSpec::slots_per_epoch()); - - // check that the request hasn't failed by having no common chain - if local_finalized_slot >= block_requests.downloaded_blocks[0].slot { - warn!(self.log, "Peer returned an unknown chain."; "request_id" => request_id); - block_requests.state = BlockRequestsState::Failed; - return; - } - - // Start a backwards sync by requesting earlier blocks - // There can be duplication in downloaded blocks here if there are a large number - // of skip slots. In all cases we at least re-download the earliest known block. - // It is unlikely that a backwards sync in required, so we accept this duplication - // for now. - block_requests.state = BlockRequestsState::QueuedBackward; - } else { - // batch doesn't contain the head slot, request the next batch - block_requests.state = BlockRequestsState::QueuedForward; - } + if block_requests.forward_sync { + // append blocks if syncing forward + block_requests.downloaded_blocks.append(&mut blocks); } else { - // syncing backwards + // prepend blocks if syncing backwards + block_requests.downloaded_blocks.splice(..0, blocks); + } + + // does the batch contain the target_head_slot + let last_element_index = block_requests.downloaded_blocks.len() - 1; + if block_requests.downloaded_blocks[last_element_index].slot + >= block_requests.target_head_slot + || !block_requests.forward_sync + { // if the batch is on our chain, this is complete and we can then process. - // Otherwise continue backwards + // Otherwise start backwards syncing until we reach a common chain. 
let earliest_slot = block_requests.downloaded_blocks[0].slot; + //TODO: Decide which is faster. Reading block from db and comparing or calculating + //the hash tree root and comparing. if Some(block_requests.downloaded_blocks[0].canonical_root()) - == root_at_slot(self.chain, earliest_slot) + == root_at_slot(&self.chain, earliest_slot) { block_requests.state = BlockRequestsState::Complete; return; } - block_requests.state = BlockRequestsState::QueuedBackward; + + // not on the same chain, request blocks backwards + let state = &self.chain.head().beacon_state; + let local_finalized_slot = state + .finalized_checkpoint + .epoch + .start_slot(T::EthSpec::slots_per_epoch()); + + // check that the request hasn't failed by having no common chain + if local_finalized_slot >= block_requests.current_start_slot { + warn!(self.log, "Peer returned an unknown chain."; "request_id" => request_id); + block_requests.state = BlockRequestsState::Failed; + return; + } + + // if this is a forward sync, then we have reached the head without a common chain + // and we need to start syncing backwards. 
+ if block_requests.forward_sync { + // Start a backwards sync by requesting earlier blocks + block_requests.forward_sync = false; + block_requests.current_start_slot = std::cmp::min( + self.chain.best_slot(), + block_requests.downloaded_blocks[0].slot, + ); + } } + + // update the start slot and re-queue the batch + block_requests.update_start_slot(); } pub fn recent_blocks_response( @@ -296,7 +282,7 @@ impl ImportManager { // if an empty response is given, the peer didn't have the requested block, try again if blocks.is_empty() { parent_request.failed_attempts += 1; - parent_request.state = BlockRequestsState::QueuedForward; + parent_request.state = BlockRequestsState::Queued; parent_request.last_submitted_peer = peer_id; return; } @@ -316,7 +302,7 @@ impl ImportManager { parent_request.state = BlockRequestsState::Complete; } - pub fn inject_error(peer_id: PeerId, id: RequestId) { + pub fn _inject_error(_peer_id: PeerId, _id: RequestId) { //TODO: Remove block state from pending } @@ -358,13 +344,13 @@ impl ImportManager { downloaded_blocks: vec![block], failed_attempts: 0, last_submitted_peer: peer_id, - state: BlockRequestsState::QueuedBackward, + state: BlockRequestsState::Queued, }; self.parent_queue.push(req); } - pub fn poll(&mut self) -> ImportManagerOutcome { + pub(crate) fn poll(&mut self) -> ImportManagerOutcome { loop { // update the state of the manager self.update_state(); @@ -385,12 +371,11 @@ impl ImportManager { } // process any complete parent lookups - if let (re_run, outcome) = self.process_complete_parent_requests() { - if let Some(outcome) = outcome { - return outcome; - } else if !re_run { - break; - } + let (re_run, outcome) = self.process_complete_parent_requests(); + if let Some(outcome) = outcome { + return outcome; + } else if !re_run { + break; } } @@ -423,9 +408,10 @@ impl ImportManager { // If any in queued state we submit a request. 
// remove any failed batches + let debug_log = &self.log; self.import_queue.retain(|peer_id, block_request| { if let BlockRequestsState::Failed = block_request.state { - debug!(self.log, "Block import from peer failed"; + debug!(debug_log, "Block import from peer failed"; "peer_id" => format!("{:?}", peer_id), "downloaded_blocks" => block_request.downloaded_blocks.len() ); @@ -436,20 +422,18 @@ impl ImportManager { }); // process queued block requests - for (peer_id, block_requests) in self.import_queue.iter_mut().find(|(_peer_id, req)| { - req.state == BlockRequestsState::QueuedForward - || req.state == BlockRequestsState::QueuedBackward - }) { + for (peer_id, block_requests) in self + .import_queue + .iter_mut() + .find(|(_peer_id, req)| req.state == BlockRequestsState::Queued) + { let request_id = self.current_req_id; block_requests.state = BlockRequestsState::Pending(request_id); self.current_req_id += 1; let request = BeaconBlocksRequest { head_block_root: block_requests.target_head_root, - start_slot: block_requests - .next_start_slot() - .unwrap_or_else(|| self.chain.best_slot()) - .as_u64(), + start_slot: block_requests.current_start_slot.as_u64(), count: MAX_BLOCKS_PER_REQUEST, step: 0, }; @@ -504,9 +488,10 @@ impl ImportManager { fn process_parent_requests(&mut self) -> Option { // remove any failed requests + let debug_log = &self.log; self.parent_queue.retain(|parent_request| { if parent_request.state == BlockRequestsState::Failed { - debug!(self.log, "Parent import failed"; + debug!(debug_log, "Parent import failed"; "block" => format!("{:?}",parent_request.downloaded_blocks[0].canonical_root()), "ancestors_found" => parent_request.downloaded_blocks.len() ); @@ -524,9 +509,15 @@ impl ImportManager { // check if parents need to be searched for for parent_request in self.parent_queue.iter_mut() { if parent_request.failed_attempts >= PARENT_FAIL_TOLERANCE { - parent_request.state == BlockRequestsState::Failed; + parent_request.state = 
BlockRequestsState::Failed; continue; - } else if parent_request.state == BlockRequestsState::QueuedForward { + } else if parent_request.state == BlockRequestsState::Queued { + // check the depth isn't too large + if parent_request.downloaded_blocks.len() >= PARENT_DEPTH_TOLERANCE { + parent_request.state = BlockRequestsState::Failed; + continue; + } + parent_request.state = BlockRequestsState::Pending(self.current_req_id); self.current_req_id += 1; let last_element_index = parent_request.downloaded_blocks.len() - 1; @@ -564,7 +555,7 @@ impl ImportManager { if block_hash != expected_hash { // remove the head block let _ = completed_request.downloaded_blocks.pop(); - completed_request.state = BlockRequestsState::QueuedForward; + completed_request.state = BlockRequestsState::Queued; //TODO: Potentially downvote the peer let peer = completed_request.last_submitted_peer.clone(); debug!(self.log, "Peer sent invalid parent. Ignoring"; @@ -585,7 +576,7 @@ impl ImportManager { Ok(BlockProcessingOutcome::ParentUnknown { parent: _ }) => { // need to keep looking for parents completed_request.downloaded_blocks.push(block); - completed_request.state == BlockRequestsState::QueuedForward; + completed_request.state = BlockRequestsState::Queued; re_run = true; break; } @@ -598,7 +589,7 @@ impl ImportManager { "outcome" => format!("{:?}", outcome), "peer" => format!("{:?}", completed_request.last_submitted_peer), ); - completed_request.state == BlockRequestsState::QueuedForward; + completed_request.state = BlockRequestsState::Queued; re_run = true; return ( re_run, @@ -613,7 +604,7 @@ impl ImportManager { self.log, "Parent processing error"; "error" => format!("{:?}", e) ); - completed_request.state == BlockRequestsState::QueuedForward; + completed_request.state = BlockRequestsState::Queued; re_run = true; return ( re_run, @@ -691,6 +682,13 @@ impl ImportManager { ); } } + BlockProcessingOutcome::FinalizedSlot => { + trace!( + self.log, "Finalized or earlier block processed"; + 
"outcome" => format!("{:?}", outcome), + ); + // block reached our finalized slot or was earlier, move to the next block + } _ => { trace!( self.log, "InvalidBlock"; @@ -717,7 +715,7 @@ impl ImportManager { } fn root_at_slot( - chain: Arc>, + chain: &Arc>, target_slot: Slot, ) -> Option { chain diff --git a/beacon_node/network/src/sync/simple_sync.rs b/beacon_node/network/src/sync/simple_sync.rs index deadf214d..924b2de9b 100644 --- a/beacon_node/network/src/sync/simple_sync.rs +++ b/beacon_node/network/src/sync/simple_sync.rs @@ -2,24 +2,22 @@ use super::manager::{ImportManager, ImportManagerOutcome}; use crate::service::{NetworkMessage, OutgoingMessage}; use beacon_chain::{BeaconChain, BeaconChainTypes, BlockProcessingOutcome}; use eth2_libp2p::rpc::methods::*; -use eth2_libp2p::rpc::methods::*; use eth2_libp2p::rpc::{RPCEvent, RPCRequest, RPCResponse, RequestId}; use eth2_libp2p::PeerId; -use slog::{debug, error, info, o, trace, warn}; +use slog::{debug, info, o, trace, warn}; use ssz::Encode; -use std::collections::HashMap; +use std::ops::Sub; use std::sync::Arc; -use std::time::Duration; use store::Store; use tokio::sync::mpsc; -use types::{ - Attestation, BeaconBlock, BeaconBlockBody, BeaconBlockHeader, Epoch, EthSpec, Hash256, Slot, -}; +use types::{Attestation, BeaconBlock, Epoch, EthSpec, Hash256, Slot}; /// If a block is more than `FUTURE_SLOT_TOLERANCE` slots ahead of our slot clock, we drop it. /// Otherwise we queue it. pub(crate) const FUTURE_SLOT_TOLERANCE: u64 = 1; +/// The number of slots behind our head that we still treat a peer as a fully synced peer. +const FULL_PEER_TOLERANCE: u64 = 10; const SHOULD_FORWARD_GOSSIP_BLOCK: bool = true; const SHOULD_NOT_FORWARD_GOSSIP_BLOCK: bool = false; @@ -54,8 +52,8 @@ impl From<&Arc>> for PeerSyncInfo { /// The current syncing state. 
#[derive(PartialEq)] pub enum SyncState { - Idle, - Downloading, + _Idle, + _Downloading, _Stopped, } @@ -97,7 +95,7 @@ impl SimpleSync { /// Sends a `Hello` message to the peer. pub fn on_connect(&mut self, peer_id: PeerId) { self.network - .send_rpc_request(peer_id, RPCRequest::Hello(hello_message(&self.chain))); + .send_rpc_request(None, peer_id, RPCRequest::Hello(hello_message(&self.chain))); } /// Handle a `Hello` request. @@ -193,8 +191,16 @@ impl SimpleSync { { // If the node's best-block is already known to us and they are close to our current // head, treat them as a fully sync'd peer. - self.manager.add_full_peer(peer_id); - self.process_sync(); + if self.chain.best_slot().sub(remote.head_slot).as_u64() < FULL_PEER_TOLERANCE { + self.manager.add_full_peer(peer_id); + self.process_sync(); + } else { + debug!( + self.log, + "Out of sync peer connected"; + "peer" => format!("{:?}", peer_id), + ); + } } else { // The remote node has an equal or great finalized epoch and we don't know it's head. 
// @@ -222,8 +228,11 @@ impl SimpleSync { "method" => "HELLO", "peer" => format!("{:?}", peer_id) ); - self.network - .send_rpc_request(peer_id, RPCRequest::Hello(hello_message(&self.chain))); + self.network.send_rpc_request( + None, + peer_id, + RPCRequest::Hello(hello_message(&self.chain)), + ); } ImportManagerOutcome::RequestBlocks { peer_id, @@ -238,8 +247,11 @@ impl SimpleSync { "count" => request.count, "peer" => format!("{:?}", peer_id) ); - self.network - .send_rpc_request(peer_id.clone(), RPCRequest::BeaconBlocks(request)); + self.network.send_rpc_request( + Some(request_id), + peer_id.clone(), + RPCRequest::BeaconBlocks(request), + ); } ImportManagerOutcome::RecentRequest(peer_id, req) => { trace!( @@ -249,8 +261,11 @@ impl SimpleSync { "count" => req.block_roots.len(), "peer" => format!("{:?}", peer_id) ); - self.network - .send_rpc_request(peer_id.clone(), RPCRequest::RecentBeaconBlocks(req)); + self.network.send_rpc_request( + None, + peer_id.clone(), + RPCRequest::RecentBeaconBlocks(req), + ); } ImportManagerOutcome::DownvotePeer(peer_id) => { trace!( @@ -270,6 +285,7 @@ impl SimpleSync { } } + //TODO: Move to beacon chain fn root_at_slot(&self, target_slot: Slot) -> Option { self.chain .rev_iter_block_roots() @@ -333,36 +349,58 @@ impl SimpleSync { "start_slot" => req.start_slot, ); + //TODO: Optimize this + // Currently for skipped slots, the blocks returned could be less than the requested range. + // In the current implementation we read from the db then filter out out-of-range blocks. + // Improving the db schema to prevent this would be ideal. 
+ let mut blocks: Vec> = self .chain .rev_iter_block_roots() .filter(|(_root, slot)| { - req.start_slot <= slot.as_u64() && req.start_slot + req.count >= slot.as_u64() + req.start_slot <= slot.as_u64() && req.start_slot + req.count > slot.as_u64() }) .take_while(|(_root, slot)| req.start_slot <= slot.as_u64()) .filter_map(|(root, _slot)| { if let Ok(Some(block)) = self.chain.store.get::>(&root) { Some(block) } else { - debug!( + warn!( self.log, - "Peer requested unknown block"; - "peer" => format!("{:?}", peer_id), + "Block in the chain is not in the store"; "request_root" => format!("{:}", root), ); None } }) + .filter(|block| block.slot >= req.start_slot) .collect(); + // TODO: Again find a more elegant way to include genesis if needed + // if the genesis is requested, add it in + if req.start_slot == 0 { + if let Ok(Some(genesis)) = self + .chain + .store + .get::>(&self.chain.genesis_block_root) + { + blocks.push(genesis); + } else { + warn!( + self.log, + "Requested genesis, which is not in the chain store"; + ); + } + } + blocks.reverse(); blocks.dedup_by_key(|brs| brs.slot); if blocks.len() as u64 != req.count { debug!( self.log, - "BeaconBlocksRequest"; + "BeaconBlocksRequest response"; "peer" => format!("{:?}", peer_id), "msg" => "Failed to return all requested hashes", "start_slot" => req.start_slot, @@ -498,14 +536,19 @@ impl NetworkContext { } pub fn disconnect(&mut self, peer_id: PeerId, reason: GoodbyeReason) { - self.send_rpc_request(peer_id, RPCRequest::Goodbye(reason)) + self.send_rpc_request(None, peer_id, RPCRequest::Goodbye(reason)) // TODO: disconnect peers. } - pub fn send_rpc_request(&mut self, peer_id: PeerId, rpc_request: RPCRequest) { - // Note: There is currently no use of keeping track of requests. However the functionality - // is left here for future revisions. 
- self.send_rpc_event(peer_id, RPCEvent::Request(0, rpc_request)); + pub fn send_rpc_request( + &mut self, + request_id: Option, + peer_id: PeerId, + rpc_request: RPCRequest, + ) { + // use 0 as the default request id, when an ID is not required. + let request_id = request_id.unwrap_or_else(|| 0); + self.send_rpc_event(peer_id, RPCEvent::Request(request_id, rpc_request)); } //TODO: Handle Error responses From 7ee080db6021b2fb4b47056ce0a666020b71b3d9 Mon Sep 17 00:00:00 2001 From: Age Manning Date: Sun, 25 Aug 2019 08:25:54 +1000 Subject: [PATCH 107/305] Updated syncing algorithm --- beacon_node/client/src/lib.rs | 6 ++---- beacon_node/eth2-libp2p/Cargo.toml | 4 ++-- beacon_node/eth2-libp2p/src/behaviour.rs | 3 +-- beacon_node/eth2-libp2p/src/service.rs | 2 +- beacon_node/network/src/service.rs | 2 +- beacon_node/network/src/sync/simple_sync.rs | 8 +++++++- beacon_node/rpc/src/attestation.rs | 8 ++++++-- beacon_node/rpc/src/beacon_block.rs | 10 +++++++--- 8 files changed, 27 insertions(+), 16 deletions(-) diff --git a/beacon_node/client/src/lib.rs b/beacon_node/client/src/lib.rs index 7e6449a98..4b64c1070 100644 --- a/beacon_node/client/src/lib.rs +++ b/beacon_node/client/src/lib.rs @@ -41,7 +41,7 @@ pub struct Client { /// Signal to terminate the slot timer. pub slot_timer_exit_signal: Option, /// Signal to terminate the API - // pub api_exit_signal: Option, + pub api_exit_signal: Option, /// The clients logger. log: slog::Logger, /// Marker to pin the beacon chain generics. 
@@ -134,7 +134,6 @@ where None }; - /* // Start the `rest_api` service let api_exit_signal = if client_config.rest_api.enabled { match rest_api::start_server( @@ -152,7 +151,6 @@ where } else { None }; - */ let (slot_timer_exit_signal, exit) = exit_future::signal(); if let Ok(Some(duration_to_next_slot)) = beacon_chain.slot_clock.duration_to_next_slot() { @@ -186,7 +184,7 @@ where http_exit_signal, rpc_exit_signal, slot_timer_exit_signal: Some(slot_timer_exit_signal), - //api_exit_signal, + api_exit_signal, log, network, phantom: PhantomData, diff --git a/beacon_node/eth2-libp2p/Cargo.toml b/beacon_node/eth2-libp2p/Cargo.toml index 55081aed5..a379bcead 100644 --- a/beacon_node/eth2-libp2p/Cargo.toml +++ b/beacon_node/eth2-libp2p/Cargo.toml @@ -7,8 +7,8 @@ edition = "2018" [dependencies] clap = "2.32.0" #SigP repository -libp2p = { git = "https://github.com/SigP/rust-libp2p", rev = "b0d3cf7b4b0fa6c555b64dbdd110673a05457abd" } -enr = { git = "https://github.com/SigP/rust-libp2p/", rev = "b0d3cf7b4b0fa6c555b64dbdd110673a05457abd", features = ["serde"] } +libp2p = { git = "https://github.com/SigP/rust-libp2p", rev = "a56865a4077ac54767136b4bee627c9734720a6b" } +enr = { git = "https://github.com/SigP/rust-libp2p/", rev = "a56865a4077ac54767136b4bee627c9734720a6b", features = ["serde"] } types = { path = "../../eth2/types" } serde = "1.0" serde_derive = "1.0" diff --git a/beacon_node/eth2-libp2p/src/behaviour.rs b/beacon_node/eth2-libp2p/src/behaviour.rs index b4822de4c..29725e0ce 100644 --- a/beacon_node/eth2-libp2p/src/behaviour.rs +++ b/beacon_node/eth2-libp2p/src/behaviour.rs @@ -16,7 +16,6 @@ use libp2p::{ NetworkBehaviour, PeerId, }; use slog::{debug, o, trace}; -use ssz::{ssz_encode, Encode}; use std::num::NonZeroU32; use std::time::Duration; @@ -189,7 +188,7 @@ impl Behaviour { } /// Publishes a message on the pubsub (gossipsub) behaviour. 
- pub fn publish(&mut self, topics: Vec, message: PubsubMessage) { + pub fn publish(&mut self, topics: &[Topic], message: PubsubMessage) { let message_data = message.to_data(); for topic in topics { self.gossipsub.publish(topic, message_data.clone()); diff --git a/beacon_node/eth2-libp2p/src/service.rs b/beacon_node/eth2-libp2p/src/service.rs index 98718445b..9945b1586 100644 --- a/beacon_node/eth2-libp2p/src/service.rs +++ b/beacon_node/eth2-libp2p/src/service.rs @@ -148,7 +148,7 @@ impl Stream for Service { topics, message, } => { - trace!(self.log, "Gossipsub message received"; "Message" => format!("{:?}", message)); + trace!(self.log, "Gossipsub message received"; "service" => "Swarm"); return Ok(Async::Ready(Some(Libp2pEvent::PubsubMessage { source, topics, diff --git a/beacon_node/network/src/service.rs b/beacon_node/network/src/service.rs index df0404cfa..4800a7efb 100644 --- a/beacon_node/network/src/service.rs +++ b/beacon_node/network/src/service.rs @@ -119,7 +119,7 @@ fn network_service( }, NetworkMessage::Publish { topics, message } => { debug!(log, "Sending pubsub message"; "topics" => format!("{:?}",topics)); - libp2p_service.lock().swarm.publish(topics, message); + libp2p_service.lock().swarm.publish(&topics, message); } }, Ok(Async::NotReady) => break, diff --git a/beacon_node/network/src/sync/simple_sync.rs b/beacon_node/network/src/sync/simple_sync.rs index 924b2de9b..bee9310d3 100644 --- a/beacon_node/network/src/sync/simple_sync.rs +++ b/beacon_node/network/src/sync/simple_sync.rs @@ -465,9 +465,15 @@ impl SimpleSync { pub fn on_block_gossip(&mut self, peer_id: PeerId, block: BeaconBlock) -> bool { if let Ok(outcome) = self.chain.process_block(block.clone()) { match outcome { - BlockProcessingOutcome::Processed { .. } => SHOULD_FORWARD_GOSSIP_BLOCK, + BlockProcessingOutcome::Processed { .. 
} => { + trace!(self.log, "Gossipsub block processed"; + "peer_id" => format!("{:?}",peer_id)); + SHOULD_FORWARD_GOSSIP_BLOCK + } BlockProcessingOutcome::ParentUnknown { parent: _ } => { // Inform the sync manager to find parents for this block + trace!(self.log, "Unknown parent gossip"; + "peer_id" => format!("{:?}",peer_id)); self.manager.add_unknown_block(block.clone(), peer_id); SHOULD_FORWARD_GOSSIP_BLOCK } diff --git a/beacon_node/rpc/src/attestation.rs b/beacon_node/rpc/src/attestation.rs index f442e247d..dff3f8d70 100644 --- a/beacon_node/rpc/src/attestation.rs +++ b/beacon_node/rpc/src/attestation.rs @@ -1,7 +1,7 @@ use beacon_chain::{BeaconChain, BeaconChainError, BeaconChainTypes}; use eth2_libp2p::PubsubMessage; use eth2_libp2p::Topic; -use eth2_libp2p::BEACON_ATTESTATION_TOPIC; +use eth2_libp2p::{BEACON_ATTESTATION_TOPIC, TOPIC_ENCODING_POSTFIX, TOPIC_PREFIX}; use futures::Future; use grpcio::{RpcContext, RpcStatus, RpcStatusCode, UnarySink}; use network::NetworkMessage; @@ -144,7 +144,11 @@ impl AttestationService for AttestationServiceInstance { ); // valid attestation, propagate to the network - let topic = Topic::new(BEACON_ATTESTATION_TOPIC.into()); + let topic_string = format!( + "/{}/{}/{}", + TOPIC_PREFIX, BEACON_ATTESTATION_TOPIC, TOPIC_ENCODING_POSTFIX + ); + let topic = Topic::new(topic_string); let message = PubsubMessage::Attestation(attestation.as_ssz_bytes()); self.network_chan diff --git a/beacon_node/rpc/src/beacon_block.rs b/beacon_node/rpc/src/beacon_block.rs index b1a67399e..92a543ef3 100644 --- a/beacon_node/rpc/src/beacon_block.rs +++ b/beacon_node/rpc/src/beacon_block.rs @@ -1,6 +1,6 @@ use beacon_chain::{BeaconChain, BeaconChainTypes, BlockProcessingOutcome}; -use eth2_libp2p::BEACON_BLOCK_TOPIC; use eth2_libp2p::{PubsubMessage, Topic}; +use eth2_libp2p::{BEACON_BLOCK_TOPIC, TOPIC_ENCODING_POSTFIX, TOPIC_PREFIX}; use futures::Future; use grpcio::{RpcContext, RpcStatus, RpcStatusCode, UnarySink}; use network::NetworkMessage; @@ 
-105,8 +105,12 @@ impl BeaconBlockService for BeaconBlockServiceInstance { "block_root" => format!("{}", block_root), ); - // get the network topic to send on - let topic = Topic::new(BEACON_BLOCK_TOPIC.into()); + // create the network topic to send on + let topic_string = format!( + "/{}/{}/{}", + TOPIC_PREFIX, BEACON_BLOCK_TOPIC, TOPIC_ENCODING_POSTFIX + ); + let topic = Topic::new(topic_string); let message = PubsubMessage::Block(block.as_ssz_bytes()); // Publish the block to the p2p network via gossipsub. From 66d78387079c187545646bf2047428d872327113 Mon Sep 17 00:00:00 2001 From: Paul Hauner Date: Sun, 25 Aug 2019 09:43:03 +1000 Subject: [PATCH 108/305] Remove GenesisConfig, add BeaconChainStartMethod --- beacon_node/client/src/beacon_chain_types.rs | 14 ++--- beacon_node/client/src/config.rs | 61 ++++++++------------ beacon_node/client/src/lib.rs | 2 +- beacon_node/src/config.rs | 18 ++++-- beacon_node/src/main.rs | 4 +- 5 files changed, 46 insertions(+), 53 deletions(-) diff --git a/beacon_node/client/src/beacon_chain_types.rs b/beacon_node/client/src/beacon_chain_types.rs index 5168c067a..37e4a055e 100644 --- a/beacon_node/client/src/beacon_chain_types.rs +++ b/beacon_node/client/src/beacon_chain_types.rs @@ -1,6 +1,6 @@ use crate::bootstrapper::Bootstrapper; use crate::error::Result; -use crate::{config::GenesisState, ClientConfig}; +use crate::{config::BeaconChainStartMethod, ClientConfig}; use beacon_chain::{ lmd_ghost::{LmdGhost, ThreadSafeReducedTree}, slot_clock::SystemTimeSlotClock, @@ -59,19 +59,19 @@ where T: BeaconChainTypes, T::LmdGhost: LmdGhost, { - let genesis_state = match &config.genesis_state { - GenesisState::Mainnet => { + let genesis_state = match &config.beacon_chain_start_method { + BeaconChainStartMethod::Resume => { crit!(log, "This release does not support mainnet genesis state."); return Err("Mainnet is unsupported".into()); } - GenesisState::RecentGenesis { validator_count } => { + BeaconChainStartMethod::RecentGenesis { 
validator_count } => { generate_testnet_genesis_state(*validator_count, recent_genesis_time(), &spec) } - GenesisState::Generated { + BeaconChainStartMethod::Generated { validator_count, genesis_time, } => generate_testnet_genesis_state(*validator_count, *genesis_time, &spec), - GenesisState::Yaml { file } => { + BeaconChainStartMethod::Yaml { file } => { let file = File::open(file).map_err(|e| { format!("Unable to open YAML genesis state file {:?}: {:?}", file, e) })?; @@ -79,7 +79,7 @@ where serde_yaml::from_reader(file) .map_err(|e| format!("Unable to parse YAML genesis state file: {:?}", e))? } - GenesisState::HttpBootstrap { server } => { + BeaconChainStartMethod::HttpBootstrap { server, .. } => { let bootstrapper = Bootstrapper::from_server_string(server.to_string()) .map_err(|e| format!("Failed to initialize bootstrap client: {}", e))?; diff --git a/beacon_node/client/src/config.rs b/beacon_node/client/src/config.rs index e802a93a3..1e8f60f6e 100644 --- a/beacon_node/client/src/config.rs +++ b/beacon_node/client/src/config.rs @@ -1,15 +1,11 @@ -use crate::Bootstrapper; use clap::ArgMatches; use network::NetworkConfig; use serde_derive::{Deserialize, Serialize}; -use slog::{info, o, warn, Drain}; +use slog::{info, o, Drain}; use std::fs::{self, OpenOptions}; use std::path::PathBuf; use std::sync::Mutex; -/// The number initial validators when starting the `Minimal`. -const TESTNET_VALIDATOR_COUNT: usize = 16; - /// The number initial validators when starting the `Minimal`. const TESTNET_SPEC_CONSTANTS: &str = "minimal"; @@ -21,63 +17,52 @@ pub struct Config { db_name: String, pub log_file: PathBuf, pub spec_constants: String, + /// Defines how we should initialize BeaconChain instances. + /// + /// This field is not serialized, therefore it will not be written to (or loaded from) config + /// files. It can only be configured via the CLI.
#[serde(skip)] - pub boot_method: BootMethod, + pub beacon_chain_start_method: BeaconChainStartMethod, pub network: network::NetworkConfig, pub rpc: rpc::RPCConfig, pub rest_api: rest_api::ApiConfig, } +/// Defines how the client should initialize a BeaconChain. +/// +/// In general, there are two methods: +/// - resuming a new chain, or +/// - initializing a new one. #[derive(Debug, Clone)] -pub enum BootMethod { - /// Resume from an existing database. +pub enum BeaconChainStartMethod { + /// Resume from an existing BeaconChain, loaded from the existing local database. Resume, - /// Generate a state with `validator_count` validators, all with well-known secret keys. + /// Create a new beacon chain with `validator_count` validators, all with well-known secret keys. /// /// Set the genesis time to be the start of the previous 30-minute window. RecentGenesis { validator_count: usize }, - /// Generate a state with `genesis_time` and `validator_count` validators, all with well-known + /// Create a new beacon chain with `genesis_time` and `validator_count` validators, all with well-known /// secret keys. Generated { validator_count: usize, genesis_time: u64, }, - /// Load a YAML-encoded genesis state from a file. + /// Create a new beacon chain by loading a YAML-encoded genesis state from a file. Yaml { file: PathBuf }, - /// Use a HTTP server (running our REST-API) to load genesis and finalized states and blocks. + /// Create a new beacon chain by using a HTTP server (running our REST-API) to load genesis and + /// finalized states and blocks. HttpBootstrap { server: String, port: Option, }, } -impl Default for BootMethod { +impl Default for BeaconChainStartMethod { fn default() -> Self { - BootMethod::Resume + BeaconChainStartMethod::Resume } } -pub enum GenesisState { - /// Use the mainnet genesis state. - /// - /// Mainnet genesis state is not presently known, so this is a place-holder. 
- Mainnet, - /// Generate a state with `validator_count` validators, all with well-known secret keys. - /// - /// Set the genesis time to be the start of the previous 30-minute window. - RecentGenesis { validator_count: usize }, - /// Generate a state with `genesis_time` and `validator_count` validators, all with well-known - /// secret keys. - Generated { - validator_count: usize, - genesis_time: u64, - }, - /// Load a YAML-encoded genesis state from a file. - Yaml { file: PathBuf }, - /// Use a HTTP server (running our REST-API) to load genesis and finalized states and blocks. - HttpBootstrap { server: String }, -} - impl Default for Config { fn default() -> Self { Self { @@ -86,10 +71,10 @@ impl Default for Config { db_type: "disk".to_string(), db_name: "chain_db".to_string(), network: NetworkConfig::new(), - rpc: rpc::RPCConfig::default(), - rest_api: rest_api::ApiConfig::default(), + rpc: <_>::default(), + rest_api: <_>::default(), spec_constants: TESTNET_SPEC_CONSTANTS.into(), - boot_method: BootMethod::default(), + beacon_chain_start_method: <_>::default(), } } } diff --git a/beacon_node/client/src/lib.rs b/beacon_node/client/src/lib.rs index 6405e05e7..3eb555369 100644 --- a/beacon_node/client/src/lib.rs +++ b/beacon_node/client/src/lib.rs @@ -23,7 +23,7 @@ pub use beacon_chain::BeaconChainTypes; pub use beacon_chain_types::ClientType; pub use beacon_chain_types::InitialiseBeaconChain; pub use bootstrapper::Bootstrapper; -pub use config::{Config as ClientConfig, GenesisState}; +pub use config::Config as ClientConfig; pub use eth2_config::Eth2Config; /// Main beacon node client service. 
This provides the connection and initialisation of the clients diff --git a/beacon_node/src/config.rs b/beacon_node/src/config.rs index b66a00abb..a97ec3708 100644 --- a/beacon_node/src/config.rs +++ b/beacon_node/src/config.rs @@ -2,7 +2,7 @@ use clap::ArgMatches; use client::{Bootstrapper, ClientConfig, Eth2Config}; use eth2_config::{read_from_file, write_to_file}; use rand::{distributions::Alphanumeric, Rng}; -use slog::{crit, info, Logger}; +use slog::{crit, info, warn, Logger}; use std::fs; use std::path::PathBuf; @@ -35,15 +35,16 @@ pub fn get_configs(matches: &ArgMatches, log: &Logger) -> Result { // The bootstrap testnet method requires inserting a libp2p address into the // network config. ("bootstrap", Some(sub_matches)) => { - let server = sub_matches + let server: String = sub_matches .value_of("server") - .ok_or_else(|| "No bootstrap server specified".into())?; + .ok_or_else(|| "No bootstrap server specified")? + .to_string(); let bootstrapper = Bootstrapper::from_server_string(server.to_string())?; - if let Some(server_multiaddr) = - bootstrapper.best_effort_multiaddr(sub_matches.value_of("libp2p_port")) - { + if let Some(server_multiaddr) = bootstrapper.best_effort_multiaddr( + parse_port_option(sub_matches.value_of("libp2p_port")), + ) { info!( log, "Estimated bootstrapper libp2p address"; @@ -83,6 +84,11 @@ pub fn get_configs(matches: &ArgMatches, log: &Logger) -> Result { builder.build() } +/// Decodes an optional string into an optional u16. +fn parse_port_option(o: Option<&str>) -> Option { + o.and_then(|s| s.parse::().ok()) +} + /// Allows for building a set of configurations based upon `clap` arguments. 
struct ConfigBuilder<'a> { matches: &'a ArgMatches<'a>, diff --git a/beacon_node/src/main.rs b/beacon_node/src/main.rs index 12c9b8a01..d7a4bae79 100644 --- a/beacon_node/src/main.rs +++ b/beacon_node/src/main.rs @@ -227,7 +227,9 @@ fn main() { .short("p") .long("port") .value_name("TCP_PORT") - .help("A libp2p listen port used to peer with the bootstrap server")) + .help("A libp2p listen port used to peer with the bootstrap server. This flag is useful \ when port-forwarding is used: you may connect using a different port than \ the one the server is immediately listening on.")) ) .subcommand(SubCommand::with_name("recent") .about("Creates a new genesis state where the genesis time was at the previous \ From 9cdcc7d198b9e0e48e89870ef050958e8bb94abd Mon Sep 17 00:00:00 2001 From: Age Manning Date: Sun, 25 Aug 2019 10:02:54 +1000 Subject: [PATCH 109/305] Update to latest libp2p --- beacon_node/eth2-libp2p/Cargo.toml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/beacon_node/eth2-libp2p/Cargo.toml b/beacon_node/eth2-libp2p/Cargo.toml index 7517c2980..92c2c80d4 100644 --- a/beacon_node/eth2-libp2p/Cargo.toml +++ b/beacon_node/eth2-libp2p/Cargo.toml @@ -7,8 +7,8 @@ edition = "2018" [dependencies] clap = "2.32.0" #SigP repository -libp2p = { git = "https://github.com/SigP/rust-libp2p", rev = "a56865a4077ac54767136b4bee627c9734720a6b" } -enr = { git = "https://github.com/SigP/rust-libp2p/", rev = "a56865a4077ac54767136b4bee627c9734720a6b", features = ["serde"] } +libp2p = { git = "https://github.com/SigP/rust-libp2p", rev = "a6ae26225bf1ef154f8c61a0e5391898ba038948" } +enr = { git = "https://github.com/SigP/rust-libp2p/", rev = "a6ae26225bf1ef154f8c61a0e5391898ba038948", features = ["serde"] } types = { path = "../../eth2/types" } serde = "1.0" serde_derive = "1.0" From 7fd7aa2cdbe1538232440c22ea8f752ede465bd2 Mon Sep 17 00:00:00 2001 From: Paul Hauner Date: Sun, 25 Aug 2019 10:09:51 +1000 Subject: [PATCH 110/305] Tidy ConfigBuilder ---
beacon_node/src/config.rs | 136 +++++++++++++++++++++----------------- 1 file changed, 77 insertions(+), 59 deletions(-) diff --git a/beacon_node/src/config.rs b/beacon_node/src/config.rs index a97ec3708..c1074da03 100644 --- a/beacon_node/src/config.rs +++ b/beacon_node/src/config.rs @@ -14,12 +14,12 @@ type Result = std::result::Result; type Config = (ClientConfig, Eth2Config); /// Gets the fully-initialized global client and eth2 configuration objects. -pub fn get_configs(matches: &ArgMatches, log: &Logger) -> Result { - let mut builder = ConfigBuilder::new(matches, log)?; +pub fn get_configs(cli_args: &ArgMatches, log: &Logger) -> Result { + let mut builder = ConfigBuilder::new(cli_args, log)?; - match matches.subcommand() { - ("testnet", Some(sub_matches)) => { - if sub_matches.is_present("random-datadir") { + match cli_args.subcommand() { + ("testnet", Some(sub_cmd_args)) => { + if sub_cmd_args.is_present("random-datadir") { builder.set_random_datadir()?; } @@ -29,39 +29,13 @@ pub fn get_configs(matches: &ArgMatches, log: &Logger) -> Result { "path" => format!("{:?}", builder.data_dir) ); - builder.update_spec_from_subcommand(&sub_matches)?; + builder.update_spec_from_subcommand(&sub_cmd_args)?; - match sub_matches.subcommand() { + match sub_cmd_args.subcommand() { // The bootstrap testnet method requires inserting a libp2p address into the // network config. - ("bootstrap", Some(sub_matches)) => { - let server: String = sub_matches - .value_of("server") - .ok_or_else(|| "No bootstrap server specified")? 
- .to_string(); - - let bootstrapper = Bootstrapper::from_server_string(server.to_string())?; - - if let Some(server_multiaddr) = bootstrapper.best_effort_multiaddr( - parse_port_option(sub_matches.value_of("libp2p_port")), - ) { - info!( - log, - "Estimated bootstrapper libp2p address"; - "multiaddr" => format!("{:?}", server_multiaddr) - ); - - builder - .client_config - .network - .libp2p_nodes - .push(server_multiaddr); - } else { - warn!( - log, - "Unable to estimate a bootstrapper libp2p address, this node may not find any peers." - ); - }; + ("bootstrap", Some(sub_cmd_args)) => { + builder.import_bootstrap_libp2p_address(&sub_cmd_args)?; } _ => (), }; @@ -81,7 +55,7 @@ pub fn get_configs(matches: &ArgMatches, log: &Logger) -> Result { } }; - builder.build() + builder.build(cli_args) } /// Decodes an optional string into an optional u16. @@ -91,21 +65,20 @@ fn parse_port_option(o: Option<&str>) -> Option { /// Allows for building a set of configurations based upon `clap` arguments. struct ConfigBuilder<'a> { - matches: &'a ArgMatches<'a>, log: &'a Logger, pub data_dir: PathBuf, - pub eth2_config: Eth2Config, - pub client_config: ClientConfig, + eth2_config: Eth2Config, + client_config: ClientConfig, } impl<'a> ConfigBuilder<'a> { /// Create a new builder with default settings. - pub fn new(matches: &'a ArgMatches, log: &'a Logger) -> Result { + pub fn new(cli_args: &'a ArgMatches, log: &'a Logger) -> Result { // Read the `--datadir` flag. // // If it's not present, try and find the home directory (`~`) and push the default data // directory onto it. 
- let data_dir: PathBuf = matches + let data_dir: PathBuf = cli_args .value_of("datadir") .map(|string| PathBuf::from(string)) .or_else(|| { @@ -117,7 +90,6 @@ impl<'a> ConfigBuilder<'a> { .ok_or_else(|| "Unable to find a home directory for the datadir".to_string())?; Ok(Self { - matches, log, data_dir, eth2_config: Eth2Config::minimal(), @@ -125,23 +97,47 @@ impl<'a> ConfigBuilder<'a> { }) } - /// Consumes self, returning the configs. - pub fn build(mut self) -> Result { - self.eth2_config.apply_cli_args(&self.matches)?; - self.client_config - .apply_cli_args(&self.matches, &mut self.log.clone())?; + pub fn set_beacon_chain_start_method(&mut self, cli_args: &ArgMatches) -> Result<()> { + // + } - if self.eth2_config.spec_constants != self.client_config.spec_constants { - crit!(self.log, "Specification constants do not match."; - "client_config" => format!("{}", self.client_config.spec_constants), - "eth2_config" => format!("{}", self.eth2_config.spec_constants) + /// Reads a `server` flag from `cli_args` and attempts to generate a libp2p `Multiaddr` that + /// this client can use to connect to the given `server`. + /// + /// Also reads for a `libp2p_port` flag in `cli_args`, using that as the port for the + /// `Multiaddr`. If `libp2p_port` is not in `cli_args`, attempts to connect to `server` via HTTP + /// and retrieve it's libp2p listen port. + /// + /// Returns an error if the `server` flag is not present in `cli_args`. + pub fn import_bootstrap_libp2p_address(&mut self, cli_args: &ArgMatches) -> Result<()> { + let server: String = cli_args + .value_of("server") + .ok_or_else(|| "No bootstrap server specified")? 
+ .to_string(); + + let bootstrapper = Bootstrapper::from_server_string(server.to_string())?; + + if let Some(server_multiaddr) = + bootstrapper.best_effort_multiaddr(parse_port_option(cli_args.value_of("libp2p_port"))) + { + info!( + self.log, + "Estimated bootstrapper libp2p address"; + "multiaddr" => format!("{:?}", server_multiaddr) ); - return Err("Specification constant mismatch".into()); - } - self.client_config.data_dir = self.data_dir; + self.client_config + .network + .libp2p_nodes + .push(server_multiaddr); + } else { + warn!( + self.log, + "Unable to estimate a bootstrapper libp2p address, this node may not find any peers." + ); + }; - Ok((self.client_config, self.eth2_config)) + Ok(()) } /// Set the config data_dir to be an random directory. @@ -165,20 +161,20 @@ impl<'a> ConfigBuilder<'a> { /// Reads the subcommand and tries to update `self.eth2_config` based up on the `--spec` flag. /// - /// Returns an error if the `--spec` flag is not present. - pub fn update_spec_from_subcommand(&mut self, sub_matches: &ArgMatches) -> Result<()> { + /// Returns an error if the `--spec` flag is not present in the given `cli_args`. + pub fn update_spec_from_subcommand(&mut self, cli_args: &ArgMatches) -> Result<()> { // Re-initialise the `Eth2Config`. // // If a CLI parameter is set, overwrite any config file present. // If a parameter is not set, use either the config file present or default to minimal. 
- let eth2_config = match sub_matches.value_of("spec") { + let eth2_config = match cli_args.value_of("spec") { Some("mainnet") => Eth2Config::mainnet(), Some("minimal") => Eth2Config::minimal(), Some("interop") => Eth2Config::interop(), _ => return Err("Unable to determine specification type.".into()), }; - self.client_config.spec_constants = sub_matches + self.client_config.spec_constants = cli_args .value_of("spec") .expect("Guarded by prior match statement") .to_string(); @@ -244,4 +240,26 @@ impl<'a> ConfigBuilder<'a> { Ok(()) } + + /// Consumes self, returning the configs. + /// + /// The supplied `cli_args` should be the base-level `clap` cli_args (i.e., not a subcommand + /// cli_args). + pub fn build(mut self, cli_args: &ArgMatches) -> Result { + self.eth2_config.apply_cli_args(cli_args)?; + self.client_config + .apply_cli_args(cli_args, &mut self.log.clone())?; + + if self.eth2_config.spec_constants != self.client_config.spec_constants { + crit!(self.log, "Specification constants do not match."; + "client_config" => format!("{}", self.client_config.spec_constants), + "eth2_config" => format!("{}", self.eth2_config.spec_constants) + ); + return Err("Specification constant mismatch".into()); + } + + self.client_config.data_dir = self.data_dir; + + Ok((self.client_config, self.eth2_config)) + } } From 1bea1755c46d17fff9c6cea56e55691bf5cfa1b9 Mon Sep 17 00:00:00 2001 From: Age Manning Date: Sun, 25 Aug 2019 10:13:17 +1000 Subject: [PATCH 111/305] Remove redundant code --- beacon_node/eth2-libp2p/src/rpc/methods.rs | 11 ----------- 1 file changed, 11 deletions(-) diff --git a/beacon_node/eth2-libp2p/src/rpc/methods.rs b/beacon_node/eth2-libp2p/src/rpc/methods.rs index 8fef1a75a..d912bcfa1 100644 --- a/beacon_node/eth2-libp2p/src/rpc/methods.rs +++ b/beacon_node/eth2-libp2p/src/rpc/methods.rs @@ -89,17 +89,6 @@ pub struct BeaconBlocksRequest { pub step: u64, } -// TODO: Currently handle encoding/decoding of blocks in the message handler. 
Leave this struct -// here in case encoding/decoding of ssz requires an object. -/* -/// Response containing a number of beacon block roots from a peer. -#[derive(Encode, Decode, Clone, Debug, PartialEq)] -pub struct BeaconBlocksResponse { - /// List of requested blocks and associated slots. - pub beacon_blocks: Vec, -} -*/ - /// Request a number of beacon block bodies from a peer. #[derive(Encode, Decode, Clone, Debug, PartialEq)] pub struct RecentBeaconBlocksRequest { From 140c677a38d16a8f51f4b6521ee0f74f1cd1ddca Mon Sep 17 00:00:00 2001 From: Paul Hauner Date: Sun, 25 Aug 2019 12:14:04 +1000 Subject: [PATCH 112/305] Add much more progress to new CLI setup --- beacon_node/client/src/beacon_chain_types.rs | 7 +- beacon_node/client/src/config.rs | 9 +- beacon_node/client/src/lib.rs | 2 +- beacon_node/src/config.rs | 258 ++++++++++++++----- beacon_node/src/main.rs | 19 +- 5 files changed, 214 insertions(+), 81 deletions(-) diff --git a/beacon_node/client/src/beacon_chain_types.rs b/beacon_node/client/src/beacon_chain_types.rs index 37e4a055e..7a57aa475 100644 --- a/beacon_node/client/src/beacon_chain_types.rs +++ b/beacon_node/client/src/beacon_chain_types.rs @@ -60,9 +60,10 @@ where T::LmdGhost: LmdGhost, { let genesis_state = match &config.beacon_chain_start_method { - BeaconChainStartMethod::Resume => { - crit!(log, "This release does not support mainnet genesis state."); - return Err("Mainnet is unsupported".into()); + BeaconChainStartMethod::Resume => unimplemented!("No resume code yet"), + BeaconChainStartMethod::Mainnet => { + crit!(log, "No mainnet beacon chain startup specification."); + return Err("Mainnet is not yet specified. 
We're working on it.".into()); } BeaconChainStartMethod::RecentGenesis { validator_count } => { generate_testnet_genesis_state(*validator_count, recent_genesis_time(), &spec) diff --git a/beacon_node/client/src/config.rs b/beacon_node/client/src/config.rs index 1e8f60f6e..f2725b3e7 100644 --- a/beacon_node/client/src/config.rs +++ b/beacon_node/client/src/config.rs @@ -37,7 +37,9 @@ pub enum BeaconChainStartMethod { /// Resume from an existing BeaconChain, loaded from the existing local database. Resume, - /// Create a new beacon chain with `validator_count` validators, all with well-known secret keys. + /// Create a new beacon chain that can connect to mainnet. + Mainnet, + /// Create a new beacon chain with `validator_count` validators, all with well-known secret keys. /// /// Set the genesis time to be the start of the previous 30-minute window. RecentGenesis { validator_count: usize }, @@ -51,10 +53,7 @@ pub enum BeaconChainStartMethod { Yaml { file: PathBuf }, /// Create a new beacon chain by using a HTTP server (running our REST-API) to load genesis and /// finalized states and blocks. - HttpBootstrap { - server: String, - port: Option, - }, + HttpBootstrap { server: String, port: Option }, } impl Default for BeaconChainStartMethod { diff --git a/beacon_node/client/src/lib.rs b/beacon_node/client/src/lib.rs index 3eb555369..9d3e001fa 100644 --- a/beacon_node/client/src/lib.rs +++ b/beacon_node/client/src/lib.rs @@ -23,7 +23,7 @@ pub use beacon_chain::BeaconChainTypes; pub use beacon_chain_types::ClientType; pub use beacon_chain_types::InitialiseBeaconChain; pub use bootstrapper::Bootstrapper; -pub use config::Config as ClientConfig; +pub use config::{BeaconChainStartMethod, Config as ClientConfig}; pub use eth2_config::Eth2Config; /// Main beacon node client service.
This provides the connection and initialisation of the clients diff --git a/beacon_node/src/config.rs b/beacon_node/src/config.rs index c1074da03..68d905ed2 100644 --- a/beacon_node/src/config.rs +++ b/beacon_node/src/config.rs @@ -1,10 +1,10 @@ use clap::ArgMatches; -use client::{Bootstrapper, ClientConfig, Eth2Config}; +use client::{BeaconChainStartMethod, Bootstrapper, ClientConfig, Eth2Config}; use eth2_config::{read_from_file, write_to_file}; use rand::{distributions::Alphanumeric, Rng}; use slog::{crit, info, warn, Logger}; use std::fs; -use std::path::PathBuf; +use std::path::{Path, PathBuf}; pub const DEFAULT_DATA_DIR: &str = ".lighthouse"; pub const CLIENT_CONFIG_FILENAME: &str = "beacon-node.toml"; @@ -19,29 +19,9 @@ pub fn get_configs(cli_args: &ArgMatches, log: &Logger) -> Result { match cli_args.subcommand() { ("testnet", Some(sub_cmd_args)) => { - if sub_cmd_args.is_present("random-datadir") { - builder.set_random_datadir()?; - } - - info!( - log, - "Creating new datadir"; - "path" => format!("{:?}", builder.data_dir) - ); - - builder.update_spec_from_subcommand(&sub_cmd_args)?; - - match sub_cmd_args.subcommand() { - // The bootstrap testnet method requires inserting a libp2p address into the - // network config. - ("bootstrap", Some(sub_cmd_args)) => { - builder.import_bootstrap_libp2p_address(&sub_cmd_args)?; - } - _ => (), - }; - - builder.write_configs_to_new_datadir()?; + process_testnet_subcommand(&mut builder, sub_cmd_args, log)? } + // No sub-command assumes a resume operation. _ => { info!( log, @@ -49,6 +29,20 @@ pub fn get_configs(cli_args: &ArgMatches, log: &Logger) -> Result { "path" => format!("{:?}", builder.data_dir) ); + // If no primary subcommand was given, start the beacon chain from an existing + // database. 
+ builder.set_beacon_chain_start_method(BeaconChainStartMethod::Resume); + + // Whilst there is no large testnet or mainnet force the user to specify how they want + // to start a new chain (e.g., from a genesis YAML file, another node, etc). + if !builder.data_dir.exists() { + return Err( + "No datadir found. To start a new beacon chain, see `testnet --help`. \ + Use `--datadir` to specify a different directory" + .into(), + ); + } + // If the `testnet` command was not provided, attempt to load an existing datadir and // continue with an existing chain. builder.load_from_datadir()?; @@ -58,9 +52,62 @@ pub fn get_configs(cli_args: &ArgMatches, log: &Logger) -> Result { builder.build(cli_args) } -/// Decodes an optional string into an optional u16. -fn parse_port_option(o: Option<&str>) -> Option { - o.and_then(|s| s.parse::().ok()) +/// Process the `testnet` CLI subcommand arguments, updating the `builder`. +fn process_testnet_subcommand( + builder: &mut ConfigBuilder, + cli_args: &ArgMatches, + log: &Logger, +) -> Result<()> { + if cli_args.is_present("random-datadir") { + builder.set_random_datadir()?; + } + + if cli_args.is_present("force") { + builder.clean_datadir()?; + } + + info!( + log, + "Creating new datadir"; + "path" => format!("{:?}", builder.data_dir) + ); + + builder.update_spec_from_subcommand(&cli_args)?; + + // Start matching on the second subcommand (e.g., `testnet bootstrap ...`) + match cli_args.subcommand() { + ("bootstrap", Some(cli_args)) => { + let server = cli_args + .value_of("server") + .ok_or_else(|| "No bootstrap server specified")?; + let port: Option = cli_args + .value_of("port") + .and_then(|s| s.parse::().ok()); + + builder.import_bootstrap_libp2p_address(server, port)?; + + builder.set_beacon_chain_start_method(BeaconChainStartMethod::HttpBootstrap { + server: server.to_string(), + port, + }) + } + ("recent", Some(cli_args)) => { + let validator_count = cli_args + .value_of("validator_count") + .ok_or_else(|| "No validator_count 
specified")? + .parse::() + .map_err(|e| format!("Unable to parse validator_count: {:?}", e))?; + + builder.set_beacon_chain_start_method(BeaconChainStartMethod::RecentGenesis { + validator_count, + }) + } + _ => return Err("No testnet method specified. See 'testnet --help'.".into()), + }; + + builder.write_configs_to_new_datadir()?; + + Ok(()) } /// Allows for building a set of configurations based upon `clap` arguments. @@ -97,29 +144,65 @@ impl<'a> ConfigBuilder<'a> { }) } - pub fn set_beacon_chain_start_method(&mut self, cli_args: &ArgMatches) -> Result<()> { - // + /// Clears any configuration files that would interfere with writing new configs. + /// + /// Moves the following files in `data_dir` into a backup directory: + /// + /// - Client config + /// - Eth2 config + /// - The entire database directory + pub fn clean_datadir(&mut self) -> Result<()> { + let backup_dir = { + let mut s = String::from("backup_"); + s.push_str(&random_string(6)); + self.data_dir.join(s) + }; + + fs::create_dir_all(&backup_dir) + .map_err(|e| format!("Unable to create config backup dir: {:?}", e))?; + + let move_to_backup_dir = |path: &Path| -> Result<()> { + let file_name = path + .file_name() + .ok_or_else(|| "Invalid path found during datadir clean (no filename).")?; + + let mut new = path.to_path_buf(); + new.pop(); + new.push(backup_dir.clone()); + new.push(file_name); + + let _ = fs::rename(path, new); + + Ok(()) + }; + + move_to_backup_dir(&self.data_dir.join(CLIENT_CONFIG_FILENAME))?; + move_to_backup_dir(&self.data_dir.join(ETH2_CONFIG_FILENAME))?; + + if let Some(db_path) = self.client_config.db_path() { + move_to_backup_dir(&db_path)?; + } + + Ok(()) } - /// Reads a `server` flag from `cli_args` and attempts to generate a libp2p `Multiaddr` that - /// this client can use to connect to the given `server`. - /// - /// Also reads for a `libp2p_port` flag in `cli_args`, using that as the port for the - /// `Multiaddr`. 
If `libp2p_port` is not in `cli_args`, attempts to connect to `server` via HTTP - /// and retrieve it's libp2p listen port. - /// - /// Returns an error if the `server` flag is not present in `cli_args`. - pub fn import_bootstrap_libp2p_address(&mut self, cli_args: &ArgMatches) -> Result<()> { - let server: String = cli_args - .value_of("server") - .ok_or_else(|| "No bootstrap server specified")? - .to_string(); + /// Sets the method for starting the beacon chain. + pub fn set_beacon_chain_start_method(&mut self, method: BeaconChainStartMethod) { + self.client_config.beacon_chain_start_method = method; + } + /// Import the libp2p address for `server` into the list of bootnodes in `self`. + /// + /// If `port` is `Some`, it is used as the port for the `Multiaddr`. If `port` is `None`, + /// attempts to connect to the `server` via HTTP and retrieve it's libp2p listen port. + pub fn import_bootstrap_libp2p_address( + &mut self, + server: &str, + port: Option, + ) -> Result<()> { let bootstrapper = Bootstrapper::from_server_string(server.to_string())?; - if let Some(server_multiaddr) = - bootstrapper.best_effort_multiaddr(parse_port_option(cli_args.value_of("libp2p_port"))) - { + if let Some(server_multiaddr) = bootstrapper.best_effort_multiaddr(port) { info!( self.log, "Estimated bootstrapper libp2p address"; @@ -132,9 +215,9 @@ impl<'a> ConfigBuilder<'a> { .push(server_multiaddr); } else { warn!( - self.log, - "Unable to estimate a bootstrapper libp2p address, this node may not find any peers." - ); + self.log, + "Unable to estimate a bootstrapper libp2p address, this node may not find any peers." + ); }; Ok(()) @@ -144,14 +227,9 @@ impl<'a> ConfigBuilder<'a> { /// /// Useful for easily spinning up ephemeral testnets. 
pub fn set_random_datadir(&mut self) -> Result<()> { - let random = rand::thread_rng() - .sample_iter(&Alphanumeric) - .take(10) - .collect::(); - let mut s = DEFAULT_DATA_DIR.to_string(); s.push_str("_random_"); - s.push_str(&random); + s.push_str(&random_string(6)); self.data_dir.pop(); self.data_dir.push(s); @@ -187,12 +265,15 @@ impl<'a> ConfigBuilder<'a> { /// /// Returns an error if `self.data_dir` already exists. pub fn write_configs_to_new_datadir(&mut self) -> Result<()> { + let db_exists = self + .client_config + .db_path() + .map(|d| d.exists()) + .unwrap_or_else(|| false); + // Do not permit creating a new config when the datadir exists. - if self.data_dir.exists() { - return Err( - "Datadir already exists, will not overwrite. Remove the directory or use --datadir." - .into(), - ); + if db_exists { + return Err("Database already exists. See `-f` in `testnet --help`".into()); } // Create `datadir` and any non-existing parent directories. @@ -201,16 +282,35 @@ impl<'a> ConfigBuilder<'a> { format!("{}", e) })?; - // Write the client config to a TOML file in the datadir. - write_to_file( - self.data_dir.join(CLIENT_CONFIG_FILENAME), - &self.client_config, - ) - .map_err(|e| format!("Unable to write {} file: {:?}", CLIENT_CONFIG_FILENAME, e))?; + let client_config_file = self.data_dir.join(CLIENT_CONFIG_FILENAME); + if client_config_file.exists() { + return Err(format!( + "Datadir is not clean, {} exists. See `-f` in `testnet --help`.", + CLIENT_CONFIG_FILENAME + )); + } else { + // Write the onfig to a TOML file in the datadir. + write_to_file( + self.data_dir.join(CLIENT_CONFIG_FILENAME), + &self.client_config, + ) + .map_err(|e| format!("Unable to write {} file: {:?}", CLIENT_CONFIG_FILENAME, e))?; + } - // Write the eth2 config to a TOML file in the datadir. 
- write_to_file(self.data_dir.join(ETH2_CONFIG_FILENAME), &self.eth2_config) + let eth2_config_file = self.data_dir.join(ETH2_CONFIG_FILENAME); + if eth2_config_file.exists() { + return Err(format!( + "Datadir is not clean, {} exists. See `-f` in `testnet --help`.", + ETH2_CONFIG_FILENAME + )); + } else { + // Write the config to a TOML file in the datadir. + write_to_file( + self.data_dir.join(ETH2_CONFIG_FILENAME), + &self.client_config, + ) .map_err(|e| format!("Unable to write {} file: {:?}", ETH2_CONFIG_FILENAME, e))?; + } Ok(()) } @@ -225,7 +325,22 @@ impl<'a> ConfigBuilder<'a> { // public testnet or mainnet). if !self.data_dir.exists() { return Err( - "No datadir found. Use the 'testnet' sub-command to select a testnet type.".into(), + "No datadir found. Either create a new testnet or specify a different `--datadir`." + .into(), + ); + } + + // If there is a path to a databse in the config, ensure it exists. + if !self + .client_config + .db_path() + .map(|path| path.exists()) + .unwrap_or_else(|| true) + { + return Err( + "No database found in datadir. Use the 'testnet -f' sub-command to overwrite the \ + existing datadir, or specify a different `--datadir`." + .into(), ); } @@ -263,3 +378,10 @@ impl<'a> ConfigBuilder<'a> { Ok((self.client_config, self.eth2_config)) } } + +fn random_string(len: usize) -> String { + rand::thread_rng() + .sample_iter(&Alphanumeric) + .take(len) + .collect::() +} diff --git a/beacon_node/src/main.rs b/beacon_node/src/main.rs index d7a4bae79..4430db128 100644 --- a/beacon_node/src/main.rs +++ b/beacon_node/src/main.rs @@ -161,7 +161,7 @@ fn main() { .help("Type of database to use.") .takes_value(true) .possible_values(&["disk", "memory"]) - .default_value("memory"), + .default_value("disk"), ) /* * Logging. 
@@ -207,15 +207,20 @@ fn main() { iteration.") ) .arg( - Arg::with_name("force-create") - .long("force-create") + Arg::with_name("force") + .long("force") .short("f") - .help("If present, will delete any existing datadir before creating a new one. Cannot be \ + .help("If present, will backup any existing config files before creating new ones. Cannot be \ used when specifying --random-datadir (logic error).") .conflicts_with("random-datadir") ) /* * Testnet sub-commands. + * + * `boostrap` + * + * Start a new node by downloading genesis and network info from another node via the + * HTTP API. */ .subcommand(SubCommand::with_name("bootstrap") .about("Connects to the given HTTP server, downloads a genesis state and attempts to peer with it.") @@ -231,6 +236,12 @@ fn main() { when port-fowarding is used: you may connect using a different port than \ the one the server is immediately listening on.")) ) + /* + * `recent` + * + * Start a new node, with a specified number of validators with a genesis time in the last + * 30-minutes. + */ .subcommand(SubCommand::with_name("recent") .about("Creates a new genesis state where the genesis time was at the previous \ 30-minute boundary (e.g., 12:00, 12:30, 13:00, etc.)") From 5a7903a3773ae5c640a0004ef2f2ddec77322dfb Mon Sep 17 00:00:00 2001 From: Michael Sproul Date: Mon, 19 Aug 2019 16:15:55 +1000 Subject: [PATCH 113/305] Improve BeaconState safe accessors And fix a bug in the compact committees accessor. --- .../src/per_epoch_processing.rs | 45 +++----- eth2/types/src/beacon_state.rs | 104 +++++++++++------- 2 files changed, 81 insertions(+), 68 deletions(-) diff --git a/eth2/state_processing/src/per_epoch_processing.rs b/eth2/state_processing/src/per_epoch_processing.rs index 8d6153aea..71d8b20da 100644 --- a/eth2/state_processing/src/per_epoch_processing.rs +++ b/eth2/state_processing/src/per_epoch_processing.rs @@ -221,42 +221,29 @@ pub fn process_final_updates( // Update start shard. 
state.start_shard = state.next_epoch_start_shard(spec)?; - // This is a hack to allow us to update index roots and slashed balances for the next epoch. - // - // The indentation here is to make it obvious where the weird stuff happens. - { - state.slot += 1; - - // Set active index root - let index_epoch = next_epoch + spec.activation_exit_delay; - let indices_list = VariableList::::from( - state.get_active_validator_indices(index_epoch), - ); - state.set_active_index_root( - index_epoch, - Hash256::from_slice(&indices_list.tree_hash_root()), - spec, - )?; - - // Reset slashings - state.set_slashings(next_epoch, 0)?; - - // Set randao mix - state.set_randao_mix(next_epoch, *state.get_randao_mix(current_epoch)?)?; - - state.slot -= 1; - } + // Set active index root + let index_epoch = next_epoch + spec.activation_exit_delay; + let indices_list = VariableList::::from( + state.get_active_validator_indices(index_epoch), + ); + state.set_active_index_root( + index_epoch, + Hash256::from_slice(&indices_list.tree_hash_root()), + spec, + )?; // Set committees root - // Note: we do this out-of-order w.r.t. to the spec, because we don't want the slot to be - // incremented. It's safe because the updates to slashings and the RANDAO mix (above) don't - // affect this. 
state.set_compact_committee_root( next_epoch, get_compact_committees_root(state, RelativeEpoch::Next, spec)?, - spec, )?; + // Reset slashings + state.set_slashings(next_epoch, 0)?; + + // Set randao mix + state.set_randao_mix(next_epoch, *state.get_randao_mix(current_epoch)?)?; + // Set historical root accumulator if next_epoch.as_u64() % (T::SlotsPerHistoricalRoot::to_u64() / T::slots_per_epoch()) == 0 { let historical_batch = state.historical_batch(); diff --git a/eth2/types/src/beacon_state.rs b/eth2/types/src/beacon_state.rs index d312316f3..5b00f08b7 100644 --- a/eth2/types/src/beacon_state.rs +++ b/eth2/types/src/beacon_state.rs @@ -60,6 +60,22 @@ pub enum Error { SszTypesError(ssz_types::Error), } +/// Control whether an epoch-indexed field can be indexed at the next epoch or not. +#[derive(Debug, PartialEq, Clone, Copy)] +enum AllowNextEpoch { + True, + False, +} + +impl AllowNextEpoch { + fn upper_bound_of(self, current_epoch: Epoch) -> Epoch { + match self { + AllowNextEpoch::True => current_epoch + 1, + AllowNextEpoch::False => current_epoch, + } + } +} + /// The state of the `BeaconChain` at some slot. 
/// /// Spec v0.8.0 @@ -108,12 +124,12 @@ where pub start_shard: u64, pub randao_mixes: FixedVector, #[compare_fields(as_slice)] - active_index_roots: FixedVector, + pub active_index_roots: FixedVector, #[compare_fields(as_slice)] - compact_committees_roots: FixedVector, + pub compact_committees_roots: FixedVector, // Slashings - slashings: FixedVector, + pub slashings: FixedVector, // Attestations pub previous_epoch_attestations: VariableList, T::MaxPendingAttestations>, @@ -459,12 +475,16 @@ impl BeaconState { /// Safely obtains the index for `randao_mixes` /// - /// Spec v0.8.0 - fn get_randao_mix_index(&self, epoch: Epoch) -> Result { + /// Spec v0.8.1 + fn get_randao_mix_index( + &self, + epoch: Epoch, + allow_next_epoch: AllowNextEpoch, + ) -> Result { let current_epoch = self.current_epoch(); let len = T::EpochsPerHistoricalVector::to_u64(); - if epoch + len > current_epoch && epoch <= current_epoch { + if current_epoch < epoch + len && epoch <= allow_next_epoch.upper_bound_of(current_epoch) { Ok(epoch.as_usize() % len as usize) } else { Err(Error::EpochOutOfBounds) @@ -492,7 +512,7 @@ impl BeaconState { /// /// Spec v0.8.1 pub fn get_randao_mix(&self, epoch: Epoch) -> Result<&Hash256, Error> { - let i = self.get_randao_mix_index(epoch)?; + let i = self.get_randao_mix_index(epoch, AllowNextEpoch::False)?; Ok(&self.randao_mixes[i]) } @@ -500,21 +520,29 @@ impl BeaconState { /// /// Spec v0.8.1 pub fn set_randao_mix(&mut self, epoch: Epoch, mix: Hash256) -> Result<(), Error> { - let i = self.get_randao_mix_index(epoch)?; + let i = self.get_randao_mix_index(epoch, AllowNextEpoch::True)?; self.randao_mixes[i] = mix; Ok(()) } /// Safely obtains the index for `active_index_roots`, given some `epoch`. /// + /// If `allow_next_epoch` is `True`, then we allow an _extra_ one epoch of lookahead. 
+ /// /// Spec v0.8.1 - fn get_active_index_root_index(&self, epoch: Epoch, spec: &ChainSpec) -> Result { + fn get_active_index_root_index( + &self, + epoch: Epoch, + spec: &ChainSpec, + allow_next_epoch: AllowNextEpoch, + ) -> Result { let current_epoch = self.current_epoch(); let lookahead = spec.activation_exit_delay; let lookback = self.active_index_roots.len() as u64 - lookahead; + let epoch_upper_bound = allow_next_epoch.upper_bound_of(current_epoch) + lookahead; - if epoch + lookback > current_epoch && current_epoch + lookahead >= epoch { + if current_epoch < epoch + lookback && epoch <= epoch_upper_bound { Ok(epoch.as_usize() % self.active_index_roots.len()) } else { Err(Error::EpochOutOfBounds) @@ -525,7 +553,7 @@ impl BeaconState { /// /// Spec v0.8.1 pub fn get_active_index_root(&self, epoch: Epoch, spec: &ChainSpec) -> Result { - let i = self.get_active_index_root_index(epoch, spec)?; + let i = self.get_active_index_root_index(epoch, spec, AllowNextEpoch::False)?; Ok(self.active_index_roots[i]) } @@ -538,7 +566,7 @@ impl BeaconState { index_root: Hash256, spec: &ChainSpec, ) -> Result<(), Error> { - let i = self.get_active_index_root_index(epoch, spec)?; + let i = self.get_active_index_root_index(epoch, spec, AllowNextEpoch::True)?; self.active_index_roots[i] = index_root; Ok(()) } @@ -552,19 +580,17 @@ impl BeaconState { /// Safely obtains the index for `compact_committees_roots`, given some `epoch`. 
/// - /// Spec v0.8.0 + /// Spec v0.8.1 fn get_compact_committee_root_index( &self, epoch: Epoch, - spec: &ChainSpec, + allow_next_epoch: AllowNextEpoch, ) -> Result { let current_epoch = self.current_epoch(); + let len = T::EpochsPerHistoricalVector::to_u64(); - let lookahead = spec.activation_exit_delay; - let lookback = self.compact_committees_roots.len() as u64 - lookahead; - - if epoch + lookback > current_epoch && current_epoch + lookahead >= epoch { - Ok(epoch.as_usize() % self.compact_committees_roots.len()) + if current_epoch < epoch + len && epoch <= allow_next_epoch.upper_bound_of(current_epoch) { + Ok(epoch.as_usize() % len as usize) } else { Err(Error::EpochOutOfBounds) } @@ -572,26 +598,21 @@ impl BeaconState { /// Return the `compact_committee_root` at a recent `epoch`. /// - /// Spec v0.8.0 - pub fn get_compact_committee_root( - &self, - epoch: Epoch, - spec: &ChainSpec, - ) -> Result { - let i = self.get_compact_committee_root_index(epoch, spec)?; + /// Spec v0.8.1 + pub fn get_compact_committee_root(&self, epoch: Epoch) -> Result { + let i = self.get_compact_committee_root_index(epoch, AllowNextEpoch::False)?; Ok(self.compact_committees_roots[i]) } /// Set the `compact_committee_root` at a recent `epoch`. /// - /// Spec v0.8.0 + /// Spec v0.8.1 pub fn set_compact_committee_root( &mut self, epoch: Epoch, index_root: Hash256, - spec: &ChainSpec, ) -> Result<(), Error> { - let i = self.get_compact_committee_root_index(epoch, spec)?; + let i = self.get_compact_committee_root_index(epoch, AllowNextEpoch::True)?; self.compact_committees_roots[i] = index_root; Ok(()) } @@ -642,14 +663,19 @@ impl BeaconState { /// Safely obtain the index for `slashings`, given some `epoch`. 
/// - /// Spec v0.8.0 - fn get_slashings_index(&self, epoch: Epoch) -> Result { + /// Spec v0.8.1 + fn get_slashings_index( + &self, + epoch: Epoch, + allow_next_epoch: AllowNextEpoch, + ) -> Result { // We allow the slashings vector to be accessed at any cached epoch at or before - // the current epoch. - if epoch <= self.current_epoch() - && epoch + T::EpochsPerSlashingsVector::to_u64() >= self.current_epoch() + 1 + // the current epoch, or the next epoch if `AllowNextEpoch::True` is passed. + let current_epoch = self.current_epoch(); + if current_epoch < epoch + T::EpochsPerSlashingsVector::to_u64() + && epoch <= allow_next_epoch.upper_bound_of(current_epoch) { - Ok((epoch.as_u64() % T::EpochsPerSlashingsVector::to_u64()) as usize) + Ok(epoch.as_usize() % T::EpochsPerSlashingsVector::to_usize()) } else { Err(Error::EpochOutOfBounds) } @@ -664,17 +690,17 @@ impl BeaconState { /// Get the total slashed balances for some epoch. /// - /// Spec v0.8.0 + /// Spec v0.8.1 pub fn get_slashings(&self, epoch: Epoch) -> Result { - let i = self.get_slashings_index(epoch)?; + let i = self.get_slashings_index(epoch, AllowNextEpoch::False)?; Ok(self.slashings[i]) } /// Set the total slashed balances for some epoch. 
/// - /// Spec v0.8.0 + /// Spec v0.8.1 pub fn set_slashings(&mut self, epoch: Epoch, value: u64) -> Result<(), Error> { - let i = self.get_slashings_index(epoch)?; + let i = self.get_slashings_index(epoch, AllowNextEpoch::True)?; self.slashings[i] = value; Ok(()) } From cf435d96536567414141ccf3c1bfaa9b292cb523 Mon Sep 17 00:00:00 2001 From: Paul Hauner Date: Mon, 26 Aug 2019 14:45:49 +1000 Subject: [PATCH 114/305] Refactor beacon chain start code --- beacon_node/Cargo.toml | 1 + beacon_node/beacon_chain/Cargo.toml | 3 + beacon_node/beacon_chain/src/beacon_chain.rs | 8 +- .../beacon_chain/src/beacon_chain_builder.rs | 98 ++++++++-- .../src/bootstrapper.rs | 0 beacon_node/beacon_chain/src/lib.rs | 2 + beacon_node/beacon_chain/src/test_utils.rs | 20 +-- beacon_node/client/Cargo.toml | 2 - beacon_node/client/src/beacon_chain_types.rs | 170 ------------------ beacon_node/client/src/lib.rs | 74 ++++++-- beacon_node/src/config.rs | 3 +- beacon_node/src/run.rs | 7 +- 12 files changed, 161 insertions(+), 227 deletions(-) rename beacon_node/{client => beacon_chain}/src/bootstrapper.rs (100%) delete mode 100644 beacon_node/client/src/beacon_chain_types.rs diff --git a/beacon_node/Cargo.toml b/beacon_node/Cargo.toml index 9ce724c14..5efb73423 100644 --- a/beacon_node/Cargo.toml +++ b/beacon_node/Cargo.toml @@ -6,6 +6,7 @@ edition = "2018" [dependencies] eth2_config = { path = "../eth2/utils/eth2_config" } +beacon_chain = { path = "beacon_chain" } types = { path = "../eth2/types" } store = { path = "./store" } client = { path = "client" } diff --git a/beacon_node/beacon_chain/Cargo.toml b/beacon_node/beacon_chain/Cargo.toml index 31f341286..018ea1976 100644 --- a/beacon_node/beacon_chain/Cargo.toml +++ b/beacon_node/beacon_chain/Cargo.toml @@ -11,9 +11,11 @@ lazy_static = "1.3.0" lighthouse_metrics = { path = "../../eth2/utils/lighthouse_metrics" } log = "0.4" operation_pool = { path = "../../eth2/operation_pool" } +reqwest = "0.9" serde = "1.0" serde_derive = "1.0" serde_yaml 
= "0.8" +eth2-libp2p = { path = "../eth2-libp2p" } slog = { version = "^2.2.3" , features = ["max_level_trace"] } sloggers = { version = "^0.3" } slot_clock = { path = "../../eth2/utils/slot_clock" } @@ -22,6 +24,7 @@ eth2_ssz_derive = "0.1" state_processing = { path = "../../eth2/state_processing" } tree_hash = "0.1" types = { path = "../../eth2/types" } +url = "1.2" lmd_ghost = { path = "../../eth2/lmd_ghost" } [dev-dependencies] diff --git a/beacon_node/beacon_chain/src/beacon_chain.rs b/beacon_node/beacon_chain/src/beacon_chain.rs index 5feefd841..d79d8c358 100644 --- a/beacon_node/beacon_chain/src/beacon_chain.rs +++ b/beacon_node/beacon_chain/src/beacon_chain.rs @@ -114,7 +114,6 @@ impl BeaconChain { /// Instantiate a new Beacon Chain, from genesis. pub fn from_genesis( store: Arc, - slot_clock: T::SlotClock, mut genesis_state: BeaconState, mut genesis_block: BeaconBlock, spec: ChainSpec, @@ -147,6 +146,13 @@ impl BeaconChain { "genesis_block_root" => format!("{}", genesis_block_root), ); + // Slot clock + let slot_clock = T::SlotClock::new( + spec.genesis_slot, + genesis_state.genesis_time, + spec.seconds_per_slot, + ); + Ok(Self { spec, slot_clock, diff --git a/beacon_node/beacon_chain/src/beacon_chain_builder.rs b/beacon_node/beacon_chain/src/beacon_chain_builder.rs index a6c77cb63..79c74b006 100644 --- a/beacon_node/beacon_chain/src/beacon_chain_builder.rs +++ b/beacon_node/beacon_chain/src/beacon_chain_builder.rs @@ -1,49 +1,115 @@ -use crate::BeaconChainTypes; +use super::bootstrapper::Bootstrapper; +use crate::{BeaconChain, BeaconChainTypes}; +use slog::Logger; use std::fs::File; use std::path::PathBuf; +use std::sync::Arc; use std::time::SystemTime; -use types::{ - test_utils::TestingBeaconStateBuilder, BeaconBlock, BeaconState, ChainSpec, EthSpec, Hash256, -}; +use types::{test_utils::TestingBeaconStateBuilder, BeaconBlock, BeaconState, ChainSpec, EthSpec}; + +enum BuildStrategy { + FromGenesis { + genesis_state: Box>, + genesis_block: Box>, + }, + 
LoadFromStore, +} pub struct BeaconChainBuilder { - genesis_state: BeaconState, - genesis_block: BeaconBlock, + build_strategy: BuildStrategy, spec: ChainSpec, + log: Logger, } impl BeaconChainBuilder { - pub fn recent_genesis(validator_count: usize, spec: ChainSpec) -> Self { - Self::quick_start(recent_genesis_time(), validator_count, spec) + pub fn recent_genesis(validator_count: usize, spec: ChainSpec, log: Logger) -> Self { + Self::quick_start(recent_genesis_time(), validator_count, spec, log) } - pub fn quick_start(genesis_time: u64, validator_count: usize, spec: ChainSpec) -> Self { + pub fn quick_start( + genesis_time: u64, + validator_count: usize, + spec: ChainSpec, + log: Logger, + ) -> Self { let (mut genesis_state, _keypairs) = TestingBeaconStateBuilder::from_default_keypairs_file_if_exists(validator_count, &spec) .build(); genesis_state.genesis_time = genesis_time; - Self::from_genesis_state(genesis_state, spec) + Self::from_genesis_state(genesis_state, spec, log) } - pub fn yaml_state(file: PathBuf, spec: ChainSpec) -> Result { + pub fn yaml_state(file: &PathBuf, spec: ChainSpec, log: Logger) -> Result { let file = File::open(file.clone()) .map_err(|e| format!("Unable to open YAML genesis state file {:?}: {:?}", file, e))?; let genesis_state = serde_yaml::from_reader(file) .map_err(|e| format!("Unable to parse YAML genesis state file: {:?}", e))?; - Ok(Self::from_genesis_state(genesis_state, spec)) + Ok(Self::from_genesis_state(genesis_state, spec, log)) } - pub fn from_genesis_state(genesis_state: BeaconState, spec: ChainSpec) -> Self { - Self { - genesis_block: genesis_block(&genesis_state, &spec), - genesis_state, + pub fn http_bootstrap(server: &str, spec: ChainSpec, log: Logger) -> Result { + let bootstrapper = Bootstrapper::from_server_string(server.to_string()) + .map_err(|e| format!("Failed to initialize bootstrap client: {}", e))?; + + let (genesis_state, genesis_block) = bootstrapper + .genesis() + .map_err(|e| format!("Failed to bootstrap 
genesis state: {}", e))?; + + Ok(Self { + build_strategy: BuildStrategy::FromGenesis { + genesis_block: Box::new(genesis_block), + genesis_state: Box::new(genesis_state), + }, spec, + log, + }) + } + + fn from_genesis_state( + genesis_state: BeaconState, + spec: ChainSpec, + log: Logger, + ) -> Self { + Self { + build_strategy: BuildStrategy::FromGenesis { + genesis_block: Box::new(genesis_block(&genesis_state, &spec)), + genesis_state: Box::new(genesis_state), + }, + spec, + log, } } + + pub fn from_store(spec: ChainSpec, log: Logger) -> Self { + Self { + build_strategy: BuildStrategy::LoadFromStore, + spec, + log, + } + } + + pub fn build(self, store: Arc) -> Result, String> { + Ok(match self.build_strategy { + BuildStrategy::LoadFromStore => BeaconChain::from_store(store, self.spec, self.log) + .map_err(|e| format!("Error loading BeaconChain from database: {:?}", e))? + .ok_or_else(|| format!("Unable to find exising BeaconChain in database."))?, + BuildStrategy::FromGenesis { + genesis_block, + genesis_state, + } => BeaconChain::from_genesis( + store, + genesis_state.as_ref().clone(), + genesis_block.as_ref().clone(), + self.spec, + self.log, + ) + .map_err(|e| format!("Failed to initialize new beacon chain: {:?}", e))?, + }) + } } fn genesis_block(genesis_state: &BeaconState, spec: &ChainSpec) -> BeaconBlock { diff --git a/beacon_node/client/src/bootstrapper.rs b/beacon_node/beacon_chain/src/bootstrapper.rs similarity index 100% rename from beacon_node/client/src/bootstrapper.rs rename to beacon_node/beacon_chain/src/bootstrapper.rs diff --git a/beacon_node/beacon_chain/src/lib.rs b/beacon_node/beacon_chain/src/lib.rs index 9c833f778..560da6519 100644 --- a/beacon_node/beacon_chain/src/lib.rs +++ b/beacon_node/beacon_chain/src/lib.rs @@ -4,6 +4,7 @@ extern crate lazy_static; mod beacon_chain; mod beacon_chain_builder; +mod bootstrapper; mod checkpoint; mod errors; mod fork_choice; @@ -18,6 +19,7 @@ pub use self::beacon_chain::{ pub use 
self::checkpoint::CheckPoint; pub use self::errors::{BeaconChainError, BlockProductionError}; pub use beacon_chain_builder::BeaconChainBuilder; +pub use bootstrapper::Bootstrapper; pub use lmd_ghost; pub use metrics::scrape_for_metrics; pub use parking_lot; diff --git a/beacon_node/beacon_chain/src/test_utils.rs b/beacon_node/beacon_chain/src/test_utils.rs index 09f4749ea..29696b771 100644 --- a/beacon_node/beacon_chain/src/test_utils.rs +++ b/beacon_node/beacon_chain/src/test_utils.rs @@ -1,7 +1,6 @@ use crate::{BeaconChain, BeaconChainTypes, BlockProcessingOutcome}; use lmd_ghost::LmdGhost; use sloggers::{null::NullLoggerBuilder, Build}; -use slot_clock::SlotClock; use slot_clock::TestingSlotClock; use state_processing::per_slot_processing; use std::marker::PhantomData; @@ -114,22 +113,9 @@ where let builder = NullLoggerBuilder; let log = builder.build().expect("logger should build"); - // Slot clock - let slot_clock = TestingSlotClock::new( - spec.genesis_slot, - genesis_state.genesis_time, - spec.seconds_per_slot, - ); - - let chain = BeaconChain::from_genesis( - store, - slot_clock, - genesis_state, - genesis_block, - spec.clone(), - log, - ) - .expect("Terminate if beacon chain generation fails"); + let chain = + BeaconChain::from_genesis(store, genesis_state, genesis_block, spec.clone(), log) + .expect("Terminate if beacon chain generation fails"); Self { chain, diff --git a/beacon_node/client/Cargo.toml b/beacon_node/client/Cargo.toml index 9b5a9cf42..05c58cc8b 100644 --- a/beacon_node/client/Cargo.toml +++ b/beacon_node/client/Cargo.toml @@ -27,5 +27,3 @@ clap = "2.32.0" dirs = "1.0.3" exit-future = "0.1.3" futures = "0.1.25" -reqwest = "0.9" -url = "1.2" diff --git a/beacon_node/client/src/beacon_chain_types.rs b/beacon_node/client/src/beacon_chain_types.rs deleted file mode 100644 index 7a57aa475..000000000 --- a/beacon_node/client/src/beacon_chain_types.rs +++ /dev/null @@ -1,170 +0,0 @@ -use crate::bootstrapper::Bootstrapper; -use crate::error::Result; 
-use crate::{config::BeaconChainStartMethod, ClientConfig}; -use beacon_chain::{ - lmd_ghost::{LmdGhost, ThreadSafeReducedTree}, - slot_clock::SystemTimeSlotClock, - store::Store, - BeaconChain, BeaconChainTypes, -}; -use slog::{crit, info, Logger}; -use slot_clock::SlotClock; -use std::fs::File; -use std::marker::PhantomData; -use std::sync::Arc; -use std::time::SystemTime; -use tree_hash::TreeHash; -use types::{ - test_utils::TestingBeaconStateBuilder, BeaconBlock, BeaconState, ChainSpec, EthSpec, Hash256, -}; - -/// Provides a new, initialized `BeaconChain` -pub trait InitialiseBeaconChain { - fn initialise_beacon_chain( - store: Arc, - config: &ClientConfig, - spec: ChainSpec, - log: Logger, - ) -> Result> { - maybe_load_from_store_for_testnet::<_, T::Store, T::EthSpec>(store, config, spec, log) - } -} - -#[derive(Clone)] -pub struct ClientType { - _phantom_t: PhantomData, - _phantom_u: PhantomData, -} - -impl BeaconChainTypes for ClientType -where - S: Store + 'static, - E: EthSpec, -{ - type Store = S; - type SlotClock = SystemTimeSlotClock; - type LmdGhost = ThreadSafeReducedTree; - type EthSpec = E; -} -impl InitialiseBeaconChain for ClientType {} - -/// Loads a `BeaconChain` from `store`, if it exists. Otherwise, create a new chain from genesis. -fn maybe_load_from_store_for_testnet( - store: Arc, - config: &ClientConfig, - spec: ChainSpec, - log: Logger, -) -> Result> -where - T: BeaconChainTypes, - T::LmdGhost: LmdGhost, -{ - let genesis_state = match &config.beacon_chain_start_method { - BeaconChainStartMethod::Resume => unimplemented!("No resume code yet"), - BeaconChainStartMethod::Mainnet => { - crit!(log, "No mainnet beacon chain startup specification."); - return Err("Mainnet is not yet specified. 
We're working on it.".into()); - } - BeaconChainStartMethod::RecentGenesis { validator_count } => { - generate_testnet_genesis_state(*validator_count, recent_genesis_time(), &spec) - } - BeaconChainStartMethod::Generated { - validator_count, - genesis_time, - } => generate_testnet_genesis_state(*validator_count, *genesis_time, &spec), - BeaconChainStartMethod::Yaml { file } => { - let file = File::open(file).map_err(|e| { - format!("Unable to open YAML genesis state file {:?}: {:?}", file, e) - })?; - - serde_yaml::from_reader(file) - .map_err(|e| format!("Unable to parse YAML genesis state file: {:?}", e))? - } - BeaconChainStartMethod::HttpBootstrap { server, .. } => { - let bootstrapper = Bootstrapper::from_server_string(server.to_string()) - .map_err(|e| format!("Failed to initialize bootstrap client: {}", e))?; - - let (state, _block) = bootstrapper - .genesis() - .map_err(|e| format!("Failed to bootstrap genesis state: {}", e))?; - - state - } - }; - - let mut genesis_block = BeaconBlock::empty(&spec); - genesis_block.state_root = Hash256::from_slice(&genesis_state.tree_hash_root()); - let genesis_block_root = genesis_block.canonical_root(); - - // Slot clock - let slot_clock = T::SlotClock::new( - spec.genesis_slot, - genesis_state.genesis_time, - spec.seconds_per_slot, - ); - - // Try load an existing `BeaconChain` from the store. If unable, create a new one. - if let Ok(Some(beacon_chain)) = - BeaconChain::from_store(store.clone(), spec.clone(), log.clone()) - { - // Here we check to ensure that the `BeaconChain` loaded from store has the expected - // genesis block. - // - // Without this check, it's possible that there will be an existing DB with a `BeaconChain` - // that has different parameters than provided to this executable. 
- if beacon_chain.genesis_block_root == genesis_block_root { - info!( - log, - "Loaded BeaconChain from store"; - "slot" => beacon_chain.head().beacon_state.slot, - "best_slot" => beacon_chain.best_slot(), - ); - - Ok(beacon_chain) - } else { - crit!( - log, - "The BeaconChain loaded from disk has an incorrect genesis root. \ - This may be caused by an old database in located in datadir." - ); - Err("Incorrect genesis root".into()) - } - } else { - BeaconChain::from_genesis( - store, - slot_clock, - genesis_state, - genesis_block, - spec, - log.clone(), - ) - .map_err(|e| format!("Failed to initialize new beacon chain: {:?}", e).into()) - } -} - -fn generate_testnet_genesis_state( - validator_count: usize, - genesis_time: u64, - spec: &ChainSpec, -) -> BeaconState { - let (mut genesis_state, _keypairs) = - TestingBeaconStateBuilder::from_default_keypairs_file_if_exists(validator_count, spec) - .build(); - - genesis_state.genesis_time = genesis_time; - - genesis_state -} - -/// Returns the system time, mod 30 minutes. -/// -/// Used for easily creating testnets. -fn recent_genesis_time() -> u64 { - let now = SystemTime::now() - .duration_since(SystemTime::UNIX_EPOCH) - .unwrap() - .as_secs(); - let secs_after_last_period = now.checked_rem(30 * 60).unwrap_or(0); - // genesis is now the last 30 minute block. 
- now - secs_after_last_period -} diff --git a/beacon_node/client/src/lib.rs b/beacon_node/client/src/lib.rs index 9d3e001fa..e2baf22d5 100644 --- a/beacon_node/client/src/lib.rs +++ b/beacon_node/client/src/lib.rs @@ -1,31 +1,47 @@ extern crate slog; -mod beacon_chain_types; -mod bootstrapper; mod config; pub mod error; pub mod notifier; -use beacon_chain::BeaconChain; +use beacon_chain::{ + lmd_ghost::ThreadSafeReducedTree, slot_clock::SystemTimeSlotClock, store::Store, BeaconChain, + BeaconChainBuilder, +}; use exit_future::Signal; use futures::{future::Future, Stream}; use network::Service as NetworkService; -use slog::{error, info, o}; +use slog::{crit, error, info, o}; use slot_clock::SlotClock; use std::marker::PhantomData; use std::sync::Arc; use std::time::{Duration, Instant}; use tokio::runtime::TaskExecutor; use tokio::timer::Interval; +use types::EthSpec; pub use beacon_chain::BeaconChainTypes; -pub use beacon_chain_types::ClientType; -pub use beacon_chain_types::InitialiseBeaconChain; -pub use bootstrapper::Bootstrapper; pub use config::{BeaconChainStartMethod, Config as ClientConfig}; pub use eth2_config::Eth2Config; +#[derive(Clone)] +pub struct ClientType { + _phantom_t: PhantomData, + _phantom_u: PhantomData, +} + +impl BeaconChainTypes for ClientType +where + S: Store + 'static, + E: EthSpec, +{ + type Store = S; + type SlotClock = SystemTimeSlotClock; + type LmdGhost = ThreadSafeReducedTree; + type EthSpec = E; +} + /// Main beacon node client service. This provides the connection and initialisation of the clients /// sub-services in multiple threads. pub struct Client { @@ -49,7 +65,7 @@ pub struct Client { impl Client where - T: BeaconChainTypes + InitialiseBeaconChain + Clone, + T: BeaconChainTypes + Clone, { /// Generate an instance of the client. Spawn and link all internal sub-processes. 
pub fn new( @@ -62,13 +78,41 @@ where let store = Arc::new(store); let seconds_per_slot = eth2_config.spec.seconds_per_slot; - // Load a `BeaconChain` from the store, or create a new one if it does not exist. - let beacon_chain = Arc::new(T::initialise_beacon_chain( - store, - &client_config, - eth2_config.spec.clone(), - log.clone(), - )?); + let spec = ð2_config.spec.clone(); + + let beacon_chain_builder = match &client_config.beacon_chain_start_method { + BeaconChainStartMethod::Resume => { + BeaconChainBuilder::from_store(spec.clone(), log.clone()) + } + BeaconChainStartMethod::Mainnet => { + crit!(log, "No mainnet beacon chain startup specification."); + return Err("Mainnet is not yet specified. We're working on it.".into()); + } + BeaconChainStartMethod::RecentGenesis { validator_count } => { + BeaconChainBuilder::recent_genesis(*validator_count, spec.clone(), log.clone()) + } + BeaconChainStartMethod::Generated { + validator_count, + genesis_time, + } => BeaconChainBuilder::quick_start( + *genesis_time, + *validator_count, + spec.clone(), + log.clone(), + ), + BeaconChainStartMethod::Yaml { file } => { + BeaconChainBuilder::yaml_state(file, spec.clone(), log.clone())? + } + BeaconChainStartMethod::HttpBootstrap { server, .. } => { + BeaconChainBuilder::http_bootstrap(server, spec.clone(), log.clone())? 
+ } + }; + + let beacon_chain: Arc> = Arc::new( + beacon_chain_builder + .build(store) + .map_err(error::Error::from)?, + ); if beacon_chain.read_slot_clock().is_none() { panic!("Cannot start client before genesis!") diff --git a/beacon_node/src/config.rs b/beacon_node/src/config.rs index 68d905ed2..9fac9b49a 100644 --- a/beacon_node/src/config.rs +++ b/beacon_node/src/config.rs @@ -1,5 +1,6 @@ +use beacon_chain::Bootstrapper; use clap::ArgMatches; -use client::{BeaconChainStartMethod, Bootstrapper, ClientConfig, Eth2Config}; +use client::{BeaconChainStartMethod, ClientConfig, Eth2Config}; use eth2_config::{read_from_file, write_to_file}; use rand::{distributions::Alphanumeric, Rng}; use slog::{crit, info, warn, Logger}; diff --git a/beacon_node/src/run.rs b/beacon_node/src/run.rs index e23b5bc72..620cb64bb 100644 --- a/beacon_node/src/run.rs +++ b/beacon_node/src/run.rs @@ -1,7 +1,4 @@ -use client::{ - error, notifier, BeaconChainTypes, Client, ClientConfig, ClientType, Eth2Config, - InitialiseBeaconChain, -}; +use client::{error, notifier, BeaconChainTypes, Client, ClientConfig, ClientType, Eth2Config}; use futures::sync::oneshot; use futures::Future; use slog::{error, info}; @@ -117,7 +114,7 @@ fn run( log: &slog::Logger, ) -> error::Result<()> where - T: BeaconChainTypes + InitialiseBeaconChain + Clone, + T: BeaconChainTypes + Clone, T::Store: OpenDatabase, { let store = T::Store::open_database(&db_path)?; From b58aa1d1481b4b7104032c48e30a5de99aed7a20 Mon Sep 17 00:00:00 2001 From: Paul Hauner Date: Mon, 26 Aug 2019 15:47:03 +1000 Subject: [PATCH 115/305] Add custom config options to testnet sub-cmd --- beacon_node/src/config.rs | 61 ++++++++++++++++++++++++++++----------- beacon_node/src/main.rs | 20 +++++++++++-- 2 files changed, 62 insertions(+), 19 deletions(-) diff --git a/beacon_node/src/config.rs b/beacon_node/src/config.rs index 9fac9b49a..c8a9299a5 100644 --- a/beacon_node/src/config.rs +++ b/beacon_node/src/config.rs @@ -67,14 +67,28 @@ fn 
process_testnet_subcommand( builder.clean_datadir()?; } + if let Some(path_string) = cli_args.value_of("eth2-config") { + let path = path_string + .parse::() + .map_err(|e| format!("Unable to parse eth2-config path: {:?}", e))?; + builder.load_eth2_config(path)?; + } else { + builder.update_spec_from_subcommand(&cli_args)?; + } + + if let Some(path_string) = cli_args.value_of("config") { + let path = path_string + .parse::() + .map_err(|e| format!("Unable to parse config path: {:?}", e))?; + builder.load_client_config(path)?; + } + info!( log, "Creating new datadir"; "path" => format!("{:?}", builder.data_dir) ); - builder.update_spec_from_subcommand(&cli_args)?; - // Start matching on the second subcommand (e.g., `testnet bootstrap ...`) match cli_args.subcommand() { ("bootstrap", Some(cli_args)) => { @@ -82,7 +96,7 @@ fn process_testnet_subcommand( .value_of("server") .ok_or_else(|| "No bootstrap server specified")?; let port: Option = cli_args - .value_of("port") + .value_of("libp2p-port") .and_then(|s| s.parse::().ok()); builder.import_bootstrap_libp2p_address(server, port)?; @@ -306,11 +320,8 @@ impl<'a> ConfigBuilder<'a> { )); } else { // Write the config to a TOML file in the datadir. - write_to_file( - self.data_dir.join(ETH2_CONFIG_FILENAME), - &self.client_config, - ) - .map_err(|e| format!("Unable to write {} file: {:?}", ETH2_CONFIG_FILENAME, e))?; + write_to_file(self.data_dir.join(ETH2_CONFIG_FILENAME), &self.eth2_config) + .map_err(|e| format!("Unable to write {} file: {:?}", ETH2_CONFIG_FILENAME, e))?; } Ok(()) @@ -339,20 +350,36 @@ impl<'a> ConfigBuilder<'a> { .unwrap_or_else(|| true) { return Err( - "No database found in datadir. Use the 'testnet -f' sub-command to overwrite the \ - existing datadir, or specify a different `--datadir`." + "No database found in datadir. Use 'testnet -f' to overwrite the existing \ + datadir, or specify a different `--datadir`." 
.into(), ); } - self.eth2_config = read_from_file::(self.data_dir.join(ETH2_CONFIG_FILENAME)) - .map_err(|e| format!("Unable to parse {} file: {:?}", ETH2_CONFIG_FILENAME, e))? - .ok_or_else(|| format!("{} file does not exist", ETH2_CONFIG_FILENAME))?; + self.load_eth2_config(self.data_dir.join(ETH2_CONFIG_FILENAME))?; + self.load_client_config(self.data_dir.join(CLIENT_CONFIG_FILENAME))?; - self.client_config = - read_from_file::(self.data_dir.join(CLIENT_CONFIG_FILENAME)) - .map_err(|e| format!("Unable to parse {} file: {:?}", CLIENT_CONFIG_FILENAME, e))? - .ok_or_else(|| format!("{} file does not exist", ETH2_CONFIG_FILENAME))?; + Ok(()) + } + + /// Attempts to load the client config from `path`. + /// + /// Returns an error if any files are not found or are invalid. + pub fn load_client_config(&mut self, path: PathBuf) -> Result<()> { + self.client_config = read_from_file::(path) + .map_err(|e| format!("Unable to parse ClientConfig file: {:?}", e))? + .ok_or_else(|| "ClientConfig file does not exist".to_string())?; + + Ok(()) + } + + /// Attempts to load the eth2 config from `path`. + /// + /// Returns an error if any files are not found or are invalid. + pub fn load_eth2_config(&mut self, path: PathBuf) -> Result<()> { + self.eth2_config = read_from_file::(path) + .map_err(|e| format!("Unable to parse Eth2Config file: {:?}", e))? 
+ .ok_or_else(|| "Eth2Config file does not exist".to_string())?; Ok(()) } diff --git a/beacon_node/src/main.rs b/beacon_node/src/main.rs index 4430db128..a9659362c 100644 --- a/beacon_node/src/main.rs +++ b/beacon_node/src/main.rs @@ -198,6 +198,22 @@ fn main() { .takes_value(true) .required(true) .possible_values(&["mainnet", "minimal", "interop"]) + .default_value("minimal") + ) + .arg( + Arg::with_name("eth2-config") + .long("eth2-config") + .value_name("TOML_FILE") + .help("A existing eth2_spec TOML file (e.g., eth2_spec.toml).") + .takes_value(true) + .conflicts_with("spec") + ) + .arg( + Arg::with_name("config") + .long("config") + .value_name("TOML_FILE") + .help("An existing beacon_node TOML file (e.g., beacon_node.toml).") + .takes_value(true) ) .arg( Arg::with_name("random-datadir") @@ -210,8 +226,8 @@ fn main() { Arg::with_name("force") .long("force") .short("f") - .help("If present, will backup any existing config files before creating new ones. Cannot be \ - used when specifying --random-datadir (logic error).") + .help("If present, will create new config and database files and move the any existing to a \ + backup directory.") .conflicts_with("random-datadir") ) /* From bab1f2b06423445e4aa72958bb293c0f65afb190 Mon Sep 17 00:00:00 2001 From: Paul Hauner Date: Mon, 26 Aug 2019 15:51:11 +1000 Subject: [PATCH 116/305] Rename CLI flag --- beacon_node/src/config.rs | 4 ++-- beacon_node/src/main.rs | 4 ++-- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/beacon_node/src/config.rs b/beacon_node/src/config.rs index c8a9299a5..0aa2d29bd 100644 --- a/beacon_node/src/config.rs +++ b/beacon_node/src/config.rs @@ -76,10 +76,10 @@ fn process_testnet_subcommand( builder.update_spec_from_subcommand(&cli_args)?; } - if let Some(path_string) = cli_args.value_of("config") { + if let Some(path_string) = cli_args.value_of("client-config") { let path = path_string .parse::() - .map_err(|e| format!("Unable to parse config path: {:?}", e))?; + .map_err(|e| 
format!("Unable to parse client config path: {:?}", e))?; builder.load_client_config(path)?; } diff --git a/beacon_node/src/main.rs b/beacon_node/src/main.rs index a9659362c..243e4b716 100644 --- a/beacon_node/src/main.rs +++ b/beacon_node/src/main.rs @@ -209,8 +209,8 @@ fn main() { .conflicts_with("spec") ) .arg( - Arg::with_name("config") - .long("config") + Arg::with_name("client-config") + .long("client-config") .value_name("TOML_FILE") .help("An existing beacon_node TOML file (e.g., beacon_node.toml).") .takes_value(true) From 39be2ed1d24f53b5494e53a89cf00f6b1023dd0f Mon Sep 17 00:00:00 2001 From: Paul Hauner Date: Mon, 26 Aug 2019 15:57:40 +1000 Subject: [PATCH 117/305] Improve CLI error messages --- beacon_node/src/config.rs | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/beacon_node/src/config.rs b/beacon_node/src/config.rs index 0aa2d29bd..2c928ad44 100644 --- a/beacon_node/src/config.rs +++ b/beacon_node/src/config.rs @@ -366,9 +366,9 @@ impl<'a> ConfigBuilder<'a> { /// /// Returns an error if any files are not found or are invalid. pub fn load_client_config(&mut self, path: PathBuf) -> Result<()> { - self.client_config = read_from_file::(path) - .map_err(|e| format!("Unable to parse ClientConfig file: {:?}", e))? - .ok_or_else(|| "ClientConfig file does not exist".to_string())?; + self.client_config = read_from_file::(path.clone()) + .map_err(|e| format!("Unable to parse {:?} file: {:?}", path, e))? + .ok_or_else(|| format!("{:?} file does not exist", path))?; Ok(()) } @@ -377,9 +377,9 @@ impl<'a> ConfigBuilder<'a> { /// /// Returns an error if any files are not found or are invalid. pub fn load_eth2_config(&mut self, path: PathBuf) -> Result<()> { - self.eth2_config = read_from_file::(path) - .map_err(|e| format!("Unable to parse Eth2Config file: {:?}", e))? 
- .ok_or_else(|| "Eth2Config file does not exist".to_string())?; + self.eth2_config = read_from_file::(path.clone()) + .map_err(|e| format!("Unable to parse {:?} file: {:?}", path, e))? + .ok_or_else(|| format!("{:?} file does not exist", path))?; Ok(()) } From 901393b6642e5f01971d04fa79cd7ccfb4dac9ef Mon Sep 17 00:00:00 2001 From: Paul Hauner Date: Mon, 26 Aug 2019 16:02:05 +1000 Subject: [PATCH 118/305] Clean datadir after config files have been loaded --- beacon_node/src/config.rs | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/beacon_node/src/config.rs b/beacon_node/src/config.rs index 2c928ad44..f47a2ddb0 100644 --- a/beacon_node/src/config.rs +++ b/beacon_node/src/config.rs @@ -63,10 +63,6 @@ fn process_testnet_subcommand( builder.set_random_datadir()?; } - if cli_args.is_present("force") { - builder.clean_datadir()?; - } - if let Some(path_string) = cli_args.value_of("eth2-config") { let path = path_string .parse::() @@ -83,6 +79,10 @@ fn process_testnet_subcommand( builder.load_client_config(path)?; } + if cli_args.is_present("force") { + builder.clean_datadir()?; + } + info!( log, "Creating new datadir"; From 6875ae8af510ea2fe4bc86671f28770344368def Mon Sep 17 00:00:00 2001 From: Paul Hauner Date: Tue, 27 Aug 2019 00:04:15 +1000 Subject: [PATCH 119/305] Pull Eth2Config during bootstrap --- beacon_node/beacon_chain/Cargo.toml | 1 + beacon_node/beacon_chain/src/bootstrapper.rs | 19 +++++++++++++++++++ beacon_node/client/src/lib.rs | 1 + beacon_node/rest_api/Cargo.toml | 1 + beacon_node/rest_api/src/lib.rs | 7 +++++++ beacon_node/rest_api/src/spec.rs | 14 ++++++++++++++ beacon_node/src/config.rs | 20 ++++++++++++++++++++ 7 files changed, 63 insertions(+) diff --git a/beacon_node/beacon_chain/Cargo.toml b/beacon_node/beacon_chain/Cargo.toml index 018ea1976..f6763d167 100644 --- a/beacon_node/beacon_chain/Cargo.toml +++ b/beacon_node/beacon_chain/Cargo.toml @@ -5,6 +5,7 @@ authors = ["Paul Hauner ", "Age Manning Result { + 
get_eth2_config(self.url.clone()).map_err(|e| format!("Unable to get Eth2Config: {:?}", e)) + } + /// Returns the servers ENR address. pub fn enr(&self) -> Result { get_enr(self.url.clone()).map_err(|e| format!("Unable to get ENR: {:?}", e)) @@ -129,6 +135,19 @@ fn get_slots_per_epoch(mut url: Url) -> Result { .map_err(Into::into) } +fn get_eth2_config(mut url: Url) -> Result { + url.path_segments_mut() + .map(|mut url| { + url.push("spec").push("eth2_config"); + }) + .map_err(|_| Error::InvalidUrl)?; + + reqwest::get(url)? + .error_for_status()? + .json() + .map_err(Into::into) +} + fn get_finalized_slot(mut url: Url, slots_per_epoch: u64) -> Result { url.path_segments_mut() .map(|mut url| { diff --git a/beacon_node/client/src/lib.rs b/beacon_node/client/src/lib.rs index 0bb30d0af..2612fd648 100644 --- a/beacon_node/client/src/lib.rs +++ b/beacon_node/client/src/lib.rs @@ -162,6 +162,7 @@ where beacon_chain.clone(), network.clone(), client_config.db_path().expect("unable to read datadir"), + eth2_config.clone(), &log, ) { Ok(s) => Some(s), diff --git a/beacon_node/rest_api/Cargo.toml b/beacon_node/rest_api/Cargo.toml index cac196d9c..5303dc8bd 100644 --- a/beacon_node/rest_api/Cargo.toml +++ b/beacon_node/rest_api/Cargo.toml @@ -27,5 +27,6 @@ exit-future = "0.1.3" tokio = "0.1.17" url = "2.0" lazy_static = "1.3.0" +eth2_config = { path = "../../eth2/utils/eth2_config" } lighthouse_metrics = { path = "../../eth2/utils/lighthouse_metrics" } slot_clock = { path = "../../eth2/utils/slot_clock" } diff --git a/beacon_node/rest_api/src/lib.rs b/beacon_node/rest_api/src/lib.rs index 964dd7998..b1137c249 100644 --- a/beacon_node/rest_api/src/lib.rs +++ b/beacon_node/rest_api/src/lib.rs @@ -13,6 +13,7 @@ mod url_query; use beacon_chain::{BeaconChain, BeaconChainTypes}; use client_network::Service as NetworkService; +use eth2_config::Eth2Config; use hyper::rt::Future; use hyper::service::service_fn_ok; use hyper::{Body, Method, Response, Server, StatusCode}; @@ -79,6 +80,7 
@@ pub fn start_server( beacon_chain: Arc>, network_service: Arc>, db_path: PathBuf, + eth2_config: Eth2Config, log: &slog::Logger, ) -> Result { let log = log.new(o!("Service" => "Api")); @@ -100,12 +102,14 @@ pub fn start_server( // Clone our stateful objects, for use in service closure. let server_log = log.clone(); let server_bc = beacon_chain.clone(); + let eth2_config = Arc::new(eth2_config); let service = move || { let log = server_log.clone(); let beacon_chain = server_bc.clone(); let db_path = db_path.clone(); let network_service = network_service.clone(); + let eth2_config = eth2_config.clone(); // Create a simple handler for the router, inject our stateful objects into the request. service_fn_ok(move |mut req| { @@ -118,6 +122,8 @@ pub fn start_server( req.extensions_mut().insert::(db_path.clone()); req.extensions_mut() .insert::>>(network_service.clone()); + req.extensions_mut() + .insert::>(eth2_config.clone()); let path = req.uri().path().to_string(); @@ -144,6 +150,7 @@ pub fn start_server( (&Method::GET, "/node/genesis_time") => node::get_genesis_time::(req), (&Method::GET, "/spec") => spec::get_spec::(req), (&Method::GET, "/spec/slots_per_epoch") => spec::get_slots_per_epoch::(req), + (&Method::GET, "/spec/eth2_config") => spec::get_eth2_config::(req), _ => Err(ApiError::MethodNotAllowed(path.clone())), }; diff --git a/beacon_node/rest_api/src/spec.rs b/beacon_node/rest_api/src/spec.rs index d0c8e4368..86d1c227d 100644 --- a/beacon_node/rest_api/src/spec.rs +++ b/beacon_node/rest_api/src/spec.rs @@ -1,6 +1,7 @@ use super::{success_response, ApiResult}; use crate::ApiError; use beacon_chain::{BeaconChain, BeaconChainTypes}; +use eth2_config::Eth2Config; use hyper::{Body, Request}; use std::sync::Arc; use types::EthSpec; @@ -18,6 +19,19 @@ pub fn get_spec(req: Request) -> ApiResult Ok(success_response(Body::from(json))) } +/// HTTP handler to return the full Eth2Config object. 
+pub fn get_eth2_config(req: Request) -> ApiResult { + let eth2_config = req + .extensions() + .get::>() + .ok_or_else(|| ApiError::ServerError("Eth2Config extension missing".to_string()))?; + + let json: String = serde_json::to_string(eth2_config.as_ref()) + .map_err(|e| ApiError::ServerError(format!("Unable to serialize Eth2Config: {:?}", e)))?; + + Ok(success_response(Body::from(json))) +} + /// HTTP handler to return the full spec object. pub fn get_slots_per_epoch(_req: Request) -> ApiResult { let json: String = serde_json::to_string(&T::EthSpec::slots_per_epoch()) diff --git a/beacon_node/src/config.rs b/beacon_node/src/config.rs index f47a2ddb0..e76bd48fa 100644 --- a/beacon_node/src/config.rs +++ b/beacon_node/src/config.rs @@ -63,7 +63,13 @@ fn process_testnet_subcommand( builder.set_random_datadir()?; } + let is_bootstrap = cli_args.subcommand_name() == Some("bootstrap"); + if let Some(path_string) = cli_args.value_of("eth2-config") { + if is_bootstrap { + return Err("Cannot supply --eth2-config when using bootsrap".to_string()); + } + let path = path_string .parse::() .map_err(|e| format!("Unable to parse eth2-config path: {:?}", e))?; @@ -100,6 +106,7 @@ fn process_testnet_subcommand( .and_then(|s| s.parse::().ok()); builder.import_bootstrap_libp2p_address(server, port)?; + builder.import_bootstrap_eth2_config(server)?; builder.set_beacon_chain_start_method(BeaconChainStartMethod::HttpBootstrap { server: server.to_string(), @@ -252,6 +259,19 @@ impl<'a> ConfigBuilder<'a> { Ok(()) } + /// Imports an `Eth2Config` from `server`, returning an error if this fails. 
+ pub fn import_bootstrap_eth2_config(&mut self, server: &str) -> Result<()> { + let bootstrapper = Bootstrapper::from_server_string(server.to_string())?; + + self.update_eth2_config(bootstrapper.eth2_config()?); + + Ok(()) + } + + fn update_eth2_config(&mut self, eth2_config: Eth2Config) { + self.eth2_config = eth2_config; + } + /// Reads the subcommand and tries to update `self.eth2_config` based up on the `--spec` flag. /// /// Returns an error if the `--spec` flag is not present in the given `cli_args`. From 7f6b700b983429f4c67b1592bc76b4fd2486716a Mon Sep 17 00:00:00 2001 From: Paul Hauner Date: Tue, 27 Aug 2019 00:05:25 +1000 Subject: [PATCH 120/305] Remove old git merge relic --- beacon_node/src/main.rs | 1 - 1 file changed, 1 deletion(-) diff --git a/beacon_node/src/main.rs b/beacon_node/src/main.rs index 797217af0..aba44e6fe 100644 --- a/beacon_node/src/main.rs +++ b/beacon_node/src/main.rs @@ -182,7 +182,6 @@ fn main() { .takes_value(true), ) /* -<<<<<<< HEAD * The "testnet" sub-command. * * Allows for creating a new datadir with testnet-specific configs. From ed6c39e25a7ee7fae51ef4d20522ac171a2202aa Mon Sep 17 00:00:00 2001 From: Paul Hauner Date: Tue, 27 Aug 2019 11:19:50 +1000 Subject: [PATCH 121/305] Add log for fork choice integrity in beacon chain --- beacon_node/beacon_chain/src/beacon_chain.rs | 21 ++++++++++---- beacon_node/beacon_chain/src/fork_choice.rs | 8 ++++++ eth2/lmd_ghost/src/lib.rs | 6 ++++ eth2/lmd_ghost/src/reduced_tree.rs | 30 ++++++-------------- 4 files changed, 38 insertions(+), 27 deletions(-) diff --git a/beacon_node/beacon_chain/src/beacon_chain.rs b/beacon_node/beacon_chain/src/beacon_chain.rs index 5feefd841..0fc71fe7b 100644 --- a/beacon_node/beacon_chain/src/beacon_chain.rs +++ b/beacon_node/beacon_chain/src/beacon_chain.rs @@ -739,8 +739,19 @@ impl BeaconChain { } else { // Provide the attestation to fork choice, updating the validator latest messages but // _without_ finding and updating the head. 
- self.fork_choice - .process_attestation(&state, &attestation, block)?; + if let Err(e) = self + .fork_choice + .process_attestation(&state, &attestation, block) + { + error!( + self.log, + "Add attestation to fork choice failed"; + "fork_choice_integrity" => format!("{:?}", self.fork_choice.verify_integrity()), + "beacon_block_root" => format!("{}", attestation.data.beacon_block_root), + "error" => format!("{:?}", e) + ); + return Err(e.into()); + } // Provide the valid attestation to op pool, which may choose to retain the // attestation for inclusion in a future block. @@ -947,10 +958,10 @@ impl BeaconChain { if let Err(e) = self.fork_choice.process_block(&state, &block, block_root) { error!( self.log, - "fork choice failed to process_block"; - "error" => format!("{:?}", e), + "Add block to fork choice failed"; + "fork_choice_integrity" => format!("{:?}", self.fork_choice.verify_integrity()), "block_root" => format!("{}", block_root), - "block_slot" => format!("{}", block.slot) + "error" => format!("{:?}", e), ) } diff --git a/beacon_node/beacon_chain/src/fork_choice.rs b/beacon_node/beacon_chain/src/fork_choice.rs index 77fdaacdc..26084e04a 100644 --- a/beacon_node/beacon_chain/src/fork_choice.rs +++ b/beacon_node/beacon_chain/src/fork_choice.rs @@ -199,6 +199,14 @@ impl ForkChoice { self.backend.latest_message(validator_index) } + /// Runs an integrity verification function on the underlying fork choice algorithm. + /// + /// Returns `Ok(())` if the underlying fork choice has maintained it's integrity, + /// `Err(description)` otherwise. + pub fn verify_integrity(&self) -> core::result::Result<(), String> { + self.backend.verify_integrity() + } + /// Inform the fork choice that the given block (and corresponding root) have been finalized so /// it may prune it's storage. 
/// diff --git a/eth2/lmd_ghost/src/lib.rs b/eth2/lmd_ghost/src/lib.rs index 95cd0679c..167cd36ea 100644 --- a/eth2/lmd_ghost/src/lib.rs +++ b/eth2/lmd_ghost/src/lib.rs @@ -46,4 +46,10 @@ pub trait LmdGhost: Send + Sync { /// Returns the latest message for a given validator index. fn latest_message(&self, validator_index: usize) -> Option<(Hash256, Slot)>; + + /// Runs an integrity verification function on fork choice algorithm. + /// + /// Returns `Ok(())` if the underlying fork choice has maintained it's integrity, + /// `Err(description)` otherwise. + fn verify_integrity(&self) -> Result<()>; } diff --git a/eth2/lmd_ghost/src/reduced_tree.rs b/eth2/lmd_ghost/src/reduced_tree.rs index deda02e1f..cd3a38c46 100644 --- a/eth2/lmd_ghost/src/reduced_tree.rs +++ b/eth2/lmd_ghost/src/reduced_tree.rs @@ -43,16 +43,6 @@ impl fmt::Debug for ThreadSafeReducedTree { } } -impl ThreadSafeReducedTree -where - T: Store, - E: EthSpec, -{ - pub fn verify_integrity(&self) -> std::result::Result<(), String> { - self.core.read().verify_integrity() - } -} - impl LmdGhost for ThreadSafeReducedTree where T: Store, @@ -80,7 +70,7 @@ where fn process_block(&self, block: &BeaconBlock, block_hash: Hash256) -> SuperResult<()> { self.core .write() - .add_weightless_node(block.slot, block_hash) + .maybe_add_weightless_node(block.slot, block_hash) .map_err(|e| format!("process_block failed: {:?}", e)) } @@ -113,6 +103,10 @@ where fn latest_message(&self, validator_index: usize) -> Option<(Hash256, Slot)> { self.core.read().latest_message(validator_index) } + + fn verify_integrity(&self) -> std::result::Result<(), String> { + self.core.read().verify_integrity() + } } struct ReducedTree { @@ -163,15 +157,7 @@ where /// The given `new_root` must be in the block tree (but not necessarily in the reduced tree). /// Any nodes which are not a descendant of `new_root` will be removed from the store. 
pub fn update_root(&mut self, new_slot: Slot, new_root: Hash256) -> Result<()> { - if !self.nodes.contains_key(&new_root) { - let node = Node { - block_hash: new_root, - voters: vec![], - ..Node::default() - }; - - self.add_node(node)?; - } + self.maybe_add_weightless_node(new_slot, new_root)?; self.retain_subtree(self.root.0, new_root)?; @@ -247,7 +233,7 @@ where // // In this case, we add a weightless node at `start_block_root`. if !self.nodes.contains_key(&start_block_root) { - self.add_weightless_node(start_block_slot, start_block_root)?; + self.maybe_add_weightless_node(start_block_slot, start_block_root)?; }; let _root_weight = self.update_weight(start_block_root, weight_fn)?; @@ -430,7 +416,7 @@ where Ok(()) } - fn add_weightless_node(&mut self, slot: Slot, hash: Hash256) -> Result<()> { + fn maybe_add_weightless_node(&mut self, slot: Slot, hash: Hash256) -> Result<()> { if slot > self.root_slot() && !self.nodes.contains_key(&hash) { let node = Node { block_hash: hash, From 4d2cdc94927279dce594510649df4cfedf30f187 Mon Sep 17 00:00:00 2001 From: Michael Sproul Date: Tue, 27 Aug 2019 16:59:53 +1000 Subject: [PATCH 122/305] Update to spec v0.8.3 --- .../src/common/get_attesting_indices.rs | 4 +--- .../src/common/get_compact_committees_root.rs | 21 +++---------------- .../src/common/get_indexed_attestation.rs | 2 ++ .../src/per_epoch_processing.rs | 6 +++--- eth2/types/src/beacon_state.rs | 8 ------- 5 files changed, 9 insertions(+), 32 deletions(-) diff --git a/eth2/state_processing/src/common/get_attesting_indices.rs b/eth2/state_processing/src/common/get_attesting_indices.rs index f558909f6..adb71801a 100644 --- a/eth2/state_processing/src/common/get_attesting_indices.rs +++ b/eth2/state_processing/src/common/get_attesting_indices.rs @@ -17,11 +17,9 @@ pub fn get_attesting_indices( target_relative_epoch, )?; - /* TODO(freeze): re-enable this? 
- if bitlist.len() > committee.committee.len() { + if bitlist.len() != committee.committee.len() { return Err(BeaconStateError::InvalidBitfield); } - */ Ok(committee .committee diff --git a/eth2/state_processing/src/common/get_compact_committees_root.rs b/eth2/state_processing/src/common/get_compact_committees_root.rs index 75edb3549..b8ab4345f 100644 --- a/eth2/state_processing/src/common/get_compact_committees_root.rs +++ b/eth2/state_processing/src/common/get_compact_committees_root.rs @@ -3,7 +3,7 @@ use types::*; /// Return the compact committee root at `relative_epoch`. /// -/// Spec v0.8.0 +/// Spec v0.8.3 pub fn get_compact_committees_root( state: &BeaconState, relative_epoch: RelativeEpoch, @@ -11,28 +11,13 @@ pub fn get_compact_committees_root( ) -> Result { let mut committees = FixedVector::<_, T::ShardCount>::from_elem(CompactCommittee::::default()); - // FIXME: this is a spec bug, whereby the start shard for the epoch after the next epoch - // is mistakenly used. The start shard from the cache SHOULD work. - // Waiting on a release to fix https://github.com/ethereum/eth2.0-specs/issues/1315 - let start_shard = if relative_epoch == RelativeEpoch::Next { - state.next_epoch_start_shard(spec)? - } else { - state.get_epoch_start_shard(relative_epoch)? - }; + let start_shard = state.get_epoch_start_shard(relative_epoch)?; for committee_number in 0..state.get_committee_count(relative_epoch)? { let shard = (start_shard + committee_number) % T::ShardCount::to_u64(); - // FIXME: this is a partial workaround for the above, but it only works in the case - // where there's a committee for every shard in every epoch. It works for the minimal - // tests but not the mainnet ones. - let fake_shard = if relative_epoch == RelativeEpoch::Next { - (shard + 1) % T::ShardCount::to_u64() - } else { - shard - }; for &index in state - .get_crosslink_committee_for_shard(fake_shard, relative_epoch)? + .get_crosslink_committee_for_shard(shard, relative_epoch)? 
.committee { let validator = state diff --git a/eth2/state_processing/src/common/get_indexed_attestation.rs b/eth2/state_processing/src/common/get_indexed_attestation.rs index 7c08c8708..82ca92eb7 100644 --- a/eth2/state_processing/src/common/get_indexed_attestation.rs +++ b/eth2/state_processing/src/common/get_indexed_attestation.rs @@ -11,6 +11,8 @@ pub fn get_indexed_attestation( state: &BeaconState, attestation: &Attestation, ) -> Result, Error> { + // Note: we rely on both calls to `get_attesting_indices` to check the bitfield lengths + // against the committee length let attesting_indices = get_attesting_indices(state, &attestation.data, &attestation.aggregation_bits)?; diff --git a/eth2/state_processing/src/per_epoch_processing.rs b/eth2/state_processing/src/per_epoch_processing.rs index 71d8b20da..08f42a229 100644 --- a/eth2/state_processing/src/per_epoch_processing.rs +++ b/eth2/state_processing/src/per_epoch_processing.rs @@ -218,9 +218,6 @@ pub fn process_final_updates( } } - // Update start shard. - state.start_shard = state.next_epoch_start_shard(spec)?; - // Set active index root let index_epoch = next_epoch + spec.activation_exit_delay; let indices_list = VariableList::::from( @@ -252,6 +249,9 @@ pub fn process_final_updates( .push(Hash256::from_slice(&historical_batch.tree_hash_root()))?; } + // Update start shard. 
+ state.start_shard = state.get_epoch_start_shard(RelativeEpoch::Next)?; + // Rotate current/previous epoch attestations state.previous_epoch_attestations = std::mem::replace(&mut state.current_epoch_attestations, VariableList::empty()); diff --git a/eth2/types/src/beacon_state.rs b/eth2/types/src/beacon_state.rs index 5b00f08b7..9b623c070 100644 --- a/eth2/types/src/beacon_state.rs +++ b/eth2/types/src/beacon_state.rs @@ -298,14 +298,6 @@ impl BeaconState { Ok(cache.epoch_start_shard()) } - pub fn next_epoch_start_shard(&self, spec: &ChainSpec) -> Result { - let cache = self.cache(RelativeEpoch::Current)?; - let active_validator_count = cache.active_validator_count(); - let shard_delta = T::get_shard_delta(active_validator_count, spec.target_committee_size); - - Ok((self.start_shard + shard_delta) % T::ShardCount::to_u64()) - } - /// Get the slot of an attestation. /// /// Note: Utilizes the cache and will fail if the appropriate cache is not initialized. From aed2f6407dac2e644825c76734ce610badf1e637 Mon Sep 17 00:00:00 2001 From: Michael Sproul Date: Tue, 27 Aug 2019 17:00:21 +1000 Subject: [PATCH 123/305] Bump EF tests to v0.8.3 --- tests/ef_tests/eth2.0-spec-tests | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/ef_tests/eth2.0-spec-tests b/tests/ef_tests/eth2.0-spec-tests index aaa1673f5..ae6dd9011 160000 --- a/tests/ef_tests/eth2.0-spec-tests +++ b/tests/ef_tests/eth2.0-spec-tests @@ -1 +1 @@ -Subproject commit aaa1673f508103e11304833e0456e4149f880065 +Subproject commit ae6dd9011df05fab8c7e651c09cf9c940973bf81 From 6bb3a651893960679bf1de3190dd2ed484a34710 Mon Sep 17 00:00:00 2001 From: Paul Hauner Date: Tue, 27 Aug 2019 18:09:31 +1000 Subject: [PATCH 124/305] Guard reduced tree from errors --- eth2/lmd_ghost/src/reduced_tree.rs | 107 +++++++++++++++-------------- 1 file changed, 57 insertions(+), 50 deletions(-) diff --git a/eth2/lmd_ghost/src/reduced_tree.rs b/eth2/lmd_ghost/src/reduced_tree.rs index cd3a38c46..a388d2c38 100644 --- 
a/eth2/lmd_ghost/src/reduced_tree.rs +++ b/eth2/lmd_ghost/src/reduced_tree.rs @@ -311,51 +311,53 @@ where /// become redundant and removed from the reduced tree. fn remove_latest_message(&mut self, validator_index: usize) -> Result<()> { if let Some(vote) = *self.latest_votes.get(validator_index) { - self.get_mut_node(vote.hash)?.remove_voter(validator_index); - let node = self.get_node(vote.hash)?.clone(); + if self.nodes.contains_key(&vote.hash) { + self.get_mut_node(vote.hash)?.remove_voter(validator_index); + let node = self.get_node(vote.hash)?.clone(); - if let Some(parent_hash) = node.parent_hash { - if node.has_votes() || node.children.len() > 1 { - // A node with votes or more than one child is never removed. - } else if node.children.len() == 1 { - // A node which has only one child may be removed. - // - // Load the child of the node and set it's parent to be the parent of this - // node (viz., graft the node's child to the node's parent) - let child = self.get_mut_node(node.children[0])?; - child.parent_hash = node.parent_hash; + if let Some(parent_hash) = node.parent_hash { + if node.has_votes() || node.children.len() > 1 { + // A node with votes or more than one child is never removed. + } else if node.children.len() == 1 { + // A node which has only one child may be removed. + // + // Load the child of the node and set it's parent to be the parent of this + // node (viz., graft the node's child to the node's parent) + let child = self.get_mut_node(node.children[0])?; + child.parent_hash = node.parent_hash; - // Graft the parent of this node to it's child. - if let Some(parent_hash) = node.parent_hash { - let parent = self.get_mut_node(parent_hash)?; - parent.replace_child(node.block_hash, node.children[0])?; + // Graft the parent of this node to it's child. 
+ if let Some(parent_hash) = node.parent_hash { + let parent = self.get_mut_node(parent_hash)?; + parent.replace_child(node.block_hash, node.children[0])?; + } + + self.nodes.remove(&vote.hash); + } else if node.children.is_empty() { + // Remove the to-be-deleted node from it's parent. + if let Some(parent_hash) = node.parent_hash { + self.get_mut_node(parent_hash)? + .remove_child(node.block_hash)?; + } + + self.nodes.remove(&vote.hash); + + // A node which has no children may be deleted and potentially it's parent + // too. + self.maybe_delete_node(parent_hash)?; + } else { + // It is impossible for a node to have a number of children that is not 0, 1 or + // greater than one. + // + // This code is strictly unnecessary, however we keep it for readability. + unreachable!(); } - - self.nodes.remove(&vote.hash); - } else if node.children.is_empty() { - // Remove the to-be-deleted node from it's parent. - if let Some(parent_hash) = node.parent_hash { - self.get_mut_node(parent_hash)? - .remove_child(node.block_hash)?; - } - - self.nodes.remove(&vote.hash); - - // A node which has no children may be deleted and potentially it's parent - // too. - self.maybe_delete_node(parent_hash)?; } else { - // It is impossible for a node to have a number of children that is not 0, 1 or - // greater than one. - // - // This code is strictly unnecessary, however we keep it for readability. - unreachable!(); + // A node without a parent is the genesis/finalized node and should never be removed. } - } else { - // A node without a parent is the genesis/finalized node and should never be removed. - } - self.latest_votes.insert(validator_index, Some(vote)); + self.latest_votes.insert(validator_index, Some(vote)); + } } Ok(()) @@ -370,25 +372,30 @@ where /// - it does not have any votes. 
fn maybe_delete_node(&mut self, hash: Hash256) -> Result<()> { let should_delete = { - let node = self.get_node(hash)?.clone(); + if let Ok(node) = self.get_node(hash) { + let node = node.clone(); - if let Some(parent_hash) = node.parent_hash { - if (node.children.len() == 1) && !node.has_votes() { - let child_hash = node.children[0]; + if let Some(parent_hash) = node.parent_hash { + if (node.children.len() == 1) && !node.has_votes() { + let child_hash = node.children[0]; - // Graft the single descendant `node` to the `parent` of node. - self.get_mut_node(child_hash)?.parent_hash = Some(parent_hash); + // Graft the single descendant `node` to the `parent` of node. + self.get_mut_node(child_hash)?.parent_hash = Some(parent_hash); - // Detach `node` from `parent`, replacing it with `child`. - self.get_mut_node(parent_hash)? - .replace_child(hash, child_hash)?; + // Detach `node` from `parent`, replacing it with `child`. + self.get_mut_node(parent_hash)? + .replace_child(hash, child_hash)?; - true + true + } else { + false + } } else { + // A node without a parent is the genesis node and should not be deleted. false } } else { - // A node without a parent is the genesis node and should not be deleted. + // No need to delete a node that does not exist. false } }; From 16ec330a79af93518ac3b82b2ffa462424191e16 Mon Sep 17 00:00:00 2001 From: Luke Anderson Date: Wed, 28 Aug 2019 02:05:19 +1000 Subject: [PATCH 125/305] Started aligning API spec with implementation. 
- Adding some missing fields to structs - Rearranged the endpoints in the rest_api router, and renamed, using an 'implementation_pending' function - Added 'content-type' headers, to distinguish difference with /node/metrics - Updated OpenAPI spec to v0.2.0 - Split /node/fork into /node/chain_id and /beacon/fork - Moved /metrics to /node/metrics - Added example to /node/metrics, since it's text/plain - Moved /node/network to just /network - Added lots of stubs for endpoints which exist in the router - Reordered large parts of the OpenAPI spec - Moved /chain/beacon/... to just /beacon/... --- beacon_node/rest_api/src/beacon.rs | 5 + beacon_node/rest_api/src/lib.rs | 79 ++-- beacon_node/rest_api/src/metrics.rs | 10 +- beacon_node/rest_api/src/network.rs | 2 +- docs/rest_oapi.yaml | 584 ++++++++++++++++------------ 5 files changed, 404 insertions(+), 276 deletions(-) diff --git a/beacon_node/rest_api/src/beacon.rs b/beacon_node/rest_api/src/beacon.rs index 1c66a2819..fb8386661 100644 --- a/beacon_node/rest_api/src/beacon.rs +++ b/beacon_node/rest_api/src/beacon.rs @@ -12,6 +12,11 @@ pub struct HeadResponse { pub slot: Slot, pub block_root: Hash256, pub state_root: Hash256, + /* Not implemented: + pub finalized_slot: Slot, + pub finalized_block_root: Hash256, + pub justified_slot: Hash256, + */ } /// HTTP handler to return a `BeaconBlock` at a given `root` or `slot`. diff --git a/beacon_node/rest_api/src/lib.rs b/beacon_node/rest_api/src/lib.rs index b943a1d45..770793491 100644 --- a/beacon_node/rest_api/src/lib.rs +++ b/beacon_node/rest_api/src/lib.rs @@ -124,27 +124,17 @@ pub fn start_server( // Route the request to the correct handler. let result = match (req.method(), path.as_ref()) { - // Methods for Beacon Node - //TODO: Remove? 
- //(&Method::GET, "/beacon/best_slot") => beacon::get_best_slot::(req), - (&Method::GET, "/beacon/head") => beacon::get_head::(req), - (&Method::GET, "/beacon/block") => beacon::get_block::(req), - (&Method::GET, "/beacon/blocks") => helpers::implementation_pending_response(req), - //TODO Is the below replaced by finalized_checkpoint? - (&Method::GET, "/beacon/chainhead") => { + // Methods for Client + (&Method::GET, "/node/version") => node::get_version(req), + (&Method::GET, "/node/genesis_time") => node::get_genesis_time::(req), + (&Method::GET, "/node/deposit_contract") => { helpers::implementation_pending_response(req) } - (&Method::GET, "/beacon/block_root") => beacon::get_block_root::(req), - (&Method::GET, "/beacon/latest_finalized_checkpoint") => { - beacon::get_latest_finalized_checkpoint::(req) - } - (&Method::GET, "/beacon/state") => beacon::get_state::(req), - (&Method::GET, "/beacon/state_root") => beacon::get_state_root::(req), + (&Method::GET, "/node/syncing") => helpers::implementation_pending_response(req), + (&Method::GET, "/node/chain_id") => helpers::implementation_pending_response(req), + (&Method::GET, "/node/metrics") => metrics::get_prometheus::(req), - //TODO: Add aggreggate/filtered state lookups here, e.g. 
/beacon/validators/balances - - // Methods for Client - (&Method::GET, "/metrics") => metrics::get_prometheus::(req), + // Methods for Network (&Method::GET, "/network/enr") => network::get_enr::(req), (&Method::GET, "/network/peer_count") => network::get_peer_count::(req), (&Method::GET, "/network/peer_id") => network::get_peer_id::(req), @@ -153,36 +143,54 @@ pub fn start_server( (&Method::GET, "/network/listen_addresses") => { network::get_listen_addresses::(req) } - (&Method::GET, "/node/version") => node::get_version(req), - (&Method::GET, "/node/genesis_time") => node::get_genesis_time::(req), - (&Method::GET, "/node/deposit_contract") => { + (&Method::GET, "/network/stats") => helpers::implementation_pending_response(req), + (&Method::GET, "/network/block_discovery") => { helpers::implementation_pending_response(req) } - (&Method::GET, "/node/syncing") => helpers::implementation_pending_response(req), - (&Method::GET, "/node/fork") => helpers::implementation_pending_response(req), - // Methods for Network - (&Method::GET, "/network/enr") => network::get_enr::(req), - (&Method::GET, "/network/peer_count") => network::get_peer_count::(req), - (&Method::GET, "/network/peer_id") => network::get_peer_id::(req), - (&Method::GET, "/network/peers") => network::get_peer_list::(req), - (&Method::GET, "/network/listen_addresses") => { - network::get_listen_addresses::(req) + // Methods for Beacon Node + //TODO: Remove? 
+ //(&Method::GET, "/beacon/best_slot") => beacon::get_best_slot::(req), + (&Method::GET, "/beacon/head") => beacon::get_head::(req), + (&Method::GET, "/beacon/block") => beacon::get_block::(req), + (&Method::GET, "/beacon/block_root") => beacon::get_block_root::(req), + (&Method::GET, "/beacon/blocks") => helpers::implementation_pending_response(req), + (&Method::GET, "/beacon/fork") => helpers::implementation_pending_response(req), + (&Method::GET, "/beacon/latest_finalized_checkpoint") => { + beacon::get_latest_finalized_checkpoint::(req) + } + (&Method::GET, "/beacon/attestations") => { + helpers::implementation_pending_response(req) + } + (&Method::GET, "/beacon/attestations/pending") => { + helpers::implementation_pending_response(req) + } + (&Method::GET, "/beacon/attestations") => { + helpers::implementation_pending_response(req) } // Methods for Validator - (&Method::GET, "/validator/duties") => validator::get_validator_duties::(req), - (&Method::GET, "/validator/block") => helpers::implementation_pending_response(req), - (&Method::POST, "/validator/block") => { + (&Method::GET, "/beacon/validator/duties") => { + validator::get_validator_duties::(req) + } + (&Method::GET, "/beacon/validator/block") => { helpers::implementation_pending_response(req) } - (&Method::GET, "/validator/attestation") => { + (&Method::POST, "/beacon/validator/block") => { helpers::implementation_pending_response(req) } - (&Method::POST, "/validator/attestation") => { + (&Method::GET, "/beacon/validator/attestation") => { + helpers::implementation_pending_response(req) + } + (&Method::POST, "/beacon/validator/attestation") => { helpers::implementation_pending_response(req) } + (&Method::GET, "/beacon/state") => beacon::get_state::(req), + (&Method::GET, "/beacon/state_root") => beacon::get_state_root::(req), + //TODO: Add aggreggate/filtered state lookups here, e.g. 
/beacon/validators/balances + + // Methods for bootstrap and checking configuration (&Method::GET, "/spec") => spec::get_spec::(req), (&Method::GET, "/spec/slots_per_epoch") => spec::get_slots_per_epoch::(req), @@ -237,6 +245,7 @@ pub fn start_server( fn success_response(body: Body) -> Response { Response::builder() .status(StatusCode::OK) + .header("content-type", "application/json") .body(body) .expect("We should always be able to make response from the success body.") } diff --git a/beacon_node/rest_api/src/metrics.rs b/beacon_node/rest_api/src/metrics.rs index 064359337..1a7ca886e 100644 --- a/beacon_node/rest_api/src/metrics.rs +++ b/beacon_node/rest_api/src/metrics.rs @@ -64,6 +64,14 @@ pub fn get_prometheus(req: Request) -> ApiR .unwrap(); String::from_utf8(buffer) - .map(|string| success_response(Body::from(string))) + .map(|string| { + let mut response = success_response(Body::from(string)); + // Need to change the header to text/plain for prometheius + response + .headers_mut() + .insert("content-type", "text/plain; charset=utf-8".parse().unwrap()) + .unwrap(); + response + }) .map_err(|e| ApiError::ServerError(format!("Failed to encode prometheus info: {:?}", e))) } diff --git a/beacon_node/rest_api/src/network.rs b/beacon_node/rest_api/src/network.rs index a3e4c5ee7..dffa949c9 100644 --- a/beacon_node/rest_api/src/network.rs +++ b/beacon_node/rest_api/src/network.rs @@ -21,7 +21,7 @@ pub fn get_listen_addresses(req: Request) -> ApiResul ))) } -/// HTTP handle to return the list of libp2p multiaddr the client is listening on. +/// HTTP handle to return network port the client is listening on. /// /// Returns a list of `Multiaddr`, serialized according to their `serde` impl. 
pub fn get_listen_port(req: Request) -> ApiResult { diff --git a/docs/rest_oapi.yaml b/docs/rest_oapi.yaml index dea892c18..0c2f8616d 100644 --- a/docs/rest_oapi.yaml +++ b/docs/rest_oapi.yaml @@ -2,7 +2,7 @@ openapi: "3.0.2" info: title: "Lighthouse REST API" description: "" - version: "0.1.0" + version: "0.2.0" license: name: "Apache 2.0" url: "https://www.apache.org/licenses/LICENSE-2.0.html" @@ -85,7 +85,7 @@ paths: 500: $ref: '#/components/responses/InternalError' - /node/fork: + /node/chain_id: get: tags: - Phase0 @@ -99,8 +99,6 @@ paths: schema: type: object properties: - fork: - $ref: '#/components/schemas/Fork' chain_id: type: integer format: uint64 @@ -108,32 +106,74 @@ paths: 500: $ref: '#/components/responses/InternalError' - /node/stats: + /node/metrics: get: tags: - - Future - summary: "Get operational information about the node." - description: "Fetches some operational information about the node's process, such as memory usage, database size, etc." + - Phase0 + summary: "Get Promethius metrics for the node" + description: "Fetches a range of metrics for measuring nodes health. It is intended for this endpoint to be consumed by Promethius." 
+ responses: + 200: + description: Request successful + content: + text/plain: + example: + summary: 'Promethius metrics' + value: "# HELP beacon_head_state_active_validators_total Count of active validators at the head of the chain + # TYPE beacon_head_state_active_validators_total gauge + beacon_head_state_active_validators_total 16 + # HELP beacon_head_state_current_justified_epoch Current justified epoch at the head of the chain + # TYPE beacon_head_state_current_justified_epoch gauge + beacon_head_state_current_justified_epoch 0 + # HELP beacon_head_state_current_justified_root Current justified root at the head of the chain + # TYPE beacon_head_state_current_justified_root gauge + beacon_head_state_current_justified_root 0 + # HELP beacon_head_state_eth1_deposit_index Eth1 deposit index at the head of the chain + # TYPE beacon_head_state_eth1_deposit_index gauge + beacon_head_state_eth1_deposit_index 16 + # HELP beacon_head_state_finalized_epoch Finalized epoch at the head of the chain + # TYPE beacon_head_state_finalized_epoch gauge + beacon_head_state_finalized_epoch 0 + # HELP beacon_head_state_finalized_root Finalized root at the head of the chain + # TYPE beacon_head_state_finalized_root gauge + beacon_head_state_finalized_root 0 + # HELP beacon_head_state_latest_block_slot Latest block slot at the head of the chain + # TYPE beacon_head_state_latest_block_slot gauge + beacon_head_state_latest_block_slot 0 + # HELP beacon_head_state_previous_justified_epoch Previous justified epoch at the head of the chain + # TYPE beacon_head_state_previous_justified_epoch gauge + beacon_head_state_previous_justified_epoch 0 + # HELP beacon_head_state_previous_justified_root Previous justified root at the head of the chain + # TYPE beacon_head_state_previous_justified_root gauge + beacon_head_state_previous_justified_root 0 + # HELP beacon_head_state_root Root of the block at the head of the chain + # TYPE beacon_head_state_root gauge + beacon_head_state_root 
-7566315470565629000 + # HELP beacon_head_state_shard_total Count of shards in the beacon chain + # TYPE beacon_head_state_shard_total gauge + beacon_head_state_shard_total 8 + # HELP beacon_head_state_slashed_validators_total Count of all slashed validators at the head of the chain + # TYPE beacon_head_state_slashed_validators_total gauge + beacon_head_state_slashed_validators_total 0" + + #TODO: Complete the /network/enr request + /network/enr: + get: + tags: + - Phase0 + summary: "" + description: "" responses: 200: description: Request successful content: application/json: schema: - type: object - properties: - memory_usage: - type: integer - format: uint64 - description: "The amount of memory used by the currently running beacon node process, expressed in bytes." - uptime: - type: integer - format: uint64 - description: "The number of seconds that have elapsed since beacon node process was started." - #TODO: what other useful process information could be expressed here? + type: integer + format: uint16 + example: 2468 - - /node/network/peer_count: + /network/peer_count: get: tags: - Phase0 @@ -149,7 +189,10 @@ paths: format: uint64 example: 25 - /node/network/peers: + #TODO: Complete our peer ID + /network/peer_id: + + /network/peers: get: tags: - Phase0 @@ -165,7 +208,24 @@ paths: items: $ref: '#/components/schemas/Peer' - /node/network/listening: + #TODO: Complete the /network/listen_port endpoint + /network/listen_port: + get: + tags: + - Phase0 + summary: "" + description: "" + responses: + 200: + description: Request successful + content: + application/json: + schema: + type: integer + format: uint16 + example: 2468 + + /network/listen_addresses: get: tags: - Phase0 @@ -183,10 +243,12 @@ paths: type: boolean nullable: false description: "True if the node is listening for incoming network connections. False if networking has been disabled or if the node has been configured to only connect with a static set of peers." 
- listen_address: - $ref: '#/components/schemas/multiaddr' + addresses: + type: array + items: + $ref: '#/components/schemas/multiaddr' - /node/network/stats: + /network/stats: get: tags: - Future @@ -215,7 +277,7 @@ paths: description: "The total number of unique peers (by multiaddr) that have been discovered since the beacon node instance was started." #TODO: This might be too difficult to collect - /node/network/block_discovery: + /network/block_discovery: get: tags: - Future @@ -254,177 +316,72 @@ paths: - #TODO: Add the endpoints that enable a validator to join, exit, withdraw, etc. - /validator/duties: + + /beacon/head: get: tags: - Phase0 - summary: "Get validator duties for the requested validators." - description: "Requests the beacon node to provide a set of _duties_, which are actions that should be performed by validators, for a particular epoch. Duties should only need to be checked once per epoch, however a chain reorganization (of > MIN_SEED_LOOKAHEAD epochs) could occur, resulting in a change of duties. For full safety, this API call should be polled at every slot to ensure that chain reorganizations are recognized, and to ensure that the beacon node is properly synchronized. If no epoch parameter is provided, then the current epoch is assumed." - parameters: - - name: validator_pubkeys - in: query - required: true - description: "An array of hex-encoded BLS public keys" - schema: - type: array - items: - $ref: '#/components/schemas/pubkey' - minItems: 1 - - name: epoch - in: query - required: false - schema: - type: integer - format: uint64 + summary: "Detail the current perspective of the beacon node." + description: "Request the beacon node to identify the most up-to-date information about the beacon chain from its perspective. This includes the latest block, which slots have been finalized, etc." 
responses: 200: description: Success response content: application/json: schema: - type: array - items: - $ref: '#/components/schemas/ValidatorDuty' - 400: - $ref: '#/components/responses/InvalidRequest' - 406: - description: "Duties cannot be provided for the requested epoch." - 500: - $ref: '#/components/responses/InternalError' - 503: - $ref: '#/components/responses/CurrentlySyncing' + type: object + description: "The latest information about the head of the beacon chain." + properties: + slot: + type: integer + format: uint64 + description: "The slot of the head block." + block_root: + type: string + format: bytes + pattern: "^0x[a-fA-F0-9]{64}$" + description: "The merkle tree root of the canonical head block in the beacon node." + state_root: + type: string + format: bytes + pattern: "^0x[a-fA-F0-9]{64}$" + description: "The merkle tree root of the current beacon state." + finalized_slot: + type: integer + format: uint64 + description: "The slot number of the most recent finalized slot." + finalized_block_root: + type: string + format: bytes + pattern: "^0x[a-fA-F0-9]{64}$" + description: "The block root for the most recent finalized block." + justified_slot: + type: integer + format: uint64 + description: "The slot number of the most recent justified slot." + justified_block_root: + type: string + format: bytes + pattern: "^0x[a-fA-F0-9]{64}$" + description: "The block root of the most recent justified block." + previous_justified_slot: + type: integer + format: uint64 + description: "The slot number of the second most recent justified slot." + previous_justified_block_root: + type: integer + format: bytes + pattern: "^0x[a-fA-F0-9]{64}$" + description: "The block root of the second most recent justified block." - /validator/block: - get: - tags: - - Phase0 - summary: "Produce a new block, without signature." - description: "Requests a beacon node to produce a valid block, which can then be signed by a validator." 
- parameters: - - name: slot - in: query - required: true - description: "The slot for which the block should be proposed." - schema: - type: integer - format: uint64 - - name: randao_reveal - in: query - required: true - description: "The validator's randao reveal value." - schema: - type: string - format: byte - responses: - 200: - description: Success response - content: - application/json: - schema: - $ref: '#/components/schemas/BeaconBlock' - 400: - $ref: '#/components/responses/InvalidRequest' - 500: - $ref: '#/components/responses/InternalError' - 503: - $ref: '#/components/responses/CurrentlySyncing' - post: - tags: - - Phase0 - summary: "Publish a signed block." - description: "Instructs the beacon node to broadcast a newly signed beacon block to the beacon network, to be included in the beacon chain. The beacon node is not required to validate the signed `BeaconBlock`, and a successful response (20X) only indicates that the broadcast has been successful. The beacon node is expected to integrate the new block into its state, and therefore validate the block internally, however blocks which fail the validation are still broadcast but a different status code is returned (202)" - parameters: - - name: beacon_block - in: query - required: true - description: "The `BeaconBlock` object, as sent from the beacon node originally, but now with the signature field completed." - schema: - $ref: '#/components/schemas/BeaconBlock' - responses: - 200: - description: "The block was validated successfully and has been broadcast. It has also been integrated into the beacon node's database." - 202: - description: "The block failed validation, but was successfully broadcast anyway. It was not integrated into the beacon node's database." 
- 400: - $ref: '#/components/responses/InvalidRequest' - 500: - $ref: '#/components/responses/InternalError' - 503: - $ref: '#/components/responses/CurrentlySyncing' - /validator/attestation: - get: - tags: - - Phase0 - summary: "Produce an attestation, without signature." - description: "Requests that the beacon node produce an IndexedAttestation, with a blank signature field, which the validator will then sign." - parameters: - - name: validator_pubkey - in: query - required: true - description: "Uniquely identifying which validator this attestation is to be produced for." - schema: - $ref: '#/components/schemas/pubkey' - - name: poc_bit - in: query - required: true - description: "The proof-of-custody bit that is to be reported by the requesting validator. This bit will be inserted into the appropriate location in the returned `IndexedAttestation`." - schema: - type: integer - format: uint32 - minimum: 0 - maximum: 1 - - name: slot - in: query - required: true - description: "The slot for which the attestation should be proposed." - schema: - type: integer - - name: shard - in: query - required: true - description: "The shard number for which the attestation is to be proposed." - schema: - type: integer - responses: - 200: - description: Success response - content: - application/json: - schema: - $ref: '#/components/schemas/IndexedAttestation' - 400: - $ref: '#/components/responses/InvalidRequest' - 500: - $ref: '#/components/responses/InternalError' - 503: - $ref: '#/components/responses/CurrentlySyncing' - post: - tags: - - Phase0 - summary: "Publish a signed attestation." - description: "Instructs the beacon node to broadcast a newly signed IndexedAttestation object to the intended shard subnet. The beacon node is not required to validate the signed IndexedAttestation, and a successful response (20X) only indicates that the broadcast has been successful. 
The beacon node is expected to integrate the new attestation into its state, and therefore validate the attestation internally, however attestations which fail the validation are still broadcast but a different status code is returned (202)" - parameters: - - name: attestation - in: query - required: true - description: "An `IndexedAttestation` structure, as originally provided by the beacon node, but now with the signature field completed." - schema: - $ref: '#/components/schemas/IndexedAttestation' - responses: - 200: - description: "The attestation was validated successfully and has been broadcast. It has also been integrated into the beacon node's database." - 202: - description: "The attestation failed validation, but was successfully broadcast anyway. It was not integrated into the beacon node's database." - 400: - $ref: '#/components/responses/InvalidRequest' - 500: - $ref: '#/components/responses/InternalError' - 503: - $ref: '#/components/responses/CurrentlySyncing' + #TODO Fill out block endpoint + /beacon/block: - /chain/beacon/blocks: + #TODO Fill out block_root endpoint + /beacon/block_root: + + /beacon/blocks: get: tags: - Phase0 @@ -468,59 +425,25 @@ paths: $ref: '#/components/responses/InvalidRequest' #TODO: Make this request error more specific if one of the parameters is not provided correctly. - /chain/beacon/chainhead: + + /beacon/fork: get: tags: - Phase0 - summary: "Detail the current perspective of the beacon node." - description: "Request the beacon node to identify the most up-to-date information about the beacon chain from its perspective. This includes the latest block, which slots have been finalized, etc." + summary: 'Retrieve the current Fork information.' + description: 'Request the beacon node identify the fork it is currently on, from the beacon state.' responses: 200: - description: Success response + description: Success response. 
content: application/json: schema: - type: object - description: "The latest information about the head of the beacon chain." - properties: - block_root: - type: string - format: bytes - pattern: "^0x[a-fA-F0-9]{64}$" - description: "The merkle tree root of the canonical head block in the beacon node." - block_slot: - type: integer - format: uint64 - description: "The slot of the head block." - finalized_slot: - type: integer - format: uint64 - description: "The slot number of the most recent finalized slot." - finalized_block_root: - type: string - format: bytes - pattern: "^0x[a-fA-F0-9]{64}$" - description: "The block root for the most recent finalized block." - justified_slot: - type: integer - format: uint64 - description: "The slot number of the most recent justified slot." - justified_block_root: - type: string - format: bytes - pattern: "^0x[a-fA-F0-9]{64}$" - description: "The block root of the most recent justified block." - previous_justified_slot: - type: integer - format: uint64 - description: "The slot number of the second most recent justified slot." - previous_justified_block_root: - type: integer - format: bytes - pattern: "^0x[a-fA-F0-9]{64}$" - description: "The block root of the second most recent justified block." + $ref: '#/components/schemas/Fork' - /chain/beacon/attestations: + #TODO fill out latest_finalized_checkpoint + /beacon/latest_finalized_checkpoint: + + /beacon/attestations: get: tags: - Phase0 @@ -564,7 +487,7 @@ paths: $ref: '#/components/responses/InvalidRequest' #TODO: Make this request error more specific if one of the parameters is not provided correctly. - /chain/beacon/attestations/pending: + /beacon/attestations/pending: get: tags: - Phase0 @@ -583,7 +506,8 @@ paths: $ref: '#/components/responses/InvalidRequest' #TODO: Make this request error more specific if one of the parameters is not provided correctly. 
- /chain/beacon/validators: + #TODO: do all these '/beacon/validators' endpoints come under '/beacon/state' subqueries? + /beacon/validators: get: tags: - Phase0 @@ -614,7 +538,7 @@ paths: items: $ref: '#/components/schemas/ValidatorInfo' - /chain/beacon/validators/activesetchanges: + /beacon/validators/activesetchanges: get: tags: - Phase0 @@ -656,7 +580,7 @@ paths: items: $ref: '#/components/schemas/pubkey' - /chain/beacon/validators/assignments: + /beacon/validators/assignments: get: tags: - Phase0 @@ -688,7 +612,7 @@ paths: $ref: '#/components/schemas/ValidatorDuty' #TODO: This does not include the crosslink committee value, which must be included for Phase1? - /chain/beacon/validators/indices: + /beacon/validators/indices: get: tags: - Phase0 @@ -714,7 +638,7 @@ paths: items: $ref: '#/components/schemas/ValidatorIndexMapping' - /chain/beacon/validators/pubkeys: + /beacon/validators/pubkeys: get: tags: - Phase0 @@ -742,7 +666,7 @@ paths: items: $ref: '#/components/schemas/ValidatorIndexMapping' - /chain/beacon/validators/balances: + /beacon/validators/balances: get: tags: - Phase0 @@ -803,7 +727,7 @@ paths: format: uint64 description: "The balance of the validator at the specified epoch, expressed in Gwei" - /chain/beacon/validators/participation: + /beacon/validators/participation: get: tags: - Phase0 @@ -848,7 +772,7 @@ paths: format: uint64 description: "The total amount of ether, expressed in Gwei, that is eligible for voting in the specified epoch." - /chain/beacon/validators/queue: + /beacon/validators/queue: get: tags: - Phase0 @@ -889,6 +813,188 @@ paths: items: $ref: '#/components/schemas/pubkey' + #TODO: Add the endpoints that enable a validator to join, exit, withdraw, etc. + /beacon/validator/duties: + get: + tags: + - Phase0 + summary: "Get validator duties for the requested validators." + description: "Requests the beacon node to provide a set of _duties_, which are actions that should be performed by validators, for a particular epoch. 
Duties should only need to be checked once per epoch, however a chain reorganization (of > MIN_SEED_LOOKAHEAD epochs) could occur, resulting in a change of duties. For full safety, this API call should be polled at every slot to ensure that chain reorganizations are recognized, and to ensure that the beacon node is properly synchronized. If no epoch parameter is provided, then the current epoch is assumed." + parameters: + - name: validator_pubkeys + in: query + required: true + description: "An array of hex-encoded BLS public keys" + schema: + type: array + items: + $ref: '#/components/schemas/pubkey' + minItems: 1 + - name: epoch + in: query + required: false + schema: + type: integer + format: uint64 + responses: + 200: + description: Success response + content: + application/json: + schema: + type: array + items: + $ref: '#/components/schemas/ValidatorDuty' + 400: + $ref: '#/components/responses/InvalidRequest' + 406: + description: "Duties cannot be provided for the requested epoch." + 500: + $ref: '#/components/responses/InternalError' + 503: + $ref: '#/components/responses/CurrentlySyncing' + + /beacon/validator/block: + get: + tags: + - Phase0 + summary: "Produce a new block, without signature." + description: "Requests a beacon node to produce a valid block, which can then be signed by a validator." + parameters: + - name: slot + in: query + required: true + description: "The slot for which the block should be proposed." + schema: + type: integer + format: uint64 + - name: randao_reveal + in: query + required: true + description: "The validator's randao reveal value." 
+ schema: + type: string + format: byte + responses: + 200: + description: Success response + content: + application/json: + schema: + $ref: '#/components/schemas/BeaconBlock' + 400: + $ref: '#/components/responses/InvalidRequest' + 500: + $ref: '#/components/responses/InternalError' + 503: + $ref: '#/components/responses/CurrentlySyncing' + post: + tags: + - Phase0 + summary: "Publish a signed block." + description: "Instructs the beacon node to broadcast a newly signed beacon block to the beacon network, to be included in the beacon chain. The beacon node is not required to validate the signed `BeaconBlock`, and a successful response (20X) only indicates that the broadcast has been successful. The beacon node is expected to integrate the new block into its state, and therefore validate the block internally, however blocks which fail the validation are still broadcast but a different status code is returned (202)" + parameters: + - name: beacon_block + in: query + required: true + description: "The `BeaconBlock` object, as sent from the beacon node originally, but now with the signature field completed." + schema: + $ref: '#/components/schemas/BeaconBlock' + responses: + 200: + description: "The block was validated successfully and has been broadcast. It has also been integrated into the beacon node's database." + 202: + description: "The block failed validation, but was successfully broadcast anyway. It was not integrated into the beacon node's database." + 400: + $ref: '#/components/responses/InvalidRequest' + 500: + $ref: '#/components/responses/InternalError' + 503: + $ref: '#/components/responses/CurrentlySyncing' + + /beacon/validator/attestation: + get: + tags: + - Phase0 + summary: "Produce an attestation, without signature." + description: "Requests that the beacon node produce an IndexedAttestation, with a blank signature field, which the validator will then sign." 
+ parameters: + - name: validator_pubkey + in: query + required: true + description: "Uniquely identifying which validator this attestation is to be produced for." + schema: + $ref: '#/components/schemas/pubkey' + - name: poc_bit + in: query + required: true + description: "The proof-of-custody bit that is to be reported by the requesting validator. This bit will be inserted into the appropriate location in the returned `IndexedAttestation`." + schema: + type: integer + format: uint32 + minimum: 0 + maximum: 1 + - name: slot + in: query + required: true + description: "The slot for which the attestation should be proposed." + schema: + type: integer + - name: shard + in: query + required: true + description: "The shard number for which the attestation is to be proposed." + schema: + type: integer + responses: + 200: + description: Success response + content: + application/json: + schema: + $ref: '#/components/schemas/IndexedAttestation' + 400: + $ref: '#/components/responses/InvalidRequest' + 500: + $ref: '#/components/responses/InternalError' + 503: + $ref: '#/components/responses/CurrentlySyncing' + post: + tags: + - Phase0 + summary: "Publish a signed attestation." + description: "Instructs the beacon node to broadcast a newly signed IndexedAttestation object to the intended shard subnet. The beacon node is not required to validate the signed IndexedAttestation, and a successful response (20X) only indicates that the broadcast has been successful. The beacon node is expected to integrate the new attestation into its state, and therefore validate the attestation internally, however attestations which fail the validation are still broadcast but a different status code is returned (202)" + parameters: + - name: attestation + in: query + required: true + description: "An `IndexedAttestation` structure, as originally provided by the beacon node, but now with the signature field completed." 
+ schema: + $ref: '#/components/schemas/IndexedAttestation' + responses: + 200: + description: "The attestation was validated successfully and has been broadcast. It has also been integrated into the beacon node's database." + 202: + description: "The attestation failed validation, but was successfully broadcast anyway. It was not integrated into the beacon node's database." + 400: + $ref: '#/components/responses/InvalidRequest' + 500: + $ref: '#/components/responses/InternalError' + 503: + $ref: '#/components/responses/CurrentlySyncing' + + #TODO fill out /beacon/state + /beacon/state: + + #TODO fill out /beacon/state_root + /beacon/state_root: + + #TODO fill spec + /spec: + + #TODO fill spec/slots_per_epoch + /spec/slots_per_epoch: + components: schemas: pubkey: From 23a308e595cad8438aef47a1be95af2516839448 Mon Sep 17 00:00:00 2001 From: Michael Sproul Date: Wed, 28 Aug 2019 18:46:16 +1000 Subject: [PATCH 126/305] BLS and SSZ static tests --- eth2/types/src/attestation_data.rs | 16 +- eth2/types/src/checkpoint.rs | 3 +- tests/ef_tests/src/cases.rs | 27 +++ .../src/cases/bls_aggregate_pubkeys.rs | 6 +- .../ef_tests/src/cases/bls_aggregate_sigs.rs | 6 +- tests/ef_tests/src/cases/bls_g2_compressed.rs | 6 +- tests/ef_tests/src/cases/bls_priv_to_pub.rs | 6 +- tests/ef_tests/src/cases/bls_sign_msg.rs | 6 +- tests/ef_tests/src/cases/ssz_static.rs | 207 +++++++++--------- tests/ef_tests/src/doc.rs | 53 +++-- tests/ef_tests/src/handler.rs | 130 +++++++++++ tests/ef_tests/src/lib.rs | 3 + tests/ef_tests/src/type_name.rs | 61 ++++++ tests/ef_tests/src/yaml_decode.rs | 14 +- tests/ef_tests/src/yaml_decode/utils.rs | 10 - tests/ef_tests/tests/tests.rs | 105 ++++++++- 16 files changed, 481 insertions(+), 178 deletions(-) create mode 100644 tests/ef_tests/src/handler.rs create mode 100644 tests/ef_tests/src/type_name.rs delete mode 100644 tests/ef_tests/src/yaml_decode/utils.rs diff --git a/eth2/types/src/attestation_data.rs b/eth2/types/src/attestation_data.rs index 
f2e63598f..4d82ce126 100644 --- a/eth2/types/src/attestation_data.rs +++ b/eth2/types/src/attestation_data.rs @@ -4,25 +4,13 @@ use crate::{Checkpoint, Crosslink, Hash256}; use serde_derive::{Deserialize, Serialize}; use ssz_derive::{Decode, Encode}; use test_random_derive::TestRandom; -use tree_hash::TreeHash; -use tree_hash_derive::{SignedRoot, TreeHash}; +use tree_hash_derive::TreeHash; /// The data upon which an attestation is based. /// /// Spec v0.8.0 #[derive( - Debug, - Clone, - PartialEq, - Eq, - Serialize, - Deserialize, - Hash, - Encode, - Decode, - TreeHash, - TestRandom, - SignedRoot, + Debug, Clone, PartialEq, Eq, Serialize, Deserialize, Hash, Encode, Decode, TreeHash, TestRandom, )] pub struct AttestationData { // LMD GHOST vote diff --git a/eth2/types/src/checkpoint.rs b/eth2/types/src/checkpoint.rs index dc40b336f..0c7001921 100644 --- a/eth2/types/src/checkpoint.rs +++ b/eth2/types/src/checkpoint.rs @@ -4,7 +4,7 @@ use serde_derive::{Deserialize, Serialize}; use ssz_derive::{Decode, Encode}; use test_random_derive::TestRandom; use tree_hash::TreeHash; -use tree_hash_derive::{SignedRoot, TreeHash}; +use tree_hash_derive::TreeHash; /// Casper FFG checkpoint, used in attestations. /// @@ -22,7 +22,6 @@ use tree_hash_derive::{SignedRoot, TreeHash}; Decode, TreeHash, TestRandom, - SignedRoot, )] pub struct Checkpoint { pub epoch: Epoch, diff --git a/tests/ef_tests/src/cases.rs b/tests/ef_tests/src/cases.rs index 1ae4ea1d8..7f6ffb0c4 100644 --- a/tests/ef_tests/src/cases.rs +++ b/tests/ef_tests/src/cases.rs @@ -1,5 +1,6 @@ use super::*; use std::fmt::Debug; +use std::path::Path; mod bls_aggregate_pubkeys; mod bls_aggregate_sigs; @@ -53,6 +54,11 @@ pub use shuffling::*; pub use ssz_generic::*; pub use ssz_static::*; +pub trait LoadCase: Sized { + /// Load the test case from a test case directory. + fn load_from_dir(_path: &Path) -> Result; +} + pub trait Case: Debug { /// An optional field for implementing a custom description. 
/// @@ -68,6 +74,26 @@ pub trait Case: Debug { fn result(&self, case_index: usize) -> Result<(), Error>; } +pub trait BlsCase: serde::de::DeserializeOwned {} + +impl YamlDecode for T +where + T: BlsCase, +{ + fn yaml_decode(string: &str) -> Result { + serde_yaml::from_str(string).map_err(|e| Error::FailedToParseTest(format!("{:?}", e))) + } +} + +impl LoadCase for T +where + T: BlsCase, +{ + fn load_from_dir(path: &Path) -> Result { + Self::yaml_decode_file(&path.join("data.yaml")) + } +} + #[derive(Debug)] pub struct Cases { pub test_cases: Vec, @@ -86,6 +112,7 @@ where } } +// FIXME(michael): delete this impl YamlDecode for Cases { /// Decodes a YAML list of test cases fn yaml_decode(yaml: &str) -> Result { diff --git a/tests/ef_tests/src/cases/bls_aggregate_pubkeys.rs b/tests/ef_tests/src/cases/bls_aggregate_pubkeys.rs index 6e38743f2..c94e14495 100644 --- a/tests/ef_tests/src/cases/bls_aggregate_pubkeys.rs +++ b/tests/ef_tests/src/cases/bls_aggregate_pubkeys.rs @@ -9,11 +9,7 @@ pub struct BlsAggregatePubkeys { pub output: String, } -impl YamlDecode for BlsAggregatePubkeys { - fn yaml_decode(yaml: &str) -> Result { - Ok(serde_yaml::from_str(yaml).unwrap()) - } -} +impl BlsCase for BlsAggregatePubkeys {} impl Case for BlsAggregatePubkeys { fn result(&self, _case_index: usize) -> Result<(), Error> { diff --git a/tests/ef_tests/src/cases/bls_aggregate_sigs.rs b/tests/ef_tests/src/cases/bls_aggregate_sigs.rs index eeecab82c..882ad7220 100644 --- a/tests/ef_tests/src/cases/bls_aggregate_sigs.rs +++ b/tests/ef_tests/src/cases/bls_aggregate_sigs.rs @@ -9,11 +9,7 @@ pub struct BlsAggregateSigs { pub output: String, } -impl YamlDecode for BlsAggregateSigs { - fn yaml_decode(yaml: &str) -> Result { - Ok(serde_yaml::from_str(yaml).unwrap()) - } -} +impl BlsCase for BlsAggregateSigs {} impl Case for BlsAggregateSigs { fn result(&self, _case_index: usize) -> Result<(), Error> { diff --git a/tests/ef_tests/src/cases/bls_g2_compressed.rs 
b/tests/ef_tests/src/cases/bls_g2_compressed.rs index 185cb58f3..547d8d03a 100644 --- a/tests/ef_tests/src/cases/bls_g2_compressed.rs +++ b/tests/ef_tests/src/cases/bls_g2_compressed.rs @@ -15,11 +15,7 @@ pub struct BlsG2Compressed { pub output: Vec, } -impl YamlDecode for BlsG2Compressed { - fn yaml_decode(yaml: &str) -> Result { - Ok(serde_yaml::from_str(yaml).unwrap()) - } -} +impl BlsCase for BlsG2Compressed {} impl Case for BlsG2Compressed { fn result(&self, _case_index: usize) -> Result<(), Error> { diff --git a/tests/ef_tests/src/cases/bls_priv_to_pub.rs b/tests/ef_tests/src/cases/bls_priv_to_pub.rs index d72a43bbb..869a0891c 100644 --- a/tests/ef_tests/src/cases/bls_priv_to_pub.rs +++ b/tests/ef_tests/src/cases/bls_priv_to_pub.rs @@ -9,11 +9,7 @@ pub struct BlsPrivToPub { pub output: String, } -impl YamlDecode for BlsPrivToPub { - fn yaml_decode(yaml: &str) -> Result { - Ok(serde_yaml::from_str(yaml).unwrap()) - } -} +impl BlsCase for BlsPrivToPub {} impl Case for BlsPrivToPub { fn result(&self, _case_index: usize) -> Result<(), Error> { diff --git a/tests/ef_tests/src/cases/bls_sign_msg.rs b/tests/ef_tests/src/cases/bls_sign_msg.rs index e62c3550f..476ecdefb 100644 --- a/tests/ef_tests/src/cases/bls_sign_msg.rs +++ b/tests/ef_tests/src/cases/bls_sign_msg.rs @@ -16,11 +16,7 @@ pub struct BlsSign { pub output: String, } -impl YamlDecode for BlsSign { - fn yaml_decode(yaml: &str) -> Result { - Ok(serde_yaml::from_str(yaml).unwrap()) - } -} +impl BlsCase for BlsSign {} impl Case for BlsSign { fn result(&self, _case_index: usize) -> Result<(), Error> { diff --git a/tests/ef_tests/src/cases/ssz_static.rs b/tests/ef_tests/src/cases/ssz_static.rs index 96ba38b6a..6a949073d 100644 --- a/tests/ef_tests/src/cases/ssz_static.rs +++ b/tests/ef_tests/src/cases/ssz_static.rs @@ -3,125 +3,120 @@ use crate::case_result::compare_result; use serde_derive::Deserialize; use ssz::{Decode, Encode}; use std::fmt::Debug; -use std::marker::PhantomData; -use tree_hash::TreeHash; 
-use types::{ - test_utils::TestRandom, Attestation, AttestationData, AttestationDataAndCustodyBit, - AttesterSlashing, BeaconBlock, BeaconBlockBody, BeaconBlockHeader, BeaconState, Checkpoint, - CompactCommittee, Crosslink, Deposit, DepositData, Eth1Data, EthSpec, Fork, Hash256, - HistoricalBatch, IndexedAttestation, PendingAttestation, ProposerSlashing, Transfer, Validator, - VoluntaryExit, -}; - -// Enum variant names are used by Serde when deserializing the test YAML -#[allow(clippy::large_enum_variant)] -#[derive(Debug, Clone, Deserialize)] -pub enum SszStatic -where - E: EthSpec, -{ - Fork(SszStaticInner), - Crosslink(SszStaticInner), - Checkpoint(SszStaticInner), - CompactCommittee(SszStaticInner, E>), - Eth1Data(SszStaticInner), - AttestationData(SszStaticInner), - AttestationDataAndCustodyBit(SszStaticInner), - IndexedAttestation(SszStaticInner, E>), - DepositData(SszStaticInner), - BeaconBlockHeader(SszStaticInner), - Validator(SszStaticInner), - PendingAttestation(SszStaticInner, E>), - HistoricalBatch(SszStaticInner, E>), - ProposerSlashing(SszStaticInner), - AttesterSlashing(SszStaticInner, E>), - Attestation(SszStaticInner, E>), - Deposit(SszStaticInner), - VoluntaryExit(SszStaticInner), - Transfer(SszStaticInner), - BeaconBlockBody(SszStaticInner, E>), - BeaconBlock(SszStaticInner, E>), - BeaconState(SszStaticInner, E>), -} +use std::fs; +use tree_hash::{SignedRoot, TreeHash}; +use types::Hash256; #[derive(Debug, Clone, Deserialize)] -pub struct SszStaticInner -where - E: EthSpec, -{ - pub value: T, - pub serialized: String, - pub root: String, - #[serde(skip, default)] - _phantom: PhantomData, +struct SszStaticRoots { + root: String, + signing_root: Option, } -impl YamlDecode for SszStatic { +impl YamlDecode for SszStaticRoots { fn yaml_decode(yaml: &str) -> Result { - serde_yaml::from_str(yaml).map_err(|e| Error::FailedToParseTest(format!("{:?}", e))) + Ok(serde_yaml::from_str(yaml).unwrap()) } } -impl Case for SszStatic { - fn result(&self, 
_case_index: usize) -> Result<(), Error> { - use self::SszStatic::*; - - match *self { - Fork(ref val) => ssz_static_test(val), - Crosslink(ref val) => ssz_static_test(val), - Checkpoint(ref val) => ssz_static_test(val), - CompactCommittee(ref val) => ssz_static_test(val), - Eth1Data(ref val) => ssz_static_test(val), - AttestationData(ref val) => ssz_static_test(val), - AttestationDataAndCustodyBit(ref val) => ssz_static_test(val), - IndexedAttestation(ref val) => ssz_static_test(val), - DepositData(ref val) => ssz_static_test(val), - BeaconBlockHeader(ref val) => ssz_static_test(val), - Validator(ref val) => ssz_static_test(val), - PendingAttestation(ref val) => ssz_static_test(val), - HistoricalBatch(ref val) => ssz_static_test(val), - ProposerSlashing(ref val) => ssz_static_test(val), - AttesterSlashing(ref val) => ssz_static_test(val), - Attestation(ref val) => ssz_static_test(val), - Deposit(ref val) => ssz_static_test(val), - VoluntaryExit(ref val) => ssz_static_test(val), - Transfer(ref val) => ssz_static_test(val), - BeaconBlockBody(ref val) => ssz_static_test(val), - BeaconBlock(ref val) => ssz_static_test(val), - BeaconState(ref val) => ssz_static_test(val), - } - } +#[derive(Debug, Clone)] +pub struct SszStatic { + roots: SszStaticRoots, + serialized: Vec, + value: T, } -fn ssz_static_test(tc: &SszStaticInner) -> Result<(), Error> -where - T: Clone - + Decode - + Debug - + Encode - + PartialEq - + serde::de::DeserializeOwned - + TreeHash - + TestRandom, +#[derive(Debug, Clone)] +pub struct SszStaticSR { + roots: SszStaticRoots, + serialized: Vec, + value: T, +} + +// Trait alias for all deez bounds +pub trait SszStaticType: + serde::de::DeserializeOwned + Decode + Encode + TreeHash + Clone + PartialEq + Debug { - // Verify we can decode SSZ in the same way we can decode YAML. 
- let ssz = hex::decode(&tc.serialized[2..]) - .map_err(|e| Error::FailedToParseTest(format!("{:?}", e)))?; - let expected = tc.value.clone(); - let decode_result = T::from_ssz_bytes(&ssz); - compare_result(&decode_result, &Some(expected))?; +} - // Verify we can encode the result back into original ssz bytes - let decoded = decode_result.unwrap(); - let encoded_result = decoded.as_ssz_bytes(); - compare_result::, Error>(&Ok(encoded_result), &Some(ssz))?; +impl SszStaticType for T where + T: serde::de::DeserializeOwned + Decode + Encode + TreeHash + Clone + PartialEq + Debug +{ +} - // Verify the TreeHash root of the decoded struct matches the test. - let expected_root = - &hex::decode(&tc.root[2..]).map_err(|e| Error::FailedToParseTest(format!("{:?}", e)))?; - let expected_root = Hash256::from_slice(&expected_root); - let tree_hash_root = Hash256::from_slice(&decoded.tree_hash_root()); - compare_result::(&Ok(tree_hash_root), &Some(expected_root))?; +fn load_from_dir(path: &Path) -> Result<(SszStaticRoots, Vec, T), Error> { + // FIXME: set description/name + let roots = SszStaticRoots::yaml_decode_file(&path.join("roots.yaml"))?; + + let serialized = fs::read(&path.join("serialized.ssz")).expect("serialized.ssz exists"); + + let yaml = fs::read_to_string(&path.join("value.yaml")).expect("value.yaml exists"); + let value = + serde_yaml::from_str(&yaml).map_err(|e| Error::FailedToParseTest(format!("{:?}", e)))?; + + Ok((roots, serialized, value)) +} + +impl LoadCase for SszStatic { + fn load_from_dir(path: &Path) -> Result { + load_from_dir(path).map(|(roots, serialized, value)| Self { + roots, + serialized, + value, + }) + } +} + +impl LoadCase for SszStaticSR { + fn load_from_dir(path: &Path) -> Result { + load_from_dir(path).map(|(roots, serialized, value)| Self { + roots, + serialized, + value, + }) + } +} + +fn check_serialization(value: &T, serialized: &[u8]) -> Result<(), Error> { + // Check serialization + let serialized_result = value.as_ssz_bytes(); + 
compare_result::, Error>(&Ok(serialized_result), &Some(serialized.to_vec()))?; + + // Check deserialization + let deserialized_result = T::from_ssz_bytes(serialized); + compare_result(&deserialized_result, &Some(value.clone()))?; Ok(()) } + +fn check_tree_hash(expected_str: &str, actual_root: Vec) -> Result<(), Error> { + let expected_root = hex::decode(&expected_str[2..]) + .map_err(|e| Error::FailedToParseTest(format!("{:?}", e)))?; + let expected_root = Hash256::from_slice(&expected_root); + let tree_hash_root = Hash256::from_slice(&actual_root); + compare_result::(&Ok(tree_hash_root), &Some(expected_root)) +} + +impl Case for SszStatic { + fn result(&self, _case_index: usize) -> Result<(), Error> { + check_serialization(&self.value, &self.serialized)?; + check_tree_hash(&self.roots.root, self.value.tree_hash_root())?; + Ok(()) + } +} + +impl Case for SszStaticSR { + fn result(&self, _case_index: usize) -> Result<(), Error> { + check_serialization(&self.value, &self.serialized)?; + check_tree_hash(&self.roots.root, self.value.tree_hash_root())?; + check_tree_hash( + &self + .roots + .signing_root + .as_ref() + .expect("signed root exists"), + self.value.signed_root(), + )?; + Ok(()) + } +} diff --git a/tests/ef_tests/src/doc.rs b/tests/ef_tests/src/doc.rs index 7dfe9954c..f3a41697e 100644 --- a/tests/ef_tests/src/doc.rs +++ b/tests/ef_tests/src/doc.rs @@ -2,10 +2,14 @@ use crate::case_result::CaseResult; use crate::cases::*; use crate::doc_header::DocHeader; use crate::error::Error; -use crate::yaml_decode::{yaml_split_header_and_cases, YamlDecode}; +use crate::yaml_decode::YamlDecode; use crate::EfTest; use serde_derive::Deserialize; -use std::{fs::File, io::prelude::*, path::PathBuf}; +use std::{ + fs::File, + io::prelude::*, + path::{Path, PathBuf}, +}; use types::{MainnetEthSpec, MinimalEthSpec}; #[derive(Debug, Deserialize)] @@ -19,15 +23,13 @@ impl Doc { fn from_path(path: PathBuf) -> Self { let mut file = File::open(path.clone()).unwrap(); - let mut yaml 
= String::new(); - file.read_to_string(&mut yaml).unwrap(); - - let (header_yaml, cases_yaml) = yaml_split_header_and_cases(yaml.clone()); + let mut cases_yaml = String::new(); + file.read_to_string(&mut cases_yaml).unwrap(); Self { - header_yaml, cases_yaml, path, + header_yaml: String::new(), } } @@ -40,8 +42,6 @@ impl Doc { header.config.as_ref(), ) { ("ssz", "uint", _) => run_test::(self), - ("ssz", "static", "minimal") => run_test::>(self), - ("ssz", "static", "mainnet") => run_test::>(self), ("sanity", "slots", "minimal") => run_test::>(self), // FIXME: skipped due to compact committees issue ("sanity", "slots", "mainnet") => vec![], // run_test::>(self), @@ -172,14 +172,36 @@ impl Doc { } } -pub fn run_test(doc: &Doc) -> Vec +pub fn assert_tests_pass(path: &Path, results: &[CaseResult]) { + let doc = Doc { + header_yaml: String::new(), + cases_yaml: String::new(), + path: path.into(), + }; + + let (failed, skipped_bls, skipped_known_failures) = categorize_results(results); + + if failed.len() + skipped_known_failures.len() > 0 { + print_results( + &doc, + &failed, + &skipped_bls, + &skipped_known_failures, + &results, + ); + if !failed.is_empty() { + panic!("Tests failed (see above)"); + } + } else { + println!("Passed {} tests in {}", results.len(), path.display()); + } +} + +pub fn run_test(_: &Doc) -> Vec where Cases: EfTest + YamlDecode, { - // Pass only the "test_cases" YAML string to `yaml_decode`. 
- let test_cases: Cases = Cases::yaml_decode(&doc.cases_yaml).unwrap(); - - test_cases.test_results() + panic!("FIXME(michael): delete this") } pub fn categorize_results( @@ -208,7 +230,6 @@ pub fn print_results( skipped_known_failures: &[&CaseResult], results: &[CaseResult], ) { - let header: DocHeader = serde_yaml::from_str(&doc.header_yaml).unwrap(); println!("--------------------------------------------------"); println!( "Test {}", @@ -218,7 +239,7 @@ pub fn print_results( "Failure" } ); - println!("Title: {}", header.title); + println!("Title: TODO"); println!("File: {:?}", doc.path); println!( "{} tests, {} failed, {} skipped (known failure), {} skipped (bls), {} passed. (See below for errors)", diff --git a/tests/ef_tests/src/handler.rs b/tests/ef_tests/src/handler.rs new file mode 100644 index 000000000..1dac988ac --- /dev/null +++ b/tests/ef_tests/src/handler.rs @@ -0,0 +1,130 @@ +use crate::cases::{self, Case, Cases, LoadCase}; +use crate::type_name::TypeName; +use crate::EfTest; +use std::fs; +use std::marker::PhantomData; +use std::path::PathBuf; +use tree_hash::SignedRoot; + +pub trait Handler { + type Case: Case + LoadCase; + + fn config_name() -> &'static str { + "general" + } + + fn fork_name() -> &'static str { + "phase0" + } + + fn runner_name() -> &'static str; + + fn handler_name() -> &'static str; + + fn run() { + let handler_path = PathBuf::from(env!("CARGO_MANIFEST_DIR")) + .join("eth2.0-spec-tests") + .join("tests") + .join(Self::config_name()) + .join(Self::fork_name()) + .join(Self::runner_name()) + .join(Self::handler_name()); + + // Iterate through test suites + // TODO: parallelism + // TODO: error handling? 
+ let test_cases = fs::read_dir(&handler_path) + .expect("open main directory") + .flat_map(|entry| { + entry + .ok() + .filter(|e| e.file_type().map(|ty| ty.is_dir()).unwrap_or(false)) + }) + .flat_map(|suite| fs::read_dir(suite.path()).expect("open suite dir")) + .flat_map(Result::ok) + .map(|test_case_dir| Self::Case::load_from_dir(&test_case_dir.path()).expect("loads")) + .collect::>(); + + let results = Cases { test_cases }.test_results(); + + crate::doc::assert_tests_pass(&handler_path, &results); + } +} + +macro_rules! bls_handler { + ($runner_name: ident, $case_name:ident, $handler_name:expr) => { + pub struct $runner_name; + + impl Handler for $runner_name { + type Case = cases::$case_name; + + fn runner_name() -> &'static str { + "bls" + } + + fn handler_name() -> &'static str { + $handler_name + } + } + }; +} + +bls_handler!( + BlsAggregatePubkeysHandler, + BlsAggregatePubkeys, + "aggregate_pubkeys" +); +bls_handler!(BlsAggregateSigsHandler, BlsAggregateSigs, "aggregate_sigs"); +bls_handler!( + BlsG2CompressedHandler, + BlsG2Compressed, + "msg_hash_compressed" +); +bls_handler!(BlsPrivToPubHandler, BlsPrivToPub, "priv_to_pub"); +bls_handler!(BlsSignMsgHandler, BlsSign, "sign_msg"); + +/// Handler for SSZ types that do not implement `SignedRoot`. +pub struct SszStaticHandler(PhantomData<(T, E)>); + +/// Handler for SSZ types that do implement `SignedRoot`. 
+pub struct SszStaticSRHandler(PhantomData<(T, E)>); + +impl Handler for SszStaticHandler +where + T: cases::SszStaticType + TypeName, + E: TypeName, +{ + type Case = cases::SszStatic; + + fn config_name() -> &'static str { + E::name() + } + + fn runner_name() -> &'static str { + "ssz_static" + } + + fn handler_name() -> &'static str { + T::name() + } +} + +impl Handler for SszStaticSRHandler +where + T: cases::SszStaticType + SignedRoot + TypeName, + E: TypeName, +{ + type Case = cases::SszStaticSR; + + fn config_name() -> &'static str { + E::name() + } + + fn runner_name() -> &'static str { + "ssz_static" + } + + fn handler_name() -> &'static str { + T::name() + } +} diff --git a/tests/ef_tests/src/lib.rs b/tests/ef_tests/src/lib.rs index fdd4e7b85..cc17c3ea4 100644 --- a/tests/ef_tests/src/lib.rs +++ b/tests/ef_tests/src/lib.rs @@ -4,6 +4,7 @@ pub use case_result::CaseResult; pub use cases::Case; pub use doc::Doc; pub use error::Error; +pub use handler::*; pub use yaml_decode::YamlDecode; mod bls_setting; @@ -12,6 +13,8 @@ mod cases; mod doc; mod doc_header; mod error; +mod handler; +mod type_name; mod yaml_decode; /// Defined where an object can return the results of some test(s) adhering to the Ethereum diff --git a/tests/ef_tests/src/type_name.rs b/tests/ef_tests/src/type_name.rs new file mode 100644 index 000000000..fe55a7f5f --- /dev/null +++ b/tests/ef_tests/src/type_name.rs @@ -0,0 +1,61 @@ +//! Mapping from types to canonical string identifiers used in testing. +use types::*; + +pub trait TypeName { + fn name() -> &'static str; +} + +impl TypeName for MinimalEthSpec { + fn name() -> &'static str { + "minimal" + } +} + +impl TypeName for MainnetEthSpec { + fn name() -> &'static str { + "mainnet" + } +} + +macro_rules! impl_name { + ($typ:ident) => { + impl TypeName for $typ { + fn name() -> &'static str { + stringify!($typ) + } + } + }; +} + +macro_rules! 
impl_name_generic { + ($typ:ident) => { + impl TypeName for $typ { + fn name() -> &'static str { + stringify!($typ) + } + } + }; +} + +impl_name_generic!(Attestation); +impl_name!(AttestationData); +impl_name!(AttestationDataAndCustodyBit); +impl_name_generic!(AttesterSlashing); +impl_name_generic!(BeaconBlock); +impl_name_generic!(BeaconBlockBody); +impl_name!(BeaconBlockHeader); +impl_name_generic!(BeaconState); +impl_name!(Checkpoint); +impl_name_generic!(CompactCommittee); +impl_name!(Crosslink); +impl_name!(Deposit); +impl_name!(DepositData); +impl_name!(Eth1Data); +impl_name!(Fork); +impl_name_generic!(HistoricalBatch); +impl_name_generic!(IndexedAttestation); +impl_name_generic!(PendingAttestation); +impl_name!(ProposerSlashing); +impl_name!(Transfer); +impl_name!(Validator); +impl_name!(VoluntaryExit); diff --git a/tests/ef_tests/src/yaml_decode.rs b/tests/ef_tests/src/yaml_decode.rs index c89dd92a9..af122fb0c 100644 --- a/tests/ef_tests/src/yaml_decode.rs +++ b/tests/ef_tests/src/yaml_decode.rs @@ -1,14 +1,20 @@ use super::*; use ethereum_types::{U128, U256}; +use std::fs; +use std::path::Path; use types::Fork; -mod utils; - -pub use utils::*; - pub trait YamlDecode: Sized { /// Decode an object from the test specification YAML. fn yaml_decode(string: &str) -> Result; + + fn yaml_decode_file(path: &Path) -> Result { + fs::read_to_string(path) + .map_err(|e| { + Error::FailedToParseTest(format!("Unable to load {}: {:?}", path.display(), e)) + }) + .and_then(|s| Self::yaml_decode(&s)) + } } /// Basic types can general be decoded with the `parse` fn if they implement `str::FromStr`. 
diff --git a/tests/ef_tests/src/yaml_decode/utils.rs b/tests/ef_tests/src/yaml_decode/utils.rs deleted file mode 100644 index 7b6caac72..000000000 --- a/tests/ef_tests/src/yaml_decode/utils.rs +++ /dev/null @@ -1,10 +0,0 @@ -pub fn yaml_split_header_and_cases(mut yaml: String) -> (String, String) { - let test_cases_start = yaml.find("\ntest_cases:\n").unwrap(); - // + 1 to skip the \n we used for matching. - let mut test_cases = yaml.split_off(test_cases_start + 1); - - let end_of_first_line = test_cases.find('\n').unwrap(); - let test_cases = test_cases.split_off(end_of_first_line + 1); - - (yaml, test_cases) -} diff --git a/tests/ef_tests/tests/tests.rs b/tests/ef_tests/tests/tests.rs index deb699e78..000c53330 100644 --- a/tests/ef_tests/tests/tests.rs +++ b/tests/ef_tests/tests/tests.rs @@ -1,12 +1,20 @@ use ef_tests::*; use rayon::prelude::*; use std::path::{Path, PathBuf}; +use types::{ + Attestation, AttestationData, AttestationDataAndCustodyBit, AttesterSlashing, BeaconBlock, + BeaconBlockBody, BeaconBlockHeader, BeaconState, Checkpoint, CompactCommittee, Crosslink, + Deposit, DepositData, Eth1Data, Fork, HistoricalBatch, IndexedAttestation, MainnetEthSpec, + MinimalEthSpec, PendingAttestation, ProposerSlashing, Transfer, Validator, VoluntaryExit, +}; use walkdir::WalkDir; fn yaml_files_in_test_dir(dir: &Path) -> Vec { let base_path = PathBuf::from(env!("CARGO_MANIFEST_DIR")) .join("eth2.0-spec-tests") .join("tests") + .join("general") + .join("phase0") .join(dir); assert!( @@ -155,12 +163,107 @@ fn sanity_slots() { #[cfg(not(feature = "fake_crypto"))] fn bls() { yaml_files_in_test_dir(&Path::new("bls")) - .into_par_iter() + .into_iter() .for_each(|file| { Doc::assert_tests_pass(file); }); } +#[test] +#[cfg(not(feature = "fake_crypto"))] +fn bls_aggregate_pubkeys() { + BlsAggregatePubkeysHandler::run(); +} + +#[test] +#[cfg(not(feature = "fake_crypto"))] +fn bls_aggregate_sigs() { + BlsAggregateSigsHandler::run(); +} + +#[test] +#[cfg(not(feature = 
"fake_crypto"))] +fn bls_msg_hash_g2_compressed() { + BlsG2CompressedHandler::run(); +} + +#[test] +#[cfg(not(feature = "fake_crypto"))] +fn bls_priv_to_pub() { + BlsPrivToPubHandler::run(); +} + +#[test] +#[cfg(not(feature = "fake_crypto"))] +fn bls_sign_msg() { + BlsSignMsgHandler::run(); +} + +macro_rules! ssz_static_test { + // Signed-root + ($test_name:ident, $typ:ident$(<$generics:tt>)?, SR) => { + ssz_static_test!($test_name, SszStaticSRHandler, $typ$(<$generics>)?); + }; + // Non-signed root + ($test_name:ident, $typ:ident$(<$generics:tt>)?) => { + ssz_static_test!($test_name, SszStaticHandler, $typ$(<$generics>)?); + }; + // Generic + ($test_name:ident, $handler:ident, $typ:ident<_>) => { + ssz_static_test!( + $test_name, $handler, { + ($typ, MinimalEthSpec), + ($typ, MainnetEthSpec) + } + ); + }; + // Non-generic + ($test_name:ident, $handler:ident, $typ:ident) => { + ssz_static_test!( + $test_name, $handler, { + ($typ, MinimalEthSpec), + ($typ, MainnetEthSpec) + } + ); + }; + // Base case + ($test_name:ident, $handler:ident, { $(($typ:ty, $spec:ident)),+ }) => { + #[test] + #[cfg(feature = "fake_crypto")] + fn $test_name() { + $( + $handler::<$typ, $spec>::run(); + )+ + } + }; +} + +ssz_static_test!(ssz_static_attestation, Attestation<_>, SR); +ssz_static_test!(ssz_static_attestation_data, AttestationData); +ssz_static_test!( + ssz_static_attestation_data_and_custody_bit, + AttestationDataAndCustodyBit +); +ssz_static_test!(ssz_static_attester_slashing, AttesterSlashing<_>); +ssz_static_test!(ssz_static_beacon_block, BeaconBlock<_>, SR); +ssz_static_test!(ssz_static_beacon_block_body, BeaconBlockBody<_>); +ssz_static_test!(ssz_static_beacon_block_header, BeaconBlockHeader, SR); +ssz_static_test!(ssz_static_beacon_state, BeaconState<_>); +ssz_static_test!(ssz_static_checkpoint, Checkpoint); +ssz_static_test!(ssz_static_compact_committee, CompactCommittee<_>); +ssz_static_test!(ssz_static_crosslink, Crosslink); +ssz_static_test!(ssz_static_deposit, 
Deposit); +ssz_static_test!(ssz_static_deposit_data, DepositData, SR); +ssz_static_test!(ssz_static_eth1_data, Eth1Data); +ssz_static_test!(ssz_static_fork, Fork); +ssz_static_test!(ssz_static_historical_batch, HistoricalBatch<_>); +ssz_static_test!(ssz_static_indexed_attestation, IndexedAttestation<_>, SR); +ssz_static_test!(ssz_static_pending_attestation, PendingAttestation<_>); +ssz_static_test!(ssz_static_proposer_slashing, ProposerSlashing); +ssz_static_test!(ssz_static_transfer, Transfer, SR); +ssz_static_test!(ssz_static_validator, Validator); +ssz_static_test!(ssz_static_voluntary_exit, VoluntaryExit, SR); + #[test] fn epoch_processing_justification_and_finalization() { yaml_files_in_test_dir(&Path::new("epoch_processing").join("justification_and_finalization")) From 77e2f576af49c191425f85d285cae39fbb4a4344 Mon Sep 17 00:00:00 2001 From: Luke Anderson Date: Wed, 28 Aug 2019 21:25:38 +1000 Subject: [PATCH 127/305] Further aligning the API & implementation. - Completed implementation of /beacon/head - renamed 'latest_finalized_checkpoint' to 'current_finalized_checkpoint' for consistency - Reorganised list of endpoints in both spec & router so that they match - Fixed the content-type modifications for /metrics - Added a new 'RFC' tag to the spec, to tag things that we have not implemented and aren't sure if it's useful. 
- Moved 'deposit_contract' under /spec --- beacon_node/rest_api/src/beacon.rs | 35 +++++-- beacon_node/rest_api/src/lib.rs | 27 +++-- beacon_node/rest_api/src/metrics.rs | 9 +- docs/rest_oapi.yaml | 154 ++++++++++++++-------------- 4 files changed, 128 insertions(+), 97 deletions(-) diff --git a/beacon_node/rest_api/src/beacon.rs b/beacon_node/rest_api/src/beacon.rs index fb8386661..b489f1fe7 100644 --- a/beacon_node/rest_api/src/beacon.rs +++ b/beacon_node/rest_api/src/beacon.rs @@ -12,11 +12,12 @@ pub struct HeadResponse { pub slot: Slot, pub block_root: Hash256, pub state_root: Hash256, - /* Not implemented: pub finalized_slot: Slot, pub finalized_block_root: Hash256, - pub justified_slot: Hash256, - */ + pub justified_slot: Slot, + pub justified_block_root: Hash256, + pub previous_justified_slot: Slot, + pub previous_justified_block_root: Hash256, } /// HTTP handler to return a `BeaconBlock` at a given `root` or `slot`. @@ -26,10 +27,30 @@ pub fn get_head(req: Request) -> ApiResult .get::>>() .ok_or_else(|| ApiError::ServerError("Beacon chain extension missing".to_string()))?; + let chain_head = beacon_chain.head(); + let head = HeadResponse { - slot: beacon_chain.head().beacon_state.slot, - block_root: beacon_chain.head().beacon_block_root, - state_root: beacon_chain.head().beacon_state_root, + slot: chain_head.beacon_state.slot, + block_root: chain_head.beacon_block_root, + state_root: chain_head.beacon_state_root, + finalized_slot: chain_head + .beacon_state + .finalized_checkpoint + .epoch + .start_slot(T::EthSpec::slots_per_epoch()), + finalized_block_root: chain_head.beacon_state.finalized_checkpoint.root, + justified_slot: chain_head + .beacon_state + .current_justified_checkpoint + .epoch + .start_slot(T::EthSpec::slots_per_epoch()), + justified_block_root: chain_head.beacon_state.current_justified_checkpoint.root, + previous_justified_slot: chain_head + .beacon_state + .previous_justified_checkpoint + .epoch + 
.start_slot(T::EthSpec::slots_per_epoch()), + previous_justified_block_root: chain_head.beacon_state.previous_justified_checkpoint.root, }; let json: String = serde_json::to_string(&head) @@ -178,7 +199,7 @@ pub fn get_state_root(req: Request) -> ApiR } /// HTTP handler to return the highest finalized slot. -pub fn get_latest_finalized_checkpoint( +pub fn get_current_finalized_checkpoint( req: Request, ) -> ApiResult { let beacon_chain = req diff --git a/beacon_node/rest_api/src/lib.rs b/beacon_node/rest_api/src/lib.rs index 770793491..99a8c6343 100644 --- a/beacon_node/rest_api/src/lib.rs +++ b/beacon_node/rest_api/src/lib.rs @@ -127,12 +127,8 @@ pub fn start_server( // Methods for Client (&Method::GET, "/node/version") => node::get_version(req), (&Method::GET, "/node/genesis_time") => node::get_genesis_time::(req), - (&Method::GET, "/node/deposit_contract") => { - helpers::implementation_pending_response(req) - } (&Method::GET, "/node/syncing") => helpers::implementation_pending_response(req), (&Method::GET, "/node/chain_id") => helpers::implementation_pending_response(req), - (&Method::GET, "/node/metrics") => metrics::get_prometheus::(req), // Methods for Network (&Method::GET, "/network/enr") => network::get_enr::(req), @@ -143,29 +139,30 @@ pub fn start_server( (&Method::GET, "/network/listen_addresses") => { network::get_listen_addresses::(req) } - (&Method::GET, "/network/stats") => helpers::implementation_pending_response(req), (&Method::GET, "/network/block_discovery") => { helpers::implementation_pending_response(req) } // Methods for Beacon Node - //TODO: Remove? 
- //(&Method::GET, "/beacon/best_slot") => beacon::get_best_slot::(req), (&Method::GET, "/beacon/head") => beacon::get_head::(req), (&Method::GET, "/beacon/block") => beacon::get_block::(req), (&Method::GET, "/beacon/block_root") => beacon::get_block_root::(req), (&Method::GET, "/beacon/blocks") => helpers::implementation_pending_response(req), (&Method::GET, "/beacon/fork") => helpers::implementation_pending_response(req), - (&Method::GET, "/beacon/latest_finalized_checkpoint") => { - beacon::get_latest_finalized_checkpoint::(req) - } (&Method::GET, "/beacon/attestations") => { helpers::implementation_pending_response(req) } (&Method::GET, "/beacon/attestations/pending") => { helpers::implementation_pending_response(req) } - (&Method::GET, "/beacon/attestations") => { + + (&Method::GET, "/beacon/validators") => { + helpers::implementation_pending_response(req) + } + (&Method::GET, "/beacon/validators/indicies") => { + helpers::implementation_pending_response(req) + } + (&Method::GET, "/beacon/validators/pubkeys") => { helpers::implementation_pending_response(req) } @@ -188,11 +185,19 @@ pub fn start_server( (&Method::GET, "/beacon/state") => beacon::get_state::(req), (&Method::GET, "/beacon/state_root") => beacon::get_state_root::(req), + (&Method::GET, "/beacon/state/current_finalized_checkpoint") => { + beacon::get_current_finalized_checkpoint::(req) + } //TODO: Add aggreggate/filtered state lookups here, e.g. 
/beacon/validators/balances // Methods for bootstrap and checking configuration (&Method::GET, "/spec") => spec::get_spec::(req), (&Method::GET, "/spec/slots_per_epoch") => spec::get_slots_per_epoch::(req), + (&Method::GET, "/spec/deposit_contract") => { + helpers::implementation_pending_response(req) + } + + (&Method::GET, "/metrics") => metrics::get_prometheus::(req), _ => Err(ApiError::NotFound( "Request path and/or method not found.".to_owned(), diff --git a/beacon_node/rest_api/src/metrics.rs b/beacon_node/rest_api/src/metrics.rs index 1a7ca886e..9d2ecc343 100644 --- a/beacon_node/rest_api/src/metrics.rs +++ b/beacon_node/rest_api/src/metrics.rs @@ -1,5 +1,6 @@ use crate::{success_response, ApiError, ApiResult, DBPath}; use beacon_chain::{BeaconChain, BeaconChainTypes}; +use http::HeaderValue; use hyper::{Body, Request}; use prometheus::{Encoder, TextEncoder}; use std::sync::Arc; @@ -67,10 +68,10 @@ pub fn get_prometheus(req: Request) -> ApiR .map(|string| { let mut response = success_response(Body::from(string)); // Need to change the header to text/plain for prometheius - response - .headers_mut() - .insert("content-type", "text/plain; charset=utf-8".parse().unwrap()) - .unwrap(); + response.headers_mut().insert( + "content-type", + HeaderValue::from_static("text/plain; charset=utf-8"), + ); response }) .map_err(|e| ApiError::ServerError(format!("Failed to encode prometheus info: {:?}", e))) diff --git a/docs/rest_oapi.yaml b/docs/rest_oapi.yaml index 0c2f8616d..be8991956 100644 --- a/docs/rest_oapi.yaml +++ b/docs/rest_oapi.yaml @@ -13,6 +13,8 @@ tags: description: Endpoints which will be implemented for phase 1 of Ethereum Serenity - name: Future description: Potential future endpoints or optional nice-to-haves + - name: RFC + description: Do we need these endpoints at all? This is a request for comments if you think they're useful. 
paths: /node/version: @@ -47,21 +49,6 @@ paths: 500: $ref: '#/components/responses/InternalError' - /node/deposit_contract: - get: - tags: - - Phase0 - summary: "Get the address of the Ethereum 1 deposit contract." - description: "Requests the address of the deposit contract on the Ethereum 1 chain, which was used to start the current beacon chain." - responses: - 200: - description: Request successful - content: - application/json: - schema: - $ref: '#/components/schemas/ethereum_address' - 500: - $ref: '#/components/responses/InternalError' /node/syncing: get: @@ -106,55 +93,6 @@ paths: 500: $ref: '#/components/responses/InternalError' - /node/metrics: - get: - tags: - - Phase0 - summary: "Get Promethius metrics for the node" - description: "Fetches a range of metrics for measuring nodes health. It is intended for this endpoint to be consumed by Promethius." - responses: - 200: - description: Request successful - content: - text/plain: - example: - summary: 'Promethius metrics' - value: "# HELP beacon_head_state_active_validators_total Count of active validators at the head of the chain - # TYPE beacon_head_state_active_validators_total gauge - beacon_head_state_active_validators_total 16 - # HELP beacon_head_state_current_justified_epoch Current justified epoch at the head of the chain - # TYPE beacon_head_state_current_justified_epoch gauge - beacon_head_state_current_justified_epoch 0 - # HELP beacon_head_state_current_justified_root Current justified root at the head of the chain - # TYPE beacon_head_state_current_justified_root gauge - beacon_head_state_current_justified_root 0 - # HELP beacon_head_state_eth1_deposit_index Eth1 deposit index at the head of the chain - # TYPE beacon_head_state_eth1_deposit_index gauge - beacon_head_state_eth1_deposit_index 16 - # HELP beacon_head_state_finalized_epoch Finalized epoch at the head of the chain - # TYPE beacon_head_state_finalized_epoch gauge - beacon_head_state_finalized_epoch 0 - # HELP 
beacon_head_state_finalized_root Finalized root at the head of the chain - # TYPE beacon_head_state_finalized_root gauge - beacon_head_state_finalized_root 0 - # HELP beacon_head_state_latest_block_slot Latest block slot at the head of the chain - # TYPE beacon_head_state_latest_block_slot gauge - beacon_head_state_latest_block_slot 0 - # HELP beacon_head_state_previous_justified_epoch Previous justified epoch at the head of the chain - # TYPE beacon_head_state_previous_justified_epoch gauge - beacon_head_state_previous_justified_epoch 0 - # HELP beacon_head_state_previous_justified_root Previous justified root at the head of the chain - # TYPE beacon_head_state_previous_justified_root gauge - beacon_head_state_previous_justified_root 0 - # HELP beacon_head_state_root Root of the block at the head of the chain - # TYPE beacon_head_state_root gauge - beacon_head_state_root -7566315470565629000 - # HELP beacon_head_state_shard_total Count of shards in the beacon chain - # TYPE beacon_head_state_shard_total gauge - beacon_head_state_shard_total 8 - # HELP beacon_head_state_slashed_validators_total Count of all slashed validators at the head of the chain - # TYPE beacon_head_state_slashed_validators_total gauge - beacon_head_state_slashed_validators_total 0" #TODO: Complete the /network/enr request /network/enr: @@ -251,7 +189,7 @@ paths: /network/stats: get: tags: - - Future + - RFC summary: "Get some simple network statistics from the node." description: "Request that the beacon node provide some historical summary information about its networking interface." #TODO: Do we actually collect these stats? Should we? @@ -280,7 +218,7 @@ paths: /network/block_discovery: get: tags: - - Future + - RFC summary: "Identify the time at which particular blocks were first seen." description: "Request the node to provide the time at which particular blocks were first seen on the network." 
parameters: @@ -369,7 +307,7 @@ paths: format: uint64 description: "The slot number of the second most recent justified slot." previous_justified_block_root: - type: integer + type: string format: bytes pattern: "^0x[a-fA-F0-9]{64}$" description: "The block root of the second most recent justified block." @@ -440,8 +378,6 @@ paths: schema: $ref: '#/components/schemas/Fork' - #TODO fill out latest_finalized_checkpoint - /beacon/latest_finalized_checkpoint: /beacon/attestations: get: @@ -506,7 +442,6 @@ paths: $ref: '#/components/responses/InvalidRequest' #TODO: Make this request error more specific if one of the parameters is not provided correctly. - #TODO: do all these '/beacon/validators' endpoints come under '/beacon/state' subqueries? /beacon/validators: get: tags: @@ -541,7 +476,7 @@ paths: /beacon/validators/activesetchanges: get: tags: - - Phase0 + - RFC summary: "Retrieve the changes in active validator set." description: "Request that the beacon node describe the changes that occurred at the specified epoch, as compared with the prior epoch." parameters: @@ -583,7 +518,7 @@ paths: /beacon/validators/assignments: get: tags: - - Phase0 + - RFC summary: "Retrieve the assigned responsibilities for validators in a particular epoch." description: "Request that the beacon node list the duties which have been assigned to the active validator set in a particular epoch." parameters: @@ -669,7 +604,7 @@ paths: /beacon/validators/balances: get: tags: - - Phase0 + - RFC summary: "Retrieve the balances of validators at a specified epoch." description: "Retrieve the balances of validators at a specified epoch (or the current epoch if none specified). The list of balances can be filtered by providing a list of validator public keys or indices." parameters: @@ -730,7 +665,7 @@ paths: /beacon/validators/participation: get: tags: - - Phase0 + - RFC summary: "Retrieve aggregate information about validator participation in an epoch." 
 description: "Retrieve some aggregate information about the participation of validators in a specified epoch (or the current epoch if none specified)." parameters: @@ -775,7 +710,7 @@ /beacon/validators/queue: get: tags: - - Phase0 + - RFC summary: "Retrieve information about the validator queue at the specified epoch." description: "Retrieve information about the queue of validators for the specified epoch (or the current epoch if none specified)." parameters: @@ -989,12 +924,81 @@ #TODO fill out /beacon/state_root /beacon/state_root: + #TODO fill out current_finalized_checkpoint + /beacon/current_finalized_checkpoint: + #TODO fill spec /spec: #TODO fill spec/slots_per_epoch /spec/slots_per_epoch: + /spec/deposit_contract: + get: + tags: + - Phase0 + summary: "Get the address of the Ethereum 1 deposit contract." + description: "Requests the address of the deposit contract on the Ethereum 1 chain, which was used to start the current beacon chain." + responses: + 200: + description: Request successful + content: + application/json: + schema: + $ref: '#/components/schemas/ethereum_address' + 500: + $ref: '#/components/responses/InternalError' + + /metrics: + get: + tags: + - Phase0 + summary: "Get Prometheus metrics for the node" + description: "Fetches a range of metrics for measuring node health. It is intended for this endpoint to be consumed by Prometheus." 
+ responses: + 200: + description: Request successful + content: + text/plain: + example: + summary: 'Promethius metrics' + value: "# HELP beacon_head_state_active_validators_total Count of active validators at the head of the chain + # TYPE beacon_head_state_active_validators_total gauge + beacon_head_state_active_validators_total 16 + # HELP beacon_head_state_current_justified_epoch Current justified epoch at the head of the chain + # TYPE beacon_head_state_current_justified_epoch gauge + beacon_head_state_current_justified_epoch 0 + # HELP beacon_head_state_current_justified_root Current justified root at the head of the chain + # TYPE beacon_head_state_current_justified_root gauge + beacon_head_state_current_justified_root 0 + # HELP beacon_head_state_eth1_deposit_index Eth1 deposit index at the head of the chain + # TYPE beacon_head_state_eth1_deposit_index gauge + beacon_head_state_eth1_deposit_index 16 + # HELP beacon_head_state_finalized_epoch Finalized epoch at the head of the chain + # TYPE beacon_head_state_finalized_epoch gauge + beacon_head_state_finalized_epoch 0 + # HELP beacon_head_state_finalized_root Finalized root at the head of the chain + # TYPE beacon_head_state_finalized_root gauge + beacon_head_state_finalized_root 0 + # HELP beacon_head_state_latest_block_slot Latest block slot at the head of the chain + # TYPE beacon_head_state_latest_block_slot gauge + beacon_head_state_latest_block_slot 0 + # HELP beacon_head_state_previous_justified_epoch Previous justified epoch at the head of the chain + # TYPE beacon_head_state_previous_justified_epoch gauge + beacon_head_state_previous_justified_epoch 0 + # HELP beacon_head_state_previous_justified_root Previous justified root at the head of the chain + # TYPE beacon_head_state_previous_justified_root gauge + beacon_head_state_previous_justified_root 0 + # HELP beacon_head_state_root Root of the block at the head of the chain + # TYPE beacon_head_state_root gauge + beacon_head_state_root 
-7566315470565629000 + # HELP beacon_head_state_shard_total Count of shards in the beacon chain + # TYPE beacon_head_state_shard_total gauge + beacon_head_state_shard_total 8 + # HELP beacon_head_state_slashed_validators_total Count of all slashed validators at the head of the chain + # TYPE beacon_head_state_slashed_validators_total gauge + beacon_head_state_slashed_validators_total 0" + components: schemas: pubkey: From 0bd5ce65f418cc77f1f237b694292aeeba0c1b34 Mon Sep 17 00:00:00 2001 From: Luke Anderson Date: Wed, 28 Aug 2019 21:26:18 +1000 Subject: [PATCH 128/305] Renamed the YAML spec document inside the 'docs' folder. --- docs/{rest_oapi.yaml => api_spec.yaml} | 0 1 file changed, 0 insertions(+), 0 deletions(-) rename docs/{rest_oapi.yaml => api_spec.yaml} (100%) diff --git a/docs/rest_oapi.yaml b/docs/api_spec.yaml similarity index 100% rename from docs/rest_oapi.yaml rename to docs/api_spec.yaml From faef347d181aea4c485859c5de201f1a99932132 Mon Sep 17 00:00:00 2001 From: Luke Anderson Date: Wed, 28 Aug 2019 23:33:34 +1000 Subject: [PATCH 129/305] Fleshed out some API endpoints. 
- Added the /beacon/validator/block endpoint for GET (untested) - Added the /beacon/fork endpoint for GET - Cleaned up a bunch of unused imports & variables - Removed '/network/block_discovery' endpoint --- beacon_node/beacon_chain/src/beacon_chain.rs | 18 ++++- beacon_node/rest_api/src/beacon.rs | 16 +++++ beacon_node/rest_api/src/helpers.rs | 4 +- beacon_node/rest_api/src/lib.rs | 7 +- beacon_node/rest_api/src/url_query.rs | 2 +- beacon_node/rest_api/src/validator.rs | 70 +++++++++++++++++--- beacon_node/rpc/src/beacon_block.rs | 2 +- 7 files changed, 99 insertions(+), 20 deletions(-) diff --git a/beacon_node/beacon_chain/src/beacon_chain.rs b/beacon_node/beacon_chain/src/beacon_chain.rs index 5feefd841..500f6411f 100644 --- a/beacon_node/beacon_chain/src/beacon_chain.rs +++ b/beacon_node/beacon_chain/src/beacon_chain.rs @@ -983,20 +983,34 @@ impl BeaconChain { Ok(BlockProcessingOutcome::Processed { block_root }) } - /// Produce a new block at the present slot. + /// Produce a new block at the specified slot. /// /// The produced block will not be inherently valid, it must be signed by a block producer. /// Block signing is out of the scope of this function and should be done by a separate program. pub fn produce_block( &self, randao_reveal: Signature, + slot: Slot, ) -> Result<(BeaconBlock, BeaconState), BlockProductionError> { let state = self.state.read().clone(); + + self.produce_block_on_state(state, slot, randao_reveal) + } + + /// Produce a new block at the current slot + /// + /// Calls `produce_block`, with the slot parameter set as the current. 
+ /// + /// ** This function is probably obsolete (was for previous RPC), and can probably be removed ** + pub fn produce_current_block( + &self, + randao_reveal: Signature, + ) -> Result<(BeaconBlock, BeaconState), BlockProductionError> { let slot = self .read_slot_clock() .ok_or_else(|| BlockProductionError::UnableToReadSlot)?; - self.produce_block_on_state(state, slot, randao_reveal) + self.produce_block(randao_reveal, slot) } /// Produce a block for some `slot` upon the given `state`. diff --git a/beacon_node/rest_api/src/beacon.rs b/beacon_node/rest_api/src/beacon.rs index b489f1fe7..5dcbc728a 100644 --- a/beacon_node/rest_api/src/beacon.rs +++ b/beacon_node/rest_api/src/beacon.rs @@ -130,6 +130,22 @@ pub fn get_block_root(req: Request) -> ApiR Ok(success_response(Body::from(json))) } +/// HTTP handler to return the `Fork` of the current head. +pub fn get_fork(req: Request) -> ApiResult { + let beacon_chain = req + .extensions() + .get::>>() + .ok_or_else(|| ApiError::ServerError("Beacon chain extension missing".to_string()))?; + + let chain_head = beacon_chain.head(); + + let json: String = serde_json::to_string(&chain_head.beacon_state.fork).map_err(|e| { + ApiError::ServerError(format!("Unable to serialize BeaconState::Fork: {:?}", e)) + })?; + + Ok(success_response(Body::from(json))) +} + #[derive(Serialize)] #[serde(bound = "T: EthSpec")] pub struct StateResponse { diff --git a/beacon_node/rest_api/src/helpers.rs b/beacon_node/rest_api/src/helpers.rs index 88755fcde..2477884c4 100644 --- a/beacon_node/rest_api/src/helpers.rs +++ b/beacon_node/rest_api/src/helpers.rs @@ -2,9 +2,7 @@ use crate::{ApiError, ApiResult}; use beacon_chain::{BeaconChain, BeaconChainTypes}; use bls::PublicKey; use hex; -use hyper::{Body, Request, StatusCode}; -use serde::de::value::StringDeserializer; -use serde_json::Deserializer; +use hyper::{Body, Request}; use store::{iter::AncestorIter, Store}; use types::{BeaconState, EthSpec, Hash256, RelativeEpoch, Slot}; diff --git 
a/beacon_node/rest_api/src/lib.rs b/beacon_node/rest_api/src/lib.rs index 99a8c6343..a6ee948ae 100644 --- a/beacon_node/rest_api/src/lib.rs +++ b/beacon_node/rest_api/src/lib.rs @@ -139,16 +139,13 @@ pub fn start_server( (&Method::GET, "/network/listen_addresses") => { network::get_listen_addresses::(req) } - (&Method::GET, "/network/block_discovery") => { - helpers::implementation_pending_response(req) - } // Methods for Beacon Node (&Method::GET, "/beacon/head") => beacon::get_head::(req), (&Method::GET, "/beacon/block") => beacon::get_block::(req), (&Method::GET, "/beacon/block_root") => beacon::get_block_root::(req), (&Method::GET, "/beacon/blocks") => helpers::implementation_pending_response(req), - (&Method::GET, "/beacon/fork") => helpers::implementation_pending_response(req), + (&Method::GET, "/beacon/fork") => beacon::get_fork::(req), (&Method::GET, "/beacon/attestations") => { helpers::implementation_pending_response(req) } @@ -171,7 +168,7 @@ pub fn start_server( validator::get_validator_duties::(req) } (&Method::GET, "/beacon/validator/block") => { - helpers::implementation_pending_response(req) + validator::get_new_beacon_block::(req) } (&Method::POST, "/beacon/validator/block") => { helpers::implementation_pending_response(req) diff --git a/beacon_node/rest_api/src/url_query.rs b/beacon_node/rest_api/src/url_query.rs index e39a9a449..3802ff831 100644 --- a/beacon_node/rest_api/src/url_query.rs +++ b/beacon_node/rest_api/src/url_query.rs @@ -64,7 +64,7 @@ impl<'a> UrlQuery<'a> { /// Returns a vector of all values present where `key` is in `keys /// /// If no match is found, an `InvalidQueryParams` error is returned. 
- pub fn all_of(mut self, key: &str) -> Result, ApiError> { + pub fn all_of(self, key: &str) -> Result, ApiError> { let queries: Vec<_> = self .0 .filter_map(|(k, v)| { diff --git a/beacon_node/rest_api/src/validator.rs b/beacon_node/rest_api/src/validator.rs index 4294f9c20..645a35837 100644 --- a/beacon_node/rest_api/src/validator.rs +++ b/beacon_node/rest_api/src/validator.rs @@ -1,13 +1,12 @@ use super::{success_response, ApiResult}; use crate::{helpers::*, ApiError, UrlQuery}; use beacon_chain::{BeaconChain, BeaconChainTypes}; -use bls::PublicKey; +use bls::{PublicKey, Signature}; use hyper::{Body, Request}; use serde::{Deserialize, Serialize}; use std::sync::Arc; -use store::Store; use types::beacon_state::EthSpec; -use types::{BeaconBlock, BeaconState, Epoch, RelativeEpoch, Shard, Slot}; +use types::{Epoch, RelativeEpoch, Shard, Slot}; #[derive(Debug, Serialize, Deserialize)] pub struct ValidatorDuty { @@ -39,16 +38,14 @@ pub fn get_validator_duties(req: Request) - .extensions() .get::>>() .ok_or_else(|| ApiError::ServerError("Beacon chain extension missing".to_string()))?; + //TODO Surely this state_cache thing is not necessary? 
let _ = beacon_chain .ensure_state_caches_are_built() .map_err(|e| ApiError::ServerError(format!("Unable to build state caches: {:?}", e)))?; - let head_state = beacon_chain - .speculative_state() - .expect("This is legacy code and should be removed."); + let head_state = &beacon_chain.head().beacon_state; // Parse and check query parameters let query = UrlQuery::from_request(&req)?; - let current_epoch = head_state.current_epoch(); let epoch = match query.first_of(&["epoch"]) { Ok((_, v)) => Epoch::new(v.parse::().map_err(|e| { @@ -66,7 +63,7 @@ pub fn get_validator_duties(req: Request) - )) })?; //TODO: Handle an array of validators, currently only takes one - let mut validators: Vec = match query.all_of("validator_pubkeys") { + let validators: Vec = match query.all_of("validator_pubkeys") { Ok(v) => v .iter() .map(|pk| parse_pubkey(pk)) @@ -147,3 +144,60 @@ pub fn get_validator_duties(req: Request) - ); Ok(success_response(body)) } + +/// HTTP Handler to produce a new BeaconBlock from the current state, ready to be signed by a validator. +pub fn get_new_beacon_block(req: Request) -> ApiResult { + // Get beacon state + let beacon_chain = req + .extensions() + .get::>>() + .ok_or_else(|| ApiError::ServerError("Beacon chain extension missing".to_string()))?; + //TODO Surely this state_cache thing is not necessary? + let _ = beacon_chain + .ensure_state_caches_are_built() + .map_err(|e| ApiError::ServerError(format!("Unable to build state caches: {:?}", e)))?; + + let query = UrlQuery::from_request(&req)?; + let slot = match query.first_of(&["slot"]) { + Ok((_, v)) => Slot::new(v.parse::().map_err(|e| { + ApiError::InvalidQueryParams(format!("Invalid slot parameter, must be a u64. 
{:?}", e)) + })?), + Err(e) => { + return Err(e); + } + }; + let randao_reveal = match query.first_of(&["randao_reveal"]) { + Ok((_, v)) => Signature::from_bytes( + hex::decode(&v) + .map_err(|e| { + ApiError::InvalidQueryParams(format!( + "Invalid hex string for randao_reveal: {:?}", + e + )) + })? + .as_slice(), + ) + .map_err(|e| { + ApiError::InvalidQueryParams(format!("randao_reveal is not a valid signature: {:?}", e)) + })?, + Err(e) => { + return Err(e); + } + }; + + let new_block = match beacon_chain.produce_block(randao_reveal, slot) { + Ok((block, _state)) => block, + Err(e) => { + return Err(ApiError::ServerError(format!( + "Beacon node is not able to produce a block: {:?}", + e + ))); + } + }; + + let body = Body::from( + serde_json::to_string(&new_block) + .expect("We should always be able to serialize a new block that we created."), + ); + Ok(success_response(body)) +} diff --git a/beacon_node/rpc/src/beacon_block.rs b/beacon_node/rpc/src/beacon_block.rs index b1a67399e..012fcb678 100644 --- a/beacon_node/rpc/src/beacon_block.rs +++ b/beacon_node/rpc/src/beacon_block.rs @@ -51,7 +51,7 @@ impl BeaconBlockService for BeaconBlockServiceInstance { } }; - let produced_block = match self.chain.produce_block(randao_reveal) { + let produced_block = match self.chain.produce_current_block(randao_reveal) { Ok((block, _state)) => block, Err(e) => { // could not produce a block From ca07d7245397d7c7efac729326b0c356fc6c471d Mon Sep 17 00:00:00 2001 From: Luke Anderson Date: Thu, 29 Aug 2019 12:42:45 +1000 Subject: [PATCH 130/305] Removed methods for 'chain_id', since this is no longer applicable to ETH2. 
--- beacon_node/rest_api/src/lib.rs | 1 - docs/api_spec.yaml | 22 ---------------------- 2 files changed, 23 deletions(-) diff --git a/beacon_node/rest_api/src/lib.rs b/beacon_node/rest_api/src/lib.rs index a6ee948ae..b7fd3f581 100644 --- a/beacon_node/rest_api/src/lib.rs +++ b/beacon_node/rest_api/src/lib.rs @@ -128,7 +128,6 @@ pub fn start_server( (&Method::GET, "/node/version") => node::get_version(req), (&Method::GET, "/node/genesis_time") => node::get_genesis_time::(req), (&Method::GET, "/node/syncing") => helpers::implementation_pending_response(req), - (&Method::GET, "/node/chain_id") => helpers::implementation_pending_response(req), // Methods for Network (&Method::GET, "/network/enr") => network::get_enr::(req), diff --git a/docs/api_spec.yaml b/docs/api_spec.yaml index be8991956..901df3179 100644 --- a/docs/api_spec.yaml +++ b/docs/api_spec.yaml @@ -72,28 +72,6 @@ paths: 500: $ref: '#/components/responses/InternalError' - /node/chain_id: - get: - tags: - - Phase0 - summary: "Get fork information from running beacon node." - description: "Requests the beacon node to provide which fork version it is currently on." - responses: - 200: - description: Request successful - content: - application/json: - schema: - type: object - properties: - chain_id: - type: integer - format: uint64 - description: "Sometimes called the network id, this number discerns the active chain for the beacon node. Analogous to Eth1.0 JSON-RPC net_version." - 500: - $ref: '#/components/responses/InternalError' - - #TODO: Complete the /network/enr request /network/enr: get: From 7bfe02be1cde9c08bb24b3b8ed5b0c5689d96327 Mon Sep 17 00:00:00 2001 From: Paul Hauner Date: Thu, 29 Aug 2019 12:46:18 +1000 Subject: [PATCH 131/305] Refactor slot clock. 
--- eth2/utils/slot_clock/src/lib.rs | 19 +- eth2/utils/slot_clock/src/metrics.rs | 7 +- .../slot_clock/src/system_time_slot_clock.rs | 191 +++++++----------- .../slot_clock/src/testing_slot_clock.rs | 37 ++-- 4 files changed, 98 insertions(+), 156 deletions(-) diff --git a/eth2/utils/slot_clock/src/lib.rs b/eth2/utils/slot_clock/src/lib.rs index 871743c9e..988f3d322 100644 --- a/eth2/utils/slot_clock/src/lib.rs +++ b/eth2/utils/slot_clock/src/lib.rs @@ -5,24 +5,19 @@ mod metrics; mod system_time_slot_clock; mod testing_slot_clock; -use std::time::Duration; +use std::time::{Duration, Instant}; -pub use crate::system_time_slot_clock::{Error as SystemTimeSlotClockError, SystemTimeSlotClock}; -pub use crate::testing_slot_clock::{Error as TestingSlotClockError, TestingSlotClock}; +pub use crate::system_time_slot_clock::SystemTimeSlotClock; +pub use crate::testing_slot_clock::TestingSlotClock; pub use metrics::scrape_for_metrics; pub use types::Slot; pub trait SlotClock: Send + Sync + Sized { - type Error; + fn new(genesis_slot: Slot, genesis: Instant, slot_duration: Duration) -> Self; - /// Create a new `SlotClock`. - /// - /// Returns an Error if `slot_duration_seconds == 0`. - fn new(genesis_slot: Slot, genesis_seconds: u64, slot_duration_seconds: u64) -> Self; + fn present_slot(&self) -> Option; - fn present_slot(&self) -> Result, Self::Error>; + fn duration_to_next_slot(&self) -> Option; - fn duration_to_next_slot(&self) -> Result, Self::Error>; - - fn slot_duration_millis(&self) -> u64; + fn slot_duration(&self) -> Duration; } diff --git a/eth2/utils/slot_clock/src/metrics.rs b/eth2/utils/slot_clock/src/metrics.rs index e0d3923e0..1abd93c48 100644 --- a/eth2/utils/slot_clock/src/metrics.rs +++ b/eth2/utils/slot_clock/src/metrics.rs @@ -18,7 +18,7 @@ lazy_static! { /// Update the global metrics `DEFAULT_REGISTRY` with info from the slot clock. 
pub fn scrape_for_metrics(clock: &U) { let present_slot = match clock.present_slot() { - Ok(Some(slot)) => slot, + Some(slot) => slot, _ => Slot::new(0), }; @@ -28,5 +28,8 @@ pub fn scrape_for_metrics(clock: &U) { present_slot.epoch(T::slots_per_epoch()).as_u64() as i64, ); set_gauge(&SLOTS_PER_EPOCH, T::slots_per_epoch() as i64); - set_gauge(&MILLISECONDS_PER_SLOT, clock.slot_duration_millis() as i64); + set_gauge( + &MILLISECONDS_PER_SLOT, + clock.slot_duration().as_millis() as i64, + ); } diff --git a/eth2/utils/slot_clock/src/system_time_slot_clock.rs b/eth2/utils/slot_clock/src/system_time_slot_clock.rs index c493a8be8..88c9c0e63 100644 --- a/eth2/utils/slot_clock/src/system_time_slot_clock.rs +++ b/eth2/utils/slot_clock/src/system_time_slot_clock.rs @@ -1,99 +1,68 @@ use super::SlotClock; -use std::time::{Duration, SystemTime}; +use std::time::{Duration, Instant}; use types::Slot; pub use std::time::SystemTimeError; -#[derive(Debug, PartialEq)] -pub enum Error { - SlotDurationIsZero, - SystemTimeError(String), -} - /// Determines the present slot based upon the present system time. #[derive(Clone)] pub struct SystemTimeSlotClock { genesis_slot: Slot, - genesis_seconds: u64, - slot_duration_seconds: u64, + genesis: Instant, + slot_duration: Duration, } impl SlotClock for SystemTimeSlotClock { - type Error = Error; + fn new(genesis_slot: Slot, genesis: Instant, slot_duration: Duration) -> Self { + if slot_duration.as_millis() == 0 { + panic!("SystemTimeSlotClock cannot have a < 1ms slot duration."); + } - /// Create a new `SystemTimeSlotClock`. - /// - /// Returns an Error if `slot_duration_seconds == 0`. 
- fn new(genesis_slot: Slot, genesis_seconds: u64, slot_duration_seconds: u64) -> Self { Self { genesis_slot, - genesis_seconds, - slot_duration_seconds, + genesis, + slot_duration, } } - fn present_slot(&self) -> Result, Error> { - if self.slot_duration_seconds == 0 { - return Err(Error::SlotDurationIsZero); - } + fn present_slot(&self) -> Option { + let now = Instant::now(); - let syslot_time = SystemTime::now(); - let duration_since_epoch = syslot_time.duration_since(SystemTime::UNIX_EPOCH)?; - let duration_since_genesis = - duration_since_epoch.checked_sub(Duration::from_secs(self.genesis_seconds)); - - match duration_since_genesis { - None => Ok(None), - Some(d) => Ok(slot_from_duration(self.slot_duration_seconds, d) - .and_then(|s| Some(s + self.genesis_slot))), + if now < self.genesis { + None + } else { + let slot = Slot::from( + (now.duration_since(self.genesis).as_millis() / self.slot_duration.as_millis()) + as u64, + ); + Some(slot + self.genesis_slot) } } - fn duration_to_next_slot(&self) -> Result, Error> { - duration_to_next_slot(self.genesis_seconds, self.slot_duration_seconds) + fn duration_to_next_slot(&self) -> Option { + let now = Instant::now(); + if now < self.genesis { + None + } else { + let duration_since_genesis = now - self.genesis; + let millis_since_genesis = duration_since_genesis.as_millis(); + let millis_per_slot = self.slot_duration.as_millis(); + + let current_slot = millis_since_genesis / millis_per_slot; + let next_slot = current_slot + 1; + + let next_slot = + self.genesis + Duration::from_millis((next_slot * millis_per_slot) as u64); + + Some(next_slot.duration_since(now)) + } } - fn slot_duration_millis(&self) -> u64 { - self.slot_duration_seconds * 1000 + fn slot_duration(&self) -> Duration { + self.slot_duration } } -impl From for Error { - fn from(e: SystemTimeError) -> Error { - Error::SystemTimeError(format!("{:?}", e)) - } -} - -fn slot_from_duration(slot_duration_seconds: u64, duration: Duration) -> Option { - 
Some(Slot::new( - duration.as_secs().checked_div(slot_duration_seconds)?, - )) -} -// calculate the duration to the next slot -fn duration_to_next_slot( - genesis_time: u64, - seconds_per_slot: u64, -) -> Result, Error> { - let now = SystemTime::now().duration_since(SystemTime::UNIX_EPOCH)?; - let genesis_time = Duration::from_secs(genesis_time); - - if now < genesis_time { - return Ok(None); - } - - let since_genesis = now - genesis_time; - - let elapsed_slots = since_genesis.as_secs() / seconds_per_slot; - - let next_slot_start_seconds = (elapsed_slots + 1) - .checked_mul(seconds_per_slot) - .expect("Next slot time should not overflow u64"); - - let time_to_next_slot = Duration::from_secs(next_slot_start_seconds) - since_genesis; - - Ok(Some(time_to_next_slot)) -} - #[cfg(test)] mod tests { use super::*; @@ -104,71 +73,51 @@ mod tests { */ #[test] fn test_slot_now() { - let slot_time = 100; let genesis_slot = Slot::new(0); - let now = SystemTime::now(); - let since_epoch = now.duration_since(SystemTime::UNIX_EPOCH).unwrap(); + let prior_genesis = + |seconds_prior: u64| Instant::now() - Duration::from_secs(seconds_prior); - let genesis = since_epoch.as_secs() - slot_time * 89; + let clock = + SystemTimeSlotClock::new(genesis_slot, prior_genesis(0), Duration::from_secs(1)); + assert_eq!(clock.present_slot(), Some(Slot::new(0))); - let clock = SystemTimeSlotClock { + let clock = + SystemTimeSlotClock::new(genesis_slot, prior_genesis(5), Duration::from_secs(1)); + assert_eq!(clock.present_slot(), Some(Slot::new(5))); + + let clock = SystemTimeSlotClock::new( genesis_slot, - genesis_seconds: genesis, - slot_duration_seconds: slot_time, - }; - assert_eq!(clock.present_slot().unwrap(), Some(Slot::new(89))); + Instant::now() - Duration::from_millis(500), + Duration::from_secs(1), + ); + assert_eq!(clock.present_slot(), Some(Slot::new(0))); + assert!(clock.duration_to_next_slot().unwrap() < Duration::from_millis(500)); - let clock = SystemTimeSlotClock { + let clock = 
SystemTimeSlotClock::new( genesis_slot, - genesis_seconds: since_epoch.as_secs(), - slot_duration_seconds: slot_time, - }; - assert_eq!(clock.present_slot().unwrap(), Some(Slot::new(0))); - - let clock = SystemTimeSlotClock { - genesis_slot, - genesis_seconds: since_epoch.as_secs() - slot_time * 42 - 5, - slot_duration_seconds: slot_time, - }; - assert_eq!(clock.present_slot().unwrap(), Some(Slot::new(42))); + Instant::now() - Duration::from_millis(1_500), + Duration::from_secs(1), + ); + assert_eq!(clock.present_slot(), Some(Slot::new(1))); + assert!(clock.duration_to_next_slot().unwrap() < Duration::from_millis(500)); } #[test] - fn test_slot_from_duration() { - let slot_time = 100; - - assert_eq!( - slot_from_duration(slot_time, Duration::from_secs(0)), - Some(Slot::new(0)) - ); - assert_eq!( - slot_from_duration(slot_time, Duration::from_secs(10)), - Some(Slot::new(0)) - ); - assert_eq!( - slot_from_duration(slot_time, Duration::from_secs(100)), - Some(Slot::new(1)) - ); - assert_eq!( - slot_from_duration(slot_time, Duration::from_secs(101)), - Some(Slot::new(1)) - ); - assert_eq!( - slot_from_duration(slot_time, Duration::from_secs(1000)), - Some(Slot::new(10)) - ); + #[should_panic] + fn zero_seconds() { + SystemTimeSlotClock::new(Slot::new(0), Instant::now(), Duration::from_secs(0)); } #[test] - fn test_slot_from_duration_slot_time_zero() { - let slot_time = 0; + #[should_panic] + fn zero_millis() { + SystemTimeSlotClock::new(Slot::new(0), Instant::now(), Duration::from_millis(0)); + } - assert_eq!(slot_from_duration(slot_time, Duration::from_secs(0)), None); - assert_eq!(slot_from_duration(slot_time, Duration::from_secs(10)), None); - assert_eq!( - slot_from_duration(slot_time, Duration::from_secs(1000)), - None - ); + #[test] + #[should_panic] + fn less_than_one_millis() { + SystemTimeSlotClock::new(Slot::new(0), Instant::now(), Duration::from_nanos(999)); } } diff --git a/eth2/utils/slot_clock/src/testing_slot_clock.rs 
b/eth2/utils/slot_clock/src/testing_slot_clock.rs index f741d3b87..0b65b1569 100644 --- a/eth2/utils/slot_clock/src/testing_slot_clock.rs +++ b/eth2/utils/slot_clock/src/testing_slot_clock.rs @@ -1,12 +1,11 @@ use super::SlotClock; use std::sync::RwLock; -use std::time::Duration; +use std::time::{Duration, Instant}; use types::Slot; -#[derive(Debug, PartialEq)] -pub enum Error {} - -/// Determines the present slot based upon the present system time. +/// A slot clock where the slot is manually set instead of being determined by the system time. +/// +/// Useful for testing scenarios. pub struct TestingSlotClock { slot: RwLock, } @@ -17,32 +16,30 @@ impl TestingSlotClock { } pub fn advance_slot(&self) { - self.set_slot(self.present_slot().unwrap().unwrap().as_u64() + 1) + self.set_slot(self.present_slot().unwrap().as_u64() + 1) } } impl SlotClock for TestingSlotClock { - type Error = Error; - - /// Create a new `TestingSlotClock` at `genesis_slot`. - fn new(genesis_slot: Slot, _genesis_seconds: u64, _slot_duration_seconds: u64) -> Self { + fn new(genesis_slot: Slot, _genesis: Instant, _slot_duration: Duration) -> Self { TestingSlotClock { slot: RwLock::new(genesis_slot), } } - fn present_slot(&self) -> Result, Error> { + fn present_slot(&self) -> Option { let slot = *self.slot.read().expect("TestingSlotClock poisoned."); - Ok(Some(slot)) + Some(slot) } /// Always returns a duration of 1 second. - fn duration_to_next_slot(&self) -> Result, Error> { - Ok(Some(Duration::from_secs(1))) + fn duration_to_next_slot(&self) -> Option { + Some(Duration::from_secs(1)) } - fn slot_duration_millis(&self) -> u64 { - 0 + /// Always returns a slot duration of 0 seconds. 
+ fn slot_duration(&self) -> Duration { + Duration::from_secs(0) } } @@ -52,11 +49,9 @@ mod tests { #[test] fn test_slot_now() { - let null = 0; - - let clock = TestingSlotClock::new(Slot::new(10), null, null); - assert_eq!(clock.present_slot(), Ok(Some(Slot::new(10)))); + let clock = TestingSlotClock::new(Slot::new(10), Instant::now(), Duration::from_secs(0)); + assert_eq!(clock.present_slot(), Some(Slot::new(10))); clock.set_slot(123); - assert_eq!(clock.present_slot(), Ok(Some(Slot::new(123)))); + assert_eq!(clock.present_slot(), Some(Slot::new(123))); } } From 5b5e458938b29a56fef1631a209d78640cf30d0a Mon Sep 17 00:00:00 2001 From: Luke Anderson Date: Thu, 29 Aug 2019 13:12:56 +1000 Subject: [PATCH 132/305] Flesh out the API spec for the /network endpoints. --- docs/api_spec.yaml | 64 +++++++++++++++++++++++++++++----------------- 1 file changed, 41 insertions(+), 23 deletions(-) diff --git a/docs/api_spec.yaml b/docs/api_spec.yaml index 901df3179..5053c5dd2 100644 --- a/docs/api_spec.yaml +++ b/docs/api_spec.yaml @@ -72,22 +72,24 @@ paths: 500: $ref: '#/components/responses/InternalError' - #TODO: Complete the /network/enr request /network/enr: get: tags: - Phase0 - summary: "" - description: "" + summary: "Get the node's Ethereum Node Record (ENR)." + description: "The Ethereum Node Record (ENR) contains a compressed public key, an IPv4 address, a TCP port and a UDP port, which is all encoded using base64. This endpoint fetches the base64 encoded version of the ENR for the running beacon node." 
responses: 200: description: Request successful content: application/json: schema: - type: integer - format: uint16 - example: 2468 + type: string + format: byte + example: "-IW4QHzEZbIB0YN47bVlsUrGbcL9vl21n7xF5gRKjMNkJ4MxfcwiqrsE7Ows8EnzOvC8P4ZyAjfOhr2ffk0bWAxDGq8BgmlwhH8AAAGDdGNwgiMog3VkcIIjKIlzZWNwMjU2azGhAjzKzqo5c33ydUUHrWJ4FWwIXJa2MN9BBsgZkj6mhthp" + pattern: "^[^-A-Za-z0-9+/=]+$" + 500: + $ref: '#/components/responses/InternalError' /network/peer_count: get: @@ -104,9 +106,27 @@ paths: type: integer format: uint64 example: 25 + 500: + $ref: '#/components/responses/InternalError' - #TODO: Complete our peer ID /network/peer_id: + get: + tags: + - Phase0 + summary: "Get the node's libp2p peer ID." + description: "Requests the node to provide it's libp2p ['peer ID'](https://github.com/libp2p/specs/blob/master/peer-ids/peer-ids.md), which is a base58 encoded SHA2-256 'multihash' of the node's public key struct." + responses: + 200: + description: Request successful + content: + application/json: + schema: + type: string + format: byte + example: "QmYyQSo1c1Ym7orWxLYvCrM2EmxFTANf8wXmmE7DWjhx5N" + pattern: "^[1-9A-HJ-NP-Za-km-z]{46}$" + 500: + $ref: '#/components/responses/InternalError' /network/peers: get: @@ -123,14 +143,15 @@ paths: type: array items: $ref: '#/components/schemas/Peer' + 500: + $ref: '#/components/responses/InternalError' - #TODO: Complete the /network/listen_port endpoint /network/listen_port: get: tags: - Phase0 - summary: "" - description: "" + summary: "Get the TCP port number for the libp2p listener." + description: "Libp2p is configured to listen to a particular TCP port upon startup of the beacon node. This endpoint returns the port number that the beacon node is listening on. Please note, this is for the libp2p communications, not for discovery." 
responses: 200: description: Request successful @@ -139,30 +160,27 @@ paths: schema: type: integer format: uint16 - example: 2468 + example: 9000 + 500: + $ref: '#/components/responses/InternalError' /network/listen_addresses: get: tags: - Phase0 - summary: "Identify if the beacon node is listening for networking connections, and on what address." - description: "Requests that the beacon node identify whether it is listening for incoming networking connections, and if so, what network address(es) are being used." + summary: "Identify the port and addresses listened to by the beacon node." + description: "Libp2p is configured to listen to a particular address, on a particular port. This address is represented the [`multiaddr`](https://multiformats.io/multiaddr/) format, and this endpoint requests the beacon node to list all listening addresses in this format." responses: 200: description: Request successful content: application/json: schema: - type: object - properties: - listening: - type: boolean - nullable: false - description: "True if the node is listening for incoming network connections. False if networking has been disabled or if the node has been configured to only connect with a static set of peers." 
- addresses: - type: array - items: - $ref: '#/components/schemas/multiaddr' + type: array + items: + $ref: '#/components/schemas/multiaddr' + 500: + $ref: '#/components/responses/InternalError' /network/stats: get: From bcd53a8b10f46488eb41eafb110cf8a5576de446 Mon Sep 17 00:00:00 2001 From: Paul Hauner Date: Thu, 29 Aug 2019 13:25:55 +1000 Subject: [PATCH 133/305] Migrate codebase across to new SlotClock API --- beacon_node/beacon_chain/src/beacon_chain.rs | 61 ++++++++++---------- beacon_node/beacon_chain/src/errors.rs | 1 + beacon_node/beacon_chain/src/test_utils.rs | 18 ++++-- beacon_node/network/src/sync/simple_sync.rs | 2 +- beacon_node/rest_api/src/helpers.rs | 4 +- eth2/utils/slot_clock/src/lib.rs | 20 ++++++- validator_client/src/error.rs | 7 --- validator_client/src/service.rs | 27 ++++----- 8 files changed, 78 insertions(+), 62 deletions(-) diff --git a/beacon_node/beacon_chain/src/beacon_chain.rs b/beacon_node/beacon_chain/src/beacon_chain.rs index 0fc71fe7b..9283d2231 100644 --- a/beacon_node/beacon_chain/src/beacon_chain.rs +++ b/beacon_node/beacon_chain/src/beacon_chain.rs @@ -23,6 +23,7 @@ use state_processing::{ per_slot_processing, BlockProcessingError, }; use std::sync::Arc; +use std::time::Duration; use store::iter::{BlockRootsIterator, StateRootsIterator}; use store::{Error as DBError, Store}; use tree_hash::TreeHash; @@ -173,11 +174,12 @@ impl BeaconChain { Ok(Some(p)) => p, }; - let slot_clock = T::SlotClock::new( + let slot_clock = T::SlotClock::from_eth2_genesis( spec.genesis_slot, p.state.genesis_time, - spec.seconds_per_slot, - ); + Duration::from_secs(spec.seconds_per_slot), + ) + .ok_or_else(|| Error::SlotClockDidNotStart)?; let last_finalized_root = p.canonical_head.beacon_state.finalized_checkpoint.root; let last_finalized_block = &p.canonical_head.beacon_block; @@ -216,6 +218,20 @@ impl BeaconChain { Ok(()) } + /// Reads the slot clock, returns `Err` if the slot is unavailable. 
+ /// + /// The slot might be unavailable due to an error with the system clock, or if the present time + /// is before genesis (i.e., a negative slot). + /// + /// This is distinct to `present_slot`, which simply reads the latest state. If a + /// call to `read_slot_clock` results in a higher slot than a call to `present_slot`, + /// `self.state` should undergo per slot processing. + pub fn present_slot(&self) -> Result { + self.slot_clock + .present_slot() + .ok_or_else(|| Error::UnableToReadSlot) + } + /// Returns the beacon block body for each beacon block root in `roots`. /// /// Fails if any root in `roots` does not have a corresponding block. @@ -326,10 +342,10 @@ impl BeaconChain { pub fn catchup_state(&self) -> Result<(), Error> { let spec = &self.spec; - let present_slot = match self.slot_clock.present_slot() { - Ok(Some(slot)) => slot, - _ => return Err(Error::UnableToReadSlot), - }; + let present_slot = self + .slot_clock + .present_slot() + .ok_or_else(|| Error::UnableToReadSlot)?; if self.state.read().slot < present_slot { let mut state = self.state.write(); @@ -369,26 +385,10 @@ impl BeaconChain { None } - /// Reads the slot clock, returns `None` if the slot is unavailable. - /// - /// The slot might be unavailable due to an error with the system clock, or if the present time - /// is before genesis (i.e., a negative slot). - /// - /// This is distinct to `present_slot`, which simply reads the latest state. If a - /// call to `read_slot_clock` results in a higher slot than a call to `present_slot`, - /// `self.state` should undergo per slot processing. - pub fn read_slot_clock(&self) -> Option { - match self.slot_clock.present_slot() { - Ok(Some(some_slot)) => Some(some_slot), - Ok(None) => None, - _ => None, - } - } - /// Reads the slot clock (see `self.read_slot_clock()` and returns the number of slots since /// genesis. 
pub fn slots_since_genesis(&self) -> Option { - let now = self.read_slot_clock()?; + let now = self.slot_clock.present_slot()?; let genesis_slot = self.spec.genesis_slot; if now < genesis_slot { @@ -398,6 +398,7 @@ impl BeaconChain { } } + /* /// Returns slot of the present state. /// /// This is distinct to `read_slot_clock`, which reads from the actual system clock. If @@ -406,6 +407,7 @@ impl BeaconChain { pub fn present_slot(&self) -> Slot { self.state.read().slot } + */ /// Returns the block proposer for a given slot. /// @@ -840,7 +842,8 @@ impl BeaconChain { } let present_slot = self - .read_slot_clock() + .slot_clock + .present_slot() .ok_or_else(|| Error::UnableToReadSlot)?; if block.slot > present_slot { @@ -1004,7 +1007,8 @@ impl BeaconChain { ) -> Result<(BeaconBlock, BeaconState), BlockProductionError> { let state = self.state.read().clone(); let slot = self - .read_slot_clock() + .slot_clock + .present_slot() .ok_or_else(|| BlockProductionError::UnableToReadSlot)?; self.produce_block_on_state(state, slot, randao_reveal) @@ -1181,10 +1185,7 @@ impl BeaconChain { *self.state.write() = { let mut state = self.canonical_head.read().beacon_state.clone(); - let present_slot = match self.slot_clock.present_slot() { - Ok(Some(slot)) => slot, - _ => return Err(Error::UnableToReadSlot), - }; + let present_slot = self.present_slot()?; // If required, transition the new state to the present slot. 
for _ in state.slot.as_u64()..present_slot.as_u64() { diff --git a/beacon_node/beacon_chain/src/errors.rs b/beacon_node/beacon_chain/src/errors.rs index 22df90397..8541a0d0b 100644 --- a/beacon_node/beacon_chain/src/errors.rs +++ b/beacon_node/beacon_chain/src/errors.rs @@ -25,6 +25,7 @@ pub enum BeaconChainError { previous_epoch: Epoch, new_epoch: Epoch, }, + SlotClockDidNotStart, UnableToFindTargetRoot(Slot), BeaconStateError(BeaconStateError), DBInconsistent(String), diff --git a/beacon_node/beacon_chain/src/test_utils.rs b/beacon_node/beacon_chain/src/test_utils.rs index 09f4749ea..4d6e56b04 100644 --- a/beacon_node/beacon_chain/src/test_utils.rs +++ b/beacon_node/beacon_chain/src/test_utils.rs @@ -6,6 +6,7 @@ use slot_clock::TestingSlotClock; use state_processing::per_slot_processing; use std::marker::PhantomData; use std::sync::Arc; +use std::time::Duration; use store::MemoryStore; use store::Store; use tree_hash::{SignedRoot, TreeHash}; @@ -115,11 +116,12 @@ where let log = builder.build().expect("logger should build"); // Slot clock - let slot_clock = TestingSlotClock::new( + let slot_clock = TestingSlotClock::from_eth2_genesis( spec.genesis_slot, genesis_state.genesis_time, - spec.seconds_per_slot, - ); + Duration::from_secs(spec.seconds_per_slot), + ) + .expect("Slot clock should start"); let chain = BeaconChain::from_genesis( store, @@ -164,7 +166,9 @@ where let mut state = { // Determine the slot for the first block (or skipped block). let state_slot = match block_strategy { - BlockStrategy::OnCanonicalHead => self.chain.read_slot_clock().unwrap() - 1, + BlockStrategy::OnCanonicalHead => { + self.chain.present_slot().expect("should have a slot") - 1 + } BlockStrategy::ForkCanonicalChainAt { previous_slot, .. } => previous_slot, }; @@ -173,14 +177,16 @@ where // Determine the first slot where a block should be built. 
let mut slot = match block_strategy { - BlockStrategy::OnCanonicalHead => self.chain.read_slot_clock().unwrap(), + BlockStrategy::OnCanonicalHead => { + self.chain.present_slot().expect("should have a slot") + } BlockStrategy::ForkCanonicalChainAt { first_slot, .. } => first_slot, }; let mut head_block_root = None; for _ in 0..num_blocks { - while self.chain.read_slot_clock().expect("should have a slot") < slot { + while self.chain.present_slot().expect("should have a slot") < slot { self.advance_slot(); } diff --git a/beacon_node/network/src/sync/simple_sync.rs b/beacon_node/network/src/sync/simple_sync.rs index 573ac9dd1..49196facc 100644 --- a/beacon_node/network/src/sync/simple_sync.rs +++ b/beacon_node/network/src/sync/simple_sync.rs @@ -387,7 +387,7 @@ impl SimpleSync { "peer" => format!("{:?}", peer_id), "msg" => "Failed to return all requested hashes", "start_slot" => req.start_slot, - "current_slot" => self.chain.present_slot(), + "current_slot" => format!("{:?}", self.chain.present_slot()), "requested" => req.count, "returned" => blocks.len(), ); diff --git a/beacon_node/rest_api/src/helpers.rs b/beacon_node/rest_api/src/helpers.rs index 5365086df..0f47200e9 100644 --- a/beacon_node/rest_api/src/helpers.rs +++ b/beacon_node/rest_api/src/helpers.rs @@ -88,8 +88,8 @@ pub fn state_root_at_slot( ) -> Result { let head_state = &beacon_chain.head().beacon_state; let current_slot = beacon_chain - .read_slot_clock() - .ok_or_else(|| ApiError::ServerError("Unable to read slot clock".to_string()))?; + .present_slot() + .map_err(|_| ApiError::ServerError("Unable to read slot clock".to_string()))?; // There are four scenarios when obtaining a state for a given slot: // diff --git a/eth2/utils/slot_clock/src/lib.rs b/eth2/utils/slot_clock/src/lib.rs index 988f3d322..5986191dc 100644 --- a/eth2/utils/slot_clock/src/lib.rs +++ b/eth2/utils/slot_clock/src/lib.rs @@ -5,7 +5,7 @@ mod metrics; mod system_time_slot_clock; mod testing_slot_clock; -use std::time::{Duration, 
Instant}; +use std::time::{Duration, Instant, SystemTime, UNIX_EPOCH}; pub use crate::system_time_slot_clock::SystemTimeSlotClock; pub use crate::testing_slot_clock::TestingSlotClock; @@ -13,6 +13,24 @@ pub use metrics::scrape_for_metrics; pub use types::Slot; pub trait SlotClock: Send + Sync + Sized { + fn from_eth2_genesis( + genesis_slot: Slot, + genesis_seconds: u64, + slot_duration: Duration, + ) -> Option { + let duration_between_now_and_unix_epoch = + SystemTime::now().duration_since(UNIX_EPOCH).ok()?; + let duration_between_unix_epoch_and_genesis = Duration::from_secs(genesis_seconds); + + if duration_between_now_and_unix_epoch < duration_between_unix_epoch_and_genesis { + None + } else { + let genesis_instant = Instant::now() + - (duration_between_now_and_unix_epoch - duration_between_unix_epoch_and_genesis); + Some(Self::new(genesis_slot, genesis_instant, slot_duration)) + } + } + fn new(genesis_slot: Slot, genesis: Instant, slot_duration: Duration) -> Self; fn present_slot(&self) -> Option; diff --git a/validator_client/src/error.rs b/validator_client/src/error.rs index 97500f900..e13f7ded5 100644 --- a/validator_client/src/error.rs +++ b/validator_client/src/error.rs @@ -1,16 +1,9 @@ -use slot_clock; - use error_chain::error_chain; error_chain! 
{ links { } errors { - SlotClockError(e: slot_clock::SystemTimeSlotClockError) { - description("Error reading system time"), - display("SlotClockError: '{:?}'", e) - } - SystemTimeError(t: String ) { description("Error reading system time"), display("SystemTimeError: '{}'", t) diff --git a/validator_client/src/service.rs b/validator_client/src/service.rs index 3ddb96e4c..62a782da9 100644 --- a/validator_client/src/service.rs +++ b/validator_client/src/service.rs @@ -13,7 +13,6 @@ use crate::block_producer::{BeaconBlockGrpcClient, BlockProducer}; use crate::config::Config as ValidatorConfig; use crate::duties::{BeaconNodeDuties, DutiesManager, EpochDutiesMap}; use crate::error as error_chain; -use crate::error::ErrorKind; use crate::signer::Signer; use bls::Keypair; use eth2_config::Eth2Config; @@ -159,17 +158,19 @@ impl Service(|| { + "Unable to start slot clock. Genesis may not have occurred yet. Exiting.".into() + })?; let current_slot = slot_clock .present_slot() - .map_err(ErrorKind::SlotClockError)? .ok_or_else::(|| { - "Genesis is not in the past. Exiting.".into() + "Genesis has not yet occurred. Exiting.".into() })?; /* Generate the duties manager */ @@ -244,7 +245,6 @@ impl Service(|| { "Genesis is not in the past. Exiting.".into() })?; @@ -291,15 +291,12 @@ impl Service error_chain::Result<()> { - let current_slot = match self.slot_clock.present_slot() { - Err(e) => { - error!(self.log, "SystemTimeError {:?}", e); - return Err("Could not read system time".into()); - } - Ok(slot) => slot.ok_or_else::(|| { + let current_slot = self + .slot_clock + .present_slot() + .ok_or_else::(|| { "Genesis is not in the past. Exiting.".into() - })?, - }; + })?; let current_epoch = current_slot.epoch(self.slots_per_epoch); From b9276da9db8dedced012b995f354994015b70a28 Mon Sep 17 00:00:00 2001 From: Luke Anderson Date: Thu, 29 Aug 2019 13:36:51 +1000 Subject: [PATCH 134/305] Flesh spec. & update display bugs. 
- Add correct string formatting when incorrect parameters provided. - Fill /beacon/block and /beacon/block_root endpoints - Add 500 error responses to endpoints as appropriate --- beacon_node/rest_api/src/beacon.rs | 8 ++-- docs/api_spec.yaml | 69 ++++++++++++++++++++++++++++-- 2 files changed, 69 insertions(+), 8 deletions(-) diff --git a/beacon_node/rest_api/src/beacon.rs b/beacon_node/rest_api/src/beacon.rs index 5dcbc728a..66d0b2673 100644 --- a/beacon_node/rest_api/src/beacon.rs +++ b/beacon_node/rest_api/src/beacon.rs @@ -81,7 +81,7 @@ pub fn get_block(req: Request) -> ApiResult let target = parse_slot(&value)?; block_root_at_slot(&beacon_chain, target).ok_or_else(|| { - ApiError::NotFound(format!("Unable to find BeaconBlock for slot {}", target)) + ApiError::NotFound(format!("Unable to find BeaconBlock for slot {:?}", target)) })? } ("root", value) => parse_root(&value)?, @@ -93,7 +93,7 @@ pub fn get_block(req: Request) -> ApiResult .get::>(&block_root)? .ok_or_else(|| { ApiError::NotFound(format!( - "Unable to find BeaconBlock for root {}", + "Unable to find BeaconBlock for root {:?}", block_root )) })?; @@ -121,7 +121,7 @@ pub fn get_block_root(req: Request) -> ApiR let target = parse_slot(&slot_string)?; let root = block_root_at_slot(&beacon_chain, target).ok_or_else(|| { - ApiError::NotFound(format!("Unable to find BeaconBlock for slot {}", target)) + ApiError::NotFound(format!("Unable to find BeaconBlock for slot {:?}", target)) })?; let json: String = serde_json::to_string(&root) @@ -174,7 +174,7 @@ pub fn get_state(req: Request) -> ApiResult let state = beacon_chain .store .get(root)? 
- .ok_or_else(|| ApiError::NotFound(format!("No state for root: {}", root)))?; + .ok_or_else(|| ApiError::NotFound(format!("No state for root: {:?}", root)))?; (*root, state) } diff --git a/docs/api_spec.yaml b/docs/api_spec.yaml index 5053c5dd2..b43d99b8a 100644 --- a/docs/api_spec.yaml +++ b/docs/api_spec.yaml @@ -248,9 +248,6 @@ paths: format: uint64 description: "UNIX time in milliseconds that the block was first discovered, either from a network peer or the validator client." - - - /beacon/head: get: tags: @@ -307,13 +304,75 @@ paths: format: bytes pattern: "^0x[a-fA-F0-9]{64}$" description: "The block root of the second most recent justified block." + 500: + $ref: '#/components/responses/InternalError' - #TODO Fill out block endpoint /beacon/block: + get: + tags: + - Phase0 + summary: 'Retrieve blocks by root or slot.' + description: "Request that the beacon node return beacon chain blocks that match the provided criteria (a block root or beacon chain slot). Only one of the parameters can be be provided at a time." + parameters: + - name: root + description: "Filter by block root." + in: query + required: false + schema: + type: string + format: bytes + pattern: "^0x[a-fA-F0-9]{64}$" + - name: slot + description: "Filter blocks by slot number. Only one block which has been finalized, or is believed to be the canonical block for that slot, is returned." + in: query + required: false + schema: + type: integer + format: uint64 + responses: + 200: + description: Success response. + content: + application/json: + schema: + type: array + items: + $ref: '#/components/schemas/BeaconBlock' + 400: + $ref: '#/components/responses/InvalidRequest' + 500: + $ref: '#/components/responses/InternalError' #TODO Fill out block_root endpoint /beacon/block_root: + get: + tags: + - Phase0 + summary: "Retrieve the canonical block root, given a particular slot." 
+ description: "Request that the beacon node return the root of the canonical beacon chain block, which matches the provided slot number." + parameters: + - name: slot + description: "Filter blocks by slot number. Only one block which has been finalized, or is believed to be the canonical block for that slot, is returned." + in: query + required: true + schema: + type: integer + format: uint64 + responses: + 200: + description: Success response. + content: + application/json: + schema: + type: string + format: byte + pattern: "^0x[a-fA-F0-9]{64}$" + description: "The 0x prefixed block root." + 400: + $ref: '#/components/responses/InvalidRequest' + 500: + $ref: '#/components/responses/InternalError' /beacon/blocks: get: @@ -373,6 +432,8 @@ paths: application/json: schema: $ref: '#/components/schemas/Fork' + 500: + $ref: '#/components/responses/InternalError' /beacon/attestations: From e9e912323e7a1327f6f6b26e64d7429fa9311a29 Mon Sep 17 00:00:00 2001 From: Paul Hauner Date: Thu, 29 Aug 2019 13:56:00 +1000 Subject: [PATCH 135/305] Restrict fork choice iterators to the root --- eth2/lmd_ghost/src/reduced_tree.rs | 10 ++++++++-- 1 file changed, 8 insertions(+), 2 deletions(-) diff --git a/eth2/lmd_ghost/src/reduced_tree.rs b/eth2/lmd_ghost/src/reduced_tree.rs index a388d2c38..73fab13bf 100644 --- a/eth2/lmd_ghost/src/reduced_tree.rs +++ b/eth2/lmd_ghost/src/reduced_tree.rs @@ -470,6 +470,7 @@ where // descendant of both `node` and `prev_in_tree`. if self .iter_ancestors(child_hash)? + .take_while(|(_, slot)| *slot >= self.root_slot()) .any(|(ancestor, _slot)| ancestor == node.block_hash) { let child = self.get_mut_node(child_hash)?; @@ -555,6 +556,7 @@ where fn find_prev_in_tree(&mut self, hash: Hash256) -> Option { self.iter_ancestors(hash) .ok()? 
+ .take_while(|(_, slot)| *slot >= self.root_slot()) .find(|(root, _slot)| self.nodes.contains_key(root)) .and_then(|(root, _slot)| Some(root)) } @@ -562,8 +564,12 @@ where /// For the two given block roots (`a_root` and `b_root`), find the first block they share in /// the tree. Viz, find the block that these two distinct blocks forked from. fn find_highest_common_ancestor(&self, a_root: Hash256, b_root: Hash256) -> Result { - let mut a_iter = self.iter_ancestors(a_root)?; - let mut b_iter = self.iter_ancestors(b_root)?; + let mut a_iter = self + .iter_ancestors(a_root)? + .take_while(|(_, slot)| *slot >= self.root_slot()); + let mut b_iter = self + .iter_ancestors(b_root)? + .take_while(|(_, slot)| *slot >= self.root_slot()); // Combines the `next()` fns on the `a_iter` and `b_iter` and returns the roots of two // blocks at the same slot, or `None` if we have gone past genesis or the root of this tree. From 7d03806107db9c2f6ad0984682ae0d7f652fb563 Mon Sep 17 00:00:00 2001 From: Paul Hauner Date: Thu, 29 Aug 2019 14:26:30 +1000 Subject: [PATCH 136/305] Upgrade codebase to new SlotClock API --- beacon_node/beacon_chain/src/beacon_chain.rs | 39 +++++++------------ beacon_node/beacon_chain/src/test_utils.rs | 8 ++-- beacon_node/client/src/lib.rs | 10 +++-- beacon_node/network/src/sync/simple_sync.rs | 2 +- beacon_node/rest_api/src/helpers.rs | 2 +- .../builders/testing_beacon_state_builder.rs | 4 +- eth2/utils/slot_clock/src/lib.rs | 2 +- eth2/utils/slot_clock/src/metrics.rs | 2 +- .../slot_clock/src/system_time_slot_clock.rs | 10 ++--- .../slot_clock/src/testing_slot_clock.rs | 8 ++-- validator_client/src/service.rs | 10 ++--- 11 files changed, 43 insertions(+), 54 deletions(-) diff --git a/beacon_node/beacon_chain/src/beacon_chain.rs b/beacon_node/beacon_chain/src/beacon_chain.rs index 9ad4d5414..67e4646c6 100644 --- a/beacon_node/beacon_chain/src/beacon_chain.rs +++ b/beacon_node/beacon_chain/src/beacon_chain.rs @@ -148,11 +148,12 @@ impl BeaconChain { ); // Slot 
clock - let slot_clock = T::SlotClock::new( + let slot_clock = T::SlotClock::from_eth2_genesis( spec.genesis_slot, genesis_state.genesis_time, - spec.seconds_per_slot, - ); + Duration::from_secs(spec.seconds_per_slot), + ) + .ok_or_else(|| Error::SlotClockDidNotStart)?; Ok(Self { spec, @@ -224,18 +225,13 @@ impl BeaconChain { Ok(()) } - /// Reads the slot clock, returns `Err` if the slot is unavailable. + /// Returns the slot _right now_ according to `self.slot_clock`. Returns `Err` if the slot is + /// unavailable. /// /// The slot might be unavailable due to an error with the system clock, or if the present time /// is before genesis (i.e., a negative slot). - /// - /// This is distinct to `present_slot`, which simply reads the latest state. If a - /// call to `read_slot_clock` results in a higher slot than a call to `present_slot`, - /// `self.state` should undergo per slot processing. - pub fn present_slot(&self) -> Result { - self.slot_clock - .present_slot() - .ok_or_else(|| Error::UnableToReadSlot) + pub fn slot(&self) -> Result { + self.slot_clock.now().ok_or_else(|| Error::UnableToReadSlot) } /// Returns the beacon block body for each beacon block root in `roots`. @@ -348,10 +344,7 @@ impl BeaconChain { pub fn catchup_state(&self) -> Result<(), Error> { let spec = &self.spec; - let present_slot = self - .slot_clock - .present_slot() - .ok_or_else(|| Error::UnableToReadSlot)?; + let present_slot = self.slot()?; if self.state.read().slot < present_slot { let mut state = self.state.write(); @@ -394,7 +387,7 @@ impl BeaconChain { /// Reads the slot clock (see `self.read_slot_clock()` and returns the number of slots since /// genesis. 
pub fn slots_since_genesis(&self) -> Option { - let now = self.slot_clock.present_slot()?; + let now = self.slot().ok()?; let genesis_slot = self.spec.genesis_slot; if now < genesis_slot { @@ -847,10 +840,7 @@ impl BeaconChain { return Ok(BlockProcessingOutcome::GenesisBlock); } - let present_slot = self - .slot_clock - .present_slot() - .ok_or_else(|| Error::UnableToReadSlot)?; + let present_slot = self.slot()?; if block.slot > present_slot { return Ok(BlockProcessingOutcome::FutureSlot { @@ -1013,9 +1003,8 @@ impl BeaconChain { ) -> Result<(BeaconBlock, BeaconState), BlockProductionError> { let state = self.state.read().clone(); let slot = self - .slot_clock - .present_slot() - .ok_or_else(|| BlockProductionError::UnableToReadSlot)?; + .slot() + .map_err(|_| BlockProductionError::UnableToReadSlot)?; self.produce_block_on_state(state, slot, randao_reveal) } @@ -1191,7 +1180,7 @@ impl BeaconChain { *self.state.write() = { let mut state = self.canonical_head.read().beacon_state.clone(); - let present_slot = self.present_slot()?; + let present_slot = self.slot()?; // If required, transition the new state to the present slot. for _ in state.slot.as_u64()..present_slot.as_u64() { diff --git a/beacon_node/beacon_chain/src/test_utils.rs b/beacon_node/beacon_chain/src/test_utils.rs index 6ab657b08..c45a22fd8 100644 --- a/beacon_node/beacon_chain/src/test_utils.rs +++ b/beacon_node/beacon_chain/src/test_utils.rs @@ -151,7 +151,7 @@ where // Determine the slot for the first block (or skipped block). let state_slot = match block_strategy { BlockStrategy::OnCanonicalHead => { - self.chain.present_slot().expect("should have a slot") - 1 + self.chain.slot().expect("should have a slot") - 1 } BlockStrategy::ForkCanonicalChainAt { previous_slot, .. } => previous_slot, }; @@ -161,16 +161,14 @@ where // Determine the first slot where a block should be built. 
let mut slot = match block_strategy { - BlockStrategy::OnCanonicalHead => { - self.chain.present_slot().expect("should have a slot") - } + BlockStrategy::OnCanonicalHead => self.chain.slot().expect("should have a slot"), BlockStrategy::ForkCanonicalChainAt { first_slot, .. } => first_slot, }; let mut head_block_root = None; for _ in 0..num_blocks { - while self.chain.present_slot().expect("should have a slot") < slot { + while self.chain.slot().expect("should have a slot") < slot { self.advance_slot(); } diff --git a/beacon_node/client/src/lib.rs b/beacon_node/client/src/lib.rs index 2612fd648..004353d38 100644 --- a/beacon_node/client/src/lib.rs +++ b/beacon_node/client/src/lib.rs @@ -114,7 +114,7 @@ where .map_err(error::Error::from)?, ); - if beacon_chain.read_slot_clock().is_none() { + if beacon_chain.slot().is_err() { panic!("Cannot start client before genesis!") } @@ -124,7 +124,9 @@ where // blocks and we're basically useless. { let state_slot = beacon_chain.head().beacon_state.slot; - let wall_clock_slot = beacon_chain.read_slot_clock().unwrap(); + let wall_clock_slot = beacon_chain + .slot() + .expect("Cannot start client before genesis"); let slots_since_genesis = beacon_chain.slots_since_genesis().unwrap(); info!( log, @@ -176,7 +178,7 @@ where }; let (slot_timer_exit_signal, exit) = exit_future::signal(); - if let Ok(Some(duration_to_next_slot)) = beacon_chain.slot_clock.duration_to_next_slot() { + if let Some(duration_to_next_slot) = beacon_chain.slot_clock.duration_to_next_slot() { // set up the validator work interval - start at next slot and proceed every slot let interval = { // Set the interval to start at the next slot, and every slot after @@ -223,7 +225,7 @@ impl Drop for Client { fn do_state_catchup(chain: &Arc>, log: &slog::Logger) { // Only attempt to `catchup_state` if we can read the slot clock. 
- if let Some(current_slot) = chain.read_slot_clock() { + if let Ok(current_slot) = chain.slot() { let state_catchup_result = chain.catchup_state(); let best_slot = chain.head().beacon_block.slot; diff --git a/beacon_node/network/src/sync/simple_sync.rs b/beacon_node/network/src/sync/simple_sync.rs index 49196facc..d3ed2f3e4 100644 --- a/beacon_node/network/src/sync/simple_sync.rs +++ b/beacon_node/network/src/sync/simple_sync.rs @@ -387,7 +387,7 @@ impl SimpleSync { "peer" => format!("{:?}", peer_id), "msg" => "Failed to return all requested hashes", "start_slot" => req.start_slot, - "current_slot" => format!("{:?}", self.chain.present_slot()), + "current_slot" => format!("{:?}", self.chain.slot()), "requested" => req.count, "returned" => blocks.len(), ); diff --git a/beacon_node/rest_api/src/helpers.rs b/beacon_node/rest_api/src/helpers.rs index 0f47200e9..aeaf5ad6e 100644 --- a/beacon_node/rest_api/src/helpers.rs +++ b/beacon_node/rest_api/src/helpers.rs @@ -88,7 +88,7 @@ pub fn state_root_at_slot( ) -> Result { let head_state = &beacon_chain.head().beacon_state; let current_slot = beacon_chain - .present_slot() + .slot() .map_err(|_| ApiError::ServerError("Unable to read slot clock".to_string()))?; // There are four scenarios when obtaining a state for a given slot: diff --git a/eth2/types/src/test_utils/builders/testing_beacon_state_builder.rs b/eth2/types/src/test_utils/builders/testing_beacon_state_builder.rs index 98f840953..4f8a2d924 100644 --- a/eth2/types/src/test_utils/builders/testing_beacon_state_builder.rs +++ b/eth2/types/src/test_utils/builders/testing_beacon_state_builder.rs @@ -123,8 +123,10 @@ impl TestingBeaconStateBuilder { .collect::>() .into(); + let genesis_time = 1567052589; // 29 August, 2019; + let mut state = BeaconState::new( - spec.min_genesis_time, + genesis_time, Eth1Data { deposit_root: Hash256::zero(), deposit_count: 0, diff --git a/eth2/utils/slot_clock/src/lib.rs b/eth2/utils/slot_clock/src/lib.rs index 5986191dc..fd3bf029b 
100644 --- a/eth2/utils/slot_clock/src/lib.rs +++ b/eth2/utils/slot_clock/src/lib.rs @@ -33,7 +33,7 @@ pub trait SlotClock: Send + Sync + Sized { fn new(genesis_slot: Slot, genesis: Instant, slot_duration: Duration) -> Self; - fn present_slot(&self) -> Option; + fn now(&self) -> Option; fn duration_to_next_slot(&self) -> Option; diff --git a/eth2/utils/slot_clock/src/metrics.rs b/eth2/utils/slot_clock/src/metrics.rs index 1abd93c48..d1de491d0 100644 --- a/eth2/utils/slot_clock/src/metrics.rs +++ b/eth2/utils/slot_clock/src/metrics.rs @@ -17,7 +17,7 @@ lazy_static! { /// Update the global metrics `DEFAULT_REGISTRY` with info from the slot clock. pub fn scrape_for_metrics(clock: &U) { - let present_slot = match clock.present_slot() { + let present_slot = match clock.now() { Some(slot) => slot, _ => Slot::new(0), }; diff --git a/eth2/utils/slot_clock/src/system_time_slot_clock.rs b/eth2/utils/slot_clock/src/system_time_slot_clock.rs index 88c9c0e63..0d4a52ef6 100644 --- a/eth2/utils/slot_clock/src/system_time_slot_clock.rs +++ b/eth2/utils/slot_clock/src/system_time_slot_clock.rs @@ -25,7 +25,7 @@ impl SlotClock for SystemTimeSlotClock { } } - fn present_slot(&self) -> Option { + fn now(&self) -> Option { let now = Instant::now(); if now < self.genesis { @@ -80,18 +80,18 @@ mod tests { let clock = SystemTimeSlotClock::new(genesis_slot, prior_genesis(0), Duration::from_secs(1)); - assert_eq!(clock.present_slot(), Some(Slot::new(0))); + assert_eq!(clock.now(), Some(Slot::new(0))); let clock = SystemTimeSlotClock::new(genesis_slot, prior_genesis(5), Duration::from_secs(1)); - assert_eq!(clock.present_slot(), Some(Slot::new(5))); + assert_eq!(clock.now(), Some(Slot::new(5))); let clock = SystemTimeSlotClock::new( genesis_slot, Instant::now() - Duration::from_millis(500), Duration::from_secs(1), ); - assert_eq!(clock.present_slot(), Some(Slot::new(0))); + assert_eq!(clock.now(), Some(Slot::new(0))); assert!(clock.duration_to_next_slot().unwrap() < 
Duration::from_millis(500)); let clock = SystemTimeSlotClock::new( @@ -99,7 +99,7 @@ mod tests { Instant::now() - Duration::from_millis(1_500), Duration::from_secs(1), ); - assert_eq!(clock.present_slot(), Some(Slot::new(1))); + assert_eq!(clock.now(), Some(Slot::new(1))); assert!(clock.duration_to_next_slot().unwrap() < Duration::from_millis(500)); } diff --git a/eth2/utils/slot_clock/src/testing_slot_clock.rs b/eth2/utils/slot_clock/src/testing_slot_clock.rs index 0b65b1569..d90cb157a 100644 --- a/eth2/utils/slot_clock/src/testing_slot_clock.rs +++ b/eth2/utils/slot_clock/src/testing_slot_clock.rs @@ -16,7 +16,7 @@ impl TestingSlotClock { } pub fn advance_slot(&self) { - self.set_slot(self.present_slot().unwrap().as_u64() + 1) + self.set_slot(self.now().unwrap().as_u64() + 1) } } @@ -27,7 +27,7 @@ impl SlotClock for TestingSlotClock { } } - fn present_slot(&self) -> Option { + fn now(&self) -> Option { let slot = *self.slot.read().expect("TestingSlotClock poisoned."); Some(slot) } @@ -50,8 +50,8 @@ mod tests { #[test] fn test_slot_now() { let clock = TestingSlotClock::new(Slot::new(10), Instant::now(), Duration::from_secs(0)); - assert_eq!(clock.present_slot(), Some(Slot::new(10))); + assert_eq!(clock.now(), Some(Slot::new(10))); clock.set_slot(123); - assert_eq!(clock.present_slot(), Some(Slot::new(123))); + assert_eq!(clock.now(), Some(Slot::new(123))); } } diff --git a/validator_client/src/service.rs b/validator_client/src/service.rs index 62a782da9..68a913265 100644 --- a/validator_client/src/service.rs +++ b/validator_client/src/service.rs @@ -167,11 +167,9 @@ impl Service(|| { - "Genesis has not yet occurred. Exiting.".into() - })?; + let current_slot = slot_clock.now().ok_or_else::(|| { + "Genesis has not yet occurred. Exiting.".into() + })?; /* Generate the duties manager */ @@ -293,7 +291,7 @@ impl Service error_chain::Result<()> { let current_slot = self .slot_clock - .present_slot() + .now() .ok_or_else::(|| { "Genesis is not in the past. 
Exiting.".into() })?; From 8cfbe8bbfba1b0d9371455959a5260f9675f767c Mon Sep 17 00:00:00 2001 From: Paul Hauner Date: Thu, 29 Aug 2019 14:32:21 +1000 Subject: [PATCH 137/305] Change seconds_per_slot to milliseconds_per_slot --- beacon_node/beacon_chain/src/beacon_chain.rs | 15 ++------------- beacon_node/client/src/lib.rs | 4 ++-- eth2/types/src/chain_spec.rs | 6 +++--- validator_client/src/service.rs | 4 ++-- 4 files changed, 9 insertions(+), 20 deletions(-) diff --git a/beacon_node/beacon_chain/src/beacon_chain.rs b/beacon_node/beacon_chain/src/beacon_chain.rs index 67e4646c6..fb2f8ea6a 100644 --- a/beacon_node/beacon_chain/src/beacon_chain.rs +++ b/beacon_node/beacon_chain/src/beacon_chain.rs @@ -151,7 +151,7 @@ impl BeaconChain { let slot_clock = T::SlotClock::from_eth2_genesis( spec.genesis_slot, genesis_state.genesis_time, - Duration::from_secs(spec.seconds_per_slot), + Duration::from_millis(spec.milliseconds_per_slot), ) .ok_or_else(|| Error::SlotClockDidNotStart)?; @@ -184,7 +184,7 @@ impl BeaconChain { let slot_clock = T::SlotClock::from_eth2_genesis( spec.genesis_slot, p.state.genesis_time, - Duration::from_secs(spec.seconds_per_slot), + Duration::from_millis(spec.milliseconds_per_slot), ) .ok_or_else(|| Error::SlotClockDidNotStart)?; @@ -397,17 +397,6 @@ impl BeaconChain { } } - /* - /// Returns slot of the present state. - /// - /// This is distinct to `read_slot_clock`, which reads from the actual system clock. If - /// `self.state` has not been transitioned it is possible for the system clock to be on a - /// different slot to what is returned from this call. - pub fn present_slot(&self) -> Slot { - self.state.read().slot - } - */ - /// Returns the block proposer for a given slot. 
/// /// Information is read from the present `beacon_state` shuffling, only information from the diff --git a/beacon_node/client/src/lib.rs b/beacon_node/client/src/lib.rs index 004353d38..67528e2f9 100644 --- a/beacon_node/client/src/lib.rs +++ b/beacon_node/client/src/lib.rs @@ -76,7 +76,7 @@ where executor: &TaskExecutor, ) -> error::Result { let store = Arc::new(store); - let seconds_per_slot = eth2_config.spec.seconds_per_slot; + let milliseconds_per_slot = eth2_config.spec.milliseconds_per_slot; let spec = ð2_config.spec.clone(); @@ -182,7 +182,7 @@ where // set up the validator work interval - start at next slot and proceed every slot let interval = { // Set the interval to start at the next slot, and every slot after - let slot_duration = Duration::from_secs(seconds_per_slot); + let slot_duration = Duration::from_millis(milliseconds_per_slot); //TODO: Handle checked add correctly Interval::new(Instant::now() + duration_to_next_slot, slot_duration) }; diff --git a/eth2/types/src/chain_spec.rs b/eth2/types/src/chain_spec.rs index 9dec626d4..d59e0db0a 100644 --- a/eth2/types/src/chain_spec.rs +++ b/eth2/types/src/chain_spec.rs @@ -58,7 +58,7 @@ pub struct ChainSpec { /* * Time parameters */ - pub seconds_per_slot: u64, + pub milliseconds_per_slot: u64, pub min_attestation_inclusion_delay: u64, pub min_seed_lookahead: Epoch, pub activation_exit_delay: u64, @@ -158,7 +158,7 @@ impl ChainSpec { /* * Time parameters */ - seconds_per_slot: 6, + milliseconds_per_slot: 6_000, min_attestation_inclusion_delay: 1, min_seed_lookahead: Epoch::new(1), activation_exit_delay: 4, @@ -221,7 +221,7 @@ impl ChainSpec { let boot_nodes = vec![]; Self { - seconds_per_slot: 12, + milliseconds_per_slot: 12_000, target_committee_size: 4, shuffle_round_count: 10, network_id: 13, diff --git a/validator_client/src/service.rs b/validator_client/src/service.rs index 68a913265..bd694668b 100644 --- a/validator_client/src/service.rs +++ b/validator_client/src/service.rs @@ -161,7 +161,7 @@ 
impl Service(|| { "Unable to start slot clock. Genesis may not have occurred yet. Exiting.".into() @@ -250,7 +250,7 @@ impl Service Date: Thu, 29 Aug 2019 14:58:49 +1000 Subject: [PATCH 138/305] Start implementation of 'get attstation' validator function. - Created new /beacon/validator/attestation endpoint - Updated some small issues with the API spec. --- beacon_node/rest_api/src/lib.rs | 2 +- beacon_node/rest_api/src/validator.rs | 66 ++++++++++++++++++++++++++- docs/api_spec.yaml | 4 +- 3 files changed, 67 insertions(+), 5 deletions(-) diff --git a/beacon_node/rest_api/src/lib.rs b/beacon_node/rest_api/src/lib.rs index b7fd3f581..2c7b90e3f 100644 --- a/beacon_node/rest_api/src/lib.rs +++ b/beacon_node/rest_api/src/lib.rs @@ -173,7 +173,7 @@ pub fn start_server( helpers::implementation_pending_response(req) } (&Method::GET, "/beacon/validator/attestation") => { - helpers::implementation_pending_response(req) + validator::get_new_attestation::(req) } (&Method::POST, "/beacon/validator/attestation") => { helpers::implementation_pending_response(req) diff --git a/beacon_node/rest_api/src/validator.rs b/beacon_node/rest_api/src/validator.rs index 645a35837..450ef5e5f 100644 --- a/beacon_node/rest_api/src/validator.rs +++ b/beacon_node/rest_api/src/validator.rs @@ -62,7 +62,6 @@ pub fn get_validator_duties(req: Request) - e )) })?; - //TODO: Handle an array of validators, currently only takes one let validators: Vec = match query.all_of("validator_pubkeys") { Ok(v) => v .iter() @@ -197,7 +196,70 @@ pub fn get_new_beacon_block(req: Request) - let body = Body::from( serde_json::to_string(&new_block) - .expect("We should always be able to serialize a new block that we created."), + .expect("We should always be able to serialize a new block that we produced."), + ); + Ok(success_response(body)) +} + +/// HTTP Handler to produce a new Attestation from the current state, ready to be signed by a validator. 
+pub fn get_new_attestation(req: Request) -> ApiResult { + // Get beacon state + let beacon_chain = req + .extensions() + .get::>>() + .ok_or_else(|| ApiError::ServerError("Beacon chain extension missing".to_string()))?; + //TODO Surely this state_cache thing is not necessary? + let _ = beacon_chain + .ensure_state_caches_are_built() + .map_err(|e| ApiError::ServerError(format!("Unable to build state caches: {:?}", e)))?; + + let query = UrlQuery::from_request(&req)?; + let validator: PublicKey = match query.first_of(&["validator_pubkey"]) { + Ok((_, v)) => parse_pubkey(v.as_str())?, + Err(e) => { + return Err(e); + } + }; + let poc_bit: bool = match query.first_of(&["poc_bit"]) { + Ok((_, v)) => v.parse::().map_err(|e| ApiError::InvalidQueryParams(format!("poc_bit is not a valid boolean value: {:?}", e)))?, + Err(e) => { + return Err(e); + } + }; + //TODO: this is probably unnecessary if we're always doing it by current slot. + let _slot = match query.first_of(&["slot"]) { + Ok((_, v)) => { + let requested_slot = v.parse::().map_err(|e| { + ApiError::InvalidQueryParams(format!("Invalid slot parameter, must be a u64. 
{:?}", e)) + })?; + let current_slot = beacon_chain.head().beacon_state.slot.as_u64(); + if requested_slot != current_slot { + return Err(ApiError::InvalidQueryParams(format!("Attestation data can only be requested for the current slot ({:?}), not your requested slot ({:?})", current_slot, requested_slot))); + } + Slot::new(requested_slot) + }, + Err(e) => { + return Err(e); + } + }; + let shard: Shard = match query.first_of(&["shard"]) { + Ok((_, v)) => v.parse::().map_err(|e| ApiError::InvalidQueryParams(format!("Shard is not a valid u64 value: {:?}", e)))?, + Err(e) => { + return Err(e); + } + }; + + let attestation_data = match beacon_chain.produce_attestation_data(shard) { + Ok(v) => v, + Err(e) => { + return Err(ApiError::ServerError(format!("Could not produce an attestation: {:?}", e))); + } + }; + + //TODO: This is currently AttestationData, but should be IndexedAttestation? + let body = Body::from( + serde_json::to_string(&attestation_data) + .expect("We should always be able to serialize a new attestation that we produced."), ); Ok(success_response(body)) } diff --git a/docs/api_spec.yaml b/docs/api_spec.yaml index b43d99b8a..42b394e69 100644 --- a/docs/api_spec.yaml +++ b/docs/api_spec.yaml @@ -839,8 +839,6 @@ paths: $ref: '#/components/schemas/ValidatorDuty' 400: $ref: '#/components/responses/InvalidRequest' - 406: - description: "Duties cannot be provided for the requested epoch." 500: $ref: '#/components/responses/InternalError' 503: @@ -867,6 +865,8 @@ paths: schema: type: string format: byte + pattern: "^0x[a-fA-F0-9]{192}$" + description: "A valid BLS signature." 
responses: 200: description: Success response From 314780e634aa471e82af81f180582521b62e667e Mon Sep 17 00:00:00 2001 From: Paul Hauner Date: Thu, 29 Aug 2019 14:59:32 +1000 Subject: [PATCH 139/305] Allow for customizable recent genesis window --- .../beacon_chain/src/beacon_chain_builder.rs | 15 ++++++++++----- beacon_node/client/src/config.rs | 5 ++++- beacon_node/client/src/lib.rs | 12 +++++++++--- beacon_node/src/config.rs | 7 +++++++ beacon_node/src/main.rs | 8 +++++++- 5 files changed, 37 insertions(+), 10 deletions(-) diff --git a/beacon_node/beacon_chain/src/beacon_chain_builder.rs b/beacon_node/beacon_chain/src/beacon_chain_builder.rs index 79c74b006..223d99d8d 100644 --- a/beacon_node/beacon_chain/src/beacon_chain_builder.rs +++ b/beacon_node/beacon_chain/src/beacon_chain_builder.rs @@ -22,8 +22,13 @@ pub struct BeaconChainBuilder { } impl BeaconChainBuilder { - pub fn recent_genesis(validator_count: usize, spec: ChainSpec, log: Logger) -> Self { - Self::quick_start(recent_genesis_time(), validator_count, spec, log) + pub fn recent_genesis( + validator_count: usize, + minutes: u64, + spec: ChainSpec, + log: Logger, + ) -> Self { + Self::quick_start(recent_genesis_time(minutes), validator_count, spec, log) } pub fn quick_start( @@ -123,12 +128,12 @@ fn genesis_block(genesis_state: &BeaconState, spec: &ChainSpec) - /// Returns the system time, mod 30 minutes. /// /// Used for easily creating testnets. -fn recent_genesis_time() -> u64 { +fn recent_genesis_time(minutes: u64) -> u64 { let now = SystemTime::now() .duration_since(SystemTime::UNIX_EPOCH) .unwrap() .as_secs(); - let secs_after_last_period = now.checked_rem(30 * 60).unwrap_or(0); - // genesis is now the last 30 minute block. + let secs_after_last_period = now.checked_rem(minutes * 60).unwrap_or(0); + // genesis is now the last 15 minute block. 
now - secs_after_last_period } diff --git a/beacon_node/client/src/config.rs b/beacon_node/client/src/config.rs index f2725b3e7..3aed26881 100644 --- a/beacon_node/client/src/config.rs +++ b/beacon_node/client/src/config.rs @@ -42,7 +42,10 @@ pub enum BeaconChainStartMethod { /// Create a new beacon chain that can connect to mainnet. /// /// Set the genesis time to be the start of the previous 30-minute window. - RecentGenesis { validator_count: usize }, + RecentGenesis { + validator_count: usize, + minutes: u64, + }, /// Create a new beacon chain with `genesis_time` and `validator_count` validators, all with well-known /// secret keys. Generated { diff --git a/beacon_node/client/src/lib.rs b/beacon_node/client/src/lib.rs index 67528e2f9..4554ff9a1 100644 --- a/beacon_node/client/src/lib.rs +++ b/beacon_node/client/src/lib.rs @@ -88,9 +88,15 @@ where crit!(log, "No mainnet beacon chain startup specification."); return Err("Mainnet is not yet specified. We're working on it.".into()); } - BeaconChainStartMethod::RecentGenesis { validator_count } => { - BeaconChainBuilder::recent_genesis(*validator_count, spec.clone(), log.clone()) - } + BeaconChainStartMethod::RecentGenesis { + validator_count, + minutes, + } => BeaconChainBuilder::recent_genesis( + *validator_count, + *minutes, + spec.clone(), + log.clone(), + ), BeaconChainStartMethod::Generated { validator_count, genesis_time, diff --git a/beacon_node/src/config.rs b/beacon_node/src/config.rs index e76bd48fa..7c471e8ac 100644 --- a/beacon_node/src/config.rs +++ b/beacon_node/src/config.rs @@ -120,8 +120,15 @@ fn process_testnet_subcommand( .parse::() .map_err(|e| format!("Unable to parse validator_count: {:?}", e))?; + let minutes = cli_args + .value_of("minutes") + .ok_or_else(|| "No recent genesis minutes supplied")? 
+ .parse::() + .map_err(|e| format!("Unable to parse minutes: {:?}", e))?; + builder.set_beacon_chain_start_method(BeaconChainStartMethod::RecentGenesis { validator_count, + minutes, }) } _ => return Err("No testnet method specified. See 'testnet --help'.".into()), diff --git a/beacon_node/src/main.rs b/beacon_node/src/main.rs index aba44e6fe..5bfb71215 100644 --- a/beacon_node/src/main.rs +++ b/beacon_node/src/main.rs @@ -259,11 +259,17 @@ fn main() { */ .subcommand(SubCommand::with_name("recent") .about("Creates a new genesis state where the genesis time was at the previous \ - 30-minute boundary (e.g., 12:00, 12:30, 13:00, etc.)") + MINUTES boundary (e.g., when MINUTES == 30; 12:00, 12:30, 13:00, etc.)") .arg(Arg::with_name("validator_count") .value_name("VALIDATOR_COUNT") .required(true) .help("The number of validators in the genesis state")) + .arg(Arg::with_name("minutes") + .short("m") + .value_name("MINUTES") + .required(true) + .default_value("15") + .help("The maximum number of minutes that will have elapsed before genesis")) ) .subcommand(SubCommand::with_name("yaml-genesis-state") .about("Creates a new datadir where the genesis state is read from YAML. 
Will fail to parse \ From 75ac21604f7f3d10b0d323a216f3f2cc4c00dc0f Mon Sep 17 00:00:00 2001 From: Paul Hauner Date: Thu, 29 Aug 2019 15:03:52 +1000 Subject: [PATCH 140/305] Add long minutes CLI flag --- beacon_node/src/main.rs | 1 + 1 file changed, 1 insertion(+) diff --git a/beacon_node/src/main.rs b/beacon_node/src/main.rs index 5bfb71215..8ab20a481 100644 --- a/beacon_node/src/main.rs +++ b/beacon_node/src/main.rs @@ -265,6 +265,7 @@ fn main() { .required(true) .help("The number of validators in the genesis state")) .arg(Arg::with_name("minutes") + .long("minutes") .short("m") .value_name("MINUTES") .required(true) From 81cafdc804fb58962e08c291f24e8a2f556ded7b Mon Sep 17 00:00:00 2001 From: Michael Sproul Date: Thu, 29 Aug 2019 17:41:20 +1000 Subject: [PATCH 141/305] Shuffling and sanity tests --- eth2/types/src/checkpoint.rs | 1 - eth2/utils/bls/Cargo.toml | 2 +- tests/ef_tests/src/case_result.rs | 3 + tests/ef_tests/src/cases.rs | 49 +--- tests/ef_tests/src/cases/bls_g2_compressed.rs | 13 +- tests/ef_tests/src/cases/bls_sign_msg.rs | 13 +- tests/ef_tests/src/cases/sanity_blocks.rs | 51 +++- tests/ef_tests/src/cases/sanity_slots.rs | 52 +++- tests/ef_tests/src/cases/shuffling.rs | 14 +- tests/ef_tests/src/doc.rs | 274 ------------------ tests/ef_tests/src/doc_header.rs | 12 - tests/ef_tests/src/handler.rs | 58 +++- tests/ef_tests/src/lib.rs | 4 +- tests/ef_tests/src/results.rs | 91 ++++++ tests/ef_tests/src/yaml_decode.rs | 28 ++ tests/ef_tests/tests/tests.rs | 30 +- 16 files changed, 311 insertions(+), 384 deletions(-) delete mode 100644 tests/ef_tests/src/doc.rs delete mode 100644 tests/ef_tests/src/doc_header.rs create mode 100644 tests/ef_tests/src/results.rs diff --git a/eth2/types/src/checkpoint.rs b/eth2/types/src/checkpoint.rs index 0c7001921..d5d40fa67 100644 --- a/eth2/types/src/checkpoint.rs +++ b/eth2/types/src/checkpoint.rs @@ -3,7 +3,6 @@ use crate::{Epoch, Hash256}; use serde_derive::{Deserialize, Serialize}; use ssz_derive::{Decode, Encode}; use 
test_random_derive::TestRandom; -use tree_hash::TreeHash; use tree_hash_derive::TreeHash; /// Casper FFG checkpoint, used in attestations. diff --git a/eth2/utils/bls/Cargo.toml b/eth2/utils/bls/Cargo.toml index 5989dce07..15f087b80 100644 --- a/eth2/utils/bls/Cargo.toml +++ b/eth2/utils/bls/Cargo.toml @@ -5,7 +5,7 @@ authors = ["Paul Hauner "] edition = "2018" [dependencies] -milagro_bls = { git = "https://github.com/sigp/milagro_bls", tag = "v0.9.0" } +milagro_bls = { git = "https://github.com/michaelsproul/milagro_bls", branch = "little-endian" } eth2_hashing = { path = "../eth2_hashing" } hex = "0.3" rand = "^0.5" diff --git a/tests/ef_tests/src/case_result.rs b/tests/ef_tests/src/case_result.rs index 88fd353a1..add428ec5 100644 --- a/tests/ef_tests/src/case_result.rs +++ b/tests/ef_tests/src/case_result.rs @@ -1,6 +1,7 @@ use super::*; use compare_fields::{CompareFields, Comparison, FieldComparison}; use std::fmt::Debug; +use std::path::PathBuf; use types::BeaconState; pub const MAX_VALUE_STRING_LEN: usize = 500; @@ -9,6 +10,7 @@ pub const MAX_VALUE_STRING_LEN: usize = 500; pub struct CaseResult { pub case_index: usize, pub desc: String, + pub path: PathBuf, pub result: Result<(), Error>, } @@ -17,6 +19,7 @@ impl CaseResult { CaseResult { case_index, desc: case.description(), + path: case.path().into(), result, } } diff --git a/tests/ef_tests/src/cases.rs b/tests/ef_tests/src/cases.rs index 7f6ffb0c4..1216b8728 100644 --- a/tests/ef_tests/src/cases.rs +++ b/tests/ef_tests/src/cases.rs @@ -67,6 +67,11 @@ pub trait Case: Debug { "no description".to_string() } + /// Path to the directory for this test case. + fn path(&self) -> &Path { + Path::new("") + } + /// Execute a test and return the result. /// /// `case_index` reports the index of the case in the set of test cases. 
It is not strictly @@ -76,19 +81,13 @@ pub trait Case: Debug { pub trait BlsCase: serde::de::DeserializeOwned {} -impl YamlDecode for T -where - T: BlsCase, -{ +impl YamlDecode for T { fn yaml_decode(string: &str) -> Result { serde_yaml::from_str(string).map_err(|e| Error::FailedToParseTest(format!("{:?}", e))) } } -impl LoadCase for T -where - T: BlsCase, -{ +impl LoadCase for T { fn load_from_dir(path: &Path) -> Result { Self::yaml_decode_file(&path.join("data.yaml")) } @@ -111,37 +110,3 @@ where .collect() } } - -// FIXME(michael): delete this -impl YamlDecode for Cases { - /// Decodes a YAML list of test cases - fn yaml_decode(yaml: &str) -> Result { - let mut p = 0; - let mut elems: Vec<&str> = yaml - .match_indices("\n- ") - // Skip the `\n` used for matching a new line - .map(|(i, _)| i + 1) - .map(|i| { - let yaml_element = &yaml[p..i]; - p = i; - - yaml_element - }) - .collect(); - - elems.push(&yaml[p..]); - - let test_cases = elems - .iter() - .map(|s| { - // Remove the `- ` prefix. - let s = &s[2..]; - // Remove a single level of indenting. 
- s.replace("\n ", "\n") - }) - .map(|s| T::yaml_decode(&s.to_string()).unwrap()) - .collect(); - - Ok(Self { test_cases }) - } -} diff --git a/tests/ef_tests/src/cases/bls_g2_compressed.rs b/tests/ef_tests/src/cases/bls_g2_compressed.rs index 547d8d03a..f8381f5a7 100644 --- a/tests/ef_tests/src/cases/bls_g2_compressed.rs +++ b/tests/ef_tests/src/cases/bls_g2_compressed.rs @@ -41,14 +41,9 @@ impl Case for BlsG2Compressed { } } -// Converts a vector to u64 (from big endian) +// Converts a vector to u64 (from little endian) fn bytes_to_u64(array: &[u8]) -> u64 { - let mut result: u64 = 0; - for (i, value) in array.iter().rev().enumerate() { - if i == 8 { - break; - } - result += u64::pow(2, i as u32 * 8) * u64::from(*value); - } - result + let mut bytes = [0u8; 8]; + bytes.copy_from_slice(array); + u64::from_le_bytes(bytes) } diff --git a/tests/ef_tests/src/cases/bls_sign_msg.rs b/tests/ef_tests/src/cases/bls_sign_msg.rs index 476ecdefb..18e90896b 100644 --- a/tests/ef_tests/src/cases/bls_sign_msg.rs +++ b/tests/ef_tests/src/cases/bls_sign_msg.rs @@ -41,16 +41,11 @@ impl Case for BlsSign { } } -// Converts a vector to u64 (from big endian) +// Converts a vector to u64 (from little endian) fn bytes_to_u64(array: &[u8]) -> u64 { - let mut result: u64 = 0; - for (i, value) in array.iter().rev().enumerate() { - if i == 8 { - break; - } - result += u64::pow(2, i as u32 * 8) * u64::from(*value); - } - result + let mut bytes = [0u8; 8]; + bytes.copy_from_slice(array); + u64::from_le_bytes(bytes) } // Increase the size of an array to 48 bytes diff --git a/tests/ef_tests/src/cases/sanity_blocks.rs b/tests/ef_tests/src/cases/sanity_blocks.rs index cd9008fda..d88d8f295 100644 --- a/tests/ef_tests/src/cases/sanity_blocks.rs +++ b/tests/ef_tests/src/cases/sanity_blocks.rs @@ -1,35 +1,72 @@ use super::*; use crate::bls_setting::BlsSetting; use crate::case_result::compare_beacon_state_results_without_caches; +use crate::yaml_decode::{ssz_decode_file, yaml_decode_file}; use 
serde_derive::Deserialize; use state_processing::{ per_block_processing, per_slot_processing, BlockInvalid, BlockProcessingError, }; +use std::path::PathBuf; use types::{BeaconBlock, BeaconState, EthSpec, RelativeEpoch}; +#[derive(Debug, Clone, Deserialize)] +pub struct Metadata { + pub description: Option, + pub bls_setting: Option, + pub blocks_count: usize, +} + #[derive(Debug, Clone, Deserialize)] #[serde(bound = "E: EthSpec")] pub struct SanityBlocks { - pub description: String, - pub bls_setting: Option, + pub path: PathBuf, + pub metadata: Metadata, pub pre: BeaconState, pub blocks: Vec>, pub post: Option>, } -impl YamlDecode for SanityBlocks { - fn yaml_decode(yaml: &str) -> Result { - Ok(serde_yaml::from_str(yaml).unwrap()) +impl LoadCase for SanityBlocks { + fn load_from_dir(path: &Path) -> Result { + let metadata: Metadata = yaml_decode_file(&path.join("meta.yaml"))?; + let pre = ssz_decode_file(&path.join("pre.ssz"))?; + let blocks: Vec> = (0..metadata.blocks_count) + .map(|i| { + let filename = format!("blocks_{}.ssz", i); + ssz_decode_file(&path.join(filename)) + }) + .collect::>()?; + let post_file = path.join("post.ssz"); + let post = if post_file.is_file() { + Some(ssz_decode_file(&post_file)?) 
+ } else { + None + }; + + Ok(Self { + path: path.into(), + metadata, + pre, + blocks, + post, + }) } } impl Case for SanityBlocks { fn description(&self) -> String { - self.description.clone() + self.metadata + .description + .clone() + .unwrap_or_else(String::new) + } + + fn path(&self) -> &Path { + &self.path } fn result(&self, _case_index: usize) -> Result<(), Error> { - self.bls_setting.unwrap_or_default().check()?; + self.metadata.bls_setting.unwrap_or_default().check()?; let mut state = self.pre.clone(); let mut expected = self.post.clone(); diff --git a/tests/ef_tests/src/cases/sanity_slots.rs b/tests/ef_tests/src/cases/sanity_slots.rs index fbce1a06a..27c6c13c3 100644 --- a/tests/ef_tests/src/cases/sanity_slots.rs +++ b/tests/ef_tests/src/cases/sanity_slots.rs @@ -1,30 +1,70 @@ use super::*; +use crate::bls_setting::BlsSetting; use crate::case_result::compare_beacon_state_results_without_caches; +use crate::yaml_decode::{ssz_decode_file, yaml_decode_file}; use serde_derive::Deserialize; use state_processing::per_slot_processing; +use std::path::PathBuf; use types::{BeaconState, EthSpec}; +#[derive(Debug, Clone, Default, Deserialize)] +pub struct Metadata { + pub description: Option, + pub bls_setting: Option, +} + #[derive(Debug, Clone, Deserialize)] #[serde(bound = "E: EthSpec")] pub struct SanitySlots { - pub description: String, + pub path: PathBuf, + pub metadata: Metadata, pub pre: BeaconState, - pub slots: usize, + pub slots: u64, pub post: Option>, } -impl YamlDecode for SanitySlots { - fn yaml_decode(yaml: &str) -> Result { - Ok(serde_yaml::from_str(yaml).unwrap()) +impl LoadCase for SanitySlots { + fn load_from_dir(path: &Path) -> Result { + let metadata_path = path.join("meta.yaml"); + let metadata: Metadata = if metadata_path.is_file() { + yaml_decode_file(&path.join("meta.yaml"))? 
+ } else { + Metadata::default() + }; + let pre = ssz_decode_file(&path.join("pre.ssz"))?; + let slots: u64 = yaml_decode_file(&path.join("slots.yaml"))?; + let post_file = path.join("post.ssz"); + let post = if post_file.is_file() { + Some(ssz_decode_file(&post_file)?) + } else { + None + }; + + Ok(Self { + path: path.into(), + metadata, + pre, + slots, + post, + }) } } impl Case for SanitySlots { fn description(&self) -> String { - self.description.clone() + self.metadata + .description + .clone() + .unwrap_or_else(String::new) + } + + fn path(&self) -> &Path { + &self.path } fn result(&self, _case_index: usize) -> Result<(), Error> { + self.metadata.bls_setting.unwrap_or_default().check()?; + let mut state = self.pre.clone(); let mut expected = self.post.clone(); let spec = &E::default_spec(); diff --git a/tests/ef_tests/src/cases/shuffling.rs b/tests/ef_tests/src/cases/shuffling.rs index d7ff40e59..c0595e584 100644 --- a/tests/ef_tests/src/cases/shuffling.rs +++ b/tests/ef_tests/src/cases/shuffling.rs @@ -8,7 +8,7 @@ use swap_or_not_shuffle::{get_permutated_index, shuffle_list}; pub struct Shuffling { pub seed: String, pub count: usize, - pub shuffled: Vec, + pub mapping: Vec, #[serde(skip)] _phantom: PhantomData, } @@ -19,10 +19,16 @@ impl YamlDecode for Shuffling { } } +impl LoadCase for Shuffling { + fn load_from_dir(path: &Path) -> Result { + Self::yaml_decode_file(&path.join("mapping.yaml")) + } +} + impl Case for Shuffling { fn result(&self, _case_index: usize) -> Result<(), Error> { if self.count == 0 { - compare_result::<_, Error>(&Ok(vec![]), &Some(self.shuffled.clone()))?; + compare_result::<_, Error>(&Ok(vec![]), &Some(self.mapping.clone()))?; } else { let spec = T::default_spec(); let seed = hex::decode(&self.seed[2..]) @@ -34,12 +40,12 @@ impl Case for Shuffling { get_permutated_index(i, self.count, &seed, spec.shuffle_round_count).unwrap() }) .collect(); - compare_result::<_, Error>(&Ok(shuffling), &Some(self.shuffled.clone()))?; + 
compare_result::<_, Error>(&Ok(shuffling), &Some(self.mapping.clone()))?; // Test "shuffle_list" let input: Vec = (0..self.count).collect(); let shuffling = shuffle_list(input, spec.shuffle_round_count, &seed, false).unwrap(); - compare_result::<_, Error>(&Ok(shuffling), &Some(self.shuffled.clone()))?; + compare_result::<_, Error>(&Ok(shuffling), &Some(self.mapping.clone()))?; } Ok(()) diff --git a/tests/ef_tests/src/doc.rs b/tests/ef_tests/src/doc.rs deleted file mode 100644 index f3a41697e..000000000 --- a/tests/ef_tests/src/doc.rs +++ /dev/null @@ -1,274 +0,0 @@ -use crate::case_result::CaseResult; -use crate::cases::*; -use crate::doc_header::DocHeader; -use crate::error::Error; -use crate::yaml_decode::YamlDecode; -use crate::EfTest; -use serde_derive::Deserialize; -use std::{ - fs::File, - io::prelude::*, - path::{Path, PathBuf}, -}; -use types::{MainnetEthSpec, MinimalEthSpec}; - -#[derive(Debug, Deserialize)] -pub struct Doc { - pub header_yaml: String, - pub cases_yaml: String, - pub path: PathBuf, -} - -impl Doc { - fn from_path(path: PathBuf) -> Self { - let mut file = File::open(path.clone()).unwrap(); - - let mut cases_yaml = String::new(); - file.read_to_string(&mut cases_yaml).unwrap(); - - Self { - cases_yaml, - path, - header_yaml: String::new(), - } - } - - pub fn test_results(&self) -> Vec { - let header: DocHeader = serde_yaml::from_str(&self.header_yaml.as_str()).unwrap(); - - match ( - header.runner.as_ref(), - header.handler.as_ref(), - header.config.as_ref(), - ) { - ("ssz", "uint", _) => run_test::(self), - ("sanity", "slots", "minimal") => run_test::>(self), - // FIXME: skipped due to compact committees issue - ("sanity", "slots", "mainnet") => vec![], // run_test::>(self), - ("sanity", "blocks", "minimal") => run_test::>(self), - // FIXME: skipped due to compact committees issue - ("sanity", "blocks", "mainnet") => vec![], // run_test::>(self), - ("shuffling", "core", "minimal") => run_test::>(self), - ("shuffling", "core", "mainnet") => 
run_test::>(self), - ("bls", "aggregate_pubkeys", "mainnet") => run_test::(self), - ("bls", "aggregate_sigs", "mainnet") => run_test::(self), - ("bls", "msg_hash_compressed", "mainnet") => run_test::(self), - // Note this test fails due to a difference in our internal representations. It does - // not effect verification or external representation. - // - // It is skipped. - ("bls", "msg_hash_uncompressed", "mainnet") => vec![], - ("bls", "priv_to_pub", "mainnet") => run_test::(self), - ("bls", "sign_msg", "mainnet") => run_test::(self), - ("operations", "deposit", "mainnet") => { - run_test::>(self) - } - ("operations", "deposit", "minimal") => { - run_test::>(self) - } - ("operations", "transfer", "mainnet") => { - run_test::>(self) - } - ("operations", "transfer", "minimal") => { - run_test::>(self) - } - ("operations", "voluntary_exit", "mainnet") => { - run_test::>(self) - } - ("operations", "voluntary_exit", "minimal") => { - run_test::>(self) - } - ("operations", "proposer_slashing", "mainnet") => { - run_test::>(self) - } - ("operations", "proposer_slashing", "minimal") => { - run_test::>(self) - } - ("operations", "attester_slashing", "mainnet") => { - run_test::>(self) - } - ("operations", "attester_slashing", "minimal") => { - run_test::>(self) - } - ("operations", "attestation", "mainnet") => { - run_test::>(self) - } - ("operations", "attestation", "minimal") => { - run_test::>(self) - } - ("operations", "block_header", "mainnet") => { - run_test::>(self) - } - ("operations", "block_header", "minimal") => { - run_test::>(self) - } - ("epoch_processing", "crosslinks", "minimal") => { - run_test::>(self) - } - ("epoch_processing", "crosslinks", "mainnet") => { - run_test::>(self) - } - ("epoch_processing", "registry_updates", "minimal") => { - run_test::>(self) - } - ("epoch_processing", "registry_updates", "mainnet") => { - run_test::>(self) - } - ("epoch_processing", "justification_and_finalization", "minimal") => { - run_test::>(self) - } - 
("epoch_processing", "justification_and_finalization", "mainnet") => { - run_test::>(self) - } - ("epoch_processing", "slashings", "minimal") => { - run_test::>(self) - } - ("epoch_processing", "slashings", "mainnet") => { - run_test::>(self) - } - ("epoch_processing", "final_updates", "minimal") => { - run_test::>(self) - } - ("epoch_processing", "final_updates", "mainnet") => { - vec![] - // FIXME: skipped due to compact committees issue - // run_test::>(self) - } - ("genesis", "initialization", "minimal") => { - run_test::>(self) - } - ("genesis", "initialization", "mainnet") => { - run_test::>(self) - } - ("genesis", "validity", "minimal") => run_test::>(self), - ("genesis", "validity", "mainnet") => run_test::>(self), - (runner, handler, config) => panic!( - "No implementation for runner: \"{}\", handler: \"{}\", config: \"{}\"", - runner, handler, config - ), - } - } - - pub fn assert_tests_pass(path: PathBuf) { - let doc = Self::from_path(path); - let results = doc.test_results(); - - let (failed, skipped_bls, skipped_known_failures) = categorize_results(&results); - - if failed.len() + skipped_known_failures.len() > 0 { - print_results( - &doc, - &failed, - &skipped_bls, - &skipped_known_failures, - &results, - ); - if !failed.is_empty() { - panic!("Tests failed (see above)"); - } - } else { - println!("Passed {} tests in {:?}", results.len(), doc.path); - } - } -} - -pub fn assert_tests_pass(path: &Path, results: &[CaseResult]) { - let doc = Doc { - header_yaml: String::new(), - cases_yaml: String::new(), - path: path.into(), - }; - - let (failed, skipped_bls, skipped_known_failures) = categorize_results(results); - - if failed.len() + skipped_known_failures.len() > 0 { - print_results( - &doc, - &failed, - &skipped_bls, - &skipped_known_failures, - &results, - ); - if !failed.is_empty() { - panic!("Tests failed (see above)"); - } - } else { - println!("Passed {} tests in {}", results.len(), path.display()); - } -} - -pub fn run_test(_: &Doc) -> Vec -where 
- Cases: EfTest + YamlDecode, -{ - panic!("FIXME(michael): delete this") -} - -pub fn categorize_results( - results: &[CaseResult], -) -> (Vec<&CaseResult>, Vec<&CaseResult>, Vec<&CaseResult>) { - let mut failed = vec![]; - let mut skipped_bls = vec![]; - let mut skipped_known_failures = vec![]; - - for case in results { - match case.result.as_ref().err() { - Some(Error::SkippedBls) => skipped_bls.push(case), - Some(Error::SkippedKnownFailure) => skipped_known_failures.push(case), - Some(_) => failed.push(case), - None => (), - } - } - - (failed, skipped_bls, skipped_known_failures) -} - -pub fn print_results( - doc: &Doc, - failed: &[&CaseResult], - skipped_bls: &[&CaseResult], - skipped_known_failures: &[&CaseResult], - results: &[CaseResult], -) { - println!("--------------------------------------------------"); - println!( - "Test {}", - if failed.is_empty() { - "Result" - } else { - "Failure" - } - ); - println!("Title: TODO"); - println!("File: {:?}", doc.path); - println!( - "{} tests, {} failed, {} skipped (known failure), {} skipped (bls), {} passed. 
(See below for errors)", - results.len(), - failed.len(), - skipped_known_failures.len(), - skipped_bls.len(), - results.len() - skipped_bls.len() - skipped_known_failures.len() - failed.len() - ); - println!(); - - for case in skipped_known_failures { - println!("-------"); - println!( - "case[{}] ({}) skipped because it's a known failure", - case.case_index, case.desc, - ); - } - for failure in failed { - let error = failure.result.clone().unwrap_err(); - - println!("-------"); - println!( - "case[{}] ({}) failed with {}:", - failure.case_index, - failure.desc, - error.name() - ); - println!("{}", error.message()); - } - println!(); -} diff --git a/tests/ef_tests/src/doc_header.rs b/tests/ef_tests/src/doc_header.rs deleted file mode 100644 index c0d6d3276..000000000 --- a/tests/ef_tests/src/doc_header.rs +++ /dev/null @@ -1,12 +0,0 @@ -use serde_derive::Deserialize; - -#[derive(Debug, Deserialize)] -pub struct DocHeader { - pub title: String, - pub summary: String, - pub forks_timeline: String, - pub forks: Vec, - pub config: String, - pub runner: String, - pub handler: String, -} diff --git a/tests/ef_tests/src/handler.rs b/tests/ef_tests/src/handler.rs index 1dac988ac..f66dc7b18 100644 --- a/tests/ef_tests/src/handler.rs +++ b/tests/ef_tests/src/handler.rs @@ -5,6 +5,7 @@ use std::fs; use std::marker::PhantomData; use std::path::PathBuf; use tree_hash::SignedRoot; +use types::EthSpec; pub trait Handler { type Case: Case + LoadCase; @@ -47,7 +48,8 @@ pub trait Handler { let results = Cases { test_cases }.test_results(); - crate::doc::assert_tests_pass(&handler_path, &results); + let name = format!("{}/{}", Self::runner_name(), Self::handler_name()); + crate::results::assert_tests_pass(&name, &handler_path, &results); } } @@ -128,3 +130,57 @@ where T::name() } } + +pub struct ShufflingHandler(PhantomData); + +impl Handler for ShufflingHandler { + type Case = cases::Shuffling; + + fn config_name() -> &'static str { + E::name() + } + + fn runner_name() -> &'static 
str { + "shuffling" + } + + fn handler_name() -> &'static str { + "core" + } +} + +pub struct SanityBlocksHandler(PhantomData); + +impl Handler for SanityBlocksHandler { + type Case = cases::SanityBlocks; + + fn config_name() -> &'static str { + E::name() + } + + fn runner_name() -> &'static str { + "sanity" + } + + fn handler_name() -> &'static str { + "blocks" + } +} + +pub struct SanitySlotsHandler(PhantomData); + +impl Handler for SanitySlotsHandler { + type Case = cases::SanitySlots; + + fn config_name() -> &'static str { + E::name() + } + + fn runner_name() -> &'static str { + "sanity" + } + + fn handler_name() -> &'static str { + "slots" + } +} diff --git a/tests/ef_tests/src/lib.rs b/tests/ef_tests/src/lib.rs index cc17c3ea4..06e01fc03 100644 --- a/tests/ef_tests/src/lib.rs +++ b/tests/ef_tests/src/lib.rs @@ -2,7 +2,6 @@ use types::EthSpec; pub use case_result::CaseResult; pub use cases::Case; -pub use doc::Doc; pub use error::Error; pub use handler::*; pub use yaml_decode::YamlDecode; @@ -10,10 +9,9 @@ pub use yaml_decode::YamlDecode; mod bls_setting; mod case_result; mod cases; -mod doc; -mod doc_header; mod error; mod handler; +mod results; mod type_name; mod yaml_decode; diff --git a/tests/ef_tests/src/results.rs b/tests/ef_tests/src/results.rs new file mode 100644 index 000000000..20e59f7b3 --- /dev/null +++ b/tests/ef_tests/src/results.rs @@ -0,0 +1,91 @@ +use crate::case_result::CaseResult; +use crate::error::Error; +use std::path::Path; + +pub fn assert_tests_pass(handler_name: &str, path: &Path, results: &[CaseResult]) { + let (failed, skipped_bls, skipped_known_failures) = categorize_results(results); + + if failed.len() + skipped_known_failures.len() > 0 { + print_results( + handler_name, + &failed, + &skipped_bls, + &skipped_known_failures, + &results, + ); + if !failed.is_empty() { + panic!("Tests failed (see above)"); + } + } else { + println!("Passed {} tests in {}", results.len(), path.display()); + } +} + +pub fn categorize_results( + 
results: &[CaseResult], +) -> (Vec<&CaseResult>, Vec<&CaseResult>, Vec<&CaseResult>) { + let mut failed = vec![]; + let mut skipped_bls = vec![]; + let mut skipped_known_failures = vec![]; + + for case in results { + match case.result.as_ref().err() { + Some(Error::SkippedBls) => skipped_bls.push(case), + Some(Error::SkippedKnownFailure) => skipped_known_failures.push(case), + Some(_) => failed.push(case), + None => (), + } + } + + (failed, skipped_bls, skipped_known_failures) +} + +pub fn print_results( + handler_name: &str, + failed: &[&CaseResult], + skipped_bls: &[&CaseResult], + skipped_known_failures: &[&CaseResult], + results: &[CaseResult], +) { + println!("--------------------------------------------------"); + println!( + "Test {}", + if failed.is_empty() { + "Result" + } else { + "Failure" + } + ); + println!("Title: {}", handler_name); + println!( + "{} tests, {} failed, {} skipped (known failure), {} skipped (bls), {} passed. (See below for errors)", + results.len(), + failed.len(), + skipped_known_failures.len(), + skipped_bls.len(), + results.len() - skipped_bls.len() - skipped_known_failures.len() - failed.len() + ); + println!(); + + for case in skipped_known_failures { + println!("-------"); + println!( + "case ({}) from {} skipped because it's a known failure", + case.desc, + case.path.display() + ); + } + for failure in failed { + let error = failure.result.clone().unwrap_err(); + + println!("-------"); + println!( + "case ({}) from {} failed with {}:", + failure.desc, + failure.path.display(), + error.name() + ); + println!("{}", error.message()); + } + println!(); +} diff --git a/tests/ef_tests/src/yaml_decode.rs b/tests/ef_tests/src/yaml_decode.rs index af122fb0c..83a162930 100644 --- a/tests/ef_tests/src/yaml_decode.rs +++ b/tests/ef_tests/src/yaml_decode.rs @@ -4,6 +4,34 @@ use std::fs; use std::path::Path; use types::Fork; +pub fn yaml_decode(string: &str) -> Result { + serde_yaml::from_str(string).map_err(|e| 
Error::FailedToParseTest(format!("{:?}", e))) +} + +pub fn yaml_decode_file(path: &Path) -> Result { + fs::read_to_string(path) + .map_err(|e| { + Error::FailedToParseTest(format!("Unable to load {}: {:?}", path.display(), e)) + }) + .and_then(|s| yaml_decode(&s)) +} + +pub fn ssz_decode_file(path: &Path) -> Result { + fs::read(path) + .map_err(|e| { + Error::FailedToParseTest(format!("Unable to load {}: {:?}", path.display(), e)) + }) + .and_then(|s| { + T::from_ssz_bytes(&s).map_err(|e| { + Error::FailedToParseTest(format!( + "Unable to parse SSZ at {}: {:?}", + path.display(), + e + )) + }) + }) +} + pub trait YamlDecode: Sized { /// Decode an object from the test specification YAML. fn yaml_decode(string: &str) -> Result; diff --git a/tests/ef_tests/tests/tests.rs b/tests/ef_tests/tests/tests.rs index 000c53330..0fb751c9e 100644 --- a/tests/ef_tests/tests/tests.rs +++ b/tests/ef_tests/tests/tests.rs @@ -48,6 +48,7 @@ fn yaml_files_in_test_dir(dir: &Path) -> Vec { paths } +/* #[test] #[cfg(feature = "fake_crypto")] fn ssz_generic() { @@ -58,6 +59,7 @@ fn ssz_generic() { }); } + #[test] #[cfg(feature = "fake_crypto")] fn ssz_static() { @@ -67,16 +69,15 @@ fn ssz_static() { Doc::assert_tests_pass(file); }); } +*/ #[test] fn shuffling() { - yaml_files_in_test_dir(&Path::new("shuffling").join("core")) - .into_par_iter() - .for_each(|file| { - Doc::assert_tests_pass(file); - }); + ShufflingHandler::::run(); + ShufflingHandler::::run(); } +/* #[test] fn operations_deposit() { yaml_files_in_test_dir(&Path::new("operations").join("deposit")) @@ -140,25 +141,21 @@ fn operations_block_header() { Doc::assert_tests_pass(file); }); } +*/ #[test] fn sanity_blocks() { - yaml_files_in_test_dir(&Path::new("sanity").join("blocks")) - .into_par_iter() - .for_each(|file| { - Doc::assert_tests_pass(file); - }); + SanityBlocksHandler::::run(); + SanityBlocksHandler::::run(); } #[test] fn sanity_slots() { - yaml_files_in_test_dir(&Path::new("sanity").join("slots")) - .into_par_iter() 
- .for_each(|file| { - Doc::assert_tests_pass(file); - }); + SanitySlotsHandler::::run(); + SanitySlotsHandler::::run(); } +/* #[test] #[cfg(not(feature = "fake_crypto"))] fn bls() { @@ -168,6 +165,7 @@ fn bls() { Doc::assert_tests_pass(file); }); } +*/ #[test] #[cfg(not(feature = "fake_crypto"))] @@ -264,6 +262,7 @@ ssz_static_test!(ssz_static_transfer, Transfer, SR); ssz_static_test!(ssz_static_validator, Validator); ssz_static_test!(ssz_static_voluntary_exit, VoluntaryExit, SR); +/* #[test] fn epoch_processing_justification_and_finalization() { yaml_files_in_test_dir(&Path::new("epoch_processing").join("justification_and_finalization")) @@ -326,3 +325,4 @@ fn genesis_validity() { Doc::assert_tests_pass(file); }); } +*/ From 682081ef072c82a21896f0bee78723464a65a4bf Mon Sep 17 00:00:00 2001 From: Paul Hauner Date: Thu, 29 Aug 2019 19:14:52 +1000 Subject: [PATCH 142/305] Add first pass at removing speculative state --- beacon_node/beacon_chain/src/beacon_chain.rs | 301 ++++++++++++------ beacon_node/beacon_chain/src/errors.rs | 2 + .../src/persisted_beacon_chain.rs | 3 +- beacon_node/beacon_chain/src/test_utils.rs | 1 - beacon_node/beacon_chain/tests/tests.rs | 4 +- beacon_node/client/src/lib.rs | 35 +- beacon_node/rest_api/src/validator.rs | 7 +- beacon_node/rpc/src/attestation.rs | 48 +-- beacon_node/rpc/src/beacon_block.rs | 4 +- beacon_node/rpc/src/validator.rs | 5 +- 10 files changed, 225 insertions(+), 185 deletions(-) diff --git a/beacon_node/beacon_chain/src/beacon_chain.rs b/beacon_node/beacon_chain/src/beacon_chain.rs index f552dbd27..56923ab6a 100644 --- a/beacon_node/beacon_chain/src/beacon_chain.rs +++ b/beacon_node/beacon_chain/src/beacon_chain.rs @@ -5,7 +5,6 @@ use crate::iter::{ReverseBlockRootIterator, ReverseStateRootIterator}; use crate::metrics; use crate::persisted_beacon_chain::{PersistedBeaconChain, BEACON_CHAIN_DB_KEY}; use lmd_ghost::LmdGhost; -use log::trace; use operation_pool::DepositInsertStatus; use operation_pool::{OperationPool, 
PersistedOperationPool}; use parking_lot::{RwLock, RwLockReadGuard}; @@ -77,6 +76,20 @@ pub enum AttestationProcessingOutcome { Invalid(AttestationValidationError), } +pub enum StateCow<'a, T: EthSpec> { + Borrowed(RwLockReadGuard<'a, CheckPoint>), + Owned(BeaconState), +} + +impl<'a, T: EthSpec> AsRef> for StateCow<'a, T> { + fn as_ref(&self) -> &BeaconState { + match self { + StateCow::Borrowed(checkpoint) => &checkpoint.beacon_state, + StateCow::Owned(state) => &state, + } + } +} + pub trait BeaconChainTypes: Send + Sync + 'static { type Store: store::Store; type SlotClock: slot_clock::SlotClock; @@ -97,10 +110,6 @@ pub struct BeaconChain { pub op_pool: OperationPool, /// Stores a "snapshot" of the chain at the time the head-of-the-chain block was received. canonical_head: RwLock>, - /// The same state from `self.canonical_head`, but updated at the start of each slot with a - /// skip slot if no block is received. This is effectively a cache that avoids repeating calls - /// to `per_slot_processing`. - state: RwLock>, /// The root of the genesis block. 
pub genesis_block_root: Hash256, /// A state-machine that is updated with information from the network and chooses a canonical @@ -158,7 +167,6 @@ impl BeaconChain { spec, slot_clock, op_pool: OperationPool::new(), - state: RwLock::new(genesis_state), canonical_head, genesis_block_root, fork_choice: ForkChoice::new(store.clone(), &genesis_block, genesis_block_root), @@ -180,9 +188,11 @@ impl BeaconChain { Ok(Some(p)) => p, }; + let state = &p.canonical_head.beacon_state; + let slot_clock = T::SlotClock::from_eth2_genesis( spec.genesis_slot, - p.state.genesis_time, + state.genesis_time, Duration::from_millis(spec.milliseconds_per_slot), ) .ok_or_else(|| Error::SlotClockDidNotStart)?; @@ -190,7 +200,7 @@ impl BeaconChain { let last_finalized_root = p.canonical_head.beacon_state.finalized_checkpoint.root; let last_finalized_block = &p.canonical_head.beacon_block; - let op_pool = p.op_pool.into_operation_pool(&p.state, &spec); + let op_pool = p.op_pool.into_operation_pool(state, &spec); Ok(Some(BeaconChain { spec, @@ -198,7 +208,6 @@ impl BeaconChain { fork_choice: ForkChoice::new(store.clone(), last_finalized_block, last_finalized_root), op_pool, canonical_head: RwLock::new(p.canonical_head), - state: RwLock::new(p.state), genesis_block_root: p.genesis_block_root, store, log, @@ -213,7 +222,6 @@ impl BeaconChain { canonical_head: self.canonical_head.read().clone(), op_pool: PersistedOperationPool::from_operation_pool(&self.op_pool), genesis_block_root: self.genesis_block_root, - state: self.state.read().clone(), }; let key = Hash256::from_slice(&BEACON_CHAIN_DB_KEY.as_bytes()); @@ -233,6 +241,16 @@ impl BeaconChain { self.slot_clock.now().ok_or_else(|| Error::UnableToReadSlot) } + /// Returns the epoch _right now_ according to `self.slot_clock`. Returns `Err` if the epoch is + /// unavailable. + /// + /// The epoch might be unavailable due to an error with the system clock, or if the present time + /// is before genesis (i.e., a negative epoch). 
+ pub fn epoch(&self) -> Result { + self.slot() + .map(|slot| slot.epoch(T::EthSpec::slots_per_epoch())) + } + /// Returns the beacon block body for each beacon block root in `roots`. /// /// Fails if any root in `roots` does not have a corresponding block. @@ -318,12 +336,6 @@ impl BeaconChain { Ok(self.store.get(block_root)?) } - /// Returns a read-lock guarded `BeaconState` which is the `canonical_head` that has been - /// updated to match the current slot clock. - pub fn speculative_state(&self) -> Result>, Error> { - Ok(self.state.read()) - } - /// Returns a read-lock guarded `CheckPoint` struct for reading the head (as chosen by the /// fork-choice rule). /// @@ -334,43 +346,74 @@ impl BeaconChain { self.canonical_head.read() } + /// Returns the `BeaconState` at the given slot. + /// + /// May return: + /// + /// - A new state loaded from the database (for states prior to the head) + /// - A reference to the head state (note: this keeps a read lock on the head, try to use + /// sparingly). + /// - The head state, but with skipped slots (for states later than the head). + /// + /// Returns `None` when the state is not found in the database or there is an error skipping + /// to a future state. 
+ pub fn state_at_slot(&self, slot: Slot) -> Result, Error> { + let head_state = &self.head().beacon_state; + + if slot == head_state.slot { + Ok(StateCow::Borrowed(self.head())) + } else if slot > head_state.slot { + let head_state_slot = head_state.slot; + let mut state = head_state.clone(); + drop(head_state); + while state.slot < slot { + match per_slot_processing(&mut state, &self.spec) { + Ok(()) => (), + Err(e) => { + warn!( + self.log, + "Unable to load state at slot"; + "error" => format!("{:?}", e), + "head_slot" => head_state_slot, + "requested_slot" => slot + ); + return Err(Error::NoStateForSlot(slot)); + } + }; + } + Ok(StateCow::Owned(state)) + } else { + let state_root = self + .rev_iter_state_roots() + .find(|(_root, s)| *s == slot) + .map(|(root, _slot)| root) + .ok_or_else(|| Error::NoStateForSlot(slot))?; + + Ok(StateCow::Owned( + self.store + .get(&state_root)? + .ok_or_else(|| Error::NoStateForSlot(slot))?, + )) + } + } + + /// Returns the `BeaconState` the current slot (viz., `self.slot()`). + /// + /// - A reference to the head state (note: this keeps a read lock on the head, try to use + /// sparingly). + /// - The head state, but with skipped slots (for states later than the head). + /// + /// Returns `None` when there is an error skipping to a future state or the slot clock cannot + /// be read. + pub fn state_now(&self) -> Result, Error> { + self.state_at_slot(self.slot()?) + } + /// Returns the slot of the highest block in the canonical chain. pub fn best_slot(&self) -> Slot { self.canonical_head.read().beacon_block.slot } - /// Ensures the current canonical `BeaconState` has been transitioned to match the `slot_clock`. - pub fn catchup_state(&self) -> Result<(), Error> { - let spec = &self.spec; - - let present_slot = self.slot()?; - - if self.state.read().slot < present_slot { - let mut state = self.state.write(); - - // If required, transition the new state to the present slot. 
- for _ in state.slot.as_u64()..present_slot.as_u64() { - // Ensure the next epoch state caches are built in case of an epoch transition. - state.build_committee_cache(RelativeEpoch::Next, spec)?; - - per_slot_processing(&mut *state, spec)?; - } - - state.build_all_caches(spec)?; - } - - Ok(()) - } - - /// Build all of the caches on the current state. - /// - /// Ideally this shouldn't be required, however we leave it here for testing. - pub fn ensure_state_caches_are_built(&self) -> Result<(), Error> { - self.state.write().build_all_caches(&self.spec)?; - - Ok(()) - } - /// Returns the validator index (if any) for the given public key. /// /// Information is retrieved from the present `beacon_state.validators`. @@ -401,18 +444,19 @@ impl BeaconChain { /// Information is read from the present `beacon_state` shuffling, only information from the /// present epoch is available. pub fn block_proposer(&self, slot: Slot) -> Result { - // Ensures that the present state has been advanced to the present slot, skipping slots if - // blocks are not present. - self.catchup_state()?; + let epoch = |slot: Slot| slot.epoch(T::EthSpec::slots_per_epoch()); + let head_state = &self.head().beacon_state; - // TODO: permit lookups of the proposer at any slot. - let index = self.state.read().get_beacon_proposer_index( - slot, - RelativeEpoch::Current, - &self.spec, - )?; + let state = if epoch(slot) == epoch(head_state.slot) { + StateCow::Borrowed(self.head()) + } else { + self.state_at_slot(slot)? + }; - Ok(index) + state + .as_ref() + .get_beacon_proposer_index(slot, RelativeEpoch::Current, &self.spec) + .map_err(Into::into) } /// Returns the attestation slot and shard for a given validator index. 
@@ -422,14 +466,19 @@ impl BeaconChain { pub fn validator_attestation_slot_and_shard( &self, validator_index: usize, - ) -> Result, BeaconStateError> { - trace!( - "BeaconChain::validator_attestation_slot_and_shard: validator_index: {}", - validator_index - ); - if let Some(attestation_duty) = self - .state - .read() + epoch: Epoch, + ) -> Result, Error> { + let as_epoch = |slot: Slot| slot.epoch(T::EthSpec::slots_per_epoch()); + let head_state = &self.head().beacon_state; + + let state = if epoch == as_epoch(head_state.slot) { + StateCow::Borrowed(self.head()) + } else { + self.state_at_slot(epoch.start_slot(T::EthSpec::slots_per_epoch()))? + }; + + if let Some(attestation_duty) = state + .as_ref() .get_attestation_duties(validator_index, RelativeEpoch::Current)? { Ok(Some((attestation_duty.slot, attestation_duty.shard))) @@ -438,15 +487,25 @@ impl BeaconChain { } } - /// Produce an `AttestationData` that is valid for the present `slot` and given `shard`. + /// Produce an `AttestationData` that is valid for the given `slot` `shard`. /// - /// Attests to the canonical chain. - pub fn produce_attestation_data(&self, shard: u64) -> Result { - let state = self.state.read(); + /// Always attests to the canonical chain. + pub fn produce_attestation_data( + &self, + shard: u64, + slot: Slot, + ) -> Result { + let state = self.state_at_slot(slot)?; + let head_block_root = self.head().beacon_block_root; let head_block_slot = self.head().beacon_block.slot; - self.produce_attestation_data_for_block(shard, head_block_root, head_block_slot, &*state) + self.produce_attestation_data_for_block( + shard, + head_block_root, + head_block_slot, + state.as_ref(), + ) } /// Produce an `AttestationData` that attests to the chain denoted by `block_root` and `state`. @@ -765,14 +824,38 @@ impl BeaconChain { /// Accept some exit and queue it for inclusion in an appropriate block. 
pub fn process_voluntary_exit(&self, exit: VoluntaryExit) -> Result<(), ExitValidationError> { - self.op_pool - .insert_voluntary_exit(exit, &*self.state.read(), &self.spec) + match self.state_now() { + Ok(state) => self + .op_pool + .insert_voluntary_exit(exit, state.as_ref(), &self.spec), + Err(e) => { + error!( + &self.log, + "Unable to process voluntary exit"; + "error" => format!("{:?}", e), + "reason" => "no state" + ); + Ok(()) + } + } } /// Accept some transfer and queue it for inclusion in an appropriate block. pub fn process_transfer(&self, transfer: Transfer) -> Result<(), TransferValidationError> { - self.op_pool - .insert_transfer(transfer, &*self.state.read(), &self.spec) + match self.state_now() { + Ok(state) => self + .op_pool + .insert_transfer(transfer, state.as_ref(), &self.spec), + Err(e) => { + error!( + &self.log, + "Unable to process transfer"; + "error" => format!("{:?}", e), + "reason" => "no state" + ); + Ok(()) + } + } } /// Accept some proposer slashing and queue it for inclusion in an appropriate block. @@ -780,8 +863,21 @@ impl BeaconChain { &self, proposer_slashing: ProposerSlashing, ) -> Result<(), ProposerSlashingValidationError> { - self.op_pool - .insert_proposer_slashing(proposer_slashing, &*self.state.read(), &self.spec) + match self.state_now() { + Ok(state) => { + self.op_pool + .insert_proposer_slashing(proposer_slashing, state.as_ref(), &self.spec) + } + Err(e) => { + error!( + &self.log, + "Unable to process proposer slashing"; + "error" => format!("{:?}", e), + "reason" => "no state" + ); + Ok(()) + } + } } /// Accept some attester slashing and queue it for inclusion in an appropriate block. 
@@ -789,8 +885,21 @@ impl BeaconChain { &self, attester_slashing: AttesterSlashing, ) -> Result<(), AttesterSlashingValidationError> { - self.op_pool - .insert_attester_slashing(attester_slashing, &*self.state.read(), &self.spec) + match self.state_now() { + Ok(state) => { + self.op_pool + .insert_attester_slashing(attester_slashing, state.as_ref(), &self.spec) + } + Err(e) => { + error!( + &self.log, + "Unable to process attester slashing"; + "error" => format!("{:?}", e), + "reason" => "no state" + ); + Ok(()) + } + } } /// Accept some block and attempt to add it to block DAG. @@ -804,8 +913,8 @@ impl BeaconChain { let full_timer = metrics::start_timer(&metrics::BLOCK_PROCESSING_TIMES); let finalized_slot = self - .state - .read() + .head() + .beacon_state .finalized_checkpoint .epoch .start_slot(T::EthSpec::slots_per_epoch()); @@ -987,20 +1096,24 @@ impl BeaconChain { Ok(BlockProcessingOutcome::Processed { block_root }) } - /// Produce a new block at the present slot. + /// Produce a new block at the given `slot`. /// /// The produced block will not be inherently valid, it must be signed by a block producer. /// Block signing is out of the scope of this function and should be done by a separate program. pub fn produce_block( &self, randao_reveal: Signature, + slot: Slot, ) -> Result<(BeaconBlock, BeaconState), BlockProductionError> { - let state = self.state.read().clone(); + let state = self + .state_at_slot(slot) + .map_err(|_| BlockProductionError::UnableToProduceAtSlot(slot))?; + let slot = self .slot() .map_err(|_| BlockProductionError::UnableToReadSlot)?; - self.produce_block_on_state(state, slot, randao_reveal) + self.produce_block_on_state(state.as_ref().clone(), slot, randao_reveal) } /// Produce a block for some `slot` upon the given `state`. @@ -1169,29 +1282,15 @@ impl BeaconChain { } /// Update the canonical head to `new_head`. 
- fn update_canonical_head(&self, new_head: CheckPoint) -> Result<(), Error> { + fn update_canonical_head(&self, mut new_head: CheckPoint) -> Result<(), Error> { let timer = metrics::start_timer(&metrics::UPDATE_HEAD_TIMES); + new_head.beacon_state.build_all_caches(&self.spec)?; + // Update the checkpoint that stores the head of the chain at the time it received the // block. *self.canonical_head.write() = new_head; - // Update the always-at-the-present-slot state we keep around for performance gains. - *self.state.write() = { - let mut state = self.canonical_head.read().beacon_state.clone(); - - let present_slot = self.slot()?; - - // If required, transition the new state to the present slot. - for _ in state.slot.as_u64()..present_slot.as_u64() { - per_slot_processing(&mut state, &self.spec)?; - } - - state.build_all_caches(&self.spec)?; - - state - }; - // Save `self` to `self.store`. self.persist()?; diff --git a/beacon_node/beacon_chain/src/errors.rs b/beacon_node/beacon_chain/src/errors.rs index 75dbb655f..cd8d6aad6 100644 --- a/beacon_node/beacon_chain/src/errors.rs +++ b/beacon_node/beacon_chain/src/errors.rs @@ -24,6 +24,7 @@ pub enum BeaconChainError { new_epoch: Epoch, }, SlotClockDidNotStart, + NoStateForSlot(Slot), UnableToFindTargetRoot(Slot), BeaconStateError(BeaconStateError), DBInconsistent(String), @@ -44,6 +45,7 @@ easy_from_to!(SlotProcessingError, BeaconChainError); pub enum BlockProductionError { UnableToGetBlockRootFromState, UnableToReadSlot, + UnableToProduceAtSlot(Slot), SlotProcessingError(SlotProcessingError), BlockProcessingError(BlockProcessingError), BeaconStateError(BeaconStateError), diff --git a/beacon_node/beacon_chain/src/persisted_beacon_chain.rs b/beacon_node/beacon_chain/src/persisted_beacon_chain.rs index 8b9f78dc5..a85f78ac8 100644 --- a/beacon_node/beacon_chain/src/persisted_beacon_chain.rs +++ b/beacon_node/beacon_chain/src/persisted_beacon_chain.rs @@ -3,7 +3,7 @@ use operation_pool::PersistedOperationPool; use 
ssz::{Decode, Encode}; use ssz_derive::{Decode, Encode}; use store::{DBColumn, Error as StoreError, StoreItem}; -use types::{BeaconState, Hash256}; +use types::Hash256; /// 32-byte key for accessing the `PersistedBeaconChain`. pub const BEACON_CHAIN_DB_KEY: &str = "PERSISTEDBEACONCHAINPERSISTEDBEA"; @@ -13,7 +13,6 @@ pub struct PersistedBeaconChain { pub canonical_head: CheckPoint, pub op_pool: PersistedOperationPool, pub genesis_block_root: Hash256, - pub state: BeaconState, } impl StoreItem for PersistedBeaconChain { diff --git a/beacon_node/beacon_chain/src/test_utils.rs b/beacon_node/beacon_chain/src/test_utils.rs index 52e1ec8de..1006fabf5 100644 --- a/beacon_node/beacon_chain/src/test_utils.rs +++ b/beacon_node/beacon_chain/src/test_utils.rs @@ -130,7 +130,6 @@ where /// Does not produce blocks or attestations. pub fn advance_slot(&self) { self.chain.slot_clock.advance_slot(); - self.chain.catchup_state().expect("should catchup state"); } /// Extend the `BeaconChain` with some blocks and attestations. 
Returns the root of the diff --git a/beacon_node/beacon_chain/tests/tests.rs b/beacon_node/beacon_chain/tests/tests.rs index 22b667f15..ba7f7bf84 100644 --- a/beacon_node/beacon_chain/tests/tests.rs +++ b/beacon_node/beacon_chain/tests/tests.rs @@ -322,7 +322,9 @@ fn roundtrip_operation_pool() { let p: PersistedBeaconChain> = harness.chain.store.get(&key).unwrap().unwrap(); - let restored_op_pool = p.op_pool.into_operation_pool(&p.state, &harness.spec); + let restored_op_pool = p + .op_pool + .into_operation_pool(&p.canonical_head.beacon_state, &harness.spec); assert_eq!(harness.chain.op_pool, restored_op_pool); } diff --git a/beacon_node/client/src/lib.rs b/beacon_node/client/src/lib.rs index 4554ff9a1..9876e9672 100644 --- a/beacon_node/client/src/lib.rs +++ b/beacon_node/client/src/lib.rs @@ -143,7 +143,6 @@ where "catchup_distance" => wall_clock_slot - state_slot, ); } - do_state_catchup(&beacon_chain, &log); let network_config = &client_config.network; let (network, network_send) = @@ -199,7 +198,7 @@ where exit.until( interval .for_each(move |_| { - do_state_catchup(&chain, &log); + log_new_slot(&chain, &log); Ok(()) }) @@ -229,35 +228,19 @@ impl Drop for Client { } } -fn do_state_catchup(chain: &Arc>, log: &slog::Logger) { - // Only attempt to `catchup_state` if we can read the slot clock. 
+fn log_new_slot(chain: &Arc>, log: &slog::Logger) { + let best_slot = chain.head().beacon_block.slot; + let latest_block_root = chain.head().beacon_block_root; + if let Ok(current_slot) = chain.slot() { - let state_catchup_result = chain.catchup_state(); - - let best_slot = chain.head().beacon_block.slot; - let latest_block_root = chain.head().beacon_block_root; - - let common = o!( + info!( + log, + "Slot start"; "skip_slots" => current_slot.saturating_sub(best_slot), "best_block_root" => format!("{}", latest_block_root), "best_block_slot" => best_slot, "slot" => current_slot, - ); - - if let Err(e) = state_catchup_result { - error!( - log, - "State catchup failed"; - "error" => format!("{:?}", e), - common - ) - } else { - info!( - log, - "Slot start"; - common - ) - } + ) } else { error!( log, diff --git a/beacon_node/rest_api/src/validator.rs b/beacon_node/rest_api/src/validator.rs index 4294f9c20..365b7e552 100644 --- a/beacon_node/rest_api/src/validator.rs +++ b/beacon_node/rest_api/src/validator.rs @@ -39,12 +39,7 @@ pub fn get_validator_duties(req: Request) - .extensions() .get::>>() .ok_or_else(|| ApiError::ServerError("Beacon chain extension missing".to_string()))?; - let _ = beacon_chain - .ensure_state_caches_are_built() - .map_err(|e| ApiError::ServerError(format!("Unable to build state caches: {:?}", e)))?; - let head_state = beacon_chain - .speculative_state() - .expect("This is legacy code and should be removed."); + let head_state = &beacon_chain.head().beacon_state; // Parse and check query parameters let query = UrlQuery::from_request(&req)?; diff --git a/beacon_node/rpc/src/attestation.rs b/beacon_node/rpc/src/attestation.rs index 68d3829ee..f4b49049a 100644 --- a/beacon_node/rpc/src/attestation.rs +++ b/beacon_node/rpc/src/attestation.rs @@ -14,7 +14,7 @@ use slog::{error, info, trace, warn}; use ssz::{ssz_encode, Decode, Encode}; use std::sync::Arc; use tokio::sync::mpsc; -use types::Attestation; +use types::{Attestation, Slot}; 
#[derive(Clone)] pub struct AttestationServiceInstance { @@ -37,49 +37,13 @@ impl AttestationService for AttestationServiceInstance { req.get_slot() ); - // verify the slot, drop lock on state afterwards - { - let slot_requested = req.get_slot(); - // TODO: this whole module is legacy and not maintained well. - let state = &self - .chain - .speculative_state() - .expect("This is legacy code and should be removed"); - - // Start by performing some checks - // Check that the AttestationData is for the current slot (otherwise it will not be valid) - if slot_requested > state.slot.as_u64() { - let log_clone = self.log.clone(); - let f = sink - .fail(RpcStatus::new( - RpcStatusCode::OutOfRange, - Some( - "AttestationData request for a slot that is in the future.".to_string(), - ), - )) - .map_err(move |e| { - error!(log_clone, "Failed to reply with failure {:?}: {:?}", req, e) - }); - return ctx.spawn(f); - } - // currently cannot handle past slots. TODO: Handle this case - else if slot_requested < state.slot.as_u64() { - let log_clone = self.log.clone(); - let f = sink - .fail(RpcStatus::new( - RpcStatusCode::InvalidArgument, - Some("AttestationData request for a slot that is in the past.".to_string()), - )) - .map_err(move |e| { - error!(log_clone, "Failed to reply with failure {:?}: {:?}", req, e) - }); - return ctx.spawn(f); - } - } - // Then get the AttestationData from the beacon chain let shard = req.get_shard(); - let attestation_data = match self.chain.produce_attestation_data(shard) { + let slot_requested = req.get_slot(); + let attestation_data = match self + .chain + .produce_attestation_data(shard, Slot::from(slot_requested)) + { Ok(v) => v, Err(e) => { // Could not produce an attestation diff --git a/beacon_node/rpc/src/beacon_block.rs b/beacon_node/rpc/src/beacon_block.rs index 92a543ef3..b7332b395 100644 --- a/beacon_node/rpc/src/beacon_block.rs +++ b/beacon_node/rpc/src/beacon_block.rs @@ -35,7 +35,7 @@ impl BeaconBlockService for 
BeaconBlockServiceInstance { // decode the request // TODO: requested slot currently unused, see: https://github.com/sigp/lighthouse/issues/336 - let _requested_slot = Slot::from(req.get_slot()); + let requested_slot = Slot::from(req.get_slot()); let randao_reveal = match Signature::from_ssz_bytes(req.get_randao_reveal()) { Ok(reveal) => reveal, Err(_) => { @@ -51,7 +51,7 @@ impl BeaconBlockService for BeaconBlockServiceInstance { } }; - let produced_block = match self.chain.produce_block(randao_reveal) { + let produced_block = match self.chain.produce_block(randao_reveal, requested_slot) { Ok((block, _state)) => block, Err(e) => { // could not produce a block diff --git a/beacon_node/rpc/src/validator.rs b/beacon_node/rpc/src/validator.rs index 080c828a7..fd6d7f3d1 100644 --- a/beacon_node/rpc/src/validator.rs +++ b/beacon_node/rpc/src/validator.rs @@ -30,10 +30,7 @@ impl ValidatorService for ValidatorServiceInstance { let spec = &self.chain.spec; // TODO: this whole module is legacy and not maintained well. 
- let state = &self - .chain - .speculative_state() - .expect("This is legacy code and should be removed"); + let state = &self.chain.head().beacon_state; let epoch = Epoch::from(req.get_epoch()); let mut resp = GetDutiesResponse::new(); let resp_validators = resp.mut_active_validators(); From ae114889c1e49729205d9377ce41652eac6b2500 Mon Sep 17 00:00:00 2001 From: Paul Hauner Date: Fri, 30 Aug 2019 00:24:46 +1000 Subject: [PATCH 143/305] Fix bugs from removing speculative state --- beacon_node/beacon_chain/src/beacon_chain.rs | 21 +++++++++++++-- beacon_node/rpc/src/validator.rs | 28 ++++++++++++++++++-- 2 files changed, 45 insertions(+), 4 deletions(-) diff --git a/beacon_node/beacon_chain/src/beacon_chain.rs b/beacon_node/beacon_chain/src/beacon_chain.rs index 56923ab6a..f6cef7dac 100644 --- a/beacon_node/beacon_chain/src/beacon_chain.rs +++ b/beacon_node/beacon_chain/src/beacon_chain.rs @@ -90,6 +90,15 @@ impl<'a, T: EthSpec> AsRef> for StateCow<'a, T> { } } +impl<'a, T: EthSpec> StateCow<'a, T> { + pub fn as_mut_ref(&mut self) -> Option<&mut BeaconState> { + match self { + StateCow::Borrowed(_) => None, + StateCow::Owned(ref mut state) => Some(state), + } + } +} + pub trait BeaconChainTypes: Send + Sync + 'static { type Store: store::Store; type SlotClock: slot_clock::SlotClock; @@ -447,12 +456,16 @@ impl BeaconChain { let epoch = |slot: Slot| slot.epoch(T::EthSpec::slots_per_epoch()); let head_state = &self.head().beacon_state; - let state = if epoch(slot) == epoch(head_state.slot) { + let mut state = if epoch(slot) == epoch(head_state.slot) { StateCow::Borrowed(self.head()) } else { self.state_at_slot(slot)? 
}; + if let Some(state) = state.as_mut_ref() { + state.build_committee_cache(RelativeEpoch::Current, &self.spec)?; + } + state .as_ref() .get_beacon_proposer_index(slot, RelativeEpoch::Current, &self.spec) @@ -471,12 +484,16 @@ impl BeaconChain { let as_epoch = |slot: Slot| slot.epoch(T::EthSpec::slots_per_epoch()); let head_state = &self.head().beacon_state; - let state = if epoch == as_epoch(head_state.slot) { + let mut state = if epoch == as_epoch(head_state.slot) { StateCow::Borrowed(self.head()) } else { self.state_at_slot(epoch.start_slot(T::EthSpec::slots_per_epoch()))? }; + if let Some(state) = state.as_mut_ref() { + state.build_committee_cache(RelativeEpoch::Current, &self.spec)?; + } + if let Some(attestation_duty) = state .as_ref() .get_attestation_duties(validator_index, RelativeEpoch::Current)? diff --git a/beacon_node/rpc/src/validator.rs b/beacon_node/rpc/src/validator.rs index fd6d7f3d1..afe173318 100644 --- a/beacon_node/rpc/src/validator.rs +++ b/beacon_node/rpc/src/validator.rs @@ -28,9 +28,33 @@ impl ValidatorService for ValidatorServiceInstance { let validators = req.get_validators(); trace!(self.log, "RPC request"; "endpoint" => "GetValidatorDuties", "epoch" => req.get_epoch()); + let slot = if let Ok(slot) = self.chain.slot() { + slot + } else { + let log_clone = self.log.clone(); + let f = sink + .fail(RpcStatus::new( + RpcStatusCode::FailedPrecondition, + Some("No slot for chain".to_string()), + )) + .map_err(move |e| warn!(log_clone, "failed to reply {:?}: {:?}", req, e)); + return ctx.spawn(f); + }; + let state_cow = if let Ok(state) = self.chain.state_at_slot(slot) { + state + } else { + let log_clone = self.log.clone(); + let f = sink + .fail(RpcStatus::new( + RpcStatusCode::FailedPrecondition, + Some("No state".to_string()), + )) + .map_err(move |e| warn!(log_clone, "failed to reply {:?}: {:?}", req, e)); + return ctx.spawn(f); + }; + let state = state_cow.as_ref(); + let spec = &self.chain.spec; - // TODO: this whole module is legacy 
and not maintained well. - let state = &self.chain.head().beacon_state; let epoch = Epoch::from(req.get_epoch()); let mut resp = GetDutiesResponse::new(); let resp_validators = resp.mut_active_validators(); From ea562595ed4e8b53385357b812236c0289e44ce2 Mon Sep 17 00:00:00 2001 From: Paul Hauner Date: Fri, 30 Aug 2019 00:46:25 +1000 Subject: [PATCH 144/305] Fix bugs with gRPC API --- beacon_node/rpc/src/validator.rs | 27 +++++---------------------- 1 file changed, 5 insertions(+), 22 deletions(-) diff --git a/beacon_node/rpc/src/validator.rs b/beacon_node/rpc/src/validator.rs index afe173318..7f33e0c3a 100644 --- a/beacon_node/rpc/src/validator.rs +++ b/beacon_node/rpc/src/validator.rs @@ -40,8 +40,8 @@ impl ValidatorService for ValidatorServiceInstance { .map_err(move |e| warn!(log_clone, "failed to reply {:?}: {:?}", req, e)); return ctx.spawn(f); }; - let state_cow = if let Ok(state) = self.chain.state_at_slot(slot) { - state + let mut state = if let Ok(state) = self.chain.state_at_slot(slot) { + state.as_ref().clone() } else { let log_clone = self.log.clone(); let f = sink @@ -52,33 +52,16 @@ impl ValidatorService for ValidatorServiceInstance { .map_err(move |e| warn!(log_clone, "failed to reply {:?}: {:?}", req, e)); return ctx.spawn(f); }; - let state = state_cow.as_ref(); - let spec = &self.chain.spec; + let _ = state.build_all_caches(&self.chain.spec); + let epoch = Epoch::from(req.get_epoch()); let mut resp = GetDutiesResponse::new(); let resp_validators = resp.mut_active_validators(); - let relative_epoch = - match RelativeEpoch::from_epoch(state.slot.epoch(T::EthSpec::slots_per_epoch()), epoch) - { - Ok(v) => v, - Err(e) => { - // incorrect epoch - let log_clone = self.log.clone(); - let f = sink - .fail(RpcStatus::new( - RpcStatusCode::FailedPrecondition, - Some(format!("Invalid epoch: {:?}", e)), - )) - .map_err(move |e| warn!(log_clone, "failed to reply {:?}: {:?}", req, e)); - return ctx.spawn(f); - } - }; - let validator_proposers: Result, _> = epoch 
.slot_iter(T::EthSpec::slots_per_epoch()) - .map(|slot| state.get_beacon_proposer_index(slot, relative_epoch, &spec)) + .map(|slot| self.chain.block_proposer(slot)) .collect(); let validator_proposers = match validator_proposers { Ok(v) => v, From 8060cd8f5cdc5deff8ff5f312ed108d7cb8bb131 Mon Sep 17 00:00:00 2001 From: Paul Hauner Date: Fri, 30 Aug 2019 01:14:57 +1000 Subject: [PATCH 145/305] Change RPC slot behaviour --- beacon_node/rpc/src/beacon_block.rs | 19 +++++++++++++++++-- 1 file changed, 17 insertions(+), 2 deletions(-) diff --git a/beacon_node/rpc/src/beacon_block.rs b/beacon_node/rpc/src/beacon_block.rs index b7332b395..f6be6207f 100644 --- a/beacon_node/rpc/src/beacon_block.rs +++ b/beacon_node/rpc/src/beacon_block.rs @@ -35,7 +35,7 @@ impl BeaconBlockService for BeaconBlockServiceInstance { // decode the request // TODO: requested slot currently unused, see: https://github.com/sigp/lighthouse/issues/336 - let requested_slot = Slot::from(req.get_slot()); + let _requested_slot = Slot::from(req.get_slot()); let randao_reveal = match Signature::from_ssz_bytes(req.get_randao_reveal()) { Ok(reveal) => reveal, Err(_) => { @@ -51,7 +51,22 @@ impl BeaconBlockService for BeaconBlockServiceInstance { } }; - let produced_block = match self.chain.produce_block(randao_reveal, requested_slot) { + let slot = match self.chain.slot() { + Ok(slot) => slot, + Err(_) => { + // decode error, incorrect signature + let log_clone = self.log.clone(); + let f = sink + .fail(RpcStatus::new( + RpcStatusCode::InvalidArgument, + Some("No slot from chain".to_string()), + )) + .map_err(move |e| warn!(log_clone, "failed to reply {:?}: {:?}", req, e)); + return ctx.spawn(f); + } + }; + + let produced_block = match self.chain.produce_block(randao_reveal, slot) { Ok((block, _state)) => block, Err(e) => { // could not produce a block From 31bbb0f5739d21c37454adc4e17e0562c2668122 Mon Sep 17 00:00:00 2001 From: Paul Hauner Date: Fri, 30 Aug 2019 01:51:18 +1000 Subject: [PATCH 146/305] Modify 
RPC duties endpoint --- beacon_node/rpc/src/validator.rs | 16 +++------------- 1 file changed, 3 insertions(+), 13 deletions(-) diff --git a/beacon_node/rpc/src/validator.rs b/beacon_node/rpc/src/validator.rs index 7f33e0c3a..2a1ad45f4 100644 --- a/beacon_node/rpc/src/validator.rs +++ b/beacon_node/rpc/src/validator.rs @@ -28,18 +28,9 @@ impl ValidatorService for ValidatorServiceInstance { let validators = req.get_validators(); trace!(self.log, "RPC request"; "endpoint" => "GetValidatorDuties", "epoch" => req.get_epoch()); - let slot = if let Ok(slot) = self.chain.slot() { - slot - } else { - let log_clone = self.log.clone(); - let f = sink - .fail(RpcStatus::new( - RpcStatusCode::FailedPrecondition, - Some("No slot for chain".to_string()), - )) - .map_err(move |e| warn!(log_clone, "failed to reply {:?}: {:?}", req, e)); - return ctx.spawn(f); - }; + let epoch = Epoch::from(req.get_epoch()); + let slot = epoch.start_slot(T::EthSpec::slots_per_epoch()); + let mut state = if let Ok(state) = self.chain.state_at_slot(slot) { state.as_ref().clone() } else { @@ -55,7 +46,6 @@ impl ValidatorService for ValidatorServiceInstance { let _ = state.build_all_caches(&self.chain.spec); - let epoch = Epoch::from(req.get_epoch()); let mut resp = GetDutiesResponse::new(); let resp_validators = resp.mut_active_validators(); From 336510634038ea0b8c42afb545896e399ade60d9 Mon Sep 17 00:00:00 2001 From: Paul Hauner Date: Fri, 30 Aug 2019 11:04:15 +1000 Subject: [PATCH 147/305] Fix bug with block production --- beacon_node/beacon_chain/src/beacon_chain.rs | 22 +++++++++++++---- beacon_node/beacon_chain/src/errors.rs | 2 ++ beacon_node/rpc/src/beacon_block.rs | 25 ++++++-------------- beacon_node/rpc/src/validator.rs | 10 +++++++- 4 files changed, 35 insertions(+), 24 deletions(-) diff --git a/beacon_node/beacon_chain/src/beacon_chain.rs b/beacon_node/beacon_chain/src/beacon_chain.rs index f6cef7dac..afc7a992a 100644 --- a/beacon_node/beacon_chain/src/beacon_chain.rs +++ 
b/beacon_node/beacon_chain/src/beacon_chain.rs @@ -466,6 +466,14 @@ impl BeaconChain { state.build_committee_cache(RelativeEpoch::Current, &self.spec)?; } + if epoch(state.as_ref().slot) != epoch(slot) { + return Err(Error::InvariantViolated(format!( + "Epochs in consistent in proposer lookup: state: {}, requested: {}", + epoch(state.as_ref().slot), + epoch(slot) + ))); + } + state .as_ref() .get_beacon_proposer_index(slot, RelativeEpoch::Current, &self.spec) @@ -494,6 +502,14 @@ impl BeaconChain { state.build_committee_cache(RelativeEpoch::Current, &self.spec)?; } + if as_epoch(state.as_ref().slot) != epoch { + return Err(Error::InvariantViolated(format!( + "Epochs in consistent in attestation duties lookup: state: {}, requested: {}", + as_epoch(state.as_ref().slot), + epoch + ))); + } + if let Some(attestation_duty) = state .as_ref() .get_attestation_duties(validator_index, RelativeEpoch::Current)? @@ -1123,13 +1139,9 @@ impl BeaconChain { slot: Slot, ) -> Result<(BeaconBlock, BeaconState), BlockProductionError> { let state = self - .state_at_slot(slot) + .state_at_slot(slot - 1) .map_err(|_| BlockProductionError::UnableToProduceAtSlot(slot))?; - let slot = self - .slot() - .map_err(|_| BlockProductionError::UnableToReadSlot)?; - self.produce_block_on_state(state.as_ref().clone(), slot, randao_reveal) } diff --git a/beacon_node/beacon_chain/src/errors.rs b/beacon_node/beacon_chain/src/errors.rs index cd8d6aad6..5ef68f2cd 100644 --- a/beacon_node/beacon_chain/src/errors.rs +++ b/beacon_node/beacon_chain/src/errors.rs @@ -37,6 +37,8 @@ pub enum BeaconChainError { beacon_block_root: Hash256, }, AttestationValidationError(AttestationValidationError), + /// Returned when an internal check fails, indicating corrupt data. 
+ InvariantViolated(String), } easy_from_to!(SlotProcessingError, BeaconChainError); diff --git a/beacon_node/rpc/src/beacon_block.rs b/beacon_node/rpc/src/beacon_block.rs index f6be6207f..346d7e263 100644 --- a/beacon_node/rpc/src/beacon_block.rs +++ b/beacon_node/rpc/src/beacon_block.rs @@ -34,8 +34,7 @@ impl BeaconBlockService for BeaconBlockServiceInstance { trace!(self.log, "Generating a beacon block"; "req" => format!("{:?}", req)); // decode the request - // TODO: requested slot currently unused, see: https://github.com/sigp/lighthouse/issues/336 - let _requested_slot = Slot::from(req.get_slot()); + let requested_slot = Slot::from(req.get_slot()); let randao_reveal = match Signature::from_ssz_bytes(req.get_randao_reveal()) { Ok(reveal) => reveal, Err(_) => { @@ -51,22 +50,7 @@ impl BeaconBlockService for BeaconBlockServiceInstance { } }; - let slot = match self.chain.slot() { - Ok(slot) => slot, - Err(_) => { - // decode error, incorrect signature - let log_clone = self.log.clone(); - let f = sink - .fail(RpcStatus::new( - RpcStatusCode::InvalidArgument, - Some("No slot from chain".to_string()), - )) - .map_err(move |e| warn!(log_clone, "failed to reply {:?}: {:?}", req, e)); - return ctx.spawn(f); - } - }; - - let produced_block = match self.chain.produce_block(randao_reveal, slot) { + let produced_block = match self.chain.produce_block(randao_reveal, requested_slot) { Ok((block, _state)) => block, Err(e) => { // could not produce a block @@ -82,6 +66,11 @@ impl BeaconBlockService for BeaconBlockServiceInstance { } }; + assert_eq!( + produced_block.slot, requested_slot, + "should produce at the requested slot" + ); + let mut block = BeaconBlockProto::new(); block.set_ssz(ssz_encode(&produced_block)); diff --git a/beacon_node/rpc/src/validator.rs b/beacon_node/rpc/src/validator.rs index 2a1ad45f4..84995ca50 100644 --- a/beacon_node/rpc/src/validator.rs +++ b/beacon_node/rpc/src/validator.rs @@ -46,12 +46,20 @@ impl ValidatorService for 
ValidatorServiceInstance { let _ = state.build_all_caches(&self.chain.spec); + assert_eq!( + state.current_epoch(), + epoch, + "Retrieved state should be from the same epoch" + ); + let mut resp = GetDutiesResponse::new(); let resp_validators = resp.mut_active_validators(); let validator_proposers: Result, _> = epoch .slot_iter(T::EthSpec::slots_per_epoch()) - .map(|slot| self.chain.block_proposer(slot)) + .map(|slot| { + state.get_beacon_proposer_index(slot, RelativeEpoch::Current, &self.chain.spec) + }) .collect(); let validator_proposers = match validator_proposers { Ok(v) => v, From a474061ec75c0858943264053ef05eefd7edd132 Mon Sep 17 00:00:00 2001 From: Paul Hauner Date: Fri, 30 Aug 2019 11:14:13 +1000 Subject: [PATCH 148/305] Disable sig verification when filling blocks --- eth2/operation_pool/src/lib.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/eth2/operation_pool/src/lib.rs b/eth2/operation_pool/src/lib.rs index 0badf3807..3e1c0ece1 100644 --- a/eth2/operation_pool/src/lib.rs +++ b/eth2/operation_pool/src/lib.rs @@ -134,7 +134,7 @@ impl OperationPool { verify_attestation_for_block_inclusion( state, attestation, - VerifySignatures::True, + VerifySignatures::False, spec, ) .is_ok() From 2e11faf7631af03408bb45315fc0e8a749f3befe Mon Sep 17 00:00:00 2001 From: Paul Hauner Date: Fri, 30 Aug 2019 11:19:52 +1000 Subject: [PATCH 149/305] Re-enable signature verification on attn incl --- eth2/operation_pool/src/lib.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/eth2/operation_pool/src/lib.rs b/eth2/operation_pool/src/lib.rs index 3e1c0ece1..0badf3807 100644 --- a/eth2/operation_pool/src/lib.rs +++ b/eth2/operation_pool/src/lib.rs @@ -134,7 +134,7 @@ impl OperationPool { verify_attestation_for_block_inclusion( state, attestation, - VerifySignatures::False, + VerifySignatures::True, spec, ) .is_ok() From 6cf9b3c1a41e253de9cc3ef339f451f62a461035 Mon Sep 17 00:00:00 2001 From: Michael Sproul Date: Fri, 30 Aug 2019 13:29:26 
+1000 Subject: [PATCH 150/305] Epoch processing tests --- .../src/per_epoch_processing.rs | 7 +- tests/ef_tests/src/cases.rs | 13 +- tests/ef_tests/src/cases/epoch_processing.rs | 147 ++++++++++++++++++ .../src/cases/epoch_processing_crosslinks.rs | 37 ----- .../cases/epoch_processing_final_updates.rs | 41 ----- ...ocessing_justification_and_finalization.rs | 46 ------ .../epoch_processing_registry_updates.rs | 38 ----- .../src/cases/epoch_processing_slashings.rs | 50 ------ tests/ef_tests/src/cases/sanity_slots.rs | 2 +- tests/ef_tests/src/handler.rs | 20 ++- tests/ef_tests/src/lib.rs | 3 + tests/ef_tests/src/type_name.rs | 75 +++++---- tests/ef_tests/tests/tests.rs | 37 ++--- 13 files changed, 225 insertions(+), 291 deletions(-) create mode 100644 tests/ef_tests/src/cases/epoch_processing.rs delete mode 100644 tests/ef_tests/src/cases/epoch_processing_crosslinks.rs delete mode 100644 tests/ef_tests/src/cases/epoch_processing_final_updates.rs delete mode 100644 tests/ef_tests/src/cases/epoch_processing_justification_and_finalization.rs delete mode 100644 tests/ef_tests/src/cases/epoch_processing_registry_updates.rs delete mode 100644 tests/ef_tests/src/cases/epoch_processing_slashings.rs diff --git a/eth2/state_processing/src/per_epoch_processing.rs b/eth2/state_processing/src/per_epoch_processing.rs index 08f42a229..f66ce4ea2 100644 --- a/eth2/state_processing/src/per_epoch_processing.rs +++ b/eth2/state_processing/src/per_epoch_processing.rs @@ -1,8 +1,5 @@ use crate::common::get_compact_committees_root; -use apply_rewards::process_rewards_and_penalties; use errors::EpochProcessingError as Error; -use process_slashings::process_slashings; -use registry_updates::process_registry_updates; use std::collections::HashMap; use tree_hash::TreeHash; use types::*; @@ -17,6 +14,10 @@ pub mod tests; pub mod validator_statuses; pub mod winning_root; +pub use apply_rewards::process_rewards_and_penalties; +pub use process_slashings::process_slashings; +pub use 
registry_updates::process_registry_updates; + /// Maps a shard to a winning root. /// /// It is generated during crosslink processing and later used to reward/penalize validators. diff --git a/tests/ef_tests/src/cases.rs b/tests/ef_tests/src/cases.rs index 1216b8728..e3ec54a7f 100644 --- a/tests/ef_tests/src/cases.rs +++ b/tests/ef_tests/src/cases.rs @@ -8,11 +8,7 @@ mod bls_g2_compressed; mod bls_g2_uncompressed; mod bls_priv_to_pub; mod bls_sign_msg; -mod epoch_processing_crosslinks; -mod epoch_processing_final_updates; -mod epoch_processing_justification_and_finalization; -mod epoch_processing_registry_updates; -mod epoch_processing_slashings; +mod epoch_processing; mod genesis_initialization; mod genesis_validity; mod operations_attestation; @@ -34,11 +30,7 @@ pub use bls_g2_compressed::*; pub use bls_g2_uncompressed::*; pub use bls_priv_to_pub::*; pub use bls_sign_msg::*; -pub use epoch_processing_crosslinks::*; -pub use epoch_processing_final_updates::*; -pub use epoch_processing_justification_and_finalization::*; -pub use epoch_processing_registry_updates::*; -pub use epoch_processing_slashings::*; +pub use epoch_processing::*; pub use genesis_initialization::*; pub use genesis_validity::*; pub use operations_attestation::*; @@ -69,6 +61,7 @@ pub trait Case: Debug { /// Path to the directory for this test case. 
fn path(&self) -> &Path { + // FIXME(michael): remove default impl Path::new("") } diff --git a/tests/ef_tests/src/cases/epoch_processing.rs b/tests/ef_tests/src/cases/epoch_processing.rs new file mode 100644 index 000000000..ac47ab236 --- /dev/null +++ b/tests/ef_tests/src/cases/epoch_processing.rs @@ -0,0 +1,147 @@ +use super::*; +use crate::bls_setting::BlsSetting; +use crate::case_result::compare_beacon_state_results_without_caches; +use crate::type_name; +use crate::type_name::TypeName; +use crate::yaml_decode::{ssz_decode_file, yaml_decode_file}; +use serde_derive::Deserialize; +use state_processing::per_epoch_processing::{ + errors::EpochProcessingError, process_crosslinks, process_final_updates, + process_justification_and_finalization, process_registry_updates, process_slashings, + validator_statuses::ValidatorStatuses, +}; +use std::marker::PhantomData; +use std::path::{Path, PathBuf}; +use types::{BeaconState, ChainSpec, EthSpec}; + +#[derive(Debug, Clone, Default, Deserialize)] +pub struct Metadata { + pub description: Option, + pub bls_setting: Option, +} + +#[derive(Debug, Clone, Deserialize)] +#[serde(bound = "E: EthSpec")] +pub struct EpochProcessing> { + pub path: PathBuf, + pub metadata: Metadata, + pub pre: BeaconState, + pub post: Option>, + #[serde(skip_deserializing)] + _phantom: PhantomData, +} + +pub trait EpochTransition: TypeName + Debug { + fn run(state: &mut BeaconState, spec: &ChainSpec) -> Result<(), EpochProcessingError>; +} + +#[derive(Debug)] +pub struct JustificationAndFinalization; +#[derive(Debug)] +pub struct Crosslinks; +#[derive(Debug)] +pub struct RegistryUpdates; +#[derive(Debug)] +pub struct Slashings; +#[derive(Debug)] +pub struct FinalUpdates; + +type_name!( + JustificationAndFinalization, + "justification_and_finalization" +); +type_name!(Crosslinks, "crosslinks"); +type_name!(RegistryUpdates, "registry_updates"); +type_name!(Slashings, "slashings"); +type_name!(FinalUpdates, "final_updates"); + +impl EpochTransition for 
JustificationAndFinalization { + fn run(state: &mut BeaconState, spec: &ChainSpec) -> Result<(), EpochProcessingError> { + let mut validator_statuses = ValidatorStatuses::new(state, spec)?; + validator_statuses.process_attestations(state, spec)?; + process_justification_and_finalization(state, &validator_statuses.total_balances) + } +} + +impl EpochTransition for Crosslinks { + fn run(state: &mut BeaconState, spec: &ChainSpec) -> Result<(), EpochProcessingError> { + process_crosslinks(state, spec)?; + Ok(()) + } +} + +impl EpochTransition for RegistryUpdates { + fn run(state: &mut BeaconState, spec: &ChainSpec) -> Result<(), EpochProcessingError> { + process_registry_updates(state, spec) + } +} + +impl EpochTransition for Slashings { + fn run(state: &mut BeaconState, spec: &ChainSpec) -> Result<(), EpochProcessingError> { + let mut validator_statuses = ValidatorStatuses::new(&state, spec)?; + validator_statuses.process_attestations(&state, spec)?; + process_slashings(state, validator_statuses.total_balances.current_epoch, spec)?; + Ok(()) + } +} + +impl EpochTransition for FinalUpdates { + fn run(state: &mut BeaconState, spec: &ChainSpec) -> Result<(), EpochProcessingError> { + process_final_updates(state, spec) + } +} + +impl> LoadCase for EpochProcessing { + fn load_from_dir(path: &Path) -> Result { + let metadata_path = path.join("meta.yaml"); + let metadata: Metadata = if metadata_path.is_file() { + yaml_decode_file(&metadata_path)? + } else { + Metadata::default() + }; + let pre = ssz_decode_file(&path.join("pre.ssz"))?; + let post_file = path.join("post.ssz"); + let post = if post_file.is_file() { + Some(ssz_decode_file(&post_file)?) 
+ } else { + None + }; + + Ok(Self { + path: path.into(), + metadata, + pre, + post, + _phantom: PhantomData, + }) + } +} + +impl> Case for EpochProcessing { + fn description(&self) -> String { + self.metadata + .description + .clone() + .unwrap_or_else(String::new) + } + + fn path(&self) -> &Path { + &self.path + } + + fn result(&self, _case_index: usize) -> Result<(), Error> { + let mut state = self.pre.clone(); + let mut expected = self.post.clone(); + + let spec = &E::default_spec(); + + let mut result = (|| { + // Processing requires the epoch cache. + state.build_all_caches(spec)?; + + T::run(&mut state, spec).map(|_| state) + })(); + + compare_beacon_state_results_without_caches(&mut result, &mut expected) + } +} diff --git a/tests/ef_tests/src/cases/epoch_processing_crosslinks.rs b/tests/ef_tests/src/cases/epoch_processing_crosslinks.rs deleted file mode 100644 index f2676d122..000000000 --- a/tests/ef_tests/src/cases/epoch_processing_crosslinks.rs +++ /dev/null @@ -1,37 +0,0 @@ -use super::*; -use crate::case_result::compare_beacon_state_results_without_caches; -use serde_derive::Deserialize; -use state_processing::per_epoch_processing::process_crosslinks; -use types::{BeaconState, EthSpec}; - -#[derive(Debug, Clone, Deserialize)] -#[serde(bound = "E: EthSpec")] -pub struct EpochProcessingCrosslinks { - pub description: String, - pub pre: BeaconState, - pub post: Option>, -} - -impl YamlDecode for EpochProcessingCrosslinks { - fn yaml_decode(yaml: &str) -> Result { - Ok(serde_yaml::from_str(yaml).unwrap()) - } -} - -impl Case for EpochProcessingCrosslinks { - fn description(&self) -> String { - self.description.clone() - } - - fn result(&self, _case_index: usize) -> Result<(), Error> { - let mut state = self.pre.clone(); - let mut expected = self.post.clone(); - - // Processing requires the epoch cache. 
- state.build_all_caches(&E::default_spec()).unwrap(); - - let mut result = process_crosslinks(&mut state, &E::default_spec()).map(|_| state); - - compare_beacon_state_results_without_caches(&mut result, &mut expected) - } -} diff --git a/tests/ef_tests/src/cases/epoch_processing_final_updates.rs b/tests/ef_tests/src/cases/epoch_processing_final_updates.rs deleted file mode 100644 index 69e6b8bd3..000000000 --- a/tests/ef_tests/src/cases/epoch_processing_final_updates.rs +++ /dev/null @@ -1,41 +0,0 @@ -use super::*; -use crate::case_result::compare_beacon_state_results_without_caches; -use serde_derive::Deserialize; -use state_processing::per_epoch_processing::process_final_updates; -use types::{BeaconState, EthSpec}; - -#[derive(Debug, Clone, Deserialize)] -#[serde(bound = "E: EthSpec")] -pub struct EpochProcessingFinalUpdates { - pub description: String, - pub pre: BeaconState, - pub post: Option>, -} - -impl YamlDecode for EpochProcessingFinalUpdates { - fn yaml_decode(yaml: &str) -> Result { - Ok(serde_yaml::from_str(yaml).unwrap()) - } -} - -impl Case for EpochProcessingFinalUpdates { - fn description(&self) -> String { - self.description.clone() - } - - fn result(&self, _case_index: usize) -> Result<(), Error> { - let mut state = self.pre.clone(); - let mut expected = self.post.clone(); - - let spec = &E::default_spec(); - - let mut result = (|| { - // Processing requires the epoch cache. 
- state.build_all_caches(spec)?; - - process_final_updates(&mut state, spec).map(|_| state) - })(); - - compare_beacon_state_results_without_caches(&mut result, &mut expected) - } -} diff --git a/tests/ef_tests/src/cases/epoch_processing_justification_and_finalization.rs b/tests/ef_tests/src/cases/epoch_processing_justification_and_finalization.rs deleted file mode 100644 index 788301086..000000000 --- a/tests/ef_tests/src/cases/epoch_processing_justification_and_finalization.rs +++ /dev/null @@ -1,46 +0,0 @@ -use super::*; -use crate::case_result::compare_beacon_state_results_without_caches; -use serde_derive::Deserialize; -use state_processing::per_epoch_processing::{ - process_justification_and_finalization, validator_statuses::ValidatorStatuses, -}; -use types::{BeaconState, EthSpec}; - -#[derive(Debug, Clone, Deserialize)] -#[serde(bound = "E: EthSpec")] -pub struct EpochProcessingJustificationAndFinalization { - pub description: String, - pub pre: BeaconState, - pub post: Option>, -} - -impl YamlDecode for EpochProcessingJustificationAndFinalization { - fn yaml_decode(yaml: &str) -> Result { - Ok(serde_yaml::from_str(yaml).unwrap()) - } -} - -impl Case for EpochProcessingJustificationAndFinalization { - fn description(&self) -> String { - self.description.clone() - } - - fn result(&self, _case_index: usize) -> Result<(), Error> { - let mut state = self.pre.clone(); - let mut expected = self.post.clone(); - - let spec = &E::default_spec(); - - // Processing requires the epoch cache. 
- state.build_all_caches(spec).unwrap(); - - let mut result = (|| { - let mut validator_statuses = ValidatorStatuses::new(&state, spec)?; - validator_statuses.process_attestations(&state, spec)?; - process_justification_and_finalization(&mut state, &validator_statuses.total_balances) - .map(|_| state) - })(); - - compare_beacon_state_results_without_caches(&mut result, &mut expected) - } -} diff --git a/tests/ef_tests/src/cases/epoch_processing_registry_updates.rs b/tests/ef_tests/src/cases/epoch_processing_registry_updates.rs deleted file mode 100644 index a01f895fe..000000000 --- a/tests/ef_tests/src/cases/epoch_processing_registry_updates.rs +++ /dev/null @@ -1,38 +0,0 @@ -use super::*; -use crate::case_result::compare_beacon_state_results_without_caches; -use serde_derive::Deserialize; -use state_processing::per_epoch_processing::registry_updates::process_registry_updates; -use types::{BeaconState, EthSpec}; - -#[derive(Debug, Clone, Deserialize)] -#[serde(bound = "E: EthSpec")] -pub struct EpochProcessingRegistryUpdates { - pub description: String, - pub pre: BeaconState, - pub post: Option>, -} - -impl YamlDecode for EpochProcessingRegistryUpdates { - fn yaml_decode(yaml: &str) -> Result { - Ok(serde_yaml::from_str(yaml).unwrap()) - } -} - -impl Case for EpochProcessingRegistryUpdates { - fn description(&self) -> String { - self.description.clone() - } - - fn result(&self, _case_index: usize) -> Result<(), Error> { - let mut state = self.pre.clone(); - let mut expected = self.post.clone(); - let spec = &E::default_spec(); - - // Processing requires the epoch cache. 
- state.build_all_caches(spec).unwrap(); - - let mut result = process_registry_updates(&mut state, spec).map(|_| state); - - compare_beacon_state_results_without_caches(&mut result, &mut expected) - } -} diff --git a/tests/ef_tests/src/cases/epoch_processing_slashings.rs b/tests/ef_tests/src/cases/epoch_processing_slashings.rs deleted file mode 100644 index d2a988d92..000000000 --- a/tests/ef_tests/src/cases/epoch_processing_slashings.rs +++ /dev/null @@ -1,50 +0,0 @@ -use super::*; -use crate::case_result::compare_beacon_state_results_without_caches; -use serde_derive::Deserialize; -use state_processing::per_epoch_processing::{ - process_slashings::process_slashings, validator_statuses::ValidatorStatuses, -}; -use types::{BeaconState, EthSpec}; - -#[derive(Debug, Clone, Deserialize)] -#[serde(bound = "E: EthSpec")] -pub struct EpochProcessingSlashings { - pub description: String, - pub pre: BeaconState, - pub post: Option>, -} - -impl YamlDecode for EpochProcessingSlashings { - fn yaml_decode(yaml: &str) -> Result { - Ok(serde_yaml::from_str(yaml).unwrap()) - } -} - -impl Case for EpochProcessingSlashings { - fn description(&self) -> String { - self.description.clone() - } - - fn result(&self, _case_index: usize) -> Result<(), Error> { - let mut state = self.pre.clone(); - let mut expected = self.post.clone(); - - let spec = &E::default_spec(); - - let mut result = (|| { - // Processing requires the epoch cache. 
- state.build_all_caches(spec)?; - - let mut validator_statuses = ValidatorStatuses::new(&state, spec)?; - validator_statuses.process_attestations(&state, spec)?; - process_slashings( - &mut state, - validator_statuses.total_balances.current_epoch, - spec, - ) - .map(|_| state) - })(); - - compare_beacon_state_results_without_caches(&mut result, &mut expected) - } -} diff --git a/tests/ef_tests/src/cases/sanity_slots.rs b/tests/ef_tests/src/cases/sanity_slots.rs index 27c6c13c3..a66f1c2c4 100644 --- a/tests/ef_tests/src/cases/sanity_slots.rs +++ b/tests/ef_tests/src/cases/sanity_slots.rs @@ -27,7 +27,7 @@ impl LoadCase for SanitySlots { fn load_from_dir(path: &Path) -> Result { let metadata_path = path.join("meta.yaml"); let metadata: Metadata = if metadata_path.is_file() { - yaml_decode_file(&path.join("meta.yaml"))? + yaml_decode_file(&metadata_path)? } else { Metadata::default() }; diff --git a/tests/ef_tests/src/handler.rs b/tests/ef_tests/src/handler.rs index f66dc7b18..a614afe76 100644 --- a/tests/ef_tests/src/handler.rs +++ b/tests/ef_tests/src/handler.rs @@ -1,4 +1,4 @@ -use crate::cases::{self, Case, Cases, LoadCase}; +use crate::cases::{self, Case, Cases, EpochTransition, LoadCase}; use crate::type_name::TypeName; use crate::EfTest; use std::fs; @@ -184,3 +184,21 @@ impl Handler for SanitySlotsHandler { "slots" } } + +pub struct EpochProcessingHandler(PhantomData<(E, T)>); + +impl> Handler for EpochProcessingHandler { + type Case = cases::EpochProcessing; + + fn config_name() -> &'static str { + E::name() + } + + fn runner_name() -> &'static str { + "epoch_processing" + } + + fn handler_name() -> &'static str { + T::name() + } +} diff --git a/tests/ef_tests/src/lib.rs b/tests/ef_tests/src/lib.rs index 06e01fc03..54e674d85 100644 --- a/tests/ef_tests/src/lib.rs +++ b/tests/ef_tests/src/lib.rs @@ -2,6 +2,9 @@ use types::EthSpec; pub use case_result::CaseResult; pub use cases::Case; +pub use cases::{ + Crosslinks, FinalUpdates, JustificationAndFinalization, 
RegistryUpdates, Slashings, +}; pub use error::Error; pub use handler::*; pub use yaml_decode::YamlDecode; diff --git a/tests/ef_tests/src/type_name.rs b/tests/ef_tests/src/type_name.rs index fe55a7f5f..5af0c5256 100644 --- a/tests/ef_tests/src/type_name.rs +++ b/tests/ef_tests/src/type_name.rs @@ -5,57 +5,56 @@ pub trait TypeName { fn name() -> &'static str; } -impl TypeName for MinimalEthSpec { - fn name() -> &'static str { - "minimal" - } -} - -impl TypeName for MainnetEthSpec { - fn name() -> &'static str { - "mainnet" - } -} - -macro_rules! impl_name { +#[macro_export] +macro_rules! type_name { ($typ:ident) => { + type_name!($typ, stringify!($typ)); + }; + ($typ:ident, $name:expr) => { impl TypeName for $typ { fn name() -> &'static str { - stringify!($typ) + $name } } }; } -macro_rules! impl_name_generic { +#[macro_export] +macro_rules! type_name_generic { ($typ:ident) => { + type_name_generic!($typ, stringify!($typ)); + }; + ($typ:ident, $name:expr) => { impl TypeName for $typ { fn name() -> &'static str { - stringify!($typ) + $name } } }; } -impl_name_generic!(Attestation); -impl_name!(AttestationData); -impl_name!(AttestationDataAndCustodyBit); -impl_name_generic!(AttesterSlashing); -impl_name_generic!(BeaconBlock); -impl_name_generic!(BeaconBlockBody); -impl_name!(BeaconBlockHeader); -impl_name_generic!(BeaconState); -impl_name!(Checkpoint); -impl_name_generic!(CompactCommittee); -impl_name!(Crosslink); -impl_name!(Deposit); -impl_name!(DepositData); -impl_name!(Eth1Data); -impl_name!(Fork); -impl_name_generic!(HistoricalBatch); -impl_name_generic!(IndexedAttestation); -impl_name_generic!(PendingAttestation); -impl_name!(ProposerSlashing); -impl_name!(Transfer); -impl_name!(Validator); -impl_name!(VoluntaryExit); +type_name!(MinimalEthSpec, "minimal"); +type_name!(MainnetEthSpec, "mainnet"); + +type_name_generic!(Attestation); +type_name!(AttestationData); +type_name!(AttestationDataAndCustodyBit); +type_name_generic!(AttesterSlashing); 
+type_name_generic!(BeaconBlock); +type_name_generic!(BeaconBlockBody); +type_name!(BeaconBlockHeader); +type_name_generic!(BeaconState); +type_name!(Checkpoint); +type_name_generic!(CompactCommittee); +type_name!(Crosslink); +type_name!(Deposit); +type_name!(DepositData); +type_name!(Eth1Data); +type_name!(Fork); +type_name_generic!(HistoricalBatch); +type_name_generic!(IndexedAttestation); +type_name_generic!(PendingAttestation); +type_name!(ProposerSlashing); +type_name!(Transfer); +type_name!(Validator); +type_name!(VoluntaryExit); diff --git a/tests/ef_tests/tests/tests.rs b/tests/ef_tests/tests/tests.rs index 0fb751c9e..848ae05fa 100644 --- a/tests/ef_tests/tests/tests.rs +++ b/tests/ef_tests/tests/tests.rs @@ -262,52 +262,37 @@ ssz_static_test!(ssz_static_transfer, Transfer, SR); ssz_static_test!(ssz_static_validator, Validator); ssz_static_test!(ssz_static_voluntary_exit, VoluntaryExit, SR); -/* #[test] fn epoch_processing_justification_and_finalization() { - yaml_files_in_test_dir(&Path::new("epoch_processing").join("justification_and_finalization")) - .into_par_iter() - .for_each(|file| { - Doc::assert_tests_pass(file); - }); + EpochProcessingHandler::::run(); + EpochProcessingHandler::::run(); } #[test] fn epoch_processing_crosslinks() { - yaml_files_in_test_dir(&Path::new("epoch_processing").join("crosslinks")) - .into_par_iter() - .for_each(|file| { - Doc::assert_tests_pass(file); - }); + EpochProcessingHandler::::run(); + EpochProcessingHandler::::run(); } #[test] fn epoch_processing_registry_updates() { - yaml_files_in_test_dir(&Path::new("epoch_processing").join("registry_updates")) - .into_par_iter() - .for_each(|file| { - Doc::assert_tests_pass(file); - }); + EpochProcessingHandler::::run(); + EpochProcessingHandler::::run(); } #[test] fn epoch_processing_slashings() { - yaml_files_in_test_dir(&Path::new("epoch_processing").join("slashings")) - .into_par_iter() - .for_each(|file| { - Doc::assert_tests_pass(file); - }); + 
EpochProcessingHandler::::run(); + EpochProcessingHandler::::run(); } #[test] fn epoch_processing_final_updates() { - yaml_files_in_test_dir(&Path::new("epoch_processing").join("final_updates")) - .into_par_iter() - .for_each(|file| { - Doc::assert_tests_pass(file); - }); + EpochProcessingHandler::::run(); + EpochProcessingHandler::::run(); } +/* #[test] fn genesis_initialization() { yaml_files_in_test_dir(&Path::new("genesis").join("initialization")) From 25f2e212c307627ef169a93557ad1527d08e0151 Mon Sep 17 00:00:00 2001 From: Paul Hauner Date: Fri, 30 Aug 2019 13:30:07 +1000 Subject: [PATCH 151/305] Update to latest interop keypair spec --- .../generate_deterministic_keypairs.rs | 15 +- eth2/utils/bls/src/public_key.rs | 4 + eth2/utils/bls/src/secret_key.rs | 4 + eth2/utils/eth2_interop_keypairs/Cargo.toml | 8 + eth2/utils/eth2_interop_keypairs/src/lib.rs | 138 +++++------------- .../utils/eth2_interop_keypairs/tests/test.rs | 64 ++++++++ 6 files changed, 121 insertions(+), 112 deletions(-) create mode 100644 eth2/utils/eth2_interop_keypairs/tests/test.rs diff --git a/eth2/types/src/test_utils/generate_deterministic_keypairs.rs b/eth2/types/src/test_utils/generate_deterministic_keypairs.rs index 172b142ef..a687eb978 100644 --- a/eth2/types/src/test_utils/generate_deterministic_keypairs.rs +++ b/eth2/types/src/test_utils/generate_deterministic_keypairs.rs @@ -1,5 +1,5 @@ use crate::*; -use eth2_interop_keypairs::be_private_key; +use eth2_interop_keypairs::keypair; use log::debug; use rayon::prelude::*; @@ -15,8 +15,8 @@ pub fn generate_deterministic_keypairs(validator_count: usize) -> Vec { let keypairs: Vec = (0..validator_count) .collect::>() - .par_iter() - .map(|&i| generate_deterministic_keypair(i)) + .into_par_iter() + .map(generate_deterministic_keypair) .collect(); keypairs @@ -26,8 +26,9 @@ pub fn generate_deterministic_keypairs(validator_count: usize) -> Vec { /// /// This is used for testing only, and not to be used in production! 
pub fn generate_deterministic_keypair(validator_index: usize) -> Keypair { - let sk = SecretKey::from_bytes(&be_private_key(validator_index)) - .expect("be_private_key always returns valid keys"); - let pk = PublicKey::from_secret_key(&sk); - Keypair { sk, pk } + let raw = keypair(validator_index); + Keypair { + pk: PublicKey::from_raw(raw.pk), + sk: SecretKey::from_raw(raw.sk), + } } diff --git a/eth2/utils/bls/src/public_key.rs b/eth2/utils/bls/src/public_key.rs index e03b17686..4b5abb58e 100644 --- a/eth2/utils/bls/src/public_key.rs +++ b/eth2/utils/bls/src/public_key.rs @@ -20,6 +20,10 @@ impl PublicKey { PublicKey(RawPublicKey::from_secret_key(secret_key.as_raw())) } + pub fn from_raw(raw: RawPublicKey) -> Self { + Self(raw) + } + /// Returns the underlying signature. pub fn as_raw(&self) -> &RawPublicKey { &self.0 diff --git a/eth2/utils/bls/src/secret_key.rs b/eth2/utils/bls/src/secret_key.rs index 12f9a713b..54da0fa0f 100644 --- a/eth2/utils/bls/src/secret_key.rs +++ b/eth2/utils/bls/src/secret_key.rs @@ -20,6 +20,10 @@ impl SecretKey { SecretKey(RawSecretKey::random(&mut rand::thread_rng())) } + pub fn from_raw(raw: RawSecretKey) -> Self { + Self(raw) + } + /// Returns the underlying point as compressed bytes. 
fn as_bytes(&self) -> Vec { self.as_raw().as_bytes() diff --git a/eth2/utils/eth2_interop_keypairs/Cargo.toml b/eth2/utils/eth2_interop_keypairs/Cargo.toml index e1c4dab04..31f9718cd 100644 --- a/eth2/utils/eth2_interop_keypairs/Cargo.toml +++ b/eth2/utils/eth2_interop_keypairs/Cargo.toml @@ -7,5 +7,13 @@ edition = "2018" # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html [dependencies] +lazy_static = "1.4" num-bigint = "0.2" eth2_hashing = "0.1" +milagro_bls = { git = "https://github.com/sigp/milagro_bls", tag = "v0.10.0" } + +[dev-dependencies] +base64 = "0.10" +serde = "1.0" +serde_derive = "1.0" +serde_yaml = "0.8" diff --git a/eth2/utils/eth2_interop_keypairs/src/lib.rs b/eth2/utils/eth2_interop_keypairs/src/lib.rs index 8ba2b9eba..4c1320723 100644 --- a/eth2/utils/eth2_interop_keypairs/src/lib.rs +++ b/eth2/utils/eth2_interop_keypairs/src/lib.rs @@ -5,126 +5,54 @@ //! keys generated here are **not secret** and are **not for production use**. //! //! Note: these keys have not been tested against a reference implementation, yet. +#[macro_use] +extern crate lazy_static; use eth2_hashing::hash; +use milagro_bls::{Keypair, PublicKey, SecretKey}; use num_bigint::BigUint; pub const CURVE_ORDER_BITS: usize = 255; pub const PRIVATE_KEY_BYTES: usize = 48; pub const HASH_BYTES: usize = 32; -fn hash_big_int_le(uint: BigUint) -> BigUint { - let mut preimage = uint.to_bytes_le(); - preimage.resize(32, 0_u8); - BigUint::from_bytes_le(&hash(&preimage)) +lazy_static! 
{ + static ref CURVE_ORDER: BigUint = + "52435875175126190479447740508185965837690552500527637822603658699938581184513" + .parse::() + .expect("Curve order should be valid"); } -fn private_key(validator_index: usize) -> BigUint { - let mut key = BigUint::from(validator_index); - loop { - key = hash_big_int_le(key); - if key.bits() <= CURVE_ORDER_BITS { - break key; - } - } -} - -/// Generates an **unsafe** BLS12-381 private key for the given validator index, where that private -/// key is represented in big-endian bytes. -pub fn be_private_key(validator_index: usize) -> [u8; PRIVATE_KEY_BYTES] { - let vec = private_key(validator_index).to_bytes_be(); - - let mut out = [0; PRIVATE_KEY_BYTES]; - out[PRIVATE_KEY_BYTES - vec.len()..PRIVATE_KEY_BYTES].copy_from_slice(&vec); - out -} - -/// Generates an **unsafe** BLS12-381 private key for the given validator index, where that private -/// key is represented in little-endian bytes. pub fn le_private_key(validator_index: usize) -> [u8; PRIVATE_KEY_BYTES] { - let vec = private_key(validator_index).to_bytes_le(); + let preimage = { + let mut bytes = [0; HASH_BYTES]; + let index = validator_index.to_le_bytes(); + bytes[0..index.len()].copy_from_slice(&index); + bytes + }; - let mut out = [0; PRIVATE_KEY_BYTES]; - out[0..vec.len()].copy_from_slice(&vec); - out + let privkey = BigUint::from_bytes_le(&hash(&preimage)) % &*CURVE_ORDER; + + let mut bytes = [0; PRIVATE_KEY_BYTES]; + let privkey_bytes = privkey.to_bytes_le(); + bytes[0..privkey_bytes.len()].copy_from_slice(&privkey_bytes); + bytes } -#[cfg(test)] -mod tests { - use super::*; +pub fn keypair(validator_index: usize) -> Keypair { + let bytes = le_private_key(validator_index); - fn flip(vec: &[u8]) -> Vec { - let len = vec.len(); - let mut out = vec![0; len]; - for i in 0..len { - out[len - 1 - i] = vec[i]; - } - out - } + let sk = + SecretKey::from_bytes(&swap_bytes(bytes.to_vec())).expect("Should be valid private key"); - fn pad_le_bls(mut vec: Vec) -> Vec { - 
vec.resize(PRIVATE_KEY_BYTES, 0_u8); - vec - } - - fn pad_be_bls(mut vec: Vec) -> Vec { - let mut out = vec![0; PRIVATE_KEY_BYTES - vec.len()]; - out.append(&mut vec); - out - } - - fn pad_le_hash(index: usize) -> Vec { - let mut vec = index.to_le_bytes().to_vec(); - vec.resize(HASH_BYTES, 0_u8); - vec - } - - fn multihash(index: usize, rounds: usize) -> Vec { - let mut vec = pad_le_hash(index); - for _ in 0..rounds { - vec = hash(&vec); - } - vec - } - - fn compare(validator_index: usize, preimage: &[u8]) { - assert_eq!( - &le_private_key(validator_index)[..], - &pad_le_bls(hash(preimage))[..] - ); - assert_eq!( - &be_private_key(validator_index)[..], - &pad_be_bls(flip(&hash(preimage)))[..] - ); - } - - #[test] - fn consistency() { - for i in 0..256 { - let le = BigUint::from_bytes_le(&le_private_key(i)); - let be = BigUint::from_bytes_be(&be_private_key(i)); - assert_eq!(le, be); - } - } - - #[test] - fn non_repeats() { - // These indices only need one hash to be in the curve order. - compare(0, &pad_le_hash(0)); - compare(3, &pad_le_hash(3)); - } - - #[test] - fn repeats() { - // Index 5 needs 5x hashes to get into the curve order. 
- compare(5, &multihash(5, 5)); - } - - #[test] - fn doesnt_panic() { - for i in 0..256 { - be_private_key(i); - le_private_key(i); - } + Keypair { + pk: PublicKey::from_secret_key(&sk), + sk, } } + +fn swap_bytes(input: Vec) -> Vec { + let mut output = vec![]; + input.into_iter().rev().for_each(|byte| output.push(byte)); + output +} diff --git a/eth2/utils/eth2_interop_keypairs/tests/test.rs b/eth2/utils/eth2_interop_keypairs/tests/test.rs new file mode 100644 index 000000000..45f128db6 --- /dev/null +++ b/eth2/utils/eth2_interop_keypairs/tests/test.rs @@ -0,0 +1,64 @@ +#![cfg(test)] +use eth2_interop_keypairs::{keypair, le_private_key}; +use num_bigint::BigUint; + +#[test] +fn reference_private_keys() { + // Sourced from: + // + // https://github.com/ethereum/eth2.0-pm/blob/6e41fcf383ebeb5125938850d8e9b4e9888389b4/interop/mocked_start/keygen_test_vector.yaml + let reference = [ + "16808672146709759238327133555736750089977066230599028589193936481731504400486", + "37006103240406073079686739739280712467525465637222501547219594975923976982528", + "22330876536127119444572216874798222843352868708084730796787004036811744442455", + "17048462031355941381150076874414096388968985457797372268770826099852902060945", + "28647806952216650698330424381872693846361470773871570637461872359310549743691", + "2416304019107052589452838695606585506736351107897780798170812672519914514344", + "7300215445567548136411883691093515822872548648751398235557229381530420545683", + "26495790445032093722332687600112008700915252495659977774957922313678954054133", + "2908643403277969554503670470854573663206729491025062456164283925661321952518", + "19554639423851580804889717218680781396599791537051606512605582393920758869044", + ]; + reference + .into_iter() + .enumerate() + .for_each(|(i, reference)| { + let bytes = le_private_key(i); + let num = BigUint::from_bytes_le(&bytes); + assert_eq!(&num.to_str_radix(10), reference) + }); +} + +#[test] +fn reference_public_keys() { + // Sourced from: + // + 
// https://github.com/ethereum/eth2.0-pm/blob/6e41fcf383ebeb5125938850d8e9b4e9888389b4/interop/mocked_start/keygen_test_vector.yaml + let reference = [ + "qZp27XeW974i1bfoXe63xWd+iOUR4LM3YY+MTrYTSbS/LRU/ZJ97UzWf6LlKOORM", + "uJvrxpl2lyajGMjplxvTFxKXxhrqSmV4p6T5S1R9y6W6wWqJEItrah/jaV0ah0oL", + "o6MrD4tN24PxoKhT2B3XJd/ld9T0w9uOzlLOKwJuyoSBXBp+jpKk3j11VzO/fkqb", + "iMFB33fNnY16cadcgmxBqcnwPG7hsYDz54UvaigAmd7TUbWNZuZTr45CgWpNj1Mu", + "gSg7eiDhykYOvZu9dwBdVXNwyrsfmkT1MMTExmIw9nX434tMKBiFGqfXeoDKWkpe", + "qwvdoPhfhC9DG+rM8SUL8f17pRtBAP1kNktkAf2oW7AGmz5xW1iBloTn/AsQpyo0", + "mXfxyLcxqNVVgUa/uGyuomQ088WHi1ib8oCkLJFZ5wDp3w5AhilsILAR0ueMJ9Nz", + "qNTHwneVpyWWExfvWVOnAy7W2Dc524sOinI1PRuLRDlCf376LInKoDzJ8o+Muris", + "ptMQ27+rmiJFD1mZP4ekzl22Ij87Xx8w0sTscYki1ADgs8d0HejlmWD3JBGg7hCn", + "mJNBPAAoOj+e2f2YRd2hzqOCKNIlZ/lUHczDV+VKLWpuIEEDySVky8BfSQWsfEk6", + ]; + reference + .into_iter() + .enumerate() + .for_each(|(i, reference)| { + let pair = keypair(i); + let reference = base64::decode(reference).expect("Reference should be valid base64"); + + assert_eq!( + reference.len(), + 48, + "Reference should be 48 bytes (public key size)" + ); + + assert_eq!(pair.pk.as_bytes(), reference); + }); +} From c5a22b57d29e59f578affa938f1be50da0b2a539 Mon Sep 17 00:00:00 2001 From: Michael Sproul Date: Fri, 30 Aug 2019 14:10:28 +1000 Subject: [PATCH 152/305] Genesis tests --- .../src/cases/genesis_initialization.rs | 39 ++++++++++--- tests/ef_tests/src/cases/genesis_validity.rs | 24 +++++--- tests/ef_tests/src/handler.rs | 36 ++++++++++++ tests/ef_tests/tests/tests.rs | 57 ++----------------- 4 files changed, 85 insertions(+), 71 deletions(-) diff --git a/tests/ef_tests/src/cases/genesis_initialization.rs b/tests/ef_tests/src/cases/genesis_initialization.rs index 7ae8eef59..4f0fa4296 100644 --- a/tests/ef_tests/src/cases/genesis_initialization.rs +++ b/tests/ef_tests/src/cases/genesis_initialization.rs @@ -1,34 +1,55 @@ use super::*; -use crate::bls_setting::BlsSetting; use 
crate::case_result::compare_beacon_state_results_without_caches; +use crate::yaml_decode::{ssz_decode_file, yaml_decode_file}; use serde_derive::Deserialize; use state_processing::initialize_beacon_state_from_eth1; +use std::path::PathBuf; use types::{BeaconState, Deposit, EthSpec, Hash256}; +#[derive(Debug, Clone, Deserialize)] +struct Metadata { + deposits_count: usize, +} + #[derive(Debug, Clone, Deserialize)] #[serde(bound = "E: EthSpec")] pub struct GenesisInitialization { - pub description: String, - pub bls_setting: Option, + pub path: PathBuf, pub eth1_block_hash: Hash256, pub eth1_timestamp: u64, pub deposits: Vec, pub state: Option>, } -impl YamlDecode for GenesisInitialization { - fn yaml_decode(yaml: &str) -> Result { - Ok(serde_yaml::from_str(yaml).unwrap()) +impl LoadCase for GenesisInitialization { + fn load_from_dir(path: &Path) -> Result { + let eth1_block_hash = ssz_decode_file(&path.join("eth1_block_hash.ssz"))?; + let eth1_timestamp = yaml_decode_file(&path.join("eth1_timestamp.yaml"))?; + let meta: Metadata = yaml_decode_file(&path.join("meta.yaml"))?; + let deposits: Vec = (0..meta.deposits_count) + .map(|i| { + let filename = format!("deposits_{}.ssz", i); + ssz_decode_file(&path.join(filename)) + }) + .collect::>()?; + let state = ssz_decode_file(&path.join("state.ssz"))?; + + Ok(Self { + path: path.into(), + eth1_block_hash, + eth1_timestamp, + deposits, + state: Some(state), + }) } } impl Case for GenesisInitialization { - fn description(&self) -> String { - self.description.clone() + fn path(&self) -> &Path { + &self.path } fn result(&self, _case_index: usize) -> Result<(), Error> { - self.bls_setting.unwrap_or_default().check()?; let spec = &E::default_spec(); let mut result = initialize_beacon_state_from_eth1( diff --git a/tests/ef_tests/src/cases/genesis_validity.rs b/tests/ef_tests/src/cases/genesis_validity.rs index 7ddd3e8fd..efebe5e11 100644 --- a/tests/ef_tests/src/cases/genesis_validity.rs +++ 
b/tests/ef_tests/src/cases/genesis_validity.rs @@ -1,31 +1,37 @@ use super::*; -use crate::bls_setting::BlsSetting; +use crate::yaml_decode::{ssz_decode_file, yaml_decode_file}; use serde_derive::Deserialize; use state_processing::is_valid_genesis_state; +use std::path::{Path, PathBuf}; use types::{BeaconState, EthSpec}; #[derive(Debug, Clone, Deserialize)] #[serde(bound = "E: EthSpec")] pub struct GenesisValidity { - pub description: String, - pub bls_setting: Option, + pub path: PathBuf, pub genesis: BeaconState, pub is_valid: bool, } -impl YamlDecode for GenesisValidity { - fn yaml_decode(yaml: &str) -> Result { - Ok(serde_yaml::from_str(yaml).unwrap()) +impl LoadCase for GenesisValidity { + fn load_from_dir(path: &Path) -> Result { + let genesis = ssz_decode_file(&path.join("genesis.ssz"))?; + let is_valid = yaml_decode_file(&path.join("is_valid.yaml"))?; + + Ok(Self { + path: path.into(), + genesis, + is_valid, + }) } } impl Case for GenesisValidity { - fn description(&self) -> String { - self.description.clone() + fn path(&self) -> &Path { + &self.path } fn result(&self, _case_index: usize) -> Result<(), Error> { - self.bls_setting.unwrap_or_default().check()?; let spec = &E::default_spec(); let is_valid = is_valid_genesis_state(&self.genesis, spec); diff --git a/tests/ef_tests/src/handler.rs b/tests/ef_tests/src/handler.rs index a614afe76..02518a13d 100644 --- a/tests/ef_tests/src/handler.rs +++ b/tests/ef_tests/src/handler.rs @@ -202,3 +202,39 @@ impl> Handler for EpochProcessingHa T::name() } } + +pub struct GenesisValidityHandler(PhantomData); + +impl Handler for GenesisValidityHandler { + type Case = cases::GenesisValidity; + + fn config_name() -> &'static str { + E::name() + } + + fn runner_name() -> &'static str { + "genesis" + } + + fn handler_name() -> &'static str { + "validity" + } +} + +pub struct GenesisInitializationHandler(PhantomData); + +impl Handler for GenesisInitializationHandler { + type Case = cases::GenesisInitialization; + + fn 
config_name() -> &'static str { + E::name() + } + + fn runner_name() -> &'static str { + "genesis" + } + + fn handler_name() -> &'static str { + "initialization" + } +} diff --git a/tests/ef_tests/tests/tests.rs b/tests/ef_tests/tests/tests.rs index 848ae05fa..37740cec0 100644 --- a/tests/ef_tests/tests/tests.rs +++ b/tests/ef_tests/tests/tests.rs @@ -1,52 +1,11 @@ use ef_tests::*; use rayon::prelude::*; -use std::path::{Path, PathBuf}; use types::{ Attestation, AttestationData, AttestationDataAndCustodyBit, AttesterSlashing, BeaconBlock, BeaconBlockBody, BeaconBlockHeader, BeaconState, Checkpoint, CompactCommittee, Crosslink, Deposit, DepositData, Eth1Data, Fork, HistoricalBatch, IndexedAttestation, MainnetEthSpec, MinimalEthSpec, PendingAttestation, ProposerSlashing, Transfer, Validator, VoluntaryExit, }; -use walkdir::WalkDir; - -fn yaml_files_in_test_dir(dir: &Path) -> Vec { - let base_path = PathBuf::from(env!("CARGO_MANIFEST_DIR")) - .join("eth2.0-spec-tests") - .join("tests") - .join("general") - .join("phase0") - .join(dir); - - assert!( - base_path.exists(), - format!( - "Unable to locate {:?}. Did you init git submodules?", - base_path - ) - ); - - let mut paths: Vec = WalkDir::new(base_path) - .into_iter() - .filter_map(|e| e.ok()) - .filter_map(|entry| { - if entry.file_type().is_file() { - match entry.file_name().to_str() { - Some(f) if f.ends_with(".yaml") => Some(entry.path().to_path_buf()), - Some(f) if f.ends_with(".yml") => Some(entry.path().to_path_buf()), - _ => None, - } - } else { - None - } - }) - .collect(); - - // Reverse the file order. Assuming files come in lexicographical order, executing tests in - // reverse means we get the "minimal" tests before the "mainnet" tests. This makes life easier - // for debugging. 
- paths.reverse(); - paths -} /* #[test] @@ -292,22 +251,14 @@ fn epoch_processing_final_updates() { EpochProcessingHandler::::run(); } -/* #[test] fn genesis_initialization() { - yaml_files_in_test_dir(&Path::new("genesis").join("initialization")) - .into_par_iter() - .for_each(|file| { - Doc::assert_tests_pass(file); - }); + GenesisInitializationHandler::::run(); } #[test] fn genesis_validity() { - yaml_files_in_test_dir(&Path::new("genesis").join("validity")) - .into_par_iter() - .for_each(|file| { - Doc::assert_tests_pass(file); - }); + GenesisValidityHandler::::run(); + // TODO: mainnet tests don't exist yet + // GenesisValidityHandler::::run(); } -*/ From e154b30232a9ca5db31cd1d4331d17a49a840ff7 Mon Sep 17 00:00:00 2001 From: Michael Sproul Date: Tue, 25 Jun 2019 18:46:57 +1000 Subject: [PATCH 153/305] merkle_proof: implement tree construction Plus QuickCheck tests! --- eth2/utils/merkle_proof/Cargo.toml | 5 + eth2/utils/merkle_proof/src/lib.rs | 193 ++++++++++++++++++++++++++++- 2 files changed, 193 insertions(+), 5 deletions(-) diff --git a/eth2/utils/merkle_proof/Cargo.toml b/eth2/utils/merkle_proof/Cargo.toml index 6ef6cc0aa..5ffb6af53 100644 --- a/eth2/utils/merkle_proof/Cargo.toml +++ b/eth2/utils/merkle_proof/Cargo.toml @@ -7,3 +7,8 @@ edition = "2018" [dependencies] ethereum-types = "0.6" eth2_hashing = { path = "../eth2_hashing" } +lazy_static = "1.3.0" + +[dev-dependencies] +quickcheck = "0.8" +quickcheck_macros = "0.8" diff --git a/eth2/utils/merkle_proof/src/lib.rs b/eth2/utils/merkle_proof/src/lib.rs index bc8bcea12..73a972c75 100644 --- a/eth2/utils/merkle_proof/src/lib.rs +++ b/eth2/utils/merkle_proof/src/lib.rs @@ -1,6 +1,138 @@ +#[macro_use] +extern crate lazy_static; + use eth2_hashing::hash; use ethereum_types::H256; +const MAX_TREE_DEPTH: usize = 32; +const EMPTY_SLICE: &[H256] = &[]; + +lazy_static! { + /// Cached zero hashes where `ZERO_HASHES[i]` is the hash of a Merkle tree with 2^i zero leaves. 
+ static ref ZERO_HASHES: Vec = { + let mut hashes = vec![H256::from([0; 32]); MAX_TREE_DEPTH + 1]; + + for i in 0..MAX_TREE_DEPTH { + hashes[i + 1] = hash_concat(hashes[i], hashes[i]); + } + + hashes + }; + + /// Zero nodes to act as "synthetic" left and right subtrees of other zero nodes. + static ref ZERO_NODES: Vec = { + (0..MAX_TREE_DEPTH + 1).map(MerkleTree::Zero).collect() + }; +} + +/// Right-sparse Merkle tree. +/// +/// Efficiently represents a Merkle tree of fixed depth where only the first N +/// indices are populated by non-zero leaves (perfect for the deposit contract tree). +#[derive(Debug)] +pub enum MerkleTree { + /// Leaf node with the hash of its content. + Leaf(H256), + /// Internal node with hash, left subtree and right subtree. + Node(H256, Box, Box), + /// Zero subtree of a given depth. + /// + /// It represents a Merkle tree of 2^depth zero leaves. + Zero(usize), +} + +impl MerkleTree { + /// Create a new Merkle tree from a list of leaves and a fixed depth. + pub fn create(leaves: &[H256], depth: usize) -> Self { + use MerkleTree::*; + + if leaves.is_empty() { + return Zero(depth); + } + + match depth { + 0 => { + debug_assert_eq!(leaves.len(), 1); + Leaf(leaves[0]) + } + _ => { + // Split leaves into left and right subtrees + let subtree_capacity = 2usize.pow(depth as u32 - 1); + let (left_leaves, right_leaves) = if leaves.len() <= subtree_capacity { + (leaves, EMPTY_SLICE) + } else { + leaves.split_at(subtree_capacity) + }; + + let left_subtree = MerkleTree::create(left_leaves, depth - 1); + let right_subtree = MerkleTree::create(right_leaves, depth - 1); + let hash = hash_concat(left_subtree.hash(), right_subtree.hash()); + + Node(hash, Box::new(left_subtree), Box::new(right_subtree)) + } + } + } + + /// Retrieve the root hash of this Merkle tree. 
+ pub fn hash(&self) -> H256 { + match *self { + MerkleTree::Leaf(h) => h, + MerkleTree::Node(h, _, _) => h, + MerkleTree::Zero(depth) => ZERO_HASHES[depth], + } + } + + /// Get a reference to the left and right subtrees if they exist. + pub fn left_and_right_branches(&self) -> Option<(&Self, &Self)> { + match *self { + MerkleTree::Leaf(_) | MerkleTree::Zero(0) => None, + MerkleTree::Node(_, ref l, ref r) => Some((l, r)), + MerkleTree::Zero(depth) => Some((&ZERO_NODES[depth - 1], &ZERO_NODES[depth - 1])), + } + } + + /// Is this Merkle tree a leaf? + pub fn is_leaf(&self) -> bool { + match self { + MerkleTree::Leaf(_) => true, + _ => false, + } + } + + /// Return the leaf at `index` and a Merkle proof of its inclusion. + /// + /// The Merkle proof is in "bottom-up" order, starting with a leaf node + /// and moving up the tree. Its length will be exactly equal to `depth`. + pub fn generate_proof(&self, index: usize, depth: usize) -> (H256, Vec) { + let mut proof = vec![]; + let mut current_node = self; + let mut current_depth = depth; + while current_depth > 0 { + let ith_bit = (index >> (current_depth - 1)) & 0x01; + // Note: unwrap is safe because leaves are only ever constructed at depth == 0. + let (left, right) = current_node.left_and_right_branches().unwrap(); + + // Go right, include the left branch in the proof. + if ith_bit == 1 { + proof.push(left.hash()); + current_node = right; + } else { + proof.push(right.hash()); + current_node = left; + } + current_depth -= 1; + } + + debug_assert_eq!(proof.len(), depth); + debug_assert!(current_node.is_leaf()); + + // Put proof in bottom-up order. + proof.reverse(); + + (current_node.hash(), proof) + } +} + /// Verify a proof that `leaf` exists at `index` in a Merkle tree rooted at `root`. /// /// The `branch` argument is the main component of the proof: it should be a list of internal @@ -46,15 +178,66 @@ fn concat(mut vec1: Vec, mut vec2: Vec) -> Vec { vec1 } +/// Compute the hash of two other hashes concatenated. 
+fn hash_concat(h1: H256, h2: H256) -> H256 { + H256::from_slice(&hash(&concat( + h1.as_bytes().to_vec(), + h2.as_bytes().to_vec(), + ))) +} + #[cfg(test)] mod tests { use super::*; + use quickcheck::TestResult; + use quickcheck_macros::quickcheck; - fn hash_concat(h1: H256, h2: H256) -> H256 { - H256::from_slice(&hash(&concat( - h1.as_bytes().to_vec(), - h2.as_bytes().to_vec(), - ))) + /// Check that we can: + /// 1. Build a MerkleTree from arbitrary leaves and an arbitrary depth. + /// 2. Generate valid proofs for all of the leaves of this MerkleTree. + #[quickcheck] + fn quickcheck_create_and_verify(int_leaves: Vec, depth: usize) -> TestResult { + if depth > MAX_TREE_DEPTH || int_leaves.len() > 2usize.pow(depth as u32) { + return TestResult::discard(); + } + + let leaves: Vec<_> = int_leaves.into_iter().map(H256::from_low_u64_be).collect(); + let merkle_tree = MerkleTree::create(&leaves, depth); + let merkle_root = merkle_tree.hash(); + + let proofs_ok = (0..leaves.len()).into_iter().all(|i| { + let (leaf, branch) = merkle_tree.generate_proof(i, depth); + leaf == leaves[i] && verify_merkle_proof(leaf, &branch, depth, i, merkle_root) + }); + + TestResult::from_bool(proofs_ok) + } + + #[test] + fn sparse_zero_correct() { + let depth = 2; + let zero = H256::from([0x00; 32]); + let dense_tree = MerkleTree::create(&[zero, zero, zero, zero], depth); + let sparse_tree = MerkleTree::create(&[], depth); + assert_eq!(dense_tree.hash(), sparse_tree.hash()); + } + + #[test] + fn create_small_example() { + // Construct a small merkle tree manually and check that it's consistent with + // the MerkleTree type. 
+ let leaf_b00 = H256::from([0xAA; 32]); + let leaf_b01 = H256::from([0xBB; 32]); + let leaf_b10 = H256::from([0xCC; 32]); + let leaf_b11 = H256::from([0xDD; 32]); + + let node_b0x = hash_concat(leaf_b00, leaf_b01); + let node_b1x = hash_concat(leaf_b10, leaf_b11); + + let root = hash_concat(node_b0x, node_b1x); + + let tree = MerkleTree::create(&[leaf_b00, leaf_b01, leaf_b10, leaf_b11], 2); + assert_eq!(tree.hash(), root); } #[test] From 6234adc0d6801b92811383f686f59a1fad8985bc Mon Sep 17 00:00:00 2001 From: Paul Hauner Date: Fri, 30 Aug 2019 15:33:34 +1000 Subject: [PATCH 154/305] Add interop-spec genesis procedure --- beacon_node/beacon_chain/Cargo.toml | 2 + .../beacon_chain/src/beacon_chain_builder.rs | 174 +++++++++++++++++- beacon_node/client/src/lib.rs | 4 +- eth2/operation_pool/src/lib.rs | 6 +- eth2/types/src/slot_epoch_macros.rs | 2 +- .../builders/testing_beacon_state_builder.rs | 2 +- eth2/utils/bls/src/fake_public_key.rs | 8 + 7 files changed, 181 insertions(+), 17 deletions(-) diff --git a/beacon_node/beacon_chain/Cargo.toml b/beacon_node/beacon_chain/Cargo.toml index 56cf7eed6..3378e6a34 100644 --- a/beacon_node/beacon_chain/Cargo.toml +++ b/beacon_node/beacon_chain/Cargo.toml @@ -6,6 +6,7 @@ edition = "2018" [dependencies] eth2_config = { path = "../../eth2/utils/eth2_config" } +merkle_proof = { path = "../../eth2/utils/merkle_proof" } store = { path = "../store" } parking_lot = "0.7" lazy_static = "1.3.0" @@ -21,6 +22,7 @@ eth2-libp2p = { path = "../eth2-libp2p" } slog = { version = "^2.2.3" , features = ["max_level_trace"] } sloggers = { version = "^0.3" } slot_clock = { path = "../../eth2/utils/slot_clock" } +eth2_hashing = { path = "../../eth2/utils/eth2_hashing" } eth2_ssz = "0.1" eth2_ssz_derive = "0.1" state_processing = { path = "../../eth2/state_processing" } diff --git a/beacon_node/beacon_chain/src/beacon_chain_builder.rs b/beacon_node/beacon_chain/src/beacon_chain_builder.rs index 223d99d8d..8a5190048 100644 --- 
a/beacon_node/beacon_chain/src/beacon_chain_builder.rs +++ b/beacon_node/beacon_chain/src/beacon_chain_builder.rs @@ -1,11 +1,20 @@ use super::bootstrapper::Bootstrapper; use crate::{BeaconChain, BeaconChainTypes}; +use eth2_hashing::hash; +use merkle_proof::MerkleTree; +use rayon::prelude::*; use slog::Logger; +use ssz::Encode; +use state_processing::initialize_beacon_state_from_eth1; use std::fs::File; use std::path::PathBuf; use std::sync::Arc; use std::time::SystemTime; -use types::{test_utils::TestingBeaconStateBuilder, BeaconBlock, BeaconState, ChainSpec, EthSpec}; +use tree_hash::{SignedRoot, TreeHash}; +use types::{ + test_utils::generate_deterministic_keypairs, BeaconBlock, BeaconState, ChainSpec, Deposit, + DepositData, Domain, EthSpec, Fork, Hash256, PublicKey, Signature, +}; enum BuildStrategy { FromGenesis { @@ -27,7 +36,7 @@ impl BeaconChainBuilder { minutes: u64, spec: ChainSpec, log: Logger, - ) -> Self { + ) -> Result { Self::quick_start(recent_genesis_time(minutes), validator_count, spec, log) } @@ -36,14 +45,10 @@ impl BeaconChainBuilder { validator_count: usize, spec: ChainSpec, log: Logger, - ) -> Self { - let (mut genesis_state, _keypairs) = - TestingBeaconStateBuilder::from_default_keypairs_file_if_exists(validator_count, &spec) - .build(); + ) -> Result { + let genesis_state = interop_genesis_state(validator_count, genesis_time, &spec)?; - genesis_state.genesis_time = genesis_time; - - Self::from_genesis_state(genesis_state, spec, log) + Ok(Self::from_genesis_state(genesis_state, spec, log)) } pub fn yaml_state(file: &PathBuf, spec: ChainSpec, log: Logger) -> Result { @@ -125,6 +130,95 @@ fn genesis_block(genesis_state: &BeaconState, spec: &ChainSpec) - genesis_block } +fn interop_genesis_state( + validator_count: usize, + genesis_time: u64, + spec: &ChainSpec, +) -> Result, String> { + let keypairs = generate_deterministic_keypairs(validator_count); + let eth1_block_hash = Hash256::from_slice(&[42; 32]); + let eth1_timestamp = 
2_u64.pow(40); + let amount = spec.max_effective_balance; + dbg!(amount); + + let withdrawal_credentials = |pubkey: &PublicKey| { + let mut credentials = hash(&pubkey.as_ssz_bytes()); + credentials[0] = spec.bls_withdrawal_prefix_byte; + Hash256::from_slice(&credentials) + }; + + let datas = keypairs + .into_par_iter() + .map(|keypair| { + let mut data = DepositData { + withdrawal_credentials: withdrawal_credentials(&keypair.pk), + pubkey: keypair.pk.into(), + amount, + signature: Signature::empty_signature().into(), + }; + + let domain = spec.get_domain( + spec.genesis_slot.epoch(T::slots_per_epoch()), + Domain::Deposit, + &Fork::default(), + ); + data.signature = Signature::new(&data.signed_root()[..], domain, &keypair.sk).into(); + + data + }) + .collect::>(); + + let deposit_root_leaves = datas + .par_iter() + .map(|data| Hash256::from_slice(&data.tree_hash_root())) + .collect::>(); + + let mut proofs = vec![]; + for i in 1..=deposit_root_leaves.len() { + // Note: this implementation is not so efficient. + // + // If `MerkleTree` had a push method, we could just build one tree and sample it instead of + // rebuilding the tree for each deposit. 
+ let tree = MerkleTree::create( + &deposit_root_leaves[0..i], + spec.deposit_contract_tree_depth as usize, + ); + + let (_, mut proof) = tree.generate_proof(i - 1, spec.deposit_contract_tree_depth as usize); + proof.push(Hash256::from_slice(&int_to_bytes32(i))); + + assert_eq!( + proof.len(), + spec.deposit_contract_tree_depth as usize + 1, + "Deposit proof should be correct len" + ); + + proofs.push(proof); + } + + let deposits = datas + .into_par_iter() + .zip(proofs.into_par_iter()) + .map(|(data, proof)| (data, proof.into())) + .map(|(data, proof)| Deposit { proof, data }) + .collect::>(); + + let mut state = + initialize_beacon_state_from_eth1(eth1_block_hash, eth1_timestamp, deposits, spec) + .map_err(|e| format!("Unable to initialize genesis state: {:?}", e))?; + + state.genesis_time = genesis_time; + + Ok(state) +} + +/// Returns `int` as little-endian bytes with a length of 32. +fn int_to_bytes32(int: usize) -> Vec { + let mut vec = int.to_le_bytes().to_vec(); + vec.resize(32, 0); + vec +} + /// Returns the system time, mod 30 minutes. /// /// Used for easily creating testnets. @@ -134,6 +228,66 @@ fn recent_genesis_time(minutes: u64) -> u64 { .unwrap() .as_secs(); let secs_after_last_period = now.checked_rem(minutes * 60).unwrap_or(0); - // genesis is now the last 15 minute block. 
now - secs_after_last_period } + +#[cfg(test)] +mod test { + use super::*; + use types::{EthSpec, MinimalEthSpec}; + + type TestEthSpec = MinimalEthSpec; + + #[test] + fn interop_state() { + let validator_count = 16; + let genesis_time = 42; + let spec = &TestEthSpec::default_spec(); + + let state = interop_genesis_state::(validator_count, genesis_time, spec) + .expect("should build state"); + + assert_eq!( + state.eth1_data.block_hash, + Hash256::from_slice(&[42; 32]), + "eth1 block hash should be co-ordinated junk" + ); + + assert_eq!( + state.genesis_time, genesis_time, + "genesis time should be as specified" + ); + + for b in &state.balances { + assert_eq!( + *b, spec.max_effective_balance, + "validator balances should be max effective balance" + ); + } + + for v in &state.validators { + let creds = v.withdrawal_credentials.as_bytes(); + assert_eq!( + creds[0], spec.bls_withdrawal_prefix_byte, + "first byte of withdrawal creds should be bls prefix" + ); + assert_eq!( + &creds[1..], + &hash(&v.pubkey.as_ssz_bytes())[1..], + "rest of withdrawal creds should be pubkey hash" + ) + } + + assert_eq!( + state.balances.len(), + validator_count, + "validator balances len should be correct" + ); + + assert_eq!( + state.validators.len(), + validator_count, + "validator count should be correct" + ); + } +} diff --git a/beacon_node/client/src/lib.rs b/beacon_node/client/src/lib.rs index 9876e9672..c7558dd5e 100644 --- a/beacon_node/client/src/lib.rs +++ b/beacon_node/client/src/lib.rs @@ -96,7 +96,7 @@ where *minutes, spec.clone(), log.clone(), - ), + )?, BeaconChainStartMethod::Generated { validator_count, genesis_time, @@ -105,7 +105,7 @@ where *validator_count, spec.clone(), log.clone(), - ), + )?, BeaconChainStartMethod::Yaml { file } => { BeaconChainBuilder::yaml_state(file, spec.clone(), log.clone())? 
} diff --git a/eth2/operation_pool/src/lib.rs b/eth2/operation_pool/src/lib.rs index 0badf3807..bb64c3ca2 100644 --- a/eth2/operation_pool/src/lib.rs +++ b/eth2/operation_pool/src/lib.rs @@ -16,9 +16,9 @@ use state_processing::per_block_processing::errors::{ }; use state_processing::per_block_processing::{ get_slashable_indices_modular, verify_attestation_for_block_inclusion, - verify_attestation_for_state, verify_attester_slashing, verify_exit, - verify_exit_time_independent_only, verify_proposer_slashing, verify_transfer, - verify_transfer_time_independent_only, VerifySignatures, + verify_attester_slashing, verify_exit, verify_exit_time_independent_only, + verify_proposer_slashing, verify_transfer, verify_transfer_time_independent_only, + VerifySignatures, }; use std::collections::{btree_map::Entry, hash_map, BTreeMap, HashMap, HashSet}; use std::marker::PhantomData; diff --git a/eth2/types/src/slot_epoch_macros.rs b/eth2/types/src/slot_epoch_macros.rs index 084ff98e7..62ca6b3af 100644 --- a/eth2/types/src/slot_epoch_macros.rs +++ b/eth2/types/src/slot_epoch_macros.rs @@ -182,7 +182,7 @@ macro_rules! impl_display { &self, record: &slog::Record, key: slog::Key, - serializer: &mut slog::Serializer, + serializer: &mut dyn slog::Serializer, ) -> slog::Result { slog::Value::serialize(&self.0, record, key, serializer) } diff --git a/eth2/types/src/test_utils/builders/testing_beacon_state_builder.rs b/eth2/types/src/test_utils/builders/testing_beacon_state_builder.rs index 4f8a2d924..cf8c9ec8e 100644 --- a/eth2/types/src/test_utils/builders/testing_beacon_state_builder.rs +++ b/eth2/types/src/test_utils/builders/testing_beacon_state_builder.rs @@ -94,7 +94,7 @@ impl TestingBeaconStateBuilder { /// Creates the builder from an existing set of keypairs. 
pub fn from_keypairs(keypairs: Vec, spec: &ChainSpec) -> Self { let validator_count = keypairs.len(); - let starting_balance = 32_000_000_000; + let starting_balance = spec.max_effective_balance; debug!( "Building {} Validator objects from keypairs...", diff --git a/eth2/utils/bls/src/fake_public_key.rs b/eth2/utils/bls/src/fake_public_key.rs index e8dafaca6..82b1c707f 100644 --- a/eth2/utils/bls/src/fake_public_key.rs +++ b/eth2/utils/bls/src/fake_public_key.rs @@ -1,5 +1,6 @@ use super::{SecretKey, BLS_PUBLIC_KEY_BYTE_SIZE}; use milagro_bls::G1Point; +use milagro_bls::PublicKey as RawPublicKey; use serde::de::{Deserialize, Deserializer}; use serde::ser::{Serialize, Serializer}; use serde_hex::{encode as hex_encode, HexVisitor}; @@ -24,6 +25,13 @@ impl FakePublicKey { Self::zero() } + pub fn from_raw(raw: RawPublicKey) -> Self { + Self { + bytes: raw.clone().as_bytes(), + point: G1Point::new(), + } + } + /// Creates a new all-zero's public key pub fn zero() -> Self { Self { From 9ffb6d0fe141f7919f7122f47d7b6a4c1b3a600a Mon Sep 17 00:00:00 2001 From: Paul Hauner Date: Fri, 30 Aug 2019 15:49:06 +1000 Subject: [PATCH 155/305] Fix fake_crypto test fails --- beacon_node/beacon_chain/src/beacon_chain_builder.rs | 4 ++++ eth2/state_processing/src/per_block_processing/tests.rs | 1 + eth2/state_processing/tests/tests.rs | 2 ++ eth2/utils/bls/src/public_key_bytes.rs | 1 + eth2/utils/bls/src/signature_bytes.rs | 1 + 5 files changed, 9 insertions(+) diff --git a/beacon_node/beacon_chain/src/beacon_chain_builder.rs b/beacon_node/beacon_chain/src/beacon_chain_builder.rs index 8a5190048..a569fe833 100644 --- a/beacon_node/beacon_chain/src/beacon_chain_builder.rs +++ b/beacon_node/beacon_chain/src/beacon_chain_builder.rs @@ -130,6 +130,10 @@ fn genesis_block(genesis_state: &BeaconState, spec: &ChainSpec) - genesis_block } +/// Builds a genesis state as defined by the Eth2 interop procedure (see below). 
+/// +/// Reference: +/// https://github.com/ethereum/eth2.0-pm/tree/6e41fcf383ebeb5125938850d8e9b4e9888389b4/interop/mocked_start fn interop_genesis_state( validator_count: usize, genesis_time: u64, diff --git a/eth2/state_processing/src/per_block_processing/tests.rs b/eth2/state_processing/src/per_block_processing/tests.rs index cf64dc85e..f419d5fae 100644 --- a/eth2/state_processing/src/per_block_processing/tests.rs +++ b/eth2/state_processing/src/per_block_processing/tests.rs @@ -1,4 +1,5 @@ #![cfg(all(test, not(feature = "fake_crypto")))] + use super::block_processing_builder::BlockProcessingBuilder; use super::errors::*; use crate::{per_block_processing, BlockSignatureStrategy}; diff --git a/eth2/state_processing/tests/tests.rs b/eth2/state_processing/tests/tests.rs index 43b66f3ed..a7390c850 100644 --- a/eth2/state_processing/tests/tests.rs +++ b/eth2/state_processing/tests/tests.rs @@ -1,3 +1,5 @@ +#![cfg(not(feature = "fake_crypto"))] + use state_processing::{ per_block_processing, test_utils::BlockBuilder, BlockProcessingError, BlockSignatureStrategy, }; diff --git a/eth2/utils/bls/src/public_key_bytes.rs b/eth2/utils/bls/src/public_key_bytes.rs index f75735140..afdbcb270 100644 --- a/eth2/utils/bls/src/public_key_bytes.rs +++ b/eth2/utils/bls/src/public_key_bytes.rs @@ -31,6 +31,7 @@ mod tests { } #[test] + #[cfg(not(feature = "fake_crypto"))] pub fn test_invalid_public_key() { let mut public_key_bytes = [0; BLS_PUBLIC_KEY_BYTE_SIZE]; public_key_bytes[0] = 255; //a_flag1 == b_flag1 == c_flag1 == 1 and x1 = 0 shouldn't be allowed diff --git a/eth2/utils/bls/src/signature_bytes.rs b/eth2/utils/bls/src/signature_bytes.rs index a30cecb4d..b89c0f0d1 100644 --- a/eth2/utils/bls/src/signature_bytes.rs +++ b/eth2/utils/bls/src/signature_bytes.rs @@ -32,6 +32,7 @@ mod tests { } #[test] + #[cfg(not(feature = "fake_crypto"))] pub fn test_invalid_signature() { let mut signature_bytes = [0; BLS_SIG_BYTE_SIZE]; signature_bytes[0] = 255; //a_flag1 == b_flag1 == c_flag1 
== 1 and x1 = 0 shouldn't be allowed From fcf16faad38573b6c4b655d2d589f469b6d8b051 Mon Sep 17 00:00:00 2001 From: Michael Sproul Date: Fri, 30 Aug 2019 16:16:38 +1000 Subject: [PATCH 156/305] Operations tests --- tests/ef_tests/src/cases.rs | 16 +- tests/ef_tests/src/cases/operations.rs | 193 ++++++++++++++++++ .../src/cases/operations_attestation.rs | 47 ----- .../src/cases/operations_attester_slashing.rs | 48 ----- .../src/cases/operations_block_header.rs | 44 ---- .../ef_tests/src/cases/operations_deposit.rs | 42 ---- tests/ef_tests/src/cases/operations_exit.rs | 45 ---- .../src/cases/operations_proposer_slashing.rs | 46 ----- .../ef_tests/src/cases/operations_transfer.rs | 47 ----- tests/ef_tests/src/handler.rs | 58 ++++-- tests/ef_tests/tests/tests.rs | 67 ++---- 11 files changed, 248 insertions(+), 405 deletions(-) create mode 100644 tests/ef_tests/src/cases/operations.rs delete mode 100644 tests/ef_tests/src/cases/operations_attestation.rs delete mode 100644 tests/ef_tests/src/cases/operations_attester_slashing.rs delete mode 100644 tests/ef_tests/src/cases/operations_block_header.rs delete mode 100644 tests/ef_tests/src/cases/operations_deposit.rs delete mode 100644 tests/ef_tests/src/cases/operations_exit.rs delete mode 100644 tests/ef_tests/src/cases/operations_proposer_slashing.rs delete mode 100644 tests/ef_tests/src/cases/operations_transfer.rs diff --git a/tests/ef_tests/src/cases.rs b/tests/ef_tests/src/cases.rs index e3ec54a7f..1192eb0a0 100644 --- a/tests/ef_tests/src/cases.rs +++ b/tests/ef_tests/src/cases.rs @@ -11,13 +11,7 @@ mod bls_sign_msg; mod epoch_processing; mod genesis_initialization; mod genesis_validity; -mod operations_attestation; -mod operations_attester_slashing; -mod operations_block_header; -mod operations_deposit; -mod operations_exit; -mod operations_proposer_slashing; -mod operations_transfer; +mod operations; mod sanity_blocks; mod sanity_slots; mod shuffling; @@ -33,13 +27,7 @@ pub use bls_sign_msg::*; pub use 
epoch_processing::*; pub use genesis_initialization::*; pub use genesis_validity::*; -pub use operations_attestation::*; -pub use operations_attester_slashing::*; -pub use operations_block_header::*; -pub use operations_deposit::*; -pub use operations_exit::*; -pub use operations_proposer_slashing::*; -pub use operations_transfer::*; +pub use operations::*; pub use sanity_blocks::*; pub use sanity_slots::*; pub use shuffling::*; diff --git a/tests/ef_tests/src/cases/operations.rs b/tests/ef_tests/src/cases/operations.rs new file mode 100644 index 000000000..bcc104bad --- /dev/null +++ b/tests/ef_tests/src/cases/operations.rs @@ -0,0 +1,193 @@ +use super::*; +use crate::bls_setting::BlsSetting; +use crate::case_result::compare_beacon_state_results_without_caches; +use crate::type_name::TypeName; +use crate::yaml_decode::{ssz_decode_file, yaml_decode_file}; +use serde_derive::Deserialize; +use ssz::Decode; +use state_processing::per_block_processing::{ + errors::BlockProcessingError, process_attestations, process_attester_slashings, + process_block_header, process_deposits, process_exits, process_proposer_slashings, + process_transfers, +}; +use std::path::{Path, PathBuf}; +use types::{ + Attestation, AttesterSlashing, BeaconBlock, BeaconState, ChainSpec, Deposit, EthSpec, + ProposerSlashing, Transfer, VoluntaryExit, +}; + +#[derive(Debug, Clone, Default, Deserialize)] +struct Metadata { + description: Option, + bls_setting: Option, +} + +#[derive(Debug, Clone)] +pub struct Operations> { + pub path: PathBuf, + metadata: Metadata, + pub pre: BeaconState, + pub operation: O, + pub post: Option>, +} + +pub trait Operation: Decode + TypeName + std::fmt::Debug { + fn handler_name() -> String { + Self::name().to_lowercase() + } + + fn filename() -> String { + format!("{}.ssz", Self::handler_name()) + } + + fn apply_to( + &self, + state: &mut BeaconState, + spec: &ChainSpec, + ) -> Result<(), BlockProcessingError>; +} + +impl Operation for Attestation { + fn apply_to( + 
&self, + state: &mut BeaconState, + spec: &ChainSpec, + ) -> Result<(), BlockProcessingError> { + process_attestations(state, &[self.clone()], spec) + } +} + +impl Operation for AttesterSlashing { + fn handler_name() -> String { + "attester_slashing".into() + } + + fn apply_to( + &self, + state: &mut BeaconState, + spec: &ChainSpec, + ) -> Result<(), BlockProcessingError> { + process_attester_slashings(state, &[self.clone()], spec) + } +} + +impl Operation for Deposit { + fn apply_to( + &self, + state: &mut BeaconState, + spec: &ChainSpec, + ) -> Result<(), BlockProcessingError> { + process_deposits(state, &[self.clone()], spec) + } +} + +impl Operation for ProposerSlashing { + fn handler_name() -> String { + "proposer_slashing".into() + } + + fn apply_to( + &self, + state: &mut BeaconState, + spec: &ChainSpec, + ) -> Result<(), BlockProcessingError> { + process_proposer_slashings(state, &[self.clone()], spec) + } +} + +impl Operation for Transfer { + fn apply_to( + &self, + state: &mut BeaconState, + spec: &ChainSpec, + ) -> Result<(), BlockProcessingError> { + process_transfers(state, &[self.clone()], spec) + } +} + +impl Operation for VoluntaryExit { + fn handler_name() -> String { + "voluntary_exit".into() + } + + fn apply_to( + &self, + state: &mut BeaconState, + spec: &ChainSpec, + ) -> Result<(), BlockProcessingError> { + process_exits(state, &[self.clone()], spec) + } +} + +impl Operation for BeaconBlock { + fn handler_name() -> String { + "block_header".into() + } + + fn filename() -> String { + "block.ssz".into() + } + + fn apply_to( + &self, + state: &mut BeaconState, + spec: &ChainSpec, + ) -> Result<(), BlockProcessingError> { + process_block_header(state, self, spec, true) + } +} + +impl> LoadCase for Operations { + fn load_from_dir(path: &Path) -> Result { + let metadata_path = path.join("meta.yaml"); + let metadata: Metadata = if metadata_path.is_file() { + yaml_decode_file(&metadata_path)? 
+ } else { + Metadata::default() + }; + let pre = ssz_decode_file(&path.join("pre.ssz"))?; + let operation = ssz_decode_file(&path.join(O::filename()))?; + let post_filename = path.join("post.ssz"); + let post = if post_filename.is_file() { + Some(ssz_decode_file(&post_filename)?) + } else { + None + }; + + Ok(Self { + path: path.into(), + metadata, + pre, + operation, + post, + }) + } +} + +impl> Case for Operations { + fn description(&self) -> String { + self.metadata + .description + .clone() + .unwrap_or_else(String::new) + } + + fn path(&self) -> &Path { + &self.path + } + + fn result(&self, _case_index: usize) -> Result<(), Error> { + self.metadata.bls_setting.unwrap_or_default().check()?; + + let spec = &E::default_spec(); + let mut state = self.pre.clone(); + let mut expected = self.post.clone(); + + // Processing requires the epoch cache. + state.build_all_caches(spec).unwrap(); + + let mut result = self.operation.apply_to(&mut state, spec).map(|()| state); + + compare_beacon_state_results_without_caches(&mut result, &mut expected) + } +} diff --git a/tests/ef_tests/src/cases/operations_attestation.rs b/tests/ef_tests/src/cases/operations_attestation.rs deleted file mode 100644 index 76cbe3f18..000000000 --- a/tests/ef_tests/src/cases/operations_attestation.rs +++ /dev/null @@ -1,47 +0,0 @@ -use super::*; -use crate::bls_setting::BlsSetting; -use crate::case_result::compare_beacon_state_results_without_caches; -use serde_derive::Deserialize; -use state_processing::per_block_processing::process_attestations; -use types::{Attestation, BeaconState, EthSpec}; - -#[derive(Debug, Clone, Deserialize)] -#[serde(bound = "E: EthSpec")] -pub struct OperationsAttestation { - pub description: String, - pub bls_setting: Option, - pub pre: BeaconState, - pub attestation: Attestation, - pub post: Option>, -} - -impl YamlDecode for OperationsAttestation { - fn yaml_decode(yaml: &str) -> Result { - Ok(serde_yaml::from_str(&yaml).unwrap()) - } -} - -impl Case for 
OperationsAttestation { - fn description(&self) -> String { - self.description.clone() - } - - fn result(&self, _case_index: usize) -> Result<(), Error> { - let spec = &E::default_spec(); - - self.bls_setting.unwrap_or_default().check()?; - - let mut state = self.pre.clone(); - let attestation = self.attestation.clone(); - let mut expected = self.post.clone(); - - // Processing requires the epoch cache. - state.build_all_caches(spec).unwrap(); - - let result = process_attestations(&mut state, &[attestation], spec); - - let mut result = result.and_then(|_| Ok(state)); - - compare_beacon_state_results_without_caches(&mut result, &mut expected) - } -} diff --git a/tests/ef_tests/src/cases/operations_attester_slashing.rs b/tests/ef_tests/src/cases/operations_attester_slashing.rs deleted file mode 100644 index c658b1af4..000000000 --- a/tests/ef_tests/src/cases/operations_attester_slashing.rs +++ /dev/null @@ -1,48 +0,0 @@ -use super::*; -use crate::bls_setting::BlsSetting; -use crate::case_result::compare_beacon_state_results_without_caches; -use serde_derive::Deserialize; -use state_processing::per_block_processing::process_attester_slashings; -use types::{AttesterSlashing, BeaconState, EthSpec}; - -#[derive(Debug, Clone, Deserialize)] -pub struct OperationsAttesterSlashing { - pub description: String, - pub bls_setting: Option, - #[serde(bound = "E: EthSpec")] - pub pre: BeaconState, - #[serde(bound = "E: EthSpec")] - pub attester_slashing: AttesterSlashing, - #[serde(bound = "E: EthSpec")] - pub post: Option>, -} - -impl YamlDecode for OperationsAttesterSlashing { - fn yaml_decode(yaml: &str) -> Result { - Ok(serde_yaml::from_str(yaml).unwrap()) - } -} - -impl Case for OperationsAttesterSlashing { - fn description(&self) -> String { - self.description.clone() - } - - fn result(&self, _case_index: usize) -> Result<(), Error> { - self.bls_setting.unwrap_or_default().check()?; - - let mut state = self.pre.clone(); - let attester_slashing = 
self.attester_slashing.clone(); - let mut expected = self.post.clone(); - - // Processing requires the epoch cache. - state.build_all_caches(&E::default_spec()).unwrap(); - - let result = - process_attester_slashings(&mut state, &[attester_slashing], &E::default_spec()); - - let mut result = result.and_then(|_| Ok(state)); - - compare_beacon_state_results_without_caches(&mut result, &mut expected) - } -} diff --git a/tests/ef_tests/src/cases/operations_block_header.rs b/tests/ef_tests/src/cases/operations_block_header.rs deleted file mode 100644 index 8261b16d9..000000000 --- a/tests/ef_tests/src/cases/operations_block_header.rs +++ /dev/null @@ -1,44 +0,0 @@ -use super::*; -use crate::bls_setting::BlsSetting; -use crate::case_result::compare_beacon_state_results_without_caches; -use serde_derive::Deserialize; -use state_processing::per_block_processing::process_block_header; -use types::{BeaconBlock, BeaconState, EthSpec}; - -#[derive(Debug, Clone, Deserialize)] -#[serde(bound = "E: EthSpec")] -pub struct OperationsBlockHeader { - pub description: String, - pub bls_setting: Option, - pub pre: BeaconState, - pub block: BeaconBlock, - pub post: Option>, -} - -impl YamlDecode for OperationsBlockHeader { - fn yaml_decode(yaml: &str) -> Result { - Ok(serde_yaml::from_str(yaml).unwrap()) - } -} - -impl Case for OperationsBlockHeader { - fn description(&self) -> String { - self.description.clone() - } - - fn result(&self, _case_index: usize) -> Result<(), Error> { - let spec = &E::default_spec(); - - self.bls_setting.unwrap_or_default().check()?; - - let mut state = self.pre.clone(); - let mut expected = self.post.clone(); - - // Processing requires the epoch cache. 
- state.build_all_caches(spec).unwrap(); - - let mut result = process_block_header(&mut state, &self.block, spec, true).map(|_| state); - - compare_beacon_state_results_without_caches(&mut result, &mut expected) - } -} diff --git a/tests/ef_tests/src/cases/operations_deposit.rs b/tests/ef_tests/src/cases/operations_deposit.rs deleted file mode 100644 index 801c02029..000000000 --- a/tests/ef_tests/src/cases/operations_deposit.rs +++ /dev/null @@ -1,42 +0,0 @@ -use super::*; -use crate::bls_setting::BlsSetting; -use crate::case_result::compare_beacon_state_results_without_caches; -use serde_derive::Deserialize; -use state_processing::per_block_processing::process_deposits; -use types::{BeaconState, Deposit, EthSpec}; - -#[derive(Debug, Clone, Deserialize)] -#[serde(bound = "E: EthSpec")] -pub struct OperationsDeposit { - pub description: String, - pub bls_setting: Option, - pub pre: BeaconState, - pub deposit: Deposit, - pub post: Option>, -} - -impl YamlDecode for OperationsDeposit { - fn yaml_decode(yaml: &str) -> Result { - Ok(serde_yaml::from_str(yaml).unwrap()) - } -} - -impl Case for OperationsDeposit { - fn description(&self) -> String { - self.description.clone() - } - - fn result(&self, _case_index: usize) -> Result<(), Error> { - self.bls_setting.unwrap_or_default().check()?; - - let mut state = self.pre.clone(); - let deposit = self.deposit.clone(); - let mut expected = self.post.clone(); - - let result = process_deposits(&mut state, &[deposit], &E::default_spec()); - - let mut result = result.and_then(|_| Ok(state)); - - compare_beacon_state_results_without_caches(&mut result, &mut expected) - } -} diff --git a/tests/ef_tests/src/cases/operations_exit.rs b/tests/ef_tests/src/cases/operations_exit.rs deleted file mode 100644 index d7e53bcb5..000000000 --- a/tests/ef_tests/src/cases/operations_exit.rs +++ /dev/null @@ -1,45 +0,0 @@ -use super::*; -use crate::bls_setting::BlsSetting; -use crate::case_result::compare_beacon_state_results_without_caches; -use 
serde_derive::Deserialize; -use state_processing::per_block_processing::process_exits; -use types::{BeaconState, EthSpec, VoluntaryExit}; - -#[derive(Debug, Clone, Deserialize)] -#[serde(bound = "E: EthSpec")] -pub struct OperationsExit { - pub description: String, - pub bls_setting: Option, - pub pre: BeaconState, - pub voluntary_exit: VoluntaryExit, - pub post: Option>, -} - -impl YamlDecode for OperationsExit { - fn yaml_decode(yaml: &str) -> Result { - Ok(serde_yaml::from_str(yaml).unwrap()) - } -} - -impl Case for OperationsExit { - fn description(&self) -> String { - self.description.clone() - } - - fn result(&self, _case_index: usize) -> Result<(), Error> { - self.bls_setting.unwrap_or_default().check()?; - - let mut state = self.pre.clone(); - let exit = self.voluntary_exit.clone(); - let mut expected = self.post.clone(); - - // Exit processing requires the epoch cache. - state.build_all_caches(&E::default_spec()).unwrap(); - - let result = process_exits(&mut state, &[exit], &E::default_spec()); - - let mut result = result.and_then(|_| Ok(state)); - - compare_beacon_state_results_without_caches(&mut result, &mut expected) - } -} diff --git a/tests/ef_tests/src/cases/operations_proposer_slashing.rs b/tests/ef_tests/src/cases/operations_proposer_slashing.rs deleted file mode 100644 index e52e84f39..000000000 --- a/tests/ef_tests/src/cases/operations_proposer_slashing.rs +++ /dev/null @@ -1,46 +0,0 @@ -use super::*; -use crate::bls_setting::BlsSetting; -use crate::case_result::compare_beacon_state_results_without_caches; -use serde_derive::Deserialize; -use state_processing::per_block_processing::process_proposer_slashings; -use types::{BeaconState, EthSpec, ProposerSlashing}; - -#[derive(Debug, Clone, Deserialize)] -#[serde(bound = "E: EthSpec")] -pub struct OperationsProposerSlashing { - pub description: String, - pub bls_setting: Option, - pub pre: BeaconState, - pub proposer_slashing: ProposerSlashing, - pub post: Option>, -} - -impl YamlDecode for 
OperationsProposerSlashing { - fn yaml_decode(yaml: &str) -> Result { - Ok(serde_yaml::from_str(yaml).unwrap()) - } -} - -impl Case for OperationsProposerSlashing { - fn description(&self) -> String { - self.description.clone() - } - - fn result(&self, _case_index: usize) -> Result<(), Error> { - self.bls_setting.unwrap_or_default().check()?; - - let mut state = self.pre.clone(); - let proposer_slashing = self.proposer_slashing.clone(); - let mut expected = self.post.clone(); - - // Processing requires the epoch cache. - state.build_all_caches(&E::default_spec()).unwrap(); - - let result = - process_proposer_slashings(&mut state, &[proposer_slashing], &E::default_spec()); - - let mut result = result.and_then(|_| Ok(state)); - - compare_beacon_state_results_without_caches(&mut result, &mut expected) - } -} diff --git a/tests/ef_tests/src/cases/operations_transfer.rs b/tests/ef_tests/src/cases/operations_transfer.rs deleted file mode 100644 index 250f58769..000000000 --- a/tests/ef_tests/src/cases/operations_transfer.rs +++ /dev/null @@ -1,47 +0,0 @@ -use super::*; -use crate::bls_setting::BlsSetting; -use crate::case_result::compare_beacon_state_results_without_caches; -use serde_derive::Deserialize; -use state_processing::per_block_processing::process_transfers; -use types::{BeaconState, EthSpec, Transfer}; - -#[derive(Debug, Clone, Deserialize)] -#[serde(bound = "E: EthSpec")] -pub struct OperationsTransfer { - pub description: String, - pub bls_setting: Option, - pub pre: BeaconState, - pub transfer: Transfer, - pub post: Option>, -} - -impl YamlDecode for OperationsTransfer { - fn yaml_decode(yaml: &str) -> Result { - Ok(serde_yaml::from_str(yaml).unwrap()) - } -} - -impl Case for OperationsTransfer { - fn description(&self) -> String { - self.description.clone() - } - - fn result(&self, _case_index: usize) -> Result<(), Error> { - self.bls_setting.unwrap_or_default().check()?; - - let mut state = self.pre.clone(); - let transfer = self.transfer.clone(); - let 
mut expected = self.post.clone(); - - // Transfer processing requires the epoch cache. - state.build_all_caches(&E::default_spec()).unwrap(); - - let spec = E::default_spec(); - - let result = process_transfers(&mut state, &[transfer], &spec); - - let mut result = result.and_then(|_| Ok(state)); - - compare_beacon_state_results_without_caches(&mut result, &mut expected) - } -} diff --git a/tests/ef_tests/src/handler.rs b/tests/ef_tests/src/handler.rs index 02518a13d..ca1304136 100644 --- a/tests/ef_tests/src/handler.rs +++ b/tests/ef_tests/src/handler.rs @@ -1,4 +1,4 @@ -use crate::cases::{self, Case, Cases, EpochTransition, LoadCase}; +use crate::cases::{self, Case, Cases, EpochTransition, LoadCase, Operation}; use crate::type_name::TypeName; use crate::EfTest; use std::fs; @@ -20,7 +20,7 @@ pub trait Handler { fn runner_name() -> &'static str; - fn handler_name() -> &'static str; + fn handler_name() -> String; fn run() { let handler_path = PathBuf::from(env!("CARGO_MANIFEST_DIR")) @@ -64,8 +64,8 @@ macro_rules! 
bls_handler { "bls" } - fn handler_name() -> &'static str { - $handler_name + fn handler_name() -> String { + $handler_name.into() } } }; @@ -106,8 +106,8 @@ where "ssz_static" } - fn handler_name() -> &'static str { - T::name() + fn handler_name() -> String { + T::name().into() } } @@ -126,8 +126,8 @@ where "ssz_static" } - fn handler_name() -> &'static str { - T::name() + fn handler_name() -> String { + T::name().into() } } @@ -144,8 +144,8 @@ impl Handler for ShufflingHandler { "shuffling" } - fn handler_name() -> &'static str { - "core" + fn handler_name() -> String { + "core".into() } } @@ -162,8 +162,8 @@ impl Handler for SanityBlocksHandler { "sanity" } - fn handler_name() -> &'static str { - "blocks" + fn handler_name() -> String { + "blocks".into() } } @@ -180,8 +180,8 @@ impl Handler for SanitySlotsHandler { "sanity" } - fn handler_name() -> &'static str { - "slots" + fn handler_name() -> String { + "slots".into() } } @@ -198,8 +198,8 @@ impl> Handler for EpochProcessingHa "epoch_processing" } - fn handler_name() -> &'static str { - T::name() + fn handler_name() -> String { + T::name().into() } } @@ -216,8 +216,8 @@ impl Handler for GenesisValidityHandler { "genesis" } - fn handler_name() -> &'static str { - "validity" + fn handler_name() -> String { + "validity".into() } } @@ -234,7 +234,25 @@ impl Handler for GenesisInitializationHandler { "genesis" } - fn handler_name() -> &'static str { - "initialization" + fn handler_name() -> String { + "initialization".into() + } +} + +pub struct OperationsHandler(PhantomData<(E, O)>); + +impl> Handler for OperationsHandler { + type Case = cases::Operations; + + fn config_name() -> &'static str { + E::name() + } + + fn runner_name() -> &'static str { + "operations" + } + + fn handler_name() -> String { + O::handler_name() } } diff --git a/tests/ef_tests/tests/tests.rs b/tests/ef_tests/tests/tests.rs index 37740cec0..d663eb454 100644 --- a/tests/ef_tests/tests/tests.rs +++ b/tests/ef_tests/tests/tests.rs @@ -36,71 
+36,47 @@ fn shuffling() { ShufflingHandler::::run(); } -/* #[test] fn operations_deposit() { - yaml_files_in_test_dir(&Path::new("operations").join("deposit")) - .into_par_iter() - .for_each(|file| { - Doc::assert_tests_pass(file); - }); + OperationsHandler::::run(); + OperationsHandler::::run(); } #[test] fn operations_transfer() { - yaml_files_in_test_dir(&Path::new("operations").join("transfer")) - .into_par_iter() - .rev() - .for_each(|file| { - Doc::assert_tests_pass(file); - }); + OperationsHandler::::run(); + // Note: there are no transfer tests for mainnet } #[test] fn operations_exit() { - yaml_files_in_test_dir(&Path::new("operations").join("voluntary_exit")) - .into_par_iter() - .for_each(|file| { - Doc::assert_tests_pass(file); - }); + OperationsHandler::::run(); + OperationsHandler::::run(); } #[test] fn operations_proposer_slashing() { - yaml_files_in_test_dir(&Path::new("operations").join("proposer_slashing")) - .into_par_iter() - .for_each(|file| { - Doc::assert_tests_pass(file); - }); + OperationsHandler::::run(); + OperationsHandler::::run(); } #[test] fn operations_attester_slashing() { - yaml_files_in_test_dir(&Path::new("operations").join("attester_slashing")) - .into_par_iter() - .for_each(|file| { - Doc::assert_tests_pass(file); - }); + OperationsHandler::>::run(); + OperationsHandler::>::run(); } #[test] fn operations_attestation() { - yaml_files_in_test_dir(&Path::new("operations").join("attestation")) - .into_par_iter() - .for_each(|file| { - Doc::assert_tests_pass(file); - }); + OperationsHandler::>::run(); + OperationsHandler::>::run(); } #[test] fn operations_block_header() { - yaml_files_in_test_dir(&Path::new("operations").join("block_header")) - .into_par_iter() - .for_each(|file| { - Doc::assert_tests_pass(file); - }); + OperationsHandler::>::run(); + OperationsHandler::>::run(); } -*/ #[test] fn sanity_blocks() { @@ -114,18 +90,6 @@ fn sanity_slots() { SanitySlotsHandler::::run(); } -/* -#[test] -#[cfg(not(feature = 
"fake_crypto"))] -fn bls() { - yaml_files_in_test_dir(&Path::new("bls")) - .into_iter() - .for_each(|file| { - Doc::assert_tests_pass(file); - }); -} -*/ - #[test] #[cfg(not(feature = "fake_crypto"))] fn bls_aggregate_pubkeys() { @@ -259,6 +223,5 @@ fn genesis_initialization() { #[test] fn genesis_validity() { GenesisValidityHandler::::run(); - // TODO: mainnet tests don't exist yet - // GenesisValidityHandler::::run(); + // Note: there are no genesis validity tests for mainnet } From a6e6827337cb62041b1b3e64480854e70bd3016b Mon Sep 17 00:00:00 2001 From: Paul Hauner Date: Fri, 30 Aug 2019 16:48:22 +1000 Subject: [PATCH 157/305] Remove pub const from interop keypairs --- eth2/utils/eth2_interop_keypairs/src/lib.rs | 1 - 1 file changed, 1 deletion(-) diff --git a/eth2/utils/eth2_interop_keypairs/src/lib.rs b/eth2/utils/eth2_interop_keypairs/src/lib.rs index 4c1320723..490477eb3 100644 --- a/eth2/utils/eth2_interop_keypairs/src/lib.rs +++ b/eth2/utils/eth2_interop_keypairs/src/lib.rs @@ -12,7 +12,6 @@ use eth2_hashing::hash; use milagro_bls::{Keypair, PublicKey, SecretKey}; use num_bigint::BigUint; -pub const CURVE_ORDER_BITS: usize = 255; pub const PRIVATE_KEY_BYTES: usize = 48; pub const HASH_BYTES: usize = 32; From 96fb3be2c753aeb9e4926c6b9bc99a374cfc115a Mon Sep 17 00:00:00 2001 From: Paul Hauner Date: Fri, 30 Aug 2019 17:02:57 +1000 Subject: [PATCH 158/305] Swap endianness of test keys --- eth2/utils/eth2_interop_keypairs/src/lib.rs | 40 +++++++++++-------- .../utils/eth2_interop_keypairs/tests/test.rs | 6 +-- 2 files changed, 27 insertions(+), 19 deletions(-) diff --git a/eth2/utils/eth2_interop_keypairs/src/lib.rs b/eth2/utils/eth2_interop_keypairs/src/lib.rs index 490477eb3..ac610ee77 100644 --- a/eth2/utils/eth2_interop_keypairs/src/lib.rs +++ b/eth2/utils/eth2_interop_keypairs/src/lib.rs @@ -1,10 +1,21 @@ //! Produces the "deterministic" validator private keys used for inter-operability testing for //! Ethereum 2.0 clients. //! -//! 
+/// Return the private key for the given `validator_index`, encoded as big-endian bytes and
+/// zero-padded on the left to `PRIVATE_KEY_BYTES`.
pub fn keypair(validator_index: usize) -> Keypair { - let bytes = le_private_key(validator_index); - - let sk = - SecretKey::from_bytes(&swap_bytes(bytes.to_vec())).expect("Should be valid private key"); + let sk = SecretKey::from_bytes(&be_private_key(validator_index)).expect(&format!( + "Should build valid private key for validator index {}", + validator_index + )); Keypair { pk: PublicKey::from_secret_key(&sk), sk, } } - -fn swap_bytes(input: Vec) -> Vec { - let mut output = vec![]; - input.into_iter().rev().for_each(|byte| output.push(byte)); - output -} diff --git a/eth2/utils/eth2_interop_keypairs/tests/test.rs b/eth2/utils/eth2_interop_keypairs/tests/test.rs index 45f128db6..0d89eaa4d 100644 --- a/eth2/utils/eth2_interop_keypairs/tests/test.rs +++ b/eth2/utils/eth2_interop_keypairs/tests/test.rs @@ -1,5 +1,5 @@ #![cfg(test)] -use eth2_interop_keypairs::{keypair, le_private_key}; +use eth2_interop_keypairs::{be_private_key, keypair}; use num_bigint::BigUint; #[test] @@ -23,8 +23,8 @@ fn reference_private_keys() { .into_iter() .enumerate() .for_each(|(i, reference)| { - let bytes = le_private_key(i); - let num = BigUint::from_bytes_le(&bytes); + let bytes = be_private_key(i); + let num = BigUint::from_bytes_be(&bytes); assert_eq!(&num.to_str_radix(10), reference) }); } From 6ba093d14fd263073902e70b6ced458d45256e6b Mon Sep 17 00:00:00 2001 From: Paul Hauner Date: Fri, 30 Aug 2019 17:06:59 +1000 Subject: [PATCH 159/305] Add warning when disconnecting peer --- beacon_node/network/src/sync/simple_sync.rs | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/beacon_node/network/src/sync/simple_sync.rs b/beacon_node/network/src/sync/simple_sync.rs index d3ed2f3e4..222b4c7fc 100644 --- a/beacon_node/network/src/sync/simple_sync.rs +++ b/beacon_node/network/src/sync/simple_sync.rs @@ -525,6 +525,13 @@ impl NetworkContext { } pub fn disconnect(&mut self, peer_id: PeerId, reason: GoodbyeReason) { + warn!( + &self.log, + "Disconnecting peer"; + "reason" => 
format!("{:?}", reason), + "peer_id" => format!("{:?}", peer_id), + ); + self.send_rpc_request(None, peer_id, RPCRequest::Goodbye(reason)) // TODO: disconnect peers. } From 5f0509be501585f1043111df720f157441e8567a Mon Sep 17 00:00:00 2001 From: Paul Hauner Date: Sat, 31 Aug 2019 12:34:27 +1000 Subject: [PATCH 160/305] Improve and extend CLI interface --- beacon_node/beacon_chain/src/beacon_chain.rs | 19 +++-- beacon_node/client/src/lib.rs | 84 ++++++++++++-------- beacon_node/src/config.rs | 38 ++++++++- beacon_node/src/main.rs | 42 +++++++++- beacon_node/src/run.rs | 7 +- 5 files changed, 141 insertions(+), 49 deletions(-) diff --git a/beacon_node/beacon_chain/src/beacon_chain.rs b/beacon_node/beacon_chain/src/beacon_chain.rs index afc7a992a..6380d03b3 100644 --- a/beacon_node/beacon_chain/src/beacon_chain.rs +++ b/beacon_node/beacon_chain/src/beacon_chain.rs @@ -158,12 +158,6 @@ impl BeaconChain { genesis_state_root, )); - info!(log, "BeaconChain init"; - "genesis_validator_count" => genesis_state.validators.len(), - "genesis_state_root" => format!("{}", genesis_state_root), - "genesis_block_root" => format!("{}", genesis_block_root), - ); - // Slot clock let slot_clock = T::SlotClock::from_eth2_genesis( spec.genesis_slot, @@ -172,6 +166,12 @@ impl BeaconChain { ) .ok_or_else(|| Error::SlotClockDidNotStart)?; + info!(log, "Beacon chain initialized from genesis"; + "validator_count" => genesis_state.validators.len(), + "state_root" => format!("{}", genesis_state_root), + "block_root" => format!("{}", genesis_block_root), + ); + Ok(Self { spec, slot_clock, @@ -211,6 +211,13 @@ impl BeaconChain { let op_pool = p.op_pool.into_operation_pool(state, &spec); + info!(log, "Beacon chain initialized from store"; + "head_root" => format!("{}", p.canonical_head.beacon_block_root), + "head_epoch" => format!("{}", p.canonical_head.beacon_block.slot.epoch(T::EthSpec::slots_per_epoch())), + "finalized_root" => format!("{}", last_finalized_root), + "finalized_epoch" => 
format!("{}", last_finalized_block.slot.epoch(T::EthSpec::slots_per_epoch())), + ); + Ok(Some(BeaconChain { spec, slot_clock, diff --git a/beacon_node/client/src/lib.rs b/beacon_node/client/src/lib.rs index c7558dd5e..766d12c56 100644 --- a/beacon_node/client/src/lib.rs +++ b/beacon_node/client/src/lib.rs @@ -82,34 +82,70 @@ where let beacon_chain_builder = match &client_config.beacon_chain_start_method { BeaconChainStartMethod::Resume => { + info!( + log, + "Starting beacon chain"; + "method" => "resume" + ); BeaconChainBuilder::from_store(spec.clone(), log.clone()) } BeaconChainStartMethod::Mainnet => { crit!(log, "No mainnet beacon chain startup specification."); - return Err("Mainnet is not yet specified. We're working on it.".into()); + return Err("Mainnet launch is not yet announced.".into()); } BeaconChainStartMethod::RecentGenesis { validator_count, minutes, - } => BeaconChainBuilder::recent_genesis( - *validator_count, - *minutes, - spec.clone(), - log.clone(), - )?, + } => { + info!( + log, + "Starting beacon chain"; + "validator_count" => validator_count, + "minutes" => minutes, + "method" => "recent" + ); + BeaconChainBuilder::recent_genesis( + *validator_count, + *minutes, + spec.clone(), + log.clone(), + )? + } BeaconChainStartMethod::Generated { validator_count, genesis_time, - } => BeaconChainBuilder::quick_start( - *genesis_time, - *validator_count, - spec.clone(), - log.clone(), - )?, + } => { + info!( + log, + "Starting beacon chain"; + "validator_count" => validator_count, + "genesis_time" => genesis_time, + "method" => "quick" + ); + BeaconChainBuilder::quick_start( + *genesis_time, + *validator_count, + spec.clone(), + log.clone(), + )? + } BeaconChainStartMethod::Yaml { file } => { + info!( + log, + "Starting beacon chain"; + "file" => format!("{:?}", file), + "method" => "yaml" + ); BeaconChainBuilder::yaml_state(file, spec.clone(), log.clone())? } - BeaconChainStartMethod::HttpBootstrap { server, .. 
} => { + BeaconChainStartMethod::HttpBootstrap { server, port } => { + info!( + log, + "Starting beacon chain"; + "port" => port, + "server" => server, + "method" => "bootstrap" + ); BeaconChainBuilder::http_bootstrap(server, spec.clone(), log.clone())? } }; @@ -124,26 +160,6 @@ where panic!("Cannot start client before genesis!") } - // Block starting the client until we have caught the state up to the current slot. - // - // If we don't block here we create an initial scenario where we're unable to process any - // blocks and we're basically useless. - { - let state_slot = beacon_chain.head().beacon_state.slot; - let wall_clock_slot = beacon_chain - .slot() - .expect("Cannot start client before genesis"); - let slots_since_genesis = beacon_chain.slots_since_genesis().unwrap(); - info!( - log, - "BeaconState cache init"; - "state_slot" => state_slot, - "wall_clock_slot" => wall_clock_slot, - "slots_since_genesis" => slots_since_genesis, - "catchup_distance" => wall_clock_slot - state_slot, - ); - } - let network_config = &client_config.network; let (network, network_send) = NetworkService::new(beacon_chain.clone(), network_config, executor, log.clone())?; diff --git a/beacon_node/src/config.rs b/beacon_node/src/config.rs index 7c471e8ac..c4fa5eebc 100644 --- a/beacon_node/src/config.rs +++ b/beacon_node/src/config.rs @@ -15,6 +15,12 @@ type Result = std::result::Result; type Config = (ClientConfig, Eth2Config); /// Gets the fully-initialized global client and eth2 configuration objects. +/// +/// The top-level `clap` arguments should be provied as `cli_args`. +/// +/// The output of this function depends primarily upon the given `cli_args`, however it's behaviour +/// may be influenced by other external services like the contents of the file system or the +/// response of some remote server. 
pub fn get_configs(cli_args: &ArgMatches, log: &Logger) -> Result { let mut builder = ConfigBuilder::new(cli_args, log)?; @@ -95,7 +101,7 @@ fn process_testnet_subcommand( "path" => format!("{:?}", builder.data_dir) ); - // Start matching on the second subcommand (e.g., `testnet bootstrap ...`) + // Start matching on the second subcommand (e.g., `testnet bootstrap ...`). match cli_args.subcommand() { ("bootstrap", Some(cli_args)) => { let server = cli_args @@ -131,6 +137,24 @@ fn process_testnet_subcommand( minutes, }) } + ("quick", Some(cli_args)) => { + let validator_count = cli_args + .value_of("validator_count") + .ok_or_else(|| "No validator_count specified")? + .parse::() + .map_err(|e| format!("Unable to parse validator_count: {:?}", e))?; + + let genesis_time = cli_args + .value_of("genesis_time") + .ok_or_else(|| "No genesis time supplied")? + .parse::() + .map_err(|e| format!("Unable to parse genesis time: {:?}", e))?; + + builder.set_beacon_chain_start_method(BeaconChainStartMethod::Generated { + validator_count, + genesis_time, + }) + } _ => return Err("No testnet method specified. 
See 'testnet --help'.".into()), }; @@ -420,6 +444,18 @@ impl<'a> ConfigBuilder<'a> { self.client_config .apply_cli_args(cli_args, &mut self.log.clone())?; + if let Some(bump) = cli_args.value_of("port-bump") { + let bump = bump + .parse::() + .map_err(|e| format!("Unable to parse port bump: {}", e))?; + + self.client_config.network.libp2p_port += bump; + self.client_config.network.discovery_port += bump; + self.client_config.rpc.port += bump; + self.client_config.rpc.port += bump; + self.client_config.rest_api.port += bump; + } + if self.eth2_config.spec_constants != self.client_config.spec_constants { crit!(self.log, "Specification constants do not match."; "client_config" => format!("{}", self.client_config.spec_constants), diff --git a/beacon_node/src/main.rs b/beacon_node/src/main.rs index 8ab20a481..02e30b660 100644 --- a/beacon_node/src/main.rs +++ b/beacon_node/src/main.rs @@ -48,18 +48,29 @@ fn main() { /* * Network parameters. */ + .arg( + Arg::with_name("port-bump") + .long("port-bump") + .short("b") + .value_name("INCREMENT") + .help("Sets all listening TCP/UDP ports to default values, but with each port increased by \ + INCREMENT. Useful when starting multiple nodes on a single machine. Using increments \ + in multiples of 10 is recommended.") + .takes_value(true), + ) .arg( Arg::with_name("listen-address") .long("listen-address") .value_name("ADDRESS") .help("The address lighthouse will listen for UDP and TCP connections. (default 127.0.0.1).") - .takes_value(true), + .takes_value(true) ) .arg( Arg::with_name("port") .long("port") .value_name("PORT") .help("The TCP/UDP port to listen on. 
The UDP port can be modified by the --discovery-port flag.") + .conflicts_with("port-bump") .takes_value(true), ) .arg( @@ -81,6 +92,7 @@ fn main() { .long("disc-port") .value_name("PORT") .help("The discovery UDP port.") + .conflicts_with("port-bump") .takes_value(true), ) .arg( @@ -125,6 +137,7 @@ fn main() { Arg::with_name("rpc-port") .long("rpc-port") .help("Listen port for RPC endpoint.") + .conflicts_with("port-bump") .takes_value(true), ) /* Client related arguments */ @@ -147,6 +160,7 @@ fn main() { .long("api-port") .value_name("APIPORT") .help("Set the listen TCP port for the RESTful HTTP API server.") + .conflicts_with("port-bump") .takes_value(true), ) @@ -230,8 +244,6 @@ fn main() { .conflicts_with("random-datadir") ) /* - * Testnet sub-commands. - * * `boostrap` * * Start a new node by downloading genesis and network info from another node via the @@ -272,7 +284,29 @@ fn main() { .default_value("15") .help("The maximum number of minutes that will have elapsed before genesis")) ) - .subcommand(SubCommand::with_name("yaml-genesis-state") + /* + * `quick` + * + * Start a new node, specifying the number of validators and genesis time + */ + .subcommand(SubCommand::with_name("quick") + .about("Creates a new genesis state from the specified validator count and genesis time. \ + Compatible with the `quick-start genesis` defined in the eth2.0-pm repo.") + .arg(Arg::with_name("validator_count") + .value_name("VALIDATOR_COUNT") + .required(true) + .help("The number of validators in the genesis state")) + .arg(Arg::with_name("genesis_time") + .value_name("UNIX_EPOCH_SECONDS") + .required(true) + .help("The genesis time for the given state.")) + ) + /* + * `yaml` + * + * Start a new node, using a genesis state loaded from a YAML file + */ + .subcommand(SubCommand::with_name("yaml") .about("Creates a new datadir where the genesis state is read from YAML. 
Will fail to parse \ a YAML state that was generated to a different spec than that specified by --spec.") .arg(Arg::with_name("file") diff --git a/beacon_node/src/run.rs b/beacon_node/src/run.rs index 620cb64bb..26225cc92 100644 --- a/beacon_node/src/run.rs +++ b/beacon_node/src/run.rs @@ -41,11 +41,10 @@ pub fn run_beacon_node( info!( log, - "BeaconNode init"; - "p2p_listen_address" => format!("{:?}", &other_client_config.network.listen_address), - "network_dir" => format!("{:?}", other_client_config.network.network_dir), - "spec_constants" => &spec_constants, + "Starting beacon node"; + "p2p_listen_address" => format!("{}", &other_client_config.network.listen_address), "db_type" => &other_client_config.db_type, + "spec_constants" => &spec_constants, ); match (db_type.as_str(), spec_constants.as_str()) { From 5ee1bb20b74a5938e1b34dbd4c561e2525419b31 Mon Sep 17 00:00:00 2001 From: Luke Anderson Date: Sat, 31 Aug 2019 23:56:35 +1000 Subject: [PATCH 161/305] WIP: Furthered attestation production for validator. 
--- beacon_node/rest_api/src/validator.rs | 105 ++++++++++++++++++++++---- 1 file changed, 91 insertions(+), 14 deletions(-) diff --git a/beacon_node/rest_api/src/validator.rs b/beacon_node/rest_api/src/validator.rs index 450ef5e5f..1c72874f2 100644 --- a/beacon_node/rest_api/src/validator.rs +++ b/beacon_node/rest_api/src/validator.rs @@ -1,12 +1,12 @@ use super::{success_response, ApiResult}; use crate::{helpers::*, ApiError, UrlQuery}; use beacon_chain::{BeaconChain, BeaconChainTypes}; -use bls::{PublicKey, Signature}; +use bls::{AggregateSignature, PublicKey, Signature}; use hyper::{Body, Request}; use serde::{Deserialize, Serialize}; use std::sync::Arc; use types::beacon_state::EthSpec; -use types::{Epoch, RelativeEpoch, Shard, Slot}; +use types::{Attestation, BitList, Epoch, RelativeEpoch, Shard, Slot}; #[derive(Debug, Serialize, Deserialize)] pub struct ValidatorDuty { @@ -212,53 +212,130 @@ pub fn get_new_attestation(req: Request) -> let _ = beacon_chain .ensure_state_caches_are_built() .map_err(|e| ApiError::ServerError(format!("Unable to build state caches: {:?}", e)))?; + let head_state = &beacon_chain.head().beacon_state; let query = UrlQuery::from_request(&req)?; - let validator: PublicKey = match query.first_of(&["validator_pubkey"]) { + let val_pk: PublicKey = match query.first_of(&["validator_pubkey"]) { Ok((_, v)) => parse_pubkey(v.as_str())?, Err(e) => { return Err(e); } }; + // Get the validator index from the supplied public key + // If it does not exist in the index, we cannot continue. + let val_index: usize = match head_state.get_validator_index(&val_pk) { + Ok(Some(i)) => i, + Ok(None) => { + return Err(ApiError::InvalidQueryParams( + "The provided validator public key does not correspond to a validator index." + .into(), + )); + } + Err(e) => { + return Err(ApiError::ServerError(format!( + "Unable to read validator index cache. {:?}", + e + ))); + } + }; + // Get the duties of the validator, to make sure they match up. 
+ // If they don't have duties this epoch, then return an error + let val_duty = match head_state.get_attestation_duties(val_index, RelativeEpoch::Current) { + Ok(Some(d)) => d, + Ok(None) => { + return Err(ApiError::InvalidQueryParams("No validator duties could be found for the requested validator. Cannot provide valid attestation.".into())); + } + Err(e) => { + return Err(ApiError::ServerError(format!( + "unable to read cache for attestation duties: {:?}", + e + ))) + } + }; + + // Check that we are requesting an attestation during the slot where it is relevant. + let present_slot = match beacon_chain.read_slot_clock() { + Some(s) => s, + None => { + return Err(ApiError::ServerError( + "Beacon node is unable to determine present slot, either the state isn't generated or the chain hasn't begun.".into() + )); + } + }; + if val_duty.slot != present_slot { + return Err(ApiError::InvalidQueryParams(format!("Validator is only able to request an attestation during the slot they are allocated. Current slot: {:?}, allocated slot: {:?}", head_state.slot, val_duty.slot))); + } + + // Parse the POC bit and insert it into the aggregation bits let poc_bit: bool = match query.first_of(&["poc_bit"]) { - Ok((_, v)) => v.parse::().map_err(|e| ApiError::InvalidQueryParams(format!("poc_bit is not a valid boolean value: {:?}", e)))?, + Ok((_, v)) => v.parse::().map_err(|e| { + ApiError::InvalidQueryParams(format!("poc_bit is not a valid boolean value: {:?}", e)) + })?, Err(e) => { return Err(e); } }; - //TODO: this is probably unnecessary if we're always doing it by current slot. + let mut aggregation_bits: BitList = BitList::with_capacity(val_duty.committee_len) + .expect("An empty BitList should always be created, or we have bigger problems.") + .into(); + aggregation_bits.set(val_duty.committee_index, poc_bit); + + // Allow a provided slot parameter to check against the expected slot as a sanity check. + // Presently, we don't support attestations at future or past slots. 
let _slot = match query.first_of(&["slot"]) { Ok((_, v)) => { let requested_slot = v.parse::().map_err(|e| { - ApiError::InvalidQueryParams(format!("Invalid slot parameter, must be a u64. {:?}", e)) + ApiError::InvalidQueryParams(format!( + "Invalid slot parameter, must be a u64. {:?}", + e + )) })?; let current_slot = beacon_chain.head().beacon_state.slot.as_u64(); if requested_slot != current_slot { return Err(ApiError::InvalidQueryParams(format!("Attestation data can only be requested for the current slot ({:?}), not your requested slot ({:?})", current_slot, requested_slot))); } Slot::new(requested_slot) - }, - Err(e) => { - return Err(e); } - }; - let shard: Shard = match query.first_of(&["shard"]) { - Ok((_, v)) => v.parse::().map_err(|e| ApiError::InvalidQueryParams(format!("Shard is not a valid u64 value: {:?}", e)))?, + Err(ApiError::InvalidQueryParams(_)) => { + // Just fill _slot with a dummy value for now, making the slot parameter optional + // We'll get the real slot from the ValidatorDuty + Slot::new(0) + } Err(e) => { return Err(e); } }; + let shard: Shard = match query.first_of(&["shard"]) { + Ok((_, v)) => v.parse::().map_err(|e| { + ApiError::InvalidQueryParams(format!("Shard is not a valid u64 value: {:?}", e)) + })?, + Err(e) => { + // This is a mandatory parameter, return the error + return Err(e); + } + }; let attestation_data = match beacon_chain.produce_attestation_data(shard) { Ok(v) => v, Err(e) => { - return Err(ApiError::ServerError(format!("Could not produce an attestation: {:?}", e))); + return Err(ApiError::ServerError(format!( + "Could not produce an attestation: {:?}", + e + ))); } }; + let attestation = Attestation { + aggregation_bits, + data: attestation_data, + custody_bits: BitList::with_capacity(val_duty.committee_len) + .expect("Should be able to create an empty BitList for the custody bits."), + signature: AggregateSignature::new(), + }; + //TODO: This is currently AttestationData, but should be IndexedAttestation? 
let body = Body::from( - serde_json::to_string(&attestation_data) + serde_json::to_string(&attestation) .expect("We should always be able to serialize a new attestation that we produced."), ); Ok(success_response(body)) From 14ea6f7710cb73ba10d0b03324cc02ba1ce5b29b Mon Sep 17 00:00:00 2001 From: Paul Hauner Date: Sun, 1 Sep 2019 11:31:18 +1000 Subject: [PATCH 162/305] Add newly created mdbook --- book/.gitignore | 1 + book/book.toml | 6 ++++ book/src/SUMMARY.md | 7 +++++ book/src/interop.md | 70 ++++++++++++++++++++++++++++++++++++++++++++ book/src/intro.md | 47 +++++++++++++++++++++++++++++ book/src/setup.md | 65 ++++++++++++++++++++++++++++++++++++++++ book/src/testnets.md | 64 ++++++++++++++++++++++++++++++++++++++++ 7 files changed, 260 insertions(+) create mode 100644 book/.gitignore create mode 100644 book/book.toml create mode 100644 book/src/SUMMARY.md create mode 100644 book/src/interop.md create mode 100644 book/src/intro.md create mode 100644 book/src/setup.md create mode 100644 book/src/testnets.md diff --git a/book/.gitignore b/book/.gitignore new file mode 100644 index 000000000..7585238ef --- /dev/null +++ b/book/.gitignore @@ -0,0 +1 @@ +book diff --git a/book/book.toml b/book/book.toml new file mode 100644 index 000000000..829c7b99c --- /dev/null +++ b/book/book.toml @@ -0,0 +1,6 @@ +[book] +authors = ["Paul Hauner"] +language = "en" +multilingual = false +src = "src" +title = "Lighthouse" diff --git a/book/src/SUMMARY.md b/book/src/SUMMARY.md new file mode 100644 index 000000000..e08af247c --- /dev/null +++ b/book/src/SUMMARY.md @@ -0,0 +1,7 @@ +# Summary + +* [Introduction](./intro.md) +* [Development Environment](./setup.md) +* [Testnets](./testnets.md) + * [Simple local testnet](./testnets.md) + * [Interop](./interop.md) diff --git a/book/src/interop.md b/book/src/interop.md new file mode 100644 index 000000000..79d4a1376 --- /dev/null +++ b/book/src/interop.md @@ -0,0 +1,70 @@ +# Lighthouse Interop Guide + +This guide is intended for other 
Ethereum 2.0 client developers performing +inter-operability testing with Lighthouse. + +To allow for faster iteration cycles without the "merging to master" overhead, +we will use the [`interop`](https://github.com/sigp/lighthouse/tree/interop) +branch of [sigp/lighthouse](https://github.com/sigp/lighthouse/tree/interop) +for September 2019 interop. **Please use ensure you `git checkout interop` +after cloning the repo.** + +## Environment + +All that is required for inter-op is a built and tested [development +environment](setup). When lighthouse boots, it will create the following +directories: + +- `~/.lighthouse`: database and configuration for the beacon node. +- `~/.lighthouse-validator`: database and configuration for the validator + client. + +After building the binaries with `cargo build --release --all`, there will be a +`target/release` directory in the root of the Lighthouse repository. This is +where the `beacon_node` and `validator_client` binaries are located. + +## Interop Procedure + +The following scenarios are documented: + +- [Starting a "quick-start" beacon node](#quick-start-beacon-node) from a + `(validator_count, genesis)` tuple. +- [Starting a validator client](#validator-client) with `n` interop keypairs. +- [Starting a node from a genesis state file](#starting-from-a-genesis-file). +- [Exporting a genesis state file](#exporting-a-genesis-file) from a running Lighthouse + node. + +First, setup a Lighthouse development environment and navigate to the +`target/release` directory (this is where the binaries are located). + +#### Quick-start Beacon Node + + +To start the node (each time creating a fresh database and configuration in +`~/.lighthouse`), use: + +``` +$ ./beacon_node testnet -f quick 8 1567222226 +``` + +>This method conforms the ["Quick-start +genesis"](https://github.com/ethereum/eth2.0-pm/tree/6e41fcf383ebeb5125938850d8e9b4e9888389b4/interop/mocked_start#quick-start-genesis) +method in the `ethereum/eth2.0-pm` repository. 
+> +> The `-f` flag ignores any existing database or configuration, backing them up +before re-initializing. `8` is the validator count and `1567222226` is the +genesis time. +> +> See `$ ./beacon_node testnet quick --help` for more configuration options. + +#### Validator Client + +**TODO** + +#### Starting from a genesis file + +**TODO** + +#### Exporting a genesis file + +**TODO** diff --git a/book/src/intro.md b/book/src/intro.md new file mode 100644 index 000000000..f290b7e40 --- /dev/null +++ b/book/src/intro.md @@ -0,0 +1,47 @@ +# Lighthouse Documentation + +[![Build Status]][Build Link] [![Doc Status]][Doc Link] [![Chat Badge]][Chat Link] + +[Build Status]: https://gitlab.sigmaprime.io/sigp/lighthouse/badges/master/build.svg +[Build Link]: https://gitlab.sigmaprime.io/sigp/lighthouse/pipelines +[Chat Badge]: https://img.shields.io/badge/chat-discord-%237289da +[Chat Link]: https://discord.gg/cyAszAh +[Doc Status]:https://img.shields.io/badge/rust--docs-master-orange +[Doc Link]: http://lighthouse-docs.sigmaprime.io/ + +Lighthouse is an **Ethereum 2.0 client** that connects to other Ethereum 2.0 +clients to form a resilient and decentralized proof-of-stake blockchain. + +It is written in Rust, maintained by Sigma Prime and funded by the Ethereum +Foundation, Consensys and other individuals and organisations. + +## Developer Resources + +Documentation is provided for **researchers and developers** working on +Ethereum 2.0 and assumes prior knowledge on the topic. + +- Get started with [development environment setup](setup.html). +- [Run a simple testnet](testnets.html) in Only Three CLI Commandsâ„¢. +- Read about our interop workflow. +- API? + +## Release + +Ethereum 2.0 is not fully specified or implemented and as such, Lighthouse is +still **under development**. + +We are on-track to provide a public, multi-client testnet in late-2019 and an +initial production-grade blockchain in 2020. 
+ +## Features + +Lighthouse has been in development since mid-2018 and has an extensive feature +set: + +- Libp2p networking stack, featuring Discovery v5. +- Optimized `BeaconChain` state machine, up-to-date and + passing all tests. +- RESTful HTTP API. +- Documented and feature-rich CLI interface. +- Capable of running small, local testnets with 250ms slot times. +- Detailed metrics exposed in the Prometheus format. diff --git a/book/src/setup.md b/book/src/setup.md new file mode 100644 index 000000000..e53ca93d8 --- /dev/null +++ b/book/src/setup.md @@ -0,0 +1,65 @@ +# Development Environment Setup + +Follow this guide to get a Lighthouse development environment up-and-running. + +See the [Quick instructions](#quick-instructions) for a summary or the +[Detailed instructions](#detailed-instructions) for clarification. + +## Quick instructions + +1. Install Rust + Cargo with [rustup](https://rustup.rs/). +1. Install build dependencies using your package manager. + - `$ clang protobuf libssl-dev cmake git-lfs` + - Ensure [git-lfs](https://git-lfs.github.com/) is installed with `git lfs + install`. +1. Clone the [sigp/lighthouse](https://github.com/sigp/lighthouse), ensuring to + **initialize submodules**. +1. In the root of the repo, run the tests with `cargo test --all --release`. +1. Then, build the binaries with `cargo build --all --release`. +1. Lighthouse is now fully built and tested. + +_Note: first-time compilation may take several minutes._ + +## Detailed instructions + +A fully-featured development environment can be achieved with the following +steps: + + 1. Install [rustup](https://rustup.rs/). + 1. Use the command `rustup show` to get information about the Rust + installation. You should see that the active tool-chain is the stable + version. + - Updates can be performed using` rustup update`, Lighthouse generally + requires a recent version of Rust. + 1. 
Install build dependencies (Arch packages are listed here, your + distribution will likely be similar): + - `clang`: required by RocksDB. + - `protobuf`: required for protobuf serialization (gRPC) + - `libssl-dev`: also gRPC + - `cmake`: required for building protobuf + - `git-lfs`: The Git extension for [Large File + Support](https://git-lfs.github.com/) (required for Ethereum Foundation + test vectors). + 1. Clone the repository with submodules: `git clone --recursive + https://github.com/sigp/lighthouse`. If you're already cloned the repo, + ensure testing submodules are present: `$ git submodule init; git + submodule update` + 1. Change directory to the root of the repository. + 1. Run the test suite with `cargo test --all --release`. The build and test + process can take several minutes. If you experience any failures on + `master`, please raise an + [issue](https://github.com/sigp/lighthouse/issues). + +### Notes: + +Lighthouse targets Rust `stable` but generally runs on `nightly` too. + +#### Note for Windows users: + +Perl may also be required to build lighthouse. You can install [Strawberry +Perl](http://strawberryperl.com/), or alternatively use a choco install command +`choco install strawberryperl`. + +Additionally, the dependency `protoc-grpcio v0.3.1` is reported to have issues +compiling in Windows. You can specify a known working version by editing +version in `protos/Cargo.toml` section to `protoc-grpcio = "<=0.3.0"`. diff --git a/book/src/testnets.md b/book/src/testnets.md new file mode 100644 index 000000000..c07797ba0 --- /dev/null +++ b/book/src/testnets.md @@ -0,0 +1,64 @@ +# Simple Local Testnet + +You can setup a local, two-node testnet in **Only Three CLI Commandsâ„¢**. + +Follow the [Quick instructions](#tldr) version if you're confident, or see +[Detailed instructions](#detail) for more. + + +## Quick instructions + +Setup a development environment, build the project and navigate to the +`target/release` directory. + +1. 
Start the first node: `$ ./beacon_node testnet -f recent 8` +1. Start a validator client: **TODO** +1. Start another node `$ ./beacon_node -b 10 testnet -f bootstrap http://localhost:5052` + +_Repeat #3 to add more nodes._ + +## Detailed instructions + +First, setup a Lighthouse development environment and navigate to the +`target/release` directory (this is where the binaries are located). + +## Starting the Beacon Node + +Start a new node (creating a fresh database and configuration in `~/.lighthouse`), using: + +``` +$ ./beacon_node testnet -f recent 8 +``` + +> The `-f` flag ignores any existing database or configuration, backing them up +before re-initializing. `8` is number of validators with deposits in the +genesis state. +> +> See `$ ./beacon_node testnet recent --help` for more configuration options, +including `minimal`/`mainnet` specification. + +## Starting the Validator Client + +**TODO** + +## Adding another Beacon Node + +You may connect another (non-validating) node to your local network using the +lighthouse `bootstrap` command. + +In a new terminal terminal, run: + + +``` +$ ./beacon_node -b 10 testnet -r bootstrap http://localhost:5052 +``` + +> The `-b` (or `--port-bump`) increases all the listening TCP/UDP ports of the +new node to `10` higher. Your first node's HTTP server was at TCP `5052` but +this one will be at `5062`. +> +> The `-r` flag creates a new data directory in your home with a random string +appended, to avoid conflicting with any other running node. +> +> The HTTP address is the API of the first node. The new node will download +configuration via HTTP before starting sync via libp2p. From 8ea11675632b7190f344f8eb752d152d9f9b9d35 Mon Sep 17 00:00:00 2001 From: Luke Anderson Date: Sun, 1 Sep 2019 15:09:01 +1000 Subject: [PATCH 163/305] Factored out getting beacon_chain from request into it's own function. 
--- beacon_node/rest_api/src/helpers.rs | 13 +++++ beacon_node/rest_api/src/lib.rs | 4 +- beacon_node/rest_api/src/validator.rs | 84 ++++++++++++++++++--------- 3 files changed, 72 insertions(+), 29 deletions(-) diff --git a/beacon_node/rest_api/src/helpers.rs b/beacon_node/rest_api/src/helpers.rs index 2477884c4..98293e75c 100644 --- a/beacon_node/rest_api/src/helpers.rs +++ b/beacon_node/rest_api/src/helpers.rs @@ -5,6 +5,7 @@ use hex; use hyper::{Body, Request}; use store::{iter::AncestorIter, Store}; use types::{BeaconState, EthSpec, Hash256, RelativeEpoch, Slot}; +use std::sync::Arc; /// Parse a slot from a `0x` preixed string. /// @@ -169,6 +170,18 @@ pub fn implementation_pending_response(_req: Request) -> ApiResult { )) } +pub fn get_beacon_chain_from_request(req: &Request) -> Result>, ApiError> { + // Get beacon state + let beacon_chain = req + .extensions() + .get::>>() + .ok_or_else(|| ApiError::ServerError("Request is missing the beacon chain extension".into()))?; + let _ = beacon_chain + .ensure_state_caches_are_built() + .map_err(|e| ApiError::ServerError(format!("Unable to build state caches: {:?}", e)))?; + Ok(beacon_chain.clone()) +} + #[cfg(test)] mod test { use super::*; diff --git a/beacon_node/rest_api/src/lib.rs b/beacon_node/rest_api/src/lib.rs index 2c7b90e3f..2c9c4011a 100644 --- a/beacon_node/rest_api/src/lib.rs +++ b/beacon_node/rest_api/src/lib.rs @@ -172,9 +172,9 @@ pub fn start_server( (&Method::POST, "/beacon/validator/block") => { helpers::implementation_pending_response(req) } - (&Method::GET, "/beacon/validator/attestation") => { + /*(&Method::GET, "/beacon/validator/attestation") => { validator::get_new_attestation::(req) - } + }*/ (&Method::POST, "/beacon/validator/attestation") => { helpers::implementation_pending_response(req) } diff --git a/beacon_node/rest_api/src/validator.rs b/beacon_node/rest_api/src/validator.rs index 1c72874f2..f60acbad8 100644 --- a/beacon_node/rest_api/src/validator.rs +++ 
b/beacon_node/rest_api/src/validator.rs @@ -5,6 +5,7 @@ use bls::{AggregateSignature, PublicKey, Signature}; use hyper::{Body, Request}; use serde::{Deserialize, Serialize}; use std::sync::Arc; +use std::borrow::Borrow; use types::beacon_state::EthSpec; use types::{Attestation, BitList, Epoch, RelativeEpoch, Shard, Slot}; @@ -33,15 +34,7 @@ impl ValidatorDuty { /// HTTP Handler to retrieve a the duties for a set of validators during a particular epoch pub fn get_validator_duties(req: Request) -> ApiResult { - // Get beacon state - let beacon_chain = req - .extensions() - .get::>>() - .ok_or_else(|| ApiError::ServerError("Beacon chain extension missing".to_string()))?; - //TODO Surely this state_cache thing is not necessary? - let _ = beacon_chain - .ensure_state_caches_are_built() - .map_err(|e| ApiError::ServerError(format!("Unable to build state caches: {:?}", e)))?; + let beacon_chain = get_beacon_chain_from_request::(&req)?; let head_state = &beacon_chain.head().beacon_state; // Parse and check query parameters @@ -146,15 +139,8 @@ pub fn get_validator_duties(req: Request) - /// HTTP Handler to produce a new BeaconBlock from the current state, ready to be signed by a validator. pub fn get_new_beacon_block(req: Request) -> ApiResult { - // Get beacon state - let beacon_chain = req - .extensions() - .get::>>() - .ok_or_else(|| ApiError::ServerError("Beacon chain extension missing".to_string()))?; - //TODO Surely this state_cache thing is not necessary? 
- let _ = beacon_chain - .ensure_state_caches_are_built() - .map_err(|e| ApiError::ServerError(format!("Unable to build state caches: {:?}", e)))?; + let beacon_chain = get_beacon_chain_from_request::(&req)?; + let head_state = &beacon_chain.head().beacon_state; let query = UrlQuery::from_request(&req)?; let slot = match query.first_of(&["slot"]) { @@ -201,17 +187,60 @@ pub fn get_new_beacon_block(req: Request) - Ok(success_response(body)) } +/// HTTP Handler to accept a validator-signed BeaconBlock, and publish it to the network. +pub fn publish_beacon_block(req: Request) -> ApiResult { + let beacon_chain = get_beacon_chain_from_request::(&req)?; + let head_state = &beacon_chain.head().beacon_state; + + let query = UrlQuery::from_request(&req)?; + let slot = match query.first_of(&["slot"]) { + Ok((_, v)) => Slot::new(v.parse::().map_err(|e| { + ApiError::InvalidQueryParams(format!("Invalid slot parameter, must be a u64. {:?}", e)) + })?), + Err(e) => { + return Err(e); + } + }; + let randao_reveal = match query.first_of(&["randao_reveal"]) { + Ok((_, v)) => Signature::from_bytes( + hex::decode(&v) + .map_err(|e| { + ApiError::InvalidQueryParams(format!( + "Invalid hex string for randao_reveal: {:?}", + e + )) + })? + .as_slice(), + ) + .map_err(|e| { + ApiError::InvalidQueryParams(format!("randao_reveal is not a valid signature: {:?}", e)) + })?, + Err(e) => { + return Err(e); + } + }; + + let new_block = match beacon_chain.produce_block(randao_reveal, slot) { + Ok((block, _state)) => block, + Err(e) => { + return Err(ApiError::ServerError(format!( + "Beacon node is not able to produce a block: {:?}", + e + ))); + } + }; + + let body = Body::from( + serde_json::to_string(&new_block) + .expect("We should always be able to serialize a new block that we produced."), + ); + Ok(success_response(body)) +} + +/* /// HTTP Handler to produce a new Attestation from the current state, ready to be signed by a validator. 
pub fn get_new_attestation(req: Request) -> ApiResult { - // Get beacon state - let beacon_chain = req - .extensions() - .get::>>() - .ok_or_else(|| ApiError::ServerError("Beacon chain extension missing".to_string()))?; - //TODO Surely this state_cache thing is not necessary? - let _ = beacon_chain - .ensure_state_caches_are_built() - .map_err(|e| ApiError::ServerError(format!("Unable to build state caches: {:?}", e)))?; + let beacon_chain = get_beacon_chain_from_request(req)?; let head_state = &beacon_chain.head().beacon_state; let query = UrlQuery::from_request(&req)?; @@ -340,3 +369,4 @@ pub fn get_new_attestation(req: Request) -> ); Ok(success_response(body)) } +*/ From 632c13a9ec18606ca0a97c86bb2e52a6b1643221 Mon Sep 17 00:00:00 2001 From: Luke Anderson Date: Sun, 1 Sep 2019 15:41:03 +1000 Subject: [PATCH 164/305] Fixing some API bits - Adding the validator routes into the main function. - Fixing the setting of the aggregation bits, and handling errors correctly. - Rust format fixes, and addressing compiler warnings. --- beacon_node/rest_api/src/helpers.rs | 10 +++++--- beacon_node/rest_api/src/lib.rs | 6 ++--- beacon_node/rest_api/src/validator.rs | 33 +++++++++++++-------------- 3 files changed, 26 insertions(+), 23 deletions(-) diff --git a/beacon_node/rest_api/src/helpers.rs b/beacon_node/rest_api/src/helpers.rs index 98293e75c..d47afc02c 100644 --- a/beacon_node/rest_api/src/helpers.rs +++ b/beacon_node/rest_api/src/helpers.rs @@ -3,9 +3,9 @@ use beacon_chain::{BeaconChain, BeaconChainTypes}; use bls::PublicKey; use hex; use hyper::{Body, Request}; +use std::sync::Arc; use store::{iter::AncestorIter, Store}; use types::{BeaconState, EthSpec, Hash256, RelativeEpoch, Slot}; -use std::sync::Arc; /// Parse a slot from a `0x` preixed string. 
/// @@ -170,12 +170,16 @@ pub fn implementation_pending_response(_req: Request) -> ApiResult { )) } -pub fn get_beacon_chain_from_request(req: &Request) -> Result>, ApiError> { +pub fn get_beacon_chain_from_request( + req: &Request, +) -> Result>, ApiError> { // Get beacon state let beacon_chain = req .extensions() .get::>>() - .ok_or_else(|| ApiError::ServerError("Request is missing the beacon chain extension".into()))?; + .ok_or_else(|| { + ApiError::ServerError("Request is missing the beacon chain extension".into()) + })?; let _ = beacon_chain .ensure_state_caches_are_built() .map_err(|e| ApiError::ServerError(format!("Unable to build state caches: {:?}", e)))?; diff --git a/beacon_node/rest_api/src/lib.rs b/beacon_node/rest_api/src/lib.rs index 2c9c4011a..b269bd476 100644 --- a/beacon_node/rest_api/src/lib.rs +++ b/beacon_node/rest_api/src/lib.rs @@ -170,11 +170,11 @@ pub fn start_server( validator::get_new_beacon_block::(req) } (&Method::POST, "/beacon/validator/block") => { - helpers::implementation_pending_response(req) + validator::publish_beacon_block::(req) } - /*(&Method::GET, "/beacon/validator/attestation") => { + (&Method::GET, "/beacon/validator/attestation") => { validator::get_new_attestation::(req) - }*/ + } (&Method::POST, "/beacon/validator/attestation") => { helpers::implementation_pending_response(req) } diff --git a/beacon_node/rest_api/src/validator.rs b/beacon_node/rest_api/src/validator.rs index f60acbad8..427f6a514 100644 --- a/beacon_node/rest_api/src/validator.rs +++ b/beacon_node/rest_api/src/validator.rs @@ -1,11 +1,9 @@ use super::{success_response, ApiResult}; use crate::{helpers::*, ApiError, UrlQuery}; -use beacon_chain::{BeaconChain, BeaconChainTypes}; +use beacon_chain::BeaconChainTypes; use bls::{AggregateSignature, PublicKey, Signature}; use hyper::{Body, Request}; use serde::{Deserialize, Serialize}; -use std::sync::Arc; -use std::borrow::Borrow; use types::beacon_state::EthSpec; use types::{Attestation, BitList, Epoch, 
RelativeEpoch, Shard, Slot}; @@ -140,7 +138,6 @@ pub fn get_validator_duties(req: Request) - /// HTTP Handler to produce a new BeaconBlock from the current state, ready to be signed by a validator. pub fn get_new_beacon_block(req: Request) -> ApiResult { let beacon_chain = get_beacon_chain_from_request::(&req)?; - let head_state = &beacon_chain.head().beacon_state; let query = UrlQuery::from_request(&req)?; let slot = match query.first_of(&["slot"]) { @@ -190,7 +187,6 @@ pub fn get_new_beacon_block(req: Request) - /// HTTP Handler to accept a validator-signed BeaconBlock, and publish it to the network. pub fn publish_beacon_block(req: Request) -> ApiResult { let beacon_chain = get_beacon_chain_from_request::(&req)?; - let head_state = &beacon_chain.head().beacon_state; let query = UrlQuery::from_request(&req)?; let slot = match query.first_of(&["slot"]) { @@ -212,9 +208,9 @@ pub fn publish_beacon_block(req: Request) - })? .as_slice(), ) - .map_err(|e| { - ApiError::InvalidQueryParams(format!("randao_reveal is not a valid signature: {:?}", e)) - })?, + .map_err(|e| { + ApiError::InvalidQueryParams(format!("randao_reveal is not a valid signature: {:?}", e)) + })?, Err(e) => { return Err(e); } @@ -237,10 +233,9 @@ pub fn publish_beacon_block(req: Request) - Ok(success_response(body)) } -/* /// HTTP Handler to produce a new Attestation from the current state, ready to be signed by a validator. 
pub fn get_new_attestation(req: Request) -> ApiResult { - let beacon_chain = get_beacon_chain_from_request(req)?; + let beacon_chain = get_beacon_chain_from_request::(&req)?; let head_state = &beacon_chain.head().beacon_state; let query = UrlQuery::from_request(&req)?; @@ -304,10 +299,16 @@ pub fn get_new_attestation(req: Request) -> return Err(e); } }; - let mut aggregation_bits: BitList = BitList::with_capacity(val_duty.committee_len) - .expect("An empty BitList should always be created, or we have bigger problems.") - .into(); - aggregation_bits.set(val_duty.committee_index, poc_bit); + let mut aggregation_bits = BitList::with_capacity(val_duty.committee_len) + .expect("An empty BitList should always be created, or we have bigger problems."); + aggregation_bits + .set(val_duty.committee_index, poc_bit) + .map_err(|e| { + ApiError::ServerError(format!( + "Unable to set aggregation bits for the attestation: {:?}", + e + )) + })?; // Allow a provided slot parameter to check against the expected slot as a sanity check. // Presently, we don't support attestations at future or past slots. @@ -354,7 +355,7 @@ pub fn get_new_attestation(req: Request) -> } }; - let attestation = Attestation { + let attestation: Attestation = Attestation { aggregation_bits, data: attestation_data, custody_bits: BitList::with_capacity(val_duty.committee_len) @@ -362,11 +363,9 @@ pub fn get_new_attestation(req: Request) -> signature: AggregateSignature::new(), }; - //TODO: This is currently AttestationData, but should be IndexedAttestation? 
let body = Body::from( serde_json::to_string(&attestation) .expect("We should always be able to serialize a new attestation that we produced."), ); Ok(success_response(body)) } -*/ From 543e9457b7e7e0329b442e0a09f6daeca9e47e6c Mon Sep 17 00:00:00 2001 From: Paul Hauner Date: Sun, 1 Sep 2019 19:32:57 +1000 Subject: [PATCH 165/305] Move bootstrapper into own crate --- Cargo.toml | 1 + beacon_node/Cargo.toml | 1 + beacon_node/beacon_chain/Cargo.toml | 4 +- .../beacon_chain/src/beacon_chain_builder.rs | 2 +- beacon_node/beacon_chain/src/lib.rs | 2 - beacon_node/client/src/bootstrapper.rs | 210 ------------------ beacon_node/src/config.rs | 2 +- eth2/utils/lighthouse_bootstrap/Cargo.toml | 15 ++ .../utils/lighthouse_bootstrap/src/lib.rs | 0 validator_client/Cargo.toml | 1 + 10 files changed, 21 insertions(+), 217 deletions(-) delete mode 100644 beacon_node/client/src/bootstrapper.rs create mode 100644 eth2/utils/lighthouse_bootstrap/Cargo.toml rename beacon_node/beacon_chain/src/bootstrapper.rs => eth2/utils/lighthouse_bootstrap/src/lib.rs (100%) diff --git a/Cargo.toml b/Cargo.toml index f087539e6..d081ee74f 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -12,6 +12,7 @@ members = [ "eth2/utils/logging", "eth2/utils/eth2_hashing", "eth2/utils/lighthouse_metrics", + "eth2/utils/lighthouse_bootstrap", "eth2/utils/merkle_proof", "eth2/utils/int_to_bytes", "eth2/utils/serde_hex", diff --git a/beacon_node/Cargo.toml b/beacon_node/Cargo.toml index 531c4615a..0e4299018 100644 --- a/beacon_node/Cargo.toml +++ b/beacon_node/Cargo.toml @@ -6,6 +6,7 @@ edition = "2018" [dependencies] eth2_config = { path = "../eth2/utils/eth2_config" } +lighthouse_bootstrap = { path = "../eth2/utils/lighthouse_bootstrap" } beacon_chain = { path = "beacon_chain" } types = { path = "../eth2/types" } store = { path = "./store" } diff --git a/beacon_node/beacon_chain/Cargo.toml b/beacon_node/beacon_chain/Cargo.toml index 3378e6a34..d5594a49a 100644 --- a/beacon_node/beacon_chain/Cargo.toml +++ 
b/beacon_node/beacon_chain/Cargo.toml @@ -11,14 +11,13 @@ store = { path = "../store" } parking_lot = "0.7" lazy_static = "1.3.0" lighthouse_metrics = { path = "../../eth2/utils/lighthouse_metrics" } +lighthouse_bootstrap = { path = "../../eth2/utils/lighthouse_bootstrap" } log = "0.4" operation_pool = { path = "../../eth2/operation_pool" } -reqwest = "0.9" rayon = "1.0" serde = "1.0" serde_derive = "1.0" serde_yaml = "0.8" -eth2-libp2p = { path = "../eth2-libp2p" } slog = { version = "^2.2.3" , features = ["max_level_trace"] } sloggers = { version = "^0.3" } slot_clock = { path = "../../eth2/utils/slot_clock" } @@ -28,7 +27,6 @@ eth2_ssz_derive = "0.1" state_processing = { path = "../../eth2/state_processing" } tree_hash = "0.1" types = { path = "../../eth2/types" } -url = "1.2" lmd_ghost = { path = "../../eth2/lmd_ghost" } [dev-dependencies] diff --git a/beacon_node/beacon_chain/src/beacon_chain_builder.rs b/beacon_node/beacon_chain/src/beacon_chain_builder.rs index a569fe833..fdddf6481 100644 --- a/beacon_node/beacon_chain/src/beacon_chain_builder.rs +++ b/beacon_node/beacon_chain/src/beacon_chain_builder.rs @@ -1,6 +1,6 @@ -use super::bootstrapper::Bootstrapper; use crate::{BeaconChain, BeaconChainTypes}; use eth2_hashing::hash; +use lighthouse_bootstrap::Bootstrapper; use merkle_proof::MerkleTree; use rayon::prelude::*; use slog::Logger; diff --git a/beacon_node/beacon_chain/src/lib.rs b/beacon_node/beacon_chain/src/lib.rs index 560da6519..9c833f778 100644 --- a/beacon_node/beacon_chain/src/lib.rs +++ b/beacon_node/beacon_chain/src/lib.rs @@ -4,7 +4,6 @@ extern crate lazy_static; mod beacon_chain; mod beacon_chain_builder; -mod bootstrapper; mod checkpoint; mod errors; mod fork_choice; @@ -19,7 +18,6 @@ pub use self::beacon_chain::{ pub use self::checkpoint::CheckPoint; pub use self::errors::{BeaconChainError, BlockProductionError}; pub use beacon_chain_builder::BeaconChainBuilder; -pub use bootstrapper::Bootstrapper; pub use lmd_ghost; pub use 
metrics::scrape_for_metrics; pub use parking_lot; diff --git a/beacon_node/client/src/bootstrapper.rs b/beacon_node/client/src/bootstrapper.rs deleted file mode 100644 index c94d9a51d..000000000 --- a/beacon_node/client/src/bootstrapper.rs +++ /dev/null @@ -1,210 +0,0 @@ -use eth2_libp2p::{ - multiaddr::{Multiaddr, Protocol}, - Enr, -}; -use reqwest::{Error as HttpError, Url}; -use serde::Deserialize; -use std::borrow::Cow; -use std::net::Ipv4Addr; -use types::{BeaconBlock, BeaconState, Checkpoint, EthSpec, Hash256, Slot}; -use url::Host; - -#[derive(Debug)] -enum Error { - InvalidUrl, - HttpError(HttpError), -} - -impl From for Error { - fn from(e: HttpError) -> Error { - Error::HttpError(e) - } -} - -/// Used to load "bootstrap" information from the HTTP API of another Lighthouse beacon node. -/// -/// Bootstrapping information includes things like genesis and finalized states and blocks, and -/// libp2p connection details. -pub struct Bootstrapper { - url: Url, -} - -impl Bootstrapper { - /// Parses the given `server` as a URL, instantiating `Self`. - pub fn from_server_string(server: String) -> Result { - Ok(Self { - url: Url::parse(&server).map_err(|e| format!("Invalid bootstrap server url: {}", e))?, - }) - } - - /// Build a multiaddr using the HTTP server URL that is not guaranteed to be correct. - /// - /// The address is created by querying the HTTP server for its listening libp2p addresses. - /// Then, we find the first TCP port in those addresses and combine the port with the URL of - /// the server. - /// - /// For example, the server `http://192.168.0.1` might end up with a `best_effort_multiaddr` of - /// `/ipv4/192.168.0.1/tcp/9000` if the server advertises a listening address of - /// `/ipv4/172.0.0.1/tcp/9000`. - pub fn best_effort_multiaddr(&self) -> Option { - let tcp_port = self.listen_port().ok()?; - - let mut multiaddr = Multiaddr::with_capacity(2); - - match self.url.host()? 
{ - Host::Ipv4(addr) => multiaddr.push(Protocol::Ip4(addr)), - Host::Domain(s) => multiaddr.push(Protocol::Dns4(Cow::Borrowed(s))), - _ => return None, - }; - - multiaddr.push(Protocol::Tcp(tcp_port)); - - Some(multiaddr) - } - - /// Returns the IPv4 address of the server URL, unless it contains a FQDN. - pub fn server_ipv4_addr(&self) -> Option { - match self.url.host()? { - Host::Ipv4(addr) => Some(addr), - _ => None, - } - } - - /// Returns the servers ENR address. - pub fn enr(&self) -> Result { - get_enr(self.url.clone()).map_err(|e| format!("Unable to get ENR: {:?}", e)) - } - - /// Returns the servers listening libp2p addresses. - pub fn listen_port(&self) -> Result { - get_listen_port(self.url.clone()).map_err(|e| format!("Unable to get listen port: {:?}", e)) - } - - /// Returns the genesis block and state. - pub fn genesis(&self) -> Result<(BeaconState, BeaconBlock), String> { - let genesis_slot = Slot::new(0); - - let block = get_block(self.url.clone(), genesis_slot) - .map_err(|e| format!("Unable to get genesis block: {:?}", e))? - .beacon_block; - let state = get_state(self.url.clone(), genesis_slot) - .map_err(|e| format!("Unable to get genesis state: {:?}", e))? - .beacon_state; - - Ok((state, block)) - } - - /// Returns the most recent finalized state and block. - pub fn finalized(&self) -> Result<(BeaconState, BeaconBlock), String> { - let slots_per_epoch = get_slots_per_epoch(self.url.clone()) - .map_err(|e| format!("Unable to get slots per epoch: {:?}", e))?; - let finalized_slot = get_finalized_slot(self.url.clone(), slots_per_epoch.as_u64()) - .map_err(|e| format!("Unable to get finalized slot: {:?}", e))?; - - let block = get_block(self.url.clone(), finalized_slot) - .map_err(|e| format!("Unable to get finalized block: {:?}", e))? - .beacon_block; - let state = get_state(self.url.clone(), finalized_slot) - .map_err(|e| format!("Unable to get finalized state: {:?}", e))? 
- .beacon_state; - - Ok((state, block)) - } -} - -fn get_slots_per_epoch(mut url: Url) -> Result { - url.path_segments_mut() - .map(|mut url| { - url.push("spec").push("slots_per_epoch"); - }) - .map_err(|_| Error::InvalidUrl)?; - - reqwest::get(url)? - .error_for_status()? - .json() - .map_err(Into::into) -} - -fn get_finalized_slot(mut url: Url, slots_per_epoch: u64) -> Result { - url.path_segments_mut() - .map(|mut url| { - url.push("beacon").push("latest_finalized_checkpoint"); - }) - .map_err(|_| Error::InvalidUrl)?; - - let checkpoint: Checkpoint = reqwest::get(url)?.error_for_status()?.json()?; - - Ok(checkpoint.epoch.start_slot(slots_per_epoch)) -} - -#[derive(Deserialize)] -#[serde(bound = "T: EthSpec")] -pub struct StateResponse { - pub root: Hash256, - pub beacon_state: BeaconState, -} - -fn get_state(mut url: Url, slot: Slot) -> Result, Error> { - url.path_segments_mut() - .map(|mut url| { - url.push("beacon").push("state"); - }) - .map_err(|_| Error::InvalidUrl)?; - - url.query_pairs_mut() - .append_pair("slot", &format!("{}", slot.as_u64())); - - reqwest::get(url)? - .error_for_status()? - .json() - .map_err(Into::into) -} - -#[derive(Deserialize)] -#[serde(bound = "T: EthSpec")] -pub struct BlockResponse { - pub root: Hash256, - pub beacon_block: BeaconBlock, -} - -fn get_block(mut url: Url, slot: Slot) -> Result, Error> { - url.path_segments_mut() - .map(|mut url| { - url.push("beacon").push("block"); - }) - .map_err(|_| Error::InvalidUrl)?; - - url.query_pairs_mut() - .append_pair("slot", &format!("{}", slot.as_u64())); - - reqwest::get(url)? - .error_for_status()? - .json() - .map_err(Into::into) -} - -fn get_enr(mut url: Url) -> Result { - url.path_segments_mut() - .map(|mut url| { - url.push("network").push("enr"); - }) - .map_err(|_| Error::InvalidUrl)?; - - reqwest::get(url)? - .error_for_status()? 
- .json() - .map_err(Into::into) -} - -fn get_listen_port(mut url: Url) -> Result { - url.path_segments_mut() - .map(|mut url| { - url.push("network").push("listen_port"); - }) - .map_err(|_| Error::InvalidUrl)?; - - reqwest::get(url)? - .error_for_status()? - .json() - .map_err(Into::into) -} diff --git a/beacon_node/src/config.rs b/beacon_node/src/config.rs index c4fa5eebc..c9ad964f5 100644 --- a/beacon_node/src/config.rs +++ b/beacon_node/src/config.rs @@ -1,7 +1,7 @@ -use beacon_chain::Bootstrapper; use clap::ArgMatches; use client::{BeaconChainStartMethod, ClientConfig, Eth2Config}; use eth2_config::{read_from_file, write_to_file}; +use lighthouse_bootstrap::Bootstrapper; use rand::{distributions::Alphanumeric, Rng}; use slog::{crit, info, warn, Logger}; use std::fs; diff --git a/eth2/utils/lighthouse_bootstrap/Cargo.toml b/eth2/utils/lighthouse_bootstrap/Cargo.toml new file mode 100644 index 000000000..3f48505b8 --- /dev/null +++ b/eth2/utils/lighthouse_bootstrap/Cargo.toml @@ -0,0 +1,15 @@ +[package] +name = "lighthouse_bootstrap" +version = "0.1.0" +authors = ["Paul Hauner "] +edition = "2018" + +# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html + +[dependencies] +eth2_config = { path = "../eth2_config" } +eth2-libp2p = { path = "../../../beacon_node/eth2-libp2p" } +reqwest = "0.9" +url = "1.2" +types = { path = "../../types" } +serde = "1.0" diff --git a/beacon_node/beacon_chain/src/bootstrapper.rs b/eth2/utils/lighthouse_bootstrap/src/lib.rs similarity index 100% rename from beacon_node/beacon_chain/src/bootstrapper.rs rename to eth2/utils/lighthouse_bootstrap/src/lib.rs diff --git a/validator_client/Cargo.toml b/validator_client/Cargo.toml index 927731f63..2000f5409 100644 --- a/validator_client/Cargo.toml +++ b/validator_client/Cargo.toml @@ -18,6 +18,7 @@ eth2_ssz = "0.1" eth2_config = { path = "../eth2/utils/eth2_config" } tree_hash = "0.1" clap = "2.32.0" +lighthouse_bootstrap = { path = 
"../eth2/utils/lighthouse_bootstrap" } grpcio = { version = "0.4", default-features = false, features = ["protobuf-codec"] } protos = { path = "../protos" } slot_clock = { path = "../eth2/utils/slot_clock" } From fa6ba51eb776e5b8dfe7842f07387464f2ea601c Mon Sep 17 00:00:00 2001 From: Paul Hauner Date: Sun, 1 Sep 2019 19:33:21 +1000 Subject: [PATCH 166/305] Make gRPC and HTTP on by default --- beacon_node/rest_api/src/config.rs | 6 +++--- beacon_node/rpc/src/config.rs | 6 +++--- beacon_node/rpc/src/lib.rs | 7 ++++++- beacon_node/src/main.rs | 14 ++++++-------- 4 files changed, 18 insertions(+), 15 deletions(-) diff --git a/beacon_node/rest_api/src/config.rs b/beacon_node/rest_api/src/config.rs index 90ac0821b..c262a128a 100644 --- a/beacon_node/rest_api/src/config.rs +++ b/beacon_node/rest_api/src/config.rs @@ -16,7 +16,7 @@ pub struct Config { impl Default for Config { fn default() -> Self { Config { - enabled: true, // rest_api enabled by default + enabled: true, listen_address: Ipv4Addr::new(127, 0, 0, 1), port: 5052, } @@ -25,8 +25,8 @@ impl Default for Config { impl Config { pub fn apply_cli_args(&mut self, args: &ArgMatches) -> Result<(), &'static str> { - if args.is_present("api") { - self.enabled = true; + if args.is_present("no-api") { + self.enabled = false; } if let Some(rpc_address) = args.value_of("api-address") { diff --git a/beacon_node/rpc/src/config.rs b/beacon_node/rpc/src/config.rs index 0f031ddc6..47eff6824 100644 --- a/beacon_node/rpc/src/config.rs +++ b/beacon_node/rpc/src/config.rs @@ -16,7 +16,7 @@ pub struct Config { impl Default for Config { fn default() -> Self { Config { - enabled: false, // rpc disabled by default + enabled: true, listen_address: Ipv4Addr::new(127, 0, 0, 1), port: 5051, } @@ -25,8 +25,8 @@ impl Default for Config { impl Config { pub fn apply_cli_args(&mut self, args: &ArgMatches) -> Result<(), &'static str> { - if args.is_present("rpc") { - self.enabled = true; + if args.is_present("no-grpc") { + self.enabled = false; } 
if let Some(rpc_address) = args.value_of("rpc-address") { diff --git a/beacon_node/rpc/src/lib.rs b/beacon_node/rpc/src/lib.rs index eef009292..59902ff43 100644 --- a/beacon_node/rpc/src/lib.rs +++ b/beacon_node/rpc/src/lib.rs @@ -80,7 +80,12 @@ pub fn start_server( let spawn_rpc = { server.start(); for &(ref host, port) in server.bind_addrs() { - info!(log, "gRPC listening on {}:{}", host, port); + info!( + log, + "gRPC API started"; + "port" => port, + "host" => host, + ); } rpc_exit.and_then(move |_| { info!(log, "RPC Server shutting down"); diff --git a/beacon_node/src/main.rs b/beacon_node/src/main.rs index 02e30b660..26537c6f7 100644 --- a/beacon_node/src/main.rs +++ b/beacon_node/src/main.rs @@ -120,10 +120,9 @@ fn main() { * gRPC parameters. */ .arg( - Arg::with_name("rpc") - .long("rpc") - .value_name("RPC") - .help("Enable the RPC server.") + Arg::with_name("no-grpc") + .long("no-grpc") + .help("Disable the gRPC server.") .takes_value(false), ) .arg( @@ -142,10 +141,9 @@ fn main() { ) /* Client related arguments */ .arg( - Arg::with_name("api") - .long("api") - .value_name("API") - .help("Enable the RESTful HTTP API server.") + Arg::with_name("no-api") + .long("no-api") + .help("Disable RESTful HTTP API server.") .takes_value(false), ) .arg( From 4a69d01a3781d1c26e95ffb595cfe11a1c519853 Mon Sep 17 00:00:00 2001 From: Paul Hauner Date: Sun, 1 Sep 2019 19:33:43 +1000 Subject: [PATCH 167/305] Add first changes to validator CLI --- beacon_node/client/src/config.rs | 2 + validator_client/src/config.rs | 178 +++++++++++++++++++++------- validator_client/src/main.rs | 193 +++++++++++++++++++++++++++---- validator_client/src/service.rs | 32 +++-- 4 files changed, 331 insertions(+), 74 deletions(-) diff --git a/beacon_node/client/src/config.rs b/beacon_node/client/src/config.rs index 3aed26881..2f5389ce5 100644 --- a/beacon_node/client/src/config.rs +++ b/beacon_node/client/src/config.rs @@ -89,6 +89,8 @@ impl Config { } /// Returns the core path for the client. 
+ /// + /// Creates the directory if it does not exist. pub fn data_dir(&self) -> Option { let path = dirs::home_dir()?.join(&self.data_dir); fs::create_dir_all(&path).ok()?; diff --git a/validator_client/src/config.rs b/validator_client/src/config.rs index 7bc504b23..8e148cfab 100644 --- a/validator_client/src/config.rs +++ b/validator_client/src/config.rs @@ -5,19 +5,45 @@ use serde_derive::{Deserialize, Serialize}; use slog::{debug, error, info, o, Drain}; use std::fs::{self, File, OpenOptions}; use std::io::{Error, ErrorKind}; +use std::ops::Range; use std::path::PathBuf; use std::sync::Mutex; use types::{EthSpec, MainnetEthSpec}; +pub const DEFAULT_SERVER: &str = "localhost"; +pub const DEFAULT_SERVER_GRPC_PORT: &str = "5051"; +pub const DEFAULT_SERVER_HTTP_PORT: &str = "5052"; + +#[derive(Clone)] +pub enum KeySource { + /// Load the keypairs from disk. + Disk, + /// Generate the keypairs (insecure, generates predictable keys). + TestingKeypairRange(Range), +} + +impl Default for KeySource { + fn default() -> Self { + KeySource::Disk + } +} + /// Stores the core configuration for this validator instance. #[derive(Clone, Serialize, Deserialize)] pub struct Config { /// The data directory, which stores all validator databases pub data_dir: PathBuf, + /// The source for loading keypairs + #[serde(skip)] + pub key_source: KeySource, /// The path where the logs will be outputted pub log_file: PathBuf, /// The server at which the Beacon Node can be contacted pub server: String, + /// The gRPC port on the server + pub server_grpc_port: u16, + /// The HTTP port on the server, for the REST API. + pub server_http_port: u16, /// The number of slots per epoch. 
pub slots_per_epoch: u64, } @@ -29,14 +55,33 @@ impl Default for Config { fn default() -> Self { Self { data_dir: PathBuf::from(".lighthouse-validator"), + key_source: <_>::default(), log_file: PathBuf::from(""), - server: "localhost:5051".to_string(), + server: DEFAULT_SERVER.into(), + server_grpc_port: DEFAULT_SERVER_GRPC_PORT + .parse::() + .expect("gRPC port constant should be valid"), + server_http_port: DEFAULT_SERVER_GRPC_PORT + .parse::() + .expect("HTTP port constant should be valid"), slots_per_epoch: MainnetEthSpec::slots_per_epoch(), } } } impl Config { + /// Returns the full path for the client data directory (not just the name of the directory). + pub fn full_data_dir(&self) -> Option { + dirs::home_dir().map(|path| path.join(&self.data_dir)) + } + + /// Creates the data directory (and any non-existing parent directories). + pub fn create_data_dir(&self) -> Option { + let path = dirs::home_dir()?.join(&self.data_dir); + fs::create_dir_all(&path).ok()?; + Some(path) + } + /// Apply the following arguments to `self`, replacing values if they are specified in `args`. /// /// Returns an error if arguments are obviously invalid. May succeed even if some values are @@ -94,61 +139,106 @@ impl Config { Ok(()) } + /// Reads a single keypair from the given `path`. + /// + /// `path` should be the path to a directory containing a private key. The file name of `path` + /// must align with the public key loaded from it, otherwise an error is returned. + /// + /// An error will be returned if `path` is a file (not a directory). 
+ fn read_keypair_file(&self, path: PathBuf) -> Result { + if !path.is_dir() { + return Err("Is not a directory".into()); + } + + let key_filename: PathBuf = path.join(DEFAULT_PRIVATE_KEY_FILENAME); + + if !key_filename.is_file() { + return Err(format!( + "Private key is not a file: {:?}", + key_filename.to_str() + )); + } + + let mut key_file = File::open(key_filename.clone()) + .map_err(|e| format!("Unable to open private key file: {}", e))?; + + let key: Keypair = bincode::deserialize_from(&mut key_file) + .map_err(|e| format!("Unable to deserialize private key: {:?}", e))?; + + let ki = key.identifier(); + if &ki + != &path + .file_name() + .ok_or_else(|| "Invalid path".to_string())? + .to_string_lossy() + { + return Err(format!( + "The validator key ({:?}) did not match the directory filename {:?}.", + ki, + path.to_str() + )); + } else { + Ok(key) + } + } + /// Try to load keys from validator_dir, returning None if none are found or an error. #[allow(dead_code)] pub fn fetch_keys(&self, log: &slog::Logger) -> Option> { - let key_pairs: Vec = fs::read_dir(&self.data_dir) - .ok()? - .filter_map(|validator_dir| { - let validator_dir = validator_dir.ok()?; + let key_pairs: Vec = + fs::read_dir(&self.full_data_dir().expect("Data dir must exist")) + .ok()? + .filter_map(|validator_dir| { + let validator_dir = validator_dir.ok()?; - if !(validator_dir.file_type().ok()?.is_dir()) { - // Skip non-directories (i.e. no files/symlinks) - return None; - } + if !(validator_dir.file_type().ok()?.is_dir()) { + // Skip non-directories (i.e. 
no files/symlinks) + return None; + } - let key_filename = validator_dir.path().join(DEFAULT_PRIVATE_KEY_FILENAME); + let key_filename = validator_dir.path().join(DEFAULT_PRIVATE_KEY_FILENAME); - if !(key_filename.is_file()) { - info!( + if !(key_filename.is_file()) { + info!( + log, + "Private key is not a file: {:?}", + key_filename.to_str() + ); + return None; + } + + debug!( log, - "Private key is not a file: {:?}", + "Deserializing private key from file: {:?}", key_filename.to_str() ); - return None; - } - debug!( - log, - "Deserializing private key from file: {:?}", - key_filename.to_str() - ); + let mut key_file = File::open(key_filename.clone()).ok()?; - let mut key_file = File::open(key_filename.clone()).ok()?; + let key: Keypair = if let Ok(key_ok) = bincode::deserialize_from(&mut key_file) + { + key_ok + } else { + error!( + log, + "Unable to deserialize the private key file: {:?}", key_filename + ); + return None; + }; - let key: Keypair = if let Ok(key_ok) = bincode::deserialize_from(&mut key_file) { - key_ok - } else { - error!( - log, - "Unable to deserialize the private key file: {:?}", key_filename - ); - return None; - }; - - let ki = key.identifier(); - if ki != validator_dir.file_name().into_string().ok()? { - error!( - log, - "The validator key ({:?}) did not match the directory filename {:?}.", - ki, - &validator_dir.path().to_string_lossy() - ); - return None; - } - Some(key) - }) - .collect(); + let ki = key.identifier(); + if ki != validator_dir.file_name().into_string().ok()? { + error!( + log, + "The validator key ({:?}) did not match the directory filename {:?}.", + ki, + &validator_dir.path().to_string_lossy() + ); + return None; + } + Some(key) + }) + .collect(); // Check if it's an empty vector, and return none. 
if key_pairs.is_empty() { diff --git a/validator_client/src/main.rs b/validator_client/src/main.rs index 83a874df7..40d5f6ab0 100644 --- a/validator_client/src/main.rs +++ b/validator_client/src/main.rs @@ -6,12 +6,16 @@ pub mod error; mod service; mod signer; -use crate::config::Config as ValidatorClientConfig; +use crate::config::{ + Config as ClientConfig, KeySource, DEFAULT_SERVER, DEFAULT_SERVER_GRPC_PORT, + DEFAULT_SERVER_HTTP_PORT, +}; use crate::service::Service as ValidatorService; -use clap::{App, Arg}; +use clap::{App, Arg, ArgMatches, SubCommand}; use eth2_config::{read_from_file, write_to_file, Eth2Config}; +use lighthouse_bootstrap::Bootstrapper; use protos::services_grpc::ValidatorServiceClient; -use slog::{crit, error, info, o, warn, Drain, Level}; +use slog::{crit, error, info, o, warn, Drain, Level, Logger}; use std::fs; use std::path::PathBuf; use types::{InteropEthSpec, Keypair, MainnetEthSpec, MinimalEthSpec}; @@ -21,6 +25,8 @@ pub const DEFAULT_DATA_DIR: &str = ".lighthouse-validator"; pub const CLIENT_CONFIG_FILENAME: &str = "validator-client.toml"; pub const ETH2_CONFIG_FILENAME: &str = "eth2-spec.toml"; +type Result = core::result::Result; + fn main() { // Logging let decorator = slog_term::TermDecorator::new().build(); @@ -49,28 +55,36 @@ fn main() { .takes_value(true), ) .arg( - Arg::with_name("eth2-spec") - .long("eth2-spec") + Arg::with_name("eth2-config") + .long("eth2-config") .short("e") .value_name("TOML_FILE") - .help("Path to Ethereum 2.0 specifications file.") + .help("Path to Ethereum 2.0 config and specification file (e.g., eth2_spec.toml).") .takes_value(true), ) .arg( Arg::with_name("server") .long("server") - .value_name("server") + .value_name("NETWORK_ADDRESS") .help("Address to connect to BeaconNode.") + .default_value(DEFAULT_SERVER) .takes_value(true), ) .arg( - Arg::with_name("default-spec") - .long("default-spec") - .value_name("TITLE") - .short("default-spec") - .help("Specifies the default eth2 spec to be used. 
This will override any spec written to disk and will therefore be used by default in future instances.") - .takes_value(true) - .possible_values(&["mainnet", "minimal", "interop"]) + Arg::with_name("server-grpc-port") + .long("g") + .value_name("PORT") + .help("Port to use for gRPC API connection to the server.") + .default_value(DEFAULT_SERVER_GRPC_PORT) + .takes_value(true), + ) + .arg( + Arg::with_name("server-http-port") + .long("h") + .value_name("PORT") + .help("Port to use for HTTP API connection to the server.") + .default_value(DEFAULT_SERVER_HTTP_PORT) + .takes_value(true), ) .arg( Arg::with_name("debug-level") @@ -82,6 +96,33 @@ fn main() { .possible_values(&["info", "debug", "trace", "warn", "error", "crit"]) .default_value("info"), ) + /* + * The "testnet" sub-command. + * + * Used for starting testnet validator clients. + */ + .subcommand(SubCommand::with_name("testnet") + .about("Starts a testnet validator using INSECURE, predicatable private keys, based off the canonical \ + validator index. 
ONLY USE FOR TESTING PURPOSES!") + .arg( + Arg::with_name("bootstrap") + .short("b") + .long("bootstrap") + .help("Connect to the RPC server to download the eth2_config via the HTTP API.") + ) + .subcommand(SubCommand::with_name("range") + .about("Uses the standard, predicatable `interop` keygen method to produce a range \ + of predicatable private keys and starts performing their validator duties.") + .arg(Arg::with_name("first_validator") + .value_name("VALIDATOR_INDEX") + .required(true) + .help("The first validator public key to be generated for this client.")) + .arg(Arg::with_name("validator_count") + .value_name("COUNT") + .required(true) + .help("The number of validators.")) + ) + ) .get_matches(); let drain = match matches.value_of("debug-level") { @@ -93,8 +134,9 @@ fn main() { Some("crit") => drain.filter_level(Level::Critical), _ => unreachable!("guarded by clap"), }; - let mut log = slog::Logger::root(drain.fuse(), o!()); + let log = slog::Logger::root(drain.fuse(), o!()); + /* let data_dir = match matches .value_of("datadir") .and_then(|v| Some(PathBuf::from(v))) @@ -128,12 +170,10 @@ fn main() { // Attempt to load the `ClientConfig` from disk. // // If file doesn't exist, create a new, default one. 
- let mut client_config = match read_from_file::( - client_config_path.clone(), - ) { + let mut client_config = match read_from_file::(client_config_path.clone()) { Ok(Some(c)) => c, Ok(None) => { - let default = ValidatorClientConfig::default(); + let default = ClientConfig::default(); if let Err(e) = write_to_file(client_config_path.clone(), &default) { crit!(log, "Failed to write default ClientConfig to file"; "error" => format!("{:?}", e)); return; @@ -223,12 +263,23 @@ fn main() { return; } }; + */ + let (client_config, eth2_config) = match get_configs(&matches, &log) { + Ok(tuple) => tuple, + Err(e) => { + crit!( + log, + "Unable to initialize configuration"; + "error" => e + ); + return; + } + }; info!( log, "Starting validator client"; - "datadir" => client_config.data_dir.to_str(), - "spec_constants" => ð2_config.spec_constants, + "datadir" => client_config.full_data_dir().expect("Unable to find datadir").to_str(), ); let result = match eth2_config.spec_constants.as_str() { @@ -260,3 +311,103 @@ fn main() { Err(e) => crit!(log, "Validator client exited with error"; "error" => e.to_string()), } } + +/// Parses the CLI arguments and attempts to load the client and eth2 configuration. +/// +/// This is not a pure function, it reads from disk and may contact network servers. 
+pub fn get_configs(cli_args: &ArgMatches, log: &Logger) -> Result<(ClientConfig, Eth2Config)> { + let mut client_config = ClientConfig::default(); + + if let Some(server) = cli_args.value_of("server") { + client_config.server = server.to_string(); + } + + if let Some(port) = cli_args.value_of("server-http-port") { + client_config.server_http_port = port + .parse::() + .map_err(|e| format!("Unable to parse HTTP port: {:?}", e))?; + } + + if let Some(port) = cli_args.value_of("server-grpc-port") { + client_config.server_grpc_port = port + .parse::() + .map_err(|e| format!("Unable to parse gRPC port: {:?}", e))?; + } + + info!( + log, + "Beacon node connection info"; + "grpc_port" => client_config.server_grpc_port, + "http_port" => client_config.server_http_port, + "server" => &client_config.server, + ); + + match cli_args.subcommand() { + ("testnet", Some(sub_cli_args)) => { + if cli_args.is_present("eth2-config") && sub_cli_args.is_present("bootstrap") { + return Err( + "Cannot specify --eth2-config and --bootstrap as it may result \ + in ambiguity." 
+ .into(), + ); + } + process_testnet_subcommand(sub_cli_args, client_config, log) + } + _ => { + unimplemented!("Resuming (not starting a testnet)"); + } + } +} + +fn process_testnet_subcommand( + cli_args: &ArgMatches, + mut client_config: ClientConfig, + log: &Logger, +) -> Result<(ClientConfig, Eth2Config)> { + let eth2_config = if cli_args.is_present("bootstrap") { + let bootstrapper = Bootstrapper::from_server_string(format!( + "http://{}:{}", + client_config.server, client_config.server_http_port + ))?; + + let eth2_config = bootstrapper.eth2_config()?; + + info!( + log, + "Bootstrapped eth2 config via HTTP"; + "slot_time_millis" => eth2_config.spec.milliseconds_per_slot, + "spec" => ð2_config.spec_constants, + ); + + eth2_config + } else { + return Err("Starting without bootstrap is not implemented".into()); + }; + + client_config.key_source = match cli_args.subcommand() { + ("range", Some(sub_cli_args)) => { + let first = sub_cli_args + .value_of("first_validator") + .ok_or_else(|| "No first validator supplied")? + .parse::() + .map_err(|e| format!("Unable to parse first validator: {:?}", e))?; + let count = sub_cli_args + .value_of("validator_count") + .ok_or_else(|| "No validator count supplied")? 
+ .parse::() + .map_err(|e| format!("Unable to parse validator count: {:?}", e))?; + + info!( + log, + "Generating unsafe testing keys"; + "first_validator" => first, + "count" => count + ); + + KeySource::TestingKeypairRange(first..first + count) + } + _ => KeySource::Disk, + }; + + Ok((client_config, eth2_config)) +} diff --git a/validator_client/src/service.rs b/validator_client/src/service.rs index bd694668b..ae6f94531 100644 --- a/validator_client/src/service.rs +++ b/validator_client/src/service.rs @@ -73,12 +73,15 @@ impl Service error_chain::Result> { - // initialise the beacon node client to check for a connection + let server_url = format!( + "{}:{}", + client_config.server, client_config.server_grpc_port + ); let env = Arc::new(EnvBuilder::new().build()); // Beacon node gRPC beacon node endpoints. let beacon_node_client = { - let ch = ChannelBuilder::new(env.clone()).connect(&client_config.server); + let ch = ChannelBuilder::new(env.clone()).connect(&server_url); BeaconNodeServiceClient::new(ch) }; @@ -86,9 +89,14 @@ impl Service { - warn!(log, "Could not connect to node. 
Error: {}", e); - info!(log, "Retrying in 5 seconds..."); - std::thread::sleep(Duration::from_secs(5)); + let retry_seconds = 5; + warn!( + log, + "Could not connect to beacon node"; + "error" => format!("{:?}", e), + "retry_in" => format!("{} seconds", retry_seconds), + ); + std::thread::sleep(Duration::from_secs(retry_seconds)); continue; } Ok(info) => { @@ -122,7 +130,13 @@ impl Service node_info.version.clone(), "Chain ID" => node_info.network_id, "Genesis time" => genesis_time); + info!( + log, + "Beacon node connected"; + "version" => node_info.version.clone(), + "network_id" => node_info.network_id, + "genesis_time" => genesis_time + ); let proto_fork = node_info.get_fork(); let mut previous_version: [u8; 4] = [0; 4]; @@ -139,7 +153,7 @@ impl Service Service Date: Sun, 1 Sep 2019 20:09:46 +1000 Subject: [PATCH 168/305] Add testing keypairs to validator client --- validator_client/src/config.rs | 108 +++++++++++---------- validator_client/src/main.rs | 160 +++++--------------------------- validator_client/src/service.rs | 7 +- 3 files changed, 76 insertions(+), 199 deletions(-) diff --git a/validator_client/src/config.rs b/validator_client/src/config.rs index 8e148cfab..3e13de722 100644 --- a/validator_client/src/config.rs +++ b/validator_client/src/config.rs @@ -2,13 +2,13 @@ use bincode; use bls::Keypair; use clap::ArgMatches; use serde_derive::{Deserialize, Serialize}; -use slog::{debug, error, info, o, Drain}; +use slog::{error, info, o, warn, Drain}; use std::fs::{self, File, OpenOptions}; use std::io::{Error, ErrorKind}; use std::ops::Range; use std::path::PathBuf; use std::sync::Mutex; -use types::{EthSpec, MainnetEthSpec}; +use types::{test_utils::generate_deterministic_keypair, EthSpec, MainnetEthSpec}; pub const DEFAULT_SERVER: &str = "localhost"; pub const DEFAULT_SERVER_GRPC_PORT: &str = "5051"; @@ -182,69 +182,65 @@ impl Config { } } - /// Try to load keys from validator_dir, returning None if none are found or an error. 
- #[allow(dead_code)] - pub fn fetch_keys(&self, log: &slog::Logger) -> Option> { - let key_pairs: Vec = + pub fn fetch_keys_from_disk(&self, log: &slog::Logger) -> Result, String> { + Ok( fs::read_dir(&self.full_data_dir().expect("Data dir must exist")) - .ok()? + .map_err(|e| format!("Failed to read datadir: {:?}", e))? .filter_map(|validator_dir| { - let validator_dir = validator_dir.ok()?; + let path = validator_dir.ok()?.path(); - if !(validator_dir.file_type().ok()?.is_dir()) { - // Skip non-directories (i.e. no files/symlinks) - return None; - } - - let key_filename = validator_dir.path().join(DEFAULT_PRIVATE_KEY_FILENAME); - - if !(key_filename.is_file()) { - info!( - log, - "Private key is not a file: {:?}", - key_filename.to_str() - ); - return None; - } - - debug!( - log, - "Deserializing private key from file: {:?}", - key_filename.to_str() - ); - - let mut key_file = File::open(key_filename.clone()).ok()?; - - let key: Keypair = if let Ok(key_ok) = bincode::deserialize_from(&mut key_file) - { - key_ok + if path.is_dir() { + match self.read_keypair_file(path.clone()) { + Ok(keypair) => Some(keypair), + Err(e) => { + error!( + log, + "Failed to parse a validator keypair"; + "error" => e, + "path" => path.to_str(), + ); + None + } + } } else { - error!( - log, - "Unable to deserialize the private key file: {:?}", key_filename - ); - return None; - }; - - let ki = key.identifier(); - if ki != validator_dir.file_name().into_string().ok()? { - error!( - log, - "The validator key ({:?}) did not match the directory filename {:?}.", - ki, - &validator_dir.path().to_string_lossy() - ); - return None; + None } - Some(key) }) - .collect(); + .collect(), + ) + } + + pub fn fetch_testing_keypairs( + &self, + range: std::ops::Range, + ) -> Result, String> { + Ok(range + .into_iter() + .map(generate_deterministic_keypair) + .collect()) + } + + /// Loads the keypairs according to `self.key_source`. Will return one or more keypairs, or an + /// error. 
+ #[allow(dead_code)] + pub fn fetch_keys(&self, log: &slog::Logger) -> Result, String> { + let keypairs = match &self.key_source { + KeySource::Disk => self.fetch_keys_from_disk(log)?, + KeySource::TestingKeypairRange(range) => { + warn!(log, "Using insecure private keys"); + self.fetch_testing_keypairs(range.clone())? + } + }; // Check if it's an empty vector, and return none. - if key_pairs.is_empty() { - None + if keypairs.is_empty() { + Err( + "No validator keypairs were found, unable to proceed. To generate \ + testing keypairs, see 'testnet range --help'." + .into(), + ) } else { - Some(key_pairs) + Ok(keypairs) } } diff --git a/validator_client/src/main.rs b/validator_client/src/main.rs index 40d5f6ab0..c849be31b 100644 --- a/validator_client/src/main.rs +++ b/validator_client/src/main.rs @@ -12,12 +12,10 @@ use crate::config::{ }; use crate::service::Service as ValidatorService; use clap::{App, Arg, ArgMatches, SubCommand}; -use eth2_config::{read_from_file, write_to_file, Eth2Config}; +use eth2_config::Eth2Config; use lighthouse_bootstrap::Bootstrapper; use protos::services_grpc::ValidatorServiceClient; -use slog::{crit, error, info, o, warn, Drain, Level, Logger}; -use std::fs; -use std::path::PathBuf; +use slog::{crit, error, info, o, Drain, Level, Logger}; use types::{InteropEthSpec, Keypair, MainnetEthSpec, MinimalEthSpec}; pub const DEFAULT_SPEC: &str = "minimal"; @@ -54,6 +52,17 @@ fn main() { .help("File path where output will be written.") .takes_value(true), ) + .arg( + Arg::with_name("spec") + .short("s") + .long("spec") + .value_name("TITLE") + .help("Specifies the default eth2 spec type.") + .takes_value(true) + .possible_values(&["mainnet", "minimal", "interop"]) + .conflicts_with("eth2-config") + .global(true) + ) .arg( Arg::with_name("eth2-config") .long("eth2-config") @@ -135,135 +144,6 @@ fn main() { _ => unreachable!("guarded by clap"), }; let log = slog::Logger::root(drain.fuse(), o!()); - - /* - let data_dir = match matches - 
.value_of("datadir") - .and_then(|v| Some(PathBuf::from(v))) - { - Some(v) => v, - None => { - // use the default - let mut default_dir = match dirs::home_dir() { - Some(v) => v, - None => { - crit!(log, "Failed to find a home directory"); - return; - } - }; - default_dir.push(DEFAULT_DATA_DIR); - default_dir - } - }; - - // create the directory if needed - match fs::create_dir_all(&data_dir) { - Ok(_) => {} - Err(e) => { - crit!(log, "Failed to initialize data dir"; "error" => format!("{}", e)); - return; - } - } - - let client_config_path = data_dir.join(CLIENT_CONFIG_FILENAME); - - // Attempt to load the `ClientConfig` from disk. - // - // If file doesn't exist, create a new, default one. - let mut client_config = match read_from_file::(client_config_path.clone()) { - Ok(Some(c)) => c, - Ok(None) => { - let default = ClientConfig::default(); - if let Err(e) = write_to_file(client_config_path.clone(), &default) { - crit!(log, "Failed to write default ClientConfig to file"; "error" => format!("{:?}", e)); - return; - } - default - } - Err(e) => { - crit!(log, "Failed to load a ChainConfig file"; "error" => format!("{:?}", e)); - return; - } - }; - - // Ensure the `data_dir` in the config matches that supplied to the CLI. - client_config.data_dir = data_dir.clone(); - - // Update the client config with any CLI args. - match client_config.apply_cli_args(&matches, &mut log) { - Ok(()) => (), - Err(s) => { - crit!(log, "Failed to parse ClientConfig CLI arguments"; "error" => s); - return; - } - }; - - let eth2_config_path: PathBuf = matches - .value_of("eth2-spec") - .and_then(|s| Some(PathBuf::from(s))) - .unwrap_or_else(|| data_dir.join(ETH2_CONFIG_FILENAME)); - - // Initialise the `Eth2Config`. - // - // If a CLI parameter is set, overwrite any config file present. - // If a parameter is not set, use either the config file present or default to minimal. 
- let cli_config = match matches.value_of("default-spec") { - Some("mainnet") => Some(Eth2Config::mainnet()), - Some("minimal") => Some(Eth2Config::minimal()), - Some("interop") => Some(Eth2Config::interop()), - _ => None, - }; - // if a CLI flag is specified, write the new config if it doesn't exist, - // otherwise notify the user that the file will not be written. - let eth2_config_from_file = match read_from_file::(eth2_config_path.clone()) { - Ok(config) => config, - Err(e) => { - crit!(log, "Failed to read the Eth2Config from file"; "error" => format!("{:?}", e)); - return; - } - }; - - let mut eth2_config = { - if let Some(cli_config) = cli_config { - if eth2_config_from_file.is_none() { - // write to file if one doesn't exist - if let Err(e) = write_to_file(eth2_config_path, &cli_config) { - crit!(log, "Failed to write default Eth2Config to file"; "error" => format!("{:?}", e)); - return; - } - } else { - warn!( - log, - "Eth2Config file exists. Configuration file is ignored, using default" - ); - } - cli_config - } else { - // CLI config not specified, read from disk - match eth2_config_from_file { - Some(config) => config, - None => { - // set default to minimal - let eth2_config = Eth2Config::minimal(); - if let Err(e) = write_to_file(eth2_config_path, ð2_config) { - crit!(log, "Failed to write default Eth2Config to file"; "error" => format!("{:?}", e)); - return; - } - eth2_config - } - } - } - }; - - // Update the eth2 config with any CLI flags. 
- match eth2_config.apply_cli_args(&matches) { - Ok(()) => (), - Err(s) => { - crit!(log, "Failed to parse Eth2Config CLI arguments"; "error" => s); - return; - } - }; - */ let (client_config, eth2_config) = match get_configs(&matches, &log) { Ok(tuple) => tuple, Err(e) => { @@ -353,12 +233,13 @@ pub fn get_configs(cli_args: &ArgMatches, log: &Logger) -> Result<(ClientConfig, } process_testnet_subcommand(sub_cli_args, client_config, log) } - _ => { - unimplemented!("Resuming (not starting a testnet)"); - } + _ => return Err("You must use the testnet command. See '--help'.".into()), } } +/// Parses the `testnet` CLI subcommand. +/// +/// This is not a pure function, it reads from disk and may contact network servers. fn process_testnet_subcommand( cli_args: &ArgMatches, mut client_config: ClientConfig, @@ -381,7 +262,12 @@ fn process_testnet_subcommand( eth2_config } else { - return Err("Starting without bootstrap is not implemented".into()); + match cli_args.value_of("spec") { + Some("mainnet") => Eth2Config::mainnet(), + Some("minimal") => Eth2Config::minimal(), + Some("interop") => Eth2Config::interop(), + _ => return Err("No --spec flag provided. 
See '--help'.".into()), + } }; client_config.key_source = match cli_args.subcommand() { diff --git a/validator_client/src/service.rs b/validator_client/src/service.rs index ae6f94531..8adc79b91 100644 --- a/validator_client/src/service.rs +++ b/validator_client/src/service.rs @@ -188,12 +188,7 @@ impl Service Arc::new(kps), - None => { - return Err("Unable to locate validator key pairs, nothing to do.".into()); - } - }; + let keypairs = Arc::new(client_config.fetch_keys(&log)?); let slots_per_epoch = E::slots_per_epoch(); From 457e04f1e0cc07da50bcbd0caceda045912d71d0 Mon Sep 17 00:00:00 2001 From: Paul Hauner Date: Sun, 1 Sep 2019 20:12:51 +1000 Subject: [PATCH 169/305] Rename key gen subcommand in val client --- validator_client/src/main.rs | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/validator_client/src/main.rs b/validator_client/src/main.rs index c849be31b..5e9c036ca 100644 --- a/validator_client/src/main.rs +++ b/validator_client/src/main.rs @@ -119,7 +119,7 @@ fn main() { .long("bootstrap") .help("Connect to the RPC server to download the eth2_config via the HTTP API.") ) - .subcommand(SubCommand::with_name("range") + .subcommand(SubCommand::with_name("insecure") .about("Uses the standard, predicatable `interop` keygen method to produce a range \ of predicatable private keys and starts performing their validator duties.") .arg(Arg::with_name("first_validator") @@ -271,7 +271,7 @@ fn process_testnet_subcommand( }; client_config.key_source = match cli_args.subcommand() { - ("range", Some(sub_cli_args)) => { + ("insecure", Some(sub_cli_args)) => { let first = sub_cli_args .value_of("first_validator") .ok_or_else(|| "No first validator supplied")? 
From d445ae6ee8b605de9fd0f24e29339fbbc382d240 Mon Sep 17 00:00:00 2001 From: Paul Hauner Date: Sun, 1 Sep 2019 20:34:14 +1000 Subject: [PATCH 170/305] Update interop docs with val client CLI --- book/src/interop.md | 34 +++++++++++++++++++++++--------- book/src/testnets.md | 46 +++++++++++++++++++++++++++++--------------- 2 files changed, 56 insertions(+), 24 deletions(-) diff --git a/book/src/interop.md b/book/src/interop.md index 79d4a1376..ea00c4ce8 100644 --- a/book/src/interop.md +++ b/book/src/interop.md @@ -46,20 +46,36 @@ To start the node (each time creating a fresh database and configuration in ``` $ ./beacon_node testnet -f quick 8 1567222226 ``` - ->This method conforms the ["Quick-start +> Notes: +> +> - This method conforms the ["Quick-start genesis"](https://github.com/ethereum/eth2.0-pm/tree/6e41fcf383ebeb5125938850d8e9b4e9888389b4/interop/mocked_start#quick-start-genesis) method in the `ethereum/eth2.0-pm` repository. -> -> The `-f` flag ignores any existing database or configuration, backing them up -before re-initializing. `8` is the validator count and `1567222226` is the -genesis time. -> -> See `$ ./beacon_node testnet quick --help` for more configuration options. +> - The `-f` flag ignores any existing database or configuration, backing them +> up before re-initializing. +> - `8` is the validator count and `1567222226` is the genesis time. +> - See `$ ./beacon_node testnet quick --help` for more configuration options. #### Validator Client -**TODO** +Start the validator client with: + +``` +$ ./validator_client testnet -b insecure 0 8 +``` +> Notes: +> +> - The `-b` flag means the validator client will "bootstrap" specs and config +> from the beacon node. +> - The `insecure` command means the [interop +> keypairs](https://github.com/ethereum/eth2.0-pm/tree/6e41fcf383ebeb5125938850d8e9b4e9888389b4/interop/mocked_start#pubkeyprivkey-generation) +> will be used. 
+> - The `0 8` indicates that this validator client should manage 8 validators, +> starting at validator 0 (the first deposited validator). +> - The validator client will try to connect to the beacon node at `localhost`. +> See `--help` to configure that address and other features. +> - The validator client will operate very loosely in `testnet` mode, happily +> swapping between chains and creating double-votes. #### Starting from a genesis file diff --git a/book/src/testnets.md b/book/src/testnets.md index c07797ba0..bf41e455d 100644 --- a/book/src/testnets.md +++ b/book/src/testnets.md @@ -12,7 +12,7 @@ Setup a development environment, build the project and navigate to the `target/release` directory. 1. Start the first node: `$ ./beacon_node testnet -f recent 8` -1. Start a validator client: **TODO** +1. Start a validator client: `$ ./validator_client testnet -b insecure 0 8` 1. Start another node `$ ./beacon_node -b 10 testnet -f bootstrap http://localhost:5052` _Repeat #3 to add more nodes._ @@ -30,16 +30,32 @@ Start a new node (creating a fresh database and configuration in `~/.lighthouse` $ ./beacon_node testnet -f recent 8 ``` -> The `-f` flag ignores any existing database or configuration, backing them up -before re-initializing. `8` is number of validators with deposits in the -genesis state. +> Notes: > -> See `$ ./beacon_node testnet recent --help` for more configuration options, -including `minimal`/`mainnet` specification. +> - The `-f` flag ignores any existing database or configuration, backing them +> up before re-initializing. +> - `8` is number of validators with deposits in the genesis state. +> - See `$ ./beacon_node testnet recent --help` for more configuration options, +> including `minimal`/`mainnet` specification. 
## Starting the Validator Client -**TODO** +In a new terminal window, start the validator client with: + +``` +$ ./validator_client testnet -b insecure 0 8 +``` + +> Notes: +> +> - The `-b` flag means the validator client will "bootstrap" specs and config +> from the beacon node. +> - The `insecure` command uses predictable, well-known private keys. Since +> this is just a local testnet, these are fine. +> - The `0 8` indicates that this validator client should manage 8 validators, +> starting at validator 0 (the first deposited validator). +> - The validator client will try to connect to the beacon node at `localhost`. +> See `--help` to configure that address and other features. ## Adding another Beacon Node @@ -53,12 +69,12 @@ In a new terminal terminal, run: $ ./beacon_node -b 10 testnet -r bootstrap http://localhost:5052 ``` -> The `-b` (or `--port-bump`) increases all the listening TCP/UDP ports of the -new node to `10` higher. Your first node's HTTP server was at TCP `5052` but -this one will be at `5062`. +> Notes: > -> The `-r` flag creates a new data directory in your home with a random string -appended, to avoid conflicting with any other running node. -> -> The HTTP address is the API of the first node. The new node will download -configuration via HTTP before starting sync via libp2p. +> - The `-b` (or `--port-bump`) increases all the listening TCP/UDP ports of +> the new node to `10` higher. Your first node's HTTP server was at TCP +> `5052` but this one will be at `5062`. +> - The `-r` flag creates a new data directory in your home with a random +> string appended, to avoid conflicting with any other running node. +> - The HTTP address is the API of the first node. The new node will download +> configuration via HTTP before starting sync via libp2p. From 6db1a191696d34a244ccdfe48e3292d3efc54991 Mon Sep 17 00:00:00 2001 From: Paul Hauner Date: Sun, 1 Sep 2019 22:20:31 +1000 Subject: [PATCH 171/305] Remove stray dbg! 
--- beacon_node/beacon_chain/src/beacon_chain_builder.rs | 1 - 1 file changed, 1 deletion(-) diff --git a/beacon_node/beacon_chain/src/beacon_chain_builder.rs b/beacon_node/beacon_chain/src/beacon_chain_builder.rs index fdddf6481..06d2818e2 100644 --- a/beacon_node/beacon_chain/src/beacon_chain_builder.rs +++ b/beacon_node/beacon_chain/src/beacon_chain_builder.rs @@ -143,7 +143,6 @@ fn interop_genesis_state( let eth1_block_hash = Hash256::from_slice(&[42; 32]); let eth1_timestamp = 2_u64.pow(40); let amount = spec.max_effective_balance; - dbg!(amount); let withdrawal_credentials = |pubkey: &PublicKey| { let mut credentials = hash(&pubkey.as_ssz_bytes()); From 82b4a1b3eb34b83f8513777aacb0882684ccd40c Mon Sep 17 00:00:00 2001 From: Paul Hauner Date: Sun, 1 Sep 2019 22:21:29 +1000 Subject: [PATCH 172/305] Fix multiple data_dirs in config --- beacon_node/src/config.rs | 54 +++++++++++++++++++-------------------- 1 file changed, 27 insertions(+), 27 deletions(-) diff --git a/beacon_node/src/config.rs b/beacon_node/src/config.rs index c9ad964f5..efc0b125c 100644 --- a/beacon_node/src/config.rs +++ b/beacon_node/src/config.rs @@ -33,7 +33,7 @@ pub fn get_configs(cli_args: &ArgMatches, log: &Logger) -> Result { info!( log, "Resuming from existing datadir"; - "path" => format!("{:?}", builder.data_dir) + "path" => format!("{:?}", builder.client_config.data_dir) ); // If no primary subcommand was given, start the beacon chain from an existing @@ -42,7 +42,7 @@ pub fn get_configs(cli_args: &ArgMatches, log: &Logger) -> Result { // Whilst there is no large testnet or mainnet force the user to specify how they want // to start a new chain (e.g., from a genesis YAML file, another node, etc). - if !builder.data_dir.exists() { + if !builder.client_config.data_dir.exists() { return Err( "No datadir found. To start a new beacon chain, see `testnet --help`. 
\ Use `--datadir` to specify a different directory" @@ -98,7 +98,7 @@ fn process_testnet_subcommand( info!( log, "Creating new datadir"; - "path" => format!("{:?}", builder.data_dir) + "path" => format!("{:?}", builder.client_config.data_dir) ); // Start matching on the second subcommand (e.g., `testnet bootstrap ...`). @@ -166,7 +166,6 @@ fn process_testnet_subcommand( /// Allows for building a set of configurations based upon `clap` arguments. struct ConfigBuilder<'a> { log: &'a Logger, - pub data_dir: PathBuf, eth2_config: Eth2Config, client_config: ClientConfig, } @@ -189,11 +188,13 @@ impl<'a> ConfigBuilder<'a> { }) .ok_or_else(|| "Unable to find a home directory for the datadir".to_string())?; + let mut client_config = ClientConfig::default(); + client_config.data_dir = data_dir; + Ok(Self { log, - data_dir, eth2_config: Eth2Config::minimal(), - client_config: ClientConfig::default(), + client_config, }) } @@ -208,7 +209,7 @@ impl<'a> ConfigBuilder<'a> { let backup_dir = { let mut s = String::from("backup_"); s.push_str(&random_string(6)); - self.data_dir.join(s) + self.client_config.data_dir.join(s) }; fs::create_dir_all(&backup_dir) @@ -229,8 +230,8 @@ impl<'a> ConfigBuilder<'a> { Ok(()) }; - move_to_backup_dir(&self.data_dir.join(CLIENT_CONFIG_FILENAME))?; - move_to_backup_dir(&self.data_dir.join(ETH2_CONFIG_FILENAME))?; + move_to_backup_dir(&self.client_config.data_dir.join(CLIENT_CONFIG_FILENAME))?; + move_to_backup_dir(&self.client_config.data_dir.join(ETH2_CONFIG_FILENAME))?; if let Some(db_path) = self.client_config.db_path() { move_to_backup_dir(&db_path)?; @@ -280,12 +281,10 @@ impl<'a> ConfigBuilder<'a> { /// /// Useful for easily spinning up ephemeral testnets. 
pub fn set_random_datadir(&mut self) -> Result<()> { - let mut s = DEFAULT_DATA_DIR.to_string(); - s.push_str("_random_"); - s.push_str(&random_string(6)); - - self.data_dir.pop(); - self.data_dir.push(s); + self.client_config + .data_dir + .push(format!("random_{}", random_string(6))); + self.client_config.network.network_dir = self.client_config.data_dir.join("network"); Ok(()) } @@ -339,16 +338,16 @@ impl<'a> ConfigBuilder<'a> { // Do not permit creating a new config when the datadir exists. if db_exists { - return Err("Database already exists. See `-f` in `testnet --help`".into()); + return Err("Database already exists. See `-f` or `-r` in `testnet --help`".into()); } // Create `datadir` and any non-existing parent directories. - fs::create_dir_all(&self.data_dir).map_err(|e| { + fs::create_dir_all(&self.client_config.data_dir).map_err(|e| { crit!(self.log, "Failed to initialize data dir"; "error" => format!("{}", e)); format!("{}", e) })?; - let client_config_file = self.data_dir.join(CLIENT_CONFIG_FILENAME); + let client_config_file = self.client_config.data_dir.join(CLIENT_CONFIG_FILENAME); if client_config_file.exists() { return Err(format!( "Datadir is not clean, {} exists. See `-f` in `testnet --help`.", @@ -357,13 +356,13 @@ impl<'a> ConfigBuilder<'a> { } else { // Write the onfig to a TOML file in the datadir. write_to_file( - self.data_dir.join(CLIENT_CONFIG_FILENAME), + self.client_config.data_dir.join(CLIENT_CONFIG_FILENAME), &self.client_config, ) .map_err(|e| format!("Unable to write {} file: {:?}", CLIENT_CONFIG_FILENAME, e))?; } - let eth2_config_file = self.data_dir.join(ETH2_CONFIG_FILENAME); + let eth2_config_file = self.client_config.data_dir.join(ETH2_CONFIG_FILENAME); if eth2_config_file.exists() { return Err(format!( "Datadir is not clean, {} exists. See `-f` in `testnet --help`.", @@ -371,8 +370,11 @@ impl<'a> ConfigBuilder<'a> { )); } else { // Write the config to a TOML file in the datadir. 
- write_to_file(self.data_dir.join(ETH2_CONFIG_FILENAME), &self.eth2_config) - .map_err(|e| format!("Unable to write {} file: {:?}", ETH2_CONFIG_FILENAME, e))?; + write_to_file( + self.client_config.data_dir.join(ETH2_CONFIG_FILENAME), + &self.eth2_config, + ) + .map_err(|e| format!("Unable to write {} file: {:?}", ETH2_CONFIG_FILENAME, e))?; } Ok(()) @@ -386,7 +388,7 @@ impl<'a> ConfigBuilder<'a> { // // For now we return an error. In the future we may decide to boot a default (e.g., // public testnet or mainnet). - if !self.data_dir.exists() { + if !self.client_config.data_dir.exists() { return Err( "No datadir found. Either create a new testnet or specify a different `--datadir`." .into(), @@ -407,8 +409,8 @@ impl<'a> ConfigBuilder<'a> { ); } - self.load_eth2_config(self.data_dir.join(ETH2_CONFIG_FILENAME))?; - self.load_client_config(self.data_dir.join(CLIENT_CONFIG_FILENAME))?; + self.load_eth2_config(self.client_config.data_dir.join(ETH2_CONFIG_FILENAME))?; + self.load_client_config(self.client_config.data_dir.join(CLIENT_CONFIG_FILENAME))?; Ok(()) } @@ -464,8 +466,6 @@ impl<'a> ConfigBuilder<'a> { return Err("Specification constant mismatch".into()); } - self.client_config.data_dir = self.data_dir; - Ok((self.client_config, self.eth2_config)) } } From 960082fe4ead7d93d2f8c5a1f9e463132cc5e778 Mon Sep 17 00:00:00 2001 From: Paul Hauner Date: Sun, 1 Sep 2019 22:21:46 +1000 Subject: [PATCH 173/305] Set all listen addresses to 0.0.0.0 for testnets --- beacon_node/src/config.rs | 18 ++++++++++++++++++ 1 file changed, 18 insertions(+) diff --git a/beacon_node/src/config.rs b/beacon_node/src/config.rs index efc0b125c..a3829a33c 100644 --- a/beacon_node/src/config.rs +++ b/beacon_node/src/config.rs @@ -5,6 +5,7 @@ use lighthouse_bootstrap::Bootstrapper; use rand::{distributions::Alphanumeric, Rng}; use slog::{crit, info, warn, Logger}; use std::fs; +use std::net::Ipv4Addr; use std::path::{Path, PathBuf}; pub const DEFAULT_DATA_DIR: &str = ".lighthouse"; @@ -101,6 
+102,10 @@ fn process_testnet_subcommand( "path" => format!("{:?}", builder.client_config.data_dir) ); + // When using the testnet command we listen on all addresses. + builder.set_listen_addresses("0.0.0.0".into())?; + warn!(log, "All services listening on 0.0.0.0"); + // Start matching on the second subcommand (e.g., `testnet bootstrap ...`). match cli_args.subcommand() { ("bootstrap", Some(cli_args)) => { @@ -437,6 +442,19 @@ impl<'a> ConfigBuilder<'a> { Ok(()) } + /// Sets all listening addresses to the given `addr`. + pub fn set_listen_addresses(&mut self, addr: String) -> Result<()> { + let addr = addr + .parse::() + .map_err(|e| format!("Unable to parse default listen address: {:?}", e))?; + + self.client_config.network.listen_address = addr.clone().into(); + self.client_config.rpc.listen_address = addr.clone(); + self.client_config.rest_api.listen_address = addr.clone(); + + Ok(()) + } + /// Consumes self, returning the configs. /// /// The supplied `cli_args` should be the base-level `clap` cli_args (i.e., not a subcommand From a78b030f584c19ca129074600b6d984e5c2ae491 Mon Sep 17 00:00:00 2001 From: Paul Hauner Date: Sun, 1 Sep 2019 22:22:09 +1000 Subject: [PATCH 174/305] Fix rpc port-bump bug --- beacon_node/src/config.rs | 1 - 1 file changed, 1 deletion(-) diff --git a/beacon_node/src/config.rs b/beacon_node/src/config.rs index a3829a33c..c3dfad9ba 100644 --- a/beacon_node/src/config.rs +++ b/beacon_node/src/config.rs @@ -472,7 +472,6 @@ impl<'a> ConfigBuilder<'a> { self.client_config.network.libp2p_port += bump; self.client_config.network.discovery_port += bump; self.client_config.rpc.port += bump; - self.client_config.rpc.port += bump; self.client_config.rest_api.port += bump; } From aa3bc6bf670e809e48bc2a2a15a4b039b7d48874 Mon Sep 17 00:00:00 2001 From: Paul Hauner Date: Sun, 1 Sep 2019 22:22:20 +1000 Subject: [PATCH 175/305] Update book --- book/src/SUMMARY.md | 3 +- book/src/interop.md | 4 +-- book/src/intro.md | 2 +- book/src/testnets.md | 86 
+++++--------------------------------------- 4 files changed, 13 insertions(+), 82 deletions(-) diff --git a/book/src/SUMMARY.md b/book/src/SUMMARY.md index e08af247c..f0ad41144 100644 --- a/book/src/SUMMARY.md +++ b/book/src/SUMMARY.md @@ -3,5 +3,6 @@ * [Introduction](./intro.md) * [Development Environment](./setup.md) * [Testnets](./testnets.md) - * [Simple local testnet](./testnets.md) + * [Simple Local Testnet](./simple-testnet.md) * [Interop](./interop.md) + * [Interop Tips & Tricks](./interop-tips.md) diff --git a/book/src/interop.md b/book/src/interop.md index ea00c4ce8..c1a1d4a69 100644 --- a/book/src/interop.md +++ b/book/src/interop.md @@ -67,14 +67,14 @@ $ ./validator_client testnet -b insecure 0 8 > > - The `-b` flag means the validator client will "bootstrap" specs and config > from the beacon node. -> - The `insecure` command means the [interop +> - The `insecure` command dictates that the [interop > keypairs](https://github.com/ethereum/eth2.0-pm/tree/6e41fcf383ebeb5125938850d8e9b4e9888389b4/interop/mocked_start#pubkeyprivkey-generation) > will be used. > - The `0 8` indicates that this validator client should manage 8 validators, > starting at validator 0 (the first deposited validator). > - The validator client will try to connect to the beacon node at `localhost`. > See `--help` to configure that address and other features. -> - The validator client will operate very loosely in `testnet` mode, happily +> - The validator client will operate very unsafely in `testnet` mode, happily > swapping between chains and creating double-votes. #### Starting from a genesis file diff --git a/book/src/intro.md b/book/src/intro.md index f290b7e40..e0e3cd6a0 100644 --- a/book/src/intro.md +++ b/book/src/intro.md @@ -21,7 +21,7 @@ Documentation is provided for **researchers and developers** working on Ethereum 2.0 and assumes prior knowledge on the topic. - Get started with [development environment setup](setup.html). 
-- [Run a simple testnet](testnets.html) in Only Three CLI Commandsâ„¢. +- [Run a simple testnet](simple-testnet.html) in Only Three CLI Commandsâ„¢. - Read about our interop workflow. - API? diff --git a/book/src/testnets.md b/book/src/testnets.md index bf41e455d..180673fb3 100644 --- a/book/src/testnets.md +++ b/book/src/testnets.md @@ -1,80 +1,10 @@ -# Simple Local Testnet +# Testnets -You can setup a local, two-node testnet in **Only Three CLI Commandsâ„¢**. +Lighthouse does not offer a public testnet _yet_. In the meantime, it's easy to +start a local testnet: -Follow the [Quick instructions](#tldr) version if you're confident, or see -[Detailed instructions](#detail) for more. - - -## Quick instructions - -Setup a development environment, build the project and navigate to the -`target/release` directory. - -1. Start the first node: `$ ./beacon_node testnet -f recent 8` -1. Start a validator client: `$ ./validator_client testnet -b insecure 0 8` -1. Start another node `$ ./beacon_node -b 10 testnet -f bootstrap http://localhost:5052` - -_Repeat #3 to add more nodes._ - -## Detailed instructions - -First, setup a Lighthouse development environment and navigate to the -`target/release` directory (this is where the binaries are located). - -## Starting the Beacon Node - -Start a new node (creating a fresh database and configuration in `~/.lighthouse`), using: - -``` -$ ./beacon_node testnet -f recent 8 -``` - -> Notes: -> -> - The `-f` flag ignores any existing database or configuration, backing them -> up before re-initializing. -> - `8` is number of validators with deposits in the genesis state. -> - See `$ ./beacon_node testnet recent --help` for more configuration options, -> including `minimal`/`mainnet` specification. 
- -## Starting the Validator Client - -In a new terminal window, start the validator client with: - -``` -$ ./validator_client testnet -b insecure 0 8 -``` - -> Notes: -> -> - The `-b` flag means the validator client will "bootstrap" specs and config -> from the beacon node. -> - The `insecure` command uses predictable, well-known private keys. Since -> this is just a local testnet, these are fine. -> - The `0 8` indicates that this validator client should manage 8 validators, -> starting at validator 0 (the first deposited validator). -> - The validator client will try to connect to the beacon node at `localhost`. -> See `--help` to configure that address and other features. - -## Adding another Beacon Node - -You may connect another (non-validating) node to your local network using the -lighthouse `bootstrap` command. - -In a new terminal terminal, run: - - -``` -$ ./beacon_node -b 10 testnet -r bootstrap http://localhost:5052 -``` - -> Notes: -> -> - The `-b` (or `--port-bump`) increases all the listening TCP/UDP ports of -> the new node to `10` higher. Your first node's HTTP server was at TCP -> `5052` but this one will be at `5062`. -> - The `-r` flag creates a new data directory in your home with a random -> string appended, to avoid conflicting with any other running node. -> - The HTTP address is the API of the first node. The new node will download -> configuration via HTTP before starting sync via libp2p. +- [Run a simple testnet](testnets.html) in Only Three CLI Commandsâ„¢. +- Developers of other Eth2 clients should see the [interop guide](interop.html). +- The [sigp/lighthouse-docker](https://github.com/sigp/lighthouse-docker) repo + contains a `docker-compose` setup that runs a multi-node network with + built-in metrics and monitoring dashboards, all from your local machine. 
From 8b4b13cb2f6ab332d2f9633ad852b3a97bc6a0b7 Mon Sep 17 00:00:00 2001 From: Paul Hauner Date: Sun, 1 Sep 2019 22:22:39 +1000 Subject: [PATCH 176/305] Add missed book pages --- book/src/interop-tips.md | 104 +++++++++++++++++++++++++++++++++++++ book/src/simple-testnet.md | 80 ++++++++++++++++++++++++++++ 2 files changed, 184 insertions(+) create mode 100644 book/src/interop-tips.md create mode 100644 book/src/simple-testnet.md diff --git a/book/src/interop-tips.md b/book/src/interop-tips.md new file mode 100644 index 000000000..e581139c4 --- /dev/null +++ b/book/src/interop-tips.md @@ -0,0 +1,104 @@ +# Interop Tips & Tricks + +This document contains a list of tips and tricks that may be useful during +interop testing. + +## Command-line Interface + +The `--help` command provides detail on the CLI interface. Here are some +interop-specific CLI commands. + +### Specify a boot node by multiaddr + +You can specify a static list of multiaddrs when booting Lighthouse using +the `--libp2p-addresses` command. + +#### Example: + +Runs an 8 validator quick-start chain, peering with `/ip4/192.168.0.1/tcp/9000` on boot. + +``` +$ ./beacon_node --libp2p-addresses /ip4/192.168.0.1/tcp/9000 testnet -f quick 8 1567222226 +``` + +### Specify a boot node by ENR + +You can specify a static list of Discv5 addresses when booting Lighthouse using +the `--boot-nodes` command. + +#### Example: + +Runs an 8 validator quick-start chain, peering with `-IW4QB2...` on boot. + +``` +$ ./beacon_node --boot-nodes -IW4QB2Hi8TPuEzQ41Cdf1r2AUU1FFVFDBJdJyOkWk2qXpZfFZQy2YnJIyoT_5fnbtrXUouoskmydZl4pIg90clIkYUDgmlwhH8AAAGDdGNwgiMog3VkcIIjKIlzZWNwMjU2azGhAjg0-DsTkQynhJCRnLLttBK1RS78lmUkLa-wgzAi-Ob5 testnet -f quick 8 1567222226 +``` + +### Avoid port clashes when starting nodes + +Starting a second Lighthouse node on the same machine will fail due to TCP/UDP +port collisions. Use the `-b` (`--port-bump`) flag to increase all listening +ports by some `n`. 
+ +#### Example: + +Increase all ports by `10` (using multiples of `10` is recommended). + +``` +$ ./beacon_node -b 10 testnet -f quick 8 1567222226 +``` + +## HTTP API + +Examples assume there is a Lighthouse node exposing a HTTP API on +`localhost:5052`. Responses are JSON. + +### Get the node's ENR + +``` +$ curl localhost:5052/network/enr + +"-IW4QFyf1VlY5pZs0xZuvKMRZ9_cdl9WMCDAAJXZiZiuGcfRYoU40VPrYDLQj5prneJIz3zcbTjHp9BbThc-yiymJO8HgmlwhH8AAAGDdGNwgiMog3VkcIIjKIlzZWNwMjU2azGhAjg0-DsTkQynhJCRnLLttBK1RS78lmUkLa-wgzAi-Ob5"% +``` + +### Get a list of connected peer ids + +``` +$ curl localhost:5052/network/peers + +["QmeMFRTWfo3KbVG7dEBXGhyRMa29yfmnJBXW84rKuGEhuL"]% +``` + +### Get the node's peer id + +``` +curl localhost:5052/network/peer_id + +"QmRD1qs2AqNNRdBcGHUGpUGkpih5cmdL32mhh22Sy79xsJ"% +``` + +### Get the list of listening libp2p addresses + +Lists all the libp2p multiaddrs that the node is listening on. + +``` +curl localhost:5052/network/listen_addresses + +["/ip4/127.0.0.1/tcp/9000","/ip4/192.168.1.121/tcp/9000","/ip4/172.17.0.1/tcp/9000","/ip4/172.42.0.1/tcp/9000","/ip6/::1/tcp/9000","/ip6/fdd3:c293:1bc::203/tcp/9000","/ip6/fdd3:c293:1bc:0:9aa9:b2ea:c610:44db/tcp/9000"]% +``` + +### Get the node's beacon chain head + +``` +curl localhost:5052/beacon/head + +{"slot":0,"block_root":"0x827bf71805540aa13f6d8c7d18b41b287b2094a4d7a28cbb8deb061dbf5df4f5","state_root":"0x90a78d73294bc9c7519a64e1912161be0e823eb472012ff54204e15a4d717fa5"}% +``` + +### Get the node's finalized checkpoint + +``` +curl localhost:5052/beacon/latest_finalized_checkpoint + +{"epoch":0,"root":"0x0000000000000000000000000000000000000000000000000000000000000000"}% +``` diff --git a/book/src/simple-testnet.md b/book/src/simple-testnet.md new file mode 100644 index 000000000..bf41e455d --- /dev/null +++ b/book/src/simple-testnet.md @@ -0,0 +1,80 @@ +# Simple Local Testnet + +You can setup a local, two-node testnet in **Only Three CLI Commandsâ„¢**. 
+ +Follow the [Quick instructions](#tldr) version if you're confident, or see +[Detailed instructions](#detail) for more. + + +## Quick instructions + +Setup a development environment, build the project and navigate to the +`target/release` directory. + +1. Start the first node: `$ ./beacon_node testnet -f recent 8` +1. Start a validator client: `$ ./validator_client testnet -b insecure 0 8` +1. Start another node `$ ./beacon_node -b 10 testnet -f bootstrap http://localhost:5052` + +_Repeat #3 to add more nodes._ + +## Detailed instructions + +First, setup a Lighthouse development environment and navigate to the +`target/release` directory (this is where the binaries are located). + +## Starting the Beacon Node + +Start a new node (creating a fresh database and configuration in `~/.lighthouse`), using: + +``` +$ ./beacon_node testnet -f recent 8 +``` + +> Notes: +> +> - The `-f` flag ignores any existing database or configuration, backing them +> up before re-initializing. +> - `8` is number of validators with deposits in the genesis state. +> - See `$ ./beacon_node testnet recent --help` for more configuration options, +> including `minimal`/`mainnet` specification. + +## Starting the Validator Client + +In a new terminal window, start the validator client with: + +``` +$ ./validator_client testnet -b insecure 0 8 +``` + +> Notes: +> +> - The `-b` flag means the validator client will "bootstrap" specs and config +> from the beacon node. +> - The `insecure` command uses predictable, well-known private keys. Since +> this is just a local testnet, these are fine. +> - The `0 8` indicates that this validator client should manage 8 validators, +> starting at validator 0 (the first deposited validator). +> - The validator client will try to connect to the beacon node at `localhost`. +> See `--help` to configure that address and other features. 
+ +## Adding another Beacon Node + +You may connect another (non-validating) node to your local network using the +lighthouse `bootstrap` command. + +In a new terminal terminal, run: + + +``` +$ ./beacon_node -b 10 testnet -r bootstrap http://localhost:5052 +``` + +> Notes: +> +> - The `-b` (or `--port-bump`) increases all the listening TCP/UDP ports of +> the new node to `10` higher. Your first node's HTTP server was at TCP +> `5052` but this one will be at `5062`. +> - The `-r` flag creates a new data directory in your home with a random +> string appended, to avoid conflicting with any other running node. +> - The HTTP address is the API of the first node. The new node will download +> configuration via HTTP before starting sync via libp2p. From 246dcaa0942067c4199d6559b406120f2166ddfa Mon Sep 17 00:00:00 2001 From: Paul Hauner Date: Sun, 1 Sep 2019 22:30:56 +1000 Subject: [PATCH 177/305] Add extra log to validator client --- validator_client/src/main.rs | 1 + 1 file changed, 1 insertion(+) diff --git a/validator_client/src/main.rs b/validator_client/src/main.rs index 5e9c036ca..d5d2fc27f 100644 --- a/validator_client/src/main.rs +++ b/validator_client/src/main.rs @@ -246,6 +246,7 @@ fn process_testnet_subcommand( log: &Logger, ) -> Result<(ClientConfig, Eth2Config)> { let eth2_config = if cli_args.is_present("bootstrap") { + info!(log, "Connecting to bootstrap server"); let bootstrapper = Bootstrapper::from_server_string(format!( "http://{}:{}", client_config.server, client_config.server_http_port From 74baeb4d08a958890b85a53ed93c6a382623b60b Mon Sep 17 00:00:00 2001 From: Age Manning Date: Mon, 2 Sep 2019 05:38:11 +1000 Subject: [PATCH 178/305] WIP - Upgrade Sync algorithm --- beacon_node/network/Cargo.toml | 1 + beacon_node/network/src/sync/manager.rs | 188 +++++++++++++++++--- beacon_node/network/src/sync/simple_sync.rs | 23 ++- 3 files changed, 180 insertions(+), 32 deletions(-) diff --git a/beacon_node/network/Cargo.toml b/beacon_node/network/Cargo.toml 
index dc08bd311..06fc06dde 100644 --- a/beacon_node/network/Cargo.toml +++ b/beacon_node/network/Cargo.toml @@ -19,3 +19,4 @@ futures = "0.1.25" error-chain = "0.12.0" tokio = "0.1.16" parking_lot = "0.9.0" +smallvec = "0.6.10" diff --git a/beacon_node/network/src/sync/manager.rs b/beacon_node/network/src/sync/manager.rs index b81da0991..9b2d780f4 100644 --- a/beacon_node/network/src/sync/manager.rs +++ b/beacon_node/network/src/sync/manager.rs @@ -1,33 +1,110 @@ +//! The `ImportManager` facilitates the block syncing logic of lighthouse. The current networking +//! specification provides two methods from which to obtain blocks from peers. The `BeaconBlocks` +//! request and the `RecentBeaconBlocks` request. The former is used to obtain a large number of +//! blocks and the latter allows for searching for blocks given a block-hash. +//! +//! These two RPC methods are designed for two types of syncing. +//! - Long range (batch) sync, when a client is out of date and needs to sync to the latest head. +//! - Parent lookup - when a peer provides us a block whose parent is unknown to us. +//! +//! Both of these syncing strategies are built into the `ImportManager`. +//! +//! +//! Currently the long-range (batch) syncing method functions by opportunistically downloading +//! batches of blocks from all peers who know about a chain that we do not. When a new peer connects +//! which has a later head that is greater than `SLOT_IMPORT_TOLERANCE` from our current head slot, +//! the manager's state becomes `Syncing` and begins a batch syncing process with this peer. If +//! further peers connect, this process is run in parallel with those peers, until our head is +//! within `SLOT_IMPORT_TOLERANCE` of all connected peers. +//! +//! Batch Syncing +//! +//! This syncing process starts by requesting `MAX_BLOCKS_PER_REQUEST` blocks from a peer with an +//! unknown chain (with a greater slot height) starting from our current head slot. If the earliest +//! 
block returned is known to us, then the group of blocks returned form part of a known chain, +//! and we process this batch of blocks, before requesting more batches forward and processing +//! those in turn until we reach the peer's chain's head. If the first batch doesn't contain a +//! block we know of, we must iteratively request blocks backwards (until our latest finalized head +//! slot) until we find a common ancestor before we can start processing the blocks. If no common +//! ancestor is found, the peer has a chain which is not part of our finalized head slot and we +//! drop the peer and the downloaded blocks. +//! Once we are fully synced with all known peers, the state of the manager becomes `Regular` which +//! then allows for parent lookups of propagated blocks. +//! +//! A schematic version of this logic with two chain variations looks like the following. +//! +//! |----------------------|---------------------------------| +//! ^finalized head ^current local head ^remotes head +//! +//! +//! An example of the remotes chain diverging before our current head. +//! |---------------------------| +//! ^---------------------------------------------| +//! ^chain diverges |initial batch| ^remotes head +//! +//! In this example, we cannot process the initial batch as it is not on a known chain. We must +//! then backwards sync until we reach a common chain to begin forwarding batch syncing. +//! +//! +//! Parent Lookup +//! +//! When a block with an unknown parent is received and we are in `Regular` sync mode, the block is +//! queued for lookup. A round-robin approach is used to request the parent from the known list of +//! fully sync'd peers. If `PARENT_FAIL_TOLERANCE` attempts at requesting the block fails, we +//! drop the propagated block and downvote the peer that sent it to us. 
+ use super::simple_sync::{PeerSyncInfo, FUTURE_SLOT_TOLERANCE}; use beacon_chain::{BeaconChain, BeaconChainTypes, BlockProcessingOutcome}; use eth2_libp2p::rpc::methods::*; use eth2_libp2p::rpc::RequestId; use eth2_libp2p::PeerId; use slog::{debug, info, trace, warn, Logger}; +use smallvec::SmallVec; use std::collections::{HashMap, HashSet}; use std::ops::{Add, Sub}; -use std::sync::Arc; +use std::sync::{Arc, Weak}; use types::{BeaconBlock, EthSpec, Hash256, Slot}; +/// Blocks are downloaded in batches from peers. This constant specifies how many blocks per batch +/// is requested. Currently the value is small for testing. This will be incremented for +/// production. const MAX_BLOCKS_PER_REQUEST: u64 = 10; -/// The number of slots that we can import blocks ahead of us, before going into full Sync mode. +/// The number of slots ahead of us that is allowed before requesting a long-range (batch) Sync +/// from a peer. If a peer is within this tolerance (forwards or backwards), it is treated as a +/// fully sync'd peer. const SLOT_IMPORT_TOLERANCE: usize = 10; +/// How many attempts we try to find a parent of a block before we give up trying. const PARENT_FAIL_TOLERANCE: usize = 3; +/// The maximum depth we will search for a parent block. In principle we should have sync'd any +/// canonical chain to its head once the peer connects. A chain should not appear where its depth +/// is further back than the most recent head slot. const PARENT_DEPTH_TOLERANCE: usize = SLOT_IMPORT_TOLERANCE * 2; #[derive(PartialEq)] +/// The current state of a block or batches lookup. enum BlockRequestsState { + /// The object is queued to be downloaded from a peer but has not yet been requested. Queued, + /// The batch or parent has been requested with the `RequestId` and we are awaiting a response. Pending(RequestId), - Complete, + /// The downloaded blocks are ready to be processed by the beacon chain. For a batch process + /// this means we have found a common chain. 
+ ReadyToProcess, + /// A failure has occurred and we will drop and downvote the peer that caused the request. Failed, } +/// `BlockRequests` keep track of the long-range (batch) sync process per peer. struct BlockRequests { + /// The peer's head slot and the target of this batch download. target_head_slot: Slot, + /// The peer's head root, used to specify which chain of blocks we are downloading from the + /// blocks. target_head_root: Hash256, + /// The blocks that we have currently downloaded from the peer that are yet to be processed. downloaded_blocks: Vec>, + /// The current state of this batch request. state: BlockRequestsState, /// Specifies whether the current state is syncing forwards or backwards. forward_sync: bool, @@ -35,16 +112,22 @@ struct BlockRequests { current_start_slot: Slot, } +/// Maintains a sequential list of parents to lookup and the lookup's current state. struct ParentRequests { + /// The blocks that have currently been downloaded. downloaded_blocks: Vec>, + /// The number of failed attempts to retrieve a parent block. If too many attempts occur, this + /// lookup is failed and rejected. failed_attempts: usize, - last_submitted_peer: PeerId, // to downvote the submitting peer. + /// The peer who last submitted a block. If the chain ends or fails, this is the peer that is + /// downvoted. + last_submitted_peer: PeerId, + /// The current state of the parent lookup. state: BlockRequestsState, } impl BlockRequests { - // gets the start slot for next batch - // last block slot downloaded plus 1 + /// Gets the next start slot for a batch and transitions the state to a Queued state. fn update_start_slot(&mut self) { if self.forward_sync { self.current_start_slot += Slot::from(MAX_BLOCKS_PER_REQUEST); @@ -56,58 +139,104 @@ impl BlockRequests { } #[derive(PartialEq, Debug, Clone)] +/// The current state of the `ImportManager`. enum ManagerState { + /// The manager is performing a long-range (batch) sync. 
In this mode, parent lookups are + /// disabled. Syncing, + /// The manager is up to date with all known peers and is connected to at least one + /// fully-syncing peer. In this state, parent lookups are enabled. Regular, + /// No useful peers are connected. Long-range sync's cannot proceed and we have no useful + /// peers to download parents for. More peers need to be connected before we can proceed. Stalled, } +/// The output states that can occur from driving (polling) the manager state machine. pub(crate) enum ImportManagerOutcome { + /// There is no further work to complete. The manager is waiting for further input. Idle, + /// A `BeaconBlocks` request is required. RequestBlocks { peer_id: PeerId, request_id: RequestId, request: BeaconBlocksRequest, }, + /// A `RecentBeaconBlocks` request is required. + RecentRequest(PeerId, RecentBeaconBlocksRequest), /// Updates information with peer via requesting another HELLO handshake. Hello(PeerId), - RecentRequest(PeerId, RecentBeaconBlocksRequest), + /// A peer has caused a punishable error and should be downvoted. DownvotePeer(PeerId), } +/// The primary object for handling and driving all the current syncing logic. It maintains the +/// current state of the syncing process, the number of useful peers, downloaded blocks and +/// controls the logic behind both the long-range (batch) sync and the on-going potential parent +/// look-up of blocks. pub struct ImportManager { - /// A reference to the underlying beacon chain. - chain: Arc>, + /// A weak reference to the underlying beacon chain. + chain: Weak>, + /// The current state of the import manager. state: ManagerState, + /// A collection of `BlockRequest` per peer that is currently being downloaded. Used in the + /// long-range (batch) sync process. import_queue: HashMap>, - parent_queue: Vec>, + /// A collection of parent block lookups. + parent_queue: SmallVec<[ParentRequests; 3]>, + /// The collection of known, connected, fully-sync'd peers. 
full_peers: HashSet, + /// The current request Id. This is used to keep track of responses to various outbound + /// requests. This is an internal accounting mechanism, request id's are never sent to any + /// peers. current_req_id: usize, + /// The logger for the import manager. log: Logger, } impl ImportManager { + /// Generates a new `ImportManager` given a logger and an Arc reference to a beacon chain. The + /// import manager keeps a weak reference to the beacon chain, which allows the chain to be + /// dropped during the syncing process. The syncing handles this termination gracefully. pub fn new(beacon_chain: Arc>, log: &slog::Logger) -> Self { ImportManager { - chain: beacon_chain.clone(), + chain: Arc::downgrade(&beacon_chain), state: ManagerState::Regular, import_queue: HashMap::new(), - parent_queue: Vec::new(), + parent_queue: SmallVec::new(), full_peers: HashSet::new(), current_req_id: 0, log: log.clone(), } } + /// A peer has connected which has blocks that are unknown to us. + /// + /// This function handles the logic associated with the connection of a new peer. If the peer + /// is sufficiently ahead of our current head, a long-range (batch) sync is started and + /// batches of blocks are queued to download from the peer. Batched blocks begin at our + /// current head. If the resulting downloaded blocks are part of our current chain, we + /// continue with a forward sync. If not, we download blocks (in batches) backwards until we + /// reach a common ancestor. Batches are then processed and downloaded sequentially forwards. + /// + /// If the peer is within the `SLOT_IMPORT_TOLERANCE`, then it's head is sufficiently close to + /// ours that we consider it fully sync'd with respect to our current chain. pub fn add_peer(&mut self, peer_id: PeerId, remote: PeerSyncInfo) { - // TODO: Improve comments. 
- // initially try to download blocks from our current head - // then backwards search all the way back to our finalized epoch until we match on a chain - // has to be done sequentially to find next slot to start the batch from + // ensure the beacon chain still exists + let chain = match self.chain.upgrade() { + Some(chain) => chain, + None => { + warn!(self.log, + "Beacon chain dropped. Peer not considered for sync"; + "peer_id" => format!("{:?}", peer_id)); + return; + } + }; - let local = PeerSyncInfo::from(&self.chain); + let local = PeerSyncInfo::from(&chain); - // If a peer is within SLOT_IMPORT_TOLERANCE from our head slot, ignore a batch sync + // If a peer is within SLOT_IMPORT_TOLERANCE from our head slot, ignore a batch sync, + // consider it a fully-sync'd peer. if remote.head_slot.sub(local.head_slot).as_usize() < SLOT_IMPORT_TOLERANCE { trace!(self.log, "Ignoring full sync with peer"; "peer" => format!("{:?}", peer_id), @@ -116,34 +245,53 @@ impl ImportManager { ); // remove the peer from the queue if it exists self.import_queue.remove(&peer_id); + self.add_full_peer(peer_id); + // return; } + // Check if the peer is significantly behind us. If within `SLOT_IMPORT_TOLERANCE` + // treat them as a fully synced peer. 
If not, ignore them in the sync process + if local.head_slot.sub(remote.head_slot).as_usize() < SLOT_IMPORT_TOLERANCE { + self.add_full_peer(peer_id); + } else { + debug!( + self.log, + "Out of sync peer connected"; + "peer" => format!("{:?}", peer_id), + ); + return; + } + + // Check if we are already downloading blocks from this peer, if so update, if not set up + // a new request structure if let Some(block_requests) = self.import_queue.get_mut(&peer_id) { // update the target head slot if remote.head_slot > block_requests.target_head_slot { block_requests.target_head_slot = remote.head_slot; } } else { + // not already downloading blocks from this peer let block_requests = BlockRequests { target_head_slot: remote.head_slot, // this should be larger than the current head. It is checked in the SyncManager before add_peer is called target_head_root: remote.head_root, downloaded_blocks: Vec::new(), state: BlockRequestsState::Queued, forward_sync: true, - current_start_slot: self.chain.best_slot(), + current_start_slot: chain.best_slot(), }; self.import_queue.insert(peer_id, block_requests); } } + /// A `BeaconBlocks` request has received a response. This function process the response. pub fn beacon_blocks_response( &mut self, peer_id: PeerId, request_id: RequestId, mut blocks: Vec>, ) { - // find the request + // find the request associated with this response let block_requests = match self .import_queue .get_mut(&peer_id) diff --git a/beacon_node/network/src/sync/simple_sync.rs b/beacon_node/network/src/sync/simple_sync.rs index 573ac9dd1..dd857d8c3 100644 --- a/beacon_node/network/src/sync/simple_sync.rs +++ b/beacon_node/network/src/sync/simple_sync.rs @@ -16,8 +16,6 @@ use types::{Attestation, BeaconBlock, Epoch, EthSpec, Hash256, Slot}; /// Otherwise we queue it. pub(crate) const FUTURE_SLOT_TOLERANCE: u64 = 1; -/// The number of slots behind our head that we still treat a peer as a fully synced peer. 
-const FULL_PEER_TOLERANCE: u64 = 10; const SHOULD_FORWARD_GOSSIP_BLOCK: bool = true; const SHOULD_NOT_FORWARD_GOSSIP_BLOCK: bool = false; @@ -189,18 +187,17 @@ impl SimpleSync { .exists::>(&remote.head_root) .unwrap_or_else(|_| false) { + trace!( + self.log, "Out of date or potentially sync'd peer found"; + "peer" => format!("{:?}", peer_id), + "remote_head_slot" => remote.head_slot + "remote_latest_finalized_epoch" => remote.finalized_epoch, + ); + // If the node's best-block is already known to us and they are close to our current // head, treat them as a fully sync'd peer. - if self.chain.best_slot().sub(remote.head_slot).as_u64() < FULL_PEER_TOLERANCE { - self.manager.add_full_peer(peer_id); - self.process_sync(); - } else { - debug!( - self.log, - "Out of sync peer connected"; - "peer" => format!("{:?}", peer_id), - ); - } + self.manager.add_peer(peer_id, remote); + self.process_sync(); } else { // The remote node has an equal or great finalized epoch and we don't know it's head. // @@ -218,6 +215,8 @@ impl SimpleSync { } } + /// This function drives the `ImportManager` state machine. The outcomes it provides are + /// actioned until the `ImportManager` is idle. 
fn process_sync(&mut self) { loop { match self.manager.poll() { From 4aa12dc4084513aaaf3aa5bb355e7bcd3005b1d3 Mon Sep 17 00:00:00 2001 From: Paul Hauner Date: Mon, 2 Sep 2019 09:59:52 +1000 Subject: [PATCH 179/305] Set a default BN bootstrap address --- beacon_node/src/main.rs | 1 + book/src/simple-testnet.md | 11 ++++++----- 2 files changed, 7 insertions(+), 5 deletions(-) diff --git a/beacon_node/src/main.rs b/beacon_node/src/main.rs index 26537c6f7..69ac6f1bd 100644 --- a/beacon_node/src/main.rs +++ b/beacon_node/src/main.rs @@ -252,6 +252,7 @@ fn main() { .arg(Arg::with_name("server") .value_name("HTTP_SERVER") .required(true) + .default_value("http://localhost:5052") .help("A HTTP server, with a http:// prefix")) .arg(Arg::with_name("libp2p-port") .short("p") diff --git a/book/src/simple-testnet.md b/book/src/simple-testnet.md index bf41e455d..b6fa19d6f 100644 --- a/book/src/simple-testnet.md +++ b/book/src/simple-testnet.md @@ -66,7 +66,7 @@ In a new terminal terminal, run: ``` -$ ./beacon_node -b 10 testnet -r bootstrap http://localhost:5052 +$ ./beacon_node -b 10 testnet -r bootstrap ``` > Notes: @@ -74,7 +74,8 @@ $ ./beacon_node -b 10 testnet -r bootstrap http://localhost:5052 > - The `-b` (or `--port-bump`) increases all the listening TCP/UDP ports of > the new node to `10` higher. Your first node's HTTP server was at TCP > `5052` but this one will be at `5062`. -> - The `-r` flag creates a new data directory in your home with a random -> string appended, to avoid conflicting with any other running node. -> - The HTTP address is the API of the first node. The new node will download -> configuration via HTTP before starting sync via libp2p. +> - The `-r` flag creates a new data directory with a random string appended +> (avoids data directory collisions between nodes). +> - The default bootstrap HTTP address is `http://localhost:5052`. The new node +> will download configuration via HTTP before starting sync via libp2p. 
+> - See `$ ./beacon_node testnet bootstrap --help` for more configuration. From 82dc84ebbff604304d5a08c2b6e09d1cdbd4024a Mon Sep 17 00:00:00 2001 From: Paul Hauner Date: Mon, 2 Sep 2019 10:22:29 +1000 Subject: [PATCH 180/305] Add slot-time CLI argument --- beacon_node/src/config.rs | 18 +++++++++++++++++- beacon_node/src/main.rs | 7 +++++++ 2 files changed, 24 insertions(+), 1 deletion(-) diff --git a/beacon_node/src/config.rs b/beacon_node/src/config.rs index c3dfad9ba..949b7277e 100644 --- a/beacon_node/src/config.rs +++ b/beacon_node/src/config.rs @@ -74,7 +74,7 @@ fn process_testnet_subcommand( if let Some(path_string) = cli_args.value_of("eth2-config") { if is_bootstrap { - return Err("Cannot supply --eth2-config when using bootsrap".to_string()); + return Err("Cannot supply --eth2-config when using bootstrap".to_string()); } let path = path_string @@ -85,6 +85,18 @@ fn process_testnet_subcommand( builder.update_spec_from_subcommand(&cli_args)?; } + if let Some(slot_time) = cli_args.value_of("slot-time") { + if is_bootstrap { + return Err("Cannot supply --slot-time flag whilst using bootstrap.".into()); + } + + let slot_time = slot_time + .parse::() + .map_err(|e| format!("Unable to parse slot-time: {:?}", e))?; + + builder.set_slot_time(slot_time); + } + if let Some(path_string) = cli_args.value_of("client-config") { let path = path_string .parse::() @@ -307,6 +319,10 @@ impl<'a> ConfigBuilder<'a> { self.eth2_config = eth2_config; } + fn set_slot_time(&mut self, milliseconds_per_slot: u64) { + self.eth2_config.spec.milliseconds_per_slot = milliseconds_per_slot; + } + /// Reads the subcommand and tries to update `self.eth2_config` based up on the `--spec` flag. /// /// Returns an error if the `--spec` flag is not present in the given `cli_args`. 
diff --git a/beacon_node/src/main.rs b/beacon_node/src/main.rs index 69ac6f1bd..a2a977e85 100644 --- a/beacon_node/src/main.rs +++ b/beacon_node/src/main.rs @@ -241,6 +241,13 @@ fn main() { backup directory.") .conflicts_with("random-datadir") ) + .arg( + Arg::with_name("slot-time") + .long("slot-time") + .short("t") + .value_name("MILLISECONDS") + .help("Defines the slot time when creating a new testnet.") + ) /* * `boostrap` * From 215200e9eba6151d92f77f9ca10bdea59361c093 Mon Sep 17 00:00:00 2001 From: Paul Hauner Date: Mon, 2 Sep 2019 10:34:24 +1000 Subject: [PATCH 181/305] Add interop tip --- book/src/interop-tips.md | 16 ++++++++++++++++ 1 file changed, 16 insertions(+) diff --git a/book/src/interop-tips.md b/book/src/interop-tips.md index e581139c4..969d49b4f 100644 --- a/book/src/interop-tips.md +++ b/book/src/interop-tips.md @@ -48,6 +48,22 @@ Increase all ports by `10` (using multiples of `10` is recommended). $ ./beacon_node -b 10 testnet -f quick 8 1567222226 ``` +### Start a testnet with a custom slot time + +Lighthouse can run at quite low slot times when there are few validators (e.g., +`500 ms` slot times should be fine for 8 validators). + +#### Example + +The `-t` (`--slot-time`) flag specifies the milliseconds per slot. + +``` +$ ./beacon_node -b 10 testnet -t 500 -f quick 8 1567222226 +``` + +> Note: `bootstrap` loads the slot time via HTTP and therefore conflicts with +> this flag. 
+ ## HTTP API Examples assume there is a Lighthouse node exposing a HTTP API on From 5616e0a2393b73ff5d692667dd78a2029d05da03 Mon Sep 17 00:00:00 2001 From: Paul Hauner Date: Mon, 2 Sep 2019 10:52:22 +1000 Subject: [PATCH 182/305] Update interop docs --- book/src/interop.md | 30 ++++++++++++++++++++++++++---- 1 file changed, 26 insertions(+), 4 deletions(-) diff --git a/book/src/interop.md b/book/src/interop.md index c1a1d4a69..3f5bfdbd4 100644 --- a/book/src/interop.md +++ b/book/src/interop.md @@ -23,9 +23,30 @@ After building the binaries with `cargo build --release --all`, there will be a `target/release` directory in the root of the Lighthouse repository. This is where the `beacon_node` and `validator_client` binaries are located. -## Interop Procedure +## CLI Overview -The following scenarios are documented: +The Lighthouse CLI has two primary tasks: + +- **Starting** a new testnet chain using `$ ./beacon_node testnet`. +- **Resuming** an existing chain with `$ ./beacon_node` (omit `testnet`). + +There are several methods for starting a new chain: + +- `quick`: using the `(validator_client, genesis_time)` tuple. +- `recent`: as above but `genesis_time` is set to the start of some recent time + window. +- `bootstrap`: a Lighthouse-specific method where we connect to a running node + and download it's specification and genesis state via the HTTP API. + +See `$ ./beacon_node testnet --help` for more detail. + +Once a chain has been started, it can be resumed by running `$ ./beacon_node` +(potentially supplying the `--datadir`, if a non-default directory was used). + + +## Scenarios + +The following scenarios are documented here: - [Starting a "quick-start" beacon node](#quick-start-beacon-node) from a `(validator_count, genesis)` tuple. @@ -34,8 +55,9 @@ The following scenarios are documented: - [Exporting a genesis state file](#exporting-a-genesis-file) from a running Lighthouse node. 
-First, setup a Lighthouse development environment and navigate to the -`target/release` directory (this is where the binaries are located). +All scenarios assume a working development environment and commands are based +in the `target/release` directory (this is the build dir for `cargo`). + #### Quick-start Beacon Node From d4bf1390c9cdf6753000bfd879bcb31dba073e20 Mon Sep 17 00:00:00 2001 From: Paul Hauner Date: Mon, 2 Sep 2019 11:39:28 +1000 Subject: [PATCH 183/305] Add interop eth1 data stub --- beacon_node/beacon_chain/src/beacon_chain.rs | 30 +++++++++++++++---- .../src/beacon_state/beacon_state_types.rs | 7 +++++ 2 files changed, 32 insertions(+), 5 deletions(-) diff --git a/beacon_node/beacon_chain/src/beacon_chain.rs b/beacon_node/beacon_chain/src/beacon_chain.rs index 6380d03b3..99dd9a642 100644 --- a/beacon_node/beacon_chain/src/beacon_chain.rs +++ b/beacon_node/beacon_chain/src/beacon_chain.rs @@ -4,6 +4,7 @@ use crate::fork_choice::{Error as ForkChoiceError, ForkChoice}; use crate::iter::{ReverseBlockRootIterator, ReverseStateRootIterator}; use crate::metrics; use crate::persisted_beacon_chain::{PersistedBeaconChain, BEACON_CHAIN_DB_KEY}; +use eth2_hashing::hash; use lmd_ghost::LmdGhost; use operation_pool::DepositInsertStatus; use operation_pool::{OperationPool, PersistedOperationPool}; @@ -1198,11 +1199,7 @@ impl BeaconChain { body: BeaconBlockBody { randao_reveal, // TODO: replace with real data. 
- eth1_data: Eth1Data { - deposit_count: state.eth1_data.deposit_count, - deposit_root: Hash256::zero(), - block_hash: Hash256::zero(), - }, + eth1_data: Self::eth1_data_stub(&state), graffiti, proposer_slashings: proposer_slashings.into(), attester_slashings: attester_slashings.into(), @@ -1231,6 +1228,22 @@ impl BeaconChain { Ok((block, state)) } + fn eth1_data_stub(state: &BeaconState) -> Eth1Data { + let current_epoch = state.current_epoch(); + let slots_per_voting_period = T::EthSpec::slots_per_eth1_voting_period() as u64; + let current_voting_period: u64 = current_epoch.as_u64() / slots_per_voting_period; + + // TODO: confirm that `int_to_bytes32` is correct. + let deposit_root = hash(&int_to_bytes32(current_voting_period)); + let block_hash = hash(&deposit_root); + + Eth1Data { + deposit_root: Hash256::from_slice(&deposit_root), + deposit_count: state.eth1_deposit_index, + block_hash: Hash256::from_slice(&block_hash), + } + } + /// Execute the fork choice algorithm and enthrone the result as the canonical head. pub fn fork_choice(&self) -> Result<(), Error> { metrics::inc_counter(&metrics::FORK_CHOICE_REQUESTS); @@ -1426,6 +1439,13 @@ impl BeaconChain { } } +/// Returns `int` as little-endian bytes with a length of 32. +fn int_to_bytes32(int: u64) -> Vec { + let mut vec = int.to_le_bytes().to_vec(); + vec.resize(32, 0); + vec +} + impl From for Error { fn from(e: DBError) -> Error { Error::DBError(e) diff --git a/eth2/types/src/beacon_state/beacon_state_types.rs b/eth2/types/src/beacon_state/beacon_state_types.rs index 0e76942dd..f589b3d3e 100644 --- a/eth2/types/src/beacon_state/beacon_state_types.rs +++ b/eth2/types/src/beacon_state/beacon_state_types.rs @@ -120,6 +120,13 @@ pub trait EthSpec: 'static + Default + Sync + Send + Clone + Debug + PartialEq { fn epochs_per_historical_vector() -> usize { Self::EpochsPerHistoricalVector::to_usize() } + + /// Returns the `SLOTS_PER_ETH1_VOTING_PERIOD` constant for this specification. 
+ /// + /// Spec v0.8.1 + fn slots_per_eth1_voting_period() -> usize { + Self::EpochsPerHistoricalVector::to_usize() + } } /// Macro to inherit some type values from another EthSpec. From d05c2d4110d24d02c3be35632b0ab1505da36d33 Mon Sep 17 00:00:00 2001 From: Paul Hauner Date: Mon, 2 Sep 2019 13:21:53 +1000 Subject: [PATCH 184/305] Start working on u64 json issue --- eth2/types/Cargo.toml | 1 + eth2/types/src/beacon_state/committee_cache/tests.rs | 2 +- eth2/types/src/slot_epoch.rs | 12 ++++++++++++ .../builders/testing_proposer_slashing_builder.rs | 4 ++-- 4 files changed, 16 insertions(+), 3 deletions(-) diff --git a/eth2/types/Cargo.toml b/eth2/types/Cargo.toml index 36cfc39ec..95d7a0317 100644 --- a/eth2/types/Cargo.toml +++ b/eth2/types/Cargo.toml @@ -31,3 +31,4 @@ tree_hash_derive = "0.2" [dev-dependencies] env_logger = "0.6.0" +serde_json = "^1.0" diff --git a/eth2/types/src/beacon_state/committee_cache/tests.rs b/eth2/types/src/beacon_state/committee_cache/tests.rs index 28e9d92f8..4c17d3f96 100644 --- a/eth2/types/src/beacon_state/committee_cache/tests.rs +++ b/eth2/types/src/beacon_state/committee_cache/tests.rs @@ -9,7 +9,7 @@ fn default_values() { let cache = CommitteeCache::default(); assert_eq!(cache.is_initialized_at(Epoch::new(0)), false); - assert_eq!(cache.active_validator_indices(), &[]); + assert!(&cache.active_validator_indices().is_empty()); assert_eq!(cache.get_crosslink_committee_for_shard(0), None); assert_eq!(cache.get_attestation_duties(0), None); assert_eq!(cache.active_validator_count(), 0); diff --git a/eth2/types/src/slot_epoch.rs b/eth2/types/src/slot_epoch.rs index bd611aa0c..748d6445f 100644 --- a/eth2/types/src/slot_epoch.rs +++ b/eth2/types/src/slot_epoch.rs @@ -191,4 +191,16 @@ mod epoch_tests { Epoch::from_ssz_bytes(&max_epoch.as_ssz_bytes()).unwrap() ); } + + #[test] + fn epoch_max_value_json() { + let x: Epoch = Epoch::from(u64::max_value()); + let json = serde_json::to_string(&x).expect("should json encode"); + + 
assert_eq!(&json, "18446744073709552000"); + assert_eq!( + serde_json::from_str::(&json).expect("should json decode"), + x + ); + } } diff --git a/eth2/types/src/test_utils/builders/testing_proposer_slashing_builder.rs b/eth2/types/src/test_utils/builders/testing_proposer_slashing_builder.rs index 6c72b520f..b97293427 100644 --- a/eth2/types/src/test_utils/builders/testing_proposer_slashing_builder.rs +++ b/eth2/types/src/test_utils/builders/testing_proposer_slashing_builder.rs @@ -39,15 +39,15 @@ impl TestingProposerSlashingBuilder { ..header_1.clone() }; + let epoch = slot.epoch(T::slots_per_epoch()); + header_1.signature = { let message = header_1.signed_root(); - let epoch = slot.epoch(T::slots_per_epoch()); signer(proposer_index, &message[..], epoch, Domain::BeaconProposer) }; header_2.signature = { let message = header_2.signed_root(); - let epoch = slot.epoch(T::slots_per_epoch()); signer(proposer_index, &message[..], epoch, Domain::BeaconProposer) }; From 6c50758bdf84747ff79e9bf834dec139682b5817 Mon Sep 17 00:00:00 2001 From: Paul Hauner Date: Mon, 2 Sep 2019 14:29:36 +1000 Subject: [PATCH 185/305] Add ResponseBuilder to rest_api --- beacon_node/rest_api/Cargo.toml | 3 ++ beacon_node/rest_api/src/beacon.rs | 19 +++----- beacon_node/rest_api/src/lib.rs | 2 + beacon_node/rest_api/src/response_builder.rs | 50 ++++++++++++++++++++ 4 files changed, 61 insertions(+), 13 deletions(-) create mode 100644 beacon_node/rest_api/src/response_builder.rs diff --git a/beacon_node/rest_api/Cargo.toml b/beacon_node/rest_api/Cargo.toml index 057de4f94..863ea04da 100644 --- a/beacon_node/rest_api/Cargo.toml +++ b/beacon_node/rest_api/Cargo.toml @@ -14,9 +14,12 @@ store = { path = "../store" } version = { path = "../version" } serde = { version = "1.0", features = ["derive"] } serde_json = "^1.0" +serde_yaml = "0.8" slog = "^2.2.3" slog-term = "^2.4.0" slog-async = "^2.3.0" +eth2_ssz = { path = "../../eth2/utils/ssz" } +eth2_ssz_derive = { path = "../../eth2/utils/ssz_derive" } 
state_processing = { path = "../../eth2/state_processing" } types = { path = "../../eth2/types" } clap = "2.32.0" diff --git a/beacon_node/rest_api/src/beacon.rs b/beacon_node/rest_api/src/beacon.rs index 1c66a2819..85f20294d 100644 --- a/beacon_node/rest_api/src/beacon.rs +++ b/beacon_node/rest_api/src/beacon.rs @@ -1,8 +1,9 @@ -use super::{success_response, ApiResult}; +use super::{success_response, ApiResult, ResponseBuilder}; use crate::{helpers::*, ApiError, UrlQuery}; use beacon_chain::{BeaconChain, BeaconChainTypes}; use hyper::{Body, Request}; use serde::Serialize; +use ssz_derive::Encode; use std::sync::Arc; use store::Store; use types::{BeaconBlock, BeaconState, EthSpec, Hash256, Slot}; @@ -33,7 +34,7 @@ pub fn get_head(req: Request) -> ApiResult Ok(success_response(Body::from(json))) } -#[derive(Serialize)] +#[derive(Serialize, Encode)] #[serde(bound = "T: EthSpec")] pub struct BlockResponse { pub root: Hash256, @@ -77,11 +78,7 @@ pub fn get_block(req: Request) -> ApiResult beacon_block: block, }; - let json: String = serde_json::to_string(&response).map_err(|e| { - ApiError::ServerError(format!("Unable to serialize BlockResponse: {:?}", e)) - })?; - - Ok(success_response(Body::from(json))) + ResponseBuilder::new(&req).body(&response) } /// HTTP handler to return a `BeaconBlock` root at a given `slot`. @@ -104,7 +101,7 @@ pub fn get_block_root(req: Request) -> ApiR Ok(success_response(Body::from(json))) } -#[derive(Serialize)] +#[derive(Serialize, Encode)] #[serde(bound = "T: EthSpec")] pub struct StateResponse { pub root: Hash256, @@ -144,11 +141,7 @@ pub fn get_state(req: Request) -> ApiResult beacon_state: state, }; - let json: String = serde_json::to_string(&response).map_err(|e| { - ApiError::ServerError(format!("Unable to serialize StateResponse: {:?}", e)) - })?; - - Ok(success_response(Body::from(json))) + ResponseBuilder::new(&req).body(&response) } /// HTTP handler to return a `BeaconState` root at a given `slot`. 
diff --git a/beacon_node/rest_api/src/lib.rs b/beacon_node/rest_api/src/lib.rs index 7c5ab30ef..1b5a2d6ee 100644 --- a/beacon_node/rest_api/src/lib.rs +++ b/beacon_node/rest_api/src/lib.rs @@ -8,6 +8,7 @@ mod helpers; mod metrics; mod network; mod node; +mod response_builder; mod spec; mod url_query; mod validator; @@ -18,6 +19,7 @@ use eth2_config::Eth2Config; use hyper::rt::Future; use hyper::service::service_fn_ok; use hyper::{Body, Method, Response, Server, StatusCode}; +use response_builder::ResponseBuilder; use slog::{info, o, warn}; use std::ops::Deref; use std::path::PathBuf; diff --git a/beacon_node/rest_api/src/response_builder.rs b/beacon_node/rest_api/src/response_builder.rs new file mode 100644 index 000000000..9b8819996 --- /dev/null +++ b/beacon_node/rest_api/src/response_builder.rs @@ -0,0 +1,50 @@ +use super::{ApiError, ApiResult}; +use http::header; +use hyper::{Body, Request, Response, StatusCode}; +use serde::Serialize; +use ssz::Encode; + +pub enum Encoding { + JSON, + SSZ, + YAML, +} + +pub struct ResponseBuilder { + encoding: Encoding, +} + +impl ResponseBuilder { + pub fn new(req: &Request) -> Self { + let encoding = match req.headers().get(header::CONTENT_TYPE) { + Some(h) if h == "application/ssz" => Encoding::SSZ, + Some(h) if h == "application/yaml" => Encoding::YAML, + _ => Encoding::JSON, + }; + + Self { encoding } + } + + pub fn body(self, item: &T) -> ApiResult { + let body: Body = match self.encoding { + Encoding::JSON => Body::from(serde_json::to_string(&item).map_err(|e| { + ApiError::ServerError(format!( + "Unable to serialize response body as JSON: {:?}", + e + )) + })?), + Encoding::SSZ => Body::from(item.as_ssz_bytes()), + Encoding::YAML => Body::from(serde_yaml::to_string(&item).map_err(|e| { + ApiError::ServerError(format!( + "Unable to serialize response body as YAML: {:?}", + e + )) + })?), + }; + + Response::builder() + .status(StatusCode::OK) + .body(Body::from(body)) + .map_err(|e| ApiError::ServerError(format!("Failed 
to build response: {:?}", e))) + } +} From 11a1505784570b946a18f2f1883e148c8be0fb78 Mon Sep 17 00:00:00 2001 From: Paul Hauner Date: Mon, 2 Sep 2019 15:07:32 +1000 Subject: [PATCH 186/305] Allow starting from SSZ genesis state --- .../beacon_chain/src/beacon_chain_builder.rs | 17 ++++++++++++++++- beacon_node/client/src/config.rs | 2 ++ beacon_node/client/src/lib.rs | 9 +++++++++ 3 files changed, 27 insertions(+), 1 deletion(-) diff --git a/beacon_node/beacon_chain/src/beacon_chain_builder.rs b/beacon_node/beacon_chain/src/beacon_chain_builder.rs index 06d2818e2..514a72a40 100644 --- a/beacon_node/beacon_chain/src/beacon_chain_builder.rs +++ b/beacon_node/beacon_chain/src/beacon_chain_builder.rs @@ -4,9 +4,10 @@ use lighthouse_bootstrap::Bootstrapper; use merkle_proof::MerkleTree; use rayon::prelude::*; use slog::Logger; -use ssz::Encode; +use ssz::{Decode, Encode}; use state_processing::initialize_beacon_state_from_eth1; use std::fs::File; +use std::io::prelude::*; use std::path::PathBuf; use std::sync::Arc; use std::time::SystemTime; @@ -61,6 +62,20 @@ impl BeaconChainBuilder { Ok(Self::from_genesis_state(genesis_state, spec, log)) } + pub fn ssz_state(file: &PathBuf, spec: ChainSpec, log: Logger) -> Result { + let mut file = File::open(file.clone()) + .map_err(|e| format!("Unable to open SSZ genesis state file {:?}: {:?}", file, e))?; + + let mut bytes = vec![]; + file.read_to_end(&mut bytes) + .map_err(|e| format!("Failed to read SSZ file: {:?}", e))?; + + let genesis_state = BeaconState::from_ssz_bytes(&bytes) + .map_err(|e| format!("Unable to parse SSZ genesis state file: {:?}", e))?; + + Ok(Self::from_genesis_state(genesis_state, spec, log)) + } + pub fn http_bootstrap(server: &str, spec: ChainSpec, log: Logger) -> Result { let bootstrapper = Bootstrapper::from_server_string(server.to_string()) .map_err(|e| format!("Failed to initialize bootstrap client: {}", e))?; diff --git a/beacon_node/client/src/config.rs b/beacon_node/client/src/config.rs index 
2f5389ce5..2fb62c3f9 100644 --- a/beacon_node/client/src/config.rs +++ b/beacon_node/client/src/config.rs @@ -54,6 +54,8 @@ pub enum BeaconChainStartMethod { }, /// Create a new beacon chain by loading a YAML-encoded genesis state from a file. Yaml { file: PathBuf }, + /// Create a new beacon chain by loading a SSZ-encoded genesis state from a file. + Ssz { file: PathBuf }, /// Create a new beacon chain by using a HTTP server (running our REST-API) to load genesis and /// finalized states and blocks. HttpBootstrap { server: String, port: Option }, diff --git a/beacon_node/client/src/lib.rs b/beacon_node/client/src/lib.rs index 766d12c56..1396ed45f 100644 --- a/beacon_node/client/src/lib.rs +++ b/beacon_node/client/src/lib.rs @@ -138,6 +138,15 @@ where ); BeaconChainBuilder::yaml_state(file, spec.clone(), log.clone())? } + BeaconChainStartMethod::Ssz { file } => { + info!( + log, + "Starting beacon chain"; + "file" => format!("{:?}", file), + "method" => "ssz" + ); + BeaconChainBuilder::ssz_state(file, spec.clone(), log.clone())? + } BeaconChainStartMethod::HttpBootstrap { server, port } => { info!( log, From bfbe7767123ffabd56bafe5146fd45918b1a1e96 Mon Sep 17 00:00:00 2001 From: Paul Hauner Date: Mon, 2 Sep 2019 15:07:48 +1000 Subject: [PATCH 187/305] Add special genesis state API endpoint --- beacon_node/rest_api/src/beacon.rs | 15 +++++++++++++++ beacon_node/rest_api/src/lib.rs | 1 + beacon_node/src/config.rs | 25 +++++++++++++++++++++++++ beacon_node/src/main.rs | 11 ++++++++--- 4 files changed, 49 insertions(+), 3 deletions(-) diff --git a/beacon_node/rest_api/src/beacon.rs b/beacon_node/rest_api/src/beacon.rs index 85f20294d..a4660836d 100644 --- a/beacon_node/rest_api/src/beacon.rs +++ b/beacon_node/rest_api/src/beacon.rs @@ -101,6 +101,21 @@ pub fn get_block_root(req: Request) -> ApiR Ok(success_response(Body::from(json))) } +/// HTTP handler to return a `BeaconState` at a given `root` or `slot`. 
+/// +/// Will not return a state if the request slot is in the future. Will return states higher than +/// the current head by skipping slots. +pub fn get_genesis_state(req: Request) -> ApiResult { + let beacon_chain = req + .extensions() + .get::>>() + .ok_or_else(|| ApiError::ServerError("Beacon chain extension missing".to_string()))?; + + let (_root, state) = state_at_slot(&beacon_chain, Slot::new(0))?; + + ResponseBuilder::new(&req).body(&state) +} + #[derive(Serialize, Encode)] #[serde(bound = "T: EthSpec")] pub struct StateResponse { diff --git a/beacon_node/rest_api/src/lib.rs b/beacon_node/rest_api/src/lib.rs index 1b5a2d6ee..4aab91e69 100644 --- a/beacon_node/rest_api/src/lib.rs +++ b/beacon_node/rest_api/src/lib.rs @@ -147,6 +147,7 @@ pub fn start_server( beacon::get_latest_finalized_checkpoint::(req) } (&Method::GET, "/beacon/state") => beacon::get_state::(req), + (&Method::GET, "/beacon/state/genesis") => beacon::get_genesis_state::(req), (&Method::GET, "/beacon/state_root") => beacon::get_state_root::(req), //TODO: Add aggreggate/filtered state lookups here, e.g. /beacon/validators/balances diff --git a/beacon_node/src/config.rs b/beacon_node/src/config.rs index 949b7277e..ba831c733 100644 --- a/beacon_node/src/config.rs +++ b/beacon_node/src/config.rs @@ -172,6 +172,31 @@ fn process_testnet_subcommand( genesis_time, }) } + ("file", Some(cli_args)) => { + let file = cli_args + .value_of("file") + .ok_or_else(|| "No filename specified")? 
+ .parse::() + .map_err(|e| format!("Unable to parse filename: {:?}", e))?; + + let format = cli_args + .value_of("format") + .ok_or_else(|| "No file format specified")?; + + let start_method = match format { + "yaml" => BeaconChainStartMethod::Yaml { file }, + "ssz" => BeaconChainStartMethod::Ssz { file }, + other => return Err(format!("Unknown genesis file format: {}", other)), + }; + + builder.set_beacon_chain_start_method(start_method) + } + (cmd, Some(_)) => { + return Err(format!( + "Invalid valid method specified: {}. See 'testnet --help'.", + cmd + )) + } _ => return Err("No testnet method specified. See 'testnet --help'.".into()), }; diff --git a/beacon_node/src/main.rs b/beacon_node/src/main.rs index a2a977e85..cf7a7b854 100644 --- a/beacon_node/src/main.rs +++ b/beacon_node/src/main.rs @@ -312,9 +312,14 @@ fn main() { * * Start a new node, using a genesis state loaded from a YAML file */ - .subcommand(SubCommand::with_name("yaml") - .about("Creates a new datadir where the genesis state is read from YAML. Will fail to parse \ - a YAML state that was generated to a different spec than that specified by --spec.") + .subcommand(SubCommand::with_name("file") + .about("Creates a new datadir where the genesis state is read from YAML. 
May fail to parse \ + a file that was generated to a different spec than that specified by --spec.") + .arg(Arg::with_name("format") + .value_name("FORMAT") + .required(true) + .possible_values(&["yaml", "ssz"]) + .help("The encoding of the state in the file.")) .arg(Arg::with_name("file") .value_name("YAML_FILE") .required(true) From ba22d28026c0af1e894d0f5394e19e2d7d040d49 Mon Sep 17 00:00:00 2001 From: Paul Hauner Date: Mon, 2 Sep 2019 15:36:58 +1000 Subject: [PATCH 188/305] Update docs for testnet file start methods --- book/src/interop.md | 36 ++++++++++++++++++++++++++++++++---- 1 file changed, 32 insertions(+), 4 deletions(-) diff --git a/book/src/interop.md b/book/src/interop.md index 3f5bfdbd4..a2f80584d 100644 --- a/book/src/interop.md +++ b/book/src/interop.md @@ -35,6 +35,7 @@ There are several methods for starting a new chain: - `quick`: using the `(validator_client, genesis_time)` tuple. - `recent`: as above but `genesis_time` is set to the start of some recent time window. +- `file`: loads the genesis file from disk in one of multiple formats. - `bootstrap`: a Lighthouse-specific method where we connect to a running node and download it's specification and genesis state via the HTTP API. @@ -85,12 +86,12 @@ Start the validator client with: ``` $ ./validator_client testnet -b insecure 0 8 ``` + > Notes: > > - The `-b` flag means the validator client will "bootstrap" specs and config > from the beacon node. -> - The `insecure` command dictates that the [interop -> keypairs](https://github.com/ethereum/eth2.0-pm/tree/6e41fcf383ebeb5125938850d8e9b4e9888389b4/interop/mocked_start#pubkeyprivkey-generation) +> - The `insecure` command dictates that the [interop keypairs](https://github.com/ethereum/eth2.0-pm/tree/6e41fcf383ebeb5125938850d8e9b4e9888389b4/interop/mocked_start#pubkeyprivkey-generation) > will be used. > - The `0 8` indicates that this validator client should manage 8 validators, > starting at validator 0 (the first deposited validator). 
@@ -101,8 +102,35 @@ $ ./validator_client testnet -b insecure 0 8 #### Starting from a genesis file -**TODO** +A genesis state can be read from file using the `testnet file` subcommand. +There are three supported formats: + +- `ssz` (default) +- `json` +- `yaml` + +Start a new node using `/tmp/genesis.ssz` as the genesis state: + +``` +$ ./beacon_node testnet --spec minimal -f file ssz /tmp/genesis.ssz +``` + +> Notes: +> +> - The `-f` flag ignores any existing database or configuration, backing them +> up before re-initializing. +> - See `$ ./beacon_node testnet file --help` for more configuration options. #### Exporting a genesis file -**TODO** +Genesis states can downloaded from a running Lighthouse node via the HTTP API. Three content-types are supported: + +- `application/json` +- `application/yaml` +- `application/ssz` + +Using `curl`, a genesis state can be downloaded to `/tmp/genesis.ssz`: + +``` +$ curl --header "Content-Type: application/ssz" "localhost:5052/beacon/state/genesis" -o /tmp/genesis.ssz +``` From 70f4052b2e67e0d7f14630f4e24c2c45fea54892 Mon Sep 17 00:00:00 2001 From: Paul Hauner Date: Mon, 2 Sep 2019 15:58:53 +1000 Subject: [PATCH 189/305] Allow starting testnet from JSON state --- beacon_node/beacon_chain/Cargo.toml | 1 + beacon_node/beacon_chain/src/beacon_chain_builder.rs | 10 ++++++++++ beacon_node/client/src/config.rs | 2 ++ beacon_node/client/src/lib.rs | 9 +++++++++ beacon_node/src/config.rs | 1 + beacon_node/src/main.rs | 4 ++-- 6 files changed, 25 insertions(+), 2 deletions(-) diff --git a/beacon_node/beacon_chain/Cargo.toml b/beacon_node/beacon_chain/Cargo.toml index d5594a49a..ae89ac1e1 100644 --- a/beacon_node/beacon_chain/Cargo.toml +++ b/beacon_node/beacon_chain/Cargo.toml @@ -18,6 +18,7 @@ rayon = "1.0" serde = "1.0" serde_derive = "1.0" serde_yaml = "0.8" +serde_json = "^1.0" slog = { version = "^2.2.3" , features = ["max_level_trace"] } sloggers = { version = "^0.3" } slot_clock = { path = "../../eth2/utils/slot_clock" } diff 
--git a/beacon_node/beacon_chain/src/beacon_chain_builder.rs b/beacon_node/beacon_chain/src/beacon_chain_builder.rs index 514a72a40..93c67447e 100644 --- a/beacon_node/beacon_chain/src/beacon_chain_builder.rs +++ b/beacon_node/beacon_chain/src/beacon_chain_builder.rs @@ -76,6 +76,16 @@ impl BeaconChainBuilder { Ok(Self::from_genesis_state(genesis_state, spec, log)) } + pub fn json_state(file: &PathBuf, spec: ChainSpec, log: Logger) -> Result { + let file = File::open(file.clone()) + .map_err(|e| format!("Unable to open JSON genesis state file {:?}: {:?}", file, e))?; + + let genesis_state = serde_json::from_reader(file) + .map_err(|e| format!("Unable to parse JSON genesis state file: {:?}", e))?; + + Ok(Self::from_genesis_state(genesis_state, spec, log)) + } + pub fn http_bootstrap(server: &str, spec: ChainSpec, log: Logger) -> Result { let bootstrapper = Bootstrapper::from_server_string(server.to_string()) .map_err(|e| format!("Failed to initialize bootstrap client: {}", e))?; diff --git a/beacon_node/client/src/config.rs b/beacon_node/client/src/config.rs index 2fb62c3f9..f9b366eb1 100644 --- a/beacon_node/client/src/config.rs +++ b/beacon_node/client/src/config.rs @@ -56,6 +56,8 @@ pub enum BeaconChainStartMethod { Yaml { file: PathBuf }, /// Create a new beacon chain by loading a SSZ-encoded genesis state from a file. Ssz { file: PathBuf }, + /// Create a new beacon chain by loading a JSON-encoded genesis state from a file. + Json { file: PathBuf }, /// Create a new beacon chain by using a HTTP server (running our REST-API) to load genesis and /// finalized states and blocks. HttpBootstrap { server: String, port: Option }, diff --git a/beacon_node/client/src/lib.rs b/beacon_node/client/src/lib.rs index 1396ed45f..e14da2af9 100644 --- a/beacon_node/client/src/lib.rs +++ b/beacon_node/client/src/lib.rs @@ -147,6 +147,15 @@ where ); BeaconChainBuilder::ssz_state(file, spec.clone(), log.clone())? 
} + BeaconChainStartMethod::Json { file } => { + info!( + log, + "Starting beacon chain"; + "file" => format!("{:?}", file), + "method" => "json" + ); + BeaconChainBuilder::json_state(file, spec.clone(), log.clone())? + } BeaconChainStartMethod::HttpBootstrap { server, port } => { info!( log, diff --git a/beacon_node/src/config.rs b/beacon_node/src/config.rs index ba831c733..4a3f6b6a7 100644 --- a/beacon_node/src/config.rs +++ b/beacon_node/src/config.rs @@ -186,6 +186,7 @@ fn process_testnet_subcommand( let start_method = match format { "yaml" => BeaconChainStartMethod::Yaml { file }, "ssz" => BeaconChainStartMethod::Ssz { file }, + "json" => BeaconChainStartMethod::Json { file }, other => return Err(format!("Unknown genesis file format: {}", other)), }; diff --git a/beacon_node/src/main.rs b/beacon_node/src/main.rs index 6ca85bd56..b914be549 100644 --- a/beacon_node/src/main.rs +++ b/beacon_node/src/main.rs @@ -318,7 +318,7 @@ fn main() { .arg(Arg::with_name("format") .value_name("FORMAT") .required(true) - .possible_values(&["yaml", "ssz"]) + .possible_values(&["yaml", "ssz", "json"]) .help("The encoding of the state in the file.")) .arg(Arg::with_name("file") .value_name("YAML_FILE") @@ -344,7 +344,7 @@ fn main() { _ => unreachable!("guarded by clap"), }; - let mut log = slog::Logger::root(drain.fuse(), o!()); + let log = slog::Logger::root(drain.fuse(), o!()); warn!( log, From 5a8c31e6bf8e10b6fde71fbd3e93fa0b05d6f72a Mon Sep 17 00:00:00 2001 From: Paul Hauner Date: Mon, 2 Sep 2019 16:29:05 +1000 Subject: [PATCH 190/305] Remove JSON epoch test --- eth2/types/src/slot_epoch.rs | 12 ------------ 1 file changed, 12 deletions(-) diff --git a/eth2/types/src/slot_epoch.rs b/eth2/types/src/slot_epoch.rs index 748d6445f..bd611aa0c 100644 --- a/eth2/types/src/slot_epoch.rs +++ b/eth2/types/src/slot_epoch.rs @@ -191,16 +191,4 @@ mod epoch_tests { Epoch::from_ssz_bytes(&max_epoch.as_ssz_bytes()).unwrap() ); } - - #[test] - fn epoch_max_value_json() { - let x: Epoch = 
Epoch::from(u64::max_value()); - let json = serde_json::to_string(&x).expect("should json encode"); - - assert_eq!(&json, "18446744073709552000"); - assert_eq!( - serde_json::from_str::(&json).expect("should json decode"), - x - ); - } } From a0e019b4d7553555b663d1f7aeb34fdb70fb0626 Mon Sep 17 00:00:00 2001 From: Paul Hauner Date: Mon, 2 Sep 2019 18:19:30 +1000 Subject: [PATCH 191/305] Fix interop eth1 blockhash --- beacon_node/beacon_chain/src/beacon_chain_builder.rs | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/beacon_node/beacon_chain/src/beacon_chain_builder.rs b/beacon_node/beacon_chain/src/beacon_chain_builder.rs index 93c67447e..37039dce0 100644 --- a/beacon_node/beacon_chain/src/beacon_chain_builder.rs +++ b/beacon_node/beacon_chain/src/beacon_chain_builder.rs @@ -165,7 +165,7 @@ fn interop_genesis_state( spec: &ChainSpec, ) -> Result, String> { let keypairs = generate_deterministic_keypairs(validator_count); - let eth1_block_hash = Hash256::from_slice(&[42; 32]); + let eth1_block_hash = Hash256::from_slice(&[0x42; 32]); let eth1_timestamp = 2_u64.pow(40); let amount = spec.max_effective_balance; @@ -277,7 +277,7 @@ mod test { assert_eq!( state.eth1_data.block_hash, - Hash256::from_slice(&[42; 32]), + Hash256::from_slice(&[0x42; 32]), "eth1 block hash should be co-ordinated junk" ); From cd7b6da88ed52a604e9ce3cf1d5985aff4f94a13 Mon Sep 17 00:00:00 2001 From: Age Manning Date: Tue, 3 Sep 2019 00:34:41 +1000 Subject: [PATCH 192/305] Updates syncing, corrects CLI variables --- beacon_node/beacon_chain/src/beacon_chain.rs | 11 +- beacon_node/eth2-libp2p/src/discovery.rs | 10 +- beacon_node/eth2-libp2p/src/service.rs | 19 +- beacon_node/network/src/sync/manager.rs | 616 +++++++++++-------- beacon_node/network/src/sync/simple_sync.rs | 5 +- beacon_node/src/main.rs | 13 +- 6 files changed, 374 insertions(+), 300 deletions(-) diff --git a/beacon_node/beacon_chain/src/beacon_chain.rs b/beacon_node/beacon_chain/src/beacon_chain.rs index 
6380d03b3..a142816ae 100644 --- a/beacon_node/beacon_chain/src/beacon_chain.rs +++ b/beacon_node/beacon_chain/src/beacon_chain.rs @@ -442,6 +442,15 @@ impl BeaconChain { None } + /// Returns the block canonical root of the current canonical chain at a given slot. + /// + /// Returns None if a block doesn't exist at the slot. + pub fn root_at_slot(&self, target_slot: Slot) -> Option { + self.rev_iter_block_roots() + .find(|(_root, slot)| *slot == target_slot) + .map(|(root, _slot)| root) + } + /// Reads the slot clock (see `self.read_slot_clock()` and returns the number of slots since /// genesis. pub fn slots_since_genesis(&self) -> Option { @@ -1006,7 +1015,7 @@ impl BeaconChain { }; // Load the parent blocks state from the database, returning an error if it is not found. - // It is an error because if know the parent block we should also know the parent state. + // It is an error because if we know the parent block we should also know the parent state. let parent_state_root = parent_block.state_root; let parent_state = self .store diff --git a/beacon_node/eth2-libp2p/src/discovery.rs b/beacon_node/eth2-libp2p/src/discovery.rs index 6c80a8596..4a8aba2b1 100644 --- a/beacon_node/eth2-libp2p/src/discovery.rs +++ b/beacon_node/eth2-libp2p/src/discovery.rs @@ -341,13 +341,9 @@ fn save_enr_to_disc(dir: &Path, enr: &Enr, log: &slog::Logger) { } Err(e) => { warn!( - log, - <<<<<<< HEAD - "Could not write ENR to file"; "file" => format!("{:?}{:?}",dir, ENR_FILENAME), "error" => format!("{}", e) - ======= - "Could not write ENR to file"; "File" => format!("{:?}{:?}",dir, ENR_FILENAME), "Error" => format!("{}", e) - >>>>>>> interop - ); + log, + "Could not write ENR to file"; "file" => format!("{:?}{:?}",dir, ENR_FILENAME), "error" => format!("{}", e) + ); } } } diff --git a/beacon_node/eth2-libp2p/src/service.rs b/beacon_node/eth2-libp2p/src/service.rs index 589106f48..1ea1723b6 100644 --- a/beacon_node/eth2-libp2p/src/service.rs +++ 
b/beacon_node/eth2-libp2p/src/service.rs @@ -82,17 +82,10 @@ impl Service { // attempt to connect to user-input libp2p nodes for multiaddr in config.libp2p_nodes { match Swarm::dial_addr(&mut swarm, multiaddr.clone()) { -<<<<<<< HEAD Ok(()) => debug!(log, "Dialing libp2p peer"; "address" => format!("{}", multiaddr)), Err(err) => debug!( log, - "Could not connect to peer"; "address" => format!("{}", multiaddr), "Error" => format!("{:?}", err) -======= - Ok(()) => debug!(log, "Dialing libp2p peer"; "Address" => format!("{}", multiaddr)), - Err(err) => debug!( - log, - "Could not connect to peer"; "Address" => format!("{}", multiaddr), "Error" => format!("{:?}", err) ->>>>>>> interop + "Could not connect to peer"; "address" => format!("{}", multiaddr), "error" => format!("{:?}", err) ), }; } @@ -129,7 +122,6 @@ impl Service { let mut subscribed_topics = vec![]; for topic in topics { if swarm.subscribe(topic.clone()) { -<<<<<<< HEAD trace!(log, "Subscribed to topic"; "topic" => format!("{}", topic)); subscribed_topics.push(topic); } else { @@ -137,15 +129,6 @@ impl Service { } } info!(log, "Subscribed to topics"; "topics" => format!("{:?}", subscribed_topics.iter().map(|t| format!("{}", t)).collect::>())); -======= - trace!(log, "Subscribed to topic"; "Topic" => format!("{}", topic)); - subscribed_topics.push(topic); - } else { - warn!(log, "Could not subscribe to topic"; "Topic" => format!("{}", topic)); - } - } - info!(log, "Subscribed to topics"; "Topics" => format!("{:?}", subscribed_topics.iter().map(|t| format!("{}", t)).collect::>())); ->>>>>>> interop Ok(Service { local_peer_id, diff --git a/beacon_node/network/src/sync/manager.rs b/beacon_node/network/src/sync/manager.rs index 9b2d780f4..a48b43ad7 100644 --- a/beacon_node/network/src/sync/manager.rs +++ b/beacon_node/network/src/sync/manager.rs @@ -80,6 +80,9 @@ const PARENT_FAIL_TOLERANCE: usize = 3; /// canonical chain to its head once the peer connects. 
A chain should not appear where it's depth /// is further back than the most recent head slot. const PARENT_DEPTH_TOLERANCE: usize = SLOT_IMPORT_TOLERANCE * 2; +/// The number of empty batches we tolerate before dropping the peer. This prevents endless +/// requests to peers who never return blocks. +const EMPTY_BATCH_TOLERANCE: usize = 100; #[derive(PartialEq)] /// The current state of a block or batches lookup. @@ -95,6 +98,19 @@ enum BlockRequestsState { Failed, } +/// The state of batch requests. +enum SyncDirection { + /// The batch has just been initialised and we need to check to see if a backward sync is + /// required on first batch response. + Initial, + /// We are syncing forwards, the next batch should contain higher slot numbers than is + /// predecessor. + Forwards, + /// We are syncing backwards and looking for a common ancestor chain before we can start + /// processing the downloaded blocks. + Backwards, +} + /// `BlockRequests` keep track of the long-range (batch) sync process per peer. struct BlockRequests { /// The peer's head slot and the target of this batch download. @@ -104,10 +120,13 @@ struct BlockRequests { target_head_root: Hash256, /// The blocks that we have currently downloaded from the peer that are yet to be processed. downloaded_blocks: Vec>, + /// The number of empty batches we have consecutively received. If a peer returns more than + /// EMPTY_BATCHES_TOLERANCE, they are dropped. + consecutive_empty_batches: usize, /// The current state of this batch request. state: BlockRequestsState, - /// Specifies whether the current state is syncing forwards or backwards. - forward_sync: bool, + /// Specifies the current direction of this batch request. + sync_direction: SyncDirection, /// The current `start_slot` of the batched block request. current_start_slot: Slot, } @@ -129,10 +148,13 @@ struct ParentRequests { impl BlockRequests { /// Gets the next start slot for a batch and transitions the state to a Queued state. 
fn update_start_slot(&mut self) { - if self.forward_sync { - self.current_start_slot += Slot::from(MAX_BLOCKS_PER_REQUEST); - } else { - self.current_start_slot -= Slot::from(MAX_BLOCKS_PER_REQUEST); + match self.sync_direction { + SyncDirection::Initial | SyncDirection::Forwards => { + self.current_start_slot += Slot::from(MAX_BLOCKS_PER_REQUEST); + } + SyncDirection::Backwards => { + self.current_start_slot -= Slot::from(MAX_BLOCKS_PER_REQUEST); + } } self.state = BlockRequestsState::Queued; } @@ -175,6 +197,8 @@ pub(crate) enum ImportManagerOutcome { /// controls the logic behind both the long-range (batch) sync and the on-going potential parent /// look-up of blocks. pub struct ImportManager { + /// List of events to be processed externally. + event_queue: SmallVec<[ImportManagerOutcome; 20]>, /// A weak reference to the underlying beacon chain. chain: Weak>, /// The current state of the import manager. @@ -200,6 +224,7 @@ impl ImportManager { /// dropped during the syncing process. The syncing handles this termination gracefully. pub fn new(beacon_chain: Arc>, log: &slog::Logger) -> Self { ImportManager { + event_queue: SmallVec::new(), chain: Arc::downgrade(&beacon_chain), state: ManagerState::Regular, import_queue: HashMap::new(), @@ -253,7 +278,7 @@ impl ImportManager { // Check if the peer is significantly is behind us. If within `SLOT_IMPORT_TOLERANCE` // treat them as a fully synced peer. If not, ignore them in the sync process if local.head_slot.sub(remote.head_slot).as_usize() < SLOT_IMPORT_TOLERANCE { - self.add_full_peer(peer_id); + self.add_full_peer(peer_id.clone()); } else { debug!( self.log, @@ -275,9 +300,10 @@ impl ImportManager { let block_requests = BlockRequests { target_head_slot: remote.head_slot, // this should be larger than the current head. 
It is checked in the SyncManager before add_peer is called target_head_root: remote.head_root, + consecutive_empty_batches: 0, downloaded_blocks: Vec::new(), state: BlockRequestsState::Queued, - forward_sync: true, + sync_direction: SyncDirection::Initial, current_start_slot: chain.best_slot(), }; self.import_queue.insert(peer_id, block_requests); @@ -291,6 +317,16 @@ impl ImportManager { request_id: RequestId, mut blocks: Vec>, ) { + // ensure the underlying chain still exists + let chain = match self.chain.upgrade() { + Some(chain) => chain, + None => { + debug!(self.log, "Chain dropped. Sync terminating"); + self.event_queue.clear(); + return; + } + }; + // find the request associated with this response let block_requests = match self .import_queue @@ -315,10 +351,19 @@ impl ImportManager { if blocks.is_empty() { debug!(self.log, "BeaconBlocks response was empty"; "request_id" => request_id); - block_requests.update_start_slot(); + block_requests.consecutive_empty_batches += 1; + if block_requests.consecutive_empty_batches >= EMPTY_BATCH_TOLERANCE { + warn!(self.log, "Peer returned too many empty block batches"; + "peer" => format!("{:?}", peer_id)); + block_requests.state = BlockRequestsState::Failed; + } else { + block_requests.update_start_slot(); + } return; } + block_requests.consecutive_empty_batches = 0; + // verify the range of received blocks // Note that the order of blocks is verified in block processing let last_sent_slot = blocks[blocks.len() - 1].slot; @@ -328,83 +373,90 @@ impl ImportManager { .add(MAX_BLOCKS_PER_REQUEST) < last_sent_slot { - //TODO: Downvote peer - add a reason to failed - dbg!(&blocks); warn!(self.log, "BeaconBlocks response returned out of range blocks"; "request_id" => request_id, "response_initial_slot" => blocks[0].slot, "requested_initial_slot" => block_requests.current_start_slot); + self.event_queue + .push(ImportManagerOutcome::DownvotePeer(peer_id)); // consider this sync failed block_requests.state = 
BlockRequestsState::Failed; return; } // Determine if more blocks need to be downloaded. There are a few cases: - // - We have downloaded a batch from our head_slot, which has not reached the remotes head - // (target head). Therefore we need to download another sequential batch. - // - The latest batch includes blocks that greater than or equal to the target_head slot, - // which means we have caught up to their head. We then check to see if the first - // block downloaded matches our head. If so, we are on the same chain and can process - // the blocks. If not we need to sync back further until we are on the same chain. So - // request more blocks. - // - We are syncing backwards (from our head slot) and need to check if we are on the same - // chain. If so, process the blocks, if not, request more blocks all the way up to - // our last finalized slot. + // - We are in initial sync mode - We have requested blocks and need to determine if this + // is part of a known chain to determine the whether to start syncing backwards or continue + // syncing forwards. + // - We are syncing backwards and need to verify if we have found a common ancestor in + // order to start processing the downloaded blocks. + // - We are syncing forwards. We mark this as complete and check if any further blocks are + // required to download when processing the batch. 
- if block_requests.forward_sync { - // append blocks if syncing forward - block_requests.downloaded_blocks.append(&mut blocks); - } else { - // prepend blocks if syncing backwards - block_requests.downloaded_blocks.splice(..0, blocks); - } + match block_requests.sync_direction { + SyncDirection::Initial => { + block_requests.downloaded_blocks.append(&mut blocks); - // does the batch contain the target_head_slot - let last_element_index = block_requests.downloaded_blocks.len() - 1; - if block_requests.downloaded_blocks[last_element_index].slot - >= block_requests.target_head_slot - || !block_requests.forward_sync - { - // if the batch is on our chain, this is complete and we can then process. - // Otherwise start backwards syncing until we reach a common chain. - let earliest_slot = block_requests.downloaded_blocks[0].slot; - //TODO: Decide which is faster. Reading block from db and comparing or calculating - //the hash tree root and comparing. - if Some(block_requests.downloaded_blocks[0].canonical_root()) - == root_at_slot(&self.chain, earliest_slot) - { - block_requests.state = BlockRequestsState::Complete; - return; + // this batch is the first batch downloaded. Check if we can process or if we need + // to backwards search. + + //TODO: Decide which is faster. Reading block from db and comparing or calculating + //the hash tree root and comparing. 
+ let earliest_slot = block_requests.downloaded_blocks[0].slot; + if Some(block_requests.downloaded_blocks[0].canonical_root()) + == chain.root_at_slot(earliest_slot) + { + // we have a common head, start processing and begin a forwards sync + block_requests.sync_direction = SyncDirection::Forwards; + block_requests.state = BlockRequestsState::ReadyToProcess; + return; + } + // no common head, begin a backwards search + block_requests.sync_direction = SyncDirection::Backwards; + block_requests.current_start_slot = + std::cmp::min(chain.best_slot(), block_requests.downloaded_blocks[0].slot); + block_requests.update_start_slot(); } - - // not on the same chain, request blocks backwards - let state = &self.chain.head().beacon_state; - let local_finalized_slot = state - .finalized_checkpoint - .epoch - .start_slot(T::EthSpec::slots_per_epoch()); - - // check that the request hasn't failed by having no common chain - if local_finalized_slot >= block_requests.current_start_slot { - warn!(self.log, "Peer returned an unknown chain."; "request_id" => request_id); - block_requests.state = BlockRequestsState::Failed; - return; + SyncDirection::Forwards => { + // continue processing all blocks forwards, verify the end in the processing + block_requests.downloaded_blocks.append(&mut blocks); + block_requests.state = BlockRequestsState::ReadyToProcess; } + SyncDirection::Backwards => { + block_requests.downloaded_blocks.splice(..0, blocks); - // if this is a forward sync, then we have reached the head without a common chain - // and we need to start syncing backwards. 
- if block_requests.forward_sync { - // Start a backwards sync by requesting earlier blocks - block_requests.forward_sync = false; - block_requests.current_start_slot = std::cmp::min( - self.chain.best_slot(), - block_requests.downloaded_blocks[0].slot, - ); + // verify the request hasn't failed by having no common ancestor chain + // get our local finalized_slot + let local_finalized_slot = { + let state = &chain.head().beacon_state; + state + .finalized_checkpoint + .epoch + .start_slot(T::EthSpec::slots_per_epoch()) + }; + + if local_finalized_slot >= block_requests.current_start_slot { + warn!(self.log, "Peer returned an unknown chain."; "request_id" => request_id); + block_requests.state = BlockRequestsState::Failed; + return; + } + + // check if we have reached a common chain ancestor + let earliest_slot = block_requests.downloaded_blocks[0].slot; + if Some(block_requests.downloaded_blocks[0].canonical_root()) + == chain.root_at_slot(earliest_slot) + { + // we have a common head, start processing and begin a forwards sync + block_requests.sync_direction = SyncDirection::Forwards; + block_requests.state = BlockRequestsState::ReadyToProcess; + return; + } + + // no common chain, haven't passed last_finalized_head, so continue backwards + // search + block_requests.update_start_slot(); } } - - // update the start slot and re-queue the batch - block_requests.update_start_slot(); } pub fn recent_blocks_response( @@ -447,7 +499,7 @@ impl ImportManager { } // queue for processing - parent_request.state = BlockRequestsState::Complete; + parent_request.state = BlockRequestsState::ReadyToProcess; } pub fn _inject_error(_peer_id: PeerId, _id: RequestId) { @@ -500,29 +552,41 @@ impl ImportManager { pub(crate) fn poll(&mut self) -> ImportManagerOutcome { loop { + //TODO: Optimize the lookups. Potentially keep state of whether each of these functions + //need to be called. 
+ + // only break once everything has been processed + let mut re_run = false; + + // only process batch requests if there are any + if !self.import_queue.is_empty() { + // process potential block requests + self.process_potential_block_requests(); + + // process any complete long-range batches + re_run = self.process_complete_batches(); + } + + // only process parent objects if we are in regular sync + if let ManagerState::Regular = self.state { + // process any parent block lookup-requests + self.process_parent_requests(); + + // process any complete parent lookups + re_run = self.process_complete_parent_requests(); + } + + // return any queued events + if !self.event_queue.is_empty() { + let event = self.event_queue.remove(0); + self.event_queue.shrink_to_fit(); + return event; + } + // update the state of the manager self.update_state(); - // process potential block requests - if let Some(outcome) = self.process_potential_block_requests() { - return outcome; - } - - // process any complete long-range batches - if let Some(outcome) = self.process_complete_batches() { - return outcome; - } - - // process any parent block lookup-requests - if let Some(outcome) = self.process_parent_requests() { - return outcome; - } - - // process any complete parent lookups - let (re_run, outcome) = self.process_complete_parent_requests(); - if let Some(outcome) = outcome { - return outcome; - } else if !re_run { + if !re_run { break; } } @@ -549,11 +613,11 @@ impl ImportManager { } } - fn process_potential_block_requests(&mut self) -> Option { + fn process_potential_block_requests(&mut self) { // check if an outbound request is required // Managing a fixed number of outbound requests is maintained at the RPC protocol libp2p - // layer and not needed here. - // If any in queued state we submit a request. + // layer and not needed here. Therefore we create many outbound requests and let the RPC + // handle the number of simultaneous requests. Request all queued objects. 
// remove any failed batches let debug_log = &self.log; @@ -585,56 +649,84 @@ impl ImportManager { count: MAX_BLOCKS_PER_REQUEST, step: 0, }; - return Some(ImportManagerOutcome::RequestBlocks { + self.event_queue.push(ImportManagerOutcome::RequestBlocks { peer_id: peer_id.clone(), request, request_id, }); } - - None } - fn process_complete_batches(&mut self) -> Option { - let completed_batches = self - .import_queue - .iter() - .filter(|(_peer, block_requests)| block_requests.state == BlockRequestsState::Complete) - .map(|(peer, _)| peer) - .cloned() - .collect::>(); - for peer_id in completed_batches { - let block_requests = self.import_queue.remove(&peer_id).expect("key exists"); - match self.process_blocks(block_requests.downloaded_blocks.clone()) { - Ok(()) => { - //TODO: Verify it's impossible to have empty downloaded_blocks - let last_element = block_requests.downloaded_blocks.len() - 1; - debug!(self.log, "Blocks processed successfully"; - "peer" => format!("{:?}", peer_id), - "start_slot" => block_requests.downloaded_blocks[0].slot, - "end_slot" => block_requests.downloaded_blocks[last_element].slot, - "no_blocks" => last_element + 1, - ); - // Re-HELLO to ensure we are up to the latest head - return Some(ImportManagerOutcome::Hello(peer_id)); - } - Err(e) => { - let last_element = block_requests.downloaded_blocks.len() - 1; - warn!(self.log, "Block processing failed"; + fn process_complete_batches(&mut self) -> bool { + // flag to indicate if the manager can be switched to idle or not + let mut re_run = false; + + // create reference variables to be moved into subsequent closure + let chain_ref = self.chain.clone(); + let log_ref = &self.log; + let event_queue_ref = &mut self.event_queue; + + self.import_queue.retain(|peer_id, block_requests| { + // check that the chain still exists + if let Some(chain) = chain_ref.upgrade() { + let downloaded_blocks = + std::mem::replace(&mut block_requests.downloaded_blocks, Vec::new()); + let last_element = 
block_requests.downloaded_blocks.len() - 1; + let start_slot = block_requests.downloaded_blocks[0].slot; + let end_slot = block_requests.downloaded_blocks[last_element].slot; + + match process_blocks(chain, downloaded_blocks, log_ref) { + Ok(()) => { + debug!(log_ref, "Blocks processed successfully"; "peer" => format!("{:?}", peer_id), - "start_slot" => block_requests.downloaded_blocks[0].slot, - "end_slot" => block_requests.downloaded_blocks[last_element].slot, + "start_slot" => start_slot, + "end_slot" => end_slot, "no_blocks" => last_element + 1, - "error" => format!("{:?}", e), - ); - return Some(ImportManagerOutcome::DownvotePeer(peer_id)); + ); + + // check if the batch is complete, by verifying if we have reached the + // target head + if end_slot >= block_requests.target_head_slot { + // Completed, re-hello the peer to ensure we are up to the latest head + event_queue_ref.push(ImportManagerOutcome::Hello(peer_id.clone())); + // remove the request + false + } else { + // have not reached the end, queue another batch + block_requests.update_start_slot(); + re_run = true; + // keep the batch + true + } + } + Err(e) => { + warn!(log_ref, "Block processing failed"; + "peer" => format!("{:?}", peer_id), + "start_slot" => start_slot, + "end_slot" => end_slot, + "no_blocks" => last_element + 1, + "error" => format!("{:?}", e), + ); + event_queue_ref.push(ImportManagerOutcome::DownvotePeer(peer_id.clone())); + false + } } + } else { + // chain no longer exists, empty the queue and return + event_queue_ref.clear(); + return false; } - } - None + }); + + re_run } - fn process_parent_requests(&mut self) -> Option { + fn process_parent_requests(&mut self) { + // check to make sure there are peers to search for the parent from + if self.full_peers.is_empty() { + return; + } + // remove any failed requests let debug_log = &self.log; self.parent_queue.retain(|parent_request| { @@ -649,11 +741,6 @@ impl ImportManager { } }); - // check to make sure there are peers to search 
for the parent from - if self.full_peers.is_empty() { - return None; - } - // check if parents need to be searched for for parent_request in self.parent_queue.iter_mut() { if parent_request.failed_attempts >= PARENT_FAIL_TOLERANCE { @@ -677,23 +764,21 @@ impl ImportManager { // select a random fully synced peer to attempt to download the parent block let peer_id = self.full_peers.iter().next().expect("List is not empty"); - return Some(ImportManagerOutcome::RecentRequest(peer_id.clone(), req)); + self.event_queue + .push(ImportManagerOutcome::RecentRequest(peer_id.clone(), req)); } } - - None } - fn process_complete_parent_requests(&mut self) -> (bool, Option) { - // flag to determine if there is more process to drive or if the manager can be switched to - // an idle state + fn process_complete_parent_requests(&mut self) -> bool { + // returned value indicating whether the manager can be switched to idle or not let mut re_run = false; // Find any parent_requests ready to be processed for completed_request in self .parent_queue .iter_mut() - .filter(|req| req.state == BlockRequestsState::Complete) + .filter(|req| req.state == BlockRequestsState::ReadyToProcess) { // verify the last added block is the parent of the last requested block let last_index = completed_request.downloaded_blocks.len() - 1; @@ -711,7 +796,9 @@ impl ImportManager { "received_block" => format!("{}", block_hash), "expected_parent" => format!("{}", expected_hash), ); - return (true, Some(ImportManagerOutcome::DownvotePeer(peer))); + re_run = true; + self.event_queue + .push(ImportManagerOutcome::DownvotePeer(peer)); } // try and process the list of blocks up to the requested block @@ -720,154 +807,153 @@ impl ImportManager { .downloaded_blocks .pop() .expect("Block must exist exist"); - match self.chain.process_block(block.clone()) { - Ok(BlockProcessingOutcome::ParentUnknown { parent: _ }) => { - // need to keep looking for parents - completed_request.downloaded_blocks.push(block); - 
completed_request.state = BlockRequestsState::Queued; - re_run = true; - break; - } - Ok(BlockProcessingOutcome::Processed { block_root: _ }) => {} - Ok(outcome) => { - // it's a future slot or an invalid block, remove it and try again - completed_request.failed_attempts += 1; - trace!( - self.log, "Invalid parent block"; - "outcome" => format!("{:?}", outcome), - "peer" => format!("{:?}", completed_request.last_submitted_peer), - ); - completed_request.state = BlockRequestsState::Queued; - re_run = true; - return ( - re_run, - Some(ImportManagerOutcome::DownvotePeer( + + // check if the chain exists + if let Some(chain) = self.chain.upgrade() { + match chain.process_block(block.clone()) { + Ok(BlockProcessingOutcome::ParentUnknown { parent: _ }) => { + // need to keep looking for parents + completed_request.downloaded_blocks.push(block); + completed_request.state = BlockRequestsState::Queued; + re_run = true; + break; + } + Ok(BlockProcessingOutcome::Processed { block_root: _ }) => {} + Ok(outcome) => { + // it's a future slot or an invalid block, remove it and try again + completed_request.failed_attempts += 1; + trace!( + self.log, "Invalid parent block"; + "outcome" => format!("{:?}", outcome), + "peer" => format!("{:?}", completed_request.last_submitted_peer), + ); + completed_request.state = BlockRequestsState::Queued; + re_run = true; + self.event_queue.push(ImportManagerOutcome::DownvotePeer( completed_request.last_submitted_peer.clone(), - )), - ); - } - Err(e) => { - completed_request.failed_attempts += 1; - warn!( - self.log, "Parent processing error"; - "error" => format!("{:?}", e) - ); - completed_request.state = BlockRequestsState::Queued; - re_run = true; - return ( - re_run, - Some(ImportManagerOutcome::DownvotePeer( + )); + return re_run; + } + Err(e) => { + completed_request.failed_attempts += 1; + warn!( + self.log, "Parent processing error"; + "error" => format!("{:?}", e) + ); + completed_request.state = BlockRequestsState::Queued; + re_run = 
true; + self.event_queue.push(ImportManagerOutcome::DownvotePeer( completed_request.last_submitted_peer.clone(), - )), - ); + )); + return re_run; + } } + } else { + // chain doesn't exist - clear the event queue and return + self.event_queue.clear(); + return false; } } } - // remove any full completed and processed parent chains + // remove any fully processed parent chains self.parent_queue.retain(|req| { - if req.state == BlockRequestsState::Complete { + if req.state == BlockRequestsState::ReadyToProcess { false } else { true } }); - (re_run, None) + re_run } +} - fn process_blocks(&mut self, blocks: Vec>) -> Result<(), String> { - for block in blocks { - let processing_result = self.chain.process_block(block.clone()); +// Helper function to process blocks +fn process_blocks( + chain: Arc>, + blocks: Vec>, + log: &Logger, +) -> Result<(), String> { + for block in blocks { + let processing_result = chain.process_block(block.clone()); - if let Ok(outcome) = processing_result { - match outcome { - BlockProcessingOutcome::Processed { block_root } => { - // The block was valid and we processed it successfully. + if let Ok(outcome) = processing_result { + match outcome { + BlockProcessingOutcome::Processed { block_root } => { + // The block was valid and we processed it successfully. + trace!( + log, "Imported block from network"; + "slot" => block.slot, + "block_root" => format!("{}", block_root), + ); + } + BlockProcessingOutcome::ParentUnknown { parent } => { + // blocks should be sequential and all parents should exist + trace!( + log, "ParentBlockUnknown"; + "parent_root" => format!("{}", parent), + "baby_block_slot" => block.slot, + ); + return Err(format!( + "Block at slot {} has an unknown parent.", + block.slot + )); + } + BlockProcessingOutcome::FutureSlot { + present_slot, + block_slot, + } => { + if present_slot + FUTURE_SLOT_TOLERANCE >= block_slot { + // The block is too far in the future, drop it. 
trace!( - self.log, "Imported block from network"; - "slot" => block.slot, - "block_root" => format!("{}", block_root), - ); - } - BlockProcessingOutcome::ParentUnknown { parent } => { - // blocks should be sequential and all parents should exist - trace!( - self.log, "ParentBlockUnknown"; - "parent_root" => format!("{}", parent), - "baby_block_slot" => block.slot, + log, "FutureBlock"; + "msg" => "block for future slot rejected, check your time", + "present_slot" => present_slot, + "block_slot" => block_slot, + "FUTURE_SLOT_TOLERANCE" => FUTURE_SLOT_TOLERANCE, ); return Err(format!( - "Block at slot {} has an unknown parent.", + "Block at slot {} is too far in the future", block.slot )); - } - BlockProcessingOutcome::FutureSlot { - present_slot, - block_slot, - } => { - if present_slot + FUTURE_SLOT_TOLERANCE >= block_slot { - // The block is too far in the future, drop it. - trace!( - self.log, "FutureBlock"; - "msg" => "block for future slot rejected, check your time", - "present_slot" => present_slot, - "block_slot" => block_slot, - "FUTURE_SLOT_TOLERANCE" => FUTURE_SLOT_TOLERANCE, - ); - return Err(format!( - "Block at slot {} is too far in the future", - block.slot - )); - } else { - // The block is in the future, but not too far. - trace!( - self.log, "QueuedFutureBlock"; - "msg" => "queuing future block, check your time", - "present_slot" => present_slot, - "block_slot" => block_slot, - "FUTURE_SLOT_TOLERANCE" => FUTURE_SLOT_TOLERANCE, - ); - } - } - BlockProcessingOutcome::FinalizedSlot => { + } else { + // The block is in the future, but not too far. 
trace!( - self.log, "Finalized or earlier block processed"; - "outcome" => format!("{:?}", outcome), + log, "QueuedFutureBlock"; + "msg" => "queuing future block, check your time", + "present_slot" => present_slot, + "block_slot" => block_slot, + "FUTURE_SLOT_TOLERANCE" => FUTURE_SLOT_TOLERANCE, ); - // block reached our finalized slot or was earlier, move to the next block - } - _ => { - trace!( - self.log, "InvalidBlock"; - "msg" => "peer sent invalid block", - "outcome" => format!("{:?}", outcome), - ); - return Err(format!("Invalid block at slot {}", block.slot)); } } - } else { - trace!( - self.log, "BlockProcessingFailure"; - "msg" => "unexpected condition in processing block.", - "outcome" => format!("{:?}", processing_result) - ); - return Err(format!( - "Unexpected block processing error: {:?}", - processing_result - )); + BlockProcessingOutcome::FinalizedSlot => { + trace!( + log, "Finalized or earlier block processed"; + "outcome" => format!("{:?}", outcome), + ); + // block reached our finalized slot or was earlier, move to the next block + } + _ => { + trace!( + log, "InvalidBlock"; + "msg" => "peer sent invalid block", + "outcome" => format!("{:?}", outcome), + ); + return Err(format!("Invalid block at slot {}", block.slot)); + } } + } else { + trace!( + log, "BlockProcessingFailure"; + "msg" => "unexpected condition in processing block.", + "outcome" => format!("{:?}", processing_result) + ); + return Err(format!( + "Unexpected block processing error: {:?}", + processing_result + )); } - Ok(()) } -} - -fn root_at_slot( - chain: &Arc>, - target_slot: Slot, -) -> Option { - chain - .rev_iter_block_roots() - .find(|(_root, slot)| *slot == target_slot) - .map(|(root, _slot)| root) + Ok(()) } diff --git a/beacon_node/network/src/sync/simple_sync.rs b/beacon_node/network/src/sync/simple_sync.rs index dd857d8c3..36947082e 100644 --- a/beacon_node/network/src/sync/simple_sync.rs +++ b/beacon_node/network/src/sync/simple_sync.rs @@ -6,7 +6,6 @@ use 
eth2_libp2p::rpc::{RPCEvent, RPCRequest, RPCResponse, RequestId}; use eth2_libp2p::PeerId; use slog::{debug, info, o, trace, warn}; use ssz::Encode; -use std::ops::Sub; use std::sync::Arc; use store::Store; use tokio::sync::mpsc; @@ -190,7 +189,7 @@ impl SimpleSync { trace!( self.log, "Out of date or potentially sync'd peer found"; "peer" => format!("{:?}", peer_id), - "remote_head_slot" => remote.head_slot + "remote_head_slot" => remote.head_slot, "remote_latest_finalized_epoch" => remote.finalized_epoch, ); @@ -386,7 +385,7 @@ impl SimpleSync { "peer" => format!("{:?}", peer_id), "msg" => "Failed to return all requested hashes", "start_slot" => req.start_slot, - "current_slot" => self.chain.present_slot(), + "current_slot" => self.chain.best_slot(), "requested" => req.count, "returned" => blocks.len(), ); diff --git a/beacon_node/src/main.rs b/beacon_node/src/main.rs index 26537c6f7..ea801cd8b 100644 --- a/beacon_node/src/main.rs +++ b/beacon_node/src/main.rs @@ -33,14 +33,14 @@ fn main() { .arg( Arg::with_name("logfile") .long("logfile") - .value_name("logfile") + .value_name("FILE") .help("File path where output will be written.") .takes_value(true), ) .arg( Arg::with_name("network-dir") .long("network-dir") - .value_name("NETWORK-DIR") + .value_name("DIR") .help("Data directory for network keys.") .takes_value(true) .global(true) @@ -83,7 +83,7 @@ fn main() { Arg::with_name("boot-nodes") .long("boot-nodes") .allow_hyphen_values(true) - .value_name("BOOTNODES") + .value_name("ENR-LIST") .help("One or more comma-delimited base64-encoded ENR's to bootstrap the p2p network.") .takes_value(true), ) @@ -128,13 +128,14 @@ fn main() { .arg( Arg::with_name("rpc-address") .long("rpc-address") - .value_name("Address") + .value_name("ADDRESS") .help("Listen address for RPC endpoint.") .takes_value(true), ) .arg( Arg::with_name("rpc-port") .long("rpc-port") + .value_name("PORT") .help("Listen port for RPC endpoint.") .conflicts_with("port-bump") .takes_value(true), @@ 
-149,14 +150,14 @@ fn main() { .arg( Arg::with_name("api-address") .long("api-address") - .value_name("APIADDRESS") + .value_name("ADDRESS") .help("Set the listen address for the RESTful HTTP API server.") .takes_value(true), ) .arg( Arg::with_name("api-port") .long("api-port") - .value_name("APIPORT") + .value_name("PORT") .help("Set the listen TCP port for the RESTful HTTP API server.") .conflicts_with("port-bump") .takes_value(true), From 13b5df56b3f4e81238733943e23bb3e11d602c5a Mon Sep 17 00:00:00 2001 From: Age Manning Date: Tue, 3 Sep 2019 07:50:44 +1000 Subject: [PATCH 193/305] Account manager, bootnodes, RPC display and sync fixes --- account_manager/src/main.rs | 10 +- beacon_node/eth2-libp2p/src/discovery.rs | 2 +- beacon_node/eth2-libp2p/src/rpc/methods.rs | 48 ++++++++ beacon_node/eth2-libp2p/src/rpc/mod.rs | 10 ++ beacon_node/eth2-libp2p/src/rpc/protocol.rs | 11 ++ beacon_node/eth2-libp2p/src/service.rs | 16 ++- beacon_node/network/src/service.rs | 4 +- beacon_node/network/src/sync/manager.rs | 124 ++++++++++++-------- beacon_node/network/src/sync/simple_sync.rs | 2 +- beacon_node/src/main.rs | 14 --- 10 files changed, 168 insertions(+), 73 deletions(-) diff --git a/account_manager/src/main.rs b/account_manager/src/main.rs index b7448ddf2..ae3823049 100644 --- a/account_manager/src/main.rs +++ b/account_manager/src/main.rs @@ -125,9 +125,13 @@ fn main() { } } } - _ => panic!( - "The account manager must be run with a subcommand. See help for more information." - ), + _ => { + crit!( + log, + "The account manager must be run with a subcommand. See help for more information." + ); + return; + } } } diff --git a/beacon_node/eth2-libp2p/src/discovery.rs b/beacon_node/eth2-libp2p/src/discovery.rs index 4a8aba2b1..c3f2522d8 100644 --- a/beacon_node/eth2-libp2p/src/discovery.rs +++ b/beacon_node/eth2-libp2p/src/discovery.rs @@ -114,7 +114,7 @@ impl Discovery { self.find_peers(); } - /// Add an Enr to the routing table of the discovery mechanism. 
+ /// Add an ENR to the routing table of the discovery mechanism. pub fn add_enr(&mut self, enr: Enr) { self.discovery.add_enr(enr); } diff --git a/beacon_node/eth2-libp2p/src/rpc/methods.rs b/beacon_node/eth2-libp2p/src/rpc/methods.rs index d912bcfa1..c9610b000 100644 --- a/beacon_node/eth2-libp2p/src/rpc/methods.rs +++ b/beacon_node/eth2-libp2p/src/rpc/methods.rs @@ -157,3 +157,51 @@ impl ErrorMessage { String::from_utf8(self.error_message.clone()).unwrap_or_else(|_| "".into()) } } + +impl std::fmt::Display for HelloMessage { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + write!(f, "Hello Message: Fork Version: {:?}, Finalized Root: {}, Finalized Epoch: {}, Head Root: {}, Head Slot: {}", self.fork_version, self.finalized_root, self.finalized_epoch, self.head_root, self.head_slot) + } +} + +impl std::fmt::Display for RPCResponse { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + match self { + RPCResponse::Hello(hello) => write!(f, "{}", hello), + RPCResponse::BeaconBlocks(_) => write!(f, ""), + RPCResponse::RecentBeaconBlocks(_) => write!(f, ""), + } + } +} + +impl std::fmt::Display for RPCErrorResponse { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + match self { + RPCErrorResponse::Success(res) => write!(f, "{}", res), + RPCErrorResponse::InvalidRequest(err) => write!(f, "Invalid Request: {:?}", err), + RPCErrorResponse::ServerError(err) => write!(f, "Server Error: {:?}", err), + RPCErrorResponse::Unknown(err) => write!(f, "Unknown Error: {:?}", err), + } + } +} + +impl std::fmt::Display for GoodbyeReason { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + match self { + GoodbyeReason::ClientShutdown => write!(f, "Client Shutdown"), + GoodbyeReason::IrrelevantNetwork => write!(f, "Irrelevant Network"), + GoodbyeReason::Fault => write!(f, "Fault"), + GoodbyeReason::Unknown => write!(f, "Unknown Reason"), + } + } +} + +impl std::fmt::Display for BeaconBlocksRequest 
{ + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + write!( + f, + "Head Block Root: {}, Start Slot: {}, Count: {}, Step: {}", + self.head_block_root, self.start_slot, self.count, self.step + ) + } +} diff --git a/beacon_node/eth2-libp2p/src/rpc/mod.rs b/beacon_node/eth2-libp2p/src/rpc/mod.rs index 756a62e71..2076615a9 100644 --- a/beacon_node/eth2-libp2p/src/rpc/mod.rs +++ b/beacon_node/eth2-libp2p/src/rpc/mod.rs @@ -47,6 +47,16 @@ impl RPCEvent { } } +impl std::fmt::Display for RPCEvent { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + match self { + RPCEvent::Request(id, req) => write!(f, "RPC Request(Id: {}, {})", id, req), + RPCEvent::Response(id, res) => write!(f, "RPC Response(Id: {}, {})", id, res), + RPCEvent::Error(id, err) => write!(f, "RPC Request(Id: {}, Error: {:?})", id, err), + } + } +} + /// Implements the libp2p `NetworkBehaviour` trait and therefore manages network-level /// logic. pub struct RPC { diff --git a/beacon_node/eth2-libp2p/src/rpc/protocol.rs b/beacon_node/eth2-libp2p/src/rpc/protocol.rs index be1efdf5d..401fa8b9e 100644 --- a/beacon_node/eth2-libp2p/src/rpc/protocol.rs +++ b/beacon_node/eth2-libp2p/src/rpc/protocol.rs @@ -288,3 +288,14 @@ impl std::error::Error for RPCError { } } } + +impl std::fmt::Display for RPCRequest { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + match self { + RPCRequest::Hello(hello) => write!(f, "Hello Message: {}", hello), + RPCRequest::Goodbye(reason) => write!(f, "Goodbye: {}", reason), + RPCRequest::BeaconBlocks(req) => write!(f, "Beacon Blocks: {}", req), + RPCRequest::RecentBeaconBlocks(req) => write!(f, "Recent Beacon Blocks: {:?}", req), + } + } +} diff --git a/beacon_node/eth2-libp2p/src/service.rs b/beacon_node/eth2-libp2p/src/service.rs index 1ea1723b6..96b5a276e 100644 --- a/beacon_node/eth2-libp2p/src/service.rs +++ b/beacon_node/eth2-libp2p/src/service.rs @@ -79,8 +79,8 @@ impl Service { } }; - // attempt to connect to 
user-input libp2p nodes - for multiaddr in config.libp2p_nodes { + // helper closure for dialing peers + let mut dial_addr = |multiaddr: Multiaddr| { match Swarm::dial_addr(&mut swarm, multiaddr.clone()) { Ok(()) => debug!(log, "Dialing libp2p peer"; "address" => format!("{}", multiaddr)), Err(err) => debug!( @@ -88,6 +88,18 @@ impl Service { "Could not connect to peer"; "address" => format!("{}", multiaddr), "error" => format!("{:?}", err) ), }; + }; + + // attempt to connect to user-input libp2p nodes + for multiaddr in config.libp2p_nodes { + dial_addr(multiaddr); + } + + // attempt to connect to any specified boot-nodes + for bootnode_enr in config.boot_nodes { + for multiaddr in bootnode_enr.multiaddr() { + dial_addr(multiaddr); + } } // subscribe to default gossipsub topics diff --git a/beacon_node/network/src/service.rs b/beacon_node/network/src/service.rs index a8b3c74b6..ae7562033 100644 --- a/beacon_node/network/src/service.rs +++ b/beacon_node/network/src/service.rs @@ -161,7 +161,7 @@ fn network_service( Ok(Async::Ready(Some(message))) => match message { NetworkMessage::Send(peer_id, outgoing_message) => match outgoing_message { OutgoingMessage::RPC(rpc_event) => { - trace!(log, "Sending RPC Event: {:?}", rpc_event); + trace!(log, "{}", rpc_event); libp2p_service.lock().swarm.send_rpc(peer_id, rpc_event); } }, @@ -185,7 +185,7 @@ fn network_service( match libp2p_service.lock().poll() { Ok(Async::Ready(Some(event))) => match event { Libp2pEvent::RPC(peer_id, rpc_event) => { - trace!(log, "RPC Event: RPC message received: {:?}", rpc_event); + trace!(log, "{}", rpc_event); message_handler_send .try_send(HandlerMessage::RPC(peer_id, rpc_event)) .map_err(|_| "Failed to send RPC to handler")?; diff --git a/beacon_node/network/src/sync/manager.rs b/beacon_node/network/src/sync/manager.rs index a48b43ad7..8ba7486a5 100644 --- a/beacon_node/network/src/sync/manager.rs +++ b/beacon_node/network/src/sync/manager.rs @@ -68,7 +68,7 @@ use types::{BeaconBlock, 
EthSpec, Hash256, Slot}; /// Blocks are downloaded in batches from peers. This constant specifies how many blocks per batch /// is requested. Currently the value is small for testing. This will be incremented for /// production. -const MAX_BLOCKS_PER_REQUEST: u64 = 10; +const MAX_BLOCKS_PER_REQUEST: u64 = 100; /// The number of slots ahead of us that is allowed before requesting a long-range (batch) Sync /// from a peer. If a peer is within this tolerance (forwards or backwards), it is treated as a @@ -120,6 +120,8 @@ struct BlockRequests { target_head_root: Hash256, /// The blocks that we have currently downloaded from the peer that are yet to be processed. downloaded_blocks: Vec>, + /// The number of blocks successfully processed in this request. + blocks_processed: usize, /// The number of empty batches we have consecutively received. If a peer returns more than /// EMPTY_BATCHES_TOLERANCE, they are dropped. consecutive_empty_batches: usize, @@ -302,6 +304,7 @@ impl ImportManager { target_head_root: remote.head_root, consecutive_empty_batches: 0, downloaded_blocks: Vec::new(), + blocks_processed: 0, state: BlockRequestsState::Queued, sync_direction: SyncDirection::Initial, current_start_slot: chain.best_slot(), @@ -356,6 +359,10 @@ impl ImportManager { warn!(self.log, "Peer returned too many empty block batches"; "peer" => format!("{:?}", peer_id)); block_requests.state = BlockRequestsState::Failed; + } else if block_requests.current_start_slot >= block_requests.target_head_slot { + warn!(self.log, "Peer did not return blocks it claimed to possess"; + "peer" => format!("{:?}", peer_id)); + block_requests.state = BlockRequestsState::Failed; } else { block_requests.update_start_slot(); } @@ -561,19 +568,19 @@ impl ImportManager { // only process batch requests if there are any if !self.import_queue.is_empty() { // process potential block requests - self.process_potential_block_requests(); + re_run = re_run || self.process_potential_block_requests(); // process any 
complete long-range batches - re_run = self.process_complete_batches(); + re_run = re_run || self.process_complete_batches(); } // only process parent objects if we are in regular sync - if let ManagerState::Regular = self.state { + if !self.parent_queue.is_empty() { // process any parent block lookup-requests - self.process_parent_requests(); + re_run = re_run || self.process_parent_requests(); // process any complete parent lookups - re_run = self.process_complete_parent_requests(); + re_run = re_run || self.process_complete_parent_requests(); } // return any queued events @@ -613,20 +620,23 @@ impl ImportManager { } } - fn process_potential_block_requests(&mut self) { + fn process_potential_block_requests(&mut self) -> bool { // check if an outbound request is required // Managing a fixed number of outbound requests is maintained at the RPC protocol libp2p // layer and not needed here. Therefore we create many outbound requests and let the RPC // handle the number of simultaneous requests. Request all queued objects. 
+ let mut re_run = false; // remove any failed batches let debug_log = &self.log; + let full_peer_ref = &mut self.full_peers; self.import_queue.retain(|peer_id, block_request| { if let BlockRequestsState::Failed = block_request.state { debug!(debug_log, "Block import from peer failed"; "peer_id" => format!("{:?}", peer_id), - "downloaded_blocks" => block_request.downloaded_blocks.len() + "downloaded_blocks" => block_request.blocks_processed ); + full_peer_ref.remove(peer_id); false } else { true @@ -654,7 +664,10 @@ impl ImportManager { request, request_id, }); + re_run = true; } + + re_run } fn process_complete_batches(&mut self) -> bool { @@ -667,66 +680,75 @@ impl ImportManager { let event_queue_ref = &mut self.event_queue; self.import_queue.retain(|peer_id, block_requests| { - // check that the chain still exists - if let Some(chain) = chain_ref.upgrade() { - let downloaded_blocks = - std::mem::replace(&mut block_requests.downloaded_blocks, Vec::new()); - let last_element = block_requests.downloaded_blocks.len() - 1; - let start_slot = block_requests.downloaded_blocks[0].slot; - let end_slot = block_requests.downloaded_blocks[last_element].slot; + if block_requests.state == BlockRequestsState::ReadyToProcess { + // check that the chain still exists + if let Some(chain) = chain_ref.upgrade() { + let downloaded_blocks = + std::mem::replace(&mut block_requests.downloaded_blocks, Vec::new()); + let last_element = downloaded_blocks.len() - 1; + let start_slot = downloaded_blocks[0].slot; + let end_slot = downloaded_blocks[last_element].slot; - match process_blocks(chain, downloaded_blocks, log_ref) { - Ok(()) => { - debug!(log_ref, "Blocks processed successfully"; - "peer" => format!("{:?}", peer_id), - "start_slot" => start_slot, - "end_slot" => end_slot, - "no_blocks" => last_element + 1, - ); - - // check if the batch is complete, by verifying if we have reached the - // target head - if end_slot >= block_requests.target_head_slot { - // Completed, re-hello the 
peer to ensure we are up to the latest head - event_queue_ref.push(ImportManagerOutcome::Hello(peer_id.clone())); - // remove the request - false - } else { - // have not reached the end, queue another batch - block_requests.update_start_slot(); - re_run = true; - // keep the batch - true - } - } - Err(e) => { - warn!(log_ref, "Block processing failed"; + match process_blocks(chain, downloaded_blocks, log_ref) { + Ok(()) => { + debug!(log_ref, "Blocks processed successfully"; "peer" => format!("{:?}", peer_id), "start_slot" => start_slot, "end_slot" => end_slot, "no_blocks" => last_element + 1, - "error" => format!("{:?}", e), - ); - event_queue_ref.push(ImportManagerOutcome::DownvotePeer(peer_id.clone())); - false + ); + block_requests.blocks_processed += last_element + 1; + + // check if the batch is complete, by verifying if we have reached the + // target head + if end_slot >= block_requests.target_head_slot { + // Completed, re-hello the peer to ensure we are up to the latest head + event_queue_ref.push(ImportManagerOutcome::Hello(peer_id.clone())); + // remove the request + false + } else { + // have not reached the end, queue another batch + block_requests.update_start_slot(); + re_run = true; + // keep the batch + true + } + } + Err(e) => { + warn!(log_ref, "Block processing failed"; + "peer" => format!("{:?}", peer_id), + "start_slot" => start_slot, + "end_slot" => end_slot, + "no_blocks" => last_element + 1, + "error" => format!("{:?}", e), + ); + event_queue_ref + .push(ImportManagerOutcome::DownvotePeer(peer_id.clone())); + false + } } + } else { + // chain no longer exists, empty the queue and return + event_queue_ref.clear(); + return false; } } else { - // chain no longer exists, empty the queue and return - event_queue_ref.clear(); - return false; + // not ready to process + true } }); re_run } - fn process_parent_requests(&mut self) { + fn process_parent_requests(&mut self) -> bool { // check to make sure there are peers to search for the parent 
from if self.full_peers.is_empty() { - return; + return false; } + let mut re_run = false; + // remove any failed requests let debug_log = &self.log; self.parent_queue.retain(|parent_request| { @@ -766,8 +788,10 @@ impl ImportManager { self.event_queue .push(ImportManagerOutcome::RecentRequest(peer_id.clone(), req)); + re_run = true; } } + re_run } fn process_complete_parent_requests(&mut self) -> bool { diff --git a/beacon_node/network/src/sync/simple_sync.rs b/beacon_node/network/src/sync/simple_sync.rs index 36947082e..e1ca30b0a 100644 --- a/beacon_node/network/src/sync/simple_sync.rs +++ b/beacon_node/network/src/sync/simple_sync.rs @@ -453,7 +453,7 @@ impl SimpleSync { } BlockProcessingOutcome::ParentUnknown { parent: _ } => { // Inform the sync manager to find parents for this block - trace!(self.log, "Unknown parent gossip"; + trace!(self.log, "Block with unknown parent received"; "peer_id" => format!("{:?}",peer_id)); self.manager.add_unknown_block(block.clone(), peer_id); SHOULD_FORWARD_GOSSIP_BLOCK diff --git a/beacon_node/src/main.rs b/beacon_node/src/main.rs index ea801cd8b..ab9803eba 100644 --- a/beacon_node/src/main.rs +++ b/beacon_node/src/main.rs @@ -187,13 +187,6 @@ fn main() { .possible_values(&["info", "debug", "trace", "warn", "error", "crit"]) .default_value("trace"), ) - .arg( - Arg::with_name("verbosity") - .short("v") - .multiple(true) - .help("Sets the verbosity level") - .takes_value(true), - ) /* * The "testnet" sub-command. 
* @@ -332,13 +325,6 @@ fn main() { _ => unreachable!("guarded by clap"), }; - let drain = match matches.occurrences_of("verbosity") { - 0 => drain.filter_level(Level::Info), - 1 => drain.filter_level(Level::Debug), - 2 => drain.filter_level(Level::Trace), - _ => drain.filter_level(Level::Trace), - }; - let log = slog::Logger::root(drain.fuse(), o!()); warn!( From 969b6d7575c69007d273630359cd37b0a61afb5b Mon Sep 17 00:00:00 2001 From: Paul Hauner Date: Tue, 3 Sep 2019 09:50:15 +1000 Subject: [PATCH 194/305] Tidy BeaconStateCow --- beacon_node/beacon_chain/src/beacon_chain.rs | 75 ++++++++++---------- beacon_node/rpc/src/validator.rs | 2 +- 2 files changed, 37 insertions(+), 40 deletions(-) diff --git a/beacon_node/beacon_chain/src/beacon_chain.rs b/beacon_node/beacon_chain/src/beacon_chain.rs index 99dd9a642..97af43718 100644 --- a/beacon_node/beacon_chain/src/beacon_chain.rs +++ b/beacon_node/beacon_chain/src/beacon_chain.rs @@ -77,25 +77,31 @@ pub enum AttestationProcessingOutcome { Invalid(AttestationValidationError), } -pub enum StateCow<'a, T: EthSpec> { +/// Effectively a `Cow`, however when it is `Borrowed` it holds a `RwLockReadGuard` (a +/// read-lock on some read/write-locked state). +/// +/// Only has a small subset of the functionality of a `std::borrow::Cow`. 
+pub enum BeaconStateCow<'a, T: EthSpec> { Borrowed(RwLockReadGuard<'a, CheckPoint>), Owned(BeaconState), } -impl<'a, T: EthSpec> AsRef> for StateCow<'a, T> { - fn as_ref(&self) -> &BeaconState { +impl<'a, T: EthSpec> BeaconStateCow<'a, T> { + pub fn maybe_as_mut_ref(&mut self) -> Option<&mut BeaconState> { match self { - StateCow::Borrowed(checkpoint) => &checkpoint.beacon_state, - StateCow::Owned(state) => &state, + BeaconStateCow::Borrowed(_) => None, + BeaconStateCow::Owned(ref mut state) => Some(state), } } } -impl<'a, T: EthSpec> StateCow<'a, T> { - pub fn as_mut_ref(&mut self) -> Option<&mut BeaconState> { +impl<'a, T: EthSpec> std::ops::Deref for BeaconStateCow<'a, T> { + type Target = BeaconState; + + fn deref(&self) -> &BeaconState { match self { - StateCow::Borrowed(_) => None, - StateCow::Owned(ref mut state) => Some(state), + BeaconStateCow::Borrowed(checkpoint) => &checkpoint.beacon_state, + BeaconStateCow::Owned(state) => &state, } } } @@ -374,11 +380,11 @@ impl BeaconChain { /// /// Returns `None` when the state is not found in the database or there is an error skipping /// to a future state. - pub fn state_at_slot(&self, slot: Slot) -> Result, Error> { + pub fn state_at_slot(&self, slot: Slot) -> Result, Error> { let head_state = &self.head().beacon_state; if slot == head_state.slot { - Ok(StateCow::Borrowed(self.head())) + Ok(BeaconStateCow::Borrowed(self.head())) } else if slot > head_state.slot { let head_state_slot = head_state.slot; let mut state = head_state.clone(); @@ -398,7 +404,7 @@ impl BeaconChain { } }; } - Ok(StateCow::Owned(state)) + Ok(BeaconStateCow::Owned(state)) } else { let state_root = self .rev_iter_state_roots() @@ -406,7 +412,7 @@ impl BeaconChain { .map(|(root, _slot)| root) .ok_or_else(|| Error::NoStateForSlot(slot))?; - Ok(StateCow::Owned( + Ok(BeaconStateCow::Owned( self.store .get(&state_root)? 
.ok_or_else(|| Error::NoStateForSlot(slot))?, @@ -422,7 +428,7 @@ impl BeaconChain { /// /// Returns `None` when there is an error skipping to a future state or the slot clock cannot /// be read. - pub fn state_now(&self) -> Result, Error> { + pub fn state_now(&self) -> Result, Error> { self.state_at_slot(self.slot()?) } @@ -465,25 +471,24 @@ impl BeaconChain { let head_state = &self.head().beacon_state; let mut state = if epoch(slot) == epoch(head_state.slot) { - StateCow::Borrowed(self.head()) + BeaconStateCow::Borrowed(self.head()) } else { self.state_at_slot(slot)? }; - if let Some(state) = state.as_mut_ref() { + if let Some(state) = state.maybe_as_mut_ref() { state.build_committee_cache(RelativeEpoch::Current, &self.spec)?; } - if epoch(state.as_ref().slot) != epoch(slot) { + if epoch(state.slot) != epoch(slot) { return Err(Error::InvariantViolated(format!( "Epochs in consistent in proposer lookup: state: {}, requested: {}", - epoch(state.as_ref().slot), + epoch(state.slot), epoch(slot) ))); } state - .as_ref() .get_beacon_proposer_index(slot, RelativeEpoch::Current, &self.spec) .map_err(Into::into) } @@ -501,26 +506,25 @@ impl BeaconChain { let head_state = &self.head().beacon_state; let mut state = if epoch == as_epoch(head_state.slot) { - StateCow::Borrowed(self.head()) + BeaconStateCow::Borrowed(self.head()) } else { self.state_at_slot(epoch.start_slot(T::EthSpec::slots_per_epoch()))? }; - if let Some(state) = state.as_mut_ref() { + if let Some(state) = state.maybe_as_mut_ref() { state.build_committee_cache(RelativeEpoch::Current, &self.spec)?; } - if as_epoch(state.as_ref().slot) != epoch { + if as_epoch(state.slot) != epoch { return Err(Error::InvariantViolated(format!( "Epochs in consistent in attestation duties lookup: state: {}, requested: {}", - as_epoch(state.as_ref().slot), + as_epoch(state.slot), epoch ))); } - if let Some(attestation_duty) = state - .as_ref() - .get_attestation_duties(validator_index, RelativeEpoch::Current)? 
+ if let Some(attestation_duty) = + state.get_attestation_duties(validator_index, RelativeEpoch::Current)? { Ok(Some((attestation_duty.slot, attestation_duty.shard))) } else { @@ -541,12 +545,7 @@ impl BeaconChain { let head_block_root = self.head().beacon_block_root; let head_block_slot = self.head().beacon_block.slot; - self.produce_attestation_data_for_block( - shard, - head_block_root, - head_block_slot, - state.as_ref(), - ) + self.produce_attestation_data_for_block(shard, head_block_root, head_block_slot, &*state) } /// Produce an `AttestationData` that attests to the chain denoted by `block_root` and `state`. @@ -868,7 +867,7 @@ impl BeaconChain { match self.state_now() { Ok(state) => self .op_pool - .insert_voluntary_exit(exit, state.as_ref(), &self.spec), + .insert_voluntary_exit(exit, &*state, &self.spec), Err(e) => { error!( &self.log, @@ -884,9 +883,7 @@ impl BeaconChain { /// Accept some transfer and queue it for inclusion in an appropriate block. pub fn process_transfer(&self, transfer: Transfer) -> Result<(), TransferValidationError> { match self.state_now() { - Ok(state) => self - .op_pool - .insert_transfer(transfer, state.as_ref(), &self.spec), + Ok(state) => self.op_pool.insert_transfer(transfer, &*state, &self.spec), Err(e) => { error!( &self.log, @@ -907,7 +904,7 @@ impl BeaconChain { match self.state_now() { Ok(state) => { self.op_pool - .insert_proposer_slashing(proposer_slashing, state.as_ref(), &self.spec) + .insert_proposer_slashing(proposer_slashing, &*state, &self.spec) } Err(e) => { error!( @@ -929,7 +926,7 @@ impl BeaconChain { match self.state_now() { Ok(state) => { self.op_pool - .insert_attester_slashing(attester_slashing, state.as_ref(), &self.spec) + .insert_attester_slashing(attester_slashing, &*state, &self.spec) } Err(e) => { error!( @@ -1150,7 +1147,7 @@ impl BeaconChain { .state_at_slot(slot - 1) .map_err(|_| BlockProductionError::UnableToProduceAtSlot(slot))?; - self.produce_block_on_state(state.as_ref().clone(), slot, 
randao_reveal) + self.produce_block_on_state(state.clone(), slot, randao_reveal) } /// Produce a block for some `slot` upon the given `state`. diff --git a/beacon_node/rpc/src/validator.rs b/beacon_node/rpc/src/validator.rs index 84995ca50..abc1cffc5 100644 --- a/beacon_node/rpc/src/validator.rs +++ b/beacon_node/rpc/src/validator.rs @@ -32,7 +32,7 @@ impl ValidatorService for ValidatorServiceInstance { let slot = epoch.start_slot(T::EthSpec::slots_per_epoch()); let mut state = if let Ok(state) = self.chain.state_at_slot(slot) { - state.as_ref().clone() + state.clone() } else { let log_clone = self.log.clone(); let f = sink From 8d5a579aa64a4ba53c010f12913c9f45adedd56c Mon Sep 17 00:00:00 2001 From: Michael Sproul Date: Fri, 23 Aug 2019 18:33:27 +1000 Subject: [PATCH 195/305] Fix BeaconChain tests --- eth2/types/src/beacon_state/tests.rs | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/eth2/types/src/beacon_state/tests.rs b/eth2/types/src/beacon_state/tests.rs index 67adccdda..0363e5848 100644 --- a/eth2/types/src/beacon_state/tests.rs +++ b/eth2/types/src/beacon_state/tests.rs @@ -90,11 +90,11 @@ fn test_active_index(state_slot: Slot) { // Test the start and end of the range. assert_eq!( - state.get_active_index_root_index(*range.start(), &spec), + state.get_active_index_root_index(*range.start(), &spec, AllowNextEpoch::False), Ok(modulo(*range.start())) ); assert_eq!( - state.get_active_index_root_index(*range.end(), &spec), + state.get_active_index_root_index(*range.end(), &spec, AllowNextEpoch::False), Ok(modulo(*range.end())) ); @@ -102,12 +102,12 @@ fn test_active_index(state_slot: Slot) { if state.current_epoch() > 0 { // Test is invalid on epoch zero, cannot subtract from zero. 
assert_eq!( - state.get_active_index_root_index(*range.start() - 1, &spec), + state.get_active_index_root_index(*range.start() - 1, &spec, AllowNextEpoch::False), Err(Error::EpochOutOfBounds) ); } assert_eq!( - state.get_active_index_root_index(*range.end() + 1, &spec), + state.get_active_index_root_index(*range.end() + 1, &spec, AllowNextEpoch::False), Err(Error::EpochOutOfBounds) ); } From 4bfc1a56885e34df8c140bc81be8a025ef803aa8 Mon Sep 17 00:00:00 2001 From: Paul Hauner Date: Tue, 3 Sep 2019 11:23:21 +1000 Subject: [PATCH 196/305] Make significant changes to the book --- book/src/SUMMARY.md | 10 ++- book/src/interop-cheat-sheet.md | 143 ++++++++++++++++++++++++++++++++ book/src/interop-cli.md | 29 +++++++ book/src/interop-environment.md | 30 +++++++ book/src/interop-scenarios.md | 97 ++++++++++++++++++++++ book/src/interop-tips.md | 119 -------------------------- book/src/interop.md | 135 ++---------------------------- book/src/intro.md | 32 ++----- 8 files changed, 316 insertions(+), 279 deletions(-) create mode 100644 book/src/interop-cheat-sheet.md create mode 100644 book/src/interop-cli.md create mode 100644 book/src/interop-environment.md create mode 100644 book/src/interop-scenarios.md diff --git a/book/src/SUMMARY.md b/book/src/SUMMARY.md index f0ad41144..4ffa694cd 100644 --- a/book/src/SUMMARY.md +++ b/book/src/SUMMARY.md @@ -2,7 +2,9 @@ * [Introduction](./intro.md) * [Development Environment](./setup.md) -* [Testnets](./testnets.md) - * [Simple Local Testnet](./simple-testnet.md) - * [Interop](./interop.md) - * [Interop Tips & Tricks](./interop-tips.md) +* [Simple Local Testnet](./simple-testnet.md) +* [Interop](./interop.md) + * [Environment](./interop-environment.md) + * [CLI Overview](./interop-cli.md) + * [Scenarios](./interop-scenarios.md) + * [Cheat-sheet](./interop-cheat-sheet.md) diff --git a/book/src/interop-cheat-sheet.md b/book/src/interop-cheat-sheet.md new file mode 100644 index 000000000..4f6f079b4 --- /dev/null +++ 
b/book/src/interop-cheat-sheet.md @@ -0,0 +1,143 @@ +# Interop Cheat-sheet + +This document contains a list of tips and tricks that may be useful during +interop testing. + +- When starting a beacon node: + - [Specify a boot node by multiaddr](#boot-node-multiaddr) + - [Specify a boot node by ENR](#boot-node-enr) + - [Avoid port clashes when starting multiple nodes](#port-bump) + - [Specify a custom slot time](#slot-time) +- Using the beacon node HTTP API: + - [Curl a nodes ENR](#http-enr) + - [Curl a nodes connected peers](#http-peer-ids) + - [Curl a nodes local peer id](#http-peer-id) + - [Curl a nodes listening multiaddrs](#http-listen-addresses) + - [Curl a nodes beacon chain head](#http-head) + - [Curl a nodes finalized checkpoint](#http-finalized) + +## Category: CLI + +The `--help` command provides detail on the CLI interface. Here are some +interop-specific CLI commands. + + +### Specify a boot node by multiaddr + +You can specify a static list of multiaddrs when booting Lighthouse using +the `--libp2p-addresses` command. + +#### Example: + +Runs an 8 validator quick-start chain, peering with `/ip4/192.168.0.1/tcp/9000` on boot. + +``` +$ ./beacon_node --libp2p-addresses /ip4/192.168.0.1/tcp/9000 testnet -f quick 8 1567222226 +``` + + +### Specify a boot node by ENR + +You can specify a static list of Discv5 addresses when booting Lighthouse using +the `--boot-nodes` command. + +#### Example: + +Runs an 8 validator quick-start chain, peering with `-IW4QB2...` on boot. + +``` +$ ./beacon_node --boot-nodes -IW4QB2Hi8TPuEzQ41Cdf1r2AUU1FFVFDBJdJyOkWk2qXpZfFZQy2YnJIyoT_5fnbtrXUouoskmydZl4pIg90clIkYUDgmlwhH8AAAGDdGNwgiMog3VkcIIjKIlzZWNwMjU2azGhAjg0-DsTkQynhJCRnLLttBK1RS78lmUkLa-wgzAi-Ob5 testnet -f quick 8 1567222226 +``` + + +### Avoid port clashes when starting nodes + +Starting a second Lighthouse node on the same machine will fail due to TCP/UDP +port collisions. Use the `-b` (`--port-bump`) flag to increase all listening +ports by some `n`. 
+ +#### Example: + +Increase all ports by `10` (using multiples of `10` is recommended). + +``` +$ ./beacon_node -b 10 testnet -f quick 8 1567222226 +``` + + +### Start a testnet with a custom slot time + +Lighthouse can run at quite low slot times when there are few validators (e.g., +`500 ms` slot times should be fine for 8 validators). + +#### Example + +The `-t` (`--slot-time`) flag specifies the milliseconds per slot. + +``` +$ ./beacon_node -b 10 testnet -t 500 -f quick 8 1567222226 +``` + +> Note: `bootstrap` loads the slot time via HTTP and therefore conflicts with +> this flag. + +## Category: HTTP API + +Examples assume there is a Lighthouse node exposing a HTTP API on +`localhost:5052`. Responses are JSON. + + +### Get the node's ENR + +``` +$ curl localhost:5052/network/enr + +"-IW4QFyf1VlY5pZs0xZuvKMRZ9_cdl9WMCDAAJXZiZiuGcfRYoU40VPrYDLQj5prneJIz3zcbTjHp9BbThc-yiymJO8HgmlwhH8AAAGDdGNwgiMog3VkcIIjKIlzZWNwMjU2azGhAjg0-DsTkQynhJCRnLLttBK1RS78lmUkLa-wgzAi-Ob5"% +``` + + +### Get a list of connected peer ids + +``` +$ curl localhost:5052/network/peers + +["QmeMFRTWfo3KbVG7dEBXGhyRMa29yfmnJBXW84rKuGEhuL"]% +``` + + +### Get the node's peer id + +``` +curl localhost:5052/network/peer_id + +"QmRD1qs2AqNNRdBcGHUGpUGkpih5cmdL32mhh22Sy79xsJ"% +``` + + +### Get the list of listening libp2p addresses + +Lists all the libp2p multiaddrs that the node is listening on. 
+ +``` +curl localhost:5052/network/listen_addresses + +["/ip4/127.0.0.1/tcp/9000","/ip4/192.168.1.121/tcp/9000","/ip4/172.17.0.1/tcp/9000","/ip4/172.42.0.1/tcp/9000","/ip6/::1/tcp/9000","/ip6/fdd3:c293:1bc::203/tcp/9000","/ip6/fdd3:c293:1bc:0:9aa9:b2ea:c610:44db/tcp/9000"]% +``` + + +### Get the node's beacon chain head + +``` +curl localhost:5052/beacon/head + +{"slot":0,"block_root":"0x827bf71805540aa13f6d8c7d18b41b287b2094a4d7a28cbb8deb061dbf5df4f5","state_root":"0x90a78d73294bc9c7519a64e1912161be0e823eb472012ff54204e15a4d717fa5"}% +``` + + +### Get the node's finalized checkpoint + +``` +curl localhost:5052/beacon/latest_finalized_checkpoint + +{"epoch":0,"root":"0x0000000000000000000000000000000000000000000000000000000000000000"}% +``` diff --git a/book/src/interop-cli.md b/book/src/interop-cli.md new file mode 100644 index 000000000..3658781d4 --- /dev/null +++ b/book/src/interop-cli.md @@ -0,0 +1,29 @@ +# Interop CLI Overview + +The Lighthouse CLI has two primary tasks: + +- **Resuming** an existing database with `$ ./beacon_node`. +- **Creating** a new testnet database using `$ ./beacon_node testnet`. + +_See [Scenarios](./interop-scenarios.md) for methods we're likely to use +during interop._ + +## Creating a new database + +There are several methods for creating a new beacon node database: + +- `quick`: using the `(validator_client, genesis_time)` tuple. +- `recent`: as above but `genesis_time` is set to the start of some recent time + window. +- `file`: loads the genesis file from disk in one of multiple formats. +- `bootstrap`: a Lighthouse-specific method where we connect to a running node + and download it's specification and genesis state via the HTTP API. + +See `$ ./beacon_node testnet --help` for more detail. + +## Resuming from an existing database + +Once a database has been created, it can be resumed by running `$ ./beacon_node`. + +Presently, this command will fail if no existing database is found. 
You must +use the `$ ./beacon_node testnet` command to create a new database. diff --git a/book/src/interop-environment.md b/book/src/interop-environment.md new file mode 100644 index 000000000..6d3568e29 --- /dev/null +++ b/book/src/interop-environment.md @@ -0,0 +1,30 @@ +# Interop Environment + +All that is required for inter-op is a built and tested [development +environment](./setup.md). + +## Repositories + +You will only require the [sigp/lighthouse](http://github.com/sigp/lighthouse) +library. + +To allow for faster build/test iterations we will use the +[`interop`](https://github.com/sigp/lighthouse/tree/interop) branch of +[sigp/lighthouse](https://github.com/sigp/lighthouse/tree/interop) for +September 2019 interop. **Please use ensure you `git checkout interop` after +cloning the repo.** + +## File System + +When lighthouse boots, it will create the following +directories: + +- `~/.lighthouse`: database and configuration for the beacon node. +- `~/.lighthouse-validator`: database and configuration for the validator + client. + +After building the binaries with `cargo build --release --all`, there will be a +`target/release` directory in the root of the Lighthouse repository. This is +where the `beacon_node` and `validator_client` binaries are located. + +You do not need to create any of these directories manually. diff --git a/book/src/interop-scenarios.md b/book/src/interop-scenarios.md new file mode 100644 index 000000000..d54772ee8 --- /dev/null +++ b/book/src/interop-scenarios.md @@ -0,0 +1,97 @@ +# Interop Scenarios + +Here we demonstrate some expected interop scenarios. + +All scenarios assume a working [development environment](./setup.md) and +commands are based in the `target/release` directory (this is the build dir for +`cargo`). + +Additional functions can be found in the [interop +cheat-sheet](./interop-cheat-sheet.md). 
+ +### Table of contents + +- [Starting from a`validator_count, genesis_time` tuple](#quick-start) +- [Starting a node from a genesis state file](#state-file) +- [Starting a validator client](#val-client) +- [Exporting a genesis state file](#export) from a running Lighthouse + node + + + +### Start beacon node given a validator count and genesis_time + + +To start a brand-new beacon node (with no history) use: + +``` +$ ./beacon_node testnet -f quick 8 1567222226 +``` +> Notes: +> +> - This method conforms the ["Quick-start +genesis"](https://github.com/ethereum/eth2.0-pm/tree/6e41fcf383ebeb5125938850d8e9b4e9888389b4/interop/mocked_start#quick-start-genesis) +method in the `ethereum/eth2.0-pm` repository. +> - The `-f` flag ignores any existing database or configuration, backing them +> up before re-initializing. +> - `8` is the validator count and `1567222226` is the genesis time. +> - See `$ ./beacon_node testnet quick --help` for more configuration options. + + +### Start Beacon Node given a genesis state file + +A genesis state can be read from file using the `testnet file` subcommand. +There are three supported formats: + +- `ssz` (default) +- `json` +- `yaml` + +Start a new node using `/tmp/genesis.ssz` as the genesis state: + +``` +$ ./beacon_node testnet --spec minimal -f file ssz /tmp/genesis.ssz +``` + +> Notes: +> +> - The `-f` flag ignores any existing database or configuration, backing them +> up before re-initializing. +> - See `$ ./beacon_node testnet file --help` for more configuration options. + + +### Start an auto-configured validator client + +To start a brand-new validator client (with no history) use: + +``` +$ ./validator_client testnet -b insecure 0 8 +``` + +> Notes: +> +> - The `-b` flag means the validator client will "bootstrap" specs and config +> from the beacon node. 
+> - The `insecure` command dictates that the [interop keypairs](https://github.com/ethereum/eth2.0-pm/tree/6e41fcf383ebeb5125938850d8e9b4e9888389b4/interop/mocked_start#pubkeyprivkey-generation) +> will be used. +> - The `0 8` indicates that this validator client should manage 8 validators, +> starting at validator 0 (the first deposited validator). +> - The validator client will try to connect to the beacon node at `localhost`. +> See `--help` to configure that address and other features. +> - The validator client will operate very unsafely in `testnet` mode, happily +> swapping between chains and creating double-votes. + + +### Exporting a genesis file + +Genesis states can downloaded from a running Lighthouse node via the HTTP API. Three content-types are supported: + +- `application/json` +- `application/yaml` +- `application/ssz` + +Using `curl`, a genesis state can be downloaded to `/tmp/genesis.ssz`: + +``` +$ curl --header "Content-Type: application/ssz" "localhost:5052/beacon/state/genesis" -o /tmp/genesis.ssz +``` diff --git a/book/src/interop-tips.md b/book/src/interop-tips.md index 969d49b4f..0d52e896a 100644 --- a/book/src/interop-tips.md +++ b/book/src/interop-tips.md @@ -1,120 +1 @@ # Interop Tips & Tricks - -This document contains a list of tips and tricks that may be useful during -interop testing. - -## Command-line Interface - -The `--help` command provides detail on the CLI interface. Here are some -interop-specific CLI commands. - -### Specify a boot node by multiaddr - -You can specify a static list of multiaddrs when booting Lighthouse using -the `--libp2p-addresses` command. - -#### Example: - -Runs an 8 validator quick-start chain, peering with `/ip4/192.168.0.1/tcp/9000` on boot. - -``` -$ ./beacon_node --libp2p-addresses /ip4/192.168.0.1/tcp/9000 testnet -f quick 8 1567222226 -``` - -### Specify a boot node by ENR - -You can specify a static list of Discv5 addresses when booting Lighthouse using -the `--boot-nodes` command. 
- -#### Example: - -Runs an 8 validator quick-start chain, peering with `-IW4QB2...` on boot. - -``` -$ ./beacon_node --boot-nodes -IW4QB2Hi8TPuEzQ41Cdf1r2AUU1FFVFDBJdJyOkWk2qXpZfFZQy2YnJIyoT_5fnbtrXUouoskmydZl4pIg90clIkYUDgmlwhH8AAAGDdGNwgiMog3VkcIIjKIlzZWNwMjU2azGhAjg0-DsTkQynhJCRnLLttBK1RS78lmUkLa-wgzAi-Ob5 testnet -f quick 8 1567222226 -``` - -### Avoid port clashes when starting nodes - -Starting a second Lighthouse node on the same machine will fail due to TCP/UDP -port collisions. Use the `-b` (`--port-bump`) flag to increase all listening -ports by some `n`. - -#### Example: - -Increase all ports by `10` (using multiples of `10` is recommended). - -``` -$ ./beacon_node -b 10 testnet -f quick 8 1567222226 -``` - -### Start a testnet with a custom slot time - -Lighthouse can run at quite low slot times when there are few validators (e.g., -`500 ms` slot times should be fine for 8 validators). - -#### Example - -The `-t` (`--slot-time`) flag specifies the milliseconds per slot. - -``` -$ ./beacon_node -b 10 testnet -t 500 -f quick 8 1567222226 -``` - -> Note: `bootstrap` loads the slot time via HTTP and therefore conflicts with -> this flag. - -## HTTP API - -Examples assume there is a Lighthouse node exposing a HTTP API on -`localhost:5052`. Responses are JSON. - -### Get the node's ENR - -``` -$ curl localhost:5052/network/enr - -"-IW4QFyf1VlY5pZs0xZuvKMRZ9_cdl9WMCDAAJXZiZiuGcfRYoU40VPrYDLQj5prneJIz3zcbTjHp9BbThc-yiymJO8HgmlwhH8AAAGDdGNwgiMog3VkcIIjKIlzZWNwMjU2azGhAjg0-DsTkQynhJCRnLLttBK1RS78lmUkLa-wgzAi-Ob5"% -``` - -### Get a list of connected peer ids - -``` -$ curl localhost:5052/network/peers - -["QmeMFRTWfo3KbVG7dEBXGhyRMa29yfmnJBXW84rKuGEhuL"]% -``` - -### Get the node's peer id - -``` -curl localhost:5052/network/peer_id - -"QmRD1qs2AqNNRdBcGHUGpUGkpih5cmdL32mhh22Sy79xsJ"% -``` - -### Get the list of listening libp2p addresses - -Lists all the libp2p multiaddrs that the node is listening on. 
- -``` -curl localhost:5052/network/listen_addresses - -["/ip4/127.0.0.1/tcp/9000","/ip4/192.168.1.121/tcp/9000","/ip4/172.17.0.1/tcp/9000","/ip4/172.42.0.1/tcp/9000","/ip6/::1/tcp/9000","/ip6/fdd3:c293:1bc::203/tcp/9000","/ip6/fdd3:c293:1bc:0:9aa9:b2ea:c610:44db/tcp/9000"]% -``` - -### Get the node's beacon chain head - -``` -curl localhost:5052/beacon/head - -{"slot":0,"block_root":"0x827bf71805540aa13f6d8c7d18b41b287b2094a4d7a28cbb8deb061dbf5df4f5","state_root":"0x90a78d73294bc9c7519a64e1912161be0e823eb472012ff54204e15a4d717fa5"}% -``` - -### Get the node's finalized checkpoint - -``` -curl localhost:5052/beacon/latest_finalized_checkpoint - -{"epoch":0,"root":"0x0000000000000000000000000000000000000000000000000000000000000000"}% -``` diff --git a/book/src/interop.md b/book/src/interop.md index a2f80584d..cb119d59d 100644 --- a/book/src/interop.md +++ b/book/src/interop.md @@ -3,134 +3,9 @@ This guide is intended for other Ethereum 2.0 client developers performing inter-operability testing with Lighthouse. -To allow for faster iteration cycles without the "merging to master" overhead, -we will use the [`interop`](https://github.com/sigp/lighthouse/tree/interop) -branch of [sigp/lighthouse](https://github.com/sigp/lighthouse/tree/interop) -for September 2019 interop. **Please use ensure you `git checkout interop` -after cloning the repo.** +## Chapters -## Environment - -All that is required for inter-op is a built and tested [development -environment](setup). When lighthouse boots, it will create the following -directories: - -- `~/.lighthouse`: database and configuration for the beacon node. -- `~/.lighthouse-validator`: database and configuration for the validator - client. - -After building the binaries with `cargo build --release --all`, there will be a -`target/release` directory in the root of the Lighthouse repository. This is -where the `beacon_node` and `validator_client` binaries are located. 
- -## CLI Overview - -The Lighthouse CLI has two primary tasks: - -- **Starting** a new testnet chain using `$ ./beacon_node testnet`. -- **Resuming** an existing chain with `$ ./beacon_node` (omit `testnet`). - -There are several methods for starting a new chain: - -- `quick`: using the `(validator_client, genesis_time)` tuple. -- `recent`: as above but `genesis_time` is set to the start of some recent time - window. -- `file`: loads the genesis file from disk in one of multiple formats. -- `bootstrap`: a Lighthouse-specific method where we connect to a running node - and download it's specification and genesis state via the HTTP API. - -See `$ ./beacon_node testnet --help` for more detail. - -Once a chain has been started, it can be resumed by running `$ ./beacon_node` -(potentially supplying the `--datadir`, if a non-default directory was used). - - -## Scenarios - -The following scenarios are documented here: - -- [Starting a "quick-start" beacon node](#quick-start-beacon-node) from a - `(validator_count, genesis)` tuple. -- [Starting a validator client](#validator-client) with `n` interop keypairs. -- [Starting a node from a genesis state file](#starting-from-a-genesis-file). -- [Exporting a genesis state file](#exporting-a-genesis-file) from a running Lighthouse - node. - -All scenarios assume a working development environment and commands are based -in the `target/release` directory (this is the build dir for `cargo`). - - -#### Quick-start Beacon Node - - -To start the node (each time creating a fresh database and configuration in -`~/.lighthouse`), use: - -``` -$ ./beacon_node testnet -f quick 8 1567222226 -``` -> Notes: -> -> - This method conforms the ["Quick-start -genesis"](https://github.com/ethereum/eth2.0-pm/tree/6e41fcf383ebeb5125938850d8e9b4e9888389b4/interop/mocked_start#quick-start-genesis) -method in the `ethereum/eth2.0-pm` repository. -> - The `-f` flag ignores any existing database or configuration, backing them -> up before re-initializing. 
-> - `8` is the validator count and `1567222226` is the genesis time. -> - See `$ ./beacon_node testnet quick --help` for more configuration options. - -#### Validator Client - -Start the validator client with: - -``` -$ ./validator_client testnet -b insecure 0 8 -``` - -> Notes: -> -> - The `-b` flag means the validator client will "bootstrap" specs and config -> from the beacon node. -> - The `insecure` command dictates that the [interop keypairs](https://github.com/ethereum/eth2.0-pm/tree/6e41fcf383ebeb5125938850d8e9b4e9888389b4/interop/mocked_start#pubkeyprivkey-generation) -> will be used. -> - The `0 8` indicates that this validator client should manage 8 validators, -> starting at validator 0 (the first deposited validator). -> - The validator client will try to connect to the beacon node at `localhost`. -> See `--help` to configure that address and other features. -> - The validator client will operate very unsafely in `testnet` mode, happily -> swapping between chains and creating double-votes. - -#### Starting from a genesis file - -A genesis state can be read from file using the `testnet file` subcommand. -There are three supported formats: - -- `ssz` (default) -- `json` -- `yaml` - -Start a new node using `/tmp/genesis.ssz` as the genesis state: - -``` -$ ./beacon_node testnet --spec minimal -f file ssz /tmp/genesis.ssz -``` - -> Notes: -> -> - The `-f` flag ignores any existing database or configuration, backing them -> up before re-initializing. -> - See `$ ./beacon_node testnet file --help` for more configuration options. - -#### Exporting a genesis file - -Genesis states can downloaded from a running Lighthouse node via the HTTP API. 
Three content-types are supported: - -- `application/json` -- `application/yaml` -- `application/ssz` - -Using `curl`, a genesis state can be downloaded to `/tmp/genesis.ssz`: - -``` -$ curl --header "Content-Type: application/ssz" "localhost:5052/beacon/state/genesis" -o /tmp/genesis.ssz -``` +- Read about the required [development environment](./interop-environment.md). +- Get an [overview](./interop-cli.md) of the Lighthouse CLI. +- See how we expect to handle some [interop scenarios](./interop-scenarios.md). +- See the [interop cheat-sheet](./interop-cheat-sheet.md) for useful CLI tips. diff --git a/book/src/intro.md b/book/src/intro.md index e0e3cd6a0..ccf867a54 100644 --- a/book/src/intro.md +++ b/book/src/intro.md @@ -17,31 +17,11 @@ Foundation, Consensys and other individuals and organisations. ## Developer Resources -Documentation is provided for **researchers and developers** working on -Ethereum 2.0 and assumes prior knowledge on the topic. +Documentation is presently targeted at **researchers and developers**. It +assumes significant prior knowledge of Ethereum 2.0. -- Get started with [development environment setup](setup.html). -- [Run a simple testnet](simple-testnet.html) in Only Three CLI Commandsâ„¢. -- Read about our interop workflow. -- API? +Topics: -## Release - -Ethereum 2.0 is not fully specified or implemented and as such, Lighthouse is -still **under development**. - -We are on-track to provide a public, multi-client testnet in late-2019 and an -initial production-grade blockchain in 2020. - -## Features - -Lighthouse has been in development since mid-2018 and has an extensive feature -set: - -- Libp2p networking stack, featuring Discovery v5. -- Optimized `BeaconChain` state machine, up-to-date and - passing all tests. -- RESTful HTTP API. -- Documented and feature-rich CLI interface. -- Capable of running small, local testnets with 250ms slot times. -- Detailed metrics exposed in the Prometheus format. 
+- Get started with [development environment setup](./setup.md). +- See the [interop docs](./interop.md). +- [Run a simple testnet](./simple-testnet.md) in Only Three CLI Commandsâ„¢. From 19dab6422a0df5e8b4dd6dafebb468760a3c499d Mon Sep 17 00:00:00 2001 From: Paul Hauner Date: Tue, 3 Sep 2019 11:35:13 +1000 Subject: [PATCH 197/305] Fix some types in book --- book/src/interop-cheat-sheet.md | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/book/src/interop-cheat-sheet.md b/book/src/interop-cheat-sheet.md index 4f6f079b4..f12b9652c 100644 --- a/book/src/interop-cheat-sheet.md +++ b/book/src/interop-cheat-sheet.md @@ -9,12 +9,12 @@ interop testing. - [Avoid port clashes when starting multiple nodes](#port-bump) - [Specify a custom slot time](#slot-time) - Using the beacon node HTTP API: - - [Curl a nodes ENR](#http-enr) - - [Curl a nodes connected peers](#http-peer-ids) - - [Curl a nodes local peer id](#http-peer-id) - - [Curl a nodes listening multiaddrs](#http-listen-addresses) - - [Curl a nodes beacon chain head](#http-head) - - [Curl a nodes finalized checkpoint](#http-finalized) + - [Curl a node's ENR](#http-enr) + - [Curl a node's connected peers](#http-peer-ids) + - [Curl a node's local peer id](#http-peer-id) + - [Curl a node's listening multiaddrs](#http-listen-addresses) + - [Curl a node's beacon chain head](#http-head) + - [Curl a node's finalized checkpoint](#http-finalized) ## Category: CLI From ae4700660a02c141e066b3b21b5a9fe6447d4a51 Mon Sep 17 00:00:00 2001 From: Paul Hauner Date: Tue, 3 Sep 2019 11:41:42 +1000 Subject: [PATCH 198/305] Fix typo in book --- book/src/interop-cli.md | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/book/src/interop-cli.md b/book/src/interop-cli.md index 3658781d4..3dad845f3 100644 --- a/book/src/interop-cli.md +++ b/book/src/interop-cli.md @@ -5,8 +5,8 @@ The Lighthouse CLI has two primary tasks: - **Resuming** an existing database with `$ ./beacon_node`. 
- **Creating** a new testnet database using `$ ./beacon_node testnet`. -_See [Scenarios](./interop-scenarios.md) for methods we're likely to use -during interop._ +_See [Scenarios](./interop-scenarios.md) for methods we've anticipated will be +used interop._ ## Creating a new database From baaaf59fe5a577aac6b9fe573fcbdef6ce5f924a Mon Sep 17 00:00:00 2001 From: Paul Hauner Date: Tue, 3 Sep 2019 11:49:26 +1000 Subject: [PATCH 199/305] Add message about --spec flag to book --- book/src/interop-scenarios.md | 1 + 1 file changed, 1 insertion(+) diff --git a/book/src/interop-scenarios.md b/book/src/interop-scenarios.md index d54772ee8..dc8789362 100644 --- a/book/src/interop-scenarios.md +++ b/book/src/interop-scenarios.md @@ -58,6 +58,7 @@ $ ./beacon_node testnet --spec minimal -f file ssz /tmp/genesis.ssz > - The `-f` flag ignores any existing database or configuration, backing them > up before re-initializing. > - See `$ ./beacon_node testnet file --help` for more configuration options. +> - The `--spec` flag is required to allow SSZ parsing of fixed-length lists. ### Start an auto-configured validator client From 44a70b94119fb200a19a1e82f90ace833ce76a40 Mon Sep 17 00:00:00 2001 From: Paul Hauner Date: Tue, 3 Sep 2019 13:50:12 +1000 Subject: [PATCH 200/305] Update book cheat-sheet --- book/src/interop-cheat-sheet.md | 12 ++++-------- 1 file changed, 4 insertions(+), 8 deletions(-) diff --git a/book/src/interop-cheat-sheet.md b/book/src/interop-cheat-sheet.md index f12b9652c..ea7794c33 100644 --- a/book/src/interop-cheat-sheet.md +++ b/book/src/interop-cheat-sheet.md @@ -29,10 +29,8 @@ the `--libp2p-addresses` command. #### Example: -Runs an 8 validator quick-start chain, peering with `/ip4/192.168.0.1/tcp/9000` on boot. - ``` -$ ./beacon_node --libp2p-addresses /ip4/192.168.0.1/tcp/9000 testnet -f quick 8 1567222226 +$ ./beacon_node --libp2p-addresses /ip4/192.168.0.1/tcp/9000 ``` @@ -43,10 +41,8 @@ the `--boot-nodes` command. 
#### Example: -Runs an 8 validator quick-start chain, peering with `-IW4QB2...` on boot. - ``` -$ ./beacon_node --boot-nodes -IW4QB2Hi8TPuEzQ41Cdf1r2AUU1FFVFDBJdJyOkWk2qXpZfFZQy2YnJIyoT_5fnbtrXUouoskmydZl4pIg90clIkYUDgmlwhH8AAAGDdGNwgiMog3VkcIIjKIlzZWNwMjU2azGhAjg0-DsTkQynhJCRnLLttBK1RS78lmUkLa-wgzAi-Ob5 testnet -f quick 8 1567222226 +$ ./beacon_node --boot-nodes -IW4QB2Hi8TPuEzQ41Cdf1r2AUU1FFVFDBJdJyOkWk2qXpZfFZQy2YnJIyoT_5fnbtrXUouoskmydZl4pIg90clIkYUDgmlwhH8AAAGDdGNwgiMog3VkcIIjKIlzZWNwMjU2azGhAjg0-DsTkQynhJCRnLLttBK1RS78lmUkLa-wgzAi-Ob5 ``` @@ -61,7 +57,7 @@ ports by some `n`. Increase all ports by `10` (using multiples of `10` is recommended). ``` -$ ./beacon_node -b 10 testnet -f quick 8 1567222226 +$ ./beacon_node -b 10 ``` @@ -75,7 +71,7 @@ Lighthouse can run at quite low slot times when there are few validators (e.g., The `-t` (`--slot-time`) flag specifies the milliseconds per slot. ``` -$ ./beacon_node -b 10 testnet -t 500 -f quick 8 1567222226 +$ ./beacon_node testnet -t 500 recent 8 ``` > Note: `bootstrap` loads the slot time via HTTP and therefore conflicts with From 1b4679e5bcac6799a5712ab1c1c29a86625d2ea5 Mon Sep 17 00:00:00 2001 From: Paul Hauner Date: Tue, 3 Sep 2019 14:18:45 +1000 Subject: [PATCH 201/305] Improve block processing outcomes enum --- beacon_node/beacon_chain/src/beacon_chain.rs | 23 ++++++++++++++------ beacon_node/network/src/sync/manager.rs | 8 ++++++- 2 files changed, 23 insertions(+), 8 deletions(-) diff --git a/beacon_node/beacon_chain/src/beacon_chain.rs b/beacon_node/beacon_chain/src/beacon_chain.rs index 97af43718..72400bd53 100644 --- a/beacon_node/beacon_chain/src/beacon_chain.rs +++ b/beacon_node/beacon_chain/src/beacon_chain.rs @@ -46,11 +46,14 @@ pub enum BlockProcessingOutcome { block_slot: Slot, }, /// The block state_root does not match the generated state. - StateRootMismatch, + StateRootMismatch { block: Hash256, local: Hash256 }, /// The block was a genesis block, these blocks cannot be re-imported. 
GenesisBlock, /// The slot is finalized, no need to import. - FinalizedSlot, + WouldRevertFinalizedSlot { + block_slot: Slot, + finalized_slot: Slot, + }, /// Block is already known, no need to re-import. BlockIsAlreadyKnown, /// The block could not be applied to the state, it is invalid. @@ -957,14 +960,17 @@ impl BeaconChain { .epoch .start_slot(T::EthSpec::slots_per_epoch()); - if block.slot <= finalized_slot { - return Ok(BlockProcessingOutcome::FinalizedSlot); - } - if block.slot == 0 { return Ok(BlockProcessingOutcome::GenesisBlock); } + if block.slot <= finalized_slot { + return Ok(BlockProcessingOutcome::WouldRevertFinalizedSlot { + block_slot: block.slot, + finalized_slot: finalized_slot, + }); + } + let block_root_timer = metrics::start_timer(&metrics::BLOCK_PROCESSING_BLOCK_ROOT); let block_root = block.canonical_root(); @@ -1062,7 +1068,10 @@ impl BeaconChain { let state_root = state.canonical_root(); if block.state_root != state_root { - return Ok(BlockProcessingOutcome::StateRootMismatch); + return Ok(BlockProcessingOutcome::StateRootMismatch { + block: block.state_root, + local: state_root, + }); } metrics::stop_timer(state_root_timer); diff --git a/beacon_node/network/src/sync/manager.rs b/beacon_node/network/src/sync/manager.rs index b81da0991..9cce6300d 100644 --- a/beacon_node/network/src/sync/manager.rs +++ b/beacon_node/network/src/sync/manager.rs @@ -682,13 +682,19 @@ impl ImportManager { ); } } - BlockProcessingOutcome::FinalizedSlot => { + BlockProcessingOutcome::WouldRevertFinalizedSlot { .. 
} => { trace!( self.log, "Finalized or earlier block processed"; "outcome" => format!("{:?}", outcome), ); // block reached our finalized slot or was earlier, move to the next block } + BlockProcessingOutcome::GenesisBlock => { + trace!( + self.log, "Genesis block was processed"; + "outcome" => format!("{:?}", outcome), + ); + } _ => { trace!( self.log, "InvalidBlock"; From ab2b8accd4503011eaeb399acd45988f46fba906 Mon Sep 17 00:00:00 2001 From: Paul Hauner Date: Tue, 3 Sep 2019 15:22:00 +1000 Subject: [PATCH 202/305] Add first pass at Eth1Chain trait --- beacon_node/beacon_chain/src/eth1_chain.rs | 61 ++++++++++++++++++++++ beacon_node/beacon_chain/src/lib.rs | 1 + 2 files changed, 62 insertions(+) create mode 100644 beacon_node/beacon_chain/src/eth1_chain.rs diff --git a/beacon_node/beacon_chain/src/eth1_chain.rs b/beacon_node/beacon_chain/src/eth1_chain.rs new file mode 100644 index 000000000..5f148cd9b --- /dev/null +++ b/beacon_node/beacon_chain/src/eth1_chain.rs @@ -0,0 +1,61 @@ +use crate::BeaconChainTypes; +use eth2_hashing::hash; +use std::marker::PhantomData; +use types::{BeaconState, Deposit, DepositData, Eth1Data, EthSpec, Hash256}; + +type Result = std::result::Result; + +pub enum Error { + /// Unable to return an Eth1Data for the given epoch. + EpochUnavailable, + /// An error from the backend service (e.g., the web3 data fetcher). + BackendError(String), +} + +pub trait Eth1Chain { + /// Returns the `Eth1Data` that should be included in a block being produced for the given + /// `state`. + fn eth1_data_for_epoch(&self, beacon_state: &BeaconState) -> Result; + + /// Returns all `Deposits` between `state.eth1_deposit_index` and + /// `state.eth1_data.deposit_count`. + /// + /// # Note: + /// + /// It is possible that not all returned `Deposits` can be included in a block. E.g., there may + /// be more than `MAX_DEPOSIT_COUNT` or the churn may be too high. 
+ fn queued_deposits(&self, beacon_state: &BeaconState) -> Result>; +} + +pub struct InteropEth1Chain { + _phantom: PhantomData, +} + +impl Eth1Chain for InteropEth1Chain { + fn eth1_data_for_epoch(&self, state: &BeaconState) -> Result { + let current_epoch = state.current_epoch(); + let slots_per_voting_period = T::EthSpec::slots_per_eth1_voting_period() as u64; + let current_voting_period: u64 = current_epoch.as_u64() / slots_per_voting_period; + + // TODO: confirm that `int_to_bytes32` is correct. + let deposit_root = hash(&int_to_bytes32(current_voting_period)); + let block_hash = hash(&deposit_root); + + Ok(Eth1Data { + deposit_root: Hash256::from_slice(&deposit_root), + deposit_count: state.eth1_deposit_index, + block_hash: Hash256::from_slice(&block_hash), + }) + } + + fn queued_deposits(&self, beacon_state: &BeaconState) -> Result> { + Ok(vec![]) + } +} + +/// Returns `int` as little-endian bytes with a length of 32. +fn int_to_bytes32(int: u64) -> Vec { + let mut vec = int.to_le_bytes().to_vec(); + vec.resize(32, 0); + vec +} diff --git a/beacon_node/beacon_chain/src/lib.rs b/beacon_node/beacon_chain/src/lib.rs index 9c833f778..25f8b74eb 100644 --- a/beacon_node/beacon_chain/src/lib.rs +++ b/beacon_node/beacon_chain/src/lib.rs @@ -6,6 +6,7 @@ mod beacon_chain; mod beacon_chain_builder; mod checkpoint; mod errors; +mod eth1_chain; mod fork_choice; mod iter; mod metrics; From 31557704eb6e789e6e5b65a99b757b0a38cdf718 Mon Sep 17 00:00:00 2001 From: Paul Hauner Date: Tue, 3 Sep 2019 15:52:25 +1000 Subject: [PATCH 203/305] Add Eth1Chain member to BeaconChain --- beacon_node/beacon_chain/src/beacon_chain.rs | 36 ++++--------- .../beacon_chain/src/beacon_chain_builder.rs | 15 ++++-- beacon_node/beacon_chain/src/errors.rs | 5 +- beacon_node/beacon_chain/src/eth1_chain.rs | 54 +++++++++++++++---- beacon_node/beacon_chain/src/lib.rs | 1 + beacon_node/beacon_chain/src/test_utils.rs | 15 ++++-- 6 files changed, 82 insertions(+), 44 deletions(-) diff --git 
a/beacon_node/beacon_chain/src/beacon_chain.rs b/beacon_node/beacon_chain/src/beacon_chain.rs index 72400bd53..5409d3728 100644 --- a/beacon_node/beacon_chain/src/beacon_chain.rs +++ b/beacon_node/beacon_chain/src/beacon_chain.rs @@ -1,10 +1,10 @@ use crate::checkpoint::CheckPoint; use crate::errors::{BeaconChainError as Error, BlockProductionError}; +use crate::eth1_chain::{Eth1Chain, Eth1ChainBackend}; use crate::fork_choice::{Error as ForkChoiceError, ForkChoice}; use crate::iter::{ReverseBlockRootIterator, ReverseStateRootIterator}; use crate::metrics; use crate::persisted_beacon_chain::{PersistedBeaconChain, BEACON_CHAIN_DB_KEY}; -use eth2_hashing::hash; use lmd_ghost::LmdGhost; use operation_pool::DepositInsertStatus; use operation_pool::{OperationPool, PersistedOperationPool}; @@ -113,6 +113,7 @@ pub trait BeaconChainTypes: Send + Sync + 'static { type Store: store::Store; type SlotClock: slot_clock::SlotClock; type LmdGhost: LmdGhost; + type Eth1Chain: Eth1ChainBackend; type EthSpec: types::EthSpec; } @@ -127,6 +128,8 @@ pub struct BeaconChain { /// Stores all operations (e.g., `Attestation`, `Deposit`, etc) that are candidates for /// inclusion in a block. pub op_pool: OperationPool, + /// Provides information from the Ethereum 1 (PoW) chain. + pub eth1_chain: Eth1Chain, /// Stores a "snapshot" of the chain at the time the head-of-the-chain block was received. canonical_head: RwLock>, /// The root of the genesis block. @@ -142,6 +145,7 @@ impl BeaconChain { /// Instantiate a new Beacon Chain, from genesis. 
pub fn from_genesis( store: Arc, + eth1_backend: T::Eth1Chain, mut genesis_state: BeaconState, mut genesis_block: BeaconBlock, spec: ChainSpec, @@ -186,6 +190,7 @@ impl BeaconChain { spec, slot_clock, op_pool: OperationPool::new(), + eth1_chain: Eth1Chain::new(eth1_backend), canonical_head, genesis_block_root, fork_choice: ForkChoice::new(store.clone(), &genesis_block, genesis_block_root), @@ -197,6 +202,7 @@ impl BeaconChain { /// Attempt to load an existing instance from the given `store`. pub fn from_store( store: Arc, + eth1_backend: T::Eth1Chain, spec: ChainSpec, log: Logger, ) -> Result>, Error> { @@ -233,6 +239,7 @@ impl BeaconChain { slot_clock, fork_choice: ForkChoice::new(store.clone(), last_finalized_block, last_finalized_root), op_pool, + eth1_chain: Eth1Chain::new(eth1_backend), canonical_head: RwLock::new(p.canonical_head), genesis_block_root: p.genesis_block_root, store, @@ -1205,12 +1212,12 @@ impl BeaconChain { body: BeaconBlockBody { randao_reveal, // TODO: replace with real data. - eth1_data: Self::eth1_data_stub(&state), + eth1_data: self.eth1_chain.eth1_data_for_block_production(&state)?, graffiti, proposer_slashings: proposer_slashings.into(), attester_slashings: attester_slashings.into(), attestations: self.op_pool.get_attestations(&state, &self.spec).into(), - deposits: self.op_pool.get_deposits(&state).into(), + deposits: self.eth1_chain.deposits_for_block_inclusion(&state)?.into(), voluntary_exits: self.op_pool.get_voluntary_exits(&state, &self.spec).into(), transfers: self.op_pool.get_transfers(&state, &self.spec).into(), }, @@ -1234,22 +1241,6 @@ impl BeaconChain { Ok((block, state)) } - fn eth1_data_stub(state: &BeaconState) -> Eth1Data { - let current_epoch = state.current_epoch(); - let slots_per_voting_period = T::EthSpec::slots_per_eth1_voting_period() as u64; - let current_voting_period: u64 = current_epoch.as_u64() / slots_per_voting_period; - - // TODO: confirm that `int_to_bytes32` is correct. 
- let deposit_root = hash(&int_to_bytes32(current_voting_period)); - let block_hash = hash(&deposit_root); - - Eth1Data { - deposit_root: Hash256::from_slice(&deposit_root), - deposit_count: state.eth1_deposit_index, - block_hash: Hash256::from_slice(&block_hash), - } - } - /// Execute the fork choice algorithm and enthrone the result as the canonical head. pub fn fork_choice(&self) -> Result<(), Error> { metrics::inc_counter(&metrics::FORK_CHOICE_REQUESTS); @@ -1445,13 +1436,6 @@ impl BeaconChain { } } -/// Returns `int` as little-endian bytes with a length of 32. -fn int_to_bytes32(int: u64) -> Vec { - let mut vec = int.to_le_bytes().to_vec(); - vec.resize(32, 0); - vec -} - impl From for Error { fn from(e: DBError) -> Error { Error::DBError(e) diff --git a/beacon_node/beacon_chain/src/beacon_chain_builder.rs b/beacon_node/beacon_chain/src/beacon_chain_builder.rs index 37039dce0..f03cbcc96 100644 --- a/beacon_node/beacon_chain/src/beacon_chain_builder.rs +++ b/beacon_node/beacon_chain/src/beacon_chain_builder.rs @@ -127,16 +127,23 @@ impl BeaconChainBuilder { } } - pub fn build(self, store: Arc) -> Result, String> { + pub fn build( + self, + store: Arc, + eth1_backend: T::Eth1Chain, + ) -> Result, String> { Ok(match self.build_strategy { - BuildStrategy::LoadFromStore => BeaconChain::from_store(store, self.spec, self.log) - .map_err(|e| format!("Error loading BeaconChain from database: {:?}", e))? - .ok_or_else(|| format!("Unable to find exising BeaconChain in database."))?, + BuildStrategy::LoadFromStore => { + BeaconChain::from_store(store, eth1_backend, self.spec, self.log) + .map_err(|e| format!("Error loading BeaconChain from database: {:?}", e))? + .ok_or_else(|| format!("Unable to find exising BeaconChain in database."))? 
+ } BuildStrategy::FromGenesis { genesis_block, genesis_state, } => BeaconChain::from_genesis( store, + eth1_backend, genesis_state.as_ref().clone(), genesis_block.as_ref().clone(), self.spec, diff --git a/beacon_node/beacon_chain/src/errors.rs b/beacon_node/beacon_chain/src/errors.rs index 5ef68f2cd..58cfed271 100644 --- a/beacon_node/beacon_chain/src/errors.rs +++ b/beacon_node/beacon_chain/src/errors.rs @@ -1,3 +1,4 @@ +use crate::eth1_chain::Error as Eth1ChainError; use crate::fork_choice::Error as ForkChoiceError; use state_processing::per_block_processing::errors::AttestationValidationError; use state_processing::BlockProcessingError; @@ -42,6 +43,7 @@ pub enum BeaconChainError { } easy_from_to!(SlotProcessingError, BeaconChainError); +easy_from_to!(AttestationValidationError, BeaconChainError); #[derive(Debug, PartialEq)] pub enum BlockProductionError { @@ -50,10 +52,11 @@ pub enum BlockProductionError { UnableToProduceAtSlot(Slot), SlotProcessingError(SlotProcessingError), BlockProcessingError(BlockProcessingError), + Eth1ChainError(Eth1ChainError), BeaconStateError(BeaconStateError), } easy_from_to!(BlockProcessingError, BlockProductionError); easy_from_to!(BeaconStateError, BlockProductionError); easy_from_to!(SlotProcessingError, BlockProductionError); -easy_from_to!(AttestationValidationError, BeaconChainError); +easy_from_to!(Eth1ChainError, BlockProductionError); diff --git a/beacon_node/beacon_chain/src/eth1_chain.rs b/beacon_node/beacon_chain/src/eth1_chain.rs index 5f148cd9b..3ea37c21d 100644 --- a/beacon_node/beacon_chain/src/eth1_chain.rs +++ b/beacon_node/beacon_chain/src/eth1_chain.rs @@ -5,6 +5,35 @@ use types::{BeaconState, Deposit, DepositData, Eth1Data, EthSpec, Hash256}; type Result = std::result::Result; +pub struct Eth1Chain { + backend: T::Eth1Chain, +} + +impl Eth1Chain { + pub fn new(backend: T::Eth1Chain) -> Self { + Self { backend } + } + + pub fn eth1_data_for_block_production( + &self, + state: &BeaconState, + ) -> Result { + 
self.backend.eth1_data(state) + } + + pub fn deposits_for_block_inclusion( + &self, + state: &BeaconState, + ) -> Result> { + let deposits = self.backend.queued_deposits(state)?; + + // TODO: truncate deposits if required. + + Ok(deposits) + } +} + +#[derive(Debug, PartialEq)] pub enum Error { /// Unable to return an Eth1Data for the given epoch. EpochUnavailable, @@ -12,10 +41,10 @@ pub enum Error { BackendError(String), } -pub trait Eth1Chain { +pub trait Eth1ChainBackend { /// Returns the `Eth1Data` that should be included in a block being produced for the given /// `state`. - fn eth1_data_for_epoch(&self, beacon_state: &BeaconState) -> Result; + fn eth1_data(&self, beacon_state: &BeaconState) -> Result; /// Returns all `Deposits` between `state.eth1_deposit_index` and /// `state.eth1_data.deposit_count`. @@ -24,20 +53,19 @@ pub trait Eth1Chain { /// /// It is possible that not all returned `Deposits` can be included in a block. E.g., there may /// be more than `MAX_DEPOSIT_COUNT` or the churn may be too high. - fn queued_deposits(&self, beacon_state: &BeaconState) -> Result>; + fn queued_deposits(&self, beacon_state: &BeaconState) -> Result>; } -pub struct InteropEth1Chain { +pub struct InteropEth1ChainBackend { _phantom: PhantomData, } -impl Eth1Chain for InteropEth1Chain { - fn eth1_data_for_epoch(&self, state: &BeaconState) -> Result { +impl Eth1ChainBackend for InteropEth1ChainBackend { + fn eth1_data(&self, state: &BeaconState) -> Result { let current_epoch = state.current_epoch(); - let slots_per_voting_period = T::EthSpec::slots_per_eth1_voting_period() as u64; + let slots_per_voting_period = T::slots_per_eth1_voting_period() as u64; let current_voting_period: u64 = current_epoch.as_u64() / slots_per_voting_period; - // TODO: confirm that `int_to_bytes32` is correct. 
let deposit_root = hash(&int_to_bytes32(current_voting_period)); let block_hash = hash(&deposit_root); @@ -48,11 +76,19 @@ impl Eth1Chain for InteropEth1Chain { }) } - fn queued_deposits(&self, beacon_state: &BeaconState) -> Result> { + fn queued_deposits(&self, beacon_state: &BeaconState) -> Result> { Ok(vec![]) } } +impl Default for InteropEth1ChainBackend { + fn default() -> Self { + Self { + _phantom: PhantomData, + } + } +} + /// Returns `int` as little-endian bytes with a length of 32. fn int_to_bytes32(int: u64) -> Vec { let mut vec = int.to_le_bytes().to_vec(); diff --git a/beacon_node/beacon_chain/src/lib.rs b/beacon_node/beacon_chain/src/lib.rs index 25f8b74eb..7883019d7 100644 --- a/beacon_node/beacon_chain/src/lib.rs +++ b/beacon_node/beacon_chain/src/lib.rs @@ -19,6 +19,7 @@ pub use self::beacon_chain::{ pub use self::checkpoint::CheckPoint; pub use self::errors::{BeaconChainError, BlockProductionError}; pub use beacon_chain_builder::BeaconChainBuilder; +pub use eth1_chain::InteropEth1ChainBackend; pub use lmd_ghost; pub use metrics::scrape_for_metrics; pub use parking_lot; diff --git a/beacon_node/beacon_chain/src/test_utils.rs b/beacon_node/beacon_chain/src/test_utils.rs index 1006fabf5..07d181a53 100644 --- a/beacon_node/beacon_chain/src/test_utils.rs +++ b/beacon_node/beacon_chain/src/test_utils.rs @@ -1,4 +1,4 @@ -use crate::{BeaconChain, BeaconChainTypes, BlockProcessingOutcome}; +use crate::{BeaconChain, BeaconChainTypes, BlockProcessingOutcome, InteropEth1ChainBackend}; use lmd_ghost::LmdGhost; use rayon::prelude::*; use sloggers::{null::NullLoggerBuilder, Build}; @@ -60,6 +60,7 @@ where type Store = MemoryStore; type SlotClock = TestingSlotClock; type LmdGhost = L; + type Eth1Chain = InteropEth1ChainBackend; type EthSpec = E; } @@ -114,9 +115,15 @@ where let builder = NullLoggerBuilder; let log = builder.build().expect("logger should build"); - let chain = - BeaconChain::from_genesis(store, genesis_state, genesis_block, spec.clone(), log) - 
.expect("Terminate if beacon chain generation fails"); + let chain = BeaconChain::from_genesis( + store, + InteropEth1ChainBackend::default(), + genesis_state, + genesis_block, + spec.clone(), + log, + ) + .expect("Terminate if beacon chain generation fails"); Self { chain, From 29584ca08784fd8c4621cb15e5bd358f9f9ddc26 Mon Sep 17 00:00:00 2001 From: Paul Hauner Date: Tue, 3 Sep 2019 15:56:11 +1000 Subject: [PATCH 204/305] Add docs to Eth1Chain --- beacon_node/beacon_chain/src/eth1_chain.rs | 11 +++++++++-- 1 file changed, 9 insertions(+), 2 deletions(-) diff --git a/beacon_node/beacon_chain/src/eth1_chain.rs b/beacon_node/beacon_chain/src/eth1_chain.rs index 3ea37c21d..8e578ea9a 100644 --- a/beacon_node/beacon_chain/src/eth1_chain.rs +++ b/beacon_node/beacon_chain/src/eth1_chain.rs @@ -1,10 +1,11 @@ use crate::BeaconChainTypes; use eth2_hashing::hash; use std::marker::PhantomData; -use types::{BeaconState, Deposit, DepositData, Eth1Data, EthSpec, Hash256}; +use types::{BeaconState, Deposit, Eth1Data, EthSpec, Hash256}; type Result = std::result::Result; +/// Holds an `Eth1ChainBackend` and serves requests from the `BeaconChain`. pub struct Eth1Chain { backend: T::Eth1Chain, } @@ -14,6 +15,8 @@ impl Eth1Chain { Self { backend } } + /// Returns the `Eth1Data` that should be included in a block being produced for the given + /// `state`. pub fn eth1_data_for_block_production( &self, state: &BeaconState, @@ -21,6 +24,10 @@ impl Eth1Chain { self.backend.eth1_data(state) } + /// Returns a list of `Deposits` that may be included in a block. + /// + /// Including all of the returned `Deposits` in a block should _not_ cause it to become + /// invalid. 
pub fn deposits_for_block_inclusion( &self, state: &BeaconState, @@ -76,7 +83,7 @@ impl Eth1ChainBackend for InteropEth1ChainBackend { }) } - fn queued_deposits(&self, beacon_state: &BeaconState) -> Result> { + fn queued_deposits(&self, _: &BeaconState) -> Result> { Ok(vec![]) } } From c13f27e24558f4b8f396a3b3b0bff89e8f651ae3 Mon Sep 17 00:00:00 2001 From: Luke Anderson Date: Tue, 3 Sep 2019 16:30:04 +1000 Subject: [PATCH 205/305] Updating REST API. - Made /beacon/state return the current 'head' state when no parameters are provided. - Added some of the YAML api spec stuff to the /beacon/state endpoint in the rest_api spec. --- beacon_node/rest_api/src/beacon.rs | 22 ++++++++++++++-- docs/api_spec.yaml | 41 +++++++++++++++++++++++++++++- 2 files changed, 60 insertions(+), 3 deletions(-) diff --git a/beacon_node/rest_api/src/beacon.rs b/beacon_node/rest_api/src/beacon.rs index 66d0b2673..36e7f6c57 100644 --- a/beacon_node/rest_api/src/beacon.rs +++ b/beacon_node/rest_api/src/beacon.rs @@ -163,8 +163,26 @@ pub fn get_state(req: Request) -> ApiResult .get::>>() .ok_or_else(|| ApiError::ServerError("Beacon chain extension missing".to_string()))?; - let query_params = ["root", "slot"]; - let (key, value) = UrlQuery::from_request(&req)?.first_of(&query_params)?; + let (key, value) = match UrlQuery::from_request(&req) { + Ok(query) => { + // We have *some* parameters, check them. + let query_params = ["root", "slot"]; + match query.first_of(&query_params) { + Ok((k, v)) => (k, v), + Err(e) => { + // Wrong parameters provided, or another error, return the error. + return Err(e); + } + } + }, + Err(ApiError::InvalidQueryParams(_)) => { + // No parameters provided at all, use current slot. 
+ (String::from("slot"), beacon_chain.head().beacon_state.slot.to_string()) + } + Err(e) => { + return Err(e); + } + }; let (root, state): (Hash256, BeaconState) = match (key.as_ref(), value) { ("slot", value) => state_at_slot(&beacon_chain, parse_slot(&value)?)?, diff --git a/docs/api_spec.yaml b/docs/api_spec.yaml index 42b394e69..892ce7a68 100644 --- a/docs/api_spec.yaml +++ b/docs/api_spec.yaml @@ -344,7 +344,6 @@ paths: 500: $ref: '#/components/responses/InternalError' - #TODO Fill out block_root endpoint /beacon/block_root: get: tags: @@ -977,6 +976,46 @@ paths: #TODO fill out /beacon/state /beacon/state: + get: + tags: + - Phase0 + summary: "Get the full beacon state, at a particular slot or block root." + description: "Requests the beacon node to provide the full beacon state object, and the state root, given a particular slot number or block root. If no parameters are provided, the latest slot of the beacon node (the 'head' slot) is used." + parameters: + - name: root + description: "The block root at which the state should be provided." + in: query + required: false + schema: + type: string + format: bytes + pattern: "^0x[a-fA-F0-9]{64}$" + - name: slot + description: "The slot number at which the state should be provided." 
+ in: query + required: false + schema: + type: integer + format: uint64 + responses: + 200: + description: Success response + content: + application/json: + schema: + type: object + properties: + root: + type: string + format: bytes + pattern: "^0x[a-fA-F0-9]{64}$" + beacon_state: + #TODO: Need to add BeaconState Schema + $ref: '#/components/schemas/BeaconState' + 400: + $ref: '#/components/responses/InvalidRequest' + 500: + $ref: '#/components/responses/InternalError' #TODO fill out /beacon/state_root /beacon/state_root: From d80d9dba4c62d9b81b9fd233460cdc5e47d23c5b Mon Sep 17 00:00:00 2001 From: Paul Hauner Date: Tue, 3 Sep 2019 16:40:53 +1000 Subject: [PATCH 206/305] Add flag for web3 server --- beacon_node/beacon_chain/src/eth1_chain.rs | 8 ++- beacon_node/beacon_chain/src/lib.rs | 2 +- beacon_node/client/src/config.rs | 18 +++++ beacon_node/client/src/lib.rs | 13 ++-- beacon_node/src/config.rs | 15 ++++- beacon_node/src/main.rs | 10 +++ beacon_node/src/run.rs | 76 ++++++++-------------- 7 files changed, 85 insertions(+), 57 deletions(-) diff --git a/beacon_node/beacon_chain/src/eth1_chain.rs b/beacon_node/beacon_chain/src/eth1_chain.rs index 8e578ea9a..e4ccee3ba 100644 --- a/beacon_node/beacon_chain/src/eth1_chain.rs +++ b/beacon_node/beacon_chain/src/eth1_chain.rs @@ -48,7 +48,9 @@ pub enum Error { BackendError(String), } -pub trait Eth1ChainBackend { +pub trait Eth1ChainBackend: Sized + Send + Sync { + fn new(server: String) -> Result; + /// Returns the `Eth1Data` that should be included in a block being produced for the given /// `state`. 
fn eth1_data(&self, beacon_state: &BeaconState) -> Result; @@ -68,6 +70,10 @@ pub struct InteropEth1ChainBackend { } impl Eth1ChainBackend for InteropEth1ChainBackend { + fn new(_server: String) -> Result { + Ok(Self::default()) + } + fn eth1_data(&self, state: &BeaconState) -> Result { let current_epoch = state.current_epoch(); let slots_per_voting_period = T::slots_per_eth1_voting_period() as u64; diff --git a/beacon_node/beacon_chain/src/lib.rs b/beacon_node/beacon_chain/src/lib.rs index 7883019d7..036172348 100644 --- a/beacon_node/beacon_chain/src/lib.rs +++ b/beacon_node/beacon_chain/src/lib.rs @@ -19,7 +19,7 @@ pub use self::beacon_chain::{ pub use self::checkpoint::CheckPoint; pub use self::errors::{BeaconChainError, BlockProductionError}; pub use beacon_chain_builder::BeaconChainBuilder; -pub use eth1_chain::InteropEth1ChainBackend; +pub use eth1_chain::{Eth1ChainBackend, InteropEth1ChainBackend}; pub use lmd_ghost; pub use metrics::scrape_for_metrics; pub use parking_lot; diff --git a/beacon_node/client/src/config.rs b/beacon_node/client/src/config.rs index f9b366eb1..5b0553c5b 100644 --- a/beacon_node/client/src/config.rs +++ b/beacon_node/client/src/config.rs @@ -23,6 +23,7 @@ pub struct Config { /// files. It can only be configured via the CLI. #[serde(skip)] pub beacon_chain_start_method: BeaconChainStartMethod, + pub eth1_backend_method: Eth1BackendMethod, pub network: network::NetworkConfig, pub rpc: rpc::RPCConfig, pub rest_api: rest_api::ApiConfig, @@ -69,6 +70,22 @@ impl Default for BeaconChainStartMethod { } } +/// Defines which Eth1 backend the client should use. +#[derive(Debug, Clone, Serialize, Deserialize)] +#[serde(tag = "type")] +pub enum Eth1BackendMethod { + /// Use the mocked eth1 backend used in interop testing + Interop, + /// Use a web3 connection to a running Eth1 node. 
+ Web3 { server: String }, +} + +impl Default for Eth1BackendMethod { + fn default() -> Self { + Eth1BackendMethod::Interop + } +} + impl Default for Config { fn default() -> Self { Self { @@ -81,6 +98,7 @@ impl Default for Config { rest_api: <_>::default(), spec_constants: TESTNET_SPEC_CONSTANTS.into(), beacon_chain_start_method: <_>::default(), + eth1_backend_method: <_>::default(), } } } diff --git a/beacon_node/client/src/lib.rs b/beacon_node/client/src/lib.rs index e14da2af9..33f27f253 100644 --- a/beacon_node/client/src/lib.rs +++ b/beacon_node/client/src/lib.rs @@ -21,14 +21,14 @@ use tokio::runtime::TaskExecutor; use tokio::timer::Interval; use types::EthSpec; -pub use beacon_chain::BeaconChainTypes; -pub use config::{BeaconChainStartMethod, Config as ClientConfig}; +pub use beacon_chain::{BeaconChainTypes, Eth1ChainBackend, InteropEth1ChainBackend}; +pub use config::{BeaconChainStartMethod, Config as ClientConfig, Eth1BackendMethod}; pub use eth2_config::Eth2Config; #[derive(Clone)] pub struct ClientType { - _phantom_t: PhantomData, - _phantom_u: PhantomData, + _phantom_s: PhantomData, + _phantom_e: PhantomData, } impl BeaconChainTypes for ClientType @@ -39,6 +39,7 @@ where type Store = S; type SlotClock = SystemTimeSlotClock; type LmdGhost = ThreadSafeReducedTree; + type Eth1Chain = InteropEth1ChainBackend; type EthSpec = E; } @@ -168,9 +169,11 @@ where } }; + let eth1_backend = T::Eth1Chain::new(String::new()).map_err(|e| format!("{:?}", e))?; + let beacon_chain: Arc> = Arc::new( beacon_chain_builder - .build(store) + .build(store, eth1_backend) .map_err(error::Error::from)?, ); diff --git a/beacon_node/src/config.rs b/beacon_node/src/config.rs index 4a3f6b6a7..47b877ecb 100644 --- a/beacon_node/src/config.rs +++ b/beacon_node/src/config.rs @@ -1,5 +1,5 @@ use clap::ArgMatches; -use client::{BeaconChainStartMethod, ClientConfig, Eth2Config}; +use client::{BeaconChainStartMethod, ClientConfig, Eth1BackendMethod, Eth2Config}; use 
eth2_config::{read_from_file, write_to_file}; use lighthouse_bootstrap::Bootstrapper; use rand::{distributions::Alphanumeric, Rng}; @@ -25,6 +25,14 @@ type Config = (ClientConfig, Eth2Config); pub fn get_configs(cli_args: &ArgMatches, log: &Logger) -> Result { let mut builder = ConfigBuilder::new(cli_args, log)?; + if let Some(server) = cli_args.value_of("eth1-server") { + builder.set_eth1_backend_method(Eth1BackendMethod::Web3 { + server: server.into(), + }) + } else { + builder.set_eth1_backend_method(Eth1BackendMethod::Interop) + } + match cli_args.subcommand() { ("testnet", Some(sub_cmd_args)) => { process_testnet_subcommand(&mut builder, sub_cmd_args, log)? @@ -288,6 +296,11 @@ impl<'a> ConfigBuilder<'a> { self.client_config.beacon_chain_start_method = method; } + /// Sets the method for starting the beacon chain. + pub fn set_eth1_backend_method(&mut self, method: Eth1BackendMethod) { + self.client_config.eth1_backend_method = method; + } + /// Import the libp2p address for `server` into the list of bootnodes in `self`. /// /// If `port` is `Some`, it is used as the port for the `Multiaddr`. If `port` is `None`, diff --git a/beacon_node/src/main.rs b/beacon_node/src/main.rs index b914be549..fab75ea4e 100644 --- a/beacon_node/src/main.rs +++ b/beacon_node/src/main.rs @@ -162,6 +162,16 @@ fn main() { .takes_value(true), ) + /* + * Eth1 Integration + */ + .arg( + Arg::with_name("eth1-server") + .long("eth1-server") + .value_name("SERVER") + .help("Specifies the server for a web3 connection to the Eth1 chain.") + .takes_value(true) + ) /* * Database parameters. 
*/ diff --git a/beacon_node/src/run.rs b/beacon_node/src/run.rs index 26225cc92..d036ef0c4 100644 --- a/beacon_node/src/run.rs +++ b/beacon_node/src/run.rs @@ -1,4 +1,7 @@ -use client::{error, notifier, BeaconChainTypes, Client, ClientConfig, ClientType, Eth2Config}; +use client::{ + error, notifier, BeaconChainTypes, Client, ClientConfig, ClientType, Eth1BackendMethod, + Eth2Config, +}; use futures::sync::oneshot; use futures::Future; use slog::{error, info}; @@ -47,55 +50,30 @@ pub fn run_beacon_node( "spec_constants" => &spec_constants, ); + macro_rules! run_client { + ($store: ty, $eth_spec: ty) => { + run::>( + &db_path, + client_config, + eth2_config, + executor, + runtime, + log, + ) + }; + } + + if let Eth1BackendMethod::Web3 { .. } = client_config.eth1_backend_method { + return Err("Starting from web3 backend is not supported for interop.".into()); + } + match (db_type.as_str(), spec_constants.as_str()) { - ("disk", "minimal") => run::>( - &db_path, - client_config, - eth2_config, - executor, - runtime, - log, - ), - ("memory", "minimal") => run::>( - &db_path, - client_config, - eth2_config, - executor, - runtime, - log, - ), - ("disk", "mainnet") => run::>( - &db_path, - client_config, - eth2_config, - executor, - runtime, - log, - ), - ("memory", "mainnet") => run::>( - &db_path, - client_config, - eth2_config, - executor, - runtime, - log, - ), - ("disk", "interop") => run::>( - &db_path, - client_config, - eth2_config, - executor, - runtime, - log, - ), - ("memory", "interop") => run::>( - &db_path, - client_config, - eth2_config, - executor, - runtime, - log, - ), + ("disk", "minimal") => run_client!(DiskStore, MinimalEthSpec), + ("disk", "mainnet") => run_client!(DiskStore, MainnetEthSpec), + ("disk", "interop") => run_client!(DiskStore, InteropEthSpec), + ("memory", "minimal") => run_client!(MemoryStore, MinimalEthSpec), + ("memory", "mainnet") => run_client!(MemoryStore, MainnetEthSpec), + ("memory", "interop") => run_client!(MemoryStore, 
InteropEthSpec), (db_type, spec) => { error!(log, "Unknown runtime configuration"; "spec_constants" => spec, "db_type" => db_type); Err("Unknown specification and/or db_type.".into()) From f47eaf57304d627e552408aae8003197f0318f5d Mon Sep 17 00:00:00 2001 From: Michael Sproul Date: Tue, 3 Sep 2019 16:46:10 +1000 Subject: [PATCH 207/305] Parallel tests and SSZ generic --- eth2/utils/ssz_types/src/fixed_vector.rs | 17 +- tests/ef_tests/src/cases.rs | 12 +- tests/ef_tests/src/cases/epoch_processing.rs | 2 +- tests/ef_tests/src/cases/operations.rs | 3 +- tests/ef_tests/src/cases/ssz_generic.rs | 229 +++++++++++++++---- tests/ef_tests/src/cases/ssz_static.rs | 8 +- tests/ef_tests/src/handler.rs | 32 ++- tests/ef_tests/src/lib.rs | 7 - tests/ef_tests/tests/tests.rs | 25 +- 9 files changed, 246 insertions(+), 89 deletions(-) diff --git a/eth2/utils/ssz_types/src/fixed_vector.rs b/eth2/utils/ssz_types/src/fixed_vector.rs index edac77f0d..edf499adf 100644 --- a/eth2/utils/ssz_types/src/fixed_vector.rs +++ b/eth2/utils/ssz_types/src/fixed_vector.rs @@ -220,13 +220,26 @@ where fn from_ssz_bytes(bytes: &[u8]) -> Result { if bytes.is_empty() { - Ok(FixedVector::from(vec![])) + Err(ssz::DecodeError::InvalidByteLength { + len: 0, + expected: 1, + }) } else if T::is_ssz_fixed_len() { bytes .chunks(T::ssz_fixed_len()) .map(|chunk| T::from_ssz_bytes(chunk)) .collect::, _>>() - .and_then(|vec| Ok(vec.into())) + .and_then(|vec| { + if vec.len() == N::to_usize() { + Ok(vec.into()) + } else { + Err(ssz::DecodeError::BytesInvalid(format!( + "wrong number of vec elements, got: {}, expected: {}", + vec.len(), + N::to_usize() + ))) + } + }) } else { ssz::decode_list_of_variable_length_items(bytes).and_then(|vec| Ok(vec.into())) } diff --git a/tests/ef_tests/src/cases.rs b/tests/ef_tests/src/cases.rs index 1192eb0a0..ed00f0ffe 100644 --- a/tests/ef_tests/src/cases.rs +++ b/tests/ef_tests/src/cases.rs @@ -1,4 +1,5 @@ use super::*; +use rayon::prelude::*; use std::fmt::Debug; use 
std::path::Path; @@ -39,7 +40,7 @@ pub trait LoadCase: Sized { fn load_from_dir(_path: &Path) -> Result; } -pub trait Case: Debug { +pub trait Case: Debug + Sync { /// An optional field for implementing a custom description. /// /// Defaults to "no description". @@ -79,13 +80,10 @@ pub struct Cases { pub test_cases: Vec, } -impl EfTest for Cases -where - T: Case + Debug, -{ - fn test_results(&self) -> Vec { +impl Cases { + pub fn test_results(&self) -> Vec { self.test_cases - .iter() + .into_par_iter() .enumerate() .map(|(i, tc)| CaseResult::new(i, tc, tc.result(i))) .collect() diff --git a/tests/ef_tests/src/cases/epoch_processing.rs b/tests/ef_tests/src/cases/epoch_processing.rs index ac47ab236..d79b5fc48 100644 --- a/tests/ef_tests/src/cases/epoch_processing.rs +++ b/tests/ef_tests/src/cases/epoch_processing.rs @@ -31,7 +31,7 @@ pub struct EpochProcessing> { _phantom: PhantomData, } -pub trait EpochTransition: TypeName + Debug { +pub trait EpochTransition: TypeName + Debug + Sync { fn run(state: &mut BeaconState, spec: &ChainSpec) -> Result<(), EpochProcessingError>; } diff --git a/tests/ef_tests/src/cases/operations.rs b/tests/ef_tests/src/cases/operations.rs index bcc104bad..e86e6f598 100644 --- a/tests/ef_tests/src/cases/operations.rs +++ b/tests/ef_tests/src/cases/operations.rs @@ -10,6 +10,7 @@ use state_processing::per_block_processing::{ process_block_header, process_deposits, process_exits, process_proposer_slashings, process_transfers, }; +use std::fmt::Debug; use std::path::{Path, PathBuf}; use types::{ Attestation, AttesterSlashing, BeaconBlock, BeaconState, ChainSpec, Deposit, EthSpec, @@ -31,7 +32,7 @@ pub struct Operations> { pub post: Option>, } -pub trait Operation: Decode + TypeName + std::fmt::Debug { +pub trait Operation: Decode + TypeName + Debug + Sync { fn handler_name() -> String { Self::name().to_lowercase() } diff --git a/tests/ef_tests/src/cases/ssz_generic.rs b/tests/ef_tests/src/cases/ssz_generic.rs index ca49d2106..05b96ad7d 100644 
--- a/tests/ef_tests/src/cases/ssz_generic.rs +++ b/tests/ef_tests/src/cases/ssz_generic.rs @@ -1,68 +1,205 @@ use super::*; -use crate::case_result::compare_result; -use ethereum_types::{U128, U256}; +use crate::cases::ssz_static::{check_serialization, check_tree_hash, SszStaticType}; +use crate::yaml_decode::yaml_decode_file; use serde_derive::Deserialize; -use ssz::Decode; -use std::fmt::Debug; +use std::fs; +use std::path::{Path, PathBuf}; +use types::typenum::*; +use types::{BitList, BitVector, FixedVector, VariableList}; #[derive(Debug, Clone, Deserialize)] -pub struct SszGeneric { - #[serde(alias = "type")] - pub type_name: String, - pub valid: bool, - pub value: Option, - pub ssz: Option, +struct Metadata { + root: String, + signing_root: Option, } -impl YamlDecode for SszGeneric { - fn yaml_decode(yaml: &str) -> Result { - Ok(serde_yaml::from_str(yaml).unwrap()) +#[derive(Debug, Clone)] +pub struct SszGeneric { + path: PathBuf, + handler_name: String, + case_name: String, +} + +impl LoadCase for SszGeneric { + fn load_from_dir(path: &Path) -> Result { + let components = path + .components() + .map(|c| c.as_os_str().to_string_lossy().into_owned()) + .rev() + .collect::>(); + // Test case name is last + let case_name = components[0].clone(); + // Handler name is third last, before suite name and case name + let handler_name = components[2].clone(); + Ok(Self { + path: path.into(), + handler_name, + case_name, + }) + } +} + +macro_rules! 
type_dispatch { + ($function:ident, + ($($arg:expr),*), + $base_ty:tt, + <$($param_ty:ty),*>, + [ $value:expr => primitive_type ] $($rest:tt)*) => { + match $value { + "bool" => type_dispatch!($function, ($($arg),*), $base_ty, <$($param_ty,)* bool>, $($rest)*), + "uint8" => type_dispatch!($function, ($($arg),*), $base_ty, <$($param_ty,)* u8>, $($rest)*), + "uint16" => type_dispatch!($function, ($($arg),*), $base_ty, <$($param_ty,)* u16>, $($rest)*), + "uint32" => type_dispatch!($function, ($($arg),*), $base_ty, <$($param_ty,)* u32>, $($rest)*), + "uint64" => type_dispatch!($function, ($($arg),*), $base_ty, <$($param_ty,)* u64>, $($rest)*), + // FIXME(michael): implement tree hash for big ints + // "uint128" => type_dispatch!($function, ($($arg),*), $base_ty, <$($param_ty,)* etherum_types::U128>, $($rest)*), + // "uint256" => type_dispatch!($function, ($($arg),*), $base_ty, <$($param_ty,)* ethereum_types::U256>, $($rest)*), + _ => { println!("unsupported: {}", $value); Ok(()) }, + } + }; + ($function:ident, + ($($arg:expr),*), + $base_ty:tt, + <$($param_ty:ty),*>, + [ $value:expr => typenum ] $($rest:tt)*) => { + match $value { + // DO YOU LIKE NUMBERS? 
+ "0" => type_dispatch!($function, ($($arg),*), $base_ty, <$($param_ty,)* U0>, $($rest)*), + "1" => type_dispatch!($function, ($($arg),*), $base_ty, <$($param_ty,)* U1>, $($rest)*), + "2" => type_dispatch!($function, ($($arg),*), $base_ty, <$($param_ty,)* U2>, $($rest)*), + "3" => type_dispatch!($function, ($($arg),*), $base_ty, <$($param_ty,)* U3>, $($rest)*), + "4" => type_dispatch!($function, ($($arg),*), $base_ty, <$($param_ty,)* U4>, $($rest)*), + "5" => type_dispatch!($function, ($($arg),*), $base_ty, <$($param_ty,)* U5>, $($rest)*), + "6" => type_dispatch!($function, ($($arg),*), $base_ty, <$($param_ty,)* U6>, $($rest)*), + "7" => type_dispatch!($function, ($($arg),*), $base_ty, <$($param_ty,)* U7>, $($rest)*), + "8" => type_dispatch!($function, ($($arg),*), $base_ty, <$($param_ty,)* U8>, $($rest)*), + "9" => type_dispatch!($function, ($($arg),*), $base_ty, <$($param_ty,)* U9>, $($rest)*), + "16" => type_dispatch!($function, ($($arg),*), $base_ty, <$($param_ty,)* U16>, $($rest)*), + "31" => type_dispatch!($function, ($($arg),*), $base_ty, <$($param_ty,)* U31>, $($rest)*), + "32" => type_dispatch!($function, ($($arg),*), $base_ty, <$($param_ty,)* U32>, $($rest)*), + "64" => type_dispatch!($function, ($($arg),*), $base_ty, <$($param_ty,)* U64>, $($rest)*), + "128" => type_dispatch!($function, ($($arg),*), $base_ty, <$($param_ty,)* U128>, $($rest)*), + "256" => type_dispatch!($function, ($($arg),*), $base_ty, <$($param_ty,)* U256>, $($rest)*), + "512" => type_dispatch!($function, ($($arg),*), $base_ty, <$($param_ty,)* U512>, $($rest)*), + "513" => type_dispatch!($function, ($($arg),*), $base_ty, <$($param_ty,)* U513>, $($rest)*), + "1024" => type_dispatch!($function, ($($arg),*), $base_ty, <$($param_ty,)* U1024>, $($rest)*), + "2048" => type_dispatch!($function, ($($arg),*), $base_ty, <$($param_ty,)* U2048>, $($rest)*), + "4096" => type_dispatch!($function, ($($arg),*), $base_ty, <$($param_ty,)* U4096>, $($rest)*), + "8192" => type_dispatch!($function, 
($($arg),*), $base_ty, <$($param_ty,)* U8192>, $($rest)*), + _ => { println!("unsupported: {}", $value); Ok(()) }, + } + }; + // No base type: apply type params to function + ($function:ident, ($($arg:expr),*), _, <$($param_ty:ty),*>,) => { + $function::<$($param_ty),*>($($arg),*) + }; + ($function:ident, ($($arg:expr),*), $base_type_name:ident, <$($param_ty:ty),*>,) => { + $function::<$base_type_name<$($param_ty),*>>($($arg),*) } } impl Case for SszGeneric { + fn path(&self) -> &Path { + &self.path + } + fn result(&self, _case_index: usize) -> Result<(), Error> { - if let Some(ssz) = &self.ssz { - match self.type_name.as_ref() { - "uint8" => ssz_generic_test::(self.valid, ssz, &self.value), - "uint16" => ssz_generic_test::(self.valid, ssz, &self.value), - "uint32" => ssz_generic_test::(self.valid, ssz, &self.value), - "uint64" => ssz_generic_test::(self.valid, ssz, &self.value), - "uint128" => ssz_generic_test::(self.valid, ssz, &self.value), - "uint256" => ssz_generic_test::(self.valid, ssz, &self.value), - _ => Err(Error::FailedToParseTest(format!( - "Unknown type: {}", - self.type_name - ))), + let parts = self.case_name.split('_').collect::>(); + + match self.handler_name.as_str() { + "basic_vector" => { + let elem_ty = parts[1]; + let length = parts[2]; + + type_dispatch!( + ssz_generic_test, + (&self.path), + FixedVector, + <>, + [elem_ty => primitive_type] + [length => typenum] + )?; } - } else { - // Skip tests that do not have an ssz field. 
- // - // See: https://github.com/ethereum/eth2.0-specs/issues/1079 - Ok(()) + "bitlist" => { + let limit = parts[1]; + + // FIXME(michael): mark length "no" cases as known failures + + type_dispatch!( + ssz_generic_test, + (&self.path), + BitList, + <>, + [limit => typenum] + )?; + } + "bitvector" => { + let length = parts[1]; + + type_dispatch!( + ssz_generic_test, + (&self.path), + BitVector, + <>, + [length => typenum] + )?; + } + "boolean" => { + ssz_generic_test::(&self.path)?; + } + "uints" => { + let type_name = "uint".to_owned() + parts[1]; + + type_dispatch!( + ssz_generic_test, + (&self.path), + _, + <>, + [type_name.as_str() => primitive_type] + )?; + } + // FIXME(michael): support for the containers tests + _ => panic!("unsupported handler: {}", self.handler_name), } + Ok(()) } } -/// Execute a `ssz_generic` test case. -fn ssz_generic_test(should_be_ok: bool, ssz: &str, value: &Option) -> Result<(), Error> -where - T: Decode + YamlDecode + Debug + PartialEq, -{ - let ssz = hex::decode(&ssz[2..]).map_err(|e| Error::FailedToParseTest(format!("{:?}", e)))?; - - // We do not cater for the scenario where the test is valid but we are not passed any SSZ. - if should_be_ok && value.is_none() { - panic!("Unexpected test input. Cannot pass without value.") - } - - let expected = if let Some(string) = value { - Some(T::yaml_decode(string)?) +fn ssz_generic_test(path: &Path) -> Result<(), Error> { + let meta_path = path.join("meta.yaml"); + let meta: Option = if meta_path.is_file() { + Some(yaml_decode_file(&meta_path)?) } else { None }; - let decoded = T::from_ssz_bytes(&ssz); + let serialized = fs::read(&path.join("serialized.ssz")).expect("serialized.ssz exists"); - compare_result(&decoded, &expected) + let value_path = path.join("value.yaml"); + let value: Option = if value_path.is_file() { + Some(yaml_decode_file(&value_path)?) 
+ } else { + None + }; + + // Valid + // TODO: signing root + if let Some(value) = value { + check_serialization(&value, &serialized)?; + + if let Some(ref meta) = meta { + check_tree_hash(&meta.root, value.tree_hash_root())?; + } + } + // Invalid + else { + if let Ok(decoded) = T::from_ssz_bytes(&serialized) { + return Err(Error::DidntFail(format!( + "Decoded invalid bytes into: {:?}", + decoded + ))); + } + } + + Ok(()) } diff --git a/tests/ef_tests/src/cases/ssz_static.rs b/tests/ef_tests/src/cases/ssz_static.rs index 6a949073d..f9f59cc4b 100644 --- a/tests/ef_tests/src/cases/ssz_static.rs +++ b/tests/ef_tests/src/cases/ssz_static.rs @@ -35,12 +35,12 @@ pub struct SszStaticSR { // Trait alias for all deez bounds pub trait SszStaticType: - serde::de::DeserializeOwned + Decode + Encode + TreeHash + Clone + PartialEq + Debug + serde::de::DeserializeOwned + Decode + Encode + TreeHash + Clone + PartialEq + Debug + Sync { } impl SszStaticType for T where - T: serde::de::DeserializeOwned + Decode + Encode + TreeHash + Clone + PartialEq + Debug + T: serde::de::DeserializeOwned + Decode + Encode + TreeHash + Clone + PartialEq + Debug + Sync { } @@ -77,7 +77,7 @@ impl LoadCase for SszStaticSR { } } -fn check_serialization(value: &T, serialized: &[u8]) -> Result<(), Error> { +pub fn check_serialization(value: &T, serialized: &[u8]) -> Result<(), Error> { // Check serialization let serialized_result = value.as_ssz_bytes(); compare_result::, Error>(&Ok(serialized_result), &Some(serialized.to_vec()))?; @@ -89,7 +89,7 @@ fn check_serialization(value: &T, serialized: &[u8]) -> Result Ok(()) } -fn check_tree_hash(expected_str: &str, actual_root: Vec) -> Result<(), Error> { +pub fn check_tree_hash(expected_str: &str, actual_root: Vec) -> Result<(), Error> { let expected_root = hex::decode(&expected_str[2..]) .map_err(|e| Error::FailedToParseTest(format!("{:?}", e)))?; let expected_root = Hash256::from_slice(&expected_root); diff --git a/tests/ef_tests/src/handler.rs 
b/tests/ef_tests/src/handler.rs index ca1304136..b6334c383 100644 --- a/tests/ef_tests/src/handler.rs +++ b/tests/ef_tests/src/handler.rs @@ -1,6 +1,6 @@ use crate::cases::{self, Case, Cases, EpochTransition, LoadCase, Operation}; +use crate::type_name; use crate::type_name::TypeName; -use crate::EfTest; use std::fs; use std::marker::PhantomData; use std::path::PathBuf; @@ -256,3 +256,33 @@ impl> Handler for OperationsHandler O::handler_name() } } + +pub struct SszGenericHandler(PhantomData); + +impl Handler for SszGenericHandler { + type Case = cases::SszGeneric; + + fn config_name() -> &'static str { + "general" + } + + fn runner_name() -> &'static str { + "ssz_generic" + } + + fn handler_name() -> String { + H::name().into() + } +} + +// Supported SSZ generic handlers +pub struct BasicVector; +type_name!(BasicVector, "basic_vector"); +pub struct Bitlist; +type_name!(Bitlist, "bitlist"); +pub struct Bitvector; +type_name!(Bitvector, "bitvector"); +pub struct Boolean; +type_name!(Boolean, "boolean"); +pub struct Uints; +type_name!(Uints, "uints"); diff --git a/tests/ef_tests/src/lib.rs b/tests/ef_tests/src/lib.rs index 54e674d85..bcf7c77a0 100644 --- a/tests/ef_tests/src/lib.rs +++ b/tests/ef_tests/src/lib.rs @@ -17,10 +17,3 @@ mod handler; mod results; mod type_name; mod yaml_decode; - -/// Defined where an object can return the results of some test(s) adhering to the Ethereum -/// Foundation testing format. -pub trait EfTest { - /// Returns the results of executing one or more tests. 
- fn test_results(&self) -> Vec; -} diff --git a/tests/ef_tests/tests/tests.rs b/tests/ef_tests/tests/tests.rs index d663eb454..71fa53c66 100644 --- a/tests/ef_tests/tests/tests.rs +++ b/tests/ef_tests/tests/tests.rs @@ -1,5 +1,4 @@ use ef_tests::*; -use rayon::prelude::*; use types::{ Attestation, AttestationData, AttestationDataAndCustodyBit, AttesterSlashing, BeaconBlock, BeaconBlockBody, BeaconBlockHeader, BeaconState, Checkpoint, CompactCommittee, Crosslink, @@ -7,29 +6,15 @@ use types::{ MinimalEthSpec, PendingAttestation, ProposerSlashing, Transfer, Validator, VoluntaryExit, }; -/* #[test] -#[cfg(feature = "fake_crypto")] fn ssz_generic() { - yaml_files_in_test_dir(&Path::new("ssz_generic")) - .into_par_iter() - .for_each(|file| { - Doc::assert_tests_pass(file); - }); + SszGenericHandler::::run(); + SszGenericHandler::::run(); + SszGenericHandler::::run(); + SszGenericHandler::::run(); + SszGenericHandler::::run(); } - -#[test] -#[cfg(feature = "fake_crypto")] -fn ssz_static() { - yaml_files_in_test_dir(&Path::new("ssz_static")) - .into_par_iter() - .for_each(|file| { - Doc::assert_tests_pass(file); - }); -} -*/ - #[test] fn shuffling() { ShufflingHandler::::run(); From 777987a49ecdf18e9bea7a3914f345e7452ff980 Mon Sep 17 00:00:00 2001 From: Luke Anderson Date: Tue, 3 Sep 2019 20:13:26 +1000 Subject: [PATCH 208/305] Updating the spec to align with what's implemented. 
- Filled the OpenAPI spec for some major functions: - /beacon/state - /beacon/get_finalized_checkpoint - /beacon/state_root - Created some new schemas in the spec, such as Shard, Checkpoint, Validator, Eth1Data, BeaconState, PendingAttestation, Crosslink --- docs/api_spec.yaml | 368 ++++++++++++++++++++++++++++++++++++++------- 1 file changed, 311 insertions(+), 57 deletions(-) diff --git a/docs/api_spec.yaml b/docs/api_spec.yaml index 892ce7a68..2356d1f66 100644 --- a/docs/api_spec.yaml +++ b/docs/api_spec.yaml @@ -527,7 +527,7 @@ paths: validators: type: array items: - $ref: '#/components/schemas/ValidatorInfo' + $ref: '#/components/schemas/Validator' /beacon/validators/activesetchanges: get: @@ -974,7 +974,6 @@ paths: 503: $ref: '#/components/responses/CurrentlySyncing' - #TODO fill out /beacon/state /beacon/state: get: tags: @@ -1010,18 +1009,63 @@ paths: format: bytes pattern: "^0x[a-fA-F0-9]{64}$" beacon_state: - #TODO: Need to add BeaconState Schema $ref: '#/components/schemas/BeaconState' 400: $ref: '#/components/responses/InvalidRequest' 500: $ref: '#/components/responses/InternalError' - #TODO fill out /beacon/state_root /beacon/state_root: + get: + tags: + - Phase0 + summary: "Get the beacon state root, at a particular slot." + description: "Requests the beacon node to provide the root of the beacon state object, given a particular slot number." + parameters: + - name: slot + description: "The slot number at which the state should be provided." 
+ in: query + required: true + schema: + type: integer + format: uint64 + responses: + 200: + description: Success response + content: + application/json: + schema: + type: object + properties: + root: + type: string + format: bytes + pattern: "^0x[a-fA-F0-9]{64}$" + description: "The state root" + + 400: + $ref: '#/components/responses/InvalidRequest' + 500: + $ref: '#/components/responses/InternalError' - #TODO fill out current_finalized_checkpoint /beacon/current_finalized_checkpoint: + get: + tags: + - Phase0 + summary: "Get the current finalized checkpoint." + #TODO: is this description correct? + description: "Requests the beacon node to provide the checkpoint for the current finalized epoch." + responses: + 200: + description: Success response + content: + application/json: + schema: + $ref: '#/components/schemas/Checkpoint' + + + 500: + $ref: '#/components/responses/InternalError' #TODO fill spec /spec: @@ -1134,6 +1178,28 @@ components: pattern: "^0x[a-fA-F0-9]{64}$" description: "A hex encoded ethereum address." + Shard: + type: integer + format: uint64 + description: "A shard number." + example: 5 + maximum: 1023 + minimum: 0 + + Checkpoint: + type: object + description: "A checkpoint." + properties: + epoch: + type: integer + format: uint64 + description: "The epoch to which the checkpoint applies." + root: + type: string + format: byte + pattern: "^0x[a-fA-F0-9]{64}$" + description: "A block root, which is being checkpointed." + Peer: type: object properties: @@ -1163,7 +1229,7 @@ components: format: uint64 description: "The global ValidatorIndex value." - ValidatorInfo: + Validator: type: object properties: public_key: @@ -1173,6 +1239,13 @@ components: format: bytes pattern: "^0x[a-fA-F0-9]{64}$" description: "The 32 byte hash of the public key which the validator uses for withdrawing their rewards." + effective_balance: + type: integer + format: uint64 + description: "The effective balance of the validator, measured in Gwei." 
+ slashed: + type: boolean + description: "Whether the validator has or has not been slashed." activation_eligiblity_epoch: type: integer format: uint64 @@ -1191,13 +1264,6 @@ components: format: uint64 nullable: true description: "Epoch when the validator is eligible to withdraw their funds, or null if the validator has not exited." - slashed: - type: boolean - description: "Whether the validator has or has not been slashed." - effective_balance: - type: integer - format: uint64 - description: "The effective balance of the validator, measured in Gwei." ValidatorDuty: type: object @@ -1235,6 +1301,25 @@ components: format: uint64 description: "Globally, the estimated most recent slot number, or current target slot number." + Eth1Data: + type: object + description: "The [`Eth1Data`](https://github.com/ethereum/eth2.0-specs/blob/master/specs/core/0_beacon-chain.md#eth1data) object from the Eth2.0 spec." + properties: + deposit_root: + type: string + format: byte + pattern: "^0x[a-fA-F0-9]{64}$" + description: "Root of the deposit tree." + deposit_count: + type: integer + format: uint64 + description: "Total number of deposits." + block_hash: + type: string + format: byte + pattern: "^0x[a-fA-F0-9]{64}$" + description: "Ethereum 1.x block hash." + BeaconBlock: description: "The [`BeaconBlock`](https://github.com/ethereum/eth2.0-specs/blob/master/specs/core/0_beacon-chain.md#beaconblock) object from the Eth2.0 spec." allOf: @@ -1291,24 +1376,7 @@ components: pattern: "^0x[a-fA-F0-9]{192}$" description: "The RanDAO reveal value provided by the validator." eth1_data: - title: Eth1Data - type: object - description: "The [`Eth1Data`](https://github.com/ethereum/eth2.0-specs/blob/master/specs/core/0_beacon-chain.md#eth1data) object from the Eth2.0 spec." - properties: - deposit_root: - type: string - format: byte - pattern: "^0x[a-fA-F0-9]{64}$" - description: "Root of the deposit tree." 
- deposit_count: - type: integer - format: uint64 - description: "Total number of deposits." - block_hash: - type: string - format: byte - pattern: "^0x[a-fA-F0-9]{64}$" - description: "Ethereum 1.x block hash." + $ref: '#/components/schemas/Eth1Data' graffiti: type: string format: byte @@ -1442,6 +1510,161 @@ components: pattern: "^0x[a-fA-F0-9]{192}$" description: "Sender signature." + BeaconState: + type: object + description: "The [`BeaconState`](https://github.com/ethereum/eth2.0-specs/blob/dev/specs/core/0_beacon-chain.md#beaconstate) object from the Eth2.0 spec." + properties: + genesis_time: + $ref: '#/components/schemas/genesis_time' + slot: + type: integer + format: uint64 + description: "The latest slot, which the state represents." + fork: + $ref: '#/components/schemas/Fork' + latest_block_header: + $ref: '#/components/schemas/BeaconBlockHeader' + #TODO: Are these descriptions correct? + block_roots: + type: array + description: "The historical block roots." + minLength: 8192 + maxLength: 8192 #The SLOTS_PER_HISTORICAL_ROOT value from the Eth2.0 Spec. + items: + type: string + format: byte + pattern: "^0x[a-fA-F0-9]{64}$" + description: "A block root" + state_roots: + type: array + description: "The historical state roots." + minLength: 8192 + maxLength: 8192 #The SLOTS_PER_HISTORICAL_ROOT value from the Eth2.0 Spec. + items: + type: string + format: byte + pattern: "^0x[a-fA-F0-9]{64}$" + description: "A state root" + historical_roots: + type: array + #TODO: are these historical *state* roots? + description: "The historical state roots." + maxLength: 16777216 #The HISTORICAL_ROOTS_LIMIT value from the Eth2.0 Spec. + items: + type: string + format: byte + pattern: "^0x[a-fA-F0-9]{64}$" + description: "A state root" + eth1_data: + $ref: '#/components/schemas/Eth1Data' + eth1_data_votes: + type: array + description: "The validator votes for the Eth1Data." + maxLength: 1024 #The SLOTS_PER_ETH1_VOTING_PERIOD value from the Eth2.0 spec. 
+ items: + $ref: '#/components/schemas/Eth1Data' + eth1_deposit_index: + type: integer + format: uint64 + #TODO: Clarify this description + description: "The index of the Eth1 deposit." + validators: + type: array + description: "A list of the current validators." + maxLength: 1099511627776 + items: + $ref: '#/components/schemas/Validator' + balances: + type: array + description: "An array of the validator balances." + maxLength: 1099511627776 + items: + type: integer + format: uint64 + description: "The validator balance in GWei." + start_shard: + $ref: '#/components/schemas/Shard' + randao_mixes: + type: array + description: "The hashes for the randao mix." + minLength: 65536 + maxLength: 65536 #The EPOCHS_PER_HISTORICAL_VECTOR value from the Eth2.0 spec. + items: + type: string + format: byte + pattern: "^0x[a-fA-F0-9]{64}$" + description: "A randao mix hash." + active_index_roots: + type: array + description: "Active index digests for light clients." + minLength: 65536 + maxLength: 65536 #The EPOCHS_PER_HISTORICAL_VECTOR value from the Eth2.0 spec. + items: + type: string + format: byte + pattern: "^0x[a-fA-F0-9]{64}$" + description: "Active index digest" + compact_committees_roots: + type: array + description: "Committee digests for light clients." + minLength: 65536 + maxLength: 65536 #The EPOCHS_PER_HISTORICAL_VECTOR value from the Eth2.0 spec. + items: + type: string + format: byte + pattern: "^0x[a-fA-F0-9]{64}$" + description: "Committee digest." + slashings: + type: array + description: "Per-epoch sums of slashed effective balances." + minLength: 8192 + maxLength: 8192 #The EPOCHS_PER_SLASHINGS_VECTOR value from the Eth2.0 spec. + items: + type: integer + format: uint64 + description: "Sum of slashed balance for an epoch." + previous_epoch_attestations: + type: array + description: "A list of attestations in the previous epoch." + maxLength: 8192 # MAX_ATTESTATIONS * SLOTS_PER_EPOCH from the Eth2.0 spec. 
+ items: + $ref: '#/components/schemas/PendingAttestation' + current_epoch_attestations: + type: array + description: "A list of attestations in the current epoch." + maxLength: 8192 # MAX_ATTESTATIONS * SLOTS_PER_EPOCH from the Eth2.0 spec. + items: + $ref: '#/components/schemas/PendingAttestation' + previous_crosslinks: + type: array + description: "The shard crosslinks from the previous epoch." + minLength: 1024 + maxLength: 1024 #The SHARD_COUNT value from the Eth2.0 spec + items: + $ref: '#/components/schemas/Crosslink' + current_crosslinks: + type: array + description: "The shard crosslinks for the current epoch." + minLength: 1024 + maxLength: 1024 #The SHARD_COUNT value from the Eth2.0 spec + items: + $ref: '#/components/schemas/Crosslink' + justification_bits: + type: array + description: "Bit set for every recent justified epoch." + minLength: 4 + maxLength: 4 #The JUSTIFICATION_BITS_LENGTH from the Eth2.0 spec. + items: + type: boolean + #TODO: Check this description + description: "Whethere the recent epochs have been finalized." + previous_justified_checkpoint: + $ref: '#/components/schemas/Checkpoint' + current_justified_checkpoint: + $ref: '#/components/schemas/Checkpoint' + finalized_checkpoint: + $ref: '#/components/schemas/Checkpoint' + Fork: type: object description: "The [`Fork`](https://github.com/ethereum/eth2.0-specs/blob/master/specs/core/0_beacon-chain.md#Fork) object from the Eth2.0 spec." @@ -1508,6 +1731,35 @@ components: data: $ref: '#/components/schemas/AttestationData' + PendingAttestation: + type: object + description: "The [`PendingAttestation`](https://github.com/ethereum/eth2.0-specs/blob/v0.8.3/specs/core/0_beacon-chain.md#pendingattestation) object from the Eth2.0 spec." + properties: + aggregation_bits: + type: array + description: "The bits representing aggregation of validator signatures and attestations." + maxLength: 4096 #The MAX_VALIDATORS_PER_COMMITTEE value from the Eth2.0 spec. 
+ items: + type: boolean + description: "Whether the validator has been aggregated or not" + data: + $ref: '#/components/schemas/AttestationData' + inclusion_delay: + type: integer + format: uint64 + description: "The Slot at which it should be included." + proposer_index: + type: integer + format: uint64 + #TODO: This is the block proposer index, not the attestaion right? + description: "The ValidatorIndex of the block proposer" + + + + + + + AttestationData: type: object description: "The [`AttestationData`](https://github.com/ethereum/eth2.0-specs/blob/master/specs/core/0_beacon-chain.md#attestationdata) object from the Eth2.0 spec." @@ -1536,34 +1788,36 @@ components: pattern: "^0x[a-fA-F0-9]{64}$" description: "Target root from FFG vote." crosslink: - title: CrossLink - type: object - description: "The [`Crosslink`](https://github.com/ethereum/eth2.0-specs/blob/master/specs/core/0_beacon-chain.md#crosslink) object from the Eth2.0 spec, contains data from epochs [`start_epoch`, `end_epoch`)." - properties: - shard: - type: integer - format: uint64 - description: "The shard number." - start_epoch: - type: integer - format: uint64 - description: "The first epoch which the crosslinking data references." - end_epoch: - type: integer - format: uint64 - description: "The 'end' epoch referred to by the crosslinking data; no data in this Crosslink should refer to the `end_epoch` since it is not included in the crosslinking data interval." - parent_root: - type: string - format: byte - pattern: "^0x[a-fA-F0-9]{64}$" - description: "Root of the previous crosslink." - data_root: - type: string - format: byte - pattern: "^0x[a-fA-F0-9]{64}$" - description: "Root of the crosslinked shard data since the previous crosslink." 
+ $ref: '#/components/schemas/Crosslink' + Crosslink: + type: object + description: "The [`Crosslink`](https://github.com/ethereum/eth2.0-specs/blob/master/specs/core/0_beacon-chain.md#crosslink) object from the Eth2.0 spec, contains data from epochs [`start_epoch`, `end_epoch`)." + properties: + shard: + type: integer + format: uint64 + description: "The shard number." + start_epoch: + type: integer + format: uint64 + description: "The first epoch which the crosslinking data references." + end_epoch: + type: integer + format: uint64 + description: "The 'end' epoch referred to by the crosslinking data; no data in this Crosslink should refer to the `end_epoch` since it is not included in the crosslinking data interval." + parent_root: + type: string + format: byte + pattern: "^0x[a-fA-F0-9]{64}$" + description: "Root of the previous crosslink." + data_root: + type: string + format: byte + pattern: "^0x[a-fA-F0-9]{64}$" + description: "Root of the crosslinked shard data since the previous crosslink." + responses: Success: description: "Request successful." 
From 2706025a3450bc074b38c9851386a1c201c434aa Mon Sep 17 00:00:00 2001 From: Paul Hauner Date: Wed, 4 Sep 2019 09:07:33 +1000 Subject: [PATCH 209/305] Move data dir cleaning in node runtime start --- beacon_node/src/config.rs | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/beacon_node/src/config.rs b/beacon_node/src/config.rs index 47b877ecb..f2c56c524 100644 --- a/beacon_node/src/config.rs +++ b/beacon_node/src/config.rs @@ -78,6 +78,10 @@ fn process_testnet_subcommand( builder.set_random_datadir()?; } + if cli_args.is_present("force") { + builder.clean_datadir()?; + } + let is_bootstrap = cli_args.subcommand_name() == Some("bootstrap"); if let Some(path_string) = cli_args.value_of("eth2-config") { @@ -112,10 +116,6 @@ fn process_testnet_subcommand( builder.load_client_config(path)?; } - if cli_args.is_present("force") { - builder.clean_datadir()?; - } - info!( log, "Creating new datadir"; From 7edc5f37b9759b328d31221d747eff42af2d1ddb Mon Sep 17 00:00:00 2001 From: Paul Hauner Date: Wed, 4 Sep 2019 10:25:30 +1000 Subject: [PATCH 210/305] Move BeaconChainHarness to interop spec --- .../beacon_chain/src/beacon_chain_builder.rs | 23 ++--- beacon_node/beacon_chain/src/errors.rs | 1 + beacon_node/beacon_chain/src/test_utils.rs | 94 +++++++------------ beacon_node/beacon_chain/tests/tests.rs | 42 ++++++++- beacon_node/client/src/lib.rs | 8 +- eth2/lmd_ghost/tests/test.rs | 5 +- 6 files changed, 91 insertions(+), 82 deletions(-) diff --git a/beacon_node/beacon_chain/src/beacon_chain_builder.rs b/beacon_node/beacon_chain/src/beacon_chain_builder.rs index f03cbcc96..e59aae22b 100644 --- a/beacon_node/beacon_chain/src/beacon_chain_builder.rs +++ b/beacon_node/beacon_chain/src/beacon_chain_builder.rs @@ -13,8 +13,8 @@ use std::sync::Arc; use std::time::SystemTime; use tree_hash::{SignedRoot, TreeHash}; use types::{ - test_utils::generate_deterministic_keypairs, BeaconBlock, BeaconState, ChainSpec, Deposit, - DepositData, Domain, EthSpec, Fork, 
Hash256, PublicKey, Signature, + BeaconBlock, BeaconState, ChainSpec, Deposit, DepositData, Domain, EthSpec, Fork, Hash256, + Keypair, PublicKey, Signature, }; enum BuildStrategy { @@ -33,21 +33,21 @@ pub struct BeaconChainBuilder { impl BeaconChainBuilder { pub fn recent_genesis( - validator_count: usize, + keypairs: &[Keypair], minutes: u64, spec: ChainSpec, log: Logger, ) -> Result { - Self::quick_start(recent_genesis_time(minutes), validator_count, spec, log) + Self::quick_start(recent_genesis_time(minutes), keypairs, spec, log) } pub fn quick_start( genesis_time: u64, - validator_count: usize, + keypairs: &[Keypair], spec: ChainSpec, log: Logger, ) -> Result { - let genesis_state = interop_genesis_state(validator_count, genesis_time, &spec)?; + let genesis_state = interop_genesis_state(keypairs, genesis_time, &spec)?; Ok(Self::from_genesis_state(genesis_state, spec, log)) } @@ -167,11 +167,10 @@ fn genesis_block(genesis_state: &BeaconState, spec: &ChainSpec) - /// Reference: /// https://github.com/ethereum/eth2.0-pm/tree/6e41fcf383ebeb5125938850d8e9b4e9888389b4/interop/mocked_start fn interop_genesis_state( - validator_count: usize, + keypairs: &[Keypair], genesis_time: u64, spec: &ChainSpec, ) -> Result, String> { - let keypairs = generate_deterministic_keypairs(validator_count); let eth1_block_hash = Hash256::from_slice(&[0x42; 32]); let eth1_timestamp = 2_u64.pow(40); let amount = spec.max_effective_balance; @@ -187,7 +186,7 @@ fn interop_genesis_state( .map(|keypair| { let mut data = DepositData { withdrawal_credentials: withdrawal_credentials(&keypair.pk), - pubkey: keypair.pk.into(), + pubkey: keypair.pk.clone().into(), amount, signature: Signature::empty_signature().into(), }; @@ -269,7 +268,7 @@ fn recent_genesis_time(minutes: u64) -> u64 { #[cfg(test)] mod test { use super::*; - use types::{EthSpec, MinimalEthSpec}; + use types::{test_utils::generate_deterministic_keypairs, EthSpec, MinimalEthSpec}; type TestEthSpec = MinimalEthSpec; @@ -279,7 +278,9 
@@ mod test { let genesis_time = 42; let spec = &TestEthSpec::default_spec(); - let state = interop_genesis_state::(validator_count, genesis_time, spec) + let keypairs = generate_deterministic_keypairs(validator_count); + + let state = interop_genesis_state::(&keypairs, genesis_time, spec) .expect("should build state"); assert_eq!( diff --git a/beacon_node/beacon_chain/src/errors.rs b/beacon_node/beacon_chain/src/errors.rs index 58cfed271..030689928 100644 --- a/beacon_node/beacon_chain/src/errors.rs +++ b/beacon_node/beacon_chain/src/errors.rs @@ -34,6 +34,7 @@ pub enum BeaconChainError { MissingBeaconBlock(Hash256), MissingBeaconState(Hash256), SlotProcessingError(SlotProcessingError), + UnableToAdvanceState(String), NoStateForAttestation { beacon_block_root: Hash256, }, diff --git a/beacon_node/beacon_chain/src/test_utils.rs b/beacon_node/beacon_chain/src/test_utils.rs index 07d181a53..7670ac74e 100644 --- a/beacon_node/beacon_chain/src/test_utils.rs +++ b/beacon_node/beacon_chain/src/test_utils.rs @@ -1,22 +1,28 @@ -use crate::{BeaconChain, BeaconChainTypes, BlockProcessingOutcome, InteropEth1ChainBackend}; +use crate::{ + AttestationProcessingOutcome, BeaconChain, BeaconChainBuilder, BeaconChainTypes, + BlockProcessingOutcome, InteropEth1ChainBackend, +}; use lmd_ghost::LmdGhost; use rayon::prelude::*; -use sloggers::{null::NullLoggerBuilder, Build}; +use sloggers::{terminal::TerminalLoggerBuilder, types::Severity, Build}; use slot_clock::TestingSlotClock; use state_processing::per_slot_processing; use std::marker::PhantomData; use std::sync::Arc; use store::MemoryStore; -use store::Store; use tree_hash::{SignedRoot, TreeHash}; use types::{ - test_utils::TestingBeaconStateBuilder, AggregateSignature, Attestation, - AttestationDataAndCustodyBit, BeaconBlock, BeaconState, BitList, ChainSpec, Domain, EthSpec, - Hash256, Keypair, RelativeEpoch, SecretKey, Signature, Slot, + AggregateSignature, Attestation, AttestationDataAndCustodyBit, BeaconBlock, BeaconState, + 
BitList, ChainSpec, Domain, EthSpec, Hash256, Keypair, RelativeEpoch, SecretKey, Signature, + Slot, }; +pub use types::test_utils::generate_deterministic_keypairs; + pub use crate::persisted_beacon_chain::{PersistedBeaconChain, BEACON_CHAIN_DB_KEY}; +pub const HARNESS_GENESIS_TIME: u64 = 1567552690; // 4th September 2019 + /// Indicates how the `BeaconChainHarness` should produce blocks. #[derive(Clone, Copy, Debug)] pub enum BlockStrategy { @@ -84,46 +90,21 @@ where E: EthSpec, { /// Instantiate a new harness with `validator_count` initial validators. - pub fn new(validator_count: usize) -> Self { - let state_builder = TestingBeaconStateBuilder::from_default_keypairs_file_if_exists( - validator_count, - &E::default_spec(), - ); - let (genesis_state, keypairs) = state_builder.build(); - - Self::from_state_and_keypairs(genesis_state, keypairs) - } - - /// Instantiate a new harness with an initial validator for each key supplied. - pub fn from_keypairs(keypairs: Vec) -> Self { - let state_builder = TestingBeaconStateBuilder::from_keypairs(keypairs, &E::default_spec()); - let (genesis_state, keypairs) = state_builder.build(); - - Self::from_state_and_keypairs(genesis_state, keypairs) - } - - /// Instantiate a new harness with the given genesis state and a keypair for each of the - /// initial validators in the given state. 
- pub fn from_state_and_keypairs(genesis_state: BeaconState, keypairs: Vec) -> Self { + pub fn new(keypairs: Vec) -> Self { let spec = E::default_spec(); + let log = TerminalLoggerBuilder::new() + .level(Severity::Warning) + .build() + .expect("logger should build"); + let store = Arc::new(MemoryStore::open()); - let mut genesis_block = BeaconBlock::empty(&spec); - genesis_block.state_root = Hash256::from_slice(&genesis_state.tree_hash_root()); - - let builder = NullLoggerBuilder; - let log = builder.build().expect("logger should build"); - - let chain = BeaconChain::from_genesis( - store, - InteropEth1ChainBackend::default(), - genesis_state, - genesis_block, - spec.clone(), - log, - ) - .expect("Terminate if beacon chain generation fails"); + let chain = + BeaconChainBuilder::quick_start(HARNESS_GENESIS_TIME, &keypairs, spec.clone(), log) + .unwrap_or_else(|e| panic!("Failed to create beacon chain builder: {}", e)) + .build(store.clone(), InteropEth1ChainBackend::default()) + .unwrap_or_else(|e| panic!("Failed to build beacon chain: {}", e)); Self { chain, @@ -163,7 +144,10 @@ where BlockStrategy::ForkCanonicalChainAt { previous_slot, .. } => previous_slot, }; - self.get_state_at_slot(state_slot) + self.chain + .state_at_slot(state_slot) + .expect("should find state for slot") + .clone() }; // Determine the first slot where a block should be built. @@ -201,21 +185,6 @@ where head_block_root.expect("did not produce any blocks") } - fn get_state_at_slot(&self, state_slot: Slot) -> BeaconState { - let state_root = self - .chain - .rev_iter_state_roots() - .find(|(_hash, slot)| *slot == state_slot) - .map(|(hash, _slot)| hash) - .expect("could not find state root"); - - self.chain - .store - .get(&state_root) - .expect("should read db") - .expect("should find state root") - } - /// Returns a newly created block, signed by the proposer for the given slot. 
fn build_block( &self, @@ -289,9 +258,14 @@ where ) .into_iter() .for_each(|attestation| { - self.chain + match self + .chain .process_attestation(attestation) - .expect("should process attestation"); + .expect("should not error during attestation processing") + { + AttestationProcessingOutcome::Processed => (), + other => panic!("did not successfully process attestation: {:?}", other), + } }); } diff --git a/beacon_node/beacon_chain/tests/tests.rs b/beacon_node/beacon_chain/tests/tests.rs index ba7f7bf84..bf853f284 100644 --- a/beacon_node/beacon_chain/tests/tests.rs +++ b/beacon_node/beacon_chain/tests/tests.rs @@ -3,11 +3,14 @@ #[macro_use] extern crate lazy_static; -use beacon_chain::test_utils::{ - AttestationStrategy, BeaconChainHarness, BlockStrategy, CommonTypes, PersistedBeaconChain, - BEACON_CHAIN_DB_KEY, -}; use beacon_chain::AttestationProcessingOutcome; +use beacon_chain::{ + test_utils::{ + AttestationStrategy, BeaconChainHarness, BlockStrategy, CommonTypes, PersistedBeaconChain, + BEACON_CHAIN_DB_KEY, + }, + BlockProcessingOutcome, +}; use lmd_ghost::ThreadSafeReducedTree; use rand::Rng; use store::{MemoryStore, Store}; @@ -25,7 +28,7 @@ lazy_static! { type TestForkChoice = ThreadSafeReducedTree; fn get_harness(validator_count: usize) -> BeaconChainHarness { - let harness = BeaconChainHarness::from_keypairs(KEYPAIRS[0..validator_count].to_vec()); + let harness = BeaconChainHarness::new(KEYPAIRS[0..validator_count].to_vec()); harness.advance_slot(); @@ -461,3 +464,32 @@ fn free_attestations_added_to_fork_choice_all_updated() { } } } + +#[test] +fn produces_and_processes_with_genesis_skip_slots() { + let num_validators = 8; + let harness_a = get_harness(num_validators); + let harness_b = get_harness(num_validators); + let skip_slots = 9; + + for _ in 0..skip_slots { + harness_a.advance_slot(); + harness_b.advance_slot(); + } + + harness_a.extend_chain( + 1, + BlockStrategy::OnCanonicalHead, + // No attestation required for test. 
+ AttestationStrategy::SomeValidators(vec![]), + ); + + assert_eq!( + harness_b + .chain + .process_block(harness_a.chain.head().beacon_block.clone()), + Ok(BlockProcessingOutcome::Processed { + block_root: harness_a.chain.head().beacon_block_root + }) + ); +} diff --git a/beacon_node/client/src/lib.rs b/beacon_node/client/src/lib.rs index 33f27f253..1d3cb40ec 100644 --- a/beacon_node/client/src/lib.rs +++ b/beacon_node/client/src/lib.rs @@ -6,8 +6,8 @@ pub mod error; pub mod notifier; use beacon_chain::{ - lmd_ghost::ThreadSafeReducedTree, slot_clock::SystemTimeSlotClock, store::Store, BeaconChain, - BeaconChainBuilder, + lmd_ghost::ThreadSafeReducedTree, slot_clock::SystemTimeSlotClock, store::Store, + test_utils::generate_deterministic_keypairs, BeaconChain, BeaconChainBuilder, }; use exit_future::Signal; use futures::{future::Future, Stream}; @@ -106,7 +106,7 @@ where "method" => "recent" ); BeaconChainBuilder::recent_genesis( - *validator_count, + &generate_deterministic_keypairs(*validator_count), *minutes, spec.clone(), log.clone(), @@ -125,7 +125,7 @@ where ); BeaconChainBuilder::quick_start( *genesis_time, - *validator_count, + &generate_deterministic_keypairs(*validator_count), spec.clone(), log.clone(), )? diff --git a/eth2/lmd_ghost/tests/test.rs b/eth2/lmd_ghost/tests/test.rs index 4c79a704e..49e9ff738 100644 --- a/eth2/lmd_ghost/tests/test.rs +++ b/eth2/lmd_ghost/tests/test.rs @@ -4,7 +4,8 @@ extern crate lazy_static; use beacon_chain::test_utils::{ - AttestationStrategy, BeaconChainHarness as BaseBeaconChainHarness, BlockStrategy, + generate_deterministic_keypairs, AttestationStrategy, + BeaconChainHarness as BaseBeaconChainHarness, BlockStrategy, }; use lmd_ghost::{LmdGhost, ThreadSafeReducedTree as BaseThreadSafeReducedTree}; use rand::{prelude::*, rngs::StdRng}; @@ -51,7 +52,7 @@ struct ForkedHarness { impl ForkedHarness { /// A new standard instance of with constant parameters. 
pub fn new() -> Self { - let harness = BeaconChainHarness::new(VALIDATOR_COUNT); + let harness = BeaconChainHarness::new(generate_deterministic_keypairs(VALIDATOR_COUNT)); // Move past the zero slot. harness.advance_slot(); From dcd074877b7f432fbd8c7ad2224cd6b173f0ff6d Mon Sep 17 00:00:00 2001 From: Luke Anderson Date: Wed, 4 Sep 2019 10:57:09 +1000 Subject: [PATCH 211/305] Removed block publish feature, since it's incomplete currently. --- beacon_node/rest_api/src/lib.rs | 2 +- beacon_node/rest_api/src/validator.rs | 49 --------------------------- 2 files changed, 1 insertion(+), 50 deletions(-) diff --git a/beacon_node/rest_api/src/lib.rs b/beacon_node/rest_api/src/lib.rs index b269bd476..2c7b90e3f 100644 --- a/beacon_node/rest_api/src/lib.rs +++ b/beacon_node/rest_api/src/lib.rs @@ -170,7 +170,7 @@ pub fn start_server( validator::get_new_beacon_block::(req) } (&Method::POST, "/beacon/validator/block") => { - validator::publish_beacon_block::(req) + helpers::implementation_pending_response(req) } (&Method::GET, "/beacon/validator/attestation") => { validator::get_new_attestation::(req) diff --git a/beacon_node/rest_api/src/validator.rs b/beacon_node/rest_api/src/validator.rs index 427f6a514..bbc976175 100644 --- a/beacon_node/rest_api/src/validator.rs +++ b/beacon_node/rest_api/src/validator.rs @@ -184,55 +184,6 @@ pub fn get_new_beacon_block(req: Request) - Ok(success_response(body)) } -/// HTTP Handler to accept a validator-signed BeaconBlock, and publish it to the network. -pub fn publish_beacon_block(req: Request) -> ApiResult { - let beacon_chain = get_beacon_chain_from_request::(&req)?; - - let query = UrlQuery::from_request(&req)?; - let slot = match query.first_of(&["slot"]) { - Ok((_, v)) => Slot::new(v.parse::().map_err(|e| { - ApiError::InvalidQueryParams(format!("Invalid slot parameter, must be a u64. 
{:?}", e)) - })?), - Err(e) => { - return Err(e); - } - }; - let randao_reveal = match query.first_of(&["randao_reveal"]) { - Ok((_, v)) => Signature::from_bytes( - hex::decode(&v) - .map_err(|e| { - ApiError::InvalidQueryParams(format!( - "Invalid hex string for randao_reveal: {:?}", - e - )) - })? - .as_slice(), - ) - .map_err(|e| { - ApiError::InvalidQueryParams(format!("randao_reveal is not a valid signature: {:?}", e)) - })?, - Err(e) => { - return Err(e); - } - }; - - let new_block = match beacon_chain.produce_block(randao_reveal, slot) { - Ok((block, _state)) => block, - Err(e) => { - return Err(ApiError::ServerError(format!( - "Beacon node is not able to produce a block: {:?}", - e - ))); - } - }; - - let body = Body::from( - serde_json::to_string(&new_block) - .expect("We should always be able to serialize a new block that we produced."), - ); - Ok(success_response(body)) -} - /// HTTP Handler to produce a new Attestation from the current state, ready to be signed by a validator. pub fn get_new_attestation(req: Request) -> ApiResult { let beacon_chain = get_beacon_chain_from_request::(&req)?; From b432c8c58c356c8a0c8de96176b091781adc60c6 Mon Sep 17 00:00:00 2001 From: Luke Anderson Date: Wed, 4 Sep 2019 11:18:29 +1000 Subject: [PATCH 212/305] Replaced unnecessary match statements with map_err and ok_or --- beacon_node/rest_api/src/validator.rs | 50 +++++++++------------------ 1 file changed, 17 insertions(+), 33 deletions(-) diff --git a/beacon_node/rest_api/src/validator.rs b/beacon_node/rest_api/src/validator.rs index bbc976175..84ea485b5 100644 --- a/beacon_node/rest_api/src/validator.rs +++ b/beacon_node/rest_api/src/validator.rs @@ -53,15 +53,11 @@ pub fn get_validator_duties(req: Request) - e )) })?; - let validators: Vec = match query.all_of("validator_pubkeys") { - Ok(v) => v - .iter() - .map(|pk| parse_pubkey(pk)) - .collect::, _>>()?, - Err(e) => { - return Err(e); - } - }; + let validators: Vec = query + .all_of("validator_pubkeys")? 
+ .iter() + .map(|pk| parse_pubkey(pk)) + .collect::, _>>()?; let mut duties: Vec = Vec::new(); // Get a list of all validators for this epoch @@ -167,15 +163,14 @@ pub fn get_new_beacon_block(req: Request) - } }; - let new_block = match beacon_chain.produce_block(randao_reveal, slot) { - Ok((block, _state)) => block, - Err(e) => { - return Err(ApiError::ServerError(format!( + let (new_block, _state) = beacon_chain + .produce_block(randao_reveal, slot) + .map_err(|e| { + ApiError::ServerError(format!( "Beacon node is not able to produce a block: {:?}", e - ))); - } - }; + )) + })?; let body = Body::from( serde_json::to_string(&new_block) @@ -229,14 +224,9 @@ pub fn get_new_attestation(req: Request) -> }; // Check that we are requesting an attestation during the slot where it is relevant. - let present_slot = match beacon_chain.read_slot_clock() { - Some(s) => s, - None => { - return Err(ApiError::ServerError( - "Beacon node is unable to determine present slot, either the state isn't generated or the chain hasn't begun.".into() - )); - } - }; + let present_slot = beacon_chain.read_slot_clock().ok_or(ApiError::ServerError( + "Beacon node is unable to determine present slot, either the state isn't generated or the chain hasn't begun.".into() + ))?; if val_duty.slot != present_slot { return Err(ApiError::InvalidQueryParams(format!("Validator is only able to request an attestation during the slot they are allocated. 
Current slot: {:?}, allocated slot: {:?}", head_state.slot, val_duty.slot))); } @@ -296,15 +286,9 @@ pub fn get_new_attestation(req: Request) -> return Err(e); } }; - let attestation_data = match beacon_chain.produce_attestation_data(shard) { - Ok(v) => v, - Err(e) => { - return Err(ApiError::ServerError(format!( - "Could not produce an attestation: {:?}", - e - ))); - } - }; + let attestation_data = beacon_chain + .produce_attestation_data(shard) + .map_err(|e| ApiError::ServerError(format!("Could not produce an attestation: {:?}", e)))?; let attestation: Attestation = Attestation { aggregation_bits, From 009a7eb9c72f53733c7d50425c2139c961fc547d Mon Sep 17 00:00:00 2001 From: Paul Hauner Date: Wed, 4 Sep 2019 12:04:15 +1000 Subject: [PATCH 213/305] Fix bug with invalid state root --- .../beacon_chain/src/beacon_chain_builder.rs | 3 +++ beacon_node/beacon_chain/tests/tests.rs | 22 ++++++++++++++++--- 2 files changed, 22 insertions(+), 3 deletions(-) diff --git a/beacon_node/beacon_chain/src/beacon_chain_builder.rs b/beacon_node/beacon_chain/src/beacon_chain_builder.rs index e59aae22b..ef25c33ec 100644 --- a/beacon_node/beacon_chain/src/beacon_chain_builder.rs +++ b/beacon_node/beacon_chain/src/beacon_chain_builder.rs @@ -243,6 +243,9 @@ fn interop_genesis_state( state.genesis_time = genesis_time; + // Invalid all the caches after all the manual state surgery. 
+ state.drop_all_caches(); + Ok(state) } diff --git a/beacon_node/beacon_chain/tests/tests.rs b/beacon_node/beacon_chain/tests/tests.rs index bf853f284..82fc88216 100644 --- a/beacon_node/beacon_chain/tests/tests.rs +++ b/beacon_node/beacon_chain/tests/tests.rs @@ -465,12 +465,10 @@ fn free_attestations_added_to_fork_choice_all_updated() { } } -#[test] -fn produces_and_processes_with_genesis_skip_slots() { +fn run_skip_slot_test(skip_slots: u64) { let num_validators = 8; let harness_a = get_harness(num_validators); let harness_b = get_harness(num_validators); - let skip_slots = 9; for _ in 0..skip_slots { harness_a.advance_slot(); @@ -484,6 +482,12 @@ fn produces_and_processes_with_genesis_skip_slots() { AttestationStrategy::SomeValidators(vec![]), ); + assert_eq!( + harness_a.chain.head().beacon_block.slot, + Slot::new(skip_slots + 1) + ); + assert_eq!(harness_b.chain.head().beacon_block.slot, Slot::new(0)); + assert_eq!( harness_b .chain @@ -492,4 +496,16 @@ fn produces_and_processes_with_genesis_skip_slots() { block_root: harness_a.chain.head().beacon_block_root }) ); + + assert_eq!( + harness_b.chain.head().beacon_block.slot, + Slot::new(skip_slots + 1) + ); +} + +#[test] +fn produces_and_processes_with_genesis_skip_slots() { + for i in 0..MinimalEthSpec::slots_per_epoch() * 4 { + run_skip_slot_test(i) + } } From 0c1ceab5276d97b18a21163fcb6199d2c283b593 Mon Sep 17 00:00:00 2001 From: Luke Anderson Date: Wed, 4 Sep 2019 13:43:45 +1000 Subject: [PATCH 214/305] Addressed Paul's suggestions. - Updated some comments. - Replaced match statements with map functions. 
--- beacon_node/rest_api/src/metrics.rs | 2 +- beacon_node/rest_api/src/network.rs | 14 +-- beacon_node/rest_api/src/validator.rs | 162 +++++++++++--------------- 3 files changed, 74 insertions(+), 104 deletions(-) diff --git a/beacon_node/rest_api/src/metrics.rs b/beacon_node/rest_api/src/metrics.rs index 9d2ecc343..2239249b6 100644 --- a/beacon_node/rest_api/src/metrics.rs +++ b/beacon_node/rest_api/src/metrics.rs @@ -67,7 +67,7 @@ pub fn get_prometheus(req: Request) -> ApiR String::from_utf8(buffer) .map(|string| { let mut response = success_response(Body::from(string)); - // Need to change the header to text/plain for prometheius + // Need to change the header to text/plain for prometheus response.headers_mut().insert( "content-type", HeaderValue::from_static("text/plain; charset=utf-8"), diff --git a/beacon_node/rest_api/src/network.rs b/beacon_node/rest_api/src/network.rs index dffa949c9..4f1f53bb9 100644 --- a/beacon_node/rest_api/src/network.rs +++ b/beacon_node/rest_api/src/network.rs @@ -4,7 +4,7 @@ use eth2_libp2p::{Enr, Multiaddr, PeerId}; use hyper::{Body, Request}; use std::sync::Arc; -/// HTTP handle to return the list of libp2p multiaddr the client is listening on. +/// HTTP handler to return the list of libp2p multiaddr the client is listening on. /// /// Returns a list of `Multiaddr`, serialized according to their `serde` impl. pub fn get_listen_addresses(req: Request) -> ApiResult { @@ -21,9 +21,9 @@ pub fn get_listen_addresses(req: Request) -> ApiResul ))) } -/// HTTP handle to return network port the client is listening on. +/// HTTP handler to return the network port the client is listening on. /// -/// Returns a list of `Multiaddr`, serialized according to their `serde` impl. 
+/// Returns the TCP port number in its plain form (which is also valid JSON serialization) pub fn get_listen_port(req: Request) -> ApiResult { let network = req .extensions() @@ -36,7 +36,7 @@ pub fn get_listen_port(req: Request) -> ApiResult { ))) } -/// HTTP handle to return the Discv5 ENR from the client's libp2p service. +/// HTTP handler to return the Discv5 ENR from the client's libp2p service. /// /// ENR is encoded as base64 string. pub fn get_enr(req: Request) -> ApiResult { @@ -53,7 +53,7 @@ pub fn get_enr(req: Request) -> ApiResult { ))) } -/// HTTP handle to return the `PeerId` from the client's libp2p service. +/// HTTP handler to return the `PeerId` from the client's libp2p service. /// /// PeerId is encoded as base58 string. pub fn get_peer_id(req: Request) -> ApiResult { @@ -70,7 +70,7 @@ pub fn get_peer_id(req: Request) -> ApiResult { ))) } -/// HTTP handle to return the number of peers connected in the client's libp2p service. +/// HTTP handler to return the number of peers connected in the client's libp2p service. pub fn get_peer_count(req: Request) -> ApiResult { let network = req .extensions() @@ -85,7 +85,7 @@ pub fn get_peer_count(req: Request) -> ApiResult { ))) } -/// HTTP handle to return the list of peers connected to the client's libp2p service. +/// HTTP handler to return the list of peers connected to the client's libp2p service. /// /// Peers are presented as a list of `PeerId::to_string()`. 
pub fn get_peer_list(req: Request) -> ApiResult { diff --git a/beacon_node/rest_api/src/validator.rs b/beacon_node/rest_api/src/validator.rs index 84ea485b5..229d84674 100644 --- a/beacon_node/rest_api/src/validator.rs +++ b/beacon_node/rest_api/src/validator.rs @@ -136,32 +136,24 @@ pub fn get_new_beacon_block(req: Request) - let beacon_chain = get_beacon_chain_from_request::(&req)?; let query = UrlQuery::from_request(&req)?; - let slot = match query.first_of(&["slot"]) { - Ok((_, v)) => Slot::new(v.parse::().map_err(|e| { - ApiError::InvalidQueryParams(format!("Invalid slot parameter, must be a u64. {:?}", e)) - })?), - Err(e) => { - return Err(e); - } - }; - let randao_reveal = match query.first_of(&["randao_reveal"]) { - Ok((_, v)) => Signature::from_bytes( - hex::decode(&v) - .map_err(|e| { - ApiError::InvalidQueryParams(format!( - "Invalid hex string for randao_reveal: {:?}", - e - )) - })? - .as_slice(), - ) + let slot = query + .first_of(&["slot"]) + .map(|(_key, value)| value)? + .parse::() + .map(Slot::from) .map_err(|e| { - ApiError::InvalidQueryParams(format!("randao_reveal is not a valid signature: {:?}", e)) - })?, - Err(e) => { - return Err(e); - } - }; + ApiError::InvalidQueryParams(format!("Invalid slot parameter, must be a u64. {:?}", e)) + })?; + let randao_bytes = query + .first_of(&["randao_reveal"]) + .map(|(_key, value)| value) + .map(hex::decode)? 
+ .map_err(|e| { + ApiError::InvalidQueryParams(format!("Invalid hex string for randao_reveal: {:?}", e)) + })?; + let randao_reveal = Signature::from_bytes(randao_bytes.as_slice()).map_err(|e| { + ApiError::InvalidQueryParams(format!("randao_reveal is not a valid signature: {:?}", e)) + })?; let (new_block, _state) = beacon_chain .produce_block(randao_reveal, slot) @@ -185,43 +177,33 @@ pub fn get_new_attestation(req: Request) -> let head_state = &beacon_chain.head().beacon_state; let query = UrlQuery::from_request(&req)?; - let val_pk: PublicKey = match query.first_of(&["validator_pubkey"]) { - Ok((_, v)) => parse_pubkey(v.as_str())?, - Err(e) => { - return Err(e); - } - }; + let val_pk_str = query + .first_of(&["validator_pubkey"]) + .map(|(_key, value)| value)?; + let val_pk = parse_pubkey(val_pk_str.as_str())?; + // Get the validator index from the supplied public key // If it does not exist in the index, we cannot continue. - let val_index: usize = match head_state.get_validator_index(&val_pk) { - Ok(Some(i)) => i, - Ok(None) => { - return Err(ApiError::InvalidQueryParams( - "The provided validator public key does not correspond to a validator index." - .into(), - )); - } - Err(e) => { - return Err(ApiError::ServerError(format!( - "Unable to read validator index cache. {:?}", - e - ))); - } - }; + let val_index = head_state + .get_validator_index(&val_pk) + .map_err(|e| { + ApiError::ServerError(format!("Unable to read validator index cache. {:?}", e)) + })? + .ok_or(ApiError::InvalidQueryParams( + "The provided validator public key does not correspond to a validator index.".into(), + ))?; + // Get the duties of the validator, to make sure they match up. // If they don't have duties this epoch, then return an error - let val_duty = match head_state.get_attestation_duties(val_index, RelativeEpoch::Current) { - Ok(Some(d)) => d, - Ok(None) => { - return Err(ApiError::InvalidQueryParams("No validator duties could be found for the requested validator. 
Cannot provide valid attestation.".into())); - } - Err(e) => { - return Err(ApiError::ServerError(format!( + let val_duty = head_state + .get_attestation_duties(val_index, RelativeEpoch::Current) + .map_err(|e| { + ApiError::ServerError(format!( "unable to read cache for attestation duties: {:?}", e - ))) - } - }; + )) + })? + .ok_or(ApiError::InvalidQueryParams("No validator duties could be found for the requested validator. Cannot provide valid attestation.".into()))?; // Check that we are requesting an attestation during the slot where it is relevant. let present_slot = beacon_chain.read_slot_clock().ok_or(ApiError::ServerError( @@ -232,14 +214,14 @@ pub fn get_new_attestation(req: Request) -> } // Parse the POC bit and insert it into the aggregation bits - let poc_bit: bool = match query.first_of(&["poc_bit"]) { - Ok((_, v)) => v.parse::().map_err(|e| { - ApiError::InvalidQueryParams(format!("poc_bit is not a valid boolean value: {:?}", e)) - })?, - Err(e) => { - return Err(e); - } - }; + let poc_bit = query + .first_of(&["poc_bit"]) + .map(|(_key, value)| value)? + .parse::() + .map_err(|e| { + ApiError::InvalidQueryParams(format!("Invalid slot parameter, must be a u64. {:?}", e)) + })?; + let mut aggregation_bits = BitList::with_capacity(val_duty.committee_len) .expect("An empty BitList should always be created, or we have bigger problems."); aggregation_bits @@ -251,41 +233,29 @@ pub fn get_new_attestation(req: Request) -> )) })?; - // Allow a provided slot parameter to check against the expected slot as a sanity check. + // Allow a provided slot parameter to check against the expected slot as a sanity check only. // Presently, we don't support attestations at future or past slots. - let _slot = match query.first_of(&["slot"]) { - Ok((_, v)) => { - let requested_slot = v.parse::().map_err(|e| { - ApiError::InvalidQueryParams(format!( - "Invalid slot parameter, must be a u64. 
{:?}", - e - )) - })?; - let current_slot = beacon_chain.head().beacon_state.slot.as_u64(); - if requested_slot != current_slot { - return Err(ApiError::InvalidQueryParams(format!("Attestation data can only be requested for the current slot ({:?}), not your requested slot ({:?})", current_slot, requested_slot))); - } - Slot::new(requested_slot) - } - Err(ApiError::InvalidQueryParams(_)) => { - // Just fill _slot with a dummy value for now, making the slot parameter optional - // We'll get the real slot from the ValidatorDuty - Slot::new(0) - } - Err(e) => { - return Err(e); - } - }; + let requested_slot = query + .first_of(&["slot"]) + .map(|(_key, value)| value)? + .parse::() + .map(Slot::from) + .map_err(|e| { + ApiError::InvalidQueryParams(format!("Invalid slot parameter, must be a u64. {:?}", e)) + })?; + let current_slot = beacon_chain.head().beacon_state.slot.as_u64(); + if requested_slot != current_slot { + return Err(ApiError::InvalidQueryParams(format!("Attestation data can only be requested for the current slot ({:?}), not your requested slot ({:?})", current_slot, requested_slot))); + } - let shard: Shard = match query.first_of(&["shard"]) { - Ok((_, v)) => v.parse::().map_err(|e| { + let shard = query + .first_of(&["shard"]) + .map(|(_key, value)| value)? 
+ .parse::() + .map_err(|e| { ApiError::InvalidQueryParams(format!("Shard is not a valid u64 value: {:?}", e)) - })?, - Err(e) => { - // This is a mandatory parameter, return the error - return Err(e); - } - }; + })?; + let attestation_data = beacon_chain .produce_attestation_data(shard) .map_err(|e| ApiError::ServerError(format!("Could not produce an attestation: {:?}", e)))?; From 572df4f37e093c08d1c1a63d047969c5e3ee0781 Mon Sep 17 00:00:00 2001 From: Paul Hauner Date: Wed, 4 Sep 2019 13:56:30 +1000 Subject: [PATCH 215/305] Make bootstrapper block til connection established --- .../beacon_chain/src/beacon_chain_builder.rs | 2 +- beacon_node/src/config.rs | 4 +- eth2/utils/lighthouse_bootstrap/Cargo.toml | 1 + eth2/utils/lighthouse_bootstrap/src/lib.rs | 37 +++++++++++++++++-- validator_client/src/main.rs | 11 ++++-- 5 files changed, 44 insertions(+), 11 deletions(-) diff --git a/beacon_node/beacon_chain/src/beacon_chain_builder.rs b/beacon_node/beacon_chain/src/beacon_chain_builder.rs index ef25c33ec..2a3537020 100644 --- a/beacon_node/beacon_chain/src/beacon_chain_builder.rs +++ b/beacon_node/beacon_chain/src/beacon_chain_builder.rs @@ -87,7 +87,7 @@ impl BeaconChainBuilder { } pub fn http_bootstrap(server: &str, spec: ChainSpec, log: Logger) -> Result { - let bootstrapper = Bootstrapper::from_server_string(server.to_string()) + let bootstrapper = Bootstrapper::connect(server.to_string(), &log) .map_err(|e| format!("Failed to initialize bootstrap client: {}", e))?; let (genesis_state, genesis_block) = bootstrapper diff --git a/beacon_node/src/config.rs b/beacon_node/src/config.rs index f2c56c524..6a13a9aae 100644 --- a/beacon_node/src/config.rs +++ b/beacon_node/src/config.rs @@ -310,7 +310,7 @@ impl<'a> ConfigBuilder<'a> { server: &str, port: Option, ) -> Result<()> { - let bootstrapper = Bootstrapper::from_server_string(server.to_string())?; + let bootstrapper = Bootstrapper::connect(server.to_string(), &self.log)?; if let Some(server_multiaddr) = 
bootstrapper.best_effort_multiaddr(port) { info!( @@ -347,7 +347,7 @@ impl<'a> ConfigBuilder<'a> { /// Imports an `Eth2Config` from `server`, returning an error if this fails. pub fn import_bootstrap_eth2_config(&mut self, server: &str) -> Result<()> { - let bootstrapper = Bootstrapper::from_server_string(server.to_string())?; + let bootstrapper = Bootstrapper::connect(server.to_string(), &self.log)?; self.update_eth2_config(bootstrapper.eth2_config()?); diff --git a/eth2/utils/lighthouse_bootstrap/Cargo.toml b/eth2/utils/lighthouse_bootstrap/Cargo.toml index 3f48505b8..cfc4c6baf 100644 --- a/eth2/utils/lighthouse_bootstrap/Cargo.toml +++ b/eth2/utils/lighthouse_bootstrap/Cargo.toml @@ -13,3 +13,4 @@ reqwest = "0.9" url = "1.2" types = { path = "../../types" } serde = "1.0" +slog = { version = "^2.2.3" , features = ["max_level_trace", "release_max_level_trace"] } diff --git a/eth2/utils/lighthouse_bootstrap/src/lib.rs b/eth2/utils/lighthouse_bootstrap/src/lib.rs index dc70c6d21..92a587ff2 100644 --- a/eth2/utils/lighthouse_bootstrap/src/lib.rs +++ b/eth2/utils/lighthouse_bootstrap/src/lib.rs @@ -5,11 +5,16 @@ use eth2_libp2p::{ }; use reqwest::{Error as HttpError, Url}; use serde::Deserialize; +use slog::{error, Logger}; use std::borrow::Cow; use std::net::Ipv4Addr; +use std::time::Duration; use types::{BeaconBlock, BeaconState, Checkpoint, EthSpec, Hash256, Slot}; use url::Host; +pub const RETRY_SLEEP_MILLIS: u64 = 100; +pub const RETRY_WARN_INTERVAL: u64 = 30; + #[derive(Debug)] enum Error { InvalidUrl, @@ -31,11 +36,35 @@ pub struct Bootstrapper { } impl Bootstrapper { - /// Parses the given `server` as a URL, instantiating `Self`. - pub fn from_server_string(server: String) -> Result { - Ok(Self { + /// Parses the given `server` as a URL, instantiating `Self` and blocking until a connection + /// can be made with the server. + /// + /// Never times out. 
+ pub fn connect(server: String, log: &Logger) -> Result { + let bootstrapper = Self { url: Url::parse(&server).map_err(|e| format!("Invalid bootstrap server url: {}", e))?, - }) + }; + + let mut retry_count = 0; + loop { + match bootstrapper.enr() { + Ok(_) => break, + Err(_) => { + if retry_count % RETRY_WARN_INTERVAL == 0 { + error!( + log, + "Failed to contact bootstrap server"; + "retry_count" => retry_count, + "retry_delay_millis" => RETRY_SLEEP_MILLIS, + ); + } + retry_count += 1; + std::thread::sleep(Duration::from_millis(RETRY_SLEEP_MILLIS)); + } + } + } + + Ok(bootstrapper) } /// Build a multiaddr using the HTTP server URL that is not guaranteed to be correct. diff --git a/validator_client/src/main.rs b/validator_client/src/main.rs index d5d2fc27f..39b2e3eae 100644 --- a/validator_client/src/main.rs +++ b/validator_client/src/main.rs @@ -247,10 +247,13 @@ fn process_testnet_subcommand( ) -> Result<(ClientConfig, Eth2Config)> { let eth2_config = if cli_args.is_present("bootstrap") { info!(log, "Connecting to bootstrap server"); - let bootstrapper = Bootstrapper::from_server_string(format!( - "http://{}:{}", - client_config.server, client_config.server_http_port - ))?; + let bootstrapper = Bootstrapper::connect( + format!( + "http://{}:{}", + client_config.server, client_config.server_http_port + ), + &log, + )?; let eth2_config = bootstrapper.eth2_config()?; From 28a2ce2bdc9f994255011a11896cee28cf0900ed Mon Sep 17 00:00:00 2001 From: Luke Anderson Date: Wed, 4 Sep 2019 14:19:48 +1000 Subject: [PATCH 216/305] Fix formatting with rustfmt. 
--- beacon_node/rest_api/src/beacon.rs | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) diff --git a/beacon_node/rest_api/src/beacon.rs b/beacon_node/rest_api/src/beacon.rs index 36e7f6c57..11e1446d9 100644 --- a/beacon_node/rest_api/src/beacon.rs +++ b/beacon_node/rest_api/src/beacon.rs @@ -174,10 +174,13 @@ pub fn get_state(req: Request) -> ApiResult return Err(e); } } - }, + } Err(ApiError::InvalidQueryParams(_)) => { // No parameters provided at all, use current slot. - (String::from("slot"), beacon_chain.head().beacon_state.slot.to_string()) + ( + String::from("slot"), + beacon_chain.head().beacon_state.slot.to_string(), + ) } Err(e) => { return Err(e); From d511c939eb4015796573dc3466b40d5a4811735a Mon Sep 17 00:00:00 2001 From: Michael Sproul Date: Wed, 4 Sep 2019 13:03:44 +1000 Subject: [PATCH 217/305] SSZ generic tests for big uints --- eth2/utils/tree_hash/src/impls.rs | 42 ++++++++- tests/ef_tests/Cargo.toml | 2 + tests/ef_tests/src/cases.rs | 16 +--- .../src/cases/bls_aggregate_pubkeys.rs | 1 + .../ef_tests/src/cases/bls_aggregate_sigs.rs | 1 + tests/ef_tests/src/cases/bls_g2_compressed.rs | 1 + .../ef_tests/src/cases/bls_g2_uncompressed.rs | 9 +- tests/ef_tests/src/cases/bls_priv_to_pub.rs | 1 + tests/ef_tests/src/cases/bls_sign_msg.rs | 1 + tests/ef_tests/src/cases/common.rs | 72 ++++++++++++++ tests/ef_tests/src/cases/epoch_processing.rs | 2 +- .../src/cases/genesis_initialization.rs | 2 +- tests/ef_tests/src/cases/genesis_validity.rs | 2 +- tests/ef_tests/src/cases/operations.rs | 2 +- tests/ef_tests/src/cases/sanity_blocks.rs | 2 +- tests/ef_tests/src/cases/sanity_slots.rs | 2 +- tests/ef_tests/src/cases/shuffling.rs | 9 +- tests/ef_tests/src/cases/ssz_generic.rs | 20 ++-- tests/ef_tests/src/cases/ssz_static.rs | 33 ++----- tests/ef_tests/src/decode.rs | 31 +++++++ tests/ef_tests/src/lib.rs | 3 +- tests/ef_tests/src/results.rs | 3 +- tests/ef_tests/src/yaml_decode.rs | 93 ------------------- 23 files changed, 185 insertions(+), 165 
deletions(-) create mode 100644 tests/ef_tests/src/cases/common.rs create mode 100644 tests/ef_tests/src/decode.rs delete mode 100644 tests/ef_tests/src/yaml_decode.rs diff --git a/eth2/utils/tree_hash/src/impls.rs b/eth2/utils/tree_hash/src/impls.rs index 88293196e..9f09f50ce 100644 --- a/eth2/utils/tree_hash/src/impls.rs +++ b/eth2/utils/tree_hash/src/impls.rs @@ -1,5 +1,5 @@ use super::*; -use ethereum_types::H256; +use ethereum_types::{H256, U128, U256}; macro_rules! impl_for_bitsize { ($type: ident, $bit_size: expr) => { @@ -73,6 +73,46 @@ macro_rules! impl_for_u8_array { impl_for_u8_array!(4); impl_for_u8_array!(32); +impl TreeHash for U128 { + fn tree_hash_type() -> TreeHashType { + TreeHashType::Basic + } + + fn tree_hash_packed_encoding(&self) -> Vec { + let mut result = vec![0; 16]; + self.to_little_endian(&mut result); + result + } + + fn tree_hash_packing_factor() -> usize { + 2 + } + + fn tree_hash_root(&self) -> Vec { + merkle_root(&self.tree_hash_packed_encoding(), 0) + } +} + +impl TreeHash for U256 { + fn tree_hash_type() -> TreeHashType { + TreeHashType::Basic + } + + fn tree_hash_packed_encoding(&self) -> Vec { + let mut result = vec![0; 32]; + self.to_little_endian(&mut result); + result + } + + fn tree_hash_packing_factor() -> usize { + 1 + } + + fn tree_hash_root(&self) -> Vec { + merkle_root(&self.tree_hash_packed_encoding(), 0) + } +} + impl TreeHash for H256 { fn tree_hash_type() -> TreeHashType { TreeHashType::Vector diff --git a/tests/ef_tests/Cargo.toml b/tests/ef_tests/Cargo.toml index ba6aca259..2f1dea11d 100644 --- a/tests/ef_tests/Cargo.toml +++ b/tests/ef_tests/Cargo.toml @@ -18,7 +18,9 @@ serde_derive = "1.0" serde_repr = "0.1" serde_yaml = "0.8" eth2_ssz = "0.1" +eth2_ssz_derive = "0.1" tree_hash = "0.1" +tree_hash_derive = "0.2" state_processing = { path = "../../eth2/state_processing" } swap_or_not_shuffle = { path = "../../eth2/utils/swap_or_not_shuffle" } types = { path = "../../eth2/types" } diff --git 
a/tests/ef_tests/src/cases.rs b/tests/ef_tests/src/cases.rs index ed00f0ffe..279086b68 100644 --- a/tests/ef_tests/src/cases.rs +++ b/tests/ef_tests/src/cases.rs @@ -9,6 +9,7 @@ mod bls_g2_compressed; mod bls_g2_uncompressed; mod bls_priv_to_pub; mod bls_sign_msg; +mod common; mod epoch_processing; mod genesis_initialization; mod genesis_validity; @@ -25,6 +26,7 @@ pub use bls_g2_compressed::*; pub use bls_g2_uncompressed::*; pub use bls_priv_to_pub::*; pub use bls_sign_msg::*; +pub use common::SszStaticType; pub use epoch_processing::*; pub use genesis_initialization::*; pub use genesis_validity::*; @@ -61,20 +63,6 @@ pub trait Case: Debug + Sync { fn result(&self, case_index: usize) -> Result<(), Error>; } -pub trait BlsCase: serde::de::DeserializeOwned {} - -impl YamlDecode for T { - fn yaml_decode(string: &str) -> Result { - serde_yaml::from_str(string).map_err(|e| Error::FailedToParseTest(format!("{:?}", e))) - } -} - -impl LoadCase for T { - fn load_from_dir(path: &Path) -> Result { - Self::yaml_decode_file(&path.join("data.yaml")) - } -} - #[derive(Debug)] pub struct Cases { pub test_cases: Vec, diff --git a/tests/ef_tests/src/cases/bls_aggregate_pubkeys.rs b/tests/ef_tests/src/cases/bls_aggregate_pubkeys.rs index c94e14495..13c2fea17 100644 --- a/tests/ef_tests/src/cases/bls_aggregate_pubkeys.rs +++ b/tests/ef_tests/src/cases/bls_aggregate_pubkeys.rs @@ -1,5 +1,6 @@ use super::*; use crate::case_result::compare_result; +use crate::cases::common::BlsCase; use bls::{AggregatePublicKey, PublicKey}; use serde_derive::Deserialize; diff --git a/tests/ef_tests/src/cases/bls_aggregate_sigs.rs b/tests/ef_tests/src/cases/bls_aggregate_sigs.rs index 882ad7220..22fa197df 100644 --- a/tests/ef_tests/src/cases/bls_aggregate_sigs.rs +++ b/tests/ef_tests/src/cases/bls_aggregate_sigs.rs @@ -1,5 +1,6 @@ use super::*; use crate::case_result::compare_result; +use crate::cases::common::BlsCase; use bls::{AggregateSignature, Signature}; use serde_derive::Deserialize; diff --git 
a/tests/ef_tests/src/cases/bls_g2_compressed.rs b/tests/ef_tests/src/cases/bls_g2_compressed.rs index f8381f5a7..1a9f1d561 100644 --- a/tests/ef_tests/src/cases/bls_g2_compressed.rs +++ b/tests/ef_tests/src/cases/bls_g2_compressed.rs @@ -1,5 +1,6 @@ use super::*; use crate::case_result::compare_result; +use crate::cases::common::BlsCase; use bls::{compress_g2, hash_on_g2}; use serde_derive::Deserialize; diff --git a/tests/ef_tests/src/cases/bls_g2_uncompressed.rs b/tests/ef_tests/src/cases/bls_g2_uncompressed.rs index 962b6aac3..3eae29967 100644 --- a/tests/ef_tests/src/cases/bls_g2_uncompressed.rs +++ b/tests/ef_tests/src/cases/bls_g2_uncompressed.rs @@ -1,5 +1,6 @@ use super::*; use crate::case_result::compare_result; +use crate::cases::common::BlsCase; use bls::hash_on_g2; use serde_derive::Deserialize; @@ -9,18 +10,14 @@ pub struct BlsG2UncompressedInput { pub domain: String, } +impl BlsCase for BlsG2UncompressedInput {} + #[derive(Debug, Clone, Deserialize)] pub struct BlsG2Uncompressed { pub input: BlsG2UncompressedInput, pub output: Vec>, } -impl YamlDecode for BlsG2Uncompressed { - fn yaml_decode(yaml: &str) -> Result { - Ok(serde_yaml::from_str(yaml).unwrap()) - } -} - impl Case for BlsG2Uncompressed { fn result(&self, _case_index: usize) -> Result<(), Error> { // Convert message and domain to required types diff --git a/tests/ef_tests/src/cases/bls_priv_to_pub.rs b/tests/ef_tests/src/cases/bls_priv_to_pub.rs index 869a0891c..016e04dd1 100644 --- a/tests/ef_tests/src/cases/bls_priv_to_pub.rs +++ b/tests/ef_tests/src/cases/bls_priv_to_pub.rs @@ -1,5 +1,6 @@ use super::*; use crate::case_result::compare_result; +use crate::cases::common::BlsCase; use bls::{PublicKey, SecretKey}; use serde_derive::Deserialize; diff --git a/tests/ef_tests/src/cases/bls_sign_msg.rs b/tests/ef_tests/src/cases/bls_sign_msg.rs index 18e90896b..7ee109f81 100644 --- a/tests/ef_tests/src/cases/bls_sign_msg.rs +++ b/tests/ef_tests/src/cases/bls_sign_msg.rs @@ -1,5 +1,6 @@ use 
super::*; use crate::case_result::compare_result; +use crate::cases::common::BlsCase; use bls::{SecretKey, Signature}; use serde_derive::Deserialize; diff --git a/tests/ef_tests/src/cases/common.rs b/tests/ef_tests/src/cases/common.rs new file mode 100644 index 000000000..8e787f157 --- /dev/null +++ b/tests/ef_tests/src/cases/common.rs @@ -0,0 +1,72 @@ +use crate::cases::LoadCase; +use crate::decode::yaml_decode_file; +use crate::error::Error; +use serde_derive::Deserialize; +use ssz::{Decode, Encode}; +use ssz_derive::{Decode, Encode}; +use std::convert::TryFrom; +use std::fmt::Debug; +use std::path::Path; +use tree_hash::TreeHash; + +/// Trait for all BLS cases to eliminate some boilerplate. +pub trait BlsCase: serde::de::DeserializeOwned {} + +impl LoadCase for T { + fn load_from_dir(path: &Path) -> Result { + yaml_decode_file(&path.join("data.yaml")) + } +} + +/// Macro to wrap U128 and U256 so they deserialize correctly. +macro_rules! uint_wrapper { + ($wrapper_name:ident, $wrapped_type:ty) => { + #[derive(Debug, Clone, Copy, Default, PartialEq, Decode, Encode, Deserialize)] + #[serde(try_from = "String")] + pub struct $wrapper_name { + pub x: $wrapped_type, + } + + impl TryFrom for $wrapper_name { + type Error = String; + + fn try_from(s: String) -> Result { + <$wrapped_type>::from_dec_str(&s) + .map(|x| Self { x }) + .map_err(|e| format!("{:?}", e)) + } + } + + impl tree_hash::TreeHash for $wrapper_name { + fn tree_hash_type() -> tree_hash::TreeHashType { + <$wrapped_type>::tree_hash_type() + } + + fn tree_hash_packed_encoding(&self) -> Vec { + self.x.tree_hash_packed_encoding() + } + + fn tree_hash_packing_factor() -> usize { + <$wrapped_type>::tree_hash_packing_factor() + } + + fn tree_hash_root(&self) -> Vec { + self.x.tree_hash_root() + } + } + }; +} + +uint_wrapper!(TestU128, ethereum_types::U128); +uint_wrapper!(TestU256, ethereum_types::U256); + +/// Trait alias for all deez bounds +pub trait SszStaticType: + serde::de::DeserializeOwned + Decode + 
Encode + TreeHash + Clone + PartialEq + Debug + Sync +{ +} + +impl SszStaticType for T where + T: serde::de::DeserializeOwned + Decode + Encode + TreeHash + Clone + PartialEq + Debug + Sync +{ +} diff --git a/tests/ef_tests/src/cases/epoch_processing.rs b/tests/ef_tests/src/cases/epoch_processing.rs index d79b5fc48..2a2dde629 100644 --- a/tests/ef_tests/src/cases/epoch_processing.rs +++ b/tests/ef_tests/src/cases/epoch_processing.rs @@ -1,9 +1,9 @@ use super::*; use crate::bls_setting::BlsSetting; use crate::case_result::compare_beacon_state_results_without_caches; +use crate::decode::{ssz_decode_file, yaml_decode_file}; use crate::type_name; use crate::type_name::TypeName; -use crate::yaml_decode::{ssz_decode_file, yaml_decode_file}; use serde_derive::Deserialize; use state_processing::per_epoch_processing::{ errors::EpochProcessingError, process_crosslinks, process_final_updates, diff --git a/tests/ef_tests/src/cases/genesis_initialization.rs b/tests/ef_tests/src/cases/genesis_initialization.rs index 4f0fa4296..bd0507b9d 100644 --- a/tests/ef_tests/src/cases/genesis_initialization.rs +++ b/tests/ef_tests/src/cases/genesis_initialization.rs @@ -1,6 +1,6 @@ use super::*; use crate::case_result::compare_beacon_state_results_without_caches; -use crate::yaml_decode::{ssz_decode_file, yaml_decode_file}; +use crate::decode::{ssz_decode_file, yaml_decode_file}; use serde_derive::Deserialize; use state_processing::initialize_beacon_state_from_eth1; use std::path::PathBuf; diff --git a/tests/ef_tests/src/cases/genesis_validity.rs b/tests/ef_tests/src/cases/genesis_validity.rs index efebe5e11..3a1b9e267 100644 --- a/tests/ef_tests/src/cases/genesis_validity.rs +++ b/tests/ef_tests/src/cases/genesis_validity.rs @@ -1,5 +1,5 @@ use super::*; -use crate::yaml_decode::{ssz_decode_file, yaml_decode_file}; +use crate::decode::{ssz_decode_file, yaml_decode_file}; use serde_derive::Deserialize; use state_processing::is_valid_genesis_state; use std::path::{Path, PathBuf}; diff --git 
a/tests/ef_tests/src/cases/operations.rs b/tests/ef_tests/src/cases/operations.rs index e86e6f598..7b4ffff98 100644 --- a/tests/ef_tests/src/cases/operations.rs +++ b/tests/ef_tests/src/cases/operations.rs @@ -1,8 +1,8 @@ use super::*; use crate::bls_setting::BlsSetting; use crate::case_result::compare_beacon_state_results_without_caches; +use crate::decode::{ssz_decode_file, yaml_decode_file}; use crate::type_name::TypeName; -use crate::yaml_decode::{ssz_decode_file, yaml_decode_file}; use serde_derive::Deserialize; use ssz::Decode; use state_processing::per_block_processing::{ diff --git a/tests/ef_tests/src/cases/sanity_blocks.rs b/tests/ef_tests/src/cases/sanity_blocks.rs index d88d8f295..9fadea42e 100644 --- a/tests/ef_tests/src/cases/sanity_blocks.rs +++ b/tests/ef_tests/src/cases/sanity_blocks.rs @@ -1,7 +1,7 @@ use super::*; use crate::bls_setting::BlsSetting; use crate::case_result::compare_beacon_state_results_without_caches; -use crate::yaml_decode::{ssz_decode_file, yaml_decode_file}; +use crate::decode::{ssz_decode_file, yaml_decode_file}; use serde_derive::Deserialize; use state_processing::{ per_block_processing, per_slot_processing, BlockInvalid, BlockProcessingError, diff --git a/tests/ef_tests/src/cases/sanity_slots.rs b/tests/ef_tests/src/cases/sanity_slots.rs index a66f1c2c4..34acb1105 100644 --- a/tests/ef_tests/src/cases/sanity_slots.rs +++ b/tests/ef_tests/src/cases/sanity_slots.rs @@ -1,7 +1,7 @@ use super::*; use crate::bls_setting::BlsSetting; use crate::case_result::compare_beacon_state_results_without_caches; -use crate::yaml_decode::{ssz_decode_file, yaml_decode_file}; +use crate::decode::{ssz_decode_file, yaml_decode_file}; use serde_derive::Deserialize; use state_processing::per_slot_processing; use std::path::PathBuf; diff --git a/tests/ef_tests/src/cases/shuffling.rs b/tests/ef_tests/src/cases/shuffling.rs index c0595e584..2fe632e84 100644 --- a/tests/ef_tests/src/cases/shuffling.rs +++ b/tests/ef_tests/src/cases/shuffling.rs @@ 
-1,5 +1,6 @@ use super::*; use crate::case_result::compare_result; +use crate::decode::yaml_decode_file; use serde_derive::Deserialize; use std::marker::PhantomData; use swap_or_not_shuffle::{get_permutated_index, shuffle_list}; @@ -13,15 +14,9 @@ pub struct Shuffling { _phantom: PhantomData, } -impl YamlDecode for Shuffling { - fn yaml_decode(yaml: &str) -> Result { - Ok(serde_yaml::from_str(yaml).unwrap()) - } -} - impl LoadCase for Shuffling { fn load_from_dir(path: &Path) -> Result { - Self::yaml_decode_file(&path.join("mapping.yaml")) + yaml_decode_file(&path.join("mapping.yaml")) } } diff --git a/tests/ef_tests/src/cases/ssz_generic.rs b/tests/ef_tests/src/cases/ssz_generic.rs index 05b96ad7d..5f9cd3faf 100644 --- a/tests/ef_tests/src/cases/ssz_generic.rs +++ b/tests/ef_tests/src/cases/ssz_generic.rs @@ -1,11 +1,12 @@ use super::*; -use crate::cases::ssz_static::{check_serialization, check_tree_hash, SszStaticType}; -use crate::yaml_decode::yaml_decode_file; +use crate::cases::common::{SszStaticType, TestU128, TestU256}; +use crate::cases::ssz_static::{check_serialization, check_tree_hash}; +use crate::decode::yaml_decode_file; use serde_derive::Deserialize; use std::fs; use std::path::{Path, PathBuf}; use types::typenum::*; -use types::{BitList, BitVector, FixedVector, VariableList}; +use types::{BitList, BitVector, FixedVector}; #[derive(Debug, Clone, Deserialize)] struct Metadata { @@ -51,9 +52,8 @@ macro_rules! 
type_dispatch { "uint16" => type_dispatch!($function, ($($arg),*), $base_ty, <$($param_ty,)* u16>, $($rest)*), "uint32" => type_dispatch!($function, ($($arg),*), $base_ty, <$($param_ty,)* u32>, $($rest)*), "uint64" => type_dispatch!($function, ($($arg),*), $base_ty, <$($param_ty,)* u64>, $($rest)*), - // FIXME(michael): implement tree hash for big ints - // "uint128" => type_dispatch!($function, ($($arg),*), $base_ty, <$($param_ty,)* etherum_types::U128>, $($rest)*), - // "uint256" => type_dispatch!($function, ($($arg),*), $base_ty, <$($param_ty,)* ethereum_types::U256>, $($rest)*), + "uint128" => type_dispatch!($function, ($($arg),*), $base_ty, <$($param_ty,)* TestU128>, $($rest)*), + "uint256" => type_dispatch!($function, ($($arg),*), $base_ty, <$($param_ty,)* TestU256>, $($rest)*), _ => { println!("unsupported: {}", $value); Ok(()) }, } }; @@ -121,9 +121,13 @@ impl Case for SszGeneric { )?; } "bitlist" => { - let limit = parts[1]; + let mut limit = parts[1]; - // FIXME(michael): mark length "no" cases as known failures + // Test format is inconsistent, pretend the limit is 32 (arbitrary) + // https://github.com/ethereum/eth2.0-spec-tests + if limit == "no" { + limit = "32"; + } type_dispatch!( ssz_generic_test, diff --git a/tests/ef_tests/src/cases/ssz_static.rs b/tests/ef_tests/src/cases/ssz_static.rs index f9f59cc4b..d1c9b1048 100644 --- a/tests/ef_tests/src/cases/ssz_static.rs +++ b/tests/ef_tests/src/cases/ssz_static.rs @@ -1,10 +1,10 @@ use super::*; use crate::case_result::compare_result; +use crate::cases::common::SszStaticType; +use crate::decode::yaml_decode_file; use serde_derive::Deserialize; -use ssz::{Decode, Encode}; -use std::fmt::Debug; use std::fs; -use tree_hash::{SignedRoot, TreeHash}; +use tree_hash::SignedRoot; use types::Hash256; #[derive(Debug, Clone, Deserialize)] @@ -13,12 +13,6 @@ struct SszStaticRoots { signing_root: Option, } -impl YamlDecode for SszStaticRoots { - fn yaml_decode(yaml: &str) -> Result { - 
Ok(serde_yaml::from_str(yaml).unwrap()) - } -} - #[derive(Debug, Clone)] pub struct SszStatic { roots: SszStaticRoots, @@ -33,26 +27,11 @@ pub struct SszStaticSR { value: T, } -// Trait alias for all deez bounds -pub trait SszStaticType: - serde::de::DeserializeOwned + Decode + Encode + TreeHash + Clone + PartialEq + Debug + Sync -{ -} - -impl SszStaticType for T where - T: serde::de::DeserializeOwned + Decode + Encode + TreeHash + Clone + PartialEq + Debug + Sync -{ -} - fn load_from_dir(path: &Path) -> Result<(SszStaticRoots, Vec, T), Error> { - // FIXME: set description/name - let roots = SszStaticRoots::yaml_decode_file(&path.join("roots.yaml"))?; - + // FIXME(michael): set description/name + let roots = yaml_decode_file(&path.join("roots.yaml"))?; let serialized = fs::read(&path.join("serialized.ssz")).expect("serialized.ssz exists"); - - let yaml = fs::read_to_string(&path.join("value.yaml")).expect("value.yaml exists"); - let value = - serde_yaml::from_str(&yaml).map_err(|e| Error::FailedToParseTest(format!("{:?}", e)))?; + let value = yaml_decode_file(&path.join("value.yaml"))?; Ok((roots, serialized, value)) } diff --git a/tests/ef_tests/src/decode.rs b/tests/ef_tests/src/decode.rs new file mode 100644 index 000000000..c1ea6fb3b --- /dev/null +++ b/tests/ef_tests/src/decode.rs @@ -0,0 +1,31 @@ +use super::*; +use std::fs; +use std::path::Path; + +pub fn yaml_decode(string: &str) -> Result { + serde_yaml::from_str(string).map_err(|e| Error::FailedToParseTest(format!("{:?}", e))) +} + +pub fn yaml_decode_file(path: &Path) -> Result { + fs::read_to_string(path) + .map_err(|e| { + Error::FailedToParseTest(format!("Unable to load {}: {:?}", path.display(), e)) + }) + .and_then(|s| yaml_decode(&s)) +} + +pub fn ssz_decode_file(path: &Path) -> Result { + fs::read(path) + .map_err(|e| { + Error::FailedToParseTest(format!("Unable to load {}: {:?}", path.display(), e)) + }) + .and_then(|s| { + T::from_ssz_bytes(&s).map_err(|e| { + Error::FailedToParseTest(format!( + 
"Unable to parse SSZ at {}: {:?}", + path.display(), + e + )) + }) + }) +} diff --git a/tests/ef_tests/src/lib.rs b/tests/ef_tests/src/lib.rs index bcf7c77a0..719bfc1aa 100644 --- a/tests/ef_tests/src/lib.rs +++ b/tests/ef_tests/src/lib.rs @@ -7,13 +7,12 @@ pub use cases::{ }; pub use error::Error; pub use handler::*; -pub use yaml_decode::YamlDecode; mod bls_setting; mod case_result; mod cases; +mod decode; mod error; mod handler; mod results; mod type_name; -mod yaml_decode; diff --git a/tests/ef_tests/src/results.rs b/tests/ef_tests/src/results.rs index 20e59f7b3..4f5513a9a 100644 --- a/tests/ef_tests/src/results.rs +++ b/tests/ef_tests/src/results.rs @@ -80,7 +80,8 @@ pub fn print_results( println!("-------"); println!( - "case ({}) from {} failed with {}:", + "case {} ({}) from {} failed with {}:", + failure.case_index, failure.desc, failure.path.display(), error.name() diff --git a/tests/ef_tests/src/yaml_decode.rs b/tests/ef_tests/src/yaml_decode.rs deleted file mode 100644 index 83a162930..000000000 --- a/tests/ef_tests/src/yaml_decode.rs +++ /dev/null @@ -1,93 +0,0 @@ -use super::*; -use ethereum_types::{U128, U256}; -use std::fs; -use std::path::Path; -use types::Fork; - -pub fn yaml_decode(string: &str) -> Result { - serde_yaml::from_str(string).map_err(|e| Error::FailedToParseTest(format!("{:?}", e))) -} - -pub fn yaml_decode_file(path: &Path) -> Result { - fs::read_to_string(path) - .map_err(|e| { - Error::FailedToParseTest(format!("Unable to load {}: {:?}", path.display(), e)) - }) - .and_then(|s| yaml_decode(&s)) -} - -pub fn ssz_decode_file(path: &Path) -> Result { - fs::read(path) - .map_err(|e| { - Error::FailedToParseTest(format!("Unable to load {}: {:?}", path.display(), e)) - }) - .and_then(|s| { - T::from_ssz_bytes(&s).map_err(|e| { - Error::FailedToParseTest(format!( - "Unable to parse SSZ at {}: {:?}", - path.display(), - e - )) - }) - }) -} - -pub trait YamlDecode: Sized { - /// Decode an object from the test specification YAML. 
- fn yaml_decode(string: &str) -> Result; - - fn yaml_decode_file(path: &Path) -> Result { - fs::read_to_string(path) - .map_err(|e| { - Error::FailedToParseTest(format!("Unable to load {}: {:?}", path.display(), e)) - }) - .and_then(|s| Self::yaml_decode(&s)) - } -} - -/// Basic types can general be decoded with the `parse` fn if they implement `str::FromStr`. -macro_rules! impl_via_parse { - ($ty: ty) => { - impl YamlDecode for $ty { - fn yaml_decode(string: &str) -> Result { - string - .parse::() - .map_err(|e| Error::FailedToParseTest(format!("{:?}", e))) - } - } - }; -} - -impl_via_parse!(u8); -impl_via_parse!(u16); -impl_via_parse!(u32); -impl_via_parse!(u64); - -/// Some `ethereum-types` methods have a `str::FromStr` implementation that expects `0x`-prefixed: -/// hex, so we use `from_dec_str` instead. -macro_rules! impl_via_from_dec_str { - ($ty: ty) => { - impl YamlDecode for $ty { - fn yaml_decode(string: &str) -> Result { - Self::from_dec_str(string).map_err(|e| Error::FailedToParseTest(format!("{:?}", e))) - } - } - }; -} - -impl_via_from_dec_str!(U128); -impl_via_from_dec_str!(U256); - -/// Types that already implement `serde::Deserialize` can be decoded using `serde_yaml`. -macro_rules! 
impl_via_serde_yaml { - ($ty: ty) => { - impl YamlDecode for $ty { - fn yaml_decode(string: &str) -> Result { - serde_yaml::from_str(string) - .map_err(|e| Error::FailedToParseTest(format!("{:?}", e))) - } - } - }; -} - -impl_via_serde_yaml!(Fork); From 3b40b691ab31e6c49d047bb8c705fcfa96b79310 Mon Sep 17 00:00:00 2001 From: Paul Hauner Date: Wed, 4 Sep 2019 15:58:51 +1000 Subject: [PATCH 218/305] Download ENR during bootstrap --- beacon_node/src/config.rs | 25 ++++++++++++++++++++++++- 1 file changed, 24 insertions(+), 1 deletion(-) diff --git a/beacon_node/src/config.rs b/beacon_node/src/config.rs index 6a13a9aae..cf5616938 100644 --- a/beacon_node/src/config.rs +++ b/beacon_node/src/config.rs @@ -137,6 +137,7 @@ fn process_testnet_subcommand( .and_then(|s| s.parse::().ok()); builder.import_bootstrap_libp2p_address(server, port)?; + builder.import_bootstrap_enr_address(server)?; builder.import_bootstrap_eth2_config(server)?; builder.set_beacon_chain_start_method(BeaconChainStartMethod::HttpBootstrap { @@ -301,7 +302,7 @@ impl<'a> ConfigBuilder<'a> { self.client_config.eth1_backend_method = method; } - /// Import the libp2p address for `server` into the list of bootnodes in `self`. + /// Import the libp2p address for `server` into the list of libp2p nodes to connect with. /// /// If `port` is `Some`, it is used as the port for the `Multiaddr`. If `port` is `None`, /// attempts to connect to the `server` via HTTP and retrieve it's libp2p listen port. @@ -333,6 +334,28 @@ impl<'a> ConfigBuilder<'a> { Ok(()) } + /// Import the enr address for `server` into the list of initial enrs (boot nodes). 
+ pub fn import_bootstrap_enr_address(&mut self, server: &str) -> Result<()> { + let bootstrapper = Bootstrapper::connect(server.to_string(), &self.log)?; + + if let Ok(enr) = bootstrapper.enr() { + info!( + self.log, + "Loaded bootstrapper ENR address"; + "enr" => format!("{:?}", enr) + ); + + self.client_config.network.boot_nodes.push(enr); + } else { + warn!( + self.log, + "Unable to estimate a bootstrapper enr address, this node may not find any peers." + ); + }; + + Ok(()) + } + /// Set the config data_dir to be an random directory. + /// + /// Useful for easily spinning up ephemeral testnets. From eeba69cd0f41e0cdc7cd07a5765142fe8b6c9527 Mon Sep 17 00:00:00 2001 From: Luke Anderson Date: Wed, 4 Sep 2019 22:03:55 +1000 Subject: [PATCH 219/305] Moved beacon chain from request functionality into its own function. --- beacon_node/rest_api/src/beacon.rs | 20 ++++---------------- beacon_node/rest_api/src/metrics.rs | 10 +++------- beacon_node/rest_api/src/spec.rs | 8 +++----- 3 files changed, 10 insertions(+), 28 deletions(-) diff --git a/beacon_node/rest_api/src/beacon.rs b/beacon_node/rest_api/src/beacon.rs index 1da640baf..935368892 100644 --- a/beacon_node/rest_api/src/beacon.rs +++ b/beacon_node/rest_api/src/beacon.rs @@ -109,10 +109,7 @@ pub fn get_block(req: Request) -> ApiResult /// HTTP handler to return a `BeaconBlock` root at a given `slot`. pub fn get_block_root(req: Request) -> ApiResult { - let beacon_chain = req - .extensions() - .get::>>() - .ok_or_else(|| ApiError::ServerError("Beacon chain extension missing".to_string()))?; + let beacon_chain = get_beacon_chain_from_request::(&req)?; let slot_string = UrlQuery::from_request(&req)?.only_one("slot")?; let target = parse_slot(&slot_string)?; @@ -163,10 +160,7 @@ pub struct StateResponse { /// Will not return a state if the request slot is in the future. Will return states higher than /// the current head by skipping slots.
pub fn get_state(req: Request) -> ApiResult { - let beacon_chain = req - .extensions() - .get::>>() - .ok_or_else(|| ApiError::ServerError("Beacon chain extension missing".to_string()))?; + let beacon_chain = get_beacon_chain_from_request::(&req)?; let (key, value) = match UrlQuery::from_request(&req) { Ok(query) => { @@ -220,10 +214,7 @@ pub fn get_state(req: Request) -> ApiResult /// Will not return a state if the request slot is in the future. Will return states higher than /// the current head by skipping slots. pub fn get_state_root(req: Request) -> ApiResult { - let beacon_chain = req - .extensions() - .get::>>() - .ok_or_else(|| ApiError::ServerError("Beacon chain extension missing".to_string()))?; + let beacon_chain = get_beacon_chain_from_request::(&req)?; let slot_string = UrlQuery::from_request(&req)?.only_one("slot")?; let slot = parse_slot(&slot_string)?; @@ -240,10 +231,7 @@ pub fn get_state_root(req: Request) -> ApiR pub fn get_current_finalized_checkpoint( req: Request, ) -> ApiResult { - let beacon_chain = req - .extensions() - .get::>>() - .ok_or_else(|| ApiError::ServerError("Beacon chain extension missing".to_string()))?; + let beacon_chain = get_beacon_chain_from_request::(&req)?; let checkpoint = beacon_chain .head() diff --git a/beacon_node/rest_api/src/metrics.rs b/beacon_node/rest_api/src/metrics.rs index 2239249b6..01dc4d22d 100644 --- a/beacon_node/rest_api/src/metrics.rs +++ b/beacon_node/rest_api/src/metrics.rs @@ -1,9 +1,8 @@ -use crate::{success_response, ApiError, ApiResult, DBPath}; -use beacon_chain::{BeaconChain, BeaconChainTypes}; +use crate::{helpers::*, success_response, ApiError, ApiResult, DBPath}; +use beacon_chain::BeaconChainTypes; use http::HeaderValue; use hyper::{Body, Request}; use prometheus::{Encoder, TextEncoder}; -use std::sync::Arc; pub use lighthouse_metrics::*; @@ -31,10 +30,7 @@ pub fn get_prometheus(req: Request) -> ApiR let mut buffer = vec![]; let encoder = TextEncoder::new(); - let beacon_chain = req - 
.extensions() - .get::>>() - .ok_or_else(|| ApiError::ServerError("Beacon chain extension missing".to_string()))?; + let beacon_chain = get_beacon_chain_from_request::(&req)?; let db_path = req .extensions() .get::() diff --git a/beacon_node/rest_api/src/spec.rs b/beacon_node/rest_api/src/spec.rs index 86d1c227d..a353b3833 100644 --- a/beacon_node/rest_api/src/spec.rs +++ b/beacon_node/rest_api/src/spec.rs @@ -1,6 +1,7 @@ use super::{success_response, ApiResult}; +use crate::helpers::*; use crate::ApiError; -use beacon_chain::{BeaconChain, BeaconChainTypes}; +use beacon_chain::BeaconChainTypes; use eth2_config::Eth2Config; use hyper::{Body, Request}; use std::sync::Arc; @@ -8,10 +9,7 @@ use types::EthSpec; /// HTTP handler to return the full spec object. pub fn get_spec(req: Request) -> ApiResult { - let beacon_chain = req - .extensions() - .get::>>() - .ok_or_else(|| ApiError::ServerError("Beacon chain extension missing".to_string()))?; + let beacon_chain = get_beacon_chain_from_request::(&req)?; let json: String = serde_json::to_string(&beacon_chain.spec) .map_err(|e| ApiError::ServerError(format!("Unable to serialize spec: {:?}", e)))?; From bf2f4597738c987d4b1454b8d9adfbdf011ee12a Mon Sep 17 00:00:00 2001 From: Luke Anderson Date: Wed, 4 Sep 2019 23:03:05 +1000 Subject: [PATCH 220/305] Extended API - Added a /beacon/validators function, to list all validators active in a particular epoch - Moved 'get_genesis_state' function, to align with router. 
- Added content-type for error responses - Tried adding a cache update call to fix issue getting validator duties (this is WIP) --- beacon_node/rest_api/src/beacon.rs | 57 +++++++++++++++++++++++---- beacon_node/rest_api/src/lib.rs | 5 +-- beacon_node/rest_api/src/validator.rs | 13 ++++++ 3 files changed, 65 insertions(+), 10 deletions(-) diff --git a/beacon_node/rest_api/src/beacon.rs b/beacon_node/rest_api/src/beacon.rs index 935368892..ae112883e 100644 --- a/beacon_node/rest_api/src/beacon.rs +++ b/beacon_node/rest_api/src/beacon.rs @@ -6,7 +6,7 @@ use serde::Serialize; use ssz_derive::Encode; use std::sync::Arc; use store::Store; -use types::{BeaconBlock, BeaconState, EthSpec, Hash256, Slot}; +use types::{BeaconBlock, BeaconState, Epoch, EthSpec, Hash256, Slot}; #[derive(Serialize)] pub struct HeadResponse { @@ -136,16 +136,47 @@ pub fn get_fork(req: Request) -> ApiResult Ok(success_response(Body::from(json))) } -/// HTTP handler to return a `BeaconState` at a given `root` or `slot`. +/// HTTP handler to return the set of validators for an `Epoch` /// -/// Will not return a state if the request slot is in the future. Will return states higher than -/// the current head by skipping slots. -pub fn get_genesis_state(req: Request) -> ApiResult { +/// The `Epoch` parameter can be any epoch number. If it is not specified, +/// the current epoch is assumed. +pub fn get_validators(req: Request) -> ApiResult { let beacon_chain = get_beacon_chain_from_request::(&req)?; - let (_root, state) = state_at_slot(&beacon_chain, Slot::new(0))?; + let epoch = match UrlQuery::from_request(&req) { + // We have some parameters, so make sure it's the epoch one and parse it + Ok(query) => query + .only_one("epoch")? + .parse::() + .map(Epoch::from) + .map_err(|e| { + ApiError::InvalidQueryParams(format!( + "Invalid epoch parameter, must be a u64. 
{:?}", + e + )) + })?, + // In this case, our url query did not contain any parameters, so we take the default + Err(_) => beacon_chain.epoch().map_err(|e| { + ApiError::ServerError(format!("Unable to determine current epoch: {:?}", e)) + })?, + }; - ResponseBuilder::new(&req).body(&state) + let all_validators = &beacon_chain.head().beacon_state.validators; + let mut active_validators = Vec::with_capacity(all_validators.len()); + for (_index, validator) in all_validators.iter().enumerate() { + if validator.is_active_at(epoch) { + active_validators.push(validator) + } + } + active_validators.shrink_to_fit(); + let json: String = serde_json::to_string(&active_validators).map_err(|e| { + ApiError::ServerError(format!( + "Unable to serialize list of active validators: {:?}", + e + )) + })?; + + Ok(success_response(Body::from(json))) } #[derive(Serialize, Encode)] @@ -244,3 +275,15 @@ pub fn get_current_finalized_checkpoint( Ok(success_response(Body::from(json))) } + +/// HTTP handler to return a `BeaconState` at a given `root` or `slot`. +/// +/// Will not return a state if the request slot is in the future. Will return states higher than +/// the current head by skipping slots. 
+pub fn get_genesis_state(req: Request) -> ApiResult { + let beacon_chain = get_beacon_chain_from_request::(&req)?; + + let (_root, state) = state_at_slot(&beacon_chain, Slot::new(0))?; + + ResponseBuilder::new(&req).body(&state) +} diff --git a/beacon_node/rest_api/src/lib.rs b/beacon_node/rest_api/src/lib.rs index 4dbbdda51..02c68c639 100644 --- a/beacon_node/rest_api/src/lib.rs +++ b/beacon_node/rest_api/src/lib.rs @@ -54,6 +54,7 @@ impl Into> for ApiError { }; Response::builder() .status(status_code.0) + .header("content-type", "text/plain") .body(Body::from(status_code.1)) .expect("Response should always be created.") } @@ -160,9 +161,7 @@ pub fn start_server( helpers::implementation_pending_response(req) } - (&Method::GET, "/beacon/validators") => { - helpers::implementation_pending_response(req) - } + (&Method::GET, "/beacon/validators") => beacon::get_validators::(req), (&Method::GET, "/beacon/validators/indicies") => { helpers::implementation_pending_response(req) } diff --git a/beacon_node/rest_api/src/validator.rs b/beacon_node/rest_api/src/validator.rs index 010e49305..2374373bd 100644 --- a/beacon_node/rest_api/src/validator.rs +++ b/beacon_node/rest_api/src/validator.rs @@ -60,6 +60,18 @@ pub fn get_validator_duties(req: Request) - .collect::, _>>()?; let mut duties: Vec = Vec::new(); + // Update the committee cache + // TODO: Do we need to update the cache on the state, for the epoch which has been specified? + beacon_chain + .state_now() + .map_err(|e| ApiError::ServerError(format!("Unable to get current BeaconState {:?}", e)))? + .maybe_as_mut_ref() + .ok_or(ApiError::ServerError( + "Unable to get mutable BeaconState".into(), + ))? 
+ .build_committee_cache(relative_epoch, &beacon_chain.spec) + .map_err(|e| ApiError::ServerError(format!("Unable to build state caches: {:?}", e)))?; + // Get a list of all validators for this epoch let validator_proposers: Vec = epoch .slot_iter(T::EthSpec::slots_per_epoch()) @@ -67,6 +79,7 @@ pub fn get_validator_duties(req: Request) - head_state .get_beacon_proposer_index(slot, relative_epoch, &beacon_chain.spec) .map_err(|e| { + // TODO: why are we getting an uninitialized state error here??? ApiError::ServerError(format!( "Unable to get proposer index for validator: {:?}", e From 32ca8e951d7e5b712e20f1ff37f4d0a6eb868e7c Mon Sep 17 00:00:00 2001 From: Luke Anderson Date: Thu, 5 Sep 2019 00:36:06 +1000 Subject: [PATCH 221/305] Updated content-type acceptance and returning, mainly for /spec/eth2_config --- beacon_node/rest_api/src/beacon.rs | 5 +-- beacon_node/rest_api/src/response_builder.rs | 35 ++++++++++++-------- 2 files changed, 22 insertions(+), 18 deletions(-) diff --git a/beacon_node/rest_api/src/beacon.rs b/beacon_node/rest_api/src/beacon.rs index ae112883e..70b3f3ee9 100644 --- a/beacon_node/rest_api/src/beacon.rs +++ b/beacon_node/rest_api/src/beacon.rs @@ -276,10 +276,7 @@ pub fn get_current_finalized_checkpoint( Ok(success_response(Body::from(json))) } -/// HTTP handler to return a `BeaconState` at a given `root` or `slot`. -/// -/// Will not return a state if the request slot is in the future. Will return states higher than -/// the current head by skipping slots. +/// HTTP handler to return a `BeaconState` at the genesis block. 
pub fn get_genesis_state(req: Request) -> ApiResult { let beacon_chain = get_beacon_chain_from_request::(&req)?; diff --git a/beacon_node/rest_api/src/response_builder.rs b/beacon_node/rest_api/src/response_builder.rs index 9b8819996..c1df4892c 100644 --- a/beacon_node/rest_api/src/response_builder.rs +++ b/beacon_node/rest_api/src/response_builder.rs @@ -26,24 +26,31 @@ impl ResponseBuilder { } pub fn body(self, item: &T) -> ApiResult { - let body: Body = match self.encoding { - Encoding::JSON => Body::from(serde_json::to_string(&item).map_err(|e| { - ApiError::ServerError(format!( - "Unable to serialize response body as JSON: {:?}", - e - )) - })?), - Encoding::SSZ => Body::from(item.as_ssz_bytes()), - Encoding::YAML => Body::from(serde_yaml::to_string(&item).map_err(|e| { - ApiError::ServerError(format!( - "Unable to serialize response body as YAML: {:?}", - e - )) - })?), + let (body, content_type) = match self.encoding { + Encoding::JSON => ( + Body::from(serde_json::to_string(&item).map_err(|e| { + ApiError::ServerError(format!( + "Unable to serialize response body as JSON: {:?}", + e + )) + })?), + "application/json", + ), + Encoding::SSZ => (Body::from(item.as_ssz_bytes()), "application/ssz"), + Encoding::YAML => ( + Body::from(serde_yaml::to_string(&item).map_err(|e| { + ApiError::ServerError(format!( + "Unable to serialize response body as YAML: {:?}", + e + )) + })?), + "application/yaml", + ), }; Response::builder() .status(StatusCode::OK) + .header("content-type", content_type) + .body(Body::from(body)) .map_err(|e| ApiError::ServerError(format!("Failed to build response: {:?}", e))) } From 4339e372c7f859ac3d0135d8caaa9e73899f7ded Mon Sep 17 00:00:00 2001 From: Luke Anderson Date: Thu, 5 Sep 2019 00:39:54 +1000 Subject: [PATCH 222/305] Updated the API spec.
- Moved ENR into it's own object - Moved the POST request parameters into the requestBody attribute (instead of query) - Updated IndexedAttestation to Attestation - Updated path for current_finalized_checkpoint - Completed the /spec and /spec and /spec/slots_per_epoch endpoints - Completed the /beacon/state/genesis endpoint - Completed the /spec/eth2_config endpoint - Fixed the prometheus example value - Added various example values to reflect real world values - Fixed incorrect indenting of Eth1Data - Added the whole ChainSpec schema --- docs/api_spec.yaml | 333 ++++++++++++++++++++++++++++++++++++++------- 1 file changed, 281 insertions(+), 52 deletions(-) diff --git a/docs/api_spec.yaml b/docs/api_spec.yaml index 2356d1f66..ced07e96d 100644 --- a/docs/api_spec.yaml +++ b/docs/api_spec.yaml @@ -84,10 +84,7 @@ paths: content: application/json: schema: - type: string - format: byte - example: "-IW4QHzEZbIB0YN47bVlsUrGbcL9vl21n7xF5gRKjMNkJ4MxfcwiqrsE7Ows8EnzOvC8P4ZyAjfOhr2ffk0bWAxDGq8BgmlwhH8AAAGDdGNwgiMog3VkcIIjKIlzZWNwMjU2azGhAjzKzqo5c33ydUUHrWJ4FWwIXJa2MN9BBsgZkj6mhthp" - pattern: "^[^-A-Za-z0-9+/=]+$" + $ref: '#/components/schemas/ENR' 500: $ref: '#/components/responses/InternalError' @@ -884,13 +881,13 @@ paths: - Phase0 summary: "Publish a signed block." description: "Instructs the beacon node to broadcast a newly signed beacon block to the beacon network, to be included in the beacon chain. The beacon node is not required to validate the signed `BeaconBlock`, and a successful response (20X) only indicates that the broadcast has been successful. The beacon node is expected to integrate the new block into its state, and therefore validate the block internally, however blocks which fail the validation are still broadcast but a different status code is returned (202)" - parameters: - - name: beacon_block - in: query - required: true - description: "The `BeaconBlock` object, as sent from the beacon node originally, but now with the signature field completed." 
- schema: - $ref: '#/components/schemas/BeaconBlock' + requestBody: + description: "The `BeaconBlock` object, as sent from the beacon node originally, but now with the signature field completed. Must be sent in JSON format in the body of the request." + required: true + content: + application/json: + schema: + $ref: '#/components/schemas/BeaconBlock' responses: 200: description: "The block was validated successfully and has been broadcast. It has also been integrated into the beacon node's database." @@ -908,7 +905,7 @@ paths: tags: - Phase0 summary: "Produce an attestation, without signature." - description: "Requests that the beacon node produce an IndexedAttestation, with a blank signature field, which the validator will then sign." + description: "Requests that the beacon node produce an Attestation, with a blank signature field, which the validator will then sign." parameters: - name: validator_pubkey in: query @@ -919,7 +916,7 @@ paths: - name: poc_bit in: query required: true - description: "The proof-of-custody bit that is to be reported by the requesting validator. This bit will be inserted into the appropriate location in the returned `IndexedAttestation`." + description: "The proof-of-custody bit that is to be reported by the requesting validator. This bit will be inserted into the appropriate location in the returned `Attestation`." schema: type: integer format: uint32 @@ -943,7 +940,7 @@ paths: content: application/json: schema: - $ref: '#/components/schemas/IndexedAttestation' + $ref: '#/components/schemas/Attestation' 400: $ref: '#/components/responses/InvalidRequest' 500: @@ -954,14 +951,14 @@ paths: tags: - Phase0 summary: "Publish a signed attestation." - description: "Instructs the beacon node to broadcast a newly signed IndexedAttestation object to the intended shard subnet. The beacon node is not required to validate the signed IndexedAttestation, and a successful response (20X) only indicates that the broadcast has been successful. 
The beacon node is expected to integrate the new attestation into its state, and therefore validate the attestation internally, however attestations which fail the validation are still broadcast but a different status code is returned (202)" - parameters: - - name: attestation - in: query - required: true - description: "An `IndexedAttestation` structure, as originally provided by the beacon node, but now with the signature field completed." - schema: - $ref: '#/components/schemas/IndexedAttestation' + description: "Instructs the beacon node to broadcast a newly signed Attestation object to the intended shard subnet. The beacon node is not required to validate the signed Attestation, and a successful response (20X) only indicates that the broadcast has been successful. The beacon node is expected to integrate the new attestation into its state, and therefore validate the attestation internally, however attestations which fail the validation are still broadcast but a different status code is returned (202)" + requestBody: + description: "An `Attestation` structure, as originally provided by the beacon node, but now with the signature field completed. Must be sent in JSON format in the body of the request." + required: true + content: + application/json: + schema: + $ref: '#/components/schemas/Attestation' responses: 200: description: "The attestation was validated successfully and has been broadcast. It has also been integrated into the beacon node's database." 
@@ -1042,13 +1039,12 @@ paths: format: bytes pattern: "^0x[a-fA-F0-9]{64}$" description: "The state root" - 400: $ref: '#/components/responses/InvalidRequest' 500: $ref: '#/components/responses/InternalError' - /beacon/current_finalized_checkpoint: + /beacon/state/current_finalized_checkpoint: get: tags: - Phase0 @@ -1062,16 +1058,63 @@ paths: application/json: schema: $ref: '#/components/schemas/Checkpoint' - - 500: $ref: '#/components/responses/InternalError' - #TODO fill spec - /spec: + /beacon/state/genesis: + get: + tags: + - Phase0 + summary: "Get the full beacon state, as it was at genesis." + description: "Requests the beacon node to provide the full beacon state object and the state root, as it was for the genesis block." + responses: + 200: + description: Success response + content: + application/json: + schema: + $ref: '#/components/schemas/BeaconState' + application/yaml: + schema: + $ref: '#/components/schemas/BeaconState' + 400: + $ref: '#/components/responses/InvalidRequest' + 500: + $ref: '#/components/responses/InternalError' + + /spec: + get: + tags: + - Phase0 + summary: "Get the current ChainSpec configuration." + description: "Requests the beacon node to provide the configuration that it has used to start the beacon chain." + responses: + 200: + description: Success response + content: + application/json: + schema: + $ref: '#/components/schemas/ChainSpec' + 500: + $ref: '#/components/responses/InternalError' - #TODO fill spec/slots_per_epoch /spec/slots_per_epoch: + get: + tags: + - Phase0 + summary: "Get the configured number of slots per epoch." + description: "The number of slots in each epoch is part of the Eth2.0 spec. This function simply returns an integer representing this value." 
+ responses: + 200: + description: Success response + content: + application/json: + schema: + type: integer + format: uint64 + example: 64 + 500: + $ref: '#/components/responses/InternalError' /spec/deposit_contract: get: @@ -1089,6 +1132,28 @@ paths: 500: $ref: '#/components/responses/InternalError' + /spec/eth2_config: + get: + tags: + - Phase0 + summary: "Gets the Eth2.0 spec, including the identifying string." + description: "" + responses: + 200: + description: Success response + content: + application/json: + schema: + type: object + properties: + spec_constants: + type: string + example: "mainnet" + spec: + $ref: '#/components/schemas/ChainSpec' + 500: + $ref: '#/components/responses/InternalError' + /metrics: get: tags: @@ -1100,9 +1165,7 @@ paths: description: Request successful content: text/plain: - example: - summary: 'Promethius metrics' - value: "# HELP beacon_head_state_active_validators_total Count of active validators at the head of the chain + example: "# HELP beacon_head_state_active_validators_total Count of active validators at the head of the chain # TYPE beacon_head_state_active_validators_total gauge beacon_head_state_active_validators_total 16 # HELP beacon_head_state_current_justified_epoch Current justified epoch at the head of the chain @@ -1178,6 +1241,12 @@ components: pattern: "^0x[a-fA-F0-9]{64}$" description: "A hex encoded ethereum address." + ENR: + type: string + format: byte + example: "-IW4QHzEZbIB0YN47bVlsUrGbcL9vl21n7xF5gRKjMNkJ4MxfcwiqrsE7Ows8EnzOvC8P4ZyAjfOhr2ffk0bWAxDGq8BgmlwhH8AAAGDdGNwgiMog3VkcIIjKIlzZWNwMjU2azGhAjzKzqo5c33ydUUHrWJ4FWwIXJa2MN9BBsgZkj6mhthp" + pattern: "^[^-A-Za-z0-9+/=]+$" + Shard: type: integer format: uint64 @@ -1239,13 +1308,16 @@ components: format: bytes pattern: "^0x[a-fA-F0-9]{64}$" description: "The 32 byte hash of the public key which the validator uses for withdrawing their rewards." 
+ example: "0x00ec7ef7780c9d151597924036262dd28dc60e1228f4da6fecf9d402cb3f3594" effective_balance: type: integer format: uint64 description: "The effective balance of the validator, measured in Gwei." + example: 32000000000 slashed: type: boolean description: "Whether the validator has or has not been slashed." + example: false activation_eligiblity_epoch: type: integer format: uint64 @@ -1259,11 +1331,13 @@ components: format: uint64 nullable: true description: "Epoch when the validator was exited, or null if the validator has not exited." + example: 18446744073709551615 withdrawable_epoch: type: integer format: uint64 nullable: true description: "Epoch when the validator is eligible to withdraw their funds, or null if the validator has not exited." + example: 18446744073709551615 ValidatorDuty: type: object @@ -1301,24 +1375,24 @@ components: format: uint64 description: "Globally, the estimated most recent slot number, or current target slot number." - Eth1Data: - type: object - description: "The [`Eth1Data`](https://github.com/ethereum/eth2.0-specs/blob/master/specs/core/0_beacon-chain.md#eth1data) object from the Eth2.0 spec." - properties: - deposit_root: - type: string - format: byte - pattern: "^0x[a-fA-F0-9]{64}$" - description: "Root of the deposit tree." - deposit_count: - type: integer - format: uint64 - description: "Total number of deposits." - block_hash: - type: string - format: byte - pattern: "^0x[a-fA-F0-9]{64}$" - description: "Ethereum 1.x block hash." + Eth1Data: + type: object + description: "The [`Eth1Data`](https://github.com/ethereum/eth2.0-specs/blob/master/specs/core/0_beacon-chain.md#eth1data) object from the Eth2.0 spec." + properties: + deposit_root: + type: string + format: byte + pattern: "^0x[a-fA-F0-9]{64}$" + description: "Root of the deposit tree." + deposit_count: + type: integer + format: uint64 + description: "Total number of deposits." 
+ block_hash: + type: string + format: byte + pattern: "^0x[a-fA-F0-9]{64}$" + description: "Ethereum 1.x block hash." BeaconBlock: description: "The [`BeaconBlock`](https://github.com/ethereum/eth2.0-specs/blob/master/specs/core/0_beacon-chain.md#beaconblock) object from the Eth2.0 spec." @@ -1404,9 +1478,9 @@ components: description: "The [`AttesterSlashing`](https://github.com/ethereum/eth2.0-specs/blob/master/specs/core/0_beacon-chain.md#attesterslashing) object from the Eth2.0 spec." properties: attestation_1: - $ref: '#/components/schemas/IndexedAttestation' + $ref: '#/components/schemas/Attestation' attestation_2: - $ref: '#/components/schemas/IndexedAttestation' + $ref: '#/components/schemas/Attestation' attestations: type: array items: @@ -1818,6 +1892,161 @@ components: pattern: "^0x[a-fA-F0-9]{64}$" description: "Root of the crosslinked shard data since the previous crosslink." + ChainSpec: + type: object + description: "Stores all of the values which specify a particular chain. 
The `ChainSpec` object in Lighthouse" + properties: + far_future_epoch: + type: integer + format: uint64 + example: 18446744073709551615 + base_rewards_per_epoch: + type: integer + format: uint64 + example: 5 + deposit_contract_tree_depth: + type: integer + format: uint64 + example: 32 + seconds_per_day: + type: integer + format: uint64 + example: 86400 + target_committee_size: + type: integer + format: uint64 + example: 128 + min_per_epoch_churn_limit: + type: integer + format: uint64 + example: 4 + churn_limit_quotient: + type: integer + format: uint64 + example: 65536 + shuffle_round_count: + type: integer + format: uint8 + example: 90 + min_genesis_active_validator_count: + type: integer + format: uint64 + example: 65536 + min_genesis_time: + type: integer + format: uint64 + example: 1578009600 + min_deposit_amount: + type: integer + format: uint64 + example: 1000000000 + max_effective_balance: + type: integer + format: uint64 + example: 32000000000 + ejection_balance: + type: integer + format: uint64 + example: 16000000000 + effective_balance_increment: + type: integer + format: uint64 + example: 1000000000 + genesis_slot: + type: integer + format: uint64 + example: 0 + bls_withdrawal_prefix_byte: + type: string + format: byte + pattern: "^0x[a-fA-F0-9]{2}$" + example: "0x00" + milliseconds_per_slot: + type: integer + format: uint64 + example: 6000 + min_attestation_inclusion_delay: + type: integer + format: uint64 + example: 1 + min_seed_lookahead: + type: integer + format: uint64 + example: 1 + activation_exit_delay: + type: integer + format: uint64 + example: 4 + min_validator_withdrawability_delay: + type: integer + format: uint64 + example: 256 + persistent_committee_period: + type: integer + format: uint64 + example: 2048 + max_epochs_per_crosslink: + type: integer + format: uint64 + example: 64 + min_epochs_to_inactivity_penalty: + type: integer + format: uint64 + example: 4 + base_reward_factor: + type: integer + format: uint64 + example: 64 + 
whistleblower_reward_quotient: + type: integer + format: uint64 + example: 512 + proposer_reward_quotient: + type: integer + format: uint64 + example: 8 + inactivity_penalty_quotient: + type: integer + format: uint64 + example: 33554432 + min_slashing_penalty_quotient: + type: integer + format: uint64 + example: 32 + domain_beacon_proposer: + type: integer + format: uint32 + example: 0 + domain_randao: + type: integer + format: uint32 + example: 1 + domain_attestation: + type: integer + format: uint32 + example: 2 + domain_deposit: + type: integer + format: uint32 + example: 3 + domain_voluntary_exit: + type: integer + format: uint32 + example: 4 + domain_transfer: + type: integer + format: uint32 + example: 5 + boot_nodes: + type: array + items: + $ref: '#/components/schemas/ENR' + network_id: + type: integer + format: uint8 + example: 2 + + responses: Success: description: "Request successful." From 50fca17308914e2bd9c52fd4a583556b28ca6faf Mon Sep 17 00:00:00 2001 From: Luke Anderson Date: Thu, 5 Sep 2019 00:40:23 +1000 Subject: [PATCH 223/305] Updated ChainSpec serialization and added some comments about potentially missing components. --- eth2/types/src/chain_spec.rs | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/eth2/types/src/chain_spec.rs b/eth2/types/src/chain_spec.rs index d59e0db0a..17e9dba49 100644 --- a/eth2/types/src/chain_spec.rs +++ b/eth2/types/src/chain_spec.rs @@ -24,11 +24,13 @@ pub struct ChainSpec { /* * Constants */ - #[serde(skip_serializing)] // skipped because Serde TOML has trouble with u64::max pub far_future_epoch: Epoch, + // The above may need to be skipped because Serde TOML has trouble with u64::max. 
+ // Use: #[serde(skip_serializing)] pub base_rewards_per_epoch: u64, pub deposit_contract_tree_depth: u64, pub seconds_per_day: u64, + //TODO missing JUSTIFICATION_BITS_LENGTH and ENDIANNESS /* * Misc @@ -52,6 +54,7 @@ pub struct ChainSpec { * Initial Values */ pub genesis_slot: Slot, + //TODO Missing genesis_epoch #[serde(deserialize_with = "u8_from_hex_str", serialize_with = "u8_to_hex_str")] pub bls_withdrawal_prefix_byte: u8, @@ -59,6 +62,7 @@ pub struct ChainSpec { * Time parameters */ pub milliseconds_per_slot: u64, + //TODO should we also have SECONDS_PER_SLOT? pub min_attestation_inclusion_delay: u64, pub min_seed_lookahead: Epoch, pub activation_exit_delay: u64, From e7ab89a783f02b13a242d7ec528520c12a6e65d1 Mon Sep 17 00:00:00 2001 From: Age Manning Date: Thu, 5 Sep 2019 02:06:39 +1000 Subject: [PATCH 224/305] Adds gossipsub object validation and verification --- beacon_node/eth2-libp2p/Cargo.toml | 4 +- beacon_node/eth2-libp2p/src/behaviour.rs | 28 +++++++++-- beacon_node/eth2-libp2p/src/config.rs | 3 +- beacon_node/eth2-libp2p/src/discovery.rs | 1 + beacon_node/eth2-libp2p/src/service.rs | 5 +- beacon_node/network/src/message_handler.rs | 49 +++++++++++++++---- beacon_node/network/src/service.rs | 53 ++++++++++++--------- beacon_node/network/src/sync/simple_sync.rs | 10 ++-- 8 files changed, 106 insertions(+), 47 deletions(-) diff --git a/beacon_node/eth2-libp2p/Cargo.toml b/beacon_node/eth2-libp2p/Cargo.toml index caa5b28e4..59c799105 100644 --- a/beacon_node/eth2-libp2p/Cargo.toml +++ b/beacon_node/eth2-libp2p/Cargo.toml @@ -7,8 +7,8 @@ edition = "2018" [dependencies] clap = "2.32.0" #SigP repository -libp2p = { git = "https://github.com/SigP/rust-libp2p", rev = "61036890d574f5b46573952b20def2baafd6a6e9" } -enr = { git = "https://github.com/SigP/rust-libp2p/", rev = "61036890d574f5b46573952b20def2baafd6a6e9", features = ["serde"] } +libp2p = { git = "https://github.com/SigP/rust-libp2p", rev = "76f7475e4b7063e663ad03c7524cf091f9961968" } +enr = { git 
= "https://github.com/SigP/rust-libp2p/", rev = "76f7475e4b7063e663ad03c7524cf091f9961968", features = ["serde"] } types = { path = "../../eth2/types" } serde = "1.0" serde_derive = "1.0" diff --git a/beacon_node/eth2-libp2p/src/behaviour.rs b/beacon_node/eth2-libp2p/src/behaviour.rs index 2c574e46a..a47d32ec2 100644 --- a/beacon_node/eth2-libp2p/src/behaviour.rs +++ b/beacon_node/eth2-libp2p/src/behaviour.rs @@ -15,7 +15,7 @@ use libp2p::{ tokio_io::{AsyncRead, AsyncWrite}, NetworkBehaviour, PeerId, }; -use slog::{debug, o, trace}; +use slog::{debug, o}; use std::num::NonZeroU32; use std::time::Duration; @@ -90,13 +90,15 @@ impl NetworkBehaviourEventProcess { - trace!(self.log, "Received GossipEvent"); - + GossipsubEvent::Message(propagation_source, gs_msg) => { + let id = gs_msg.id(); let msg = PubsubMessage::from_topics(&gs_msg.topics, gs_msg.data); + // Note: We are keeping track here of the peer that sent us the message, not the + // peer that originally published the message. self.events.push(BehaviourEvent::GossipMessage { - source: gs_msg.source, + id, + source: propagation_source, topics: gs_msg.topics, message: msg, }); @@ -199,6 +201,13 @@ impl Behaviour { } } + /// Forwards a message that is waiting in gossipsub's mcache. Messages are only propagated + /// once validated by the beacon chain. + pub fn propagate_message(&mut self, propagation_source: &PeerId, message_id: String) { + self.gossipsub + .propagate_message(&message_id, propagation_source); + } + /* Eth2 RPC behaviour functions */ /// Sends an RPC Request/Response via the RPC protocol. @@ -214,12 +223,21 @@ impl Behaviour { /// The types of events than can be obtained from polling the behaviour. pub enum BehaviourEvent { + /// A received RPC event and the peer that it was received from. RPC(PeerId, RPCEvent), + /// We have completed an initial connection to a new peer. PeerDialed(PeerId), + /// A peer has disconnected. PeerDisconnected(PeerId), + /// A gossipsub message has been received. 
GossipMessage { + /// The gossipsub message id. Used when propagating blocks after validation. + id: String, + /// The peer from which we received this message, not the peer that published it. source: PeerId, + /// The topics that this message was sent on. topics: Vec, + /// The message itself. message: PubsubMessage, }, } diff --git a/beacon_node/eth2-libp2p/src/config.rs b/beacon_node/eth2-libp2p/src/config.rs index 7cb501c1f..fd44b99af 100644 --- a/beacon_node/eth2-libp2p/src/config.rs +++ b/beacon_node/eth2-libp2p/src/config.rs @@ -74,7 +74,8 @@ impl Default for Config { // parameter. gs_config: GossipsubConfigBuilder::new() .max_transmit_size(1_048_576) - .heartbeat_interval(Duration::from_secs(20)) + .heartbeat_interval(Duration::from_secs(20)) // TODO: Reduce for mainnet + .propagate_messages(false) // require validation before propagation .build(), boot_nodes: vec![], libp2p_nodes: vec![], diff --git a/beacon_node/eth2-libp2p/src/discovery.rs b/beacon_node/eth2-libp2p/src/discovery.rs index 4a8aba2b1..759adc482 100644 --- a/beacon_node/eth2-libp2p/src/discovery.rs +++ b/beacon_node/eth2-libp2p/src/discovery.rs @@ -169,6 +169,7 @@ where fn inject_connected(&mut self, peer_id: PeerId, _endpoint: ConnectedPoint) { self.connected_peers.insert(peer_id); + // TODO: Drop peers if over max_peer limit metrics::inc_counter(&metrics::PEER_CONNECT_EVENT_COUNT); metrics::set_gauge(&metrics::PEERS_CONNECTED, self.connected_peers() as i64); diff --git a/beacon_node/eth2-libp2p/src/service.rs b/beacon_node/eth2-libp2p/src/service.rs index 34781927c..3559fb850 100644 --- a/beacon_node/eth2-libp2p/src/service.rs +++ b/beacon_node/eth2-libp2p/src/service.rs @@ -145,16 +145,16 @@ impl Stream for Service { fn poll(&mut self) -> Poll, Self::Error> { loop { match self.swarm.poll() { - //Behaviour events Ok(Async::Ready(Some(event))) => match event { - // TODO: Stub here for debugging BehaviourEvent::GossipMessage { + id, source, topics, message, } => { trace!(self.log, "Gossipsub 
message received"; "service" => "Swarm"); return Ok(Async::Ready(Some(Libp2pEvent::PubsubMessage { + id, source, topics, message, @@ -222,6 +222,7 @@ pub enum Libp2pEvent { PeerDisconnected(PeerId), /// Received pubsub message. PubsubMessage { + id: String, source: PeerId, topics: Vec, message: PubsubMessage, diff --git a/beacon_node/network/src/message_handler.rs b/beacon_node/network/src/message_handler.rs index c14fc970d..d6e9f8be8 100644 --- a/beacon_node/network/src/message_handler.rs +++ b/beacon_node/network/src/message_handler.rs @@ -21,6 +21,8 @@ pub struct MessageHandler { _chain: Arc>, /// The syncing framework. sync: SimpleSync, + /// A channel to the network service to allow for gossip propagation. + network_send: mpsc::UnboundedSender, /// The `MessageHandler` logger. log: slog::Logger, } @@ -34,8 +36,9 @@ pub enum HandlerMessage { PeerDisconnected(PeerId), /// An RPC response/request has been received. RPC(PeerId, RPCEvent), - /// A gossip message has been received. - PubsubMessage(PeerId, PubsubMessage), + /// A gossip message has been received. The fields are: message id, the peer that sent us this + /// message and the message itself. 
+ PubsubMessage(String, PeerId, PubsubMessage), } impl MessageHandler { @@ -50,12 +53,13 @@ impl MessageHandler { let (handler_send, handler_recv) = mpsc::unbounded_channel(); // Initialise sync and begin processing in thread - let sync = SimpleSync::new(beacon_chain.clone(), network_send, &log); + let sync = SimpleSync::new(beacon_chain.clone(), network_send.clone(), &log); // generate the Message handler let mut handler = MessageHandler { _chain: beacon_chain.clone(), sync, + network_send, log: log.clone(), }; @@ -87,8 +91,8 @@ impl MessageHandler { self.handle_rpc_message(peer_id, rpc_event); } // An RPC message request/response has been received - HandlerMessage::PubsubMessage(peer_id, gossip) => { - self.handle_gossip(peer_id, gossip); + HandlerMessage::PubsubMessage(id, peer_id, gossip) => { + self.handle_gossip(id, peer_id, gossip); } } } @@ -194,24 +198,34 @@ impl MessageHandler { } /// Handle RPC messages - fn handle_gossip(&mut self, peer_id: PeerId, gossip_message: PubsubMessage) { + fn handle_gossip(&mut self, id: String, peer_id: PeerId, gossip_message: PubsubMessage) { match gossip_message { PubsubMessage::Block(message) => match self.decode_gossip_block(message) { Ok(block) => { - let _should_forward_on = self.sync.on_block_gossip(peer_id, block); + let should_forward_on = self.sync.on_block_gossip(peer_id.clone(), block); + // TODO: Apply more sophisticated validation and decoding logic + if should_forward_on { + self.propagate_message(id, peer_id.clone()); + } } Err(e) => { debug!(self.log, "Invalid gossiped beacon block"; "peer_id" => format!("{}", peer_id), "Error" => format!("{:?}", e)); } }, PubsubMessage::Attestation(message) => match self.decode_gossip_attestation(message) { - Ok(attestation) => self.sync.on_attestation_gossip(peer_id, attestation), + Ok(attestation) => { + // TODO: Apply more sophisticated validation and decoding logic + self.propagate_message(id, peer_id.clone()); + self.sync.on_attestation_gossip(peer_id, attestation); + } 
Err(e) => { debug!(self.log, "Invalid gossiped attestation"; "peer_id" => format!("{}", peer_id), "Error" => format!("{:?}", e)); } }, PubsubMessage::VoluntaryExit(message) => match self.decode_gossip_exit(message) { Ok(_exit) => { + // TODO: Apply more sophisticated validation and decoding logic + self.propagate_message(id, peer_id.clone()); // TODO: Handle exits debug!(self.log, "Received a voluntary exit"; "peer_id" => format!("{}", peer_id) ); } @@ -222,6 +236,8 @@ impl MessageHandler { PubsubMessage::ProposerSlashing(message) => { match self.decode_gossip_proposer_slashing(message) { Ok(_slashing) => { + // TODO: Apply more sophisticated validation and decoding logic + self.propagate_message(id, peer_id.clone()); // TODO: Handle proposer slashings debug!(self.log, "Received a proposer slashing"; "peer_id" => format!("{}", peer_id) ); } @@ -233,6 +249,8 @@ impl MessageHandler { PubsubMessage::AttesterSlashing(message) => { match self.decode_gossip_attestation_slashing(message) { Ok(_slashing) => { + // TODO: Apply more sophisticated validation and decoding logic + self.propagate_message(id, peer_id.clone()); // TODO: Handle attester slashings debug!(self.log, "Received an attester slashing"; "peer_id" => format!("{}", peer_id) ); } @@ -248,6 +266,21 @@ impl MessageHandler { } } + /// Informs the network service that the message should be forwarded to other peers. + fn propagate_message(&mut self, message_id: String, propagation_source: PeerId) { + self.network_send + .try_send(NetworkMessage::Propagate { + propagation_source, + message_id, + }) + .unwrap_or_else(|_| { + warn!( + self.log, + "Could not send propagation request to the network service" + ) + }); + } + /* Decoding of gossipsub objects from the network. 
* * The decoding is done in the message handler as it has access to to a `BeaconChain` and can diff --git a/beacon_node/network/src/service.rs b/beacon_node/network/src/service.rs index a8b3c74b6..5336c7118 100644 --- a/beacon_node/network/src/service.rs +++ b/beacon_node/network/src/service.rs @@ -159,12 +159,23 @@ fn network_service( // poll the network channel match network_recv.poll() { Ok(Async::Ready(Some(message))) => match message { - NetworkMessage::Send(peer_id, outgoing_message) => match outgoing_message { - OutgoingMessage::RPC(rpc_event) => { - trace!(log, "Sending RPC Event: {:?}", rpc_event); - libp2p_service.lock().swarm.send_rpc(peer_id, rpc_event); - } - }, + NetworkMessage::RPC(peer_id, rpc_event) => { + trace!(log, "Sending RPC Event: {:?}", rpc_event); + libp2p_service.lock().swarm.send_rpc(peer_id, rpc_event); + } + NetworkMessage::Propagate { + propagation_source, + message_id, + } => { + trace!(log, "Propagating gossipsub message"; + "propagation_peer" => format!("{:?}", propagation_source), + "message_id" => format!("{}", message_id), + ); + libp2p_service + .lock() + .swarm + .propagate_message(&propagation_source, message_id); + } NetworkMessage::Publish { topics, message } => { debug!(log, "Sending pubsub message"; "topics" => format!("{:?}",topics)); libp2p_service.lock().swarm.publish(&topics, message); @@ -203,13 +214,14 @@ fn network_service( .map_err(|_| "Failed to send PeerDisconnected to handler")?; } Libp2pEvent::PubsubMessage { - source, message, .. + id, + source, + message, + .. } => { - //TODO: Decide if we need to propagate the topic upwards. 
(Potentially for - //attestations) message_handler_send - .try_send(HandlerMessage::PubsubMessage(source, message)) - .map_err(|_| " failed to send pubsub message to handler")?; + .try_send(HandlerMessage::PubsubMessage(id, source, message)) + .map_err(|_| "Failed to send pubsub message to handler")?; } }, Ok(Async::Ready(None)) => unreachable!("Stream never ends"), @@ -225,19 +237,16 @@ fn network_service( /// Types of messages that the network service can receive. #[derive(Debug)] pub enum NetworkMessage { - /// Send a message to libp2p service. - //TODO: Define typing for messages across the wire - Send(PeerId, OutgoingMessage), - /// Publish a message to pubsub mechanism. + /// Send an RPC message to the libp2p service. + RPC(PeerId, RPCEvent), + /// Publish a message to gossipsub. Publish { topics: Vec, message: PubsubMessage, }, -} - -/// Type of outgoing messages that can be sent through the network service. -#[derive(Debug)] -pub enum OutgoingMessage { - /// Send an RPC request/response. 
- RPC(RPCEvent), + /// Propagate a received gossipsub message + Propagate { + propagation_source: PeerId, + message_id: String, + }, } diff --git a/beacon_node/network/src/sync/simple_sync.rs b/beacon_node/network/src/sync/simple_sync.rs index 573ac9dd1..789f5b6be 100644 --- a/beacon_node/network/src/sync/simple_sync.rs +++ b/beacon_node/network/src/sync/simple_sync.rs @@ -1,5 +1,5 @@ use super::manager::{ImportManager, ImportManagerOutcome}; -use crate::service::{NetworkMessage, OutgoingMessage}; +use crate::service::NetworkMessage; use beacon_chain::{BeaconChain, BeaconChainTypes, BlockProcessingOutcome}; use eth2_libp2p::rpc::methods::*; use eth2_libp2p::rpc::{RPCEvent, RPCRequest, RPCResponse, RequestId}; @@ -468,7 +468,7 @@ impl SimpleSync { SHOULD_FORWARD_GOSSIP_BLOCK } BlockProcessingOutcome::BlockIsAlreadyKnown => SHOULD_FORWARD_GOSSIP_BLOCK, - _ => SHOULD_NOT_FORWARD_GOSSIP_BLOCK, + _ => SHOULD_NOT_FORWARD_GOSSIP_BLOCK, //TODO: Decide if we want to forward these } } else { SHOULD_NOT_FORWARD_GOSSIP_BLOCK @@ -554,12 +554,8 @@ impl NetworkContext { } fn send_rpc_event(&mut self, peer_id: PeerId, rpc_event: RPCEvent) { - self.send(peer_id, OutgoingMessage::RPC(rpc_event)) - } - - fn send(&mut self, peer_id: PeerId, outgoing_message: OutgoingMessage) { self.network_send - .try_send(NetworkMessage::Send(peer_id, outgoing_message)) + .try_send(NetworkMessage::RPC(peer_id, rpc_event)) .unwrap_or_else(|_| { warn!( self.log, From a3877b6135272a29a0d0e43f5a36f4c43d73a5ab Mon Sep 17 00:00:00 2001 From: Age Manning Date: Thu, 5 Sep 2019 08:07:57 +1000 Subject: [PATCH 225/305] Updates syncing stability, fixes large RPC message codec, corrects beacon chain referencing --- beacon_node/client/src/notifier.rs | 4 +- beacon_node/eth2-libp2p/src/rpc/codec/ssz.rs | 36 ++- beacon_node/eth2-libp2p/src/rpc/methods.rs | 6 +- beacon_node/eth2-libp2p/src/service.rs | 5 + beacon_node/network/src/message_handler.rs | 7 +- beacon_node/network/src/sync/manager.rs | 211 +++++++------ 
beacon_node/network/src/sync/simple_sync.rs | 304 +++++++++++-------- 7 files changed, 312 insertions(+), 261 deletions(-) diff --git a/beacon_node/client/src/notifier.rs b/beacon_node/client/src/notifier.rs index d705637cb..343918d4d 100644 --- a/beacon_node/client/src/notifier.rs +++ b/beacon_node/client/src/notifier.rs @@ -34,10 +34,10 @@ pub fn run(client: &Client, executor: TaskExecutor, exit // Panics if libp2p is poisoned. let connected_peer_count = libp2p.lock().swarm.connected_peers(); - debug!(log, "Libp2p connected peer status"; "peer_count" => connected_peer_count); + debug!(log, "Connected peer status"; "peer_count" => connected_peer_count); if connected_peer_count <= WARN_PEER_COUNT { - warn!(log, "Low libp2p peer count"; "peer_count" => connected_peer_count); + warn!(log, "Low peer count"; "peer_count" => connected_peer_count); } Ok(()) diff --git a/beacon_node/eth2-libp2p/src/rpc/codec/ssz.rs b/beacon_node/eth2-libp2p/src/rpc/codec/ssz.rs index 260a00346..1966bab62 100644 --- a/beacon_node/eth2-libp2p/src/rpc/codec/ssz.rs +++ b/beacon_node/eth2-libp2p/src/rpc/codec/ssz.rs @@ -152,45 +152,49 @@ impl Decoder for SSZOutboundCodec { type Error = RPCError; fn decode(&mut self, src: &mut BytesMut) -> Result, Self::Error> { - match self.inner.decode(src).map_err(RPCError::from) { - Ok(Some(packet)) => match self.protocol.message_name.as_str() { + if src.is_empty() { + // the object sent could be empty. We return the empty object if this is the case + match self.protocol.message_name.as_str() { "hello" => match self.protocol.version.as_str() { - "1" => Ok(Some(RPCResponse::Hello(HelloMessage::from_ssz_bytes( - &packet, - )?))), + "1" => Err(RPCError::Custom( + "Hello stream terminated unexpectedly".into(), + )), // cannot have an empty HELLO message. 
The stream has terminated unexpectedly _ => unreachable!("Cannot negotiate an unknown version"), }, "goodbye" => Err(RPCError::InvalidProtocol("GOODBYE doesn't have a response")), "beacon_blocks" => match self.protocol.version.as_str() { - "1" => Ok(Some(RPCResponse::BeaconBlocks(packet.to_vec()))), + "1" => Ok(Some(RPCResponse::BeaconBlocks(Vec::new()))), _ => unreachable!("Cannot negotiate an unknown version"), }, "recent_beacon_blocks" => match self.protocol.version.as_str() { - "1" => Ok(Some(RPCResponse::RecentBeaconBlocks(packet.to_vec()))), + "1" => Ok(Some(RPCResponse::RecentBeaconBlocks(Vec::new()))), _ => unreachable!("Cannot negotiate an unknown version"), }, _ => unreachable!("Cannot negotiate an unknown protocol"), - }, - Ok(None) => { - // the object sent could be a empty. We return the empty object if this is the case - match self.protocol.message_name.as_str() { + } + } else { + match self.inner.decode(src).map_err(RPCError::from) { + Ok(Some(packet)) => match self.protocol.message_name.as_str() { "hello" => match self.protocol.version.as_str() { - "1" => Ok(None), // cannot have an empty HELLO message. 
The stream has terminated unexpectedly + "1" => Ok(Some(RPCResponse::Hello(HelloMessage::from_ssz_bytes( + &packet, + )?))), _ => unreachable!("Cannot negotiate an unknown version"), }, "goodbye" => Err(RPCError::InvalidProtocol("GOODBYE doesn't have a response")), "beacon_blocks" => match self.protocol.version.as_str() { - "1" => Ok(Some(RPCResponse::BeaconBlocks(Vec::new()))), + "1" => Ok(Some(RPCResponse::BeaconBlocks(packet.to_vec()))), _ => unreachable!("Cannot negotiate an unknown version"), }, "recent_beacon_blocks" => match self.protocol.version.as_str() { - "1" => Ok(Some(RPCResponse::RecentBeaconBlocks(Vec::new()))), + "1" => Ok(Some(RPCResponse::RecentBeaconBlocks(packet.to_vec()))), _ => unreachable!("Cannot negotiate an unknown version"), }, _ => unreachable!("Cannot negotiate an unknown protocol"), - } + }, + Ok(None) => Ok(None), // waiting for more bytes + Err(e) => Err(e), } - Err(e) => Err(e), } } } diff --git a/beacon_node/eth2-libp2p/src/rpc/methods.rs b/beacon_node/eth2-libp2p/src/rpc/methods.rs index c9610b000..49813abe9 100644 --- a/beacon_node/eth2-libp2p/src/rpc/methods.rs +++ b/beacon_node/eth2-libp2p/src/rpc/methods.rs @@ -168,8 +168,10 @@ impl std::fmt::Display for RPCResponse { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { match self { RPCResponse::Hello(hello) => write!(f, "{}", hello), - RPCResponse::BeaconBlocks(_) => write!(f, ""), - RPCResponse::RecentBeaconBlocks(_) => write!(f, ""), + RPCResponse::BeaconBlocks(data) => write!(f, ", len: {}", data.len()), + RPCResponse::RecentBeaconBlocks(data) => { + write!(f, ", len: {}", data.len()) + } } } } diff --git a/beacon_node/eth2-libp2p/src/service.rs b/beacon_node/eth2-libp2p/src/service.rs index 9f08b1eda..dac011752 100644 --- a/beacon_node/eth2-libp2p/src/service.rs +++ b/beacon_node/eth2-libp2p/src/service.rs @@ -98,6 +98,11 @@ impl Service { // attempt to connect to any specified boot-nodes for bootnode_enr in config.boot_nodes { for multiaddr in 
bootnode_enr.multiaddr() { + // ignore udp multiaddr if it exists + let components = multiaddr.iter().collect::>(); + if let Protocol::Udp(_) = components[1] { + continue; + } dial_addr(multiaddr); } } diff --git a/beacon_node/network/src/message_handler.rs b/beacon_node/network/src/message_handler.rs index d6e9f8be8..cade65d63 100644 --- a/beacon_node/network/src/message_handler.rs +++ b/beacon_node/network/src/message_handler.rs @@ -17,8 +17,6 @@ use types::{Attestation, AttesterSlashing, BeaconBlock, ProposerSlashing, Volunt /// Handles messages received from the network and client and organises syncing. pub struct MessageHandler { - /// Currently loaded and initialised beacon chain. - _chain: Arc>, /// The syncing framework. sync: SimpleSync, /// A channel to the network service to allow for gossip propagation. @@ -53,13 +51,12 @@ impl MessageHandler { let (handler_send, handler_recv) = mpsc::unbounded_channel(); // Initialise sync and begin processing in thread - let sync = SimpleSync::new(beacon_chain.clone(), network_send.clone(), &log); + let sync = SimpleSync::new(Arc::downgrade(&beacon_chain), network_send.clone(), &log); // generate the Message handler let mut handler = MessageHandler { - _chain: beacon_chain.clone(), - sync, network_send, + sync, log: log.clone(), }; diff --git a/beacon_node/network/src/sync/manager.rs b/beacon_node/network/src/sync/manager.rs index 1eec51843..2b2ed9dca 100644 --- a/beacon_node/network/src/sync/manager.rs +++ b/beacon_node/network/src/sync/manager.rs @@ -62,13 +62,13 @@ use slog::{debug, info, trace, warn, Logger}; use smallvec::SmallVec; use std::collections::{HashMap, HashSet}; use std::ops::{Add, Sub}; -use std::sync::{Arc, Weak}; +use std::sync::Weak; use types::{BeaconBlock, EthSpec, Hash256, Slot}; /// Blocks are downloaded in batches from peers. This constant specifies how many blocks per batch /// is requested. Currently the value is small for testing. This will be incremented for /// production. 
-const MAX_BLOCKS_PER_REQUEST: u64 = 100; +const MAX_BLOCKS_PER_REQUEST: u64 = 50; /// The number of slots ahead of us that is allowed before requesting a long-range (batch) Sync /// from a peer. If a peer is within this tolerance (forwards or backwards), it is treated as a @@ -224,10 +224,10 @@ impl ImportManager { /// Generates a new `ImportManager` given a logger and an Arc reference to a beacon chain. The /// import manager keeps a weak reference to the beacon chain, which allows the chain to be /// dropped during the syncing process. The syncing handles this termination gracefully. - pub fn new(beacon_chain: Arc>, log: &slog::Logger) -> Self { + pub fn new(beacon_chain: Weak>, log: &slog::Logger) -> Self { ImportManager { event_queue: SmallVec::new(), - chain: Arc::downgrade(&beacon_chain), + chain: beacon_chain, state: ManagerState::Regular, import_queue: HashMap::new(), parent_queue: SmallVec::new(), @@ -359,7 +359,9 @@ impl ImportManager { warn!(self.log, "Peer returned too many empty block batches"; "peer" => format!("{:?}", peer_id)); block_requests.state = BlockRequestsState::Failed; - } else if block_requests.current_start_slot >= block_requests.target_head_slot { + } else if block_requests.current_start_slot + MAX_BLOCKS_PER_REQUEST + >= block_requests.target_head_slot + { warn!(self.log, "Peer did not return blocks it claimed to possess"; "peer" => format!("{:?}", peer_id)); block_requests.state = BlockRequestsState::Failed; @@ -583,6 +585,11 @@ impl ImportManager { re_run = re_run || self.process_complete_parent_requests(); } + // exit early if the beacon chain is dropped + if let None = self.chain.upgrade() { + return ImportManagerOutcome::Idle; + } + // return any queued events if !self.event_queue.is_empty() { let event = self.event_queue.remove(0); @@ -681,56 +688,48 @@ impl ImportManager { self.import_queue.retain(|peer_id, block_requests| { if block_requests.state == BlockRequestsState::ReadyToProcess { - // check that the chain still exists - 
if let Some(chain) = chain_ref.upgrade() { - let downloaded_blocks = - std::mem::replace(&mut block_requests.downloaded_blocks, Vec::new()); - let last_element = downloaded_blocks.len() - 1; - let start_slot = downloaded_blocks[0].slot; - let end_slot = downloaded_blocks[last_element].slot; + let downloaded_blocks = + std::mem::replace(&mut block_requests.downloaded_blocks, Vec::new()); + let last_element = downloaded_blocks.len() - 1; + let start_slot = downloaded_blocks[0].slot; + let end_slot = downloaded_blocks[last_element].slot; - match process_blocks(chain, downloaded_blocks, log_ref) { - Ok(()) => { - debug!(log_ref, "Blocks processed successfully"; + match process_blocks(chain_ref.clone(), downloaded_blocks, log_ref) { + Ok(()) => { + debug!(log_ref, "Blocks processed successfully"; + "peer" => format!("{:?}", peer_id), + "start_slot" => start_slot, + "end_slot" => end_slot, + "no_blocks" => last_element + 1, + ); + block_requests.blocks_processed += last_element + 1; + + // check if the batch is complete, by verifying if we have reached the + // target head + if end_slot >= block_requests.target_head_slot { + // Completed, re-hello the peer to ensure we are up to the latest head + event_queue_ref.push(ImportManagerOutcome::Hello(peer_id.clone())); + // remove the request + false + } else { + // have not reached the end, queue another batch + block_requests.update_start_slot(); + re_run = true; + // keep the batch + true + } + } + Err(e) => { + warn!(log_ref, "Block processing failed"; "peer" => format!("{:?}", peer_id), "start_slot" => start_slot, "end_slot" => end_slot, "no_blocks" => last_element + 1, - ); - block_requests.blocks_processed += last_element + 1; - - // check if the batch is complete, by verifying if we have reached the - // target head - if end_slot >= block_requests.target_head_slot { - // Completed, re-hello the peer to ensure we are up to the latest head - event_queue_ref.push(ImportManagerOutcome::Hello(peer_id.clone())); - // remove 
the request - false - } else { - // have not reached the end, queue another batch - block_requests.update_start_slot(); - re_run = true; - // keep the batch - true - } - } - Err(e) => { - warn!(log_ref, "Block processing failed"; - "peer" => format!("{:?}", peer_id), - "start_slot" => start_slot, - "end_slot" => end_slot, - "no_blocks" => last_element + 1, - "error" => format!("{:?}", e), - ); - event_queue_ref - .push(ImportManagerOutcome::DownvotePeer(peer_id.clone())); - false - } + "error" => format!("{:?}", e), + ); + event_queue_ref.push(ImportManagerOutcome::DownvotePeer(peer_id.clone())); + false } - } else { - // chain no longer exists, empty the queue and return - event_queue_ref.clear(); - return false; } } else { // not ready to process @@ -894,42 +893,43 @@ impl ImportManager { // Helper function to process blocks fn process_blocks( - chain: Arc>, + weak_chain: Weak>, blocks: Vec>, log: &Logger, ) -> Result<(), String> { for block in blocks { - let processing_result = chain.process_block(block.clone()); + if let Some(chain) = weak_chain.upgrade() { + let processing_result = chain.process_block(block.clone()); - if let Ok(outcome) = processing_result { - match outcome { - BlockProcessingOutcome::Processed { block_root } => { - // The block was valid and we processed it successfully. 
- trace!( - log, "Imported block from network"; - "slot" => block.slot, - "block_root" => format!("{}", block_root), - ); - } - BlockProcessingOutcome::ParentUnknown { parent } => { - // blocks should be sequential and all parents should exist - trace!( - log, "Parent block is unknown"; - "parent_root" => format!("{}", parent), - "baby_block_slot" => block.slot, - ); - return Err(format!( - "Block at slot {} has an unknown parent.", - block.slot - )); - } - BlockProcessingOutcome::BlockIsAlreadyKnown => { - // this block is already known to us, move to the next - debug!( - log, "Imported a block that is already known"; - "parent_root" => format!("{}", parent), - "baby_block_slot" => block.slot, - ); + if let Ok(outcome) = processing_result { + match outcome { + BlockProcessingOutcome::Processed { block_root } => { + // The block was valid and we processed it successfully. + trace!( + log, "Imported block from network"; + "slot" => block.slot, + "block_root" => format!("{}", block_root), + ); + } + BlockProcessingOutcome::ParentUnknown { parent } => { + // blocks should be sequential and all parents should exist + trace!( + log, "Parent block is unknown"; + "parent_root" => format!("{}", parent), + "baby_block_slot" => block.slot, + ); + return Err(format!( + "Block at slot {} has an unknown parent.", + block.slot + )); + } + BlockProcessingOutcome::BlockIsAlreadyKnown => { + // this block is already known to us, move to the next + debug!( + log, "Imported a block that is already known"; + "block_slot" => block.slot, + ); + } BlockProcessingOutcome::FutureSlot { present_slot, block_slot, @@ -937,7 +937,7 @@ fn process_blocks( if present_slot + FUTURE_SLOT_TOLERANCE >= block_slot { // The block is too far in the future, drop it. 
trace!( - self.log, "Block is ahead of our slot clock"; + log, "Block is ahead of our slot clock"; "msg" => "block for future slot rejected, check your time", "present_slot" => present_slot, "block_slot" => block_slot, @@ -950,7 +950,7 @@ fn process_blocks( } else { // The block is in the future, but not too far. trace!( - self.log, "Block is slightly ahead of our slot clock, ignoring."; + log, "Block is slightly ahead of our slot clock, ignoring."; "present_slot" => present_slot, "block_slot" => block_slot, "FUTURE_SLOT_TOLERANCE" => FUTURE_SLOT_TOLERANCE, @@ -959,44 +959,41 @@ fn process_blocks( } BlockProcessingOutcome::WouldRevertFinalizedSlot { .. } => { trace!( - self.log, "Finalized or earlier block processed"; + log, "Finalized or earlier block processed"; "outcome" => format!("{:?}", outcome), ); // block reached our finalized slot or was earlier, move to the next block } BlockProcessingOutcome::GenesisBlock => { trace!( - self.log, "Genesis block was processed"; + log, "Genesis block was processed"; "outcome" => format!("{:?}", outcome), ); } - BlockProcessingOutcome::FinalizedSlot => { - trace!( - log, "Finalized or earlier block processed"; - "outcome" => format!("{:?}", outcome), - ); - // block reached our finalized slot or was earlier, move to the next block - } - _ => { - warn!( - log, "Invalid block received"; - "msg" => "peer sent invalid block", - "outcome" => format!("{:?}", outcome), - ); - return Err(format!("Invalid block at slot {}", block.slot)); + _ => { + warn!( + log, "Invalid block received"; + "msg" => "peer sent invalid block", + "outcome" => format!("{:?}", outcome), + ); + return Err(format!("Invalid block at slot {}", block.slot)); + } } + } else { + warn!( + log, "BlockProcessingFailure"; + "msg" => "unexpected condition in processing block.", + "outcome" => format!("{:?}", processing_result) + ); + return Err(format!( + "Unexpected block processing error: {:?}", + processing_result + )); } } else { - warn!( - log, 
"BlockProcessingFailure"; - "msg" => "unexpected condition in processing block.", - "outcome" => format!("{:?}", processing_result) - ); - return Err(format!( - "Unexpected block processing error: {:?}", - processing_result - )); + return Ok(()); // terminate early due to dropped beacon chain } } + Ok(()) } diff --git a/beacon_node/network/src/sync/simple_sync.rs b/beacon_node/network/src/sync/simple_sync.rs index 9d05b312b..a8b271700 100644 --- a/beacon_node/network/src/sync/simple_sync.rs +++ b/beacon_node/network/src/sync/simple_sync.rs @@ -6,7 +6,7 @@ use eth2_libp2p::rpc::{RPCEvent, RPCRequest, RPCResponse, RequestId}; use eth2_libp2p::PeerId; use slog::{debug, info, o, trace, warn}; use ssz::Encode; -use std::sync::Arc; +use std::sync::{Arc, Weak}; use store::Store; use tokio::sync::mpsc; use types::{Attestation, BeaconBlock, Epoch, EthSpec, Hash256, Slot}; @@ -57,7 +57,7 @@ pub enum SyncState { /// Simple Syncing protocol. pub struct SimpleSync { /// A reference to the underlying beacon chain. - chain: Arc>, + chain: Weak>, manager: ImportManager, network: NetworkContext, log: slog::Logger, @@ -66,7 +66,7 @@ pub struct SimpleSync { impl SimpleSync { /// Instantiate a `SimpleSync` instance, with no peers and an empty queue. pub fn new( - beacon_chain: Arc>, + beacon_chain: Weak>, network_send: mpsc::UnboundedSender, log: &slog::Logger, ) -> Self { @@ -91,8 +91,10 @@ impl SimpleSync { /// /// Sends a `Hello` message to the peer. pub fn on_connect(&mut self, peer_id: PeerId) { - self.network - .send_rpc_request(None, peer_id, RPCRequest::Hello(hello_message(&self.chain))); + if let Some(chain) = self.chain.upgrade() { + self.network + .send_rpc_request(None, peer_id, RPCRequest::Hello(hello_message(&chain))); + } } /// Handle a `Hello` request. 
@@ -104,16 +106,19 @@ impl SimpleSync { request_id: RequestId, hello: HelloMessage, ) { - trace!(self.log, "HelloRequest"; "peer" => format!("{:?}", peer_id)); + // ignore hello responses if we are shutting down + if let Some(chain) = self.chain.upgrade() { + trace!(self.log, "HelloRequest"; "peer" => format!("{:?}", peer_id)); - // Say hello back. - self.network.send_rpc_response( - peer_id.clone(), - request_id, - RPCResponse::Hello(hello_message(&self.chain)), - ); + // Say hello back. + self.network.send_rpc_response( + peer_id.clone(), + request_id, + RPCResponse::Hello(hello_message(&chain)), + ); - self.process_hello(peer_id, hello); + self.process_hello(peer_id, hello); + } } /// Process a `Hello` response from a peer. @@ -128,88 +133,107 @@ impl SimpleSync { /// /// Disconnects the peer if required. fn process_hello(&mut self, peer_id: PeerId, hello: HelloMessage) { - let remote = PeerSyncInfo::from(hello); - let local = PeerSyncInfo::from(&self.chain); - - let start_slot = |epoch: Epoch| epoch.start_slot(T::EthSpec::slots_per_epoch()); - - if local.fork_version != remote.fork_version { - // The node is on a different network/fork, disconnect them. - debug!( - self.log, "HandshakeFailure"; - "peer" => format!("{:?}", peer_id), - "reason" => "network_id" - ); - - self.network - .disconnect(peer_id.clone(), GoodbyeReason::IrrelevantNetwork); - } else if remote.finalized_epoch <= local.finalized_epoch - && remote.finalized_root != Hash256::zero() - && local.finalized_root != Hash256::zero() - && (self.root_at_slot(start_slot(remote.finalized_epoch)) - != Some(remote.finalized_root)) + // If we update the manager we may need to drive the sync. This flag lies out of scope of + // the beacon chain so that the process sync command has no long-lived beacon chain + // references. + let mut process_sync = false; { - // The remotes finalized epoch is less than or greater than ours, but the block root is - // different to the one in our chain. 
- // - // Therefore, the node is on a different chain and we should not communicate with them. - debug!( - self.log, "HandshakeFailure"; - "peer" => format!("{:?}", peer_id), - "reason" => "different finalized chain" - ); - self.network - .disconnect(peer_id.clone(), GoodbyeReason::IrrelevantNetwork); - } else if remote.finalized_epoch < local.finalized_epoch { - // The node has a lower finalized epoch, their chain is not useful to us. There are two - // cases where a node can have a lower finalized epoch: - // - // ## The node is on the same chain - // - // If a node is on the same chain but has a lower finalized epoch, their head must be - // lower than ours. Therefore, we have nothing to request from them. - // - // ## The node is on a fork - // - // If a node is on a fork that has a lower finalized epoch, switching to that fork would - // cause us to revert a finalized block. This is not permitted, therefore we have no - // interest in their blocks. - debug!( - self.log, - "NaivePeer"; - "peer" => format!("{:?}", peer_id), - "reason" => "lower finalized epoch" - ); - } else if self - .chain - .store - .exists::>(&remote.head_root) - .unwrap_or_else(|_| false) - { - trace!( - self.log, "Out of date or potentially sync'd peer found"; - "peer" => format!("{:?}", peer_id), - "remote_head_slot" => remote.head_slot, - "remote_latest_finalized_epoch" => remote.finalized_epoch, - ); + // scope of beacon chain reference + let chain = match self.chain.upgrade() { + Some(chain) => chain, + None => { + info!(self.log, "Sync shutting down"; + "reason" => "Beacon chain dropped"); + return; + } + }; - // If the node's best-block is already known to us and they are close to our current - // head, treat them as a fully sync'd peer. - self.manager.add_peer(peer_id, remote); - self.process_sync(); - } else { - // The remote node has an equal or great finalized epoch and we don't know it's head. 
- // - // Therefore, there are some blocks between the local finalized epoch and the remote - // head that are worth downloading. - debug!( - self.log, "UsefulPeer"; - "peer" => format!("{:?}", peer_id), - "local_finalized_epoch" => local.finalized_epoch, - "remote_latest_finalized_epoch" => remote.finalized_epoch, - ); + let remote = PeerSyncInfo::from(hello); + let local = PeerSyncInfo::from(&chain); - self.manager.add_peer(peer_id, remote); + let start_slot = |epoch: Epoch| epoch.start_slot(T::EthSpec::slots_per_epoch()); + + if local.fork_version != remote.fork_version { + // The node is on a different network/fork, disconnect them. + debug!( + self.log, "HandshakeFailure"; + "peer" => format!("{:?}", peer_id), + "reason" => "network_id" + ); + + self.network + .disconnect(peer_id.clone(), GoodbyeReason::IrrelevantNetwork); + } else if remote.finalized_epoch <= local.finalized_epoch + && remote.finalized_root != Hash256::zero() + && local.finalized_root != Hash256::zero() + && (chain.root_at_slot(start_slot(remote.finalized_epoch)) + != Some(remote.finalized_root)) + { + // The remotes finalized epoch is less than or greater than ours, but the block root is + // different to the one in our chain. + // + // Therefore, the node is on a different chain and we should not communicate with them. + debug!( + self.log, "HandshakeFailure"; + "peer" => format!("{:?}", peer_id), + "reason" => "different finalized chain" + ); + self.network + .disconnect(peer_id.clone(), GoodbyeReason::IrrelevantNetwork); + } else if remote.finalized_epoch < local.finalized_epoch { + // The node has a lower finalized epoch, their chain is not useful to us. There are two + // cases where a node can have a lower finalized epoch: + // + // ## The node is on the same chain + // + // If a node is on the same chain but has a lower finalized epoch, their head must be + // lower than ours. Therefore, we have nothing to request from them. 
+ // + // ## The node is on a fork + // + // If a node is on a fork that has a lower finalized epoch, switching to that fork would + // cause us to revert a finalized block. This is not permitted, therefore we have no + // interest in their blocks. + debug!( + self.log, + "NaivePeer"; + "peer" => format!("{:?}", peer_id), + "reason" => "lower finalized epoch" + ); + } else if chain + .store + .exists::>(&remote.head_root) + .unwrap_or_else(|_| false) + { + trace!( + self.log, "Peer with known chain found"; + "peer" => format!("{:?}", peer_id), + "remote_head_slot" => remote.head_slot, + "remote_latest_finalized_epoch" => remote.finalized_epoch, + ); + + // If the node's best-block is already known to us and they are close to our current + // head, treat them as a fully sync'd peer. + self.manager.add_peer(peer_id, remote); + process_sync = true; + } else { + // The remote node has an equal or great finalized epoch and we don't know it's head. + // + // Therefore, there are some blocks between the local finalized epoch and the remote + // head that are worth downloading. 
+ debug!( + self.log, "UsefulPeer"; + "peer" => format!("{:?}", peer_id), + "local_finalized_epoch" => local.finalized_epoch, + "remote_latest_finalized_epoch" => remote.finalized_epoch, + ); + + self.manager.add_peer(peer_id, remote); + process_sync = true + } + } // end beacon chain reference scope + + if process_sync { self.process_sync(); } } @@ -226,11 +250,13 @@ impl SimpleSync { "method" => "HELLO", "peer" => format!("{:?}", peer_id) ); - self.network.send_rpc_request( - None, - peer_id, - RPCRequest::Hello(hello_message(&self.chain)), - ); + if let Some(chain) = self.chain.upgrade() { + self.network.send_rpc_request( + None, + peer_id, + RPCRequest::Hello(hello_message(&chain)), + ); + } } ImportManagerOutcome::RequestBlocks { peer_id, @@ -283,14 +309,6 @@ impl SimpleSync { } } - //TODO: Move to beacon chain - fn root_at_slot(&self, target_slot: Slot) -> Option { - self.chain - .rev_iter_block_roots() - .find(|(_root, slot)| *slot == target_slot) - .map(|(root, _slot)| root) - } - /// Handle a `RecentBeaconBlocks` request from the peer. 
pub fn on_recent_beacon_blocks_request( &mut self, @@ -298,11 +316,20 @@ impl SimpleSync { request_id: RequestId, request: RecentBeaconBlocksRequest, ) { + let chain = match self.chain.upgrade() { + Some(chain) => chain, + None => { + info!(self.log, "Sync shutting down"; + "reason" => "Beacon chain dropped"); + return; + } + }; + let blocks: Vec> = request .block_roots .iter() .filter_map(|root| { - if let Ok(Some(block)) = self.chain.store.get::>(root) { + if let Ok(Some(block)) = chain.store.get::>(root) { Some(block) } else { debug!( @@ -319,7 +346,7 @@ impl SimpleSync { debug!( self.log, - "BlockBodiesRequest"; + "RecentBeaconBlocksRequest"; "peer" => format!("{:?}", peer_id), "requested" => request.block_roots.len(), "returned" => blocks.len(), @@ -339,6 +366,15 @@ impl SimpleSync { request_id: RequestId, req: BeaconBlocksRequest, ) { + let chain = match self.chain.upgrade() { + Some(chain) => chain, + None => { + info!(self.log, "Sync shutting down"; + "reason" => "Beacon chain dropped"); + return; + } + }; + debug!( self.log, "BeaconBlocksRequest"; @@ -352,15 +388,14 @@ impl SimpleSync { // In the current implementation we read from the db then filter out out-of-range blocks. // Improving the db schema to prevent this would be ideal. 
- let mut blocks: Vec> = self - .chain + let mut blocks: Vec> = chain .rev_iter_block_roots() .filter(|(_root, slot)| { req.start_slot <= slot.as_u64() && req.start_slot + req.count > slot.as_u64() }) .take_while(|(_root, slot)| req.start_slot <= slot.as_u64()) .filter_map(|(root, _slot)| { - if let Ok(Some(block)) = self.chain.store.get::>(&root) { + if let Ok(Some(block)) = chain.store.get::>(&root) { Some(block) } else { warn!( @@ -378,18 +413,16 @@ impl SimpleSync { blocks.reverse(); blocks.dedup_by_key(|brs| brs.slot); - if blocks.len() as u64 != req.count { - debug!( - self.log, - "BeaconBlocksRequest response"; - "peer" => format!("{:?}", peer_id), - "msg" => "Failed to return all requested hashes", - "start_slot" => req.start_slot, - "current_slot" => format!("{:?}", self.chain.slot()), - "requested" => req.count, - "returned" => blocks.len(), - ); - } + debug!( + self.log, + "BeaconBlocksRequest response"; + "peer" => format!("{:?}", peer_id), + "msg" => "Failed to return all requested hashes", + "start_slot" => req.start_slot, + "current_slot" => chain.slot().unwrap_or_else(|_| Slot::from(0_u64)).as_u64(), + "requested" => req.count, + "returned" => blocks.len(), + ); self.network.send_rpc_response( peer_id, @@ -444,7 +477,16 @@ impl SimpleSync { /// /// Returns a `bool` which, if `true`, indicates we should forward the block to our peers. pub fn on_block_gossip(&mut self, peer_id: PeerId, block: BeaconBlock) -> bool { - if let Ok(outcome) = self.chain.process_block(block.clone()) { + let chain = match self.chain.upgrade() { + Some(chain) => chain, + None => { + info!(self.log, "Sync shutting down"; + "reason" => "Beacon chain dropped"); + return false; + } + }; + + if let Ok(outcome) = chain.process_block(block.clone()) { match outcome { BlockProcessingOutcome::Processed { .. } => { trace!(self.log, "Gossipsub block processed"; @@ -477,7 +519,16 @@ impl SimpleSync { /// /// Not currently implemented. 
pub fn on_attestation_gossip(&mut self, _peer_id: PeerId, msg: Attestation) { - match self.chain.process_attestation(msg) { + let chain = match self.chain.upgrade() { + Some(chain) => chain, + None => { + info!(self.log, "Sync shutting down"; + "reason" => "Beacon chain dropped"); + return; + } + }; + + match chain.process_attestation(msg) { Ok(outcome) => info!( self.log, "Processed attestation"; @@ -489,11 +540,6 @@ impl SimpleSync { } } } - - /// Generates our current state in the form of a HELLO RPC message. - pub fn generate_hello(&self) -> HelloMessage { - hello_message(&self.chain) - } } /// Build a `HelloMessage` representing the state of the given `beacon_chain`. From 289f8d13b00c4984ab61324f989eff028e5cc207 Mon Sep 17 00:00:00 2001 From: Michael Sproul Date: Thu, 5 Sep 2019 10:19:52 +1000 Subject: [PATCH 226/305] Cleanups and SSZ generic container tests --- tests/ef_tests/src/bls_setting.rs | 1 - tests/ef_tests/src/case_result.rs | 11 ++- tests/ef_tests/src/cases.rs | 12 +-- tests/ef_tests/src/cases/epoch_processing.rs | 4 - .../src/cases/genesis_initialization.rs | 4 - tests/ef_tests/src/cases/genesis_validity.rs | 13 +-- tests/ef_tests/src/cases/operations.rs | 8 +- tests/ef_tests/src/cases/sanity_blocks.rs | 7 -- tests/ef_tests/src/cases/sanity_slots.rs | 7 -- tests/ef_tests/src/cases/ssz_generic.rs | 79 +++++++++++++++-- tests/ef_tests/src/cases/ssz_static.rs | 1 - tests/ef_tests/src/handler.rs | 16 ++-- tests/ef_tests/tests/tests.rs | 84 ++++++++++--------- 13 files changed, 137 insertions(+), 110 deletions(-) diff --git a/tests/ef_tests/src/bls_setting.rs b/tests/ef_tests/src/bls_setting.rs index 79990c8ee..add7d8b7b 100644 --- a/tests/ef_tests/src/bls_setting.rs +++ b/tests/ef_tests/src/bls_setting.rs @@ -2,7 +2,6 @@ use self::BlsSetting::*; use crate::error::Error; use serde_repr::Deserialize_repr; -// TODO: use this in every test case #[derive(Deserialize_repr, Debug, Clone, Copy)] #[repr(u8)] pub enum BlsSetting { diff --git 
a/tests/ef_tests/src/case_result.rs b/tests/ef_tests/src/case_result.rs index add428ec5..9df60f402 100644 --- a/tests/ef_tests/src/case_result.rs +++ b/tests/ef_tests/src/case_result.rs @@ -1,7 +1,7 @@ use super::*; use compare_fields::{CompareFields, Comparison, FieldComparison}; use std::fmt::Debug; -use std::path::PathBuf; +use std::path::{Path, PathBuf}; use types::BeaconState; pub const MAX_VALUE_STRING_LEN: usize = 500; @@ -15,11 +15,16 @@ pub struct CaseResult { } impl CaseResult { - pub fn new(case_index: usize, case: &impl Case, result: Result<(), Error>) -> Self { + pub fn new( + case_index: usize, + path: &Path, + case: &impl Case, + result: Result<(), Error>, + ) -> Self { CaseResult { case_index, desc: case.description(), - path: case.path().into(), + path: path.into(), result, } } diff --git a/tests/ef_tests/src/cases.rs b/tests/ef_tests/src/cases.rs index 279086b68..c5b0d8c4f 100644 --- a/tests/ef_tests/src/cases.rs +++ b/tests/ef_tests/src/cases.rs @@ -1,7 +1,7 @@ use super::*; use rayon::prelude::*; use std::fmt::Debug; -use std::path::Path; +use std::path::{Path, PathBuf}; mod bls_aggregate_pubkeys; mod bls_aggregate_sigs; @@ -50,12 +50,6 @@ pub trait Case: Debug + Sync { "no description".to_string() } - /// Path to the directory for this test case. - fn path(&self) -> &Path { - // FIXME(michael): remove default impl - Path::new("") - } - /// Execute a test and return the result. /// /// `case_index` reports the index of the case in the set of test cases. 
It is not strictly @@ -65,7 +59,7 @@ pub trait Case: Debug + Sync { #[derive(Debug)] pub struct Cases { - pub test_cases: Vec, + pub test_cases: Vec<(PathBuf, T)>, } impl Cases { @@ -73,7 +67,7 @@ impl Cases { self.test_cases .into_par_iter() .enumerate() - .map(|(i, tc)| CaseResult::new(i, tc, tc.result(i))) + .map(|(i, (ref path, ref tc))| CaseResult::new(i, path, tc, tc.result(i))) .collect() } } diff --git a/tests/ef_tests/src/cases/epoch_processing.rs b/tests/ef_tests/src/cases/epoch_processing.rs index 2a2dde629..ece69b3fe 100644 --- a/tests/ef_tests/src/cases/epoch_processing.rs +++ b/tests/ef_tests/src/cases/epoch_processing.rs @@ -125,10 +125,6 @@ impl> Case for EpochProcessing { .unwrap_or_else(String::new) } - fn path(&self) -> &Path { - &self.path - } - fn result(&self, _case_index: usize) -> Result<(), Error> { let mut state = self.pre.clone(); let mut expected = self.post.clone(); diff --git a/tests/ef_tests/src/cases/genesis_initialization.rs b/tests/ef_tests/src/cases/genesis_initialization.rs index bd0507b9d..0fb64ccb3 100644 --- a/tests/ef_tests/src/cases/genesis_initialization.rs +++ b/tests/ef_tests/src/cases/genesis_initialization.rs @@ -45,10 +45,6 @@ impl LoadCase for GenesisInitialization { } impl Case for GenesisInitialization { - fn path(&self) -> &Path { - &self.path - } - fn result(&self, _case_index: usize) -> Result<(), Error> { let spec = &E::default_spec(); diff --git a/tests/ef_tests/src/cases/genesis_validity.rs b/tests/ef_tests/src/cases/genesis_validity.rs index 3a1b9e267..f72ac4c3e 100644 --- a/tests/ef_tests/src/cases/genesis_validity.rs +++ b/tests/ef_tests/src/cases/genesis_validity.rs @@ -2,13 +2,12 @@ use super::*; use crate::decode::{ssz_decode_file, yaml_decode_file}; use serde_derive::Deserialize; use state_processing::is_valid_genesis_state; -use std::path::{Path, PathBuf}; +use std::path::Path; use types::{BeaconState, EthSpec}; #[derive(Debug, Clone, Deserialize)] #[serde(bound = "E: EthSpec")] pub struct 
GenesisValidity { - pub path: PathBuf, pub genesis: BeaconState, pub is_valid: bool, } @@ -18,19 +17,11 @@ impl LoadCase for GenesisValidity { let genesis = ssz_decode_file(&path.join("genesis.ssz"))?; let is_valid = yaml_decode_file(&path.join("is_valid.yaml"))?; - Ok(Self { - path: path.into(), - genesis, - is_valid, - }) + Ok(Self { genesis, is_valid }) } } impl Case for GenesisValidity { - fn path(&self) -> &Path { - &self.path - } - fn result(&self, _case_index: usize) -> Result<(), Error> { let spec = &E::default_spec(); diff --git a/tests/ef_tests/src/cases/operations.rs b/tests/ef_tests/src/cases/operations.rs index 7b4ffff98..89fa3ccca 100644 --- a/tests/ef_tests/src/cases/operations.rs +++ b/tests/ef_tests/src/cases/operations.rs @@ -11,7 +11,7 @@ use state_processing::per_block_processing::{ process_transfers, }; use std::fmt::Debug; -use std::path::{Path, PathBuf}; +use std::path::Path; use types::{ Attestation, AttesterSlashing, BeaconBlock, BeaconState, ChainSpec, Deposit, EthSpec, ProposerSlashing, Transfer, VoluntaryExit, @@ -25,7 +25,6 @@ struct Metadata { #[derive(Debug, Clone)] pub struct Operations> { - pub path: PathBuf, metadata: Metadata, pub pre: BeaconState, pub operation: O, @@ -156,7 +155,6 @@ impl> LoadCase for Operations { }; Ok(Self { - path: path.into(), metadata, pre, operation, @@ -173,10 +171,6 @@ impl> Case for Operations { .unwrap_or_else(String::new) } - fn path(&self) -> &Path { - &self.path - } - fn result(&self, _case_index: usize) -> Result<(), Error> { self.metadata.bls_setting.unwrap_or_default().check()?; diff --git a/tests/ef_tests/src/cases/sanity_blocks.rs b/tests/ef_tests/src/cases/sanity_blocks.rs index 9fadea42e..292f47415 100644 --- a/tests/ef_tests/src/cases/sanity_blocks.rs +++ b/tests/ef_tests/src/cases/sanity_blocks.rs @@ -6,7 +6,6 @@ use serde_derive::Deserialize; use state_processing::{ per_block_processing, per_slot_processing, BlockInvalid, BlockProcessingError, }; -use std::path::PathBuf; use 
types::{BeaconBlock, BeaconState, EthSpec, RelativeEpoch}; #[derive(Debug, Clone, Deserialize)] @@ -19,7 +18,6 @@ pub struct Metadata { #[derive(Debug, Clone, Deserialize)] #[serde(bound = "E: EthSpec")] pub struct SanityBlocks { - pub path: PathBuf, pub metadata: Metadata, pub pre: BeaconState, pub blocks: Vec>, @@ -44,7 +42,6 @@ impl LoadCase for SanityBlocks { }; Ok(Self { - path: path.into(), metadata, pre, blocks, @@ -61,10 +58,6 @@ impl Case for SanityBlocks { .unwrap_or_else(String::new) } - fn path(&self) -> &Path { - &self.path - } - fn result(&self, _case_index: usize) -> Result<(), Error> { self.metadata.bls_setting.unwrap_or_default().check()?; diff --git a/tests/ef_tests/src/cases/sanity_slots.rs b/tests/ef_tests/src/cases/sanity_slots.rs index 34acb1105..e9b80a252 100644 --- a/tests/ef_tests/src/cases/sanity_slots.rs +++ b/tests/ef_tests/src/cases/sanity_slots.rs @@ -4,7 +4,6 @@ use crate::case_result::compare_beacon_state_results_without_caches; use crate::decode::{ssz_decode_file, yaml_decode_file}; use serde_derive::Deserialize; use state_processing::per_slot_processing; -use std::path::PathBuf; use types::{BeaconState, EthSpec}; #[derive(Debug, Clone, Default, Deserialize)] @@ -16,7 +15,6 @@ pub struct Metadata { #[derive(Debug, Clone, Deserialize)] #[serde(bound = "E: EthSpec")] pub struct SanitySlots { - pub path: PathBuf, pub metadata: Metadata, pub pre: BeaconState, pub slots: u64, @@ -41,7 +39,6 @@ impl LoadCase for SanitySlots { }; Ok(Self { - path: path.into(), metadata, pre, slots, @@ -58,10 +55,6 @@ impl Case for SanitySlots { .unwrap_or_else(String::new) } - fn path(&self) -> &Path { - &self.path - } - fn result(&self, _case_index: usize) -> Result<(), Error> { self.metadata.bls_setting.unwrap_or_default().check()?; diff --git a/tests/ef_tests/src/cases/ssz_generic.rs b/tests/ef_tests/src/cases/ssz_generic.rs index 5f9cd3faf..ce43f3c50 100644 --- a/tests/ef_tests/src/cases/ssz_generic.rs +++ b/tests/ef_tests/src/cases/ssz_generic.rs @@ 
-1,12 +1,16 @@ +#![allow(non_snake_case)] + use super::*; use crate::cases::common::{SszStaticType, TestU128, TestU256}; use crate::cases::ssz_static::{check_serialization, check_tree_hash}; use crate::decode::yaml_decode_file; use serde_derive::Deserialize; +use ssz_derive::{Decode, Encode}; use std::fs; use std::path::{Path, PathBuf}; +use tree_hash_derive::TreeHash; use types::typenum::*; -use types::{BitList, BitVector, FixedVector}; +use types::{BitList, BitVector, FixedVector, VariableList}; #[derive(Debug, Clone, Deserialize)] struct Metadata { @@ -54,7 +58,7 @@ macro_rules! type_dispatch { "uint64" => type_dispatch!($function, ($($arg),*), $base_ty, <$($param_ty,)* u64>, $($rest)*), "uint128" => type_dispatch!($function, ($($arg),*), $base_ty, <$($param_ty,)* TestU128>, $($rest)*), "uint256" => type_dispatch!($function, ($($arg),*), $base_ty, <$($param_ty,)* TestU256>, $($rest)*), - _ => { println!("unsupported: {}", $value); Ok(()) }, + _ => Err(Error::FailedToParseTest(format!("unsupported: {}", $value))), } }; ($function:ident, @@ -86,7 +90,23 @@ macro_rules! 
type_dispatch { "2048" => type_dispatch!($function, ($($arg),*), $base_ty, <$($param_ty,)* U2048>, $($rest)*), "4096" => type_dispatch!($function, ($($arg),*), $base_ty, <$($param_ty,)* U4096>, $($rest)*), "8192" => type_dispatch!($function, ($($arg),*), $base_ty, <$($param_ty,)* U8192>, $($rest)*), - _ => { println!("unsupported: {}", $value); Ok(()) }, + _ => Err(Error::FailedToParseTest(format!("unsupported: {}", $value))), + } + }; + ($function:ident, + ($($arg:expr),*), + $base_ty:tt, + <$($param_ty:ty),*>, + [ $value:expr => test_container ] $($rest:tt)*) => { + match $value { + "SingleFieldTestStruct" => type_dispatch!($function, ($($arg),*), $base_ty, <$($param_ty,)* SingleFieldTestStruct>, $($rest)*), + "SmallTestStruct" => type_dispatch!($function, ($($arg),*), $base_ty, <$($param_ty,)* SmallTestStruct>, $($rest)*), + "FixedTestStruct" => type_dispatch!($function, ($($arg),*), $base_ty, <$($param_ty,)* FixedTestStruct>, $($rest)*), + "VarTestStruct" => type_dispatch!($function, ($($arg),*), $base_ty, <$($param_ty,)* VarTestStruct>, $($rest)*), + "BitsStruct" => type_dispatch!($function, ($($arg),*), $base_ty, <$($param_ty,)* BitsStruct>, $($rest)*), + // TODO: enable ComplexTestStruct + "ComplexTestStruct" => Err(Error::SkippedKnownFailure), + _ => Err(Error::FailedToParseTest(format!("unsupported: {}", $value))), } }; // No base type: apply type params to function @@ -99,10 +119,6 @@ macro_rules! 
type_dispatch { } impl Case for SszGeneric { - fn path(&self) -> &Path { - &self.path - } - fn result(&self, _case_index: usize) -> Result<(), Error> { let parts = self.case_name.split('_').collect::>(); @@ -162,7 +178,17 @@ impl Case for SszGeneric { [type_name.as_str() => primitive_type] )?; } - // FIXME(michael): support for the containers tests + "containers" => { + let type_name = parts[0]; + + type_dispatch!( + ssz_generic_test, + (&self.path), + _, + <>, + [type_name => test_container] + )?; + } _ => panic!("unsupported handler: {}", self.handler_name), } Ok(()) @@ -187,7 +213,7 @@ fn ssz_generic_test(path: &Path) -> Result<(), Error> { }; // Valid - // TODO: signing root + // TODO: signing root (annoying because of traits) if let Some(value) = value { check_serialization(&value, &serialized)?; @@ -207,3 +233,38 @@ fn ssz_generic_test(path: &Path) -> Result<(), Error> { Ok(()) } + +// Containers for SSZ generic tests +#[derive(Debug, Clone, Default, PartialEq, Decode, Encode, TreeHash, Deserialize)] +struct SingleFieldTestStruct { + A: u8, +} + +#[derive(Debug, Clone, Default, PartialEq, Decode, Encode, TreeHash, Deserialize)] +struct SmallTestStruct { + A: u16, + B: u16, +} + +#[derive(Debug, Clone, Default, PartialEq, Decode, Encode, TreeHash, Deserialize)] +struct FixedTestStruct { + A: u8, + B: u64, + C: u32, +} + +#[derive(Debug, Clone, Default, PartialEq, Decode, Encode, TreeHash, Deserialize)] +struct VarTestStruct { + A: u16, + B: VariableList, + C: u8, +} + +#[derive(Debug, Clone, PartialEq, Decode, Encode, TreeHash, Deserialize)] +struct BitsStruct { + A: BitList, + B: BitVector, + C: BitVector, + D: BitList, + E: BitVector, +} diff --git a/tests/ef_tests/src/cases/ssz_static.rs b/tests/ef_tests/src/cases/ssz_static.rs index d1c9b1048..6e4a672cb 100644 --- a/tests/ef_tests/src/cases/ssz_static.rs +++ b/tests/ef_tests/src/cases/ssz_static.rs @@ -28,7 +28,6 @@ pub struct SszStaticSR { } fn load_from_dir(path: &Path) -> Result<(SszStaticRoots, Vec, 
T), Error> { - // FIXME(michael): set description/name let roots = yaml_decode_file(&path.join("roots.yaml"))?; let serialized = fs::read(&path.join("serialized.ssz")).expect("serialized.ssz exists"); let value = yaml_decode_file(&path.join("value.yaml"))?; diff --git a/tests/ef_tests/src/handler.rs b/tests/ef_tests/src/handler.rs index b6334c383..e5d175e11 100644 --- a/tests/ef_tests/src/handler.rs +++ b/tests/ef_tests/src/handler.rs @@ -32,19 +32,21 @@ pub trait Handler { .join(Self::handler_name()); // Iterate through test suites - // TODO: parallelism - // TODO: error handling? let test_cases = fs::read_dir(&handler_path) - .expect("open main directory") + .expect("handler dir exists") .flat_map(|entry| { entry .ok() .filter(|e| e.file_type().map(|ty| ty.is_dir()).unwrap_or(false)) }) - .flat_map(|suite| fs::read_dir(suite.path()).expect("open suite dir")) + .flat_map(|suite| fs::read_dir(suite.path()).expect("suite dir exists")) .flat_map(Result::ok) - .map(|test_case_dir| Self::Case::load_from_dir(&test_case_dir.path()).expect("loads")) - .collect::>(); + .map(|test_case_dir| { + let path = test_case_dir.path(); + let case = Self::Case::load_from_dir(&path).expect("test should load"); + (path, case) + }) + .collect(); let results = Cases { test_cases }.test_results(); @@ -286,3 +288,5 @@ pub struct Boolean; type_name!(Boolean, "boolean"); pub struct Uints; type_name!(Uints, "uints"); +pub struct Containers; +type_name!(Containers, "containers"); diff --git a/tests/ef_tests/tests/tests.rs b/tests/ef_tests/tests/tests.rs index 71fa53c66..337c54b46 100644 --- a/tests/ef_tests/tests/tests.rs +++ b/tests/ef_tests/tests/tests.rs @@ -1,19 +1,5 @@ use ef_tests::*; -use types::{ - Attestation, AttestationData, AttestationDataAndCustodyBit, AttesterSlashing, BeaconBlock, - BeaconBlockBody, BeaconBlockHeader, BeaconState, Checkpoint, CompactCommittee, Crosslink, - Deposit, DepositData, Eth1Data, Fork, HistoricalBatch, IndexedAttestation, MainnetEthSpec, - 
MinimalEthSpec, PendingAttestation, ProposerSlashing, Transfer, Validator, VoluntaryExit, -}; - -#[test] -fn ssz_generic() { - SszGenericHandler::::run(); - SszGenericHandler::::run(); - SszGenericHandler::::run(); - SszGenericHandler::::run(); - SszGenericHandler::::run(); -} +use types::*; #[test] fn shuffling() { @@ -105,6 +91,7 @@ fn bls_sign_msg() { BlsSignMsgHandler::run(); } +#[cfg(feature = "fake_crypto")] macro_rules! ssz_static_test { // Signed-root ($test_name:ident, $typ:ident$(<$generics:tt>)?, SR) => { @@ -135,7 +122,6 @@ macro_rules! ssz_static_test { // Base case ($test_name:ident, $handler:ident, { $(($typ:ty, $spec:ident)),+ }) => { #[test] - #[cfg(feature = "fake_crypto")] fn $test_name() { $( $handler::<$typ, $spec>::run(); @@ -144,31 +130,47 @@ macro_rules! ssz_static_test { }; } -ssz_static_test!(ssz_static_attestation, Attestation<_>, SR); -ssz_static_test!(ssz_static_attestation_data, AttestationData); -ssz_static_test!( - ssz_static_attestation_data_and_custody_bit, - AttestationDataAndCustodyBit -); -ssz_static_test!(ssz_static_attester_slashing, AttesterSlashing<_>); -ssz_static_test!(ssz_static_beacon_block, BeaconBlock<_>, SR); -ssz_static_test!(ssz_static_beacon_block_body, BeaconBlockBody<_>); -ssz_static_test!(ssz_static_beacon_block_header, BeaconBlockHeader, SR); -ssz_static_test!(ssz_static_beacon_state, BeaconState<_>); -ssz_static_test!(ssz_static_checkpoint, Checkpoint); -ssz_static_test!(ssz_static_compact_committee, CompactCommittee<_>); -ssz_static_test!(ssz_static_crosslink, Crosslink); -ssz_static_test!(ssz_static_deposit, Deposit); -ssz_static_test!(ssz_static_deposit_data, DepositData, SR); -ssz_static_test!(ssz_static_eth1_data, Eth1Data); -ssz_static_test!(ssz_static_fork, Fork); -ssz_static_test!(ssz_static_historical_batch, HistoricalBatch<_>); -ssz_static_test!(ssz_static_indexed_attestation, IndexedAttestation<_>, SR); -ssz_static_test!(ssz_static_pending_attestation, PendingAttestation<_>); 
-ssz_static_test!(ssz_static_proposer_slashing, ProposerSlashing); -ssz_static_test!(ssz_static_transfer, Transfer, SR); -ssz_static_test!(ssz_static_validator, Validator); -ssz_static_test!(ssz_static_voluntary_exit, VoluntaryExit, SR); +#[cfg(feature = "fake_crypto")] +mod ssz_static { + use ef_tests::{Handler, SszStaticHandler, SszStaticSRHandler}; + use types::*; + + ssz_static_test!(attestation, Attestation<_>, SR); + ssz_static_test!(attestation_data, AttestationData); + ssz_static_test!( + attestation_data_and_custody_bit, + AttestationDataAndCustodyBit + ); + ssz_static_test!(attester_slashing, AttesterSlashing<_>); + ssz_static_test!(beacon_block, BeaconBlock<_>, SR); + ssz_static_test!(beacon_block_body, BeaconBlockBody<_>); + ssz_static_test!(beacon_block_header, BeaconBlockHeader, SR); + ssz_static_test!(beacon_state, BeaconState<_>); + ssz_static_test!(checkpoint, Checkpoint); + ssz_static_test!(compact_committee, CompactCommittee<_>); + ssz_static_test!(crosslink, Crosslink); + ssz_static_test!(deposit, Deposit); + ssz_static_test!(deposit_data, DepositData, SR); + ssz_static_test!(eth1_data, Eth1Data); + ssz_static_test!(fork, Fork); + ssz_static_test!(historical_batch, HistoricalBatch<_>); + ssz_static_test!(indexed_attestation, IndexedAttestation<_>, SR); + ssz_static_test!(pending_attestation, PendingAttestation<_>); + ssz_static_test!(proposer_slashing, ProposerSlashing); + ssz_static_test!(transfer, Transfer, SR); + ssz_static_test!(validator, Validator); + ssz_static_test!(voluntary_exit, VoluntaryExit, SR); +} + +#[test] +fn ssz_generic() { + SszGenericHandler::::run(); + SszGenericHandler::::run(); + SszGenericHandler::::run(); + SszGenericHandler::::run(); + SszGenericHandler::::run(); + SszGenericHandler::::run(); +} #[test] fn epoch_processing_justification_and_finalization() { From a074d8f09b83dcb4b3a52464c1113d76c2139574 Mon Sep 17 00:00:00 2001 From: Paul Hauner Date: Thu, 5 Sep 2019 16:10:57 +1000 Subject: [PATCH 227/305] Update book 
--- book/src/interop-cheat-sheet.md | 10 ++++++++++ book/src/interop-scenarios.md | 5 ++++- book/src/interop-tips.md | 1 - 3 files changed, 14 insertions(+), 2 deletions(-) delete mode 100644 book/src/interop-tips.md diff --git a/book/src/interop-cheat-sheet.md b/book/src/interop-cheat-sheet.md index ea7794c33..7fea539ea 100644 --- a/book/src/interop-cheat-sheet.md +++ b/book/src/interop-cheat-sheet.md @@ -9,6 +9,7 @@ interop testing. - [Avoid port clashes when starting multiple nodes](#port-bump) - [Specify a custom slot time](#slot-time) - Using the beacon node HTTP API: + - [Pretty-print the genesis state and state root](#http-state) - [Curl a node's ENR](#http-enr) - [Curl a node's connected peers](#http-peer-ids) - [Curl a node's local peer id](#http-peer-id) @@ -82,6 +83,15 @@ $ ./beacon_node testnet -t 500 recent 8 Examples assume there is a Lighthouse node exposing a HTTP API on `localhost:5052`. Responses are JSON. + +### Pretty-print the genesis state and state root + +Returns the genesis state and state root in your terminal, in YAML. + +``` +$ curl --header "Content-Type: application/yaml" "localhost:5052/beacon/state?slot=0" +``` + ### Get the node's ENR diff --git a/book/src/interop-scenarios.md b/book/src/interop-scenarios.md index dc8789362..5e44d822a 100644 --- a/book/src/interop-scenarios.md +++ b/book/src/interop-scenarios.md @@ -25,8 +25,11 @@ cheat-sheet](./interop-cheat-sheet.md). To start a brand-new beacon node (with no history) use: ``` -$ ./beacon_node testnet -f quick 8 1567222226 +$ ./beacon_node testnet -f quick 8 ``` + +Where `GENESIS_TIME` is in [unix time](https://duckduckgo.com/?q=unix+time&t=ffab&ia=answer). 
+ > Notes: > > - This method conforms the ["Quick-start diff --git a/book/src/interop-tips.md b/book/src/interop-tips.md deleted file mode 100644 index 0d52e896a..000000000 --- a/book/src/interop-tips.md +++ /dev/null @@ -1 +0,0 @@ -# Interop Tips & Tricks From 6cbef7b58bc84c43c1dcbf29d5e9eef63fd7c7ad Mon Sep 17 00:00:00 2001 From: Luke Anderson Date: Thu, 5 Sep 2019 20:06:46 +1000 Subject: [PATCH 228/305] Undoing changes to ChainSpec. The discrepancies from the Eth2.0 spec are necessary in our case. --- docs/api_spec.yaml | 4 ---- eth2/types/src/chain_spec.rs | 6 +----- 2 files changed, 1 insertion(+), 9 deletions(-) diff --git a/docs/api_spec.yaml b/docs/api_spec.yaml index ced07e96d..23608807e 100644 --- a/docs/api_spec.yaml +++ b/docs/api_spec.yaml @@ -1896,10 +1896,6 @@ components: type: object description: "Stores all of the values which specify a particular chain. The `ChainSpec` object in Lighthouse" properties: - far_future_epoch: - type: integer - format: uint64 - example: 18446744073709551615 base_rewards_per_epoch: type: integer format: uint64 diff --git a/eth2/types/src/chain_spec.rs b/eth2/types/src/chain_spec.rs index 17e9dba49..d59e0db0a 100644 --- a/eth2/types/src/chain_spec.rs +++ b/eth2/types/src/chain_spec.rs @@ -24,13 +24,11 @@ pub struct ChainSpec { /* * Constants */ + #[serde(skip_serializing)] // skipped because Serde TOML has trouble with u64::max pub far_future_epoch: Epoch, - // The above may need to be skipped because Serde TOML has trouble with u64::max. 
- // Use: #[serde(skip_serializing)] pub base_rewards_per_epoch: u64, pub deposit_contract_tree_depth: u64, pub seconds_per_day: u64, - //TODO missing JUSTIFICATION_BITS_LENGTH and ENDIANNESS /* * Misc @@ -54,7 +52,6 @@ pub struct ChainSpec { * Initial Values */ pub genesis_slot: Slot, - //TODO Missing genesis_epoch #[serde(deserialize_with = "u8_from_hex_str", serialize_with = "u8_to_hex_str")] pub bls_withdrawal_prefix_byte: u8, @@ -62,7 +59,6 @@ pub struct ChainSpec { * Time parameters */ pub milliseconds_per_slot: u64, - //TODO should we also have SECONDS_PER_SLOT? pub min_attestation_inclusion_delay: u64, pub min_seed_lookahead: Epoch, pub activation_exit_delay: u64, From 940ddd0d13b9b7ecdd322d63989b14bef88eedbd Mon Sep 17 00:00:00 2001 From: Paul Hauner Date: Thu, 5 Sep 2019 20:57:48 +1000 Subject: [PATCH 229/305] Use michael's milagro in interop keypairs --- eth2/utils/eth2_interop_keypairs/Cargo.toml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/eth2/utils/eth2_interop_keypairs/Cargo.toml b/eth2/utils/eth2_interop_keypairs/Cargo.toml index 31f9718cd..d8a111855 100644 --- a/eth2/utils/eth2_interop_keypairs/Cargo.toml +++ b/eth2/utils/eth2_interop_keypairs/Cargo.toml @@ -10,7 +10,7 @@ edition = "2018" lazy_static = "1.4" num-bigint = "0.2" eth2_hashing = "0.1" -milagro_bls = { git = "https://github.com/sigp/milagro_bls", tag = "v0.10.0" } +milagro_bls = { git = "https://github.com/michaelsproul/milagro_bls", branch = "little-endian-v0.10" } [dev-dependencies] base64 = "0.10" From ee25766caea02711fceb71950f847678ccadc6fc Mon Sep 17 00:00:00 2001 From: Age Manning Date: Thu, 5 Sep 2019 22:18:17 +1000 Subject: [PATCH 230/305] Correct recent beacon block request bug --- beacon_node/network/src/sync/manager.rs | 20 ++++++++++++++------ beacon_node/network/src/sync/simple_sync.rs | 12 ++++++++---- 2 files changed, 22 insertions(+), 10 deletions(-) diff --git a/beacon_node/network/src/sync/manager.rs b/beacon_node/network/src/sync/manager.rs 
index 2b2ed9dca..fa1315c39 100644 --- a/beacon_node/network/src/sync/manager.rs +++ b/beacon_node/network/src/sync/manager.rs @@ -187,7 +187,11 @@ pub(crate) enum ImportManagerOutcome { request: BeaconBlocksRequest, }, /// A `RecentBeaconBlocks` request is required. - RecentRequest(PeerId, RecentBeaconBlocksRequest), + RecentRequest { + peer_id: PeerId, + request_id: RequestId, + request: RecentBeaconBlocksRequest, + }, /// Updates information with peer via requesting another HELLO handshake. Hello(PeerId), /// A peer has caused a punishable error and should be downvoted. @@ -532,7 +536,7 @@ impl ImportManager { pub fn add_unknown_block(&mut self, block: BeaconBlock, peer_id: PeerId) { // if we are not in regular sync mode, ignore this block - if let ManagerState::Regular = self.state { + if self.state != ManagerState::Regular { return; } @@ -774,19 +778,23 @@ impl ImportManager { continue; } - parent_request.state = BlockRequestsState::Pending(self.current_req_id); + let request_id = self.current_req_id; + parent_request.state = BlockRequestsState::Pending(request_id); self.current_req_id += 1; let last_element_index = parent_request.downloaded_blocks.len() - 1; let parent_hash = parent_request.downloaded_blocks[last_element_index].parent_root; - let req = RecentBeaconBlocksRequest { + let request = RecentBeaconBlocksRequest { block_roots: vec![parent_hash], }; // select a random fully synced peer to attempt to download the parent block let peer_id = self.full_peers.iter().next().expect("List is not empty"); - self.event_queue - .push(ImportManagerOutcome::RecentRequest(peer_id.clone(), req)); + self.event_queue.push(ImportManagerOutcome::RecentRequest { + peer_id: peer_id.clone(), + request_id, + request, + }); re_run = true; } } diff --git a/beacon_node/network/src/sync/simple_sync.rs b/beacon_node/network/src/sync/simple_sync.rs index a8b271700..4a853f05d 100644 --- a/beacon_node/network/src/sync/simple_sync.rs +++ b/beacon_node/network/src/sync/simple_sync.rs 
@@ -277,18 +277,22 @@ impl SimpleSync { RPCRequest::BeaconBlocks(request), ); } - ImportManagerOutcome::RecentRequest(peer_id, req) => { + ImportManagerOutcome::RecentRequest { + peer_id, + request_id, + request, + } => { trace!( self.log, "RPC Request"; "method" => "RecentBeaconBlocks", - "count" => req.block_roots.len(), + "count" => request.block_roots.len(), "peer" => format!("{:?}", peer_id) ); self.network.send_rpc_request( - None, + Some(request_id), peer_id.clone(), - RPCRequest::RecentBeaconBlocks(req), + RPCRequest::RecentBeaconBlocks(request), ); } ImportManagerOutcome::DownvotePeer(peer_id) => { From 8b69a48fc5824bfad353bc87c861d8001ad60f00 Mon Sep 17 00:00:00 2001 From: Paul Hauner Date: Fri, 6 Sep 2019 10:03:45 +1000 Subject: [PATCH 231/305] Allow validator client to start before genesis --- beacon_node/beacon_chain/src/beacon_chain.rs | 4 +- beacon_node/client/src/lib.rs | 16 ++++++-- eth2/utils/slot_clock/src/lib.rs | 23 ++++++----- .../slot_clock/src/system_time_slot_clock.rs | 2 +- validator_client/src/service.rs | 40 +++++++++---------- 5 files changed, 48 insertions(+), 37 deletions(-) diff --git a/beacon_node/beacon_chain/src/beacon_chain.rs b/beacon_node/beacon_chain/src/beacon_chain.rs index 5409d3728..0f76507fe 100644 --- a/beacon_node/beacon_chain/src/beacon_chain.rs +++ b/beacon_node/beacon_chain/src/beacon_chain.rs @@ -178,7 +178,7 @@ impl BeaconChain { genesis_state.genesis_time, Duration::from_millis(spec.milliseconds_per_slot), ) - .ok_or_else(|| Error::SlotClockDidNotStart)?; + .map_err(|_| Error::SlotClockDidNotStart)?; info!(log, "Beacon chain initialized from genesis"; "validator_count" => genesis_state.validators.len(), @@ -220,7 +220,7 @@ impl BeaconChain { state.genesis_time, Duration::from_millis(spec.milliseconds_per_slot), ) - .ok_or_else(|| Error::SlotClockDidNotStart)?; + .map_err(|_| Error::SlotClockDidNotStart)?; let last_finalized_root = p.canonical_head.beacon_state.finalized_checkpoint.root; let last_finalized_block = 
&p.canonical_head.beacon_block; diff --git a/beacon_node/client/src/lib.rs b/beacon_node/client/src/lib.rs index 1d3cb40ec..afcd538b5 100644 --- a/beacon_node/client/src/lib.rs +++ b/beacon_node/client/src/lib.rs @@ -16,7 +16,7 @@ use slog::{crit, error, info, o}; use slot_clock::SlotClock; use std::marker::PhantomData; use std::sync::Arc; -use std::time::{Duration, Instant}; +use std::time::{Duration, Instant, SystemTime, UNIX_EPOCH}; use tokio::runtime::TaskExecutor; use tokio::timer::Interval; use types::EthSpec; @@ -177,8 +177,18 @@ where .map_err(error::Error::from)?, ); - if beacon_chain.slot().is_err() { - panic!("Cannot start client before genesis!") + let since_epoch = SystemTime::now() + .duration_since(UNIX_EPOCH) + .map_err(|e| format!("Unable to read system time: {}", e))?; + let since_genesis = Duration::from_secs(beacon_chain.head().beacon_state.genesis_time); + + if since_genesis > since_epoch { + info!( + log, + "Starting node prior to genesis"; + "now" => since_epoch.as_secs(), + "genesis_seconds" => since_genesis.as_secs(), + ); } let network_config = &client_config.network; diff --git a/eth2/utils/slot_clock/src/lib.rs b/eth2/utils/slot_clock/src/lib.rs index fd3bf029b..6192d1b6f 100644 --- a/eth2/utils/slot_clock/src/lib.rs +++ b/eth2/utils/slot_clock/src/lib.rs @@ -5,7 +5,7 @@ mod metrics; mod system_time_slot_clock; mod testing_slot_clock; -use std::time::{Duration, Instant, SystemTime, UNIX_EPOCH}; +use std::time::{Duration, Instant, SystemTime, SystemTimeError, UNIX_EPOCH}; pub use crate::system_time_slot_clock::SystemTimeSlotClock; pub use crate::testing_slot_clock::TestingSlotClock; @@ -17,18 +17,21 @@ pub trait SlotClock: Send + Sync + Sized { genesis_slot: Slot, genesis_seconds: u64, slot_duration: Duration, - ) -> Option { - let duration_between_now_and_unix_epoch = - SystemTime::now().duration_since(UNIX_EPOCH).ok()?; + ) -> Result { + let duration_between_now_and_unix_epoch = SystemTime::now().duration_since(UNIX_EPOCH)?; let 
duration_between_unix_epoch_and_genesis = Duration::from_secs(genesis_seconds); - if duration_between_now_and_unix_epoch < duration_between_unix_epoch_and_genesis { - None + let genesis_instant = if duration_between_now_and_unix_epoch + < duration_between_unix_epoch_and_genesis + { + Instant::now() + + (duration_between_unix_epoch_and_genesis - duration_between_now_and_unix_epoch) } else { - let genesis_instant = Instant::now() - - (duration_between_now_and_unix_epoch - duration_between_unix_epoch_and_genesis); - Some(Self::new(genesis_slot, genesis_instant, slot_duration)) - } + Instant::now() + - (duration_between_now_and_unix_epoch - duration_between_unix_epoch_and_genesis) + }; + + Ok(Self::new(genesis_slot, genesis_instant, slot_duration)) } fn new(genesis_slot: Slot, genesis: Instant, slot_duration: Duration) -> Self; diff --git a/eth2/utils/slot_clock/src/system_time_slot_clock.rs b/eth2/utils/slot_clock/src/system_time_slot_clock.rs index 0d4a52ef6..aae12c18c 100644 --- a/eth2/utils/slot_clock/src/system_time_slot_clock.rs +++ b/eth2/utils/slot_clock/src/system_time_slot_clock.rs @@ -42,7 +42,7 @@ impl SlotClock for SystemTimeSlotClock { fn duration_to_next_slot(&self) -> Option { let now = Instant::now(); if now < self.genesis { - None + Some(self.genesis - now) } else { let duration_since_genesis = now - self.genesis; let millis_since_genesis = duration_since_genesis.as_millis(); diff --git a/validator_client/src/service.rs b/validator_client/src/service.rs index 8adc79b91..8cdba537a 100644 --- a/validator_client/src/service.rs +++ b/validator_client/src/service.rs @@ -27,7 +27,7 @@ use slot_clock::{SlotClock, SystemTimeSlotClock}; use std::marker::PhantomData; use std::sync::Arc; use std::sync::RwLock; -use std::time::{Duration, Instant, SystemTime}; +use std::time::{Duration, Instant}; use tokio::prelude::*; use tokio::runtime::Builder; use tokio::timer::Interval; @@ -100,19 +100,6 @@ impl Service { - // verify the node's genesis time - if 
SystemTime::now() - .duration_since(SystemTime::UNIX_EPOCH) - .unwrap() - .as_secs() - < info.genesis_time - { - error!( - log, - "Beacon Node's genesis time is in the future. No work to do.\n Exiting" - ); - return Err("Genesis time in the future".into()); - } // verify the node's network id if eth2_config.spec.network_id != info.network_id as u8 { error!( @@ -177,13 +164,11 @@ impl Service(|| { - "Unable to start slot clock. Genesis may not have occurred yet. Exiting.".into() + .map_err::(|e| { + format!("Unable to start slot clock: {}.", e).into() })?; - let current_slot = slot_clock.now().ok_or_else::(|| { - "Genesis has not yet occurred. Exiting.".into() - })?; + let current_slot = slot_clock.now().unwrap_or_else(|| Slot::new(0)); /* Generate the duties manager */ @@ -237,7 +222,7 @@ impl Service::initialize_service( client_config, eth2_config, - log, + log.clone(), )?; // we have connected to a node and established its parameters. Spin up the core service @@ -253,7 +238,7 @@ impl Service(|| { - "Genesis is not in the past. Exiting.".into() + "Unable to determine duration to next slot. Exiting.".into() })?; // set up the validator work interval - start at next slot and proceed every slot @@ -264,6 +249,19 @@ impl Service duration_to_next_slot.as_secs() + ); + /* kick off the core service */ runtime.block_on( interval From 14cf6b0118bc722402858333d194cf7b4b699d62 Mon Sep 17 00:00:00 2001 From: Paul Hauner Date: Fri, 6 Sep 2019 10:17:23 +1000 Subject: [PATCH 232/305] Add option to validator service to fix bug With the previous setup it would never produce on the 0 slot. 
--- validator_client/src/service.rs | 44 +++++++++++++++++++-------------- 1 file changed, 26 insertions(+), 18 deletions(-) diff --git a/validator_client/src/service.rs b/validator_client/src/service.rs index 8cdba537a..5169f67f8 100644 --- a/validator_client/src/service.rs +++ b/validator_client/src/service.rs @@ -46,8 +46,8 @@ pub struct Service, slots_per_epoch: u64, /// The chain specification for this clients instance. spec: Arc, @@ -168,8 +168,6 @@ impl Service Service Service error_chain::Result<()> { - let current_slot = self + let wall_clock_slot = self .slot_clock .now() .ok_or_else::(|| { "Genesis is not in the past. Exiting.".into() })?; - let current_epoch = current_slot.epoch(self.slots_per_epoch); + let wall_clock_epoch = wall_clock_slot.epoch(self.slots_per_epoch); // this is a non-fatal error. If the slot clock repeats, the node could // have been slow to process the previous slot and is now duplicating tasks. // We ignore duplicated but raise a critical error. - if current_slot <= self.current_slot { - crit!( - self.log, - "The validator tried to duplicate a slot. Likely missed the previous slot" - ); - return Err("Duplicate slot".into()); + if let Some(current_slot) = self.current_slot { + if wall_clock_slot <= current_slot { + crit!( + self.log, + "The validator tried to duplicate a slot. Likely missed the previous slot" + ); + return Err("Duplicate slot".into()); + } } - self.current_slot = current_slot; - info!(self.log, "Processing"; "slot" => current_slot.as_u64(), "epoch" => current_epoch.as_u64()); + self.current_slot = Some(wall_clock_slot); + info!(self.log, "Processing"; "slot" => wall_clock_slot.as_u64(), "epoch" => wall_clock_epoch.as_u64()); Ok(()) } @@ -324,7 +324,10 @@ impl Service Service Date: Fri, 6 Sep 2019 14:10:49 +1000 Subject: [PATCH 233/305] Added YAML types for list of validators and added some logging to duties function. 
--- beacon_node/rest_api/src/beacon.rs | 22 +++++++--------------- beacon_node/rest_api/src/helpers.rs | 10 ++++++++++ beacon_node/rest_api/src/validator.rs | 25 +++++++++++++++++++++---- 3 files changed, 38 insertions(+), 19 deletions(-) diff --git a/beacon_node/rest_api/src/beacon.rs b/beacon_node/rest_api/src/beacon.rs index 70b3f3ee9..f9b2d0383 100644 --- a/beacon_node/rest_api/src/beacon.rs +++ b/beacon_node/rest_api/src/beacon.rs @@ -6,7 +6,7 @@ use serde::Serialize; use ssz_derive::Encode; use std::sync::Arc; use store::Store; -use types::{BeaconBlock, BeaconState, Epoch, EthSpec, Hash256, Slot}; +use types::{BeaconBlock, BeaconState, Epoch, EthSpec, Hash256, Slot, Validator}; #[derive(Serialize)] pub struct HeadResponse { @@ -162,21 +162,13 @@ pub fn get_validators(req: Request) -> ApiR }; let all_validators = &beacon_chain.head().beacon_state.validators; - let mut active_validators = Vec::with_capacity(all_validators.len()); - for (_index, validator) in all_validators.iter().enumerate() { - if validator.is_active_at(epoch) { - active_validators.push(validator) - } - } - active_validators.shrink_to_fit(); - let json: String = serde_json::to_string(&active_validators).map_err(|e| { - ApiError::ServerError(format!( - "Unable to serialize list of active validators: {:?}", - e - )) - })?; + let active_vals: Vec = all_validators + .iter() + .filter(|v| v.is_active_at(epoch)) + .cloned() + .collect(); - Ok(success_response(Body::from(json))) + ResponseBuilder::new(&req).body(&active_vals) } #[derive(Serialize, Encode)] diff --git a/beacon_node/rest_api/src/helpers.rs b/beacon_node/rest_api/src/helpers.rs index e15c27df5..08ccbb6c9 100644 --- a/beacon_node/rest_api/src/helpers.rs +++ b/beacon_node/rest_api/src/helpers.rs @@ -179,6 +179,7 @@ pub fn get_beacon_chain_from_request( .get::>>() .ok_or_else(|| ApiError::ServerError("Beacon chain extension missing".into()))?; + /* let _state_now = beacon_chain .state_now() .map_err(|e| 
ApiError::ServerError(format!("Unable to get current BeaconState {:?}", e)))? @@ -188,10 +189,19 @@ pub fn get_beacon_chain_from_request( ))? .build_all_caches(&beacon_chain.spec) .map_err(|e| ApiError::ServerError(format!("Unable to build state caches: {:?}", e)))?; + */ Ok(beacon_chain.clone()) } +pub fn get_logger_from_request(req: &Request) -> slog::Logger { + let log = req + .extensions() + .get::() + .expect("Should always get the logger from the request, since we put it in there."); + log.to_owned() +} + #[cfg(test)] mod test { use super::*; diff --git a/beacon_node/rest_api/src/validator.rs b/beacon_node/rest_api/src/validator.rs index 2374373bd..c559777c0 100644 --- a/beacon_node/rest_api/src/validator.rs +++ b/beacon_node/rest_api/src/validator.rs @@ -32,27 +32,44 @@ impl ValidatorDuty { /// HTTP Handler to retrieve a the duties for a set of validators during a particular epoch pub fn get_validator_duties(req: Request) -> ApiResult { + let log = get_logger_from_request(&req); + slog::trace!(log, "Validator duties requested of API: {:?}", &req); let beacon_chain = get_beacon_chain_from_request::(&req)?; - let head_state = &beacon_chain.head().beacon_state; + let mut head_state = beacon_chain + .state_now() + .map_err(|e| ApiError::ServerError(format!("Unable to get current BeaconState {:?}", e)))?; + slog::trace!(log, "Got head state from request."); // Parse and check query parameters let query = UrlQuery::from_request(&req)?; let current_epoch = head_state.current_epoch(); let epoch = match query.first_of(&["epoch"]) { - Ok((_, v)) => Epoch::new(v.parse::().map_err(|e| { - ApiError::InvalidQueryParams(format!("Invalid epoch parameter, must be a u64. {:?}", e)) - })?), + Ok((_, v)) => { + slog::trace!(log, "Requested epoch {:?}", v); + Epoch::new(v.parse::().map_err(|e| { + slog::info!(log, "Invalid epoch {:?}", e); + ApiError::InvalidQueryParams(format!( + "Invalid epoch parameter, must be a u64. {:?}", + e + )) + })?) 
+ } Err(_) => { // epoch not supplied, use the current epoch + slog::info!(log, "Using default epoch {:?}", current_epoch); current_epoch } }; let relative_epoch = RelativeEpoch::from_epoch(current_epoch, epoch).map_err(|e| { + slog::info!(log, "Requested epoch out of range."); ApiError::InvalidQueryParams(format!( "Cannot get RelativeEpoch, epoch out of range: {:?}", e )) })?; + if let Some(s) = head_state.maybe_as_mut_ref() { + s.build_all_caches(&beacon_chain.spec).ok(); + } let validators: Vec = query .all_of("validator_pubkeys")? .iter() From 1c9e4bc09cbab8f0c402aa1c6064fa9072d9f269 Mon Sep 17 00:00:00 2001 From: Michael Sproul Date: Fri, 6 Sep 2019 17:05:40 +1000 Subject: [PATCH 234/305] Fix bug in SSZ encoding of FixedVector --- eth2/utils/ssz_types/src/fixed_vector.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/eth2/utils/ssz_types/src/fixed_vector.rs b/eth2/utils/ssz_types/src/fixed_vector.rs index edf499adf..090d04d84 100644 --- a/eth2/utils/ssz_types/src/fixed_vector.rs +++ b/eth2/utils/ssz_types/src/fixed_vector.rs @@ -172,7 +172,7 @@ where T: ssz::Encode, { fn is_ssz_fixed_len() -> bool { - true + T::is_ssz_fixed_len() } fn ssz_fixed_len() -> usize { From 23a4fdabe44a266f05b0e92491e6617d7d35cecc Mon Sep 17 00:00:00 2001 From: Michael Sproul Date: Fri, 6 Sep 2019 17:06:11 +1000 Subject: [PATCH 235/305] Enable remaining SSZ generic tests --- tests/ef_tests/src/cases/ssz_generic.rs | 36 +++++++++++++++++++++++-- 1 file changed, 34 insertions(+), 2 deletions(-) diff --git a/tests/ef_tests/src/cases/ssz_generic.rs b/tests/ef_tests/src/cases/ssz_generic.rs index ce43f3c50..420f59679 100644 --- a/tests/ef_tests/src/cases/ssz_generic.rs +++ b/tests/ef_tests/src/cases/ssz_generic.rs @@ -4,6 +4,7 @@ use super::*; use crate::cases::common::{SszStaticType, TestU128, TestU256}; use crate::cases::ssz_static::{check_serialization, check_tree_hash}; use crate::decode::yaml_decode_file; +use serde::{de::Error as SerdeError, Deserialize, 
Deserializer}; use serde_derive::Deserialize; use ssz_derive::{Decode, Encode}; use std::fs; @@ -103,9 +104,8 @@ macro_rules! type_dispatch { "SmallTestStruct" => type_dispatch!($function, ($($arg),*), $base_ty, <$($param_ty,)* SmallTestStruct>, $($rest)*), "FixedTestStruct" => type_dispatch!($function, ($($arg),*), $base_ty, <$($param_ty,)* FixedTestStruct>, $($rest)*), "VarTestStruct" => type_dispatch!($function, ($($arg),*), $base_ty, <$($param_ty,)* VarTestStruct>, $($rest)*), + "ComplexTestStruct" => type_dispatch!($function, ($($arg),*), $base_ty, <$($param_ty,)* ComplexTestStruct>, $($rest)*), "BitsStruct" => type_dispatch!($function, ($($arg),*), $base_ty, <$($param_ty,)* BitsStruct>, $($rest)*), - // TODO: enable ComplexTestStruct - "ComplexTestStruct" => Err(Error::SkippedKnownFailure), _ => Err(Error::FailedToParseTest(format!("unsupported: {}", $value))), } }; @@ -260,6 +260,18 @@ struct VarTestStruct { C: u8, } +#[derive(Debug, Clone, Default, PartialEq, Decode, Encode, TreeHash, Deserialize)] +struct ComplexTestStruct { + A: u16, + B: VariableList, + C: u8, + #[serde(deserialize_with = "byte_list_from_hex_str")] + D: VariableList, + E: VarTestStruct, + F: FixedVector, + G: FixedVector, +} + #[derive(Debug, Clone, PartialEq, Decode, Encode, TreeHash, Deserialize)] struct BitsStruct { A: BitList, @@ -268,3 +280,23 @@ struct BitsStruct { D: BitList, E: BitVector, } + +fn byte_list_from_hex_str<'de, D, N: Unsigned>( + deserializer: D, +) -> Result, D::Error> +where + D: Deserializer<'de>, +{ + let s: String = Deserialize::deserialize(deserializer)?; + let decoded: Vec = hex::decode(&s.as_str()[2..]).map_err(D::Error::custom)?; + + if decoded.len() > N::to_usize() { + return Err(D::Error::custom(format!( + "Too many values for list, got: {}, limit: {}", + decoded.len(), + N::to_usize() + ))); + } else { + Ok(decoded.into()) + } +} From 9b062e052328559b962f0fcabb142713be61deda Mon Sep 17 00:00:00 2001 From: Paul Hauner Date: Fri, 6 Sep 2019 22:45:05 +1000 
Subject: [PATCH 236/305] Fix compile error in ef_tests --- tests/ef_tests/src/cases/ssz_generic.rs | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/tests/ef_tests/src/cases/ssz_generic.rs b/tests/ef_tests/src/cases/ssz_generic.rs index 420f59679..fc62e66fc 100644 --- a/tests/ef_tests/src/cases/ssz_generic.rs +++ b/tests/ef_tests/src/cases/ssz_generic.rs @@ -4,7 +4,7 @@ use super::*; use crate::cases::common::{SszStaticType, TestU128, TestU256}; use crate::cases::ssz_static::{check_serialization, check_tree_hash}; use crate::decode::yaml_decode_file; -use serde::{de::Error as SerdeError, Deserialize, Deserializer}; +use serde::{de::Error as SerdeError, Deserializer}; use serde_derive::Deserialize; use ssz_derive::{Decode, Encode}; use std::fs; @@ -287,7 +287,7 @@ fn byte_list_from_hex_str<'de, D, N: Unsigned>( where D: Deserializer<'de>, { - let s: String = Deserialize::deserialize(deserializer)?; + let s: String = serde::de::Deserialize::deserialize(deserializer)?; let decoded: Vec = hex::decode(&s.as_str()[2..]).map_err(D::Error::custom)?; if decoded.len() > N::to_usize() { From 812e1fbe2691bcbebd623ffdeb2adc1101e2aa0e Mon Sep 17 00:00:00 2001 From: Age Manning Date: Sat, 7 Sep 2019 00:28:54 +1000 Subject: [PATCH 237/305] Implements a new thread dedicated for syncing --- beacon_node/network/src/message_handler.rs | 52 +- beacon_node/network/src/sync/manager.rs | 502 +++++++++++++------- beacon_node/network/src/sync/mod.rs | 2 +- beacon_node/network/src/sync/simple_sync.rs | 415 ++++++---------- 4 files changed, 514 insertions(+), 457 deletions(-) diff --git a/beacon_node/network/src/message_handler.rs b/beacon_node/network/src/message_handler.rs index cade65d63..be8fa21f8 100644 --- a/beacon_node/network/src/message_handler.rs +++ b/beacon_node/network/src/message_handler.rs @@ -1,6 +1,6 @@ use crate::error; use crate::service::NetworkMessage; -use crate::sync::SimpleSync; +use crate::sync::MessageProcessor; use beacon_chain::{BeaconChain, 
BeaconChainTypes}; use eth2_libp2p::{ behaviour::PubsubMessage, @@ -15,12 +15,16 @@ use std::sync::Arc; use tokio::sync::mpsc; use types::{Attestation, AttesterSlashing, BeaconBlock, ProposerSlashing, VoluntaryExit}; -/// Handles messages received from the network and client and organises syncing. +/// Handles messages received from the network and client and organises syncing. This +/// functionality of this struct is to validate an decode messages from the network before +/// passing them to the internal message processor. The message processor spawns a syncing thread +/// which manages which blocks need to be requested and processed. pub struct MessageHandler { - /// The syncing framework. - sync: SimpleSync, /// A channel to the network service to allow for gossip propagation. network_send: mpsc::UnboundedSender, + /// Processes validated and decoded messages from the network. Has direct access to the + /// sync manager. + message_processor: MessageProcessor, /// The `MessageHandler` logger. log: slog::Logger, } @@ -50,13 +54,15 @@ impl MessageHandler { trace!(log, "Service starting"); let (handler_send, handler_recv) = mpsc::unbounded_channel(); - // Initialise sync and begin processing in thread - let sync = SimpleSync::new(Arc::downgrade(&beacon_chain), network_send.clone(), &log); + + // Initialise a message instance, which itself spawns the syncing thread. 
+ let message_processor = + MessageProcessor::new(executor, beacon_chain, network_send.clone(), &log); // generate the Message handler let mut handler = MessageHandler { network_send, - sync, + message_processor, log: log.clone(), }; @@ -66,7 +72,11 @@ impl MessageHandler { .for_each(move |msg| Ok(handler.handle_message(msg))) .map_err(move |_| { debug!(log, "Network message handler terminated."); - }), + }), /* + .then(move |_| { + debug!(log.clone(), "Message handler shutdown"); + }), + */ ); Ok(handler_send) @@ -77,11 +87,11 @@ impl MessageHandler { match message { // we have initiated a connection to a peer HandlerMessage::PeerDialed(peer_id) => { - self.sync.on_connect(peer_id); + self.message_processor.on_connect(peer_id); } // A peer has disconnected HandlerMessage::PeerDisconnected(peer_id) => { - self.sync.on_disconnect(peer_id); + self.message_processor.on_disconnect(peer_id); } // An RPC message request/response has been received HandlerMessage::RPC(peer_id, rpc_event) => { @@ -109,7 +119,7 @@ impl MessageHandler { fn handle_rpc_request(&mut self, peer_id: PeerId, request_id: RequestId, request: RPCRequest) { match request { RPCRequest::Hello(hello_message) => { - self.sync + self.message_processor .on_hello_request(peer_id, request_id, hello_message) } RPCRequest::Goodbye(goodbye_reason) => { @@ -118,13 +128,13 @@ impl MessageHandler { "peer" => format!("{:?}", peer_id), "reason" => format!("{:?}", goodbye_reason), ); - self.sync.on_disconnect(peer_id); + self.message_processor.on_disconnect(peer_id); } RPCRequest::BeaconBlocks(request) => self - .sync + .message_processor .on_beacon_blocks_request(peer_id, request_id, request), RPCRequest::RecentBeaconBlocks(request) => self - .sync + .message_processor .on_recent_beacon_blocks_request(peer_id, request_id, request), } } @@ -151,12 +161,13 @@ impl MessageHandler { RPCErrorResponse::Success(response) => { match response { RPCResponse::Hello(hello_message) => { - self.sync.on_hello_response(peer_id, 
hello_message); + self.message_processor + .on_hello_response(peer_id, hello_message); } RPCResponse::BeaconBlocks(response) => { match self.decode_beacon_blocks(&response) { Ok(beacon_blocks) => { - self.sync.on_beacon_blocks_response( + self.message_processor.on_beacon_blocks_response( peer_id, request_id, beacon_blocks, @@ -171,7 +182,7 @@ impl MessageHandler { RPCResponse::RecentBeaconBlocks(response) => { match self.decode_beacon_blocks(&response) { Ok(beacon_blocks) => { - self.sync.on_recent_beacon_blocks_response( + self.message_processor.on_recent_beacon_blocks_response( peer_id, request_id, beacon_blocks, @@ -199,7 +210,9 @@ impl MessageHandler { match gossip_message { PubsubMessage::Block(message) => match self.decode_gossip_block(message) { Ok(block) => { - let should_forward_on = self.sync.on_block_gossip(peer_id.clone(), block); + let should_forward_on = self + .message_processor + .on_block_gossip(peer_id.clone(), block); // TODO: Apply more sophisticated validation and decoding logic if should_forward_on { self.propagate_message(id, peer_id.clone()); @@ -213,7 +226,8 @@ impl MessageHandler { Ok(attestation) => { // TODO: Apply more sophisticated validation and decoding logic self.propagate_message(id, peer_id.clone()); - self.sync.on_attestation_gossip(peer_id, attestation); + self.message_processor + .on_attestation_gossip(peer_id, attestation); } Err(e) => { debug!(self.log, "Invalid gossiped attestation"; "peer_id" => format!("{}", peer_id), "Error" => format!("{:?}", e)); diff --git a/beacon_node/network/src/sync/manager.rs b/beacon_node/network/src/sync/manager.rs index fa1315c39..12bef95fa 100644 --- a/beacon_node/network/src/sync/manager.rs +++ b/beacon_node/network/src/sync/manager.rs @@ -1,4 +1,4 @@ -//! The `ImportManager` facilities the block syncing logic of lighthouse. The current networking +//! The `SyncManager` facilities the block syncing logic of lighthouse. The current networking //! 
specification provides two methods from which to obtain blocks from peers. The `BeaconBlocks` //! request and the `RecentBeaconBlocks` request. The former is used to obtain a large number of //! blocks and the latter allows for searching for blocks given a block-hash. @@ -7,7 +7,7 @@ //! - Long range (batch) sync, when a client is out of date and needs to the latest head. //! - Parent lookup - when a peer provides us a block whose parent is unknown to us. //! -//! Both of these syncing strategies are built into the `ImportManager`. +//! Both of these syncing strategies are built into the `SyncManager`. //! //! //! Currently the long-range (batch) syncing method functions by opportunistically downloading @@ -53,16 +53,18 @@ //! fully sync'd peers. If `PARENT_FAIL_TOLERANCE` attempts at requesting the block fails, we //! drop the propagated block and downvote the peer that sent it to us. -use super::simple_sync::{PeerSyncInfo, FUTURE_SLOT_TOLERANCE}; +use super::simple_sync::{hello_message, NetworkContext, PeerSyncInfo, FUTURE_SLOT_TOLERANCE}; use beacon_chain::{BeaconChain, BeaconChainTypes, BlockProcessingOutcome}; use eth2_libp2p::rpc::methods::*; -use eth2_libp2p::rpc::RequestId; +use eth2_libp2p::rpc::{RPCRequest, RequestId}; use eth2_libp2p::PeerId; +use futures::prelude::*; use slog::{debug, info, trace, warn, Logger}; use smallvec::SmallVec; use std::collections::{HashMap, HashSet}; use std::ops::{Add, Sub}; use std::sync::Weak; +use tokio::sync::{mpsc, oneshot}; use types::{BeaconBlock, EthSpec, Hash256, Slot}; /// Blocks are downloaded in batches from peers. This constant specifies how many blocks per batch @@ -84,6 +86,31 @@ const PARENT_DEPTH_TOLERANCE: usize = SLOT_IMPORT_TOLERANCE * 2; /// requests to peers who never return blocks. const EMPTY_BATCH_TOLERANCE: usize = 100; +#[derive(Debug)] +/// A message than can be sent to the sync manager thread. +pub enum SyncMessage { + /// A useful peer has been discovered. 
+ AddPeer(PeerId, PeerSyncInfo), + /// A `BeaconBlocks` response has been received. + BeaconBlocksResponse { + peer_id: PeerId, + request_id: RequestId, + beacon_blocks: Vec>, + }, + /// A `RecentBeaconBlocks` response has been received. + RecentBeaconBlocksResponse { + peer_id: PeerId, + request_id: RequestId, + beacon_blocks: Vec>, + }, + /// A block with an unknown parent has been received. + UnknownBlock(PeerId, BeaconBlock), + /// A peer has disconnected. + Disconnect(PeerId), + /// An RPC Error has occurred on a request. + _RPCError(RequestId), +} + #[derive(PartialEq)] /// The current state of a block or batches lookup. enum BlockRequestsState { @@ -176,39 +203,19 @@ enum ManagerState { Stalled, } -/// The output states that can occur from driving (polling) the manager state machine. -pub(crate) enum ImportManagerOutcome { - /// There is no further work to complete. The manager is waiting for further input. - Idle, - /// A `BeaconBlocks` request is required. - RequestBlocks { - peer_id: PeerId, - request_id: RequestId, - request: BeaconBlocksRequest, - }, - /// A `RecentBeaconBlocks` request is required. - RecentRequest { - peer_id: PeerId, - request_id: RequestId, - request: RecentBeaconBlocksRequest, - }, - /// Updates information with peer via requesting another HELLO handshake. - Hello(PeerId), - /// A peer has caused a punishable error and should be downvoted. - DownvotePeer(PeerId), -} - /// The primary object for handling and driving all the current syncing logic. It maintains the /// current state of the syncing process, the number of useful peers, downloaded blocks and /// controls the logic behind both the long-range (batch) sync and the on-going potential parent /// look-up of blocks. -pub struct ImportManager { - /// List of events to be processed externally. - event_queue: SmallVec<[ImportManagerOutcome; 20]>, +pub struct SyncManager { /// A weak reference to the underlying beacon chain. chain: Weak>, /// The current state of the import manager. 
state: ManagerState, + /// A receiving channel sent by the message processor thread. + input_channel: mpsc::UnboundedReceiver>, + /// A network context to contact the network service. + network: NetworkContext, /// A collection of `BlockRequest` per peer that is currently being downloaded. Used in the /// long-range (batch) sync process. import_queue: HashMap>, @@ -224,22 +231,51 @@ pub struct ImportManager { log: Logger, } -impl ImportManager { - /// Generates a new `ImportManager` given a logger and an Arc reference to a beacon chain. The - /// import manager keeps a weak reference to the beacon chain, which allows the chain to be - /// dropped during the syncing process. The syncing handles this termination gracefully. - pub fn new(beacon_chain: Weak>, log: &slog::Logger) -> Self { - ImportManager { - event_queue: SmallVec::new(), - chain: beacon_chain, - state: ManagerState::Regular, - import_queue: HashMap::new(), - parent_queue: SmallVec::new(), - full_peers: HashSet::new(), - current_req_id: 0, - log: log.clone(), - } - } +/// Spawns a new `SyncManager` thread which has a weak reference to underlying beacon +/// chain. This allows the chain to be +/// dropped during the syncing process which will gracefully end the `SyncManager`. 
+pub fn spawn( + executor: &tokio::runtime::TaskExecutor, + beacon_chain: Weak>, + network: NetworkContext, + log: slog::Logger, +) -> ( + mpsc::UnboundedSender>, + oneshot::Sender<()>, +) { + // generate the exit channel + let (sync_exit, exit_rx) = tokio::sync::oneshot::channel(); + // generate the message channel + let (sync_send, sync_recv) = mpsc::unbounded_channel::>(); + + // create an instance of the SyncManager + let sync_manager = SyncManager { + chain: beacon_chain, + state: ManagerState::Regular, + input_channel: sync_recv, + network, + import_queue: HashMap::new(), + parent_queue: SmallVec::new(), + full_peers: HashSet::new(), + current_req_id: 0, + log: log.clone(), + }; + + // spawn the sync manager thread + debug!(log, "Sync Manager started"); + executor.spawn( + sync_manager + .select(exit_rx.then(|_| Ok(()))) + .then(move |_| { + info!(log.clone(), "Sync Manager shutdown"); + Ok(()) + }), + ); + (sync_send, sync_exit) +} + +impl SyncManager { + /* Input Handling Functions */ /// A peer has connected which has blocks that are unknown to us. /// @@ -281,7 +317,7 @@ impl ImportManager { return; } - // Check if the peer is significantly is behind us. If within `SLOT_IMPORT_TOLERANCE` + // Check if the peer is significantly behind us. If within `SLOT_IMPORT_TOLERANCE` // treat them as a fully synced peer. If not, ignore them in the sync process if local.head_slot.sub(remote.head_slot).as_usize() < SLOT_IMPORT_TOLERANCE { self.add_full_peer(peer_id.clone()); @@ -328,8 +364,7 @@ impl ImportManager { let chain = match self.chain.upgrade() { Some(chain) => chain, None => { - debug!(self.log, "Chain dropped. Sync terminating"); - self.event_queue.clear(); + trace!(self.log, "Chain dropped. 
Sync terminating"); return; } }; @@ -390,8 +425,7 @@ impl ImportManager { "request_id" => request_id, "response_initial_slot" => blocks[0].slot, "requested_initial_slot" => block_requests.current_start_slot); - self.event_queue - .push(ImportManagerOutcome::DownvotePeer(peer_id)); + downvote_peer(&mut self.network, &self.log, peer_id); // consider this sync failed block_requests.state = BlockRequestsState::Failed; return; @@ -515,26 +549,7 @@ impl ImportManager { parent_request.state = BlockRequestsState::ReadyToProcess; } - pub fn _inject_error(_peer_id: PeerId, _id: RequestId) { - //TODO: Remove block state from pending - } - - pub fn peer_disconnect(&mut self, peer_id: &PeerId) { - self.import_queue.remove(peer_id); - self.full_peers.remove(peer_id); - self.update_state(); - } - - pub fn add_full_peer(&mut self, peer_id: PeerId) { - debug!( - self.log, "Fully synced peer added"; - "peer" => format!("{:?}", peer_id), - ); - self.full_peers.insert(peer_id); - self.update_state(); - } - - pub fn add_unknown_block(&mut self, block: BeaconBlock, peer_id: PeerId) { + fn add_unknown_block(&mut self, peer_id: PeerId, block: BeaconBlock) { // if we are not in regular sync mode, ignore this block if self.state != ManagerState::Regular { return; @@ -563,55 +578,29 @@ impl ImportManager { self.parent_queue.push(req); } - pub(crate) fn poll(&mut self) -> ImportManagerOutcome { - loop { - //TODO: Optimize the lookups. Potentially keep state of whether each of these functions - //need to be called. 
- - // only break once everything has been processed - let mut re_run = false; - - // only process batch requests if there are any - if !self.import_queue.is_empty() { - // process potential block requests - re_run = re_run || self.process_potential_block_requests(); - - // process any complete long-range batches - re_run = re_run || self.process_complete_batches(); - } - - // only process parent objects if we are in regular sync - if !self.parent_queue.is_empty() { - // process any parent block lookup-requests - re_run = re_run || self.process_parent_requests(); - - // process any complete parent lookups - re_run = re_run || self.process_complete_parent_requests(); - } - - // exit early if the beacon chain is dropped - if let None = self.chain.upgrade() { - return ImportManagerOutcome::Idle; - } - - // return any queued events - if !self.event_queue.is_empty() { - let event = self.event_queue.remove(0); - self.event_queue.shrink_to_fit(); - return event; - } - - // update the state of the manager - self.update_state(); - - if !re_run { - break; - } - } - - return ImportManagerOutcome::Idle; + fn inject_error(&mut self, _id: RequestId) { + //TODO: Remove block state from pending } + fn peer_disconnect(&mut self, peer_id: &PeerId) { + self.import_queue.remove(peer_id); + self.full_peers.remove(peer_id); + self.update_state(); + } + + fn add_full_peer(&mut self, peer_id: PeerId) { + debug!( + self.log, "Fully synced peer added"; + "peer" => format!("{:?}", peer_id), + ); + self.full_peers.insert(peer_id); + self.update_state(); + } + + /* Processing State Functions */ + // These functions are called in the main poll function to transition the state of the sync + // manager + fn update_state(&mut self) { let previous_state = self.state.clone(); self.state = { @@ -631,13 +620,12 @@ impl ImportManager { } } - fn process_potential_block_requests(&mut self) -> bool { + fn process_potential_block_requests(&mut self) { // check if an outbound request is required // Managing 
a fixed number of outbound requests is maintained at the RPC protocol libp2p // layer and not needed here. Therefore we create many outbound requests and let the RPC // handle the number of simultaneous requests. Request all queued objects. - let mut re_run = false; // remove any failed batches let debug_log = &self.log; let full_peer_ref = &mut self.full_peers; @@ -655,40 +643,40 @@ impl ImportManager { }); // process queued block requests - for (peer_id, block_requests) in self - .import_queue - .iter_mut() - .find(|(_peer_id, req)| req.state == BlockRequestsState::Queued) - { - let request_id = self.current_req_id; - block_requests.state = BlockRequestsState::Pending(request_id); - self.current_req_id += 1; + for (peer_id, block_requests) in self.import_queue.iter_mut() { + { + if block_requests.state == BlockRequestsState::Queued { + let request_id = self.current_req_id; + block_requests.state = BlockRequestsState::Pending(request_id); + self.current_req_id += 1; - let request = BeaconBlocksRequest { - head_block_root: block_requests.target_head_root, - start_slot: block_requests.current_start_slot.as_u64(), - count: MAX_BLOCKS_PER_REQUEST, - step: 0, - }; - self.event_queue.push(ImportManagerOutcome::RequestBlocks { - peer_id: peer_id.clone(), - request, - request_id, - }); - re_run = true; + let request = BeaconBlocksRequest { + head_block_root: block_requests.target_head_root, + start_slot: block_requests.current_start_slot.as_u64(), + count: MAX_BLOCKS_PER_REQUEST, + step: 0, + }; + request_blocks( + &mut self.network, + &self.log, + peer_id.clone(), + request_id, + request, + ); + } + } } - - re_run } fn process_complete_batches(&mut self) -> bool { - // flag to indicate if the manager can be switched to idle or not - let mut re_run = false; + // This function can queue extra blocks and the main poll loop will need to be re-executed + // to process these. This flag indicates that the main poll loop has to continue. 
+ let mut re_run_poll = false; // create reference variables to be moved into subsequent closure let chain_ref = self.chain.clone(); let log_ref = &self.log; - let event_queue_ref = &mut self.event_queue; + let network_ref = &mut self.network; self.import_queue.retain(|peer_id, block_requests| { if block_requests.state == BlockRequestsState::ReadyToProcess { @@ -712,13 +700,13 @@ impl ImportManager { // target head if end_slot >= block_requests.target_head_slot { // Completed, re-hello the peer to ensure we are up to the latest head - event_queue_ref.push(ImportManagerOutcome::Hello(peer_id.clone())); + hello_peer(network_ref, log_ref, chain_ref.clone(), peer_id.clone()); // remove the request false } else { // have not reached the end, queue another batch block_requests.update_start_slot(); - re_run = true; + re_run_poll = true; // keep the batch true } @@ -731,7 +719,7 @@ impl ImportManager { "no_blocks" => last_element + 1, "error" => format!("{:?}", e), ); - event_queue_ref.push(ImportManagerOutcome::DownvotePeer(peer_id.clone())); + downvote_peer(network_ref, log_ref, peer_id.clone()); false } } @@ -741,17 +729,15 @@ impl ImportManager { } }); - re_run + re_run_poll } - fn process_parent_requests(&mut self) -> bool { + fn process_parent_requests(&mut self) { // check to make sure there are peers to search for the parent from if self.full_peers.is_empty() { - return false; + return; } - let mut re_run = false; - // remove any failed requests let debug_log = &self.log; self.parent_queue.retain(|parent_request| { @@ -790,20 +776,20 @@ impl ImportManager { // select a random fully synced peer to attempt to download the parent block let peer_id = self.full_peers.iter().next().expect("List is not empty"); - self.event_queue.push(ImportManagerOutcome::RecentRequest { - peer_id: peer_id.clone(), + recent_blocks_request( + &mut self.network, + &self.log, + peer_id.clone(), request_id, request, - }); - re_run = true; + ); } } - re_run } fn 
process_complete_parent_requests(&mut self) -> bool { // returned value indicating whether the manager can be switched to idle or not - let mut re_run = false; + let mut re_run_poll = false; // Find any parent_requests ready to be processed for completed_request in self @@ -827,9 +813,8 @@ impl ImportManager { "received_block" => format!("{}", block_hash), "expected_parent" => format!("{}", expected_hash), ); - re_run = true; - self.event_queue - .push(ImportManagerOutcome::DownvotePeer(peer)); + re_run_poll = true; + downvote_peer(&mut self.network, &self.log, peer); } // try and process the list of blocks up to the requested block @@ -846,7 +831,7 @@ impl ImportManager { // need to keep looking for parents completed_request.downloaded_blocks.push(block); completed_request.state = BlockRequestsState::Queued; - re_run = true; + re_run_poll = true; break; } Ok(BlockProcessingOutcome::Processed { block_root: _ }) => {} @@ -859,11 +844,13 @@ impl ImportManager { "peer" => format!("{:?}", completed_request.last_submitted_peer), ); completed_request.state = BlockRequestsState::Queued; - re_run = true; - self.event_queue.push(ImportManagerOutcome::DownvotePeer( + re_run_poll = true; + downvote_peer( + &mut self.network, + &self.log, completed_request.last_submitted_peer.clone(), - )); - return re_run; + ); + return re_run_poll; } Err(e) => { completed_request.failed_attempts += 1; @@ -872,16 +859,17 @@ impl ImportManager { "error" => format!("{:?}", e) ); completed_request.state = BlockRequestsState::Queued; - re_run = true; - self.event_queue.push(ImportManagerOutcome::DownvotePeer( + re_run_poll = true; + downvote_peer( + &mut self.network, + &self.log, completed_request.last_submitted_peer.clone(), - )); - return re_run; + ); + return re_run_poll; } } } else { // chain doesn't exist - clear the event queue and return - self.event_queue.clear(); return false; } } @@ -895,11 +883,83 @@ impl ImportManager { true } }); - re_run + re_run_poll } } -// Helper function to 
process blocks +/* Network Context Helper Functions */ + +fn hello_peer( + network: &mut NetworkContext, + log: &slog::Logger, + chain: Weak>, + peer_id: PeerId, +) { + trace!( + log, + "RPC Request"; + "method" => "HELLO", + "peer" => format!("{:?}", peer_id) + ); + if let Some(chain) = chain.upgrade() { + network.send_rpc_request(None, peer_id, RPCRequest::Hello(hello_message(&chain))); + } +} + +fn request_blocks( + network: &mut NetworkContext, + log: &slog::Logger, + peer_id: PeerId, + request_id: RequestId, + request: BeaconBlocksRequest, +) { + trace!( + log, + "RPC Request"; + "method" => "BeaconBlocks", + "id" => request_id, + "count" => request.count, + "peer" => format!("{:?}", peer_id) + ); + network.send_rpc_request( + Some(request_id), + peer_id.clone(), + RPCRequest::BeaconBlocks(request), + ); +} + +fn recent_blocks_request( + network: &mut NetworkContext, + log: &slog::Logger, + peer_id: PeerId, + request_id: RequestId, + request: RecentBeaconBlocksRequest, +) { + trace!( + log, + "RPC Request"; + "method" => "RecentBeaconBlocks", + "count" => request.block_roots.len(), + "peer" => format!("{:?}", peer_id) + ); + network.send_rpc_request( + Some(request_id), + peer_id.clone(), + RPCRequest::RecentBeaconBlocks(request), + ); +} + +fn downvote_peer(network: &mut NetworkContext, log: &slog::Logger, peer_id: PeerId) { + trace!( + log, + "Peer downvoted"; + "peer" => format!("{:?}", peer_id) + ); + // TODO: Implement reputation + network.disconnect(peer_id.clone(), GoodbyeReason::Fault); +} + +// Helper function to process blocks which only consumes the chain and blocks to process fn process_blocks( weak_chain: Weak>, blocks: Vec>, @@ -1005,3 +1065,99 @@ fn process_blocks( Ok(()) } + +impl Future for SyncManager { + type Item = (); + type Error = String; + + fn poll(&mut self) -> Result, Self::Error> { + // process any inbound messages + loop { + match self.input_channel.poll() { + Ok(Async::Ready(Some(message))) => match message { + 
SyncMessage::AddPeer(peer_id, info) => { + self.add_peer(peer_id, info); + dbg!("add peer"); + } + SyncMessage::BeaconBlocksResponse { + peer_id, + request_id, + beacon_blocks, + } => { + self.beacon_blocks_response(peer_id, request_id, beacon_blocks); + } + SyncMessage::RecentBeaconBlocksResponse { + peer_id, + request_id, + beacon_blocks, + } => { + self.recent_blocks_response(peer_id, request_id, beacon_blocks); + } + SyncMessage::UnknownBlock(peer_id, block) => { + self.add_unknown_block(peer_id, block); + } + SyncMessage::Disconnect(peer_id) => { + self.peer_disconnect(&peer_id); + } + SyncMessage::_RPCError(request_id) => { + self.inject_error(request_id); + } + }, + Ok(Async::NotReady) => break, + Ok(Async::Ready(None)) => { + return Err("Sync manager channel closed".into()); + } + Err(e) => { + return Err(format!("Sync Manager channel error: {:?}", e)); + } + } + } + + loop { + //TODO: Optimize the lookups. Potentially keep state of whether each of these functions + //need to be called. 
+ let mut re_run = false; + + dbg!(self.import_queue.len()); + // only process batch requests if there are any + if !self.import_queue.is_empty() { + // process potential block requests + self.process_potential_block_requests(); + + dbg!(self.import_queue.len()); + // process any complete long-range batches + re_run = re_run || self.process_complete_batches(); + dbg!(self.import_queue.len()); + dbg!(&self.state); + } + + // only process parent objects if we are in regular sync + if !self.parent_queue.is_empty() { + // process any parent block lookup-requests + self.process_parent_requests(); + + // process any complete parent lookups + re_run = re_run || self.process_complete_parent_requests(); + } + + dbg!(self.import_queue.len()); + dbg!(&self.state); + + // Shutdown the thread if the chain has termined + if let None = self.chain.upgrade() { + return Ok(Async::Ready(())); + } + + if !re_run { + break; + } + } + dbg!(self.import_queue.len()); + dbg!(&self.state); + + // update the state of the manager + self.update_state(); + + return Ok(Async::NotReady); + } +} diff --git a/beacon_node/network/src/sync/mod.rs b/beacon_node/network/src/sync/mod.rs index b26d78c14..58ec386aa 100644 --- a/beacon_node/network/src/sync/mod.rs +++ b/beacon_node/network/src/sync/mod.rs @@ -4,7 +4,7 @@ mod manager; /// Stores the various syncing methods for the beacon chain. mod simple_sync; -pub use simple_sync::SimpleSync; +pub use simple_sync::MessageProcessor; /// Currently implemented sync methods. 
pub enum SyncMethod { diff --git a/beacon_node/network/src/sync/simple_sync.rs b/beacon_node/network/src/sync/simple_sync.rs index 4a853f05d..d8b5f2dbf 100644 --- a/beacon_node/network/src/sync/simple_sync.rs +++ b/beacon_node/network/src/sync/simple_sync.rs @@ -1,4 +1,4 @@ -use super::manager::{ImportManager, ImportManagerOutcome}; +use super::manager::SyncMessage; use crate::service::NetworkMessage; use beacon_chain::{BeaconChain, BeaconChainTypes, BlockProcessingOutcome}; use eth2_libp2p::rpc::methods::*; @@ -6,11 +6,14 @@ use eth2_libp2p::rpc::{RPCEvent, RPCRequest, RPCResponse, RequestId}; use eth2_libp2p::PeerId; use slog::{debug, info, o, trace, warn}; use ssz::Encode; -use std::sync::{Arc, Weak}; +use std::sync::Arc; use store::Store; -use tokio::sync::mpsc; +use tokio::sync::{mpsc, oneshot}; use types::{Attestation, BeaconBlock, Epoch, EthSpec, Hash256, Slot}; +//TODO: Put a maximum limit on the number of block that can be requested. +//TODO: Rate limit requests + /// If a block is more than `FUTURE_SLOT_TOLERANCE` slots ahead of our slot clock, we drop it. /// Otherwise we queue it. pub(crate) const FUTURE_SLOT_TOLERANCE: u64 = 1; @@ -46,55 +49,71 @@ impl From<&Arc>> for PeerSyncInfo { } } -/// The current syncing state. -#[derive(PartialEq)] -pub enum SyncState { - _Idle, - _Downloading, - _Stopped, -} - -/// Simple Syncing protocol. -pub struct SimpleSync { +/// Processes validated messages from the network. It relays necessary data to the syncing thread +/// and processes blocks from the pubsub network. +pub struct MessageProcessor { /// A reference to the underlying beacon chain. - chain: Weak>, - manager: ImportManager, + chain: Arc>, + /// A channel to the syncing thread. + sync_send: mpsc::UnboundedSender>, + /// A oneshot channel for destroying the sync thread. + _sync_exit: oneshot::Sender<()>, + /// A nextwork context to return and handle RPC requests. network: NetworkContext, + /// The `RPCHandler` logger. 
log: slog::Logger, } -impl SimpleSync { - /// Instantiate a `SimpleSync` instance, with no peers and an empty queue. +impl MessageProcessor { + /// Instantiate a `MessageProcessor` instance pub fn new( - beacon_chain: Weak>, + executor: &tokio::runtime::TaskExecutor, + beacon_chain: Arc>, network_send: mpsc::UnboundedSender, log: &slog::Logger, ) -> Self { let sync_logger = log.new(o!("Service"=> "Sync")); + let sync_network_context = NetworkContext::new(network_send.clone(), sync_logger.clone()); - SimpleSync { - chain: beacon_chain.clone(), - manager: ImportManager::new(beacon_chain, log), + // spawn the sync thread + let (sync_send, _sync_exit) = super::manager::spawn( + executor, + Arc::downgrade(&beacon_chain), + sync_network_context, + sync_logger, + ); + + MessageProcessor { + chain: beacon_chain, + sync_send, + _sync_exit, network: NetworkContext::new(network_send, log.clone()), - log: sync_logger, + log: log.clone(), } } + fn send_to_sync(&mut self, message: SyncMessage) { + self.sync_send.try_send(message).unwrap_or_else(|_| { + warn!( + self.log, + "Could not send message to the sync service"; + ) + }); + } + /// Handle a peer disconnect. /// /// Removes the peer from the manager. pub fn on_disconnect(&mut self, peer_id: PeerId) { - self.manager.peer_disconnect(&peer_id); + self.send_to_sync(SyncMessage::Disconnect(peer_id)); } /// Handle the connection of a new peer. /// /// Sends a `Hello` message to the peer. pub fn on_connect(&mut self, peer_id: PeerId) { - if let Some(chain) = self.chain.upgrade() { - self.network - .send_rpc_request(None, peer_id, RPCRequest::Hello(hello_message(&chain))); - } + self.network + .send_rpc_request(None, peer_id, RPCRequest::Hello(hello_message(&self.chain))); } /// Handle a `Hello` request. 
@@ -107,18 +126,16 @@ impl SimpleSync { hello: HelloMessage, ) { // ignore hello responses if we are shutting down - if let Some(chain) = self.chain.upgrade() { - trace!(self.log, "HelloRequest"; "peer" => format!("{:?}", peer_id)); + trace!(self.log, "HelloRequest"; "peer" => format!("{:?}", peer_id)); - // Say hello back. - self.network.send_rpc_response( - peer_id.clone(), - request_id, - RPCResponse::Hello(hello_message(&chain)), - ); + // Say hello back. + self.network.send_rpc_response( + peer_id.clone(), + request_id, + RPCResponse::Hello(hello_message(&self.chain)), + ); - self.process_hello(peer_id, hello); - } + self.process_hello(peer_id, hello); } /// Process a `Hello` response from a peer. @@ -133,183 +150,86 @@ impl SimpleSync { /// /// Disconnects the peer if required. fn process_hello(&mut self, peer_id: PeerId, hello: HelloMessage) { - // If we update the manager we may need to drive the sync. This flag lies out of scope of - // the beacon chain so that the process sync command has no long-lived beacon chain - // references. - let mut process_sync = false; + let remote = PeerSyncInfo::from(hello); + let local = PeerSyncInfo::from(&self.chain); + + let start_slot = |epoch: Epoch| epoch.start_slot(T::EthSpec::slots_per_epoch()); + + if local.fork_version != remote.fork_version { + // The node is on a different network/fork, disconnect them. 
+ debug!( + self.log, "HandshakeFailure"; + "peer" => format!("{:?}", peer_id), + "reason" => "network_id" + ); + + self.network + .disconnect(peer_id.clone(), GoodbyeReason::IrrelevantNetwork); + } else if remote.finalized_epoch <= local.finalized_epoch + && remote.finalized_root != Hash256::zero() + && local.finalized_root != Hash256::zero() + && (self.chain.root_at_slot(start_slot(remote.finalized_epoch)) + != Some(remote.finalized_root)) { - // scope of beacon chain reference - let chain = match self.chain.upgrade() { - Some(chain) => chain, - None => { - info!(self.log, "Sync shutting down"; - "reason" => "Beacon chain dropped"); - return; - } - }; + // The remotes finalized epoch is less than or greater than ours, but the block root is + // different to the one in our chain. + // + // Therefore, the node is on a different chain and we should not communicate with them. + debug!( + self.log, "HandshakeFailure"; + "peer" => format!("{:?}", peer_id), + "reason" => "different finalized chain" + ); + self.network + .disconnect(peer_id.clone(), GoodbyeReason::IrrelevantNetwork); + } else if remote.finalized_epoch < local.finalized_epoch { + // The node has a lower finalized epoch, their chain is not useful to us. There are two + // cases where a node can have a lower finalized epoch: + // + // ## The node is on the same chain + // + // If a node is on the same chain but has a lower finalized epoch, their head must be + // lower than ours. Therefore, we have nothing to request from them. + // + // ## The node is on a fork + // + // If a node is on a fork that has a lower finalized epoch, switching to that fork would + // cause us to revert a finalized block. This is not permitted, therefore we have no + // interest in their blocks. 
+ debug!( + self.log, + "NaivePeer"; + "peer" => format!("{:?}", peer_id), + "reason" => "lower finalized epoch" + ); + } else if self + .chain + .store + .exists::>(&remote.head_root) + .unwrap_or_else(|_| false) + { + trace!( + self.log, "Peer with known chain found"; + "peer" => format!("{:?}", peer_id), + "remote_head_slot" => remote.head_slot, + "remote_latest_finalized_epoch" => remote.finalized_epoch, + ); - let remote = PeerSyncInfo::from(hello); - let local = PeerSyncInfo::from(&chain); - - let start_slot = |epoch: Epoch| epoch.start_slot(T::EthSpec::slots_per_epoch()); - - if local.fork_version != remote.fork_version { - // The node is on a different network/fork, disconnect them. - debug!( - self.log, "HandshakeFailure"; - "peer" => format!("{:?}", peer_id), - "reason" => "network_id" - ); - - self.network - .disconnect(peer_id.clone(), GoodbyeReason::IrrelevantNetwork); - } else if remote.finalized_epoch <= local.finalized_epoch - && remote.finalized_root != Hash256::zero() - && local.finalized_root != Hash256::zero() - && (chain.root_at_slot(start_slot(remote.finalized_epoch)) - != Some(remote.finalized_root)) - { - // The remotes finalized epoch is less than or greater than ours, but the block root is - // different to the one in our chain. - // - // Therefore, the node is on a different chain and we should not communicate with them. - debug!( - self.log, "HandshakeFailure"; - "peer" => format!("{:?}", peer_id), - "reason" => "different finalized chain" - ); - self.network - .disconnect(peer_id.clone(), GoodbyeReason::IrrelevantNetwork); - } else if remote.finalized_epoch < local.finalized_epoch { - // The node has a lower finalized epoch, their chain is not useful to us. There are two - // cases where a node can have a lower finalized epoch: - // - // ## The node is on the same chain - // - // If a node is on the same chain but has a lower finalized epoch, their head must be - // lower than ours. Therefore, we have nothing to request from them. 
- // - // ## The node is on a fork - // - // If a node is on a fork that has a lower finalized epoch, switching to that fork would - // cause us to revert a finalized block. This is not permitted, therefore we have no - // interest in their blocks. - debug!( - self.log, - "NaivePeer"; - "peer" => format!("{:?}", peer_id), - "reason" => "lower finalized epoch" - ); - } else if chain - .store - .exists::>(&remote.head_root) - .unwrap_or_else(|_| false) - { - trace!( - self.log, "Peer with known chain found"; - "peer" => format!("{:?}", peer_id), - "remote_head_slot" => remote.head_slot, - "remote_latest_finalized_epoch" => remote.finalized_epoch, - ); - - // If the node's best-block is already known to us and they are close to our current - // head, treat them as a fully sync'd peer. - self.manager.add_peer(peer_id, remote); - process_sync = true; - } else { - // The remote node has an equal or great finalized epoch and we don't know it's head. - // - // Therefore, there are some blocks between the local finalized epoch and the remote - // head that are worth downloading. - debug!( - self.log, "UsefulPeer"; - "peer" => format!("{:?}", peer_id), - "local_finalized_epoch" => local.finalized_epoch, - "remote_latest_finalized_epoch" => remote.finalized_epoch, - ); - - self.manager.add_peer(peer_id, remote); - process_sync = true - } - } // end beacon chain reference scope - - if process_sync { - self.process_sync(); - } - } - - /// This function drives the `ImportManager` state machine. The outcomes it provides are - /// actioned until the `ImportManager` is idle. 
- fn process_sync(&mut self) { - loop { - match self.manager.poll() { - ImportManagerOutcome::Hello(peer_id) => { - trace!( - self.log, - "RPC Request"; - "method" => "HELLO", - "peer" => format!("{:?}", peer_id) - ); - if let Some(chain) = self.chain.upgrade() { - self.network.send_rpc_request( - None, - peer_id, - RPCRequest::Hello(hello_message(&chain)), - ); - } - } - ImportManagerOutcome::RequestBlocks { - peer_id, - request_id, - request, - } => { - trace!( - self.log, - "RPC Request"; - "method" => "BeaconBlocks", - "id" => request_id, - "count" => request.count, - "peer" => format!("{:?}", peer_id) - ); - self.network.send_rpc_request( - Some(request_id), - peer_id.clone(), - RPCRequest::BeaconBlocks(request), - ); - } - ImportManagerOutcome::RecentRequest { - peer_id, - request_id, - request, - } => { - trace!( - self.log, - "RPC Request"; - "method" => "RecentBeaconBlocks", - "count" => request.block_roots.len(), - "peer" => format!("{:?}", peer_id) - ); - self.network.send_rpc_request( - Some(request_id), - peer_id.clone(), - RPCRequest::RecentBeaconBlocks(request), - ); - } - ImportManagerOutcome::DownvotePeer(peer_id) => { - trace!( - self.log, - "Peer downvoted"; - "peer" => format!("{:?}", peer_id) - ); - // TODO: Implement reputation - self.network - .disconnect(peer_id.clone(), GoodbyeReason::Fault); - } - ImportManagerOutcome::Idle => { - // nothing to do - return; - } - } + // If the node's best-block is already known to us and they are close to our current + // head, treat them as a fully sync'd peer. + self.send_to_sync(SyncMessage::AddPeer(peer_id, remote)); + } else { + // The remote node has an equal or great finalized epoch and we don't know it's head. + // + // Therefore, there are some blocks between the local finalized epoch and the remote + // head that are worth downloading. 
+ debug!( + self.log, "UsefulPeer"; + "peer" => format!("{:?}", peer_id), + "local_finalized_epoch" => local.finalized_epoch, + "remote_latest_finalized_epoch" => remote.finalized_epoch, + ); + self.send_to_sync(SyncMessage::AddPeer(peer_id, remote)); } } @@ -320,20 +240,11 @@ impl SimpleSync { request_id: RequestId, request: RecentBeaconBlocksRequest, ) { - let chain = match self.chain.upgrade() { - Some(chain) => chain, - None => { - info!(self.log, "Sync shutting down"; - "reason" => "Beacon chain dropped"); - return; - } - }; - let blocks: Vec> = request .block_roots .iter() .filter_map(|root| { - if let Ok(Some(block)) = chain.store.get::>(root) { + if let Ok(Some(block)) = self.chain.store.get::>(root) { Some(block) } else { debug!( @@ -370,15 +281,6 @@ impl SimpleSync { request_id: RequestId, req: BeaconBlocksRequest, ) { - let chain = match self.chain.upgrade() { - Some(chain) => chain, - None => { - info!(self.log, "Sync shutting down"; - "reason" => "Beacon chain dropped"); - return; - } - }; - debug!( self.log, "BeaconBlocksRequest"; @@ -392,14 +294,15 @@ impl SimpleSync { // In the current implementation we read from the db then filter out out-of-range blocks. // Improving the db schema to prevent this would be ideal. 
- let mut blocks: Vec> = chain + let mut blocks: Vec> = self + .chain .rev_iter_block_roots() .filter(|(_root, slot)| { req.start_slot <= slot.as_u64() && req.start_slot + req.count > slot.as_u64() }) .take_while(|(_root, slot)| req.start_slot <= slot.as_u64()) .filter_map(|(root, _slot)| { - if let Ok(Some(block)) = chain.store.get::>(&root) { + if let Ok(Some(block)) = self.chain.store.get::>(&root) { Some(block) } else { warn!( @@ -423,7 +326,7 @@ impl SimpleSync { "peer" => format!("{:?}", peer_id), "msg" => "Failed to return all requested hashes", "start_slot" => req.start_slot, - "current_slot" => chain.slot().unwrap_or_else(|_| Slot::from(0_u64)).as_u64(), + "current_slot" => self.chain.slot().unwrap_or_else(|_| Slot::from(0_u64)).as_u64(), "requested" => req.count, "returned" => blocks.len(), ); @@ -449,10 +352,11 @@ impl SimpleSync { "count" => beacon_blocks.len(), ); - self.manager - .beacon_blocks_response(peer_id, request_id, beacon_blocks); - - self.process_sync(); + self.send_to_sync(SyncMessage::RecentBeaconBlocksResponse { + peer_id, + request_id, + beacon_blocks, + }); } /// Handle a `RecentBeaconBlocks` response from the peer. @@ -469,10 +373,11 @@ impl SimpleSync { "count" => beacon_blocks.len(), ); - self.manager - .recent_blocks_response(peer_id, request_id, beacon_blocks); - - self.process_sync(); + self.send_to_sync(SyncMessage::BeaconBlocksResponse { + peer_id, + request_id, + beacon_blocks, + }); } /// Process a gossip message declaring a new block. @@ -481,16 +386,7 @@ impl SimpleSync { /// /// Returns a `bool` which, if `true`, indicates we should forward the block to our peers. 
pub fn on_block_gossip(&mut self, peer_id: PeerId, block: BeaconBlock) -> bool { - let chain = match self.chain.upgrade() { - Some(chain) => chain, - None => { - info!(self.log, "Sync shutting down"; - "reason" => "Beacon chain dropped"); - return false; - } - }; - - if let Ok(outcome) = chain.process_block(block.clone()) { + if let Ok(outcome) = self.chain.process_block(block.clone()) { match outcome { BlockProcessingOutcome::Processed { .. } => { trace!(self.log, "Gossipsub block processed"; @@ -501,7 +397,7 @@ impl SimpleSync { // Inform the sync manager to find parents for this block trace!(self.log, "Block with unknown parent received"; "peer_id" => format!("{:?}",peer_id)); - self.manager.add_unknown_block(block.clone(), peer_id); + self.send_to_sync(SyncMessage::UnknownBlock(peer_id, block.clone())); SHOULD_FORWARD_GOSSIP_BLOCK } BlockProcessingOutcome::FutureSlot { @@ -523,16 +419,7 @@ impl SimpleSync { /// /// Not currently implemented. pub fn on_attestation_gossip(&mut self, _peer_id: PeerId, msg: Attestation) { - let chain = match self.chain.upgrade() { - Some(chain) => chain, - None => { - info!(self.log, "Sync shutting down"; - "reason" => "Beacon chain dropped"); - return; - } - }; - - match chain.process_attestation(msg) { + match self.chain.process_attestation(msg) { Ok(outcome) => info!( self.log, "Processed attestation"; @@ -547,7 +434,7 @@ impl SimpleSync { } /// Build a `HelloMessage` representing the state of the given `beacon_chain`. 
-fn hello_message(beacon_chain: &BeaconChain) -> HelloMessage { +pub(crate) fn hello_message(beacon_chain: &BeaconChain) -> HelloMessage { let state = &beacon_chain.head().beacon_state; HelloMessage { From 04b47a357b997e8da7bf37c4830c2a48090e0720 Mon Sep 17 00:00:00 2001 From: Age Manning Date: Sat, 7 Sep 2019 09:31:05 +1000 Subject: [PATCH 238/305] Correct bugs in new sync threading --- beacon_node/network/src/message_handler.rs | 7 ++++--- beacon_node/network/src/service.rs | 9 ++------- beacon_node/network/src/sync/manager.rs | 20 +++++++------------- beacon_node/network/src/sync/simple_sync.rs | 6 +++--- tests/ef_tests/eth2.0-spec-tests | 2 +- 5 files changed, 17 insertions(+), 27 deletions(-) diff --git a/beacon_node/network/src/message_handler.rs b/beacon_node/network/src/message_handler.rs index be8fa21f8..782d2129e 100644 --- a/beacon_node/network/src/message_handler.rs +++ b/beacon_node/network/src/message_handler.rs @@ -9,7 +9,7 @@ use eth2_libp2p::{ }; use futures::future::Future; use futures::stream::Stream; -use slog::{debug, trace, warn}; +use slog::{debug, o, trace, warn}; use ssz::{Decode, DecodeError}; use std::sync::Arc; use tokio::sync::mpsc; @@ -51,7 +51,8 @@ impl MessageHandler { executor: &tokio::runtime::TaskExecutor, log: slog::Logger, ) -> error::Result> { - trace!(log, "Service starting"); + let message_handler_log = log.new(o!("Service"=> "Message Handler")); + trace!(message_handler_log, "Service starting"); let (handler_send, handler_recv) = mpsc::unbounded_channel(); @@ -63,7 +64,7 @@ impl MessageHandler { let mut handler = MessageHandler { network_send, message_processor, - log: log.clone(), + log: message_handler_log, }; // spawn handler task and move the message handler instance into the spawned thread diff --git a/beacon_node/network/src/service.rs b/beacon_node/network/src/service.rs index f54630615..1357b5495 100644 --- a/beacon_node/network/src/service.rs +++ b/beacon_node/network/src/service.rs @@ -34,13 +34,8 @@ impl Service { 
// build the network channel let (network_send, network_recv) = mpsc::unbounded_channel::(); // launch message handler thread - let message_handler_log = log.new(o!("Service" => "MessageHandler")); - let message_handler_send = MessageHandler::spawn( - beacon_chain, - network_send.clone(), - executor, - message_handler_log, - )?; + let message_handler_send = + MessageHandler::spawn(beacon_chain, network_send.clone(), executor, log.clone())?; let network_log = log.new(o!("Service" => "Network")); // launch libp2p service diff --git a/beacon_node/network/src/sync/manager.rs b/beacon_node/network/src/sync/manager.rs index 12bef95fa..171d0fdf0 100644 --- a/beacon_node/network/src/sync/manager.rs +++ b/beacon_node/network/src/sync/manager.rs @@ -251,7 +251,7 @@ pub fn spawn( // create an instance of the SyncManager let sync_manager = SyncManager { chain: beacon_chain, - state: ManagerState::Regular, + state: ManagerState::Stalled, input_channel: sync_recv, network, import_queue: HashMap::new(), @@ -510,7 +510,7 @@ impl SyncManager { &mut self, peer_id: PeerId, request_id: RequestId, - blocks: Vec>, + mut blocks: Vec>, ) { // find the request let parent_request = match self @@ -545,6 +545,11 @@ impl SyncManager { return; } + // add the block to response + parent_request + .downloaded_blocks + .push(blocks.pop().expect("must exist")); + // queue for processing parent_request.state = BlockRequestsState::ReadyToProcess; } @@ -594,7 +599,6 @@ impl SyncManager { "peer" => format!("{:?}", peer_id), ); self.full_peers.insert(peer_id); - self.update_state(); } /* Processing State Functions */ @@ -1077,7 +1081,6 @@ impl Future for SyncManager { Ok(Async::Ready(Some(message))) => match message { SyncMessage::AddPeer(peer_id, info) => { self.add_peer(peer_id, info); - dbg!("add peer"); } SyncMessage::BeaconBlocksResponse { peer_id, @@ -1118,17 +1121,13 @@ impl Future for SyncManager { //need to be called. 
let mut re_run = false; - dbg!(self.import_queue.len()); // only process batch requests if there are any if !self.import_queue.is_empty() { // process potential block requests self.process_potential_block_requests(); - dbg!(self.import_queue.len()); // process any complete long-range batches re_run = re_run || self.process_complete_batches(); - dbg!(self.import_queue.len()); - dbg!(&self.state); } // only process parent objects if we are in regular sync @@ -1140,9 +1139,6 @@ impl Future for SyncManager { re_run = re_run || self.process_complete_parent_requests(); } - dbg!(self.import_queue.len()); - dbg!(&self.state); - // Shutdown the thread if the chain has termined if let None = self.chain.upgrade() { return Ok(Async::Ready(())); @@ -1152,8 +1148,6 @@ impl Future for SyncManager { break; } } - dbg!(self.import_queue.len()); - dbg!(&self.state); // update the state of the manager self.update_state(); diff --git a/beacon_node/network/src/sync/simple_sync.rs b/beacon_node/network/src/sync/simple_sync.rs index d8b5f2dbf..c54c481c7 100644 --- a/beacon_node/network/src/sync/simple_sync.rs +++ b/beacon_node/network/src/sync/simple_sync.rs @@ -352,7 +352,7 @@ impl MessageProcessor { "count" => beacon_blocks.len(), ); - self.send_to_sync(SyncMessage::RecentBeaconBlocksResponse { + self.send_to_sync(SyncMessage::BeaconBlocksResponse { peer_id, request_id, beacon_blocks, @@ -368,12 +368,12 @@ impl MessageProcessor { ) { debug!( self.log, - "BeaconBlocksResponse"; + "RecentBeaconBlocksResponse"; "peer" => format!("{:?}", peer_id), "count" => beacon_blocks.len(), ); - self.send_to_sync(SyncMessage::BeaconBlocksResponse { + self.send_to_sync(SyncMessage::RecentBeaconBlocksResponse { peer_id, request_id, beacon_blocks, diff --git a/tests/ef_tests/eth2.0-spec-tests b/tests/ef_tests/eth2.0-spec-tests index aaa1673f5..ae6dd9011 160000 --- a/tests/ef_tests/eth2.0-spec-tests +++ b/tests/ef_tests/eth2.0-spec-tests @@ -1 +1 @@ -Subproject commit 
aaa1673f508103e11304833e0456e4149f880065 +Subproject commit ae6dd9011df05fab8c7e651c09cf9c940973bf81 From 69442a2ab302ed04ecbbc86009f2179803a6dd00 Mon Sep 17 00:00:00 2001 From: Age Manning Date: Sun, 8 Sep 2019 01:57:56 +1000 Subject: [PATCH 239/305] Correct warnings --- beacon_node/rest_api/src/helpers.rs | 4 +--- beacon_node/rest_api/src/url_query.rs | 2 +- beacon_node/rest_api/src/validator.rs | 5 ++--- 3 files changed, 4 insertions(+), 7 deletions(-) diff --git a/beacon_node/rest_api/src/helpers.rs b/beacon_node/rest_api/src/helpers.rs index 6b0211662..a21f1831e 100644 --- a/beacon_node/rest_api/src/helpers.rs +++ b/beacon_node/rest_api/src/helpers.rs @@ -2,9 +2,7 @@ use crate::{ApiError, ApiResult}; use beacon_chain::{BeaconChain, BeaconChainTypes}; use bls::PublicKey; use hex; -use hyper::{Body, Request, StatusCode}; -use serde::de::value::StringDeserializer; -use serde_json::Deserializer; +use hyper::{Body, Request}; use store::{iter::AncestorIter, Store}; use types::{BeaconState, EthSpec, Hash256, RelativeEpoch, Slot}; diff --git a/beacon_node/rest_api/src/url_query.rs b/beacon_node/rest_api/src/url_query.rs index e39a9a449..3802ff831 100644 --- a/beacon_node/rest_api/src/url_query.rs +++ b/beacon_node/rest_api/src/url_query.rs @@ -64,7 +64,7 @@ impl<'a> UrlQuery<'a> { /// Returns a vector of all values present where `key` is in `keys /// /// If no match is found, an `InvalidQueryParams` error is returned. 
- pub fn all_of(mut self, key: &str) -> Result, ApiError> { + pub fn all_of(self, key: &str) -> Result, ApiError> { let queries: Vec<_> = self .0 .filter_map(|(k, v)| { diff --git a/beacon_node/rest_api/src/validator.rs b/beacon_node/rest_api/src/validator.rs index 365b7e552..0440a7368 100644 --- a/beacon_node/rest_api/src/validator.rs +++ b/beacon_node/rest_api/src/validator.rs @@ -5,9 +5,8 @@ use bls::PublicKey; use hyper::{Body, Request}; use serde::{Deserialize, Serialize}; use std::sync::Arc; -use store::Store; use types::beacon_state::EthSpec; -use types::{BeaconBlock, BeaconState, Epoch, RelativeEpoch, Shard, Slot}; +use types::{Epoch, RelativeEpoch, Shard, Slot}; #[derive(Debug, Serialize, Deserialize)] pub struct ValidatorDuty { @@ -61,7 +60,7 @@ pub fn get_validator_duties(req: Request) - )) })?; //TODO: Handle an array of validators, currently only takes one - let mut validators: Vec = match query.all_of("validator_pubkeys") { + let validators: Vec = match query.all_of("validator_pubkeys") { Ok(v) => v .iter() .map(|pk| parse_pubkey(pk)) From 9461b5063b3ea9e0af05527af7dacf3d9d13f438 Mon Sep 17 00:00:00 2001 From: Paul Hauner Date: Sun, 8 Sep 2019 04:19:54 +1000 Subject: [PATCH 240/305] Replace EF tests submodule with a makefile --- .gitmodules | 3 --- Makefile | 31 +++++++++++++++++++++++++ book/src/setup.md | 40 ++++++++++++++++++++++---------- tests/ef_tests/.gitignore | 1 + tests/ef_tests/eth2.0-spec-tests | 1 - tests/ef_tests/src/handler.rs | 5 ++++ 6 files changed, 65 insertions(+), 16 deletions(-) create mode 100644 Makefile create mode 100644 tests/ef_tests/.gitignore delete mode 160000 tests/ef_tests/eth2.0-spec-tests diff --git a/.gitmodules b/.gitmodules index 1b0e150ce..e69de29bb 100644 --- a/.gitmodules +++ b/.gitmodules @@ -1,3 +0,0 @@ -[submodule "tests/ef_tests/eth2.0-spec-tests"] - path = tests/ef_tests/eth2.0-spec-tests - url = https://github.com/ethereum/eth2.0-spec-tests diff --git a/Makefile b/Makefile new file mode 100644 index 
000000000..d5517ed23 --- /dev/null +++ b/Makefile @@ -0,0 +1,31 @@ +TESTS_TAG := v0.8.3 +TESTS = general minimal mainnet + +TESTS_BASE_DIR := ./tests/ef_tests +REPO_NAME := eth2.0-spec-tests +OUTPUT_DIR := $(TESTS_BASE_DIR)/$(REPO_NAME) + +BASE_URL := https://github.com/ethereum/$(REPO_NAME)/releases/download/$(SPEC_VERSION) + +release: + cargo build --all --release + +clean_ef_tests: + rm -r $(OUTPUT_DIR) + +ef_tests: download_tests extract_tests + mkdir $(OUTPUT_DIR) + for test in $(TESTS); do \ + tar -C $(OUTPUT_DIR) -xvf $(TESTS_BASE_DIR)/$$test.tar ;\ + rm $(TESTS_BASE_DIR)/$$test.tar ;\ + done + +extract_tests: + for test in $(TESTS); do \ + gzip -df $(TESTS_BASE_DIR)/$$test.tar.gz ;\ + done + +download_tests: + for test in $(TESTS); do \ + wget -P $(TESTS_BASE_DIR) $(BASE_URL)/$$test.tar.gz; \ + done diff --git a/book/src/setup.md b/book/src/setup.md index e53ca93d8..532de3fc0 100644 --- a/book/src/setup.md +++ b/book/src/setup.md @@ -9,11 +9,8 @@ See the [Quick instructions](#quick-instructions) for a summary or the 1. Install Rust + Cargo with [rustup](https://rustup.rs/). 1. Install build dependencies using your package manager. - - `$ clang protobuf libssl-dev cmake git-lfs` - - Ensure [git-lfs](https://git-lfs.github.com/) is installed with `git lfs - install`. -1. Clone the [sigp/lighthouse](https://github.com/sigp/lighthouse), ensuring to - **initialize submodules**. + - `$ clang protobuf libssl-dev cmake` +1. Clone the [sigp/lighthouse](https://github.com/sigp/lighthouse). 1. In the root of the repo, run the tests with `cargo test --all --release`. 1. Then, build the binaries with `cargo build --all --release`. 1. Lighthouse is now fully built and tested. @@ -37,13 +34,8 @@ steps: - `protobuf`: required for protobuf serialization (gRPC) - `libssl-dev`: also gRPC - `cmake`: required for building protobuf - - `git-lfs`: The Git extension for [Large File - Support](https://git-lfs.github.com/) (required for Ethereum Foundation - test vectors). - 1. 
Clone the repository with submodules: `git clone --recursive - https://github.com/sigp/lighthouse`. If you're already cloned the repo, - ensure testing submodules are present: `$ git submodule init; git - submodule update` + 1. Clone the repository with submodules: `git clone + https://github.com/sigp/lighthouse`. 1. Change directory to the root of the repository. 1. Run the test suite with `cargo test --all --release`. The build and test process can take several minutes. If you experience any failures on @@ -63,3 +55,27 @@ Perl](http://strawberryperl.com/), or alternatively use a choco install command Additionally, the dependency `protoc-grpcio v0.3.1` is reported to have issues compiling in Windows. You can specify a known working version by editing version in `protos/Cargo.toml` section to `protoc-grpcio = "<=0.3.0"`. + +## eth2.0-spec-tests + +The +[ethereum/eth2.0-spec-tests](https://github.com/ethereum/eth2.0-spec-tests/) +repository contains a large set of tests that verify Lighthouse behaviour +against the Ethereum Foundation specifications. + +The `tests/ef_tests` crate runs these tests and it has some interesting +behaviours: + +- If the `tests/ef_tests/eth2.0-spec-tests` directory is not present, all tests + indicate a `pass` when they did not actually run. +- If that directory _is_ present, the tests are executed faithfully, failing if + a discrepancy is found. + +The `tests/ef_tests/eth2.0-spec-tests` directory is not present by default. 
To +obtain it, use the Makefile in the root of the repository: + +``` +make ef_tests +``` + +_Note: this will download 100+ MB of test files from the [ethereum/eth2.0-spec-tests](https://github.com/ethereum/eth2.0-spec-tests/)._ diff --git a/tests/ef_tests/.gitignore b/tests/ef_tests/.gitignore new file mode 100644 index 000000000..a83c5aa96 --- /dev/null +++ b/tests/ef_tests/.gitignore @@ -0,0 +1 @@ +/eth2.0-spec-tests diff --git a/tests/ef_tests/eth2.0-spec-tests b/tests/ef_tests/eth2.0-spec-tests deleted file mode 160000 index ae6dd9011..000000000 --- a/tests/ef_tests/eth2.0-spec-tests +++ /dev/null @@ -1 +0,0 @@ -Subproject commit ae6dd9011df05fab8c7e651c09cf9c940973bf81 diff --git a/tests/ef_tests/src/handler.rs b/tests/ef_tests/src/handler.rs index e5d175e11..e8c83e1f8 100644 --- a/tests/ef_tests/src/handler.rs +++ b/tests/ef_tests/src/handler.rs @@ -31,6 +31,11 @@ pub trait Handler { .join(Self::runner_name()) .join(Self::handler_name()); + // If the directory containing the tests does not exist, just let all tests pass. 
+ if !handler_path.exists() { + return; + } + // Iterate through test suites let test_cases = fs::read_dir(&handler_path) .expect("handler dir exists") From e8619399f254ac301db7591ddb6c5905347960e7 Mon Sep 17 00:00:00 2001 From: Age Manning Date: Sun, 8 Sep 2019 07:10:36 +1000 Subject: [PATCH 241/305] Patch to correct for single byte RPC responses --- beacon_node/eth2-libp2p/src/rpc/codec/base.rs | 8 +++++--- beacon_node/eth2-libp2p/src/rpc/codec/ssz.rs | 10 +++++++--- 2 files changed, 12 insertions(+), 6 deletions(-) diff --git a/beacon_node/eth2-libp2p/src/rpc/codec/base.rs b/beacon_node/eth2-libp2p/src/rpc/codec/base.rs index a8a239867..973567473 100644 --- a/beacon_node/eth2-libp2p/src/rpc/codec/base.rs +++ b/beacon_node/eth2-libp2p/src/rpc/codec/base.rs @@ -101,13 +101,15 @@ where type Error = ::Error; fn decode(&mut self, src: &mut BytesMut) -> Result, Self::Error> { + // if we have only received the response code, wait for more bytes + if src.len() == 1 { + return Ok(None); + } + // using the response code determine which kind of payload needs to be decoded. 
let response_code = { if let Some(resp_code) = self.response_code { resp_code } else { - // buffer should not be empty - debug_assert!(!src.is_empty()); - let resp_byte = src.split_to(1); let mut resp_code_byte = [0; 1]; resp_code_byte.copy_from_slice(&resp_byte); diff --git a/beacon_node/eth2-libp2p/src/rpc/codec/ssz.rs b/beacon_node/eth2-libp2p/src/rpc/codec/ssz.rs index 1966bab62..d0e4d01cf 100644 --- a/beacon_node/eth2-libp2p/src/rpc/codec/ssz.rs +++ b/beacon_node/eth2-libp2p/src/rpc/codec/ssz.rs @@ -4,7 +4,7 @@ use crate::rpc::{ protocol::{ProtocolId, RPCError}, }; use crate::rpc::{ErrorMessage, RPCErrorResponse, RPCRequest, RPCResponse}; -use bytes::{Bytes, BytesMut}; +use bytes::{BufMut, Bytes, BytesMut}; use ssz::{Decode, Encode}; use tokio::codec::{Decoder, Encoder}; use unsigned_varint::codec::UviBytes; @@ -56,6 +56,10 @@ impl Encoder for SSZInboundCodec { .inner .encode(Bytes::from(bytes), dst) .map_err(RPCError::from); + } else { + // payload is empty, add a 0-byte length prefix + dst.reserve(1); + dst.put_u8(0); } Ok(()) } @@ -152,8 +156,8 @@ impl Decoder for SSZOutboundCodec { type Error = RPCError; fn decode(&mut self, src: &mut BytesMut) -> Result, Self::Error> { - if src.is_empty() { - // the object sent could be empty. We return the empty object if this is the case + if src.len() == 1 && src[0] == 0_u8 { + // the object is empty. 
We return the empty object if this is the case match self.protocol.message_name.as_str() { "hello" => match self.protocol.version.as_str() { "1" => Err(RPCError::Custom( From 6a870d468cbae4da17db45fc9861309d0972c7c2 Mon Sep 17 00:00:00 2001 From: Paul Hauner Date: Sun, 8 Sep 2019 12:23:37 -0400 Subject: [PATCH 242/305] Add ssz_fixed_len method to ssz::Encode --- beacon_node/eth2-libp2p/src/rpc/methods.rs | 35 +++++++- eth2/types/src/lib.rs | 5 +- eth2/types/src/slot_epoch_macros.rs | 4 + eth2/utils/bls/src/macros.rs | 4 + eth2/utils/ssz/examples/struct_definition.rs | 7 ++ eth2/utils/ssz/src/encode.rs | 2 + eth2/utils/ssz/src/encode/impls.rs | 68 ++++++++++++++ eth2/utils/ssz/src/lib.rs | 1 - eth2/utils/ssz/src/macros.rs | 95 -------------------- eth2/utils/ssz/tests/tests.rs | 1 + eth2/utils/ssz_derive/src/lib.rs | 25 +++++- eth2/utils/ssz_types/src/bitfield.rs | 35 ++++++++ eth2/utils/ssz_types/src/fixed_vector.rs | 5 ++ eth2/utils/ssz_types/src/variable_list.rs | 5 ++ tests/ef_tests/src/cases/ssz_static.rs | 1 + 15 files changed, 191 insertions(+), 102 deletions(-) diff --git a/beacon_node/eth2-libp2p/src/rpc/methods.rs b/beacon_node/eth2-libp2p/src/rpc/methods.rs index 49813abe9..ee8ad4860 100644 --- a/beacon_node/eth2-libp2p/src/rpc/methods.rs +++ b/beacon_node/eth2-libp2p/src/rpc/methods.rs @@ -1,6 +1,5 @@ //!Available RPC methods types and ids. 
-use ssz::{impl_decode_via_from, impl_encode_via_from}; use ssz_derive::{Decode, Encode}; use types::{Epoch, Hash256, Slot}; @@ -66,8 +65,38 @@ impl Into for GoodbyeReason { } } -impl_encode_via_from!(GoodbyeReason, u64); -impl_decode_via_from!(GoodbyeReason, u64); +impl ssz::Encode for GoodbyeReason { + fn is_ssz_fixed_len() -> bool { + ::is_ssz_fixed_len() + } + + fn ssz_fixed_len() -> usize { + ::ssz_fixed_len() + } + + fn ssz_bytes_len(&self) -> usize { + 0_u64.ssz_bytes_len() + } + + fn ssz_append(&self, buf: &mut Vec) { + let conv: u64 = self.clone().into(); + conv.ssz_append(buf) + } +} + +impl ssz::Decode for GoodbyeReason { + fn is_ssz_fixed_len() -> bool { + ::is_ssz_fixed_len() + } + + fn ssz_fixed_len() -> usize { + ::ssz_fixed_len() + } + + fn from_ssz_bytes(bytes: &[u8]) -> Result { + u64::from_ssz_bytes(bytes).and_then(|n| Ok(n.into())) + } +} /// Request a number of beacon block roots from a peer. #[derive(Encode, Decode, Clone, Debug, PartialEq)] diff --git a/eth2/types/src/lib.rs b/eth2/types/src/lib.rs index 3edf8b36b..d1eaa393f 100644 --- a/eth2/types/src/lib.rs +++ b/eth2/types/src/lib.rs @@ -86,5 +86,8 @@ pub type AttesterMap = HashMap<(u64, u64), Vec>; /// Maps a slot to a block proposer. pub type ProposerMap = HashMap; -pub use bls::{AggregatePublicKey, AggregateSignature, Keypair, PublicKey, SecretKey, Signature}; +pub use bls::{ + AggregatePublicKey, AggregateSignature, Keypair, PublicKey, PublicKeyBytes, SecretKey, + Signature, SignatureBytes, +}; pub use ssz_types::{typenum, typenum::Unsigned, BitList, BitVector, FixedVector, VariableList}; diff --git a/eth2/types/src/slot_epoch_macros.rs b/eth2/types/src/slot_epoch_macros.rs index 62ca6b3af..3bd54ee2d 100644 --- a/eth2/types/src/slot_epoch_macros.rs +++ b/eth2/types/src/slot_epoch_macros.rs @@ -201,6 +201,10 @@ macro_rules! 
impl_ssz { ::ssz_fixed_len() } + fn ssz_bytes_len(&self) -> usize { + 0_u64.ssz_bytes_len() + } + fn ssz_append(&self, buf: &mut Vec) { self.0.ssz_append(buf) } diff --git a/eth2/utils/bls/src/macros.rs b/eth2/utils/bls/src/macros.rs index 09838b73e..e8bd3dd04 100644 --- a/eth2/utils/bls/src/macros.rs +++ b/eth2/utils/bls/src/macros.rs @@ -9,6 +9,10 @@ macro_rules! impl_ssz { $byte_size } + fn ssz_bytes_len(&self) -> usize { + $byte_size + } + fn ssz_append(&self, buf: &mut Vec) { buf.append(&mut self.as_bytes()) } diff --git a/eth2/utils/ssz/examples/struct_definition.rs b/eth2/utils/ssz/examples/struct_definition.rs index fa3ed2a64..0971e21da 100644 --- a/eth2/utils/ssz/examples/struct_definition.rs +++ b/eth2/utils/ssz/examples/struct_definition.rs @@ -12,6 +12,13 @@ impl Encode for Foo { ::is_ssz_fixed_len() && as Encode>::is_ssz_fixed_len() } + fn ssz_bytes_len(&self) -> usize { + ::ssz_fixed_len() + + ssz::BYTES_PER_LENGTH_OFFSET + + ::ssz_fixed_len() + + self.b.ssz_bytes_len() + } + fn ssz_append(&self, buf: &mut Vec) { let offset = ::ssz_fixed_len() + as Encode>::ssz_fixed_len() diff --git a/eth2/utils/ssz/src/encode.rs b/eth2/utils/ssz/src/encode.rs index 6ceb08deb..5113fb71a 100644 --- a/eth2/utils/ssz/src/encode.rs +++ b/eth2/utils/ssz/src/encode.rs @@ -27,6 +27,8 @@ pub trait Encode { BYTES_PER_LENGTH_OFFSET } + fn ssz_bytes_len(&self) -> usize; + /// Returns the full-form encoding of this object. /// /// The default implementation of this method should suffice for most cases. diff --git a/eth2/utils/ssz/src/encode/impls.rs b/eth2/utils/ssz/src/encode/impls.rs index 3d68d8911..d25e79370 100644 --- a/eth2/utils/ssz/src/encode/impls.rs +++ b/eth2/utils/ssz/src/encode/impls.rs @@ -13,6 +13,10 @@ macro_rules! impl_encodable_for_uint { $bit_size / 8 } + fn ssz_bytes_len(&self) -> usize { + $bit_size / 8 + } + fn ssz_append(&self, buf: &mut Vec) { buf.extend_from_slice(&self.to_le_bytes()); } @@ -58,6 +62,23 @@ macro_rules! 
impl_encode_for_tuples { } } + fn ssz_bytes_len(&self) -> usize { + if ::is_ssz_fixed_len() { + ::ssz_fixed_len() + } else { + let mut len = 0; + $( + len += if <$T as Encode>::is_ssz_fixed_len() { + <$T as Encode>::ssz_fixed_len() + } else { + BYTES_PER_LENGTH_OFFSET + + self.$idx.ssz_bytes_len() + }; + )* + len + } + } + fn ssz_append(&self, buf: &mut Vec) { let offset = $( <$T as Encode>::ssz_fixed_len() + @@ -185,6 +206,19 @@ impl Encode for Option { false } + fn ssz_bytes_len(&self) -> usize { + if let Some(some) = self { + let len = if ::is_ssz_fixed_len() { + ::ssz_fixed_len() + } else { + some.ssz_bytes_len() + }; + len + BYTES_PER_LENGTH_OFFSET + } else { + BYTES_PER_LENGTH_OFFSET + } + } + fn ssz_append(&self, buf: &mut Vec) { match self { None => buf.append(&mut encode_union_index(0)), @@ -201,6 +235,16 @@ impl Encode for Vec { false } + fn ssz_bytes_len(&self) -> usize { + if ::is_ssz_fixed_len() { + ::ssz_fixed_len() * self.len() + } else { + let mut len = self.into_iter().map(|item| item.ssz_bytes_len()).sum(); + len += BYTES_PER_LENGTH_OFFSET * self.len(); + len + } + } + fn ssz_append(&self, buf: &mut Vec) { if T::is_ssz_fixed_len() { buf.reserve(T::ssz_fixed_len() * self.len()); @@ -229,6 +273,10 @@ impl Encode for bool { 1 } + fn ssz_bytes_len(&self) -> usize { + 1 + } + fn ssz_append(&self, buf: &mut Vec) { buf.extend_from_slice(&(*self as u8).to_le_bytes()); } @@ -243,6 +291,10 @@ impl Encode for NonZeroUsize { ::ssz_fixed_len() } + fn ssz_bytes_len(&self) -> usize { + std::mem::size_of::() + } + fn ssz_append(&self, buf: &mut Vec) { self.get().ssz_append(buf) } @@ -257,6 +309,10 @@ impl Encode for H256 { 32 } + fn ssz_bytes_len(&self) -> usize { + 32 + } + fn ssz_append(&self, buf: &mut Vec) { buf.extend_from_slice(self.as_bytes()); } @@ -271,6 +327,10 @@ impl Encode for U256 { 32 } + fn ssz_bytes_len(&self) -> usize { + 32 + } + fn ssz_append(&self, buf: &mut Vec) { let n = ::ssz_fixed_len(); let s = buf.len(); @@ -289,6 +349,10 @@ impl Encode 
for U128 { 16 } + fn ssz_bytes_len(&self) -> usize { + 16 + } + fn ssz_append(&self, buf: &mut Vec) { let n = ::ssz_fixed_len(); let s = buf.len(); @@ -309,6 +373,10 @@ macro_rules! impl_encodable_for_u8_array { $len } + fn ssz_bytes_len(&self) -> usize { + $len + } + fn ssz_append(&self, buf: &mut Vec) { buf.extend_from_slice(&self[..]); } diff --git a/eth2/utils/ssz/src/lib.rs b/eth2/utils/ssz/src/lib.rs index 696d36cbf..115633889 100644 --- a/eth2/utils/ssz/src/lib.rs +++ b/eth2/utils/ssz/src/lib.rs @@ -36,7 +36,6 @@ mod decode; mod encode; -mod macros; pub use decode::{ impls::decode_list_of_variable_length_items, Decode, DecodeError, SszDecoder, SszDecoderBuilder, diff --git a/eth2/utils/ssz/src/macros.rs b/eth2/utils/ssz/src/macros.rs index 04147a805..8b1378917 100644 --- a/eth2/utils/ssz/src/macros.rs +++ b/eth2/utils/ssz/src/macros.rs @@ -1,96 +1 @@ -/// Implements `Encode` for `$impl_type` using an implementation of `From<$impl_type> for -/// $from_type`. -/// -/// In effect, this allows for easy implementation of `Encode` for some type that implements a -/// `From` conversion into another type that already has `Encode` implemented. -#[macro_export] -macro_rules! impl_encode_via_from { - ($impl_type: ty, $from_type: ty) => { - impl ssz::Encode for $impl_type { - fn is_ssz_fixed_len() -> bool { - <$from_type as ssz::Encode>::is_ssz_fixed_len() - } - fn ssz_fixed_len() -> usize { - <$from_type as ssz::Encode>::ssz_fixed_len() - } - - fn ssz_append(&self, buf: &mut Vec) { - let conv: $from_type = self.clone().into(); - - conv.ssz_append(buf) - } - } - }; -} - -/// Implements `Decode` for `$impl_type` using an implementation of `From<$impl_type> for -/// $from_type`. -/// -/// In effect, this allows for easy implementation of `Decode` for some type that implements a -/// `From` conversion into another type that already has `Decode` implemented. -#[macro_export] -macro_rules! 
impl_decode_via_from { - ($impl_type: ty, $from_type: tt) => { - impl ssz::Decode for $impl_type { - fn is_ssz_fixed_len() -> bool { - <$from_type as ssz::Decode>::is_ssz_fixed_len() - } - - fn ssz_fixed_len() -> usize { - <$from_type as ssz::Decode>::ssz_fixed_len() - } - - fn from_ssz_bytes(bytes: &[u8]) -> Result { - $from_type::from_ssz_bytes(bytes).and_then(|dec| Ok(dec.into())) - } - } - }; -} - -#[cfg(test)] -mod tests { - use self::ssz::{Decode, Encode}; - use crate as ssz; - - #[derive(PartialEq, Debug, Clone, Copy)] - struct Wrapper(u64); - - impl From for Wrapper { - fn from(x: u64) -> Wrapper { - Wrapper(x) - } - } - - impl From for u64 { - fn from(x: Wrapper) -> u64 { - x.0 - } - } - - impl_encode_via_from!(Wrapper, u64); - impl_decode_via_from!(Wrapper, u64); - - #[test] - fn impl_encode_via_from() { - let check_encode = |a: u64, b: Wrapper| assert_eq!(a.as_ssz_bytes(), b.as_ssz_bytes()); - - check_encode(0, Wrapper(0)); - check_encode(1, Wrapper(1)); - check_encode(42, Wrapper(42)); - } - - #[test] - fn impl_decode_via_from() { - let check_decode = |bytes: Vec| { - let a = u64::from_ssz_bytes(&bytes).unwrap(); - let b = Wrapper::from_ssz_bytes(&bytes).unwrap(); - - assert_eq!(a, b.into()) - }; - - check_decode(vec![0, 0, 0, 0, 0, 0, 0, 0]); - check_decode(vec![1, 0, 0, 0, 0, 0, 0, 0]); - check_decode(vec![1, 0, 0, 0, 2, 0, 0, 0]); - } -} diff --git a/eth2/utils/ssz/tests/tests.rs b/eth2/utils/ssz/tests/tests.rs index c19e36662..26f2f53ef 100644 --- a/eth2/utils/ssz/tests/tests.rs +++ b/eth2/utils/ssz/tests/tests.rs @@ -8,6 +8,7 @@ mod round_trip { fn round_trip(items: Vec) { for item in items { let encoded = &item.as_ssz_bytes(); + assert_eq!(item.ssz_bytes_len(), encoded.len()); assert_eq!(T::from_ssz_bytes(&encoded), Ok(item)); } } diff --git a/eth2/utils/ssz_derive/src/lib.rs b/eth2/utils/ssz_derive/src/lib.rs index 47d96859e..5bdb9ca9d 100644 --- a/eth2/utils/ssz_derive/src/lib.rs +++ b/eth2/utils/ssz_derive/src/lib.rs @@ -81,9 +81,12 @@ pub fn 
ssz_encode_derive(input: TokenStream) -> TokenStream { }; let field_idents = get_serializable_named_field_idents(&struct_data); + let field_idents_a = get_serializable_named_field_idents(&struct_data); let field_types_a = get_serializable_field_types(&struct_data); let field_types_b = field_types_a.clone(); - let field_types_c = field_types_a.clone(); + let field_types_d = field_types_a.clone(); + let field_types_e = field_types_a.clone(); + let field_types_f = field_types_a.clone(); let output = quote! { impl #impl_generics ssz::Encode for #name #ty_generics #where_clause { @@ -105,9 +108,27 @@ pub fn ssz_encode_derive(input: TokenStream) -> TokenStream { } } + fn ssz_bytes_len(&self) -> usize { + if ::is_ssz_fixed_len() { + ::ssz_fixed_len() + } else { + let mut len = 0; + #( + if <#field_types_d as ssz::Encode>::is_ssz_fixed_len() { + len += <#field_types_e as ssz::Encode>::ssz_fixed_len(); + } else { + len += ssz::BYTES_PER_LENGTH_OFFSET; + len += self.#field_idents_a.ssz_bytes_len(); + } + )* + + len + } + } + fn ssz_append(&self, buf: &mut Vec) { let offset = #( - <#field_types_c as ssz::Encode>::ssz_fixed_len() + + <#field_types_f as ssz::Encode>::ssz_fixed_len() + )* 0; diff --git a/eth2/utils/ssz_types/src/bitfield.rs b/eth2/utils/ssz_types/src/bitfield.rs index 197426046..dbe1addbe 100644 --- a/eth2/utils/ssz_types/src/bitfield.rs +++ b/eth2/utils/ssz_types/src/bitfield.rs @@ -476,6 +476,12 @@ impl Encode for Bitfield> { false } + fn ssz_bytes_len(&self) -> usize { + // We could likely do better than turning this into bytes and reading the length, however + // it is kept this way for simplicity. 
+ self.clone().into_bytes().len() + } + fn ssz_append(&self, buf: &mut Vec) { buf.append(&mut self.clone().into_bytes()) } @@ -498,6 +504,10 @@ impl Encode for Bitfield> { true } + fn ssz_bytes_len(&self) -> usize { + self.as_slice().len() + } + fn ssz_fixed_len() -> usize { bytes_for_bit_len(N::to_usize()) } @@ -616,6 +626,7 @@ mod bitvector { pub type BitVector4 = BitVector; pub type BitVector8 = BitVector; pub type BitVector16 = BitVector; + pub type BitVector64 = BitVector; #[test] fn ssz_encode() { @@ -706,6 +717,18 @@ mod bitvector { fn assert_round_trip(t: T) { assert_eq!(T::from_ssz_bytes(&t.as_ssz_bytes()).unwrap(), t); } + + #[test] + fn ssz_bytes_len() { + for i in 0..64 { + let mut bitfield = BitVector64::new(); + for j in 0..i { + bitfield.set(j, true).expect("should set bit in bounds"); + } + let bytes = bitfield.as_ssz_bytes(); + assert_eq!(bitfield.ssz_bytes_len(), bytes.len(), "i = {}", i); + } + } } #[cfg(test)] @@ -1152,4 +1175,16 @@ mod bitlist { vec![false, false, true, false, false, false, false, false, true] ); } + + #[test] + fn ssz_bytes_len() { + for i in 1..64 { + let mut bitfield = BitList1024::with_capacity(i).unwrap(); + for j in 0..i { + bitfield.set(j, true).expect("should set bit in bounds"); + } + let bytes = bitfield.as_ssz_bytes(); + assert_eq!(bitfield.ssz_bytes_len(), bytes.len(), "i = {}", i); + } + } } diff --git a/eth2/utils/ssz_types/src/fixed_vector.rs b/eth2/utils/ssz_types/src/fixed_vector.rs index 090d04d84..f9c896331 100644 --- a/eth2/utils/ssz_types/src/fixed_vector.rs +++ b/eth2/utils/ssz_types/src/fixed_vector.rs @@ -183,6 +183,10 @@ where } } + fn ssz_bytes_len(&self) -> usize { + self.vec.ssz_bytes_len() + } + fn ssz_append(&self, buf: &mut Vec) { if T::is_ssz_fixed_len() { buf.reserve(T::ssz_fixed_len() * self.len()); @@ -318,6 +322,7 @@ mod test { fn ssz_round_trip(item: T) { let encoded = &item.as_ssz_bytes(); + assert_eq!(item.ssz_bytes_len(), encoded.len()); assert_eq!(T::from_ssz_bytes(&encoded), Ok(item)); 
} diff --git a/eth2/utils/ssz_types/src/variable_list.rs b/eth2/utils/ssz_types/src/variable_list.rs index beb7e6a93..feb656745 100644 --- a/eth2/utils/ssz_types/src/variable_list.rs +++ b/eth2/utils/ssz_types/src/variable_list.rs @@ -208,6 +208,10 @@ where >::ssz_fixed_len() } + fn ssz_bytes_len(&self) -> usize { + self.vec.ssz_bytes_len() + } + fn ssz_append(&self, buf: &mut Vec) { self.vec.ssz_append(buf) } @@ -304,6 +308,7 @@ mod test { fn round_trip(item: T) { let encoded = &item.as_ssz_bytes(); + assert_eq!(item.ssz_bytes_len(), encoded.len()); assert_eq!(T::from_ssz_bytes(&encoded), Ok(item)); } diff --git a/tests/ef_tests/src/cases/ssz_static.rs b/tests/ef_tests/src/cases/ssz_static.rs index 6e4a672cb..62f285d58 100644 --- a/tests/ef_tests/src/cases/ssz_static.rs +++ b/tests/ef_tests/src/cases/ssz_static.rs @@ -58,6 +58,7 @@ impl LoadCase for SszStaticSR { pub fn check_serialization(value: &T, serialized: &[u8]) -> Result<(), Error> { // Check serialization let serialized_result = value.as_ssz_bytes(); + compare_result::(&Ok(value.ssz_bytes_len()), &Some(serialized.len()))?; compare_result::, Error>(&Ok(serialized_result), &Some(serialized.to_vec()))?; // Check deserialization From 6311b13169a31f62e5283b05c5146025c2d1994a Mon Sep 17 00:00:00 2001 From: Paul Hauner Date: Sun, 8 Sep 2019 13:42:38 -0400 Subject: [PATCH 243/305] Add ssz benches to state processing --- eth2/state_processing/Cargo.toml | 1 + eth2/state_processing/benches/benches.rs | 27 ++++++++++++++++++++++++ 2 files changed, 28 insertions(+) diff --git a/eth2/state_processing/Cargo.toml b/eth2/state_processing/Cargo.toml index 65d5a2f30..633c5bfef 100644 --- a/eth2/state_processing/Cargo.toml +++ b/eth2/state_processing/Cargo.toml @@ -15,6 +15,7 @@ serde = "1.0" serde_derive = "1.0" lazy_static = "0.1" serde_yaml = "0.8" +eth2_ssz = { path = "../utils/ssz" } beacon_chain = { path = "../../beacon_node/beacon_chain" } store = { path = "../../beacon_node/store" } lmd_ghost = { path = 
"../lmd_ghost" } diff --git a/eth2/state_processing/benches/benches.rs b/eth2/state_processing/benches/benches.rs index 28afd0614..bdbe57b8e 100644 --- a/eth2/state_processing/benches/benches.rs +++ b/eth2/state_processing/benches/benches.rs @@ -2,6 +2,7 @@ extern crate env_logger; use criterion::Criterion; use criterion::{black_box, criterion_group, criterion_main, Benchmark}; +use ssz::Encode; use state_processing::{test_utils::BlockBuilder, BlockSignatureStrategy, VerifySignatures}; use types::{BeaconBlock, BeaconState, ChainSpec, EthSpec, MainnetEthSpec, MinimalEthSpec, Slot}; @@ -393,6 +394,32 @@ fn bench_block( }) .sample_size(10), ); + + let local_block = block.clone(); + c.bench( + &title, + Benchmark::new("ssz_serialize_block", move |b| { + b.iter_batched_ref( + || (), + |_| black_box(local_block.as_ssz_bytes()), + criterion::BatchSize::SmallInput, + ) + }) + .sample_size(10), + ); + + let local_block = block.clone(); + c.bench( + &title, + Benchmark::new("ssz_block_len", move |b| { + b.iter_batched_ref( + || (), + |_| black_box(local_block.ssz_bytes_len()), + criterion::BatchSize::SmallInput, + ) + }) + .sample_size(10), + ); } criterion_group!(benches, all_benches,); From 92c16bb9112de696fdf3f832a8be342bcb43d63b Mon Sep 17 00:00:00 2001 From: Paul Hauner Date: Sun, 8 Sep 2019 14:20:48 -0400 Subject: [PATCH 244/305] Add extra logs to gossip object processing --- beacon_node/network/Cargo.toml | 1 + beacon_node/network/src/sync/simple_sync.rs | 23 ++++++++++++++++++--- 2 files changed, 21 insertions(+), 3 deletions(-) diff --git a/beacon_node/network/Cargo.toml b/beacon_node/network/Cargo.toml index 06fc06dde..ffeba96ec 100644 --- a/beacon_node/network/Cargo.toml +++ b/beacon_node/network/Cargo.toml @@ -13,6 +13,7 @@ store = { path = "../store" } eth2-libp2p = { path = "../eth2-libp2p" } types = { path = "../../eth2/types" } slog = { version = "^2.2.3" , features = ["max_level_trace"] } +hex = "0.3" eth2_ssz = "0.1" tree_hash = "0.1" futures = "0.1.25" 
diff --git a/beacon_node/network/src/sync/simple_sync.rs b/beacon_node/network/src/sync/simple_sync.rs index c54c481c7..af5b3dd9a 100644 --- a/beacon_node/network/src/sync/simple_sync.rs +++ b/beacon_node/network/src/sync/simple_sync.rs @@ -408,7 +408,19 @@ impl MessageProcessor { SHOULD_FORWARD_GOSSIP_BLOCK } BlockProcessingOutcome::BlockIsAlreadyKnown => SHOULD_FORWARD_GOSSIP_BLOCK, - _ => SHOULD_NOT_FORWARD_GOSSIP_BLOCK, //TODO: Decide if we want to forward these + _ => { + warn!( + self.log, + "Invalid gossip beacon block"; + "block slot" => block.slot + ); + trace!( + self.log, + "Invalid gossip beacon block ssz"; + "ssz" => format!("0x{}", hex::encode(block.as_ssz_bytes())), + ); + SHOULD_NOT_FORWARD_GOSSIP_BLOCK //TODO: Decide if we want to forward these + } } } else { SHOULD_NOT_FORWARD_GOSSIP_BLOCK @@ -419,7 +431,7 @@ impl MessageProcessor { /// /// Not currently implemented. pub fn on_attestation_gossip(&mut self, _peer_id: PeerId, msg: Attestation) { - match self.chain.process_attestation(msg) { + match self.chain.process_attestation(msg.clone()) { Ok(outcome) => info!( self.log, "Processed attestation"; @@ -427,7 +439,12 @@ impl MessageProcessor { "outcome" => format!("{:?}", outcome) ), Err(e) => { - warn!(self.log, "InvalidAttestation"; "source" => "gossip", "error" => format!("{:?}", e)) + warn!(self.log, "Invalid gossip attestation"; "error" => format!("{:?}", e)); + trace!( + self.log, + "Invalid gossip attestation ssz"; + "ssz" => format!("0x{}", hex::encode(msg.as_ssz_bytes())), + ); } } } From 1040c80cd8dfd7cf7cdbb670f742fb3ba071caf1 Mon Sep 17 00:00:00 2001 From: Paul Hauner Date: Sun, 8 Sep 2019 16:28:01 -0400 Subject: [PATCH 245/305] Add noisey debugging to attestation processing --- .../per_block_processing/is_valid_indexed_attestation.rs | 9 +++++++++ eth2/utils/bls/src/signature_set.rs | 4 ++-- 2 files changed, 11 insertions(+), 2 deletions(-) diff --git a/eth2/state_processing/src/per_block_processing/is_valid_indexed_attestation.rs 
b/eth2/state_processing/src/per_block_processing/is_valid_indexed_attestation.rs index 54a48d7b7..34802ef02 100644 --- a/eth2/state_processing/src/per_block_processing/is_valid_indexed_attestation.rs +++ b/eth2/state_processing/src/per_block_processing/is_valid_indexed_attestation.rs @@ -55,6 +55,15 @@ pub fn is_valid_indexed_attestation( check_sorted(&bit_0_indices)?; check_sorted(&bit_1_indices)?; + dbg!(indexed_attestation_signature_set( + state, + &indexed_attestation.signature, + &indexed_attestation, + spec + )?); + dbg!(&bit_0_indices); + dbg!(&bit_1_indices); + if verify_signatures.is_true() { verify!( indexed_attestation_signature_set( diff --git a/eth2/utils/bls/src/signature_set.rs b/eth2/utils/bls/src/signature_set.rs index 4b6065f9f..df1636f1d 100644 --- a/eth2/utils/bls/src/signature_set.rs +++ b/eth2/utils/bls/src/signature_set.rs @@ -7,7 +7,7 @@ use milagro_bls::AggregateSignature as RawAggregateSignature; type Message = Vec; type Domain = u64; -#[derive(Clone)] +#[derive(Clone, Debug)] pub struct SignedMessage<'a> { signing_keys: Vec<&'a G1Point>, message: Message, @@ -25,7 +25,7 @@ impl<'a> SignedMessage<'a> { } } -#[derive(Clone)] +#[derive(Clone, Debug)] pub struct SignatureSet<'a> { pub signature: &'a G2Point, signed_messages: Vec>, From 0e5f33d928bd982ac3d17c6f3121891e6d6b9441 Mon Sep 17 00:00:00 2001 From: Paul Hauner Date: Sun, 8 Sep 2019 18:37:30 -0400 Subject: [PATCH 246/305] Removes testin dbg!'s Hopefully this appeases @gregthegreek --- .../per_block_processing/is_valid_indexed_attestation.rs | 9 --------- 1 file changed, 9 deletions(-) diff --git a/eth2/state_processing/src/per_block_processing/is_valid_indexed_attestation.rs b/eth2/state_processing/src/per_block_processing/is_valid_indexed_attestation.rs index 34802ef02..54a48d7b7 100644 --- a/eth2/state_processing/src/per_block_processing/is_valid_indexed_attestation.rs +++ b/eth2/state_processing/src/per_block_processing/is_valid_indexed_attestation.rs @@ -55,15 +55,6 @@ pub fn 
is_valid_indexed_attestation( check_sorted(&bit_0_indices)?; check_sorted(&bit_1_indices)?; - dbg!(indexed_attestation_signature_set( - state, - &indexed_attestation.signature, - &indexed_attestation, - spec - )?); - dbg!(&bit_0_indices); - dbg!(&bit_1_indices); - if verify_signatures.is_true() { verify!( indexed_attestation_signature_set( From 39f7dda7611b54438f6df726bfdf93abc2df96dc Mon Sep 17 00:00:00 2001 From: Gregory Markou <16929357+GregTheGreek@users.noreply.github.com> Date: Sun, 8 Sep 2019 18:38:22 -0400 Subject: [PATCH 247/305] fix pauls shit (#528) --- .../per_block_processing/is_valid_indexed_attestation.rs | 9 --------- 1 file changed, 9 deletions(-) diff --git a/eth2/state_processing/src/per_block_processing/is_valid_indexed_attestation.rs b/eth2/state_processing/src/per_block_processing/is_valid_indexed_attestation.rs index 34802ef02..54a48d7b7 100644 --- a/eth2/state_processing/src/per_block_processing/is_valid_indexed_attestation.rs +++ b/eth2/state_processing/src/per_block_processing/is_valid_indexed_attestation.rs @@ -55,15 +55,6 @@ pub fn is_valid_indexed_attestation( check_sorted(&bit_0_indices)?; check_sorted(&bit_1_indices)?; - dbg!(indexed_attestation_signature_set( - state, - &indexed_attestation.signature, - &indexed_attestation, - spec - )?); - dbg!(&bit_0_indices); - dbg!(&bit_1_indices); - if verify_signatures.is_true() { verify!( indexed_attestation_signature_set( From 37cd98f3ce39d44b0923e0abcd841f15b51a9699 Mon Sep 17 00:00:00 2001 From: Paul Hauner Date: Sun, 8 Sep 2019 18:51:24 -0400 Subject: [PATCH 248/305] Add additional logging for gossip messages --- beacon_node/network/src/sync/simple_sync.rs | 57 +++++++++++++++------ 1 file changed, 42 insertions(+), 15 deletions(-) diff --git a/beacon_node/network/src/sync/simple_sync.rs b/beacon_node/network/src/sync/simple_sync.rs index af5b3dd9a..83aa7ebd2 100644 --- a/beacon_node/network/src/sync/simple_sync.rs +++ b/beacon_node/network/src/sync/simple_sync.rs @@ -1,14 +1,17 @@ use 
super::manager::SyncMessage; use crate::service::NetworkMessage; -use beacon_chain::{BeaconChain, BeaconChainTypes, BlockProcessingOutcome}; +use beacon_chain::{ + AttestationProcessingOutcome, BeaconChain, BeaconChainTypes, BlockProcessingOutcome, +}; use eth2_libp2p::rpc::methods::*; use eth2_libp2p::rpc::{RPCEvent, RPCRequest, RPCResponse, RequestId}; use eth2_libp2p::PeerId; -use slog::{debug, info, o, trace, warn}; +use slog::{debug, error, info, o, trace, warn}; use ssz::Encode; use std::sync::Arc; use store::Store; use tokio::sync::{mpsc, oneshot}; +use tree_hash::SignedRoot; use types::{Attestation, BeaconBlock, Epoch, EthSpec, Hash256, Slot}; //TODO: Put a maximum limit on the number of block that can be requested. @@ -386,8 +389,8 @@ impl MessageProcessor { /// /// Returns a `bool` which, if `true`, indicates we should forward the block to our peers. pub fn on_block_gossip(&mut self, peer_id: PeerId, block: BeaconBlock) -> bool { - if let Ok(outcome) = self.chain.process_block(block.clone()) { - match outcome { + match self.chain.process_block(block.clone()) { + Ok(outcome) => match outcome { BlockProcessingOutcome::Processed { .. 
} => { trace!(self.log, "Gossipsub block processed"; "peer_id" => format!("{:?}",peer_id)); @@ -408,10 +411,12 @@ impl MessageProcessor { SHOULD_FORWARD_GOSSIP_BLOCK } BlockProcessingOutcome::BlockIsAlreadyKnown => SHOULD_FORWARD_GOSSIP_BLOCK, - _ => { + other => { warn!( self.log, "Invalid gossip beacon block"; + "outcome" => format!("{:?}", other), + "block root" => format!("{}", Hash256::from_slice(&block.signed_root()[..])), "block slot" => block.slot ); trace!( @@ -421,9 +426,21 @@ impl MessageProcessor { ); SHOULD_NOT_FORWARD_GOSSIP_BLOCK //TODO: Decide if we want to forward these } + }, + Err(e) => { + error!( + self.log, + "Error processing gossip beacon block"; + "error" => format!("{:?}", e), + "block slot" => block.slot + ); + trace!( + self.log, + "Erroneous gossip beacon block ssz"; + "ssz" => format!("0x{}", hex::encode(block.as_ssz_bytes())), + ); + SHOULD_NOT_FORWARD_GOSSIP_BLOCK } - } else { - SHOULD_NOT_FORWARD_GOSSIP_BLOCK } } @@ -432,19 +449,29 @@ impl MessageProcessor { /// Not currently implemented. 
pub fn on_attestation_gossip(&mut self, _peer_id: PeerId, msg: Attestation) { match self.chain.process_attestation(msg.clone()) { - Ok(outcome) => info!( - self.log, - "Processed attestation"; - "source" => "gossip", - "outcome" => format!("{:?}", outcome) - ), + Ok(outcome) => { + info!( + self.log, + "Processed attestation"; + "source" => "gossip", + "outcome" => format!("{:?}", outcome) + ); + + if outcome != AttestationProcessingOutcome::Processed { + trace!( + self.log, + "Invalid gossip attestation ssz"; + "ssz" => format!("0x{}", hex::encode(msg.as_ssz_bytes())), + ); + } + } Err(e) => { - warn!(self.log, "Invalid gossip attestation"; "error" => format!("{:?}", e)); trace!( self.log, - "Invalid gossip attestation ssz"; + "Erroneous gossip attestation ssz"; "ssz" => format!("0x{}", hex::encode(msg.as_ssz_bytes())), ); + error!(self.log, "Invalid gossip attestation"; "error" => format!("{:?}", e)); } } } From e7b324966de3a10ed744f89f834d80803d7a1fbc Mon Sep 17 00:00:00 2001 From: Paul Hauner Date: Sun, 8 Sep 2019 20:55:15 -0400 Subject: [PATCH 249/305] Log all states and blocks processed --- beacon_node/beacon_chain/src/beacon_chain.rs | 55 ++++++++++++++++++++ 1 file changed, 55 insertions(+) diff --git a/beacon_node/beacon_chain/src/beacon_chain.rs b/beacon_node/beacon_chain/src/beacon_chain.rs index aa9332c01..b6bf4e053 100644 --- a/beacon_node/beacon_chain/src/beacon_chain.rs +++ b/beacon_node/beacon_chain/src/beacon_chain.rs @@ -11,6 +11,7 @@ use operation_pool::{OperationPool, PersistedOperationPool}; use parking_lot::{RwLock, RwLockReadGuard}; use slog::{error, info, warn, Logger}; use slot_clock::SlotClock; +use ssz::Encode; use state_processing::per_block_processing::{ errors::{ AttestationValidationError, AttesterSlashingValidationError, DepositValidationError, @@ -21,6 +22,8 @@ use state_processing::per_block_processing::{ use state_processing::{ per_block_processing, per_slot_processing, BlockProcessingError, BlockSignatureStrategy, }; +use 
std::fs; +use std::io::prelude::*; use std::sync::Arc; use std::time::Duration; use store::iter::{BlockRootsIterator, StateRootsIterator}; @@ -1035,6 +1038,13 @@ impl BeaconChain { metrics::stop_timer(db_read_timer); + write_block(&block, block_root, &self.log); + write_state( + &format!("state_pre_block_{}", block_root), + &parent_state, + &self.log, + ); + let catchup_timer = metrics::start_timer(&metrics::BLOCK_PROCESSING_CATCHUP_STATE); // Keep a list of any states that were "skipped" (block-less) in between the parent state @@ -1083,6 +1093,12 @@ impl BeaconChain { let state_root = state.canonical_root(); + write_state( + &format!("state_post_block_{}", block_root), + &state, + &self.log, + ); + if block.state_root != state_root { return Ok(BlockProcessingOutcome::StateRootMismatch { block: block.state_root, @@ -1445,6 +1461,45 @@ impl BeaconChain { } } +fn write_state(prefix: &str, state: &BeaconState, log: &Logger) { + let root = Hash256::from_slice(&state.tree_hash_root()); + let filename = format!("{}_slot_{}_root_{}.ssz", prefix, state.slot, root); + let mut path = std::env::temp_dir().join("lighthouse"); + let _ = fs::create_dir_all(path.clone()); + path = path.join(filename); + + match fs::File::create(path.clone()) { + Ok(mut file) => { + let _ = file.write_all(&state.as_ssz_bytes()); + } + Err(e) => error!( + log, + "Failed to log state"; + "path" => format!("{:?}", path), + "error" => format!("{:?}", e) + ), + } +} + +fn write_block(block: &BeaconBlock, root: Hash256, log: &Logger) { + let filename = format!("block_slot_{}_root{}.ssz", block.slot, root); + let mut path = std::env::temp_dir().join("lighthouse"); + let _ = fs::create_dir_all(path.clone()); + path = path.join(filename); + + match fs::File::create(path.clone()) { + Ok(mut file) => { + let _ = file.write_all(&block.as_ssz_bytes()); + } + Err(e) => error!( + log, + "Failed to log block"; + "path" => format!("{:?}", path), + "error" => format!("{:?}", e) + ), + } +} + impl From for Error { 
fn from(e: DBError) -> Error { Error::DBError(e) From 09b0db2535ba64fc855a4e7228e288c44ccf9e8d Mon Sep 17 00:00:00 2001 From: Paul Hauner Date: Sun, 8 Sep 2019 21:11:16 -0400 Subject: [PATCH 250/305] Change which pre state is logged --- beacon_node/beacon_chain/src/beacon_chain.rs | 11 ++++++----- 1 file changed, 6 insertions(+), 5 deletions(-) diff --git a/beacon_node/beacon_chain/src/beacon_chain.rs b/beacon_node/beacon_chain/src/beacon_chain.rs index b6bf4e053..33da14f53 100644 --- a/beacon_node/beacon_chain/src/beacon_chain.rs +++ b/beacon_node/beacon_chain/src/beacon_chain.rs @@ -1039,11 +1039,6 @@ impl BeaconChain { metrics::stop_timer(db_read_timer); write_block(&block, block_root, &self.log); - write_state( - &format!("state_pre_block_{}", block_root), - &parent_state, - &self.log, - ); let catchup_timer = metrics::start_timer(&metrics::BLOCK_PROCESSING_CATCHUP_STATE); @@ -1069,6 +1064,12 @@ impl BeaconChain { metrics::stop_timer(committee_timer); + write_state( + &format!("state_pre_block_{}", block_root), + &state, + &self.log, + ); + let core_timer = metrics::start_timer(&metrics::BLOCK_PROCESSING_CORE); // Apply the received block to its parent state (which has been transitioned into this From 7b7a44e2f22e0f43c2574e8d16e892830ecdb65f Mon Sep 17 00:00:00 2001 From: Paul Hauner Date: Sun, 8 Sep 2019 21:57:48 -0400 Subject: [PATCH 251/305] Add const to control writing of ssz files --- beacon_node/beacon_chain/src/beacon_chain.rs | 64 +++++++++++--------- 1 file changed, 37 insertions(+), 27 deletions(-) diff --git a/beacon_node/beacon_chain/src/beacon_chain.rs b/beacon_node/beacon_chain/src/beacon_chain.rs index 33da14f53..e88747d83 100644 --- a/beacon_node/beacon_chain/src/beacon_chain.rs +++ b/beacon_node/beacon_chain/src/beacon_chain.rs @@ -37,6 +37,12 @@ use types::*; // |-------must be this long------| pub const GRAFFITI: &str = "sigp/lighthouse-0.0.0-prerelease"; +/// If true, everytime a block is processed the pre-state, post-state and block are 
written to SSZ +/// files in the temp directory. +/// +/// Only useful for testing. +const WRITE_BLOCK_PROCESSING_SSZ: bool = true; + #[derive(Debug, PartialEq)] pub enum BlockProcessingOutcome { /// Block was valid and imported into the block graph. @@ -1463,41 +1469,45 @@ impl BeaconChain { } fn write_state(prefix: &str, state: &BeaconState, log: &Logger) { - let root = Hash256::from_slice(&state.tree_hash_root()); - let filename = format!("{}_slot_{}_root_{}.ssz", prefix, state.slot, root); - let mut path = std::env::temp_dir().join("lighthouse"); - let _ = fs::create_dir_all(path.clone()); - path = path.join(filename); + if WRITE_BLOCK_PROCESSING_SSZ { + let root = Hash256::from_slice(&state.tree_hash_root()); + let filename = format!("{}_slot_{}_root_{}.ssz", prefix, state.slot, root); + let mut path = std::env::temp_dir().join("lighthouse"); + let _ = fs::create_dir_all(path.clone()); + path = path.join(filename); - match fs::File::create(path.clone()) { - Ok(mut file) => { - let _ = file.write_all(&state.as_ssz_bytes()); + match fs::File::create(path.clone()) { + Ok(mut file) => { + let _ = file.write_all(&state.as_ssz_bytes()); + } + Err(e) => error!( + log, + "Failed to log state"; + "path" => format!("{:?}", path), + "error" => format!("{:?}", e) + ), } - Err(e) => error!( - log, - "Failed to log state"; - "path" => format!("{:?}", path), - "error" => format!("{:?}", e) - ), } } fn write_block(block: &BeaconBlock, root: Hash256, log: &Logger) { - let filename = format!("block_slot_{}_root{}.ssz", block.slot, root); - let mut path = std::env::temp_dir().join("lighthouse"); - let _ = fs::create_dir_all(path.clone()); - path = path.join(filename); + if WRITE_BLOCK_PROCESSING_SSZ { + let filename = format!("block_slot_{}_root{}.ssz", block.slot, root); + let mut path = std::env::temp_dir().join("lighthouse"); + let _ = fs::create_dir_all(path.clone()); + path = path.join(filename); - match fs::File::create(path.clone()) { - Ok(mut file) => { - let _ = 
file.write_all(&block.as_ssz_bytes()); + match fs::File::create(path.clone()) { + Ok(mut file) => { + let _ = file.write_all(&block.as_ssz_bytes()); + } + Err(e) => error!( + log, + "Failed to log block"; + "path" => format!("{:?}", path), + "error" => format!("{:?}", e) + ), } - Err(e) => error!( - log, - "Failed to log block"; - "path" => format!("{:?}", path), - "error" => format!("{:?}", e) - ), } } From 99c673045c8be6422a6ffde12da95cffedffc11d Mon Sep 17 00:00:00 2001 From: Luke Anderson Date: Mon, 9 Sep 2019 12:10:41 +1000 Subject: [PATCH 252/305] Moved chain/cache building into separate function, and made sure that all REST API endpoints are using this function to get the state. --- beacon_node/rest_api/src/beacon.rs | 22 +++++++++------------- beacon_node/rest_api/src/helpers.rs | 21 ++++++++------------- beacon_node/rest_api/src/metrics.rs | 2 +- beacon_node/rest_api/src/node.rs | 8 ++++---- beacon_node/rest_api/src/spec.rs | 2 +- beacon_node/rest_api/src/validator.rs | 13 +++---------- 6 files changed, 26 insertions(+), 42 deletions(-) diff --git a/beacon_node/rest_api/src/beacon.rs b/beacon_node/rest_api/src/beacon.rs index f9b2d0383..66f5e7731 100644 --- a/beacon_node/rest_api/src/beacon.rs +++ b/beacon_node/rest_api/src/beacon.rs @@ -109,7 +109,7 @@ pub fn get_block(req: Request) -> ApiResult /// HTTP handler to return a `BeaconBlock` root at a given `slot`. pub fn get_block_root(req: Request) -> ApiResult { - let beacon_chain = get_beacon_chain_from_request::(&req)?; + let (beacon_chain, _head_state) = get_beacon_chain_from_request::(&req)?; let slot_string = UrlQuery::from_request(&req)?.only_one("slot")?; let target = parse_slot(&slot_string)?; @@ -126,10 +126,9 @@ pub fn get_block_root(req: Request) -> ApiR /// HTTP handler to return the `Fork` of the current head. 
pub fn get_fork(req: Request) -> ApiResult { - let beacon_chain = get_beacon_chain_from_request::(&req)?; - let chain_head = beacon_chain.head(); + let (_beacon_chain, head_state) = get_beacon_chain_from_request::(&req)?; - let json: String = serde_json::to_string(&chain_head.beacon_state.fork).map_err(|e| { + let json: String = serde_json::to_string(&head_state.fork).map_err(|e| { ApiError::ServerError(format!("Unable to serialize BeaconState::Fork: {:?}", e)) })?; @@ -141,7 +140,7 @@ pub fn get_fork(req: Request) -> ApiResult /// The `Epoch` parameter can be any epoch number. If it is not specified, /// the current epoch is assumed. pub fn get_validators(req: Request) -> ApiResult { - let beacon_chain = get_beacon_chain_from_request::(&req)?; + let (beacon_chain, _head_state) = get_beacon_chain_from_request::(&req)?; let epoch = match UrlQuery::from_request(&req) { // We have some parameters, so make sure it's the epoch one and parse it @@ -183,7 +182,7 @@ pub struct StateResponse { /// Will not return a state if the request slot is in the future. Will return states higher than /// the current head by skipping slots. pub fn get_state(req: Request) -> ApiResult { - let beacon_chain = get_beacon_chain_from_request::(&req)?; + let (beacon_chain, head_state) = get_beacon_chain_from_request::(&req)?; let (key, value) = match UrlQuery::from_request(&req) { Ok(query) => { @@ -199,10 +198,7 @@ pub fn get_state(req: Request) -> ApiResult } Err(ApiError::InvalidQueryParams(_)) => { // No parameters provided at all, use current slot. - ( - String::from("slot"), - beacon_chain.head().beacon_state.slot.to_string(), - ) + (String::from("slot"), head_state.slot.to_string()) } Err(e) => { return Err(e); @@ -237,7 +233,7 @@ pub fn get_state(req: Request) -> ApiResult /// Will not return a state if the request slot is in the future. Will return states higher than /// the current head by skipping slots. 
pub fn get_state_root(req: Request) -> ApiResult { - let beacon_chain = get_beacon_chain_from_request::(&req)?; + let (beacon_chain, _head_state) = get_beacon_chain_from_request::(&req)?; let slot_string = UrlQuery::from_request(&req)?.only_one("slot")?; let slot = parse_slot(&slot_string)?; @@ -254,7 +250,7 @@ pub fn get_state_root(req: Request) -> ApiR pub fn get_current_finalized_checkpoint( req: Request, ) -> ApiResult { - let beacon_chain = get_beacon_chain_from_request::(&req)?; + let (beacon_chain, _head_state) = get_beacon_chain_from_request::(&req)?; let checkpoint = beacon_chain .head() @@ -270,7 +266,7 @@ pub fn get_current_finalized_checkpoint( /// HTTP handler to return a `BeaconState` at the genesis block. pub fn get_genesis_state(req: Request) -> ApiResult { - let beacon_chain = get_beacon_chain_from_request::(&req)?; + let (beacon_chain, _head_state) = get_beacon_chain_from_request::(&req)?; let (_root, state) = state_at_slot(&beacon_chain, Slot::new(0))?; diff --git a/beacon_node/rest_api/src/helpers.rs b/beacon_node/rest_api/src/helpers.rs index 08ccbb6c9..76fc78750 100644 --- a/beacon_node/rest_api/src/helpers.rs +++ b/beacon_node/rest_api/src/helpers.rs @@ -172,26 +172,21 @@ pub fn implementation_pending_response(_req: Request) -> ApiResult { pub fn get_beacon_chain_from_request( req: &Request, -) -> Result>, ApiError> { +) -> Result<(Arc>, BeaconState), ApiError> { // Get beacon state let beacon_chain = req .extensions() .get::>>() .ok_or_else(|| ApiError::ServerError("Beacon chain extension missing".into()))?; - - /* - let _state_now = beacon_chain + let mut head_state = beacon_chain .state_now() - .map_err(|e| ApiError::ServerError(format!("Unable to get current BeaconState {:?}", e)))? - .maybe_as_mut_ref() - .ok_or(ApiError::ServerError( - "Unable to get mutable BeaconState".into(), - ))? 
- .build_all_caches(&beacon_chain.spec) - .map_err(|e| ApiError::ServerError(format!("Unable to build state caches: {:?}", e)))?; - */ + .map_err(|e| ApiError::ServerError(format!("Unable to get current BeaconState {:?}", e)))?; - Ok(beacon_chain.clone()) + if let Some(s) = head_state.maybe_as_mut_ref() { + s.build_all_caches(&beacon_chain.spec).ok(); + } + + Ok((beacon_chain.clone(), head_state.clone())) } pub fn get_logger_from_request(req: &Request) -> slog::Logger { diff --git a/beacon_node/rest_api/src/metrics.rs b/beacon_node/rest_api/src/metrics.rs index 01dc4d22d..62a769de1 100644 --- a/beacon_node/rest_api/src/metrics.rs +++ b/beacon_node/rest_api/src/metrics.rs @@ -30,7 +30,7 @@ pub fn get_prometheus(req: Request) -> ApiR let mut buffer = vec![]; let encoder = TextEncoder::new(); - let beacon_chain = get_beacon_chain_from_request::(&req)?; + let (beacon_chain, _head_state) = get_beacon_chain_from_request::(&req)?; let db_path = req .extensions() .get::() diff --git a/beacon_node/rest_api/src/node.rs b/beacon_node/rest_api/src/node.rs index 4dbd41229..c75d3ba20 100644 --- a/beacon_node/rest_api/src/node.rs +++ b/beacon_node/rest_api/src/node.rs @@ -1,7 +1,7 @@ +use crate::helpers::get_beacon_chain_from_request; use crate::{success_response, ApiResult}; -use beacon_chain::{BeaconChain, BeaconChainTypes}; +use beacon_chain::BeaconChainTypes; use hyper::{Body, Request}; -use std::sync::Arc; use version; /// Read the version string from the current Lighthouse build. @@ -15,8 +15,8 @@ pub fn get_version(_req: Request) -> ApiResult { /// Read the genesis time from the current beacon chain state. 
pub fn get_genesis_time(req: Request) -> ApiResult { - let beacon_chain = req.extensions().get::>>().unwrap(); - let gen_time: u64 = beacon_chain.head().beacon_state.genesis_time; + let (_beacon_chain, head_state) = get_beacon_chain_from_request::(&req)?; + let gen_time: u64 = head_state.genesis_time; let body = Body::from( serde_json::to_string(&gen_time) .expect("Genesis should time always have a valid JSON serialization."), diff --git a/beacon_node/rest_api/src/spec.rs b/beacon_node/rest_api/src/spec.rs index a353b3833..ad168faf1 100644 --- a/beacon_node/rest_api/src/spec.rs +++ b/beacon_node/rest_api/src/spec.rs @@ -9,7 +9,7 @@ use types::EthSpec; /// HTTP handler to return the full spec object. pub fn get_spec(req: Request) -> ApiResult { - let beacon_chain = get_beacon_chain_from_request::(&req)?; + let (beacon_chain, _head_state) = get_beacon_chain_from_request::(&req)?; let json: String = serde_json::to_string(&beacon_chain.spec) .map_err(|e| ApiError::ServerError(format!("Unable to serialize spec: {:?}", e)))?; diff --git a/beacon_node/rest_api/src/validator.rs b/beacon_node/rest_api/src/validator.rs index c559777c0..49b4c0441 100644 --- a/beacon_node/rest_api/src/validator.rs +++ b/beacon_node/rest_api/src/validator.rs @@ -34,10 +34,7 @@ impl ValidatorDuty { pub fn get_validator_duties(req: Request) -> ApiResult { let log = get_logger_from_request(&req); slog::trace!(log, "Validator duties requested of API: {:?}", &req); - let beacon_chain = get_beacon_chain_from_request::(&req)?; - let mut head_state = beacon_chain - .state_now() - .map_err(|e| ApiError::ServerError(format!("Unable to get current BeaconState {:?}", e)))?; + let (beacon_chain, head_state) = get_beacon_chain_from_request::(&req)?; slog::trace!(log, "Got head state from request."); // Parse and check query parameters @@ -67,9 +64,6 @@ pub fn get_validator_duties(req: Request) - e )) })?; - if let Some(s) = head_state.maybe_as_mut_ref() { - s.build_all_caches(&beacon_chain.spec).ok(); - } 
let validators: Vec = query .all_of("validator_pubkeys")? .iter() @@ -163,7 +157,7 @@ pub fn get_validator_duties(req: Request) - /// HTTP Handler to produce a new BeaconBlock from the current state, ready to be signed by a validator. pub fn get_new_beacon_block(req: Request) -> ApiResult { - let beacon_chain = get_beacon_chain_from_request::(&req)?; + let (beacon_chain, _head_state) = get_beacon_chain_from_request::(&req)?; let query = UrlQuery::from_request(&req)?; let slot = query @@ -203,8 +197,7 @@ pub fn get_new_beacon_block(req: Request) - /// HTTP Handler to produce a new Attestation from the current state, ready to be signed by a validator. pub fn get_new_attestation(req: Request) -> ApiResult { - let beacon_chain = get_beacon_chain_from_request::(&req)?; - let head_state = &beacon_chain.head().beacon_state; + let (beacon_chain, head_state) = get_beacon_chain_from_request::(&req)?; let query = UrlQuery::from_request(&req)?; let val_pk_str = query From 0136eb33b092a8ab474c5ddf3db666a542dfda0b Mon Sep 17 00:00:00 2001 From: Luke Anderson Date: Mon, 9 Sep 2019 12:54:14 +1000 Subject: [PATCH 253/305] WIP: Added POST functionality for pusblish_beacon_block. Currently doesn't compile, struggling with the borrow checker. 
--- beacon_node/client/src/lib.rs | 1 + beacon_node/rest_api/src/helpers.rs | 42 +++++++++++++++++++- beacon_node/rest_api/src/lib.rs | 8 +++- beacon_node/rest_api/src/validator.rs | 57 +++++++++++++++++++++++++-- 4 files changed, 103 insertions(+), 5 deletions(-) diff --git a/beacon_node/client/src/lib.rs b/beacon_node/client/src/lib.rs index afcd538b5..f26a5503c 100644 --- a/beacon_node/client/src/lib.rs +++ b/beacon_node/client/src/lib.rs @@ -215,6 +215,7 @@ where executor, beacon_chain.clone(), network.clone(), + network_send.clone(), client_config.db_path().expect("unable to read datadir"), eth2_config.clone(), &log, diff --git a/beacon_node/rest_api/src/helpers.rs b/beacon_node/rest_api/src/helpers.rs index 76fc78750..d6ea0397f 100644 --- a/beacon_node/rest_api/src/helpers.rs +++ b/beacon_node/rest_api/src/helpers.rs @@ -1,11 +1,17 @@ use crate::{ApiError, ApiResult}; use beacon_chain::{BeaconChain, BeaconChainTypes}; use bls::PublicKey; +use eth2_libp2p::{PubsubMessage, Topic}; +use eth2_libp2p::{BEACON_BLOCK_TOPIC, TOPIC_ENCODING_POSTFIX, TOPIC_PREFIX}; use hex; use hyper::{Body, Request}; +use network::NetworkMessage; +use ssz::Encode; +use std::borrow::BorrowMut; use std::sync::Arc; use store::{iter::AncestorIter, Store}; -use types::{BeaconState, EthSpec, Hash256, RelativeEpoch, Slot}; +use tokio::sync::mpsc; +use types::{BeaconBlock, BeaconState, EthSpec, Hash256, RelativeEpoch, Slot}; /// Parse a slot from a `0x` preixed string. 
/// @@ -197,6 +203,40 @@ pub fn get_logger_from_request(req: &Request) -> slog::Logger { log.to_owned() } +pub fn publish_beacon_block_to_network( + req: &Request, + block: BeaconBlock, +) -> Result<(), ApiError> { + // Get the network service from the request + let mut network_chan = req + .extensions() + .get::>() + .expect( + "Should always get the network channel from the request, since we put it in there.", + ); + + // create the network topic to send on + let topic_string = format!( + "/{}/{}/{}", + TOPIC_PREFIX, BEACON_BLOCK_TOPIC, TOPIC_ENCODING_POSTFIX + ); + let topic = Topic::new(topic_string); + let message = PubsubMessage::Block(block.as_ssz_bytes()); + + // Publish the block to the p2p network via gossipsub. + if let Err(e) = &network_chan.try_send(NetworkMessage::Publish { + topics: vec![topic], + message: message, + }) { + return Err(ApiError::ServerError(format!( + "Unable to send new block to network: {:?}", + e + ))); + } + + Ok(()) +} + #[cfg(test)] mod test { use super::*; diff --git a/beacon_node/rest_api/src/lib.rs b/beacon_node/rest_api/src/lib.rs index 02c68c639..c0927dde3 100644 --- a/beacon_node/rest_api/src/lib.rs +++ b/beacon_node/rest_api/src/lib.rs @@ -14,6 +14,7 @@ mod url_query; mod validator; use beacon_chain::{BeaconChain, BeaconChainTypes}; +use client_network::NetworkMessage; use client_network::Service as NetworkService; use eth2_config::Eth2Config; use hyper::rt::Future; @@ -25,6 +26,7 @@ use std::ops::Deref; use std::path::PathBuf; use std::sync::Arc; use tokio::runtime::TaskExecutor; +use tokio::sync::mpsc; use url_query::UrlQuery; pub use beacon::{BlockResponse, HeadResponse, StateResponse}; @@ -83,6 +85,7 @@ pub fn start_server( executor: &TaskExecutor, beacon_chain: Arc>, network_service: Arc>, + network_chan: mpsc::UnboundedSender, db_path: PathBuf, eth2_config: Eth2Config, log: &slog::Logger, @@ -113,6 +116,7 @@ pub fn start_server( let beacon_chain = server_bc.clone(); let db_path = db_path.clone(); let network_service 
= network_service.clone(); + let network_chan = network_chan.clone(); let eth2_config = eth2_config.clone(); // Create a simple handler for the router, inject our stateful objects into the request. @@ -126,6 +130,8 @@ pub fn start_server( req.extensions_mut().insert::(db_path.clone()); req.extensions_mut() .insert::>>(network_service.clone()); + req.extensions_mut() + .insert::>(network_chan.clone()); req.extensions_mut() .insert::>(eth2_config.clone()); @@ -177,7 +183,7 @@ pub fn start_server( validator::get_new_beacon_block::(req) } (&Method::POST, "/beacon/validator/block") => { - helpers::implementation_pending_response(req) + validator::publish_beacon_block::(req) } (&Method::GET, "/beacon/validator/attestation") => { validator::get_new_attestation::(req) diff --git a/beacon_node/rest_api/src/validator.rs b/beacon_node/rest_api/src/validator.rs index 49b4c0441..632aee0ac 100644 --- a/beacon_node/rest_api/src/validator.rs +++ b/beacon_node/rest_api/src/validator.rs @@ -1,11 +1,14 @@ use super::{success_response, ApiResult}; use crate::{helpers::*, ApiError, UrlQuery}; -use beacon_chain::BeaconChainTypes; +use beacon_chain::{BeaconChainTypes, BlockProcessingOutcome}; use bls::{AggregateSignature, PublicKey, Signature}; -use hyper::{Body, Request}; +use futures::future::Future; +use futures::stream::Stream; +use hyper::{Body, Error, Request}; use serde::{Deserialize, Serialize}; +use slog::info; use types::beacon_state::EthSpec; -use types::{Attestation, BitList, Epoch, RelativeEpoch, Shard, Slot}; +use types::{Attestation, BeaconBlock, BitList, Epoch, RelativeEpoch, Shard, Slot}; #[derive(Debug, Serialize, Deserialize)] pub struct ValidatorDuty { @@ -195,6 +198,54 @@ pub fn get_new_beacon_block(req: Request) - Ok(success_response(body)) } +/// HTTP Handler to publish a BeaconBlock, which has been signed by a validator. 
+pub fn publish_beacon_block(req: Request) -> ApiResult { + let log = get_logger_from_request(&req); + let (beacon_chain, _head_state) = get_beacon_chain_from_request::(&req)?; + + let (_head, body) = req.into_parts(); + let block_future = body + .fold(Vec::new(), |mut acc, chunk| { + acc.extend_from_slice(&*chunk); + futures::future::ok::<_, Error>(acc) + }) + .map_err(|e| ApiError::ServerError(format!("Unable parse request body: {:?}", e))) + .and_then(|body| { + let block_result: Result, ApiError> = + serde_json::from_slice(&body.as_slice()).map_err(|e| { + ApiError::InvalidQueryParams(format!( + "Unable to deserialize JSON into a BeaconBlock: {:?}", + e + )) + }); + block_result + }); + let block = block_future.wait()?; + match beacon_chain.process_block(block.clone()) { + Ok(BlockProcessingOutcome::Processed { + block_root: block_root, + }) => { + // Block was processed, publish via gossipsub + info!(log, "Processed valid block from API"; "block_slot" => block.slot, "block_root" => format!("{}", block_root)); + publish_beacon_block_to_network::(&req, block)?; + } + Ok(outcome) => { + return Err(ApiError::InvalidQueryParams(format!( + "The BeaconBlock could not be processed: {:?}", + outcome + ))); + } + Err(e) => { + return Err(ApiError::ServerError(format!( + "Unable to process block: {:?}", + e + ))); + } + } + + Ok(success_response(Body::empty())) +} + /// HTTP Handler to produce a new Attestation from the current state, ready to be signed by a validator. 
pub fn get_new_attestation(req: Request) -> ApiResult { let (beacon_chain, head_state) = get_beacon_chain_from_request::(&req)?; From e1d6e187d15aa258d442a1631001ec404a8b3c11 Mon Sep 17 00:00:00 2001 From: Paul Hauner Date: Mon, 9 Sep 2019 01:54:32 -0400 Subject: [PATCH 254/305] Fix bug in crosslink rewards during per-epoch --- .../src/per_epoch_processing.rs | 16 +++--------- .../src/per_epoch_processing/apply_rewards.rs | 19 +++++--------- .../validator_statuses.rs | 26 ++++++++++++++++--- .../src/per_epoch_processing/winning_root.rs | 2 +- 4 files changed, 34 insertions(+), 29 deletions(-) diff --git a/eth2/state_processing/src/per_epoch_processing.rs b/eth2/state_processing/src/per_epoch_processing.rs index f66ce4ea2..bcac1dc27 100644 --- a/eth2/state_processing/src/per_epoch_processing.rs +++ b/eth2/state_processing/src/per_epoch_processing.rs @@ -48,15 +48,10 @@ pub fn per_epoch_processing( process_justification_and_finalization(state, &validator_statuses.total_balances)?; // Crosslinks. - let winning_root_for_shards = process_crosslinks(state, spec)?; + process_crosslinks(state, spec)?; // Rewards and Penalties. - process_rewards_and_penalties( - state, - &mut validator_statuses, - &winning_root_for_shards, - spec, - )?; + process_rewards_and_penalties(state, &mut validator_statuses, spec)?; // Registry Updates. 
process_registry_updates(state, spec)?; @@ -160,9 +155,7 @@ pub fn process_justification_and_finalization( pub fn process_crosslinks( state: &mut BeaconState, spec: &ChainSpec, -) -> Result { - let mut winning_root_for_shards: WinningRootHashSet = HashMap::new(); - +) -> Result<(), Error> { state.previous_crosslinks = state.current_crosslinks.clone(); for &relative_epoch in &[RelativeEpoch::Previous, RelativeEpoch::Current] { @@ -182,12 +175,11 @@ pub fn process_crosslinks( if 3 * winning_root.total_attesting_balance >= 2 * total_committee_balance { state.current_crosslinks[shard as usize] = winning_root.crosslink.clone(); } - winning_root_for_shards.insert(shard, winning_root); } } } - Ok(winning_root_for_shards) + Ok(()) } /// Finish up an epoch update. diff --git a/eth2/state_processing/src/per_epoch_processing/apply_rewards.rs b/eth2/state_processing/src/per_epoch_processing/apply_rewards.rs index 9bd53077a..6de9ed872 100644 --- a/eth2/state_processing/src/per_epoch_processing/apply_rewards.rs +++ b/eth2/state_processing/src/per_epoch_processing/apply_rewards.rs @@ -1,5 +1,5 @@ use super::validator_statuses::{TotalBalances, ValidatorStatus, ValidatorStatuses}; -use super::{Error, WinningRootHashSet}; +use super::Error; use integer_sqrt::IntegerSquareRoot; use types::*; @@ -36,7 +36,6 @@ impl std::ops::AddAssign for Delta { pub fn process_rewards_and_penalties( state: &mut BeaconState, validator_statuses: &mut ValidatorStatuses, - winning_root_for_shards: &WinningRootHashSet, spec: &ChainSpec, ) -> Result<(), Error> { if state.current_epoch() == T::genesis_epoch() { @@ -53,15 +52,13 @@ pub fn process_rewards_and_penalties( let mut deltas = vec![Delta::default(); state.balances.len()]; get_attestation_deltas(&mut deltas, state, &validator_statuses, spec)?; + + // Update statuses with the information from winning roots. 
+ validator_statuses.process_winning_roots(state, spec)?; + get_crosslink_deltas(&mut deltas, state, &validator_statuses, spec)?; - get_proposer_deltas( - &mut deltas, - state, - validator_statuses, - winning_root_for_shards, - spec, - )?; + get_proposer_deltas(&mut deltas, state, validator_statuses, spec)?; // Apply the deltas, over-flowing but not under-flowing (saturating at 0 instead). for (i, delta) in deltas.iter().enumerate() { @@ -79,12 +76,8 @@ fn get_proposer_deltas( deltas: &mut Vec, state: &BeaconState, validator_statuses: &mut ValidatorStatuses, - winning_root_for_shards: &WinningRootHashSet, spec: &ChainSpec, ) -> Result<(), Error> { - // Update statuses with the information from winning roots. - validator_statuses.process_winning_roots(state, winning_root_for_shards, spec)?; - for (index, validator) in validator_statuses.statuses.iter().enumerate() { if validator.is_previous_epoch_attester { let inclusion = validator diff --git a/eth2/state_processing/src/per_epoch_processing/validator_statuses.rs b/eth2/state_processing/src/per_epoch_processing/validator_statuses.rs index 8a7d07d57..3280b981f 100644 --- a/eth2/state_processing/src/per_epoch_processing/validator_statuses.rs +++ b/eth2/state_processing/src/per_epoch_processing/validator_statuses.rs @@ -1,4 +1,4 @@ -use super::WinningRootHashSet; +use super::{winning_root::winning_root, WinningRootHashSet}; use crate::common::get_attesting_indices; use types::*; @@ -292,9 +292,29 @@ impl ValidatorStatuses { pub fn process_winning_roots( &mut self, state: &BeaconState, - winning_roots: &WinningRootHashSet, spec: &ChainSpec, ) -> Result<(), BeaconStateError> { + // We must re-calculate the winning roots here because it is possible that they have + // changed since the first time they were calculated. + // + // This is because we altered the state during the first time we calculated the winning + // roots. 
+ let winning_root_for_shards = { + let mut winning_root_for_shards = WinningRootHashSet::new(); + let relative_epoch = RelativeEpoch::Previous; + + let epoch = relative_epoch.into_epoch(state.current_epoch()); + for offset in 0..state.get_committee_count(relative_epoch)? { + let shard = (state.get_epoch_start_shard(relative_epoch)? + offset) + % T::ShardCount::to_u64(); + if let Some(winning_root) = winning_root(state, shard, epoch, spec)? { + winning_root_for_shards.insert(shard, winning_root); + } + } + + winning_root_for_shards + }; + // Loop through each slot in the previous epoch. for slot in state.previous_epoch().slot_iter(T::slots_per_epoch()) { let crosslink_committees_at_slot = state.get_crosslink_committees_at_slot(slot)?; @@ -302,7 +322,7 @@ impl ValidatorStatuses { // Loop through each committee in the slot. for c in crosslink_committees_at_slot { // If there was some winning crosslink root for the committee's shard. - if let Some(winning_root) = winning_roots.get(&c.shard) { + if let Some(winning_root) = winning_root_for_shards.get(&c.shard) { let total_committee_balance = state.get_total_balance(&c.committee, spec)?; for &validator_index in &winning_root.attesting_validator_indices { // Take note of the balance information for the winning root, it will be diff --git a/eth2/state_processing/src/per_epoch_processing/winning_root.rs b/eth2/state_processing/src/per_epoch_processing/winning_root.rs index 874e11d6c..82a6b0ff1 100644 --- a/eth2/state_processing/src/per_epoch_processing/winning_root.rs +++ b/eth2/state_processing/src/per_epoch_processing/winning_root.rs @@ -3,7 +3,7 @@ use std::collections::{HashMap, HashSet}; use tree_hash::TreeHash; use types::*; -#[derive(Clone)] +#[derive(Clone, Debug)] pub struct WinningRoot { pub crosslink: Crosslink, pub attesting_validator_indices: Vec, From cce76f0bd2df2416827425518023634438a0a8c4 Mon Sep 17 00:00:00 2001 From: Paul Hauner Date: Mon, 9 Sep 2019 01:55:14 -0400 Subject: [PATCH 255/305] Add block 
transition to cli_util --- tests/cli_util/Cargo.toml | 2 + tests/cli_util/src/main.rs | 102 ++++++++++++++++-------- tests/cli_util/src/transition_blocks.rs | 93 +++++++++++++++++++++ 3 files changed, 163 insertions(+), 34 deletions(-) create mode 100644 tests/cli_util/src/transition_blocks.rs diff --git a/tests/cli_util/Cargo.toml b/tests/cli_util/Cargo.toml index 7690d5a87..b868f1541 100644 --- a/tests/cli_util/Cargo.toml +++ b/tests/cli_util/Cargo.toml @@ -13,3 +13,5 @@ serde = "1.0" serde_yaml = "0.8" simple_logger = "1.0" types = { path = "../../eth2/types" } +state_processing = { path = "../../eth2/state_processing" } +eth2_ssz = { path = "../../eth2/utils/ssz" } diff --git a/tests/cli_util/src/main.rs b/tests/cli_util/src/main.rs index 330a0d171..e03febca6 100644 --- a/tests/cli_util/src/main.rs +++ b/tests/cli_util/src/main.rs @@ -1,10 +1,13 @@ #[macro_use] extern crate log; +mod transition_blocks; + use clap::{App, Arg, SubCommand}; use std::fs::File; use std::path::PathBuf; use std::time::{SystemTime, UNIX_EPOCH}; +use transition_blocks::run_transition_blocks; use types::{test_utils::TestingBeaconStateBuilder, EthSpec, MainnetEthSpec, MinimalEthSpec}; fn main() { @@ -54,47 +57,78 @@ fn main() { .help("Output file for generated state."), ), ) + .subcommand( + SubCommand::with_name("transition-blocks") + .about("Performs a state transition given a pre-state and block") + .version("0.1.0") + .author("Paul Hauner ") + .arg( + Arg::with_name("pre-state") + .value_name("BEACON_STATE") + .takes_value(true) + .required(true) + .help("Path to a SSZ file of the pre-state."), + ) + .arg( + Arg::with_name("block") + .value_name("BEACON_BLOCK") + .takes_value(true) + .required(true) + .help("Path to a SSZ file of the block to apply to pre-state."), + ) + .arg( + Arg::with_name("output") + .value_name("SSZ_FILE") + .takes_value(true) + .required(true) + .default_value("./output.ssz") + .help("Path to output a SSZ file."), + ), + ) .get_matches(); - if let Some(matches) 
= matches.subcommand_matches("genesis_yaml") { - let num_validators = matches - .value_of("num_validators") - .expect("slog requires num_validators") - .parse::() - .expect("num_validators must be a valid integer"); + match matches.subcommand() { + ("genesis_yaml", Some(matches)) => { + let num_validators = matches + .value_of("num_validators") + .expect("slog requires num_validators") + .parse::() + .expect("num_validators must be a valid integer"); - let genesis_time = if let Some(string) = matches.value_of("genesis_time") { - string - .parse::() - .expect("genesis_time must be a valid integer") - } else { - warn!("No genesis time supplied via CLI, using the current time."); - SystemTime::now() - .duration_since(UNIX_EPOCH) - .expect("should obtain time since unix epoch") - .as_secs() - }; + let genesis_time = if let Some(string) = matches.value_of("genesis_time") { + string + .parse::() + .expect("genesis_time must be a valid integer") + } else { + warn!("No genesis time supplied via CLI, using the current time."); + SystemTime::now() + .duration_since(UNIX_EPOCH) + .expect("should obtain time since unix epoch") + .as_secs() + }; - let file = matches - .value_of("output_file") - .expect("slog requires output file") - .parse::() - .expect("output_file must be a valid path"); + let file = matches + .value_of("output_file") + .expect("slog requires output file") + .parse::() + .expect("output_file must be a valid path"); - info!( - "Creating genesis state with {} validators and genesis time {}.", - num_validators, genesis_time - ); + info!( + "Creating genesis state with {} validators and genesis time {}.", + num_validators, genesis_time + ); - match matches.value_of("spec").expect("spec is required by slog") { - "minimal" => genesis_yaml::(num_validators, genesis_time, file), - "mainnet" => genesis_yaml::(num_validators, genesis_time, file), - _ => unreachable!("guarded by slog possible_values"), - }; + match matches.value_of("spec").expect("spec is required by 
slog") { + "minimal" => genesis_yaml::(num_validators, genesis_time, file), + "mainnet" => genesis_yaml::(num_validators, genesis_time, file), + _ => unreachable!("guarded by slog possible_values"), + }; - info!("Genesis state YAML file created. Exiting successfully."); - } else { - error!("No subcommand supplied.") + info!("Genesis state YAML file created. Exiting successfully."); + } + ("transition-blocks", Some(matches)) => run_transition_blocks(matches) + .unwrap_or_else(|e| error!("Failed to transition blocks: {}", e)), + (other, _) => error!("Unknown subcommand supplied: {}", other), } } diff --git a/tests/cli_util/src/transition_blocks.rs b/tests/cli_util/src/transition_blocks.rs new file mode 100644 index 000000000..d8b0974b4 --- /dev/null +++ b/tests/cli_util/src/transition_blocks.rs @@ -0,0 +1,93 @@ +use clap::ArgMatches; +use ssz::{Decode, Encode}; +use state_processing::{per_block_processing, per_slot_processing, BlockSignatureStrategy}; +use std::fs::File; +use std::io::prelude::*; +use std::path::PathBuf; +use types::{BeaconBlock, BeaconState, EthSpec, MinimalEthSpec}; + +pub fn run_transition_blocks(matches: &ArgMatches) -> Result<(), String> { + let pre_state_path = matches + .value_of("pre-state") + .ok_or_else(|| "No pre-state file supplied".to_string())? + .parse::() + .map_err(|e| format!("Failed to parse pre-state path: {}", e))?; + + let block_path = matches + .value_of("block") + .ok_or_else(|| "No block file supplied".to_string())? + .parse::() + .map_err(|e| format!("Failed to parse block path: {}", e))?; + + let output_path = matches + .value_of("output") + .ok_or_else(|| "No output file supplied".to_string())? 
+ .parse::() + .map_err(|e| format!("Failed to parse output path: {}", e))?; + + info!("Using minimal spec"); + info!("Pre-state path: {:?}", pre_state_path); + info!("Block path: {:?}", block_path); + + let pre_state: BeaconState = load_from_ssz(pre_state_path)?; + let block: BeaconBlock = load_from_ssz(block_path)?; + + let post_state = do_transition(pre_state, block)?; + + let mut output_file = File::create(output_path.clone()) + .map_err(|e| format!("Unable to create output file: {:?}", e))?; + + output_file + .write_all(&post_state.as_ssz_bytes()) + .map_err(|e| format!("Unable to write to output file: {:?}", e))?; + + /* + println!( + "{}", + serde_yaml::to_string(&post_state).expect("Should serialize state") + ); + */ + + Ok(()) +} + +fn do_transition( + mut pre_state: BeaconState, + block: BeaconBlock, +) -> Result, String> { + let spec = &T::default_spec(); + + pre_state + .build_all_caches(spec) + .map_err(|e| format!("Unable to build caches: {:?}", e))?; + + // Transition the parent state to the block slot. 
+ for i in pre_state.slot.as_u64()..block.slot.as_u64() { + per_slot_processing(&mut pre_state, spec) + .map_err(|e| format!("Failed to advance slot on iteration {}: {:?}", i, e))?; + } + + pre_state + .build_all_caches(spec) + .map_err(|e| format!("Unable to build caches: {:?}", e))?; + + per_block_processing( + &mut pre_state, + &block, + None, + BlockSignatureStrategy::VerifyIndividual, + spec, + ) + .map_err(|e| format!("State transition failed: {:?}", e))?; + + Ok(pre_state) +} + +fn load_from_ssz(path: PathBuf) -> Result { + let mut file = + File::open(path.clone()).map_err(|e| format!("Unable to open file {:?}: {:?}", path, e))?; + let mut bytes = vec![]; + file.read_to_end(&mut bytes) + .map_err(|e| format!("Unable to read from file {:?}: {:?}", path, e))?; + T::from_ssz_bytes(&bytes).map_err(|e| format!("Ssz decode failed: {:?}", e)) +} From 15220ae56587bf925154d1ed47f02e6124f4a081 Mon Sep 17 00:00:00 2001 From: Paul Hauner Date: Mon, 9 Sep 2019 01:55:43 -0400 Subject: [PATCH 256/305] Fix minor vec access panic opportunity --- .../src/per_block_processing/signature_sets.rs | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) diff --git a/eth2/state_processing/src/per_block_processing/signature_sets.rs b/eth2/state_processing/src/per_block_processing/signature_sets.rs index dec529247..4f1a06670 100644 --- a/eth2/state_processing/src/per_block_processing/signature_sets.rs +++ b/eth2/state_processing/src/per_block_processing/signature_sets.rs @@ -42,8 +42,12 @@ pub fn block_proposal_signature_set<'a, T: EthSpec>( block_signed_root: Option, spec: &'a ChainSpec, ) -> Result> { - let block_proposer = &state.validators - [state.get_beacon_proposer_index(block.slot, RelativeEpoch::Current, spec)?]; + let proposer_index = + state.get_beacon_proposer_index(block.slot, RelativeEpoch::Current, spec)?; + let block_proposer = &state + .validators + .get(proposer_index) + .ok_or_else(|| Error::ValidatorUnknown(proposer_index as u64))?; let domain = 
spec.get_domain( block.slot.epoch(T::slots_per_epoch()), From 14e8c6c87c3155ed14068011cb26c1a0ea7ac50c Mon Sep 17 00:00:00 2001 From: Paul Hauner Date: Mon, 9 Sep 2019 12:21:41 -0400 Subject: [PATCH 257/305] Add hex parsing to test_cli --- tests/cli_util/Cargo.toml | 1 + tests/cli_util/src/main.rs | 26 ++++++++++++++++++ tests/cli_util/src/parse_hex.rs | 47 +++++++++++++++++++++++++++++++++ 3 files changed, 74 insertions(+) create mode 100644 tests/cli_util/src/parse_hex.rs diff --git a/tests/cli_util/Cargo.toml b/tests/cli_util/Cargo.toml index b868f1541..6fd211970 100644 --- a/tests/cli_util/Cargo.toml +++ b/tests/cli_util/Cargo.toml @@ -8,6 +8,7 @@ edition = "2018" [dependencies] clap = "2.33" +hex = "0.3" log = "0.4" serde = "1.0" serde_yaml = "0.8" diff --git a/tests/cli_util/src/main.rs b/tests/cli_util/src/main.rs index e03febca6..ef2848578 100644 --- a/tests/cli_util/src/main.rs +++ b/tests/cli_util/src/main.rs @@ -1,9 +1,11 @@ #[macro_use] extern crate log; +mod parse_hex; mod transition_blocks; use clap::{App, Arg, SubCommand}; +use parse_hex::run_parse_hex; use std::fs::File; use std::path::PathBuf; use std::time::{SystemTime, UNIX_EPOCH}; @@ -85,6 +87,27 @@ fn main() { .help("Path to output a SSZ file."), ), ) + .subcommand( + SubCommand::with_name("pretty-hex") + .about("Parses some SSZ as encoded as ASCII 0x-prefixed hex") + .version("0.1.0") + .author("Paul Hauner ") + .arg( + Arg::with_name("type") + .value_name("TYPE") + .takes_value(true) + .required(true) + .possible_values(&["block"]) + .help("The schema of the supplied SSZ."), + ) + .arg( + Arg::with_name("hex_ssz") + .value_name("HEX") + .takes_value(true) + .required(true) + .help("SSZ encoded as 0x-prefixed hex"), + ), + ) .get_matches(); match matches.subcommand() { @@ -128,6 +151,9 @@ fn main() { } ("transition-blocks", Some(matches)) => run_transition_blocks(matches) .unwrap_or_else(|e| error!("Failed to transition blocks: {}", e)), + ("pretty-hex", Some(matches)) => { + 
run_parse_hex(matches).unwrap_or_else(|e| error!("Failed to pretty print hex: {}", e)) + } (other, _) => error!("Unknown subcommand supplied: {}", other), } } diff --git a/tests/cli_util/src/parse_hex.rs b/tests/cli_util/src/parse_hex.rs new file mode 100644 index 000000000..a74e9a645 --- /dev/null +++ b/tests/cli_util/src/parse_hex.rs @@ -0,0 +1,47 @@ +use clap::ArgMatches; +use serde::Serialize; +use ssz::{Decode, Encode}; +use state_processing::{per_block_processing, per_slot_processing, BlockSignatureStrategy}; +use std::fs::File; +use std::io::prelude::*; +use std::path::PathBuf; +use types::{BeaconBlock, BeaconState, EthSpec, MinimalEthSpec}; + +pub fn run_parse_hex(matches: &ArgMatches) -> Result<(), String> { + let type_str = matches + .value_of("type") + .ok_or_else(|| "No type supplied".to_string())?; + let mut hex: String = matches + .value_of("hex_ssz") + .ok_or_else(|| "No hex ssz supplied".to_string())? + .to_string(); + + if hex.starts_with("0x") { + hex = hex[2..].to_string(); + } + + let hex = hex::decode(&hex).map_err(|e| format!("Failed to parse hex: {:?}", e))?; + + info!("Using minimal spec"); + info!("Type: {:?}", type_str); + + match type_str.as_ref() { + "block" => decode_and_print::>(&hex)?, + "state" => decode_and_print::>(&hex)?, + other => return Err(format!("Unknown type: {}", other)), + }; + + Ok(()) +} + +fn decode_and_print(bytes: &[u8]) -> Result<(), String> { + let item = T::from_ssz_bytes(&bytes).map_err(|e| format!("Ssz decode failed: {:?}", e))?; + + println!( + "{}", + serde_yaml::to_string(&item) + .map_err(|e| format!("Unable to write object to YAML: {:?}", e))? 
+ ); + + Ok(()) +} From d9a4dbd91295d4e1d6b670381ab2a7e6b83c876d Mon Sep 17 00:00:00 2001 From: Paul Hauner Date: Mon, 9 Sep 2019 12:22:09 -0400 Subject: [PATCH 258/305] Add nimbus-specific test --- eth2/utils/ssz_types/src/bitfield.rs | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/eth2/utils/ssz_types/src/bitfield.rs b/eth2/utils/ssz_types/src/bitfield.rs index dbe1addbe..cc01d40c7 100644 --- a/eth2/utils/ssz_types/src/bitfield.rs +++ b/eth2/utils/ssz_types/src/bitfield.rs @@ -729,6 +729,13 @@ mod bitvector { assert_eq!(bitfield.ssz_bytes_len(), bytes.len(), "i = {}", i); } } + + #[test] + fn excess_bits_nimbus() { + let bad = vec![0b0001_1111]; + + assert!(BitVector4::from_ssz_bytes(&bad).is_err()); + } } #[cfg(test)] From e07fc08f8ef0b555fe190836e7b8d063b28824d6 Mon Sep 17 00:00:00 2001 From: Paul Hauner Date: Mon, 9 Sep 2019 12:29:50 -0400 Subject: [PATCH 259/305] Fix warnings --- tests/cli_util/src/parse_hex.rs | 8 ++------ 1 file changed, 2 insertions(+), 6 deletions(-) diff --git a/tests/cli_util/src/parse_hex.rs b/tests/cli_util/src/parse_hex.rs index a74e9a645..50f61ea9f 100644 --- a/tests/cli_util/src/parse_hex.rs +++ b/tests/cli_util/src/parse_hex.rs @@ -1,11 +1,7 @@ use clap::ArgMatches; use serde::Serialize; -use ssz::{Decode, Encode}; -use state_processing::{per_block_processing, per_slot_processing, BlockSignatureStrategy}; -use std::fs::File; -use std::io::prelude::*; -use std::path::PathBuf; -use types::{BeaconBlock, BeaconState, EthSpec, MinimalEthSpec}; +use ssz::Decode; +use types::{BeaconBlock, BeaconState, MinimalEthSpec}; pub fn run_parse_hex(matches: &ArgMatches) -> Result<(), String> { let type_str = matches From 60f37789a6069b194711715189f65f3c9bf21eea Mon Sep 17 00:00:00 2001 From: Age Manning Date: Tue, 10 Sep 2019 03:57:44 +1000 Subject: [PATCH 260/305] Allowing lighthouse to suit Alex's non-spec shinanigans --- beacon_node/eth2-libp2p/Cargo.toml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git 
a/beacon_node/eth2-libp2p/Cargo.toml b/beacon_node/eth2-libp2p/Cargo.toml index 59c799105..7ad2f415d 100644 --- a/beacon_node/eth2-libp2p/Cargo.toml +++ b/beacon_node/eth2-libp2p/Cargo.toml @@ -7,8 +7,8 @@ edition = "2018" [dependencies] clap = "2.32.0" #SigP repository -libp2p = { git = "https://github.com/SigP/rust-libp2p", rev = "76f7475e4b7063e663ad03c7524cf091f9961968" } -enr = { git = "https://github.com/SigP/rust-libp2p/", rev = "76f7475e4b7063e663ad03c7524cf091f9961968", features = ["serde"] } +libp2p = { git = "https://github.com/SigP/rust-libp2p", rev = "d4851ea3b564266aeb9d83d10148b972721999db" } +enr = { git = "https://github.com/SigP/rust-libp2p/", rev = "d4851ea3b564266aeb9d83d10148b972721999db", features = ["serde"] } types = { path = "../../eth2/types" } serde = "1.0" serde_derive = "1.0" From 66fd1586ca09fde07e9660af3adb0492e67876d2 Mon Sep 17 00:00:00 2001 From: Paul Hauner Date: Mon, 9 Sep 2019 16:43:08 -0400 Subject: [PATCH 261/305] Add more logging around block/attn production --- beacon_node/beacon_chain/src/beacon_chain.rs | 23 +++++++++++++++++-- .../src/attestation_producer/mod.rs | 9 +++++--- validator_client/src/block_producer/mod.rs | 12 ++++++---- validator_client/src/service.rs | 17 ++++++++++++-- 4 files changed, 50 insertions(+), 11 deletions(-) diff --git a/beacon_node/beacon_chain/src/beacon_chain.rs b/beacon_node/beacon_chain/src/beacon_chain.rs index e88747d83..b026b15af 100644 --- a/beacon_node/beacon_chain/src/beacon_chain.rs +++ b/beacon_node/beacon_chain/src/beacon_chain.rs @@ -9,7 +9,7 @@ use lmd_ghost::LmdGhost; use operation_pool::DepositInsertStatus; use operation_pool::{OperationPool, PersistedOperationPool}; use parking_lot::{RwLock, RwLockReadGuard}; -use slog::{error, info, warn, Logger}; +use slog::{error, info, trace, warn, Logger}; use slot_clock::SlotClock; use ssz::Encode; use state_processing::per_block_processing::{ @@ -639,6 +639,14 @@ impl BeaconChain { 
metrics::inc_counter(&metrics::ATTESTATION_PRODUCTION_SUCCESSES); metrics::stop_timer(timer); + trace!( + self.log, + "Produced beacon attestation data"; + "beacon_block_root" => format!("{}", head_block_root), + "shard" => shard, + "slot" => state.slot + ); + Ok(AttestationData { beacon_block_root: head_block_root, source: state.current_justified_checkpoint.clone(), @@ -751,7 +759,7 @@ impl BeaconChain { // has a higher slot than the attestation. // // Permitting this would allow for attesters to vote on _future_ slots. - if attestation_slot > state.slot { + if state.slot > attestation_slot { Ok(AttestationProcessingOutcome::AttestsToFutureState { state: state.slot, attestation: attestation_slot, @@ -1270,6 +1278,14 @@ impl BeaconChain { metrics::inc_counter(&metrics::BLOCK_PRODUCTION_SUCCESSES); metrics::stop_timer(timer); + trace!( + self.log, + "Produced beacon block"; + "parent" => format!("{}", block.parent_root), + "attestations" => block.body.attestations.len(), + "slot" => block.slot + ); + Ok((block, state)) } @@ -1307,7 +1323,10 @@ impl BeaconChain { warn!( self.log, "Beacon chain re-org"; + "previous_head" => format!("{}", self.head().beacon_block_root), "previous_slot" => previous_slot, + "new_head_parent" => format!("{}", beacon_block.parent_root), + "new_head" => format!("{}", beacon_block_root), "new_slot" => new_slot ); } else { diff --git a/validator_client/src/attestation_producer/mod.rs b/validator_client/src/attestation_producer/mod.rs index e831b4c1c..6f4a5f304 100644 --- a/validator_client/src/attestation_producer/mod.rs +++ b/validator_client/src/attestation_producer/mod.rs @@ -50,9 +50,12 @@ impl<'a, B: BeaconNodeAttestation, S: Signer, E: EthSpec> AttestationProducer<'a /// Handle outputs and results from attestation production. 
pub fn handle_produce_attestation(&mut self, log: slog::Logger) { match self.produce_attestation() { - Ok(ValidatorEvent::AttestationProduced(_slot)) => { - info!(log, "Attestation produced"; "Validator" => format!("{}", self.signer)) - } + Ok(ValidatorEvent::AttestationProduced(slot)) => info!( + log, + "Attestation produced"; + "validator" => format!("{}", self.signer), + "slot" => slot, + ), Err(e) => error!(log, "Attestation production error"; "Error" => format!("{:?}", e)), Ok(ValidatorEvent::SignerRejection(_slot)) => { error!(log, "Attestation production error"; "Error" => "Signer could not sign the attestation".to_string()) diff --git a/validator_client/src/block_producer/mod.rs b/validator_client/src/block_producer/mod.rs index ca1e3a1d8..03d9f5946 100644 --- a/validator_client/src/block_producer/mod.rs +++ b/validator_client/src/block_producer/mod.rs @@ -59,9 +59,12 @@ impl<'a, B: BeaconNodeBlock, S: Signer, E: EthSpec> BlockProducer<'a, B, S, E> { /// Handle outputs and results from block production. pub fn handle_produce_block(&mut self, log: slog::Logger) { match self.produce_block() { - Ok(ValidatorEvent::BlockProduced(_slot)) => { - info!(log, "Block produced"; "Validator" => format!("{}", self.signer)) - } + Ok(ValidatorEvent::BlockProduced(slot)) => info!( + log, + "Block produced"; + "validator" => format!("{}", self.signer), + "slot" => slot, + ), Err(e) => error!(log, "Block production error"; "Error" => format!("{:?}", e)), Ok(ValidatorEvent::SignerRejection(_slot)) => { error!(log, "Block production error"; "Error" => "Signer Could not sign the block".to_string()) @@ -105,12 +108,13 @@ impl<'a, B: BeaconNodeBlock, S: Signer, E: EthSpec> BlockProducer<'a, B, S, E> { .produce_beacon_block(self.slot, &randao_reveal)? 
{ if self.safe_to_produce(&block) { + let slot = block.slot; let domain = self .spec .get_domain(epoch, Domain::BeaconProposer, &self.fork); if let Some(block) = self.sign_block(block, domain) { self.beacon_node.publish_beacon_block(block)?; - Ok(ValidatorEvent::BlockProduced(self.slot)) + Ok(ValidatorEvent::BlockProduced(slot)) } else { Ok(ValidatorEvent::SignerRejection(self.slot)) } diff --git a/validator_client/src/service.rs b/validator_client/src/service.rs index 5169f67f8..ba4f3c133 100644 --- a/validator_client/src/service.rs +++ b/validator_client/src/service.rs @@ -359,7 +359,12 @@ impl Service format!("{}", signers[signer_index])); + info!( + log, + "Producing a block"; + "validator"=> format!("{}", signers[signer_index]), + "slot"=> slot + ); let signer = &signers[signer_index]; let mut block_producer = BlockProducer { fork, @@ -376,6 +381,9 @@ impl Service Service format!("{}", signers[signer_index])); + info!( + log, + "Producing an attestation"; + "validator"=> format!("{}", signers[signer_index]), + "slot"=> slot + ); let signer = &signers[signer_index]; let mut attestation_producer = AttestationProducer { fork, From d466f90843ece7113ccfca6230455f86ebd0f97a Mon Sep 17 00:00:00 2001 From: Paul Hauner Date: Mon, 9 Sep 2019 17:05:23 -0400 Subject: [PATCH 262/305] Rename cli_util to lcli --- Cargo.toml | 2 +- tests/{cli_util => lcli}/.gitignore | 0 tests/{cli_util => lcli}/Cargo.toml | 3 ++- tests/{cli_util => lcli}/src/main.rs | 11 +++++++---- tests/{cli_util => lcli}/src/parse_hex.rs | 0 tests/{cli_util => lcli}/src/transition_blocks.rs | 0 6 files changed, 10 insertions(+), 6 deletions(-) rename tests/{cli_util => lcli}/.gitignore (100%) rename tests/{cli_util => lcli}/Cargo.toml (87%) rename tests/{cli_util => lcli}/src/main.rs (95%) rename tests/{cli_util => lcli}/src/parse_hex.rs (100%) rename tests/{cli_util => lcli}/src/transition_blocks.rs (100%) diff --git a/Cargo.toml b/Cargo.toml index d081ee74f..0a98bb8dd 100644 --- a/Cargo.toml +++ 
b/Cargo.toml @@ -34,7 +34,7 @@ members = [ "beacon_node/version", "beacon_node/beacon_chain", "tests/ef_tests", - "tests/cli_util", + "tests/lcli", "protos", "validator_client", "account_manager", diff --git a/tests/cli_util/.gitignore b/tests/lcli/.gitignore similarity index 100% rename from tests/cli_util/.gitignore rename to tests/lcli/.gitignore diff --git a/tests/cli_util/Cargo.toml b/tests/lcli/Cargo.toml similarity index 87% rename from tests/cli_util/Cargo.toml rename to tests/lcli/Cargo.toml index 6fd211970..3322d8cca 100644 --- a/tests/cli_util/Cargo.toml +++ b/tests/lcli/Cargo.toml @@ -1,5 +1,6 @@ [package] -name = "cli_util" +name = "lcli" +description = "Lighthouse CLI (modeled after zcli)" version = "0.1.0" authors = ["Paul Hauner "] edition = "2018" diff --git a/tests/cli_util/src/main.rs b/tests/lcli/src/main.rs similarity index 95% rename from tests/cli_util/src/main.rs rename to tests/lcli/src/main.rs index ef2848578..63f01c671 100644 --- a/tests/cli_util/src/main.rs +++ b/tests/lcli/src/main.rs @@ -15,10 +15,13 @@ use types::{test_utils::TestingBeaconStateBuilder, EthSpec, MainnetEthSpec, Mini fn main() { simple_logger::init().expect("logger should initialize"); - let matches = App::new("Lighthouse Testing CLI Tool") + let matches = App::new("Lighthouse CLI Tool") .version("0.1.0") .author("Paul Hauner ") - .about("Performs various testing-related tasks.") + .about( + "Performs various testing-related tasks, modelled after zcli. 
\ + by @protolambda.", + ) .subcommand( SubCommand::with_name("genesis_yaml") .about("Generates a genesis YAML file") @@ -89,7 +92,7 @@ fn main() { ) .subcommand( SubCommand::with_name("pretty-hex") - .about("Parses some SSZ as encoded as ASCII 0x-prefixed hex") + .about("Parses SSZ encoded as ASCII 0x-prefixed hex") .version("0.1.0") .author("Paul Hauner ") .arg( @@ -154,7 +157,7 @@ fn main() { ("pretty-hex", Some(matches)) => { run_parse_hex(matches).unwrap_or_else(|e| error!("Failed to pretty print hex: {}", e)) } - (other, _) => error!("Unknown subcommand supplied: {}", other), + (other, _) => error!("Unknown subcommand {}. See --help.", other), } } diff --git a/tests/cli_util/src/parse_hex.rs b/tests/lcli/src/parse_hex.rs similarity index 100% rename from tests/cli_util/src/parse_hex.rs rename to tests/lcli/src/parse_hex.rs diff --git a/tests/cli_util/src/transition_blocks.rs b/tests/lcli/src/transition_blocks.rs similarity index 100% rename from tests/cli_util/src/transition_blocks.rs rename to tests/lcli/src/transition_blocks.rs From 5de80f27995b0ce8a6f0e82e000774502825d85f Mon Sep 17 00:00:00 2001 From: Paul Hauner Date: Mon, 9 Sep 2019 17:12:47 -0400 Subject: [PATCH 263/305] Add extra logging when new head found --- beacon_node/beacon_chain/src/beacon_chain.rs | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/beacon_node/beacon_chain/src/beacon_chain.rs b/beacon_node/beacon_chain/src/beacon_chain.rs index b026b15af..064260cfc 100644 --- a/beacon_node/beacon_chain/src/beacon_chain.rs +++ b/beacon_node/beacon_chain/src/beacon_chain.rs @@ -1332,9 +1332,11 @@ impl BeaconChain { } else { info!( self.log, - "new head block"; + "New head beacon block"; "justified_root" => format!("{}", beacon_state.current_justified_checkpoint.root), + "justified_epoch" => beacon_state.current_justified_checkpoint.epoch, "finalized_root" => format!("{}", beacon_state.finalized_checkpoint.root), + "finalized_epoch" => beacon_state.finalized_checkpoint.epoch, 
"root" => format!("{}", beacon_block_root), "slot" => new_slot, ); From ca9094e79a4bf62a34b8349e5868782e5a1a557f Mon Sep 17 00:00:00 2001 From: Luke Anderson Date: Tue, 10 Sep 2019 10:54:37 +1000 Subject: [PATCH 264/305] WIP: Made block publishing validator function, which sends to a network channel. Untested. --- beacon_node/rest_api/Cargo.toml | 4 +- beacon_node/rest_api/src/helpers.rs | 30 +++++++++------ beacon_node/rest_api/src/lib.rs | 5 ++- beacon_node/rest_api/src/validator.rs | 54 +++++++++++++++++++++------ 4 files changed, 68 insertions(+), 25 deletions(-) diff --git a/beacon_node/rest_api/Cargo.toml b/beacon_node/rest_api/Cargo.toml index 863ea04da..a3d31e410 100644 --- a/beacon_node/rest_api/Cargo.toml +++ b/beacon_node/rest_api/Cargo.toml @@ -25,7 +25,7 @@ types = { path = "../../eth2/types" } clap = "2.32.0" http = "^0.1.17" prometheus = { version = "^0.6", features = ["process"] } -hyper = "0.12.32" +hyper = "0.12.34" futures = "0.1" exit-future = "0.1.3" tokio = "0.1.17" @@ -35,3 +35,5 @@ eth2_config = { path = "../../eth2/utils/eth2_config" } lighthouse_metrics = { path = "../../eth2/utils/lighthouse_metrics" } slot_clock = { path = "../../eth2/utils/slot_clock" } hex = "0.3.2" +parking_lot = "0.9" + diff --git a/beacon_node/rest_api/src/helpers.rs b/beacon_node/rest_api/src/helpers.rs index d6ea0397f..bff7d9ece 100644 --- a/beacon_node/rest_api/src/helpers.rs +++ b/beacon_node/rest_api/src/helpers.rs @@ -4,10 +4,11 @@ use bls::PublicKey; use eth2_libp2p::{PubsubMessage, Topic}; use eth2_libp2p::{BEACON_BLOCK_TOPIC, TOPIC_ENCODING_POSTFIX, TOPIC_PREFIX}; use hex; +use http::header; use hyper::{Body, Request}; use network::NetworkMessage; +use parking_lot::RwLock; use ssz::Encode; -use std::borrow::BorrowMut; use std::sync::Arc; use store::{iter::AncestorIter, Store}; use tokio::sync::mpsc; @@ -41,6 +42,21 @@ pub fn parse_root(string: &str) -> Result { } } +/// Checks the provided request to ensure that the `content-type` header. 
+/// +/// The content-type header should either be omitted, in which case JSON is assumed, or it should +/// explicitly specify `application/json`. If anything else is provided, an error is returned. +pub fn check_content_type_for_json(req: &Request) -> Result<(), ApiError> { + match req.headers().get(header::CONTENT_TYPE) { + Some(h) if h == "application/json" => Ok(()), + Some(h) => Err(ApiError::InvalidQueryParams(format!( + "The provided content-type {:?} is not available, it must be JSON.", + h + ))), + _ => Ok(()), + } +} + /// Parse a PublicKey from a `0x` prefixed hex string pub fn parse_pubkey(string: &str) -> Result { const PREFIX: &str = "0x"; @@ -204,17 +220,9 @@ pub fn get_logger_from_request(req: &Request) -> slog::Logger { } pub fn publish_beacon_block_to_network( - req: &Request, + chan: Arc>>, block: BeaconBlock, ) -> Result<(), ApiError> { - // Get the network service from the request - let mut network_chan = req - .extensions() - .get::>() - .expect( - "Should always get the network channel from the request, since we put it in there.", - ); - // create the network topic to send on let topic_string = format!( "/{}/{}/{}", @@ -224,7 +232,7 @@ pub fn publish_beacon_block_to_network( let message = PubsubMessage::Block(block.as_ssz_bytes()); // Publish the block to the p2p network via gossipsub.
- if let Err(e) = &network_chan.try_send(NetworkMessage::Publish { + if let Err(e) = chan.write().try_send(NetworkMessage::Publish { topics: vec![topic], message: message, }) { diff --git a/beacon_node/rest_api/src/lib.rs b/beacon_node/rest_api/src/lib.rs index c0927dde3..adab0c3bb 100644 --- a/beacon_node/rest_api/src/lib.rs +++ b/beacon_node/rest_api/src/lib.rs @@ -18,8 +18,9 @@ use client_network::NetworkMessage; use client_network::Service as NetworkService; use eth2_config::Eth2Config; use hyper::rt::Future; -use hyper::service::service_fn_ok; -use hyper::{Body, Method, Response, Server, StatusCode}; +use hyper::service::Service; +use hyper::{Body, Method, Request, Response, Server, StatusCode}; +use parking_lot::RwLock; use response_builder::ResponseBuilder; use slog::{info, o, warn}; use std::ops::Deref; diff --git a/beacon_node/rest_api/src/validator.rs b/beacon_node/rest_api/src/validator.rs index 632aee0ac..2ead55d14 100644 --- a/beacon_node/rest_api/src/validator.rs +++ b/beacon_node/rest_api/src/validator.rs @@ -5,8 +5,13 @@ use bls::{AggregateSignature, PublicKey, Signature}; use futures::future::Future; use futures::stream::Stream; use hyper::{Body, Error, Request}; +use network::NetworkMessage; +use parking_lot::RwLock; use serde::{Deserialize, Serialize}; -use slog::info; +use slog::{info, trace, warn}; +use std::sync::Arc; +use tokio; +use tokio::sync::mpsc; use types::beacon_state::EthSpec; use types::{Attestation, BeaconBlock, BitList, Epoch, RelativeEpoch, Shard, Slot}; @@ -200,17 +205,41 @@ pub fn get_new_beacon_block(req: Request) - /// HTTP Handler to publish a BeaconBlock, which has been signed by a validator. 
pub fn publish_beacon_block(req: Request) -> ApiResult { + let _ = check_content_type_for_json(&req)?; let log = get_logger_from_request(&req); let (beacon_chain, _head_state) = get_beacon_chain_from_request::(&req)?; + // Get the network sending channel from the request, for later transmission + let network_chan = req + .extensions() + .get::>>>() + .expect("Should always get the network channel from the request, since we put it in there.") + .clone(); - let (_head, body) = req.into_parts(); - let block_future = body - .fold(Vec::new(), |mut acc, chunk| { - acc.extend_from_slice(&*chunk); - futures::future::ok::<_, Error>(acc) + let body = req.into_body(); + trace!( + log, + "Got the request body, now going to parse it into a block." + ); + let block = body + .concat2() + .map(move |chunk| chunk.iter().cloned().collect::>()) + .map(|chunks| { + let block_result: Result, ApiError> = + serde_json::from_slice(&chunks.as_slice()).map_err(|e| { + ApiError::InvalidQueryParams(format!( + "Unable to deserialize JSON into a BeaconBlock: {:?}", + e + )) + }); + block_result }) + .unwrap() + .unwrap(); + + /* .map_err(|e| ApiError::ServerError(format!("Unable parse request body: {:?}", e))) .and_then(|body| { + trace!(log, "parsing json"); let block_result: Result, ApiError> = serde_json::from_slice(&body.as_slice()).map_err(|e| { ApiError::InvalidQueryParams(format!( @@ -220,16 +249,19 @@ pub fn publish_beacon_block(req: Request) - }); block_result }); + tokio::run(block_future); let block = block_future.wait()?; + */ + trace!(log, "BeaconBlock successfully parsed from JSON"; "block" => serde_json::to_string(&block).expect("We should always be able to serialize a block that we just created.")); match beacon_chain.process_block(block.clone()) { - Ok(BlockProcessingOutcome::Processed { - block_root: block_root, - }) => { + Ok(BlockProcessingOutcome::Processed { block_root }) => { // Block was processed, publish via gossipsub - info!(log, "Processed valid block from API"; 
"block_slot" => block.slot, "block_root" => format!("{}", block_root)); - publish_beacon_block_to_network::(&req, block)?; + info!(log, "Processed valid block from API, transmitting to network."; "block_slot" => block.slot, "block_root" => format!("{}", block_root)); + publish_beacon_block_to_network::(network_chan, block)?; } Ok(outcome) => { + warn!(log, "Block could not be processed, but is being sent to the network anyway."; "block_slot" => block.slot, "outcome" => format!("{:?}", outcome)); + //TODO need to send to network and return http 202 return Err(ApiError::InvalidQueryParams(format!( "The BeaconBlock could not be processed: {:?}", outcome From 476cbae57746edd5e2067b6535e346d6880ff135 Mon Sep 17 00:00:00 2001 From: Luke Anderson Date: Tue, 10 Sep 2019 10:55:46 +1000 Subject: [PATCH 265/305] Updated validator client to do better logging, including of JSON serialised signatures and such, for debugging purposes. --- validator_client/Cargo.toml | 1 + validator_client/src/block_producer/mod.rs | 30 ++++++++++++++-------- validator_client/src/main.rs | 10 +++++++- validator_client/src/service.rs | 3 ++- 4 files changed, 32 insertions(+), 12 deletions(-) diff --git a/validator_client/Cargo.toml b/validator_client/Cargo.toml index 2000f5409..706b28f86 100644 --- a/validator_client/Cargo.toml +++ b/validator_client/Cargo.toml @@ -25,6 +25,7 @@ slot_clock = { path = "../eth2/utils/slot_clock" } types = { path = "../eth2/types" } serde = "1.0" serde_derive = "1.0" +serde_json = "^1.0" slog = "^2.2.3" slog-async = "^2.3.0" slog-json = "^2.3" diff --git a/validator_client/src/block_producer/mod.rs b/validator_client/src/block_producer/mod.rs index ca1e3a1d8..f61cde146 100644 --- a/validator_client/src/block_producer/mod.rs +++ b/validator_client/src/block_producer/mod.rs @@ -6,7 +6,8 @@ pub use self::beacon_node_block::{BeaconNodeError, PublishOutcome}; pub use self::grpc::BeaconBlockGrpcClient; use crate::signer::Signer; use core::marker::PhantomData; -use 
slog::{error, info, warn}; +use serde_json; +use slog::{error, info, trace, warn}; use std::sync::Arc; use tree_hash::{SignedRoot, TreeHash}; use types::{BeaconBlock, ChainSpec, Domain, EthSpec, Fork, Slot}; @@ -53,27 +54,29 @@ pub struct BlockProducer<'a, B: BeaconNodeBlock, S: Signer, E: EthSpec> { pub slots_per_epoch: u64, /// Mere vessel for E. pub _phantom: PhantomData, + /// The logger, for logging + pub log: slog::Logger, } impl<'a, B: BeaconNodeBlock, S: Signer, E: EthSpec> BlockProducer<'a, B, S, E> { /// Handle outputs and results from block production. - pub fn handle_produce_block(&mut self, log: slog::Logger) { + pub fn handle_produce_block(&mut self) { match self.produce_block() { Ok(ValidatorEvent::BlockProduced(_slot)) => { - info!(log, "Block produced"; "Validator" => format!("{}", self.signer)) + info!(self.log, "Block produced"; "Validator" => format!("{}", self.signer)) } - Err(e) => error!(log, "Block production error"; "Error" => format!("{:?}", e)), + Err(e) => error!(self.log, "Block production error"; "Error" => format!("{:?}", e)), Ok(ValidatorEvent::SignerRejection(_slot)) => { - error!(log, "Block production error"; "Error" => "Signer Could not sign the block".to_string()) + error!(self.log, "Block production error"; "Error" => "Signer Could not sign the block".to_string()) } Ok(ValidatorEvent::SlashableBlockNotProduced(_slot)) => { - error!(log, "Block production error"; "Error" => "Rejected the block as it could have been slashed".to_string()) + error!(self.log, "Block production error"; "Error" => "Rejected the block as it could have been slashed".to_string()) } Ok(ValidatorEvent::BeaconNodeUnableToProduceBlock(_slot)) => { - error!(log, "Block production error"; "Error" => "Beacon node was unable to produce a block".to_string()) + error!(self.log, "Block production error"; "Error" => "Beacon node was unable to produce a block".to_string()) } Ok(v) => { - warn!(log, "Unknown result for block production"; "Error" => format!("{:?}",v)) 
+ warn!(self.log, "Unknown result for block production"; "Error" => format!("{:?}",v)) } } } @@ -90,14 +93,21 @@ impl<'a, B: BeaconNodeBlock, S: Signer, E: EthSpec> BlockProducer<'a, B, S, E> { /// slashing. pub fn produce_block(&mut self) -> Result { let epoch = self.slot.epoch(self.slots_per_epoch); + trace!(self.log, "Producing block"; "epoch" => epoch); let message = epoch.tree_hash_root(); let randao_reveal = match self.signer.sign_message( &message, self.spec.get_domain(epoch, Domain::Randao, &self.fork), ) { - None => return Ok(ValidatorEvent::SignerRejection(self.slot)), - Some(signature) => signature, + None => { + warn!(self.log, "Signing rejected"; "message" => format!("{:?}", message)); + return Ok(ValidatorEvent::SignerRejection(self.slot)); + } + Some(signature) => { + info!(self.log, "Signed tree_hash_root for randao_reveal"; "message" => format!("{:?}", message), "signature" => serde_json::to_string(&signature).expect("We should always be able to serialize a signature as JSON.")); + signature + } }; if let Some(block) = self diff --git a/validator_client/src/main.rs b/validator_client/src/main.rs index 39b2e3eae..bb791aa20 100644 --- a/validator_client/src/main.rs +++ b/validator_client/src/main.rs @@ -54,7 +54,6 @@ fn main() { ) .arg( Arg::with_name("spec") - .short("s") .long("spec") .value_name("TITLE") .help("Specifies the default eth2 spec type.") @@ -132,6 +131,15 @@ fn main() { .help("The number of validators.")) ) ) + .subcommand(SubCommand::with_name("sign_block") + .about("Connects to the beacon server, requests a new block (after providing reveal),\ + and prints the signed block to standard out") + .arg(Arg::with_name("validator") + .value_name("VALIDATOR") + .required(true) + .help("The pubkey of the validator that should sign the block.") + ) + ) .get_matches(); let drain = match matches.value_of("debug-level") { diff --git a/validator_client/src/service.rs b/validator_client/src/service.rs index 5169f67f8..4fe744ea2 100644 --- 
a/validator_client/src/service.rs +++ b/validator_client/src/service.rs @@ -369,8 +369,9 @@ impl Service, + log, }; - block_producer.handle_produce_block(log); + block_producer.handle_produce_block(); }); } if work_type.attestation_duty.is_some() { From 405a59e8b9fa28b2e56c8148affa733f57b95cb5 Mon Sep 17 00:00:00 2001 From: Luke Anderson Date: Tue, 10 Sep 2019 10:56:50 +1000 Subject: [PATCH 266/305] WIP: Trying to restructure ApiService to be async. --- beacon_node/rest_api/src/error.rs | 61 ++++++ beacon_node/rest_api/src/lib.rs | 309 ++++++++++++++---------------- 2 files changed, 204 insertions(+), 166 deletions(-) create mode 100644 beacon_node/rest_api/src/error.rs diff --git a/beacon_node/rest_api/src/error.rs b/beacon_node/rest_api/src/error.rs new file mode 100644 index 000000000..bae07ea0b --- /dev/null +++ b/beacon_node/rest_api/src/error.rs @@ -0,0 +1,61 @@ +use hyper::{Body, Method, Request, Response, Server, StatusCode}; +use std::error::Error as StdError; + +type Cause = Box; + +pub struct ApiError { + kind: ApiErrorKind, + cause: Option, +} + +#[derive(PartialEq, Debug)] +pub enum ApiErrorKind { + MethodNotAllowed(String), + ServerError(String), + NotImplemented(String), + InvalidQueryParams(String), + NotFound(String), + ImATeapot(String), // Just in case. 
+} + +pub type ApiResult = Result, ApiError>; + +impl Into> for ApiError { + fn into(self) -> Response { + let status_code: (StatusCode, String) = match self { + ApiError::MethodNotAllowed(desc) => (StatusCode::METHOD_NOT_ALLOWED, desc), + ApiError::ServerError(desc) => (StatusCode::INTERNAL_SERVER_ERROR, desc), + ApiError::NotImplemented(desc) => (StatusCode::NOT_IMPLEMENTED, desc), + ApiError::InvalidQueryParams(desc) => (StatusCode::BAD_REQUEST, desc), + ApiError::NotFound(desc) => (StatusCode::NOT_FOUND, desc), + ApiError::ImATeapot(desc) => (StatusCode::IM_A_TEAPOT, desc), + }; + Response::builder() + .status(status_code.0) + .header("content-type", "text/plain") + .body(Body::from(status_code.1)) + .expect("Response should always be created.") + } +} + +impl From for ApiError { + fn from(e: store::Error) -> ApiError { + ApiError::ServerError(format!("Database error: {:?}", e)) + } +} + +impl From for ApiError { + fn from(e: types::BeaconStateError) -> ApiError { + ApiError::ServerError(format!("BeaconState error: {:?}", e)) + } +} + +impl From for ApiError { + fn from(e: state_processing::per_slot_processing::Error) -> ApiError { + ApiError::ServerError(format!("PerSlotProcessing error: {:?}", e)) + } +} + +impl std::error::Error for ApiError { + fn cause(&self) -> Option<&Error> {} +} diff --git a/beacon_node/rest_api/src/lib.rs b/beacon_node/rest_api/src/lib.rs index adab0c3bb..56ed8c7bb 100644 --- a/beacon_node/rest_api/src/lib.rs +++ b/beacon_node/rest_api/src/lib.rs @@ -4,6 +4,7 @@ extern crate network as client_network; mod beacon; mod config; +mod error; mod helpers; mod metrics; mod network; @@ -32,52 +33,143 @@ use url_query::UrlQuery; pub use beacon::{BlockResponse, HeadResponse, StateResponse}; pub use config::Config as ApiConfig; +use eth2_libp2p::rpc::RequestId; +use serde::ser::StdError; -#[derive(PartialEq, Debug)] -pub enum ApiError { - MethodNotAllowed(String), - ServerError(String), - NotImplemented(String), - InvalidQueryParams(String), - 
NotFound(String), - ImATeapot(String), // Just in case. +type BoxFut = Box, Error = ApiError> + Send>; + +pub struct ApiService { + log: slog::Logger, + beacon_chain: Arc>, + db_path: DBPath, + network_service: Arc>, + network_channel: Arc>>, + eth2_config: Arc, } -pub type ApiResult = Result, ApiError>; +impl Service for ApiService { + type ReqBody = Body; + type ResBody = Body; + type Error = ApiError; + type Future = BoxFut; -impl Into> for ApiError { - fn into(self) -> Response { - let status_code: (StatusCode, String) = match self { - ApiError::MethodNotAllowed(desc) => (StatusCode::METHOD_NOT_ALLOWED, desc), - ApiError::ServerError(desc) => (StatusCode::INTERNAL_SERVER_ERROR, desc), - ApiError::NotImplemented(desc) => (StatusCode::NOT_IMPLEMENTED, desc), - ApiError::InvalidQueryParams(desc) => (StatusCode::BAD_REQUEST, desc), - ApiError::NotFound(desc) => (StatusCode::NOT_FOUND, desc), - ApiError::ImATeapot(desc) => (StatusCode::IM_A_TEAPOT, desc), + fn call(&mut self, mut req: Request) -> Self::Future { + metrics::inc_counter(&metrics::REQUEST_COUNT); + let timer = metrics::start_timer(&metrics::REQUEST_RESPONSE_TIME); + + req.extensions_mut() + .insert::(self.log.clone()); + req.extensions_mut() + .insert::>>(self.beacon_chain.clone()); + req.extensions_mut().insert::(self.db_path.clone()); + req.extensions_mut() + .insert::>>(self.network_service.clone()); + req.extensions_mut() + .insert::>>>( + self.network_channel.clone(), + ); + req.extensions_mut() + .insert::>(self.eth2_config.clone()); + + let path = req.uri().path().to_string(); + + // Route the request to the correct handler. 
+ let result = match (req.method(), path.as_ref()) { + // Methods for Client + (&Method::GET, "/node/version") => node::get_version(req), + /* + (&Method::GET, "/node/genesis_time") => node::get_genesis_time::(req), + (&Method::GET, "/node/syncing") => helpers::implementation_pending_response(req), + + // Methods for Network + (&Method::GET, "/network/enr") => network::get_enr::(req), + (&Method::GET, "/network/peer_count") => network::get_peer_count::(req), + (&Method::GET, "/network/peer_id") => network::get_peer_id::(req), + (&Method::GET, "/network/peers") => network::get_peer_list::(req), + (&Method::GET, "/network/listen_port") => network::get_listen_port::(req), + (&Method::GET, "/network/listen_addresses") => { + network::get_listen_addresses::(req) + } + + // Methods for Beacon Node + (&Method::GET, "/beacon/head") => beacon::get_head::(req), + (&Method::GET, "/beacon/block") => beacon::get_block::(req), + (&Method::GET, "/beacon/block_root") => beacon::get_block_root::(req), + (&Method::GET, "/beacon/blocks") => helpers::implementation_pending_response(req), + (&Method::GET, "/beacon/fork") => beacon::get_fork::(req), + (&Method::GET, "/beacon/attestations") => { + helpers::implementation_pending_response(req) + } + (&Method::GET, "/beacon/attestations/pending") => { + helpers::implementation_pending_response(req) + } + + (&Method::GET, "/beacon/validators") => beacon::get_validators::(req), + (&Method::GET, "/beacon/validators/indicies") => { + helpers::implementation_pending_response(req) + } + (&Method::GET, "/beacon/validators/pubkeys") => { + helpers::implementation_pending_response(req) + } + + // Methods for Validator + (&Method::GET, "/beacon/validator/duties") => { + validator::get_validator_duties::(req) + } + (&Method::GET, "/beacon/validator/block") => { + validator::get_new_beacon_block::(req) + } + (&Method::POST, "/beacon/validator/block") => { + validator::publish_beacon_block::(req) + } + (&Method::GET, "/beacon/validator/attestation") => 
{ + validator::get_new_attestation::(req) + } + (&Method::POST, "/beacon/validator/attestation") => { + helpers::implementation_pending_response(req) + } + + (&Method::GET, "/beacon/state") => beacon::get_state::(req), + (&Method::GET, "/beacon/state_root") => beacon::get_state_root::(req), + (&Method::GET, "/beacon/state/current_finalized_checkpoint") => { + beacon::get_current_finalized_checkpoint::(req) + } + (&Method::GET, "/beacon/state/genesis") => beacon::get_genesis_state::(req), + //TODO: Add aggreggate/filtered state lookups here, e.g. /beacon/validators/balances + + // Methods for bootstrap and checking configuration + (&Method::GET, "/spec") => spec::get_spec::(req), + (&Method::GET, "/spec/slots_per_epoch") => spec::get_slots_per_epoch::(req), + (&Method::GET, "/spec/deposit_contract") => { + helpers::implementation_pending_response(req) + } + (&Method::GET, "/spec/eth2_config") => spec::get_eth2_config::(req), + + (&Method::GET, "/metrics") => metrics::get_prometheus::(req), + + */ + _ => Err(ApiError::NotFound( + "Request path and/or method not found.".to_owned(), + )), }; - Response::builder() - .status(status_code.0) - .header("content-type", "text/plain") - .body(Body::from(status_code.1)) - .expect("Response should always be created.") - } -} -impl From for ApiError { - fn from(e: store::Error) -> ApiError { - ApiError::ServerError(format!("Database error: {:?}", e)) - } -} + let response = match result { + // Return the `hyper::Response`. + Ok(response) => { + metrics::inc_counter(&metrics::SUCCESS_COUNT); + slog::debug!(self.log, "Request successful: {:?}", path); + Box::new(response) + } + // Map the `ApiError` into `hyper::Response`. 
+ Err(e) => { + slog::debug!(self.log, "Request failure: {:?}", path); + Box::new(e.into()) + } + }; -impl From for ApiError { - fn from(e: types::BeaconStateError) -> ApiError { - ApiError::ServerError(format!("BeaconState error: {:?}", e)) - } -} + metrics::stop_timer(timer); -impl From for ApiError { - fn from(e: state_processing::per_slot_processing::Error) -> ApiError { - ApiError::ServerError(format!("PerSlotProcessing error: {:?}", e)) + Box::new(futures::future::ok(response)) } } @@ -112,128 +204,13 @@ pub fn start_server( let server_bc = beacon_chain.clone(); let eth2_config = Arc::new(eth2_config); - let service = move || { - let log = server_log.clone(); - let beacon_chain = server_bc.clone(); - let db_path = db_path.clone(); - let network_service = network_service.clone(); - let network_chan = network_chan.clone(); - let eth2_config = eth2_config.clone(); - - // Create a simple handler for the router, inject our stateful objects into the request. - service_fn_ok(move |mut req| { - metrics::inc_counter(&metrics::REQUEST_COUNT); - let timer = metrics::start_timer(&metrics::REQUEST_RESPONSE_TIME); - - req.extensions_mut().insert::(log.clone()); - req.extensions_mut() - .insert::>>(beacon_chain.clone()); - req.extensions_mut().insert::(db_path.clone()); - req.extensions_mut() - .insert::>>(network_service.clone()); - req.extensions_mut() - .insert::>(network_chan.clone()); - req.extensions_mut() - .insert::>(eth2_config.clone()); - - let path = req.uri().path().to_string(); - - // Route the request to the correct handler. 
- let result = match (req.method(), path.as_ref()) { - // Methods for Client - (&Method::GET, "/node/version") => node::get_version(req), - (&Method::GET, "/node/genesis_time") => node::get_genesis_time::(req), - (&Method::GET, "/node/syncing") => helpers::implementation_pending_response(req), - - // Methods for Network - (&Method::GET, "/network/enr") => network::get_enr::(req), - (&Method::GET, "/network/peer_count") => network::get_peer_count::(req), - (&Method::GET, "/network/peer_id") => network::get_peer_id::(req), - (&Method::GET, "/network/peers") => network::get_peer_list::(req), - (&Method::GET, "/network/listen_port") => network::get_listen_port::(req), - (&Method::GET, "/network/listen_addresses") => { - network::get_listen_addresses::(req) - } - - // Methods for Beacon Node - (&Method::GET, "/beacon/head") => beacon::get_head::(req), - (&Method::GET, "/beacon/block") => beacon::get_block::(req), - (&Method::GET, "/beacon/block_root") => beacon::get_block_root::(req), - (&Method::GET, "/beacon/blocks") => helpers::implementation_pending_response(req), - (&Method::GET, "/beacon/fork") => beacon::get_fork::(req), - (&Method::GET, "/beacon/attestations") => { - helpers::implementation_pending_response(req) - } - (&Method::GET, "/beacon/attestations/pending") => { - helpers::implementation_pending_response(req) - } - - (&Method::GET, "/beacon/validators") => beacon::get_validators::(req), - (&Method::GET, "/beacon/validators/indicies") => { - helpers::implementation_pending_response(req) - } - (&Method::GET, "/beacon/validators/pubkeys") => { - helpers::implementation_pending_response(req) - } - - // Methods for Validator - (&Method::GET, "/beacon/validator/duties") => { - validator::get_validator_duties::(req) - } - (&Method::GET, "/beacon/validator/block") => { - validator::get_new_beacon_block::(req) - } - (&Method::POST, "/beacon/validator/block") => { - validator::publish_beacon_block::(req) - } - (&Method::GET, "/beacon/validator/attestation") => { - 
validator::get_new_attestation::(req) - } - (&Method::POST, "/beacon/validator/attestation") => { - helpers::implementation_pending_response(req) - } - - (&Method::GET, "/beacon/state") => beacon::get_state::(req), - (&Method::GET, "/beacon/state_root") => beacon::get_state_root::(req), - (&Method::GET, "/beacon/state/current_finalized_checkpoint") => { - beacon::get_current_finalized_checkpoint::(req) - } - (&Method::GET, "/beacon/state/genesis") => beacon::get_genesis_state::(req), - //TODO: Add aggreggate/filtered state lookups here, e.g. /beacon/validators/balances - - // Methods for bootstrap and checking configuration - (&Method::GET, "/spec") => spec::get_spec::(req), - (&Method::GET, "/spec/slots_per_epoch") => spec::get_slots_per_epoch::(req), - (&Method::GET, "/spec/deposit_contract") => { - helpers::implementation_pending_response(req) - } - (&Method::GET, "/spec/eth2_config") => spec::get_eth2_config::(req), - - (&Method::GET, "/metrics") => metrics::get_prometheus::(req), - - _ => Err(ApiError::NotFound( - "Request path and/or method not found.".to_owned(), - )), - }; - - let response = match result { - // Return the `hyper::Response`. - Ok(response) => { - metrics::inc_counter(&metrics::SUCCESS_COUNT); - slog::debug!(log, "Request successful: {:?}", path); - response - } - // Map the `ApiError` into `hyper::Response`. 
- Err(e) => { - slog::debug!(log, "Request failure: {:?}", path); - e.into() - } - }; - - metrics::stop_timer(timer); - - response - }) + let service = move || ApiService { + log: server_log.clone(), + beacon_chain: server_bc.clone(), + db_path: db_path.clone(), + network_service: network_service.clone(), + network_channel: Arc::new(RwLock::new(network_chan.clone())), + eth2_config: eth2_config.clone(), }; let log_clone = log.clone(); @@ -242,16 +219,16 @@ pub fn start_server( .with_graceful_shutdown(server_exit) .map_err(move |e| { warn!( - log_clone, - "API failed to start, Unable to bind"; "address" => format!("{:?}", e) + log_clone, + "API failed to start, Unable to bind"; "address" => format!("{:?}", e) ) }); info!( - log, - "REST API started"; - "address" => format!("{}", config.listen_address), - "port" => config.port, + log, + "REST API started"; + "address" => format!("{}", config.listen_address), + "port" => config.port, ); executor.spawn(server); From 965d6f1df9683e2e53f65bac48190c25b60f6c81 Mon Sep 17 00:00:00 2001 From: Luke Anderson Date: Tue, 10 Sep 2019 15:35:54 +1000 Subject: [PATCH 267/305] WIP: More restructuring to have ApiService be a future. 
--- beacon_node/rest_api/Cargo.toml | 1 + beacon_node/rest_api/src/error.rs | 44 ++++++++++++++++++------------- beacon_node/rest_api/src/lib.rs | 16 ++++++++--- 3 files changed, 40 insertions(+), 21 deletions(-) diff --git a/beacon_node/rest_api/Cargo.toml b/beacon_node/rest_api/Cargo.toml index a3d31e410..ac762ebb7 100644 --- a/beacon_node/rest_api/Cargo.toml +++ b/beacon_node/rest_api/Cargo.toml @@ -36,4 +36,5 @@ lighthouse_metrics = { path = "../../eth2/utils/lighthouse_metrics" } slot_clock = { path = "../../eth2/utils/slot_clock" } hex = "0.3.2" parking_lot = "0.9" +futures = "0.1.25" diff --git a/beacon_node/rest_api/src/error.rs b/beacon_node/rest_api/src/error.rs index bae07ea0b..f3eb597a0 100644 --- a/beacon_node/rest_api/src/error.rs +++ b/beacon_node/rest_api/src/error.rs @@ -1,15 +1,8 @@ use hyper::{Body, Method, Request, Response, Server, StatusCode}; use std::error::Error as StdError; -type Cause = Box; - -pub struct ApiError { - kind: ApiErrorKind, - cause: Option, -} - #[derive(PartialEq, Debug)] -pub enum ApiErrorKind { +pub enum ApiError { MethodNotAllowed(String), ServerError(String), NotImplemented(String), @@ -20,21 +13,27 @@ pub enum ApiErrorKind { pub type ApiResult = Result, ApiError>; -impl Into> for ApiError { - fn into(self) -> Response { - let status_code: (StatusCode, String) = match self { +impl ApiError { + pub fn status_code(&self) -> (StatusCode, &String) { + match self { ApiError::MethodNotAllowed(desc) => (StatusCode::METHOD_NOT_ALLOWED, desc), ApiError::ServerError(desc) => (StatusCode::INTERNAL_SERVER_ERROR, desc), ApiError::NotImplemented(desc) => (StatusCode::NOT_IMPLEMENTED, desc), ApiError::InvalidQueryParams(desc) => (StatusCode::BAD_REQUEST, desc), ApiError::NotFound(desc) => (StatusCode::NOT_FOUND, desc), ApiError::ImATeapot(desc) => (StatusCode::IM_A_TEAPOT, desc), - }; + } + } +} + +impl Into> for ApiError { + fn into(self) -> Response { + let status_code = self.status_code(); Response::builder() - 
.status(status_code.0) - .header("content-type", "text/plain") - .body(Body::from(status_code.1)) - .expect("Response should always be created.") + .status(status_code.0) + .header("content-type", "text/plain") + .body(Body::from(*status_code.1)) + .expect("Response should always be created.") } } @@ -56,6 +55,15 @@ impl From for ApiError { } } -impl std::error::Error for ApiError { - fn cause(&self) -> Option<&Error> {} +impl StdError for ApiError { + fn cause(&self) -> Option<&StdError> { + None + } +} + +impl std::fmt::Display for ApiError { + fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result { + let status = self.status_code(); + write!(f, "{:?}: {:?}", status.0, status.1) + } } diff --git a/beacon_node/rest_api/src/lib.rs b/beacon_node/rest_api/src/lib.rs index 56ed8c7bb..d7ea72cc5 100644 --- a/beacon_node/rest_api/src/lib.rs +++ b/beacon_node/rest_api/src/lib.rs @@ -14,6 +14,7 @@ mod spec; mod url_query; mod validator; +use error::{ApiError, ApiResult}; use beacon_chain::{BeaconChain, BeaconChainTypes}; use client_network::NetworkMessage; use client_network::Service as NetworkService; @@ -34,7 +35,6 @@ use url_query::UrlQuery; pub use beacon::{BlockResponse, HeadResponse, StateResponse}; pub use config::Config as ApiConfig; use eth2_libp2p::rpc::RequestId; -use serde::ser::StdError; type BoxFut = Box, Error = ApiError> + Send>; @@ -158,18 +158,19 @@ impl Service for ApiService { Ok(response) => { metrics::inc_counter(&metrics::SUCCESS_COUNT); slog::debug!(self.log, "Request successful: {:?}", path); - Box::new(response) + response } // Map the `ApiError` into `hyper::Response`. 
Err(e) => { slog::debug!(self.log, "Request failure: {:?}", path); - Box::new(e.into()) + e.into() } }; metrics::stop_timer(timer); Box::new(futures::future::ok(response)) + } } @@ -236,6 +237,15 @@ pub fn start_server( Ok(exit_signal) } +impl Future for ApiService { + type Item = Result, ApiError>; + type Error = ApiError; + + fn poll(&mut self) -> Result, Self::Error> { + unimplemented!() + } +} + fn success_response(body: Body) -> Response { Response::builder() .status(StatusCode::OK) From 576712cefeb3016b811eac0a6ad20c2dd63d9b3a Mon Sep 17 00:00:00 2001 From: Luke Anderson Date: Tue, 10 Sep 2019 17:16:41 +1000 Subject: [PATCH 268/305] WIP: Trying to get futures to work... --- beacon_node/rest_api/Cargo.toml | 1 - beacon_node/rest_api/src/lib.rs | 49 ++++++++++++++++++++++++--------- 2 files changed, 36 insertions(+), 14 deletions(-) diff --git a/beacon_node/rest_api/Cargo.toml b/beacon_node/rest_api/Cargo.toml index ac762ebb7..7ea21eeba 100644 --- a/beacon_node/rest_api/Cargo.toml +++ b/beacon_node/rest_api/Cargo.toml @@ -26,7 +26,6 @@ clap = "2.32.0" http = "^0.1.17" prometheus = { version = "^0.6", features = ["process"] } hyper = "0.12.34" -futures = "0.1" exit-future = "0.1.3" tokio = "0.1.17" url = "2.0" diff --git a/beacon_node/rest_api/src/lib.rs b/beacon_node/rest_api/src/lib.rs index d7ea72cc5..2062d4e03 100644 --- a/beacon_node/rest_api/src/lib.rs +++ b/beacon_node/rest_api/src/lib.rs @@ -20,7 +20,7 @@ use client_network::NetworkMessage; use client_network::Service as NetworkService; use eth2_config::Eth2Config; use hyper::rt::Future; -use hyper::service::Service; +use hyper::service::{Service, MakeService}; use hyper::{Body, Method, Request, Response, Server, StatusCode}; use parking_lot::RwLock; use response_builder::ResponseBuilder; @@ -31,13 +31,44 @@ use std::sync::Arc; use tokio::runtime::TaskExecutor; use tokio::sync::mpsc; use url_query::UrlQuery; +use hyper::server::conn::AddrStream; pub use beacon::{BlockResponse, HeadResponse, 
StateResponse}; pub use config::Config as ApiConfig; use eth2_libp2p::rpc::RequestId; +use serde::export::PhantomData; type BoxFut = Box, Error = ApiError> + Send>; +pub struct ApiMaker { + log: slog::Logger, + beacon_chain: Arc>, + db_path: DBPath, + network_service: Arc>, + network_channel: Arc>>, + eth2_config: Arc, +} + +impl MakeService for ApiMaker { + type ReqBody = Body; + type ResBody = Body; + type Error = ApiError; + type Service = ApiService; + type Future = futures::future::FutureResult; + type MakeError = String; + + fn make_service(&mut self, _ctx: AddrStream) -> Self::Future { + futures::future::ok(ApiService { + log: self.log.clone(), + beacon_chain: self.beacon_chain.clone(), + db_path: self.db_path.clone(), + network_service: self.network_service.clone(), + network_channel: self.network_channel.clone(), + eth2_config: self.eth2_config.clone(), + }) + } +} + pub struct ApiService { log: slog::Logger, beacon_chain: Arc>, @@ -205,15 +236,16 @@ pub fn start_server( let server_bc = beacon_chain.clone(); let eth2_config = Arc::new(eth2_config); - let service = move || ApiService { - log: server_log.clone(), - beacon_chain: server_bc.clone(), + let service = move || ApiMaker { + log: log.clone(), + beacon_chain: beacon_chain.clone(), db_path: db_path.clone(), network_service: network_service.clone(), network_channel: Arc::new(RwLock::new(network_chan.clone())), eth2_config: eth2_config.clone(), }; + let log_clone = log.clone(); let server = Server::bind(&bind_addr) .serve(service) @@ -237,15 +269,6 @@ pub fn start_server( Ok(exit_signal) } -impl Future for ApiService { - type Item = Result, ApiError>; - type Error = ApiError; - - fn poll(&mut self) -> Result, Self::Error> { - unimplemented!() - } -} - fn success_response(body: Body) -> Response { Response::builder() .status(StatusCode::OK) From 4dcad27381595fce55811cd34e12118a6a2421c9 Mon Sep 17 00:00:00 2001 From: Michael Sproul Date: Tue, 10 Sep 2019 17:27:28 +1000 Subject: [PATCH 269/305] Fix 
ApiService woes (hopefully) --- beacon_node/rest_api/src/lib.rs | 53 ++++++++------------------------- 1 file changed, 12 insertions(+), 41 deletions(-) diff --git a/beacon_node/rest_api/src/lib.rs b/beacon_node/rest_api/src/lib.rs index 2062d4e03..a5f62360d 100644 --- a/beacon_node/rest_api/src/lib.rs +++ b/beacon_node/rest_api/src/lib.rs @@ -14,13 +14,14 @@ mod spec; mod url_query; mod validator; -use error::{ApiError, ApiResult}; use beacon_chain::{BeaconChain, BeaconChainTypes}; use client_network::NetworkMessage; use client_network::Service as NetworkService; +use error::{ApiError, ApiResult}; use eth2_config::Eth2Config; use hyper::rt::Future; -use hyper::service::{Service, MakeService}; +use hyper::server::conn::AddrStream; +use hyper::service::{MakeService, Service}; use hyper::{Body, Method, Request, Response, Server, StatusCode}; use parking_lot::RwLock; use response_builder::ResponseBuilder; @@ -31,7 +32,6 @@ use std::sync::Arc; use tokio::runtime::TaskExecutor; use tokio::sync::mpsc; use url_query::UrlQuery; -use hyper::server::conn::AddrStream; pub use beacon::{BlockResponse, HeadResponse, StateResponse}; pub use config::Config as ApiConfig; @@ -40,35 +40,6 @@ use serde::export::PhantomData; type BoxFut = Box, Error = ApiError> + Send>; -pub struct ApiMaker { - log: slog::Logger, - beacon_chain: Arc>, - db_path: DBPath, - network_service: Arc>, - network_channel: Arc>>, - eth2_config: Arc, -} - -impl MakeService for ApiMaker { - type ReqBody = Body; - type ResBody = Body; - type Error = ApiError; - type Service = ApiService; - type Future = futures::future::FutureResult; - type MakeError = String; - - fn make_service(&mut self, _ctx: AddrStream) -> Self::Future { - futures::future::ok(ApiService { - log: self.log.clone(), - beacon_chain: self.beacon_chain.clone(), - db_path: self.db_path.clone(), - network_service: self.network_service.clone(), - network_channel: self.network_channel.clone(), - eth2_config: self.eth2_config.clone(), - }) - } -} - pub 
struct ApiService { log: slog::Logger, beacon_chain: Arc>, @@ -201,7 +172,6 @@ impl Service for ApiService { metrics::stop_timer(timer); Box::new(futures::future::ok(response)) - } } @@ -236,16 +206,17 @@ pub fn start_server( let server_bc = beacon_chain.clone(); let eth2_config = Arc::new(eth2_config); - let service = move || ApiMaker { - log: log.clone(), - beacon_chain: beacon_chain.clone(), - db_path: db_path.clone(), - network_service: network_service.clone(), - network_channel: Arc::new(RwLock::new(network_chan.clone())), - eth2_config: eth2_config.clone(), + let service = move || -> futures::future::FutureResult, String> { + futures::future::ok(ApiService { + log: log.clone(), + beacon_chain: beacon_chain.clone(), + db_path: db_path.clone(), + network_service: network_service.clone(), + network_channel: Arc::new(RwLock::new(network_chan.clone())), + eth2_config: eth2_config.clone(), + }) }; - let log_clone = log.clone(); let server = Server::bind(&bind_addr) .serve(service) From b0090df5432bfa702e290afca1f7476f6167da53 Mon Sep 17 00:00:00 2001 From: Luke Anderson Date: Tue, 10 Sep 2019 19:30:36 +1000 Subject: [PATCH 270/305] Getting the restructured ApiService to work. 
--- beacon_node/rest_api/src/error.rs | 14 +++++++------- beacon_node/rest_api/src/lib.rs | 26 +++++++------------------- beacon_node/rest_api/src/validator.rs | 11 +++++------ 3 files changed, 19 insertions(+), 32 deletions(-) diff --git a/beacon_node/rest_api/src/error.rs b/beacon_node/rest_api/src/error.rs index f3eb597a0..b6b1bbfb5 100644 --- a/beacon_node/rest_api/src/error.rs +++ b/beacon_node/rest_api/src/error.rs @@ -1,7 +1,7 @@ use hyper::{Body, Method, Request, Response, Server, StatusCode}; use std::error::Error as StdError; -#[derive(PartialEq, Debug)] +#[derive(PartialEq, Debug, Clone)] pub enum ApiError { MethodNotAllowed(String), ServerError(String), @@ -14,7 +14,7 @@ pub enum ApiError { pub type ApiResult = Result, ApiError>; impl ApiError { - pub fn status_code(&self) -> (StatusCode, &String) { + pub fn status_code(self) -> (StatusCode, String) { match self { ApiError::MethodNotAllowed(desc) => (StatusCode::METHOD_NOT_ALLOWED, desc), ApiError::ServerError(desc) => (StatusCode::INTERNAL_SERVER_ERROR, desc), @@ -30,10 +30,10 @@ impl Into> for ApiError { fn into(self) -> Response { let status_code = self.status_code(); Response::builder() - .status(status_code.0) - .header("content-type", "text/plain") - .body(Body::from(*status_code.1)) - .expect("Response should always be created.") + .status(status_code.0) + .header("content-type", "text/plain") + .body(Body::from(status_code.1)) + .expect("Response should always be created.") } } @@ -63,7 +63,7 @@ impl StdError for ApiError { impl std::fmt::Display for ApiError { fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result { - let status = self.status_code(); + let status = self.clone().status_code(); write!(f, "{:?}: {:?}", status.0, status.1) } } diff --git a/beacon_node/rest_api/src/lib.rs b/beacon_node/rest_api/src/lib.rs index a5f62360d..0852dd1a3 100644 --- a/beacon_node/rest_api/src/lib.rs +++ b/beacon_node/rest_api/src/lib.rs @@ -79,7 +79,6 @@ impl Service for ApiService { let result = 
match (req.method(), path.as_ref()) { // Methods for Client (&Method::GET, "/node/version") => node::get_version(req), - /* (&Method::GET, "/node/genesis_time") => node::get_genesis_time::(req), (&Method::GET, "/node/syncing") => helpers::implementation_pending_response(req), @@ -89,9 +88,7 @@ impl Service for ApiService { (&Method::GET, "/network/peer_id") => network::get_peer_id::(req), (&Method::GET, "/network/peers") => network::get_peer_list::(req), (&Method::GET, "/network/listen_port") => network::get_listen_port::(req), - (&Method::GET, "/network/listen_addresses") => { - network::get_listen_addresses::(req) - } + (&Method::GET, "/network/listen_addresses") => network::get_listen_addresses::(req), // Methods for Beacon Node (&Method::GET, "/beacon/head") => beacon::get_head::(req), @@ -99,9 +96,7 @@ impl Service for ApiService { (&Method::GET, "/beacon/block_root") => beacon::get_block_root::(req), (&Method::GET, "/beacon/blocks") => helpers::implementation_pending_response(req), (&Method::GET, "/beacon/fork") => beacon::get_fork::(req), - (&Method::GET, "/beacon/attestations") => { - helpers::implementation_pending_response(req) - } + (&Method::GET, "/beacon/attestations") => helpers::implementation_pending_response(req), (&Method::GET, "/beacon/attestations/pending") => { helpers::implementation_pending_response(req) } @@ -115,15 +110,9 @@ impl Service for ApiService { } // Methods for Validator - (&Method::GET, "/beacon/validator/duties") => { - validator::get_validator_duties::(req) - } - (&Method::GET, "/beacon/validator/block") => { - validator::get_new_beacon_block::(req) - } - (&Method::POST, "/beacon/validator/block") => { - validator::publish_beacon_block::(req) - } + (&Method::GET, "/beacon/validator/duties") => validator::get_validator_duties::(req), + (&Method::GET, "/beacon/validator/block") => validator::get_new_beacon_block::(req), + //(&Method::POST, "/beacon/validator/block") => validator::publish_beacon_block::(req), (&Method::GET, 
"/beacon/validator/attestation") => { validator::get_new_attestation::(req) } @@ -149,7 +138,6 @@ impl Service for ApiService { (&Method::GET, "/metrics") => metrics::get_prometheus::(req), - */ _ => Err(ApiError::NotFound( "Request path and/or method not found.".to_owned(), )), @@ -208,8 +196,8 @@ pub fn start_server( let service = move || -> futures::future::FutureResult, String> { futures::future::ok(ApiService { - log: log.clone(), - beacon_chain: beacon_chain.clone(), + log: server_log.clone(), + beacon_chain: server_bc.clone(), db_path: db_path.clone(), network_service: network_service.clone(), network_channel: Arc::new(RwLock::new(network_chan.clone())), diff --git a/beacon_node/rest_api/src/validator.rs b/beacon_node/rest_api/src/validator.rs index 2ead55d14..8b2bbd2ac 100644 --- a/beacon_node/rest_api/src/validator.rs +++ b/beacon_node/rest_api/src/validator.rs @@ -203,7 +203,9 @@ pub fn get_new_beacon_block(req: Request) - Ok(success_response(body)) } -/// HTTP Handler to publish a BeaconBlock, which has been signed by a validator. +/* + + HTTP Handler to publish a BeaconBlock, which has been signed by a validator. 
pub fn publish_beacon_block(req: Request) -> ApiResult { let _ = check_content_type_for_json(&req)?; let log = get_logger_from_request(&req); @@ -232,11 +234,8 @@ pub fn publish_beacon_block(req: Request) - )) }); block_result - }) - .unwrap() - .unwrap(); + }); - /* .map_err(|e| ApiError::ServerError(format!("Unable parse request body: {:?}", e))) .and_then(|body| { trace!(log, "parsing json"); @@ -251,7 +250,6 @@ pub fn publish_beacon_block(req: Request) - }); tokio::run(block_future); let block = block_future.wait()?; - */ trace!(log, "BeaconBlock successfully parsed from JSON"; "block" => serde_json::to_string(&block).expect("We should always be able to serialize a block that we just created.")); match beacon_chain.process_block(block.clone()) { Ok(BlockProcessingOutcome::Processed { block_root }) => { @@ -277,6 +275,7 @@ pub fn publish_beacon_block(req: Request) - Ok(success_response(Body::empty())) } + */ /// HTTP Handler to produce a new Attestation from the current state, ready to be signed by a validator. pub fn get_new_attestation(req: Request) -> ApiResult { From b8667217f0da801d471aac51409cd3107d2b0656 Mon Sep 17 00:00:00 2001 From: Luke Anderson Date: Wed, 11 Sep 2019 00:40:22 +1000 Subject: [PATCH 271/305] Made async functions work! 
- Cleaned up imports - Moved ApiError and such to it's own error.rs - Obsoleted 'success_response' in favour of new async regular and json only flavours - Made ApiError work async and be derived from hyper errors - Added a check to ensure an error is thrown if a non-json encoding is requested on a json-only function - Made all the individual service functions return futures (only node and network for now) --- beacon_node/rest_api/src/beacon.rs | 15 +++-- beacon_node/rest_api/src/error.rs | 13 ++++ beacon_node/rest_api/src/helpers.rs | 68 ++++++++++++++------ beacon_node/rest_api/src/lib.rs | 20 ++---- beacon_node/rest_api/src/metrics.rs | 5 +- beacon_node/rest_api/src/network.rs | 62 ++++++------------ beacon_node/rest_api/src/node.rs | 28 ++++---- beacon_node/rest_api/src/response_builder.rs | 25 ++++--- beacon_node/rest_api/src/spec.rs | 8 +-- beacon_node/rest_api/src/validator.rs | 37 +++-------- 10 files changed, 143 insertions(+), 138 deletions(-) diff --git a/beacon_node/rest_api/src/beacon.rs b/beacon_node/rest_api/src/beacon.rs index 66f5e7731..4c57e4770 100644 --- a/beacon_node/rest_api/src/beacon.rs +++ b/beacon_node/rest_api/src/beacon.rs @@ -1,5 +1,6 @@ -use super::{success_response, ApiResult, ResponseBuilder}; -use crate::{helpers::*, ApiError, UrlQuery}; +use crate::helpers::*; +use crate::response_builder::ResponseBuilder; +use crate::{ApiError, ApiResult, BoxFut, NetworkService, UrlQuery}; use beacon_chain::{BeaconChain, BeaconChainTypes}; use hyper::{Body, Request}; use serde::Serialize; @@ -57,7 +58,7 @@ pub fn get_head(req: Request) -> ApiResult let json: String = serde_json::to_string(&head) .map_err(|e| ApiError::ServerError(format!("Unable to serialize HeadResponse: {:?}", e)))?; - Ok(success_response(Body::from(json))) + Ok(success_response_old(Body::from(json))) } #[derive(Serialize, Encode)] @@ -121,7 +122,7 @@ pub fn get_block_root(req: Request) -> ApiR let json: String = serde_json::to_string(&root) .map_err(|e| 
ApiError::ServerError(format!("Unable to serialize root: {:?}", e)))?; - Ok(success_response(Body::from(json))) + Ok(success_response_old(Body::from(json))) } /// HTTP handler to return the `Fork` of the current head. @@ -132,7 +133,7 @@ pub fn get_fork(req: Request) -> ApiResult ApiError::ServerError(format!("Unable to serialize BeaconState::Fork: {:?}", e)) })?; - Ok(success_response(Body::from(json))) + Ok(success_response_old(Body::from(json))) } /// HTTP handler to return the set of validators for an `Epoch` @@ -243,7 +244,7 @@ pub fn get_state_root(req: Request) -> ApiR let json: String = serde_json::to_string(&root) .map_err(|e| ApiError::ServerError(format!("Unable to serialize root: {:?}", e)))?; - Ok(success_response(Body::from(json))) + Ok(success_response_old(Body::from(json))) } /// HTTP handler to return the highest finalized slot. @@ -261,7 +262,7 @@ pub fn get_current_finalized_checkpoint( let json: String = serde_json::to_string(&checkpoint) .map_err(|e| ApiError::ServerError(format!("Unable to serialize checkpoint: {:?}", e)))?; - Ok(success_response(Body::from(json))) + Ok(success_response_old(Body::from(json))) } /// HTTP handler to return a `BeaconState` at the genesis block. 
diff --git a/beacon_node/rest_api/src/error.rs b/beacon_node/rest_api/src/error.rs index b6b1bbfb5..138affae4 100644 --- a/beacon_node/rest_api/src/error.rs +++ b/beacon_node/rest_api/src/error.rs @@ -1,3 +1,4 @@ +use crate::BoxFut; use hyper::{Body, Method, Request, Response, Server, StatusCode}; use std::error::Error as StdError; @@ -37,6 +38,12 @@ impl Into> for ApiError { } } +impl Into for ApiError { + fn into(self) -> BoxFut { + Box::new(futures::future::err(self)) + } +} + impl From for ApiError { fn from(e: store::Error) -> ApiError { ApiError::ServerError(format!("Database error: {:?}", e)) @@ -55,6 +62,12 @@ impl From for ApiError { } } +impl From for ApiError { + fn from(e: hyper::error::Error) -> ApiError { + ApiError::ServerError(format!("Networking error: {:?}", e)) + } +} + impl StdError for ApiError { fn cause(&self) -> Option<&StdError> { None diff --git a/beacon_node/rest_api/src/helpers.rs b/beacon_node/rest_api/src/helpers.rs index bff7d9ece..006deb268 100644 --- a/beacon_node/rest_api/src/helpers.rs +++ b/beacon_node/rest_api/src/helpers.rs @@ -1,19 +1,46 @@ -use crate::{ApiError, ApiResult}; +use crate::response_builder::ResponseBuilder; +use crate::{ApiError, ApiResult, BoxFut}; use beacon_chain::{BeaconChain, BeaconChainTypes}; use bls::PublicKey; use eth2_libp2p::{PubsubMessage, Topic}; use eth2_libp2p::{BEACON_BLOCK_TOPIC, TOPIC_ENCODING_POSTFIX, TOPIC_PREFIX}; use hex; use http::header; -use hyper::{Body, Request}; +use hyper::{Body, Request, Response, StatusCode}; use network::NetworkMessage; use parking_lot::RwLock; +use serde::Serialize; use ssz::Encode; use std::sync::Arc; use store::{iter::AncestorIter, Store}; use tokio::sync::mpsc; use types::{BeaconBlock, BeaconState, EthSpec, Hash256, RelativeEpoch, Slot}; +pub fn success_response(req: Request, item: &T) -> BoxFut { + Box::new(match ResponseBuilder::new(&req).body(item) { + Ok(resp) => futures::future::ok(resp), + Err(e) => futures::future::err(e), + }) +} + +pub fn 
success_response_json(req: Request, item: &T) -> BoxFut { + if let Err(e) = check_content_type_for_json(&req) { + return Box::new(futures::future::err(e)); + } + Box::new(match ResponseBuilder::new(&req).body_json(item) { + Ok(resp) => futures::future::ok(resp), + Err(e) => futures::future::err(e), + }) +} + +pub fn success_response_old(body: Body) -> Response { + Response::builder() + .status(StatusCode::OK) + .header("content-type", "application/json") + .body(body) + .expect("We should always be able to make response from the success body.") +} + /// Parse a slot from a `0x` preixed string. /// /// E.g., `"1234"` @@ -24,6 +51,21 @@ pub fn parse_slot(string: &str) -> Result { .map_err(|e| ApiError::InvalidQueryParams(format!("Unable to parse slot: {:?}", e))) } +/// Checks the provided request to ensure that the `content-type` header. +/// +/// The content-type header should either be omitted, in which case JSON is assumed, or it should +/// explicity specify `application/json`. If anything else is provided, an error is returned. +pub fn check_content_type_for_json(req: &Request) -> Result<(), ApiError> { + match req.headers().get(header::CONTENT_TYPE) { + Some(h) if h == "application/json" => Ok(()), + Some(h) => Err(ApiError::InvalidQueryParams(format!( + "The provided content-type {:?} is not available, this endpoint only supports json.", + h + ))), + _ => Ok(()), + } +} + /// Parse a root from a `0x` preixed string. /// /// E.g., `"0x0000000000000000000000000000000000000000000000000000000000000000"` @@ -42,21 +84,6 @@ pub fn parse_root(string: &str) -> Result { } } -/// Checks the provided request to ensure that the `content-type` header. -/// -/// The content-type header should either be omitted, in which case JSON is assumed, or it should -/// explicity specify `application/json`. If anything else is provided, an error is returned. 
-pub fn check_content_type_for_json(req: &Request) -> Result<(), ApiError> { - match req.headers().get(header::CONTENT_TYPE) { - Some(h) if h == "application/json" => Ok(()), - Some(h) => Err(ApiError::InvalidQueryParams(format!( - "The provided content-type {:?} is not available, it must be JSON.", - h - ))), - _ => Ok(()), - } -} - /// Parse a PublicKey from a `0x` prefixed hex string pub fn parse_pubkey(string: &str) -> Result { const PREFIX: &str = "0x"; @@ -186,10 +213,11 @@ pub fn state_root_at_slot( } } -pub fn implementation_pending_response(_req: Request) -> ApiResult { - Err(ApiError::NotImplemented( +pub fn implementation_pending_response(_req: Request) -> BoxFut { + ApiError::NotImplemented( "API endpoint has not yet been implemented, but is planned to be soon.".to_owned(), - )) + ) + .into() } pub fn get_beacon_chain_from_request( diff --git a/beacon_node/rest_api/src/lib.rs b/beacon_node/rest_api/src/lib.rs index 0852dd1a3..dc4abc2bf 100644 --- a/beacon_node/rest_api/src/lib.rs +++ b/beacon_node/rest_api/src/lib.rs @@ -24,7 +24,6 @@ use hyper::server::conn::AddrStream; use hyper::service::{MakeService, Service}; use hyper::{Body, Method, Request, Response, Server, StatusCode}; use parking_lot::RwLock; -use response_builder::ResponseBuilder; use slog::{info, o, warn}; use std::ops::Deref; use std::path::PathBuf; @@ -59,6 +58,8 @@ impl Service for ApiService { metrics::inc_counter(&metrics::REQUEST_COUNT); let timer = metrics::start_timer(&metrics::REQUEST_RESPONSE_TIME); + // Add all the useful bits into the request, so that we can pull them out in the individual + // functions. 
req.extensions_mut() .insert::(self.log.clone()); req.extensions_mut() @@ -90,6 +91,7 @@ impl Service for ApiService { (&Method::GET, "/network/listen_port") => network::get_listen_port::(req), (&Method::GET, "/network/listen_addresses") => network::get_listen_addresses::(req), + /* // Methods for Beacon Node (&Method::GET, "/beacon/head") => beacon::get_head::(req), (&Method::GET, "/beacon/block") => beacon::get_block::(req), @@ -137,13 +139,13 @@ impl Service for ApiService { (&Method::GET, "/spec/eth2_config") => spec::get_eth2_config::(req), (&Method::GET, "/metrics") => metrics::get_prometheus::(req), - - _ => Err(ApiError::NotFound( + */ + _ => Box::new(futures::future::err(ApiError::NotFound( "Request path and/or method not found.".to_owned(), - )), + ))), }; - let response = match result { + let response = match result.wait() { // Return the `hyper::Response`. Ok(response) => { metrics::inc_counter(&metrics::SUCCESS_COUNT); @@ -228,14 +230,6 @@ pub fn start_server( Ok(exit_signal) } -fn success_response(body: Body) -> Response { - Response::builder() - .status(StatusCode::OK) - .header("content-type", "application/json") - .body(body) - .expect("We should always be able to make response from the success body.") -} - #[derive(Clone)] pub struct DBPath(PathBuf); diff --git a/beacon_node/rest_api/src/metrics.rs b/beacon_node/rest_api/src/metrics.rs index 62a769de1..d430db3f5 100644 --- a/beacon_node/rest_api/src/metrics.rs +++ b/beacon_node/rest_api/src/metrics.rs @@ -1,4 +1,5 @@ -use crate::{helpers::*, success_response, ApiError, ApiResult, DBPath}; +use crate::helpers::*; +use crate::{ApiError, ApiResult, DBPath}; use beacon_chain::BeaconChainTypes; use http::HeaderValue; use hyper::{Body, Request}; @@ -62,7 +63,7 @@ pub fn get_prometheus(req: Request) -> ApiR String::from_utf8(buffer) .map(|string| { - let mut response = success_response(Body::from(string)); + let mut response = success_response_old(Body::from(string)); // Need to change the header to 
text/plain for prometheus response.headers_mut().insert( "content-type", diff --git a/beacon_node/rest_api/src/network.rs b/beacon_node/rest_api/src/network.rs index 4f1f53bb9..e037d43f0 100644 --- a/beacon_node/rest_api/src/network.rs +++ b/beacon_node/rest_api/src/network.rs @@ -1,4 +1,5 @@ -use crate::{success_response, ApiError, ApiResult, NetworkService}; +use crate::helpers::*; +use crate::{ApiError, BoxFut, NetworkService}; use beacon_chain::BeaconChainTypes; use eth2_libp2p::{Enr, Multiaddr, PeerId}; use hyper::{Body, Request}; @@ -7,92 +8,75 @@ use std::sync::Arc; /// HTTP handler to return the list of libp2p multiaddr the client is listening on. /// /// Returns a list of `Multiaddr`, serialized according to their `serde` impl. -pub fn get_listen_addresses(req: Request) -> ApiResult { +pub fn get_listen_addresses(req: Request) -> BoxFut { let network = req .extensions() .get::>>() - .ok_or_else(|| ApiError::ServerError("NetworkService extension missing".to_string()))?; - + .expect("The network service should always be there, we put it there"); let multiaddresses: Vec = network.listen_multiaddrs(); - - Ok(success_response(Body::from( - serde_json::to_string(&multiaddresses) - .map_err(|e| ApiError::ServerError(format!("Unable to serialize Enr: {:?}", e)))?, - ))) + success_response_json(req, &multiaddresses) } /// HTTP handler to return the network port the client is listening on. 
/// /// Returns the TCP port number in its plain form (which is also valid JSON serialization) -pub fn get_listen_port(req: Request) -> ApiResult { +pub fn get_listen_port(req: Request) -> BoxFut { let network = req .extensions() .get::>>() - .ok_or_else(|| ApiError::ServerError("NetworkService extension missing".to_string()))?; + .expect("The network service should always be there, we put it there") + .clone(); - Ok(success_response(Body::from( - serde_json::to_string(&network.listen_port()) - .map_err(|e| ApiError::ServerError(format!("Unable to serialize port: {:?}", e)))?, - ))) + success_response(req, &network.listen_port()) } /// HTTP handler to return the Discv5 ENR from the client's libp2p service. /// /// ENR is encoded as base64 string. -pub fn get_enr(req: Request) -> ApiResult { +pub fn get_enr(req: Request) -> BoxFut { let network = req .extensions() .get::>>() - .ok_or_else(|| ApiError::ServerError("NetworkService extension missing".to_string()))?; + .expect("The network service should always be there, we put it there"); let enr: Enr = network.local_enr(); - - Ok(success_response(Body::from( - serde_json::to_string(&enr.to_base64()) - .map_err(|e| ApiError::ServerError(format!("Unable to serialize Enr: {:?}", e)))?, - ))) + success_response_json(req, &enr.to_base64()) } /// HTTP handler to return the `PeerId` from the client's libp2p service. /// /// PeerId is encoded as base58 string. 
-pub fn get_peer_id(req: Request) -> ApiResult { +pub fn get_peer_id(req: Request) -> BoxFut { let network = req .extensions() .get::>>() - .ok_or_else(|| ApiError::ServerError("NetworkService extension missing".to_string()))?; + .expect("The network service should always be there, we put it there"); let peer_id: PeerId = network.local_peer_id(); - Ok(success_response(Body::from( - serde_json::to_string(&peer_id.to_base58()) - .map_err(|e| ApiError::ServerError(format!("Unable to serialize Enr: {:?}", e)))?, - ))) + success_response_json(req, &peer_id.to_base58()) } /// HTTP handler to return the number of peers connected in the client's libp2p service. -pub fn get_peer_count(req: Request) -> ApiResult { +pub fn get_peer_count(req: Request) -> BoxFut { let network = req .extensions() .get::>>() - .ok_or_else(|| ApiError::ServerError("NetworkService extension missing".to_string()))?; + .expect("The network service should always be there, we put it there"); let connected_peers: usize = network.connected_peers(); - Ok(success_response(Body::from( - serde_json::to_string(&connected_peers) - .map_err(|e| ApiError::ServerError(format!("Unable to serialize Enr: {:?}", e)))?, - ))) + success_response(req, &connected_peers) } /// HTTP handler to return the list of peers connected to the client's libp2p service. /// /// Peers are presented as a list of `PeerId::to_string()`. 
-pub fn get_peer_list(req: Request) -> ApiResult { +pub fn get_peer_list(req: Request) -> BoxFut { let network = req .extensions() .get::>>() - .ok_or_else(|| ApiError::ServerError("NetworkService extension missing".to_string()))?; + .expect("The network service should always be there, we put it there"); let connected_peers: Vec = network .connected_peer_set() @@ -100,9 +84,5 @@ pub fn get_peer_list(req: Request) -> ApiResult { .map(PeerId::to_string) .collect(); - Ok(success_response(Body::from( - serde_json::to_string(&connected_peers).map_err(|e| { - ApiError::ServerError(format!("Unable to serialize Vec: {:?}", e)) - })?, - ))) + success_response_json(req, &connected_peers) } diff --git a/beacon_node/rest_api/src/node.rs b/beacon_node/rest_api/src/node.rs index c75d3ba20..8ca7fb48a 100644 --- a/beacon_node/rest_api/src/node.rs +++ b/beacon_node/rest_api/src/node.rs @@ -1,25 +1,23 @@ -use crate::helpers::get_beacon_chain_from_request; -use crate::{success_response, ApiResult}; +use crate::helpers::*; +use crate::{ApiResult, BoxFut}; use beacon_chain::BeaconChainTypes; use hyper::{Body, Request}; use version; /// Read the version string from the current Lighthouse build. -pub fn get_version(_req: Request) -> ApiResult { - let body = Body::from( - serde_json::to_string(&version::version()) - .expect("Version should always be serialializable as JSON."), - ); - Ok(success_response(body)) +pub fn get_version(req: Request) -> BoxFut { + success_response_json(req, &version::version()) } /// Read the genesis time from the current beacon chain state. 
-pub fn get_genesis_time(req: Request) -> ApiResult { - let (_beacon_chain, head_state) = get_beacon_chain_from_request::(&req)?; +pub fn get_genesis_time(req: Request) -> BoxFut { + let bc = get_beacon_chain_from_request::(&req); + let (_beacon_chain, head_state) = match bc { + Ok((bc, hs)) => (bc, hs), + Err(e) => { + return e.into(); + } + }; let gen_time: u64 = head_state.genesis_time; - let body = Body::from( - serde_json::to_string(&gen_time) - .expect("Genesis should time always have a valid JSON serialization."), - ); - Ok(success_response(body)) + success_response(req, &gen_time) } diff --git a/beacon_node/rest_api/src/response_builder.rs b/beacon_node/rest_api/src/response_builder.rs index c1df4892c..b48b9e41a 100644 --- a/beacon_node/rest_api/src/response_builder.rs +++ b/beacon_node/rest_api/src/response_builder.rs @@ -27,15 +27,9 @@ impl ResponseBuilder { pub fn body(self, item: &T) -> ApiResult { let (body, content_type) = match self.encoding { - Encoding::JSON => ( - Body::from(serde_json::to_string(&item).map_err(|e| { - ApiError::ServerError(format!( - "Unable to serialize response body as JSON: {:?}", - e - )) - })?), - "application/json", - ), + Encoding::JSON => { + return self.body_json(item); + } Encoding::SSZ => (Body::from(item.as_ssz_bytes()), "application/ssz"), Encoding::YAML => ( Body::from(serde_yaml::to_string(&item).map_err(|e| { @@ -54,4 +48,17 @@ impl ResponseBuilder { .body(Body::from(body)) .map_err(|e| ApiError::ServerError(format!("Failed to build response: {:?}", e))) } + + pub fn body_json(self, item: &T) -> ApiResult { + Response::builder() + .status(StatusCode::OK) + .header("content-type", "application/json") + .body(Body::from(serde_json::to_string(&item).map_err(|e| { + ApiError::ServerError(format!( + "Unable to serialize response body as JSON: {:?}", + e + )) + })?)) + .map_err(|e| ApiError::ServerError(format!("Failed to build response: {:?}", e))) + } } diff --git a/beacon_node/rest_api/src/spec.rs 
b/beacon_node/rest_api/src/spec.rs index ad168faf1..3132f3cd4 100644 --- a/beacon_node/rest_api/src/spec.rs +++ b/beacon_node/rest_api/src/spec.rs @@ -1,4 +1,4 @@ -use super::{success_response, ApiResult}; +use super::ApiResult; use crate::helpers::*; use crate::ApiError; use beacon_chain::BeaconChainTypes; @@ -14,7 +14,7 @@ pub fn get_spec(req: Request) -> ApiResult let json: String = serde_json::to_string(&beacon_chain.spec) .map_err(|e| ApiError::ServerError(format!("Unable to serialize spec: {:?}", e)))?; - Ok(success_response(Body::from(json))) + Ok(success_response_old(Body::from(json))) } /// HTTP handler to return the full Eth2Config object. @@ -27,7 +27,7 @@ pub fn get_eth2_config(req: Request) -> Api let json: String = serde_json::to_string(eth2_config.as_ref()) .map_err(|e| ApiError::ServerError(format!("Unable to serialize Eth2Config: {:?}", e)))?; - Ok(success_response(Body::from(json))) + Ok(success_response_old(Body::from(json))) } /// HTTP handler to return the full spec object. 
@@ -35,5 +35,5 @@ pub fn get_slots_per_epoch(_req: Request) - let json: String = serde_json::to_string(&T::EthSpec::slots_per_epoch()) .map_err(|e| ApiError::ServerError(format!("Unable to serialize epoch: {:?}", e)))?; - Ok(success_response(Body::from(json))) + Ok(success_response_old(Body::from(json))) } diff --git a/beacon_node/rest_api/src/validator.rs b/beacon_node/rest_api/src/validator.rs index 8b2bbd2ac..7d236e0cf 100644 --- a/beacon_node/rest_api/src/validator.rs +++ b/beacon_node/rest_api/src/validator.rs @@ -1,5 +1,5 @@ -use super::{success_response, ApiResult}; -use crate::{helpers::*, ApiError, UrlQuery}; +use crate::helpers::*; +use crate::{ApiError, ApiResult, UrlQuery}; use beacon_chain::{BeaconChainTypes, BlockProcessingOutcome}; use bls::{AggregateSignature, PublicKey, Signature}; use futures::future::Future; @@ -160,7 +160,7 @@ pub fn get_validator_duties(req: Request) - serde_json::to_string(&duties) .expect("We should always be able to serialize the duties we created."), ); - Ok(success_response(body)) + Ok(success_response_old(body)) } /// HTTP Handler to produce a new BeaconBlock from the current state, ready to be signed by a validator. @@ -200,12 +200,10 @@ pub fn get_new_beacon_block(req: Request) - serde_json::to_string(&new_block) .expect("We should always be able to serialize a new block that we produced."), ); - Ok(success_response(body)) + Ok(success_response_old(body)) } -/* - - HTTP Handler to publish a BeaconBlock, which has been signed by a validator. +/// HTTP Handler to publish a BeaconBlock, which has been signed by a validator. pub fn publish_beacon_block(req: Request) -> ApiResult { let _ = check_content_type_for_json(&req)?; let log = get_logger_from_request(&req); @@ -222,7 +220,7 @@ pub fn publish_beacon_block(req: Request) - log, "Got the request body, now going to parse it into a block." 
); - let block = body + let block_future = body .concat2() .map(move |chunk| chunk.iter().cloned().collect::>()) .map(|chunks| { @@ -235,22 +233,8 @@ pub fn publish_beacon_block(req: Request) - }); block_result }); - - .map_err(|e| ApiError::ServerError(format!("Unable parse request body: {:?}", e))) - .and_then(|body| { - trace!(log, "parsing json"); - let block_result: Result, ApiError> = - serde_json::from_slice(&body.as_slice()).map_err(|e| { - ApiError::InvalidQueryParams(format!( - "Unable to deserialize JSON into a BeaconBlock: {:?}", - e - )) - }); - block_result - }); - tokio::run(block_future); - let block = block_future.wait()?; - trace!(log, "BeaconBlock successfully parsed from JSON"; "block" => serde_json::to_string(&block).expect("We should always be able to serialize a block that we just created.")); + let block = block_future.wait()??; + trace!(log, "BeaconBlock successfully parsed from JSON"); match beacon_chain.process_block(block.clone()) { Ok(BlockProcessingOutcome::Processed { block_root }) => { // Block was processed, publish via gossipsub @@ -273,9 +257,8 @@ pub fn publish_beacon_block(req: Request) - } } - Ok(success_response(Body::empty())) + Ok(success_response_old(Body::empty())) } - */ /// HTTP Handler to produce a new Attestation from the current state, ready to be signed by a validator. 
pub fn get_new_attestation(req: Request) -> ApiResult { @@ -377,5 +360,5 @@ pub fn get_new_attestation(req: Request) -> serde_json::to_string(&attestation) .expect("We should always be able to serialize a new attestation that we produced."), ); - Ok(success_response(body)) + Ok(success_response_old(body)) } From ebd97730d572a80343f1cd8779d19e1b56e8a5eb Mon Sep 17 00:00:00 2001 From: Luke Anderson Date: Wed, 11 Sep 2019 01:43:49 +1000 Subject: [PATCH 272/305] Converted the Beacon API service to Futures - Added SSZ encode for HeadResponse - Converted all of the /beacon/ endpoints to return BoxFut instead of ApiResult - Wrapped all of the '?'s in a new macro try_future!() - Copied the try macro to try_future, so that a boxed future can easily be returned. - Replaced all of the response serializations to use the new success_response --- beacon_node/rest_api/src/beacon.rs | 164 +++++++++++++--------------- beacon_node/rest_api/src/helpers.rs | 2 +- beacon_node/rest_api/src/lib.rs | 4 +- beacon_node/rest_api/src/macros.rs | 13 +++ 4 files changed, 94 insertions(+), 89 deletions(-) create mode 100644 beacon_node/rest_api/src/macros.rs diff --git a/beacon_node/rest_api/src/beacon.rs b/beacon_node/rest_api/src/beacon.rs index 4c57e4770..f5abc51af 100644 --- a/beacon_node/rest_api/src/beacon.rs +++ b/beacon_node/rest_api/src/beacon.rs @@ -9,7 +9,7 @@ use std::sync::Arc; use store::Store; use types::{BeaconBlock, BeaconState, Epoch, EthSpec, Hash256, Slot, Validator}; -#[derive(Serialize)] +#[derive(Serialize, Encode)] pub struct HeadResponse { pub slot: Slot, pub block_root: Hash256, @@ -23,11 +23,12 @@ pub struct HeadResponse { } /// HTTP handler to return a `BeaconBlock` at a given `root` or `slot`. 
-pub fn get_head(req: Request) -> ApiResult { +pub fn get_head(req: Request) -> BoxFut { let beacon_chain = req .extensions() .get::>>() - .ok_or_else(|| ApiError::ServerError("Beacon chain extension missing".to_string()))?; + .expect("BeaconChain extension must be there, because we put it there.") + .clone(); let chain_head = beacon_chain.head(); @@ -55,10 +56,7 @@ pub fn get_head(req: Request) -> ApiResult previous_justified_block_root: chain_head.beacon_state.previous_justified_checkpoint.root, }; - let json: String = serde_json::to_string(&head) - .map_err(|e| ApiError::ServerError(format!("Unable to serialize HeadResponse: {:?}", e)))?; - - Ok(success_response_old(Body::from(json))) + success_response(req, &head) } #[derive(Serialize, Encode)] @@ -69,84 +67,83 @@ pub struct BlockResponse { } /// HTTP handler to return a `BeaconBlock` at a given `root` or `slot`. -pub fn get_block(req: Request) -> ApiResult { +pub fn get_block(req: Request) -> BoxFut { let beacon_chain = req .extensions() .get::>>() - .ok_or_else(|| ApiError::ServerError("Beacon chain extension missing".to_string()))?; + .expect("BeaconChain extension must be there, because we put it there.") + .clone(); let query_params = ["root", "slot"]; - let (key, value) = UrlQuery::from_request(&req)?.first_of(&query_params)?; + let query = try_future!(UrlQuery::from_request(&req)); + let (key, value) = try_future!(query.first_of(&query_params)); let block_root = match (key.as_ref(), value) { ("slot", value) => { - let target = parse_slot(&value)?; + let target = try_future!(parse_slot(&value)); - block_root_at_slot(&beacon_chain, target).ok_or_else(|| { + try_future!(block_root_at_slot(&beacon_chain, target).ok_or_else(|| { ApiError::NotFound(format!("Unable to find BeaconBlock for slot {:?}", target)) - })? 
+ })) + } + ("root", value) => try_future!(parse_root(&value)), + _ => { + return Box::new(futures::future::err(ApiError::ServerError( + "Unexpected query parameter".into(), + ))) } - ("root", value) => parse_root(&value)?, - _ => return Err(ApiError::ServerError("Unexpected query parameter".into())), }; - let block = beacon_chain + let block = try_future!(try_future!(beacon_chain .store - .get::>(&block_root)? - .ok_or_else(|| { - ApiError::NotFound(format!( - "Unable to find BeaconBlock for root {:?}", - block_root - )) - })?; + .get::>(&block_root)) + .ok_or_else(|| { + ApiError::NotFound(format!( + "Unable to find BeaconBlock for root {:?}", + block_root + )) + })); let response = BlockResponse { root: block_root, beacon_block: block, }; - ResponseBuilder::new(&req).body(&response) + success_response(req, &response) } /// HTTP handler to return a `BeaconBlock` root at a given `slot`. -pub fn get_block_root(req: Request) -> ApiResult { - let (beacon_chain, _head_state) = get_beacon_chain_from_request::(&req)?; +pub fn get_block_root(req: Request) -> BoxFut { + let (beacon_chain, _head_state) = try_future!(get_beacon_chain_from_request::(&req)); - let slot_string = UrlQuery::from_request(&req)?.only_one("slot")?; - let target = parse_slot(&slot_string)?; + let slot_string: String = + try_future!(try_future!(UrlQuery::from_request(&req)).only_one("slot")); + let target: Slot = try_future!(parse_slot(&slot_string)); - let root = block_root_at_slot(&beacon_chain, target).ok_or_else(|| { + let root = try_future!(block_root_at_slot(&beacon_chain, target).ok_or_else(|| { ApiError::NotFound(format!("Unable to find BeaconBlock for slot {:?}", target)) - })?; + })); - let json: String = serde_json::to_string(&root) - .map_err(|e| ApiError::ServerError(format!("Unable to serialize root: {:?}", e)))?; - - Ok(success_response_old(Body::from(json))) + success_response(req, &root) } /// HTTP handler to return the `Fork` of the current head. 
-pub fn get_fork(req: Request) -> ApiResult { - let (_beacon_chain, head_state) = get_beacon_chain_from_request::(&req)?; +pub fn get_fork(req: Request) -> BoxFut { + let (_beacon_chain, head_state) = try_future!(get_beacon_chain_from_request::(&req)); - let json: String = serde_json::to_string(&head_state.fork).map_err(|e| { - ApiError::ServerError(format!("Unable to serialize BeaconState::Fork: {:?}", e)) - })?; - - Ok(success_response_old(Body::from(json))) + success_response(req, &head_state.fork) } /// HTTP handler to return the set of validators for an `Epoch` /// /// The `Epoch` parameter can be any epoch number. If it is not specified, /// the current epoch is assumed. -pub fn get_validators(req: Request) -> ApiResult { - let (beacon_chain, _head_state) = get_beacon_chain_from_request::(&req)?; +pub fn get_validators(req: Request) -> BoxFut { + let (beacon_chain, _head_state) = try_future!(get_beacon_chain_from_request::(&req)); let epoch = match UrlQuery::from_request(&req) { // We have some parameters, so make sure it's the epoch one and parse it - Ok(query) => query - .only_one("epoch")? + Ok(query) => try_future!(try_future!(query.only_one("epoch")) .parse::() .map(Epoch::from) .map_err(|e| { @@ -154,11 +151,11 @@ pub fn get_validators(req: Request) -> ApiR "Invalid epoch parameter, must be a u64. 
{:?}", e )) - })?, + })), // In this case, our url query did not contain any parameters, so we take the default - Err(_) => beacon_chain.epoch().map_err(|e| { + Err(_) => try_future!(beacon_chain.epoch().map_err(|e| { ApiError::ServerError(format!("Unable to determine current epoch: {:?}", e)) - })?, + })), }; let all_validators = &beacon_chain.head().beacon_state.validators; @@ -168,7 +165,7 @@ pub fn get_validators(req: Request) -> ApiR .cloned() .collect(); - ResponseBuilder::new(&req).body(&active_vals) + success_response(req, &active_vals) } #[derive(Serialize, Encode)] @@ -182,43 +179,42 @@ pub struct StateResponse { /// /// Will not return a state if the request slot is in the future. Will return states higher than /// the current head by skipping slots. -pub fn get_state(req: Request) -> ApiResult { - let (beacon_chain, head_state) = get_beacon_chain_from_request::(&req)?; +pub fn get_state(req: Request) -> BoxFut { + let (beacon_chain, head_state) = try_future!(get_beacon_chain_from_request::(&req)); let (key, value) = match UrlQuery::from_request(&req) { Ok(query) => { - // We have *some* parameters, check them. + // We have *some* parameters, just check them. let query_params = ["root", "slot"]; - match query.first_of(&query_params) { - Ok((k, v)) => (k, v), - Err(e) => { - // Wrong parameters provided, or another error, return the error. - return Err(e); - } - } + try_future!(query.first_of(&query_params)) } Err(ApiError::InvalidQueryParams(_)) => { // No parameters provided at all, use current slot. 
(String::from("slot"), head_state.slot.to_string()) } Err(e) => { - return Err(e); + return Box::new(futures::future::err(e)); } }; let (root, state): (Hash256, BeaconState) = match (key.as_ref(), value) { - ("slot", value) => state_at_slot(&beacon_chain, parse_slot(&value)?)?, + ("slot", value) => try_future!(state_at_slot( + &beacon_chain, + try_future!(parse_slot(&value)) + )), ("root", value) => { - let root = &parse_root(&value)?; + let root = &try_future!(parse_root(&value)); - let state = beacon_chain - .store - .get(root)? - .ok_or_else(|| ApiError::NotFound(format!("No state for root: {:?}", root)))?; + let state = try_future!(try_future!(beacon_chain.store.get(root)) + .ok_or_else(|| ApiError::NotFound(format!("No state for root: {:?}", root)))); (*root, state) } - _ => return Err(ApiError::ServerError("Unexpected query parameter".into())), + _ => { + return Box::new(futures::future::err(ApiError::ServerError( + "Unexpected query parameter".into(), + ))) + } }; let response = StateResponse { @@ -226,32 +222,29 @@ pub fn get_state(req: Request) -> ApiResult beacon_state: state, }; - ResponseBuilder::new(&req).body(&response) + success_response(req, &response) } /// HTTP handler to return a `BeaconState` root at a given `slot`. /// /// Will not return a state if the request slot is in the future. Will return states higher than /// the current head by skipping slots. 
-pub fn get_state_root(req: Request) -> ApiResult { - let (beacon_chain, _head_state) = get_beacon_chain_from_request::(&req)?; +pub fn get_state_root(req: Request) -> BoxFut { + let (beacon_chain, _head_state) = try_future!(get_beacon_chain_from_request::(&req)); - let slot_string = UrlQuery::from_request(&req)?.only_one("slot")?; - let slot = parse_slot(&slot_string)?; + let slot_string = try_future!(try_future!(UrlQuery::from_request(&req)).only_one("slot")); + let slot = try_future!(parse_slot(&slot_string)); - let root = state_root_at_slot(&beacon_chain, slot)?; + let root = try_future!(state_root_at_slot(&beacon_chain, slot)); - let json: String = serde_json::to_string(&root) - .map_err(|e| ApiError::ServerError(format!("Unable to serialize root: {:?}", e)))?; - - Ok(success_response_old(Body::from(json))) + success_response(req, &root) } /// HTTP handler to return the highest finalized slot. pub fn get_current_finalized_checkpoint( req: Request, -) -> ApiResult { - let (beacon_chain, _head_state) = get_beacon_chain_from_request::(&req)?; +) -> BoxFut { + let (beacon_chain, _head_state) = try_future!(get_beacon_chain_from_request::(&req)); let checkpoint = beacon_chain .head() @@ -259,17 +252,14 @@ pub fn get_current_finalized_checkpoint( .finalized_checkpoint .clone(); - let json: String = serde_json::to_string(&checkpoint) - .map_err(|e| ApiError::ServerError(format!("Unable to serialize checkpoint: {:?}", e)))?; - - Ok(success_response_old(Body::from(json))) + success_response(req, &checkpoint) } /// HTTP handler to return a `BeaconState` at the genesis block. 
-pub fn get_genesis_state(req: Request) -> ApiResult { - let (beacon_chain, _head_state) = get_beacon_chain_from_request::(&req)?; +pub fn get_genesis_state(req: Request) -> BoxFut { + let (beacon_chain, _head_state) = try_future!(get_beacon_chain_from_request::(&req)); - let (_root, state) = state_at_slot(&beacon_chain, Slot::new(0))?; + let (_root, state) = try_future!(state_at_slot(&beacon_chain, Slot::new(0))); - ResponseBuilder::new(&req).body(&state) + success_response(req, &state) } diff --git a/beacon_node/rest_api/src/helpers.rs b/beacon_node/rest_api/src/helpers.rs index 006deb268..9eae4a00a 100644 --- a/beacon_node/rest_api/src/helpers.rs +++ b/beacon_node/rest_api/src/helpers.rs @@ -227,7 +227,7 @@ pub fn get_beacon_chain_from_request( let beacon_chain = req .extensions() .get::>>() - .ok_or_else(|| ApiError::ServerError("Beacon chain extension missing".into()))?; + .expect("BeaconChain extension must be there, because we put it there."); let mut head_state = beacon_chain .state_now() .map_err(|e| ApiError::ServerError(format!("Unable to get current BeaconState {:?}", e)))?; diff --git a/beacon_node/rest_api/src/lib.rs b/beacon_node/rest_api/src/lib.rs index dc4abc2bf..e4f88caf3 100644 --- a/beacon_node/rest_api/src/lib.rs +++ b/beacon_node/rest_api/src/lib.rs @@ -1,4 +1,6 @@ #[macro_use] +mod macros; +#[macro_use] extern crate lazy_static; extern crate network as client_network; @@ -91,7 +93,6 @@ impl Service for ApiService { (&Method::GET, "/network/listen_port") => network::get_listen_port::(req), (&Method::GET, "/network/listen_addresses") => network::get_listen_addresses::(req), - /* // Methods for Beacon Node (&Method::GET, "/beacon/head") => beacon::get_head::(req), (&Method::GET, "/beacon/block") => beacon::get_block::(req), @@ -111,6 +112,7 @@ impl Service for ApiService { helpers::implementation_pending_response(req) } + /* // Methods for Validator (&Method::GET, "/beacon/validator/duties") => validator::get_validator_duties::(req), 
(&Method::GET, "/beacon/validator/block") => validator::get_new_beacon_block::(req), diff --git a/beacon_node/rest_api/src/macros.rs b/beacon_node/rest_api/src/macros.rs new file mode 100644 index 000000000..e95cfb8ae --- /dev/null +++ b/beacon_node/rest_api/src/macros.rs @@ -0,0 +1,13 @@ +macro_rules! try_future { + ($expr:expr) => { + match $expr { + core::result::Result::Ok(val) => val, + core::result::Result::Err(err) => { + return Box::new(futures::future::err(std::convert::From::from(err))) + } + } + }; + ($expr:expr,) => { + $crate::try_future!($expr) + }; +} From 04b43a51f95a2edc80db00fc20286026bc8e644b Mon Sep 17 00:00:00 2001 From: Age Manning Date: Wed, 11 Sep 2019 02:07:39 +1000 Subject: [PATCH 273/305] Updates gossipsub to LRUcache --- beacon_node/eth2-libp2p/Cargo.toml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/beacon_node/eth2-libp2p/Cargo.toml b/beacon_node/eth2-libp2p/Cargo.toml index 7ad2f415d..2ea4414dd 100644 --- a/beacon_node/eth2-libp2p/Cargo.toml +++ b/beacon_node/eth2-libp2p/Cargo.toml @@ -7,8 +7,8 @@ edition = "2018" [dependencies] clap = "2.32.0" #SigP repository -libp2p = { git = "https://github.com/SigP/rust-libp2p", rev = "d4851ea3b564266aeb9d83d10148b972721999db" } -enr = { git = "https://github.com/SigP/rust-libp2p/", rev = "d4851ea3b564266aeb9d83d10148b972721999db", features = ["serde"] } +libp2p = { git = "https://github.com/SigP/rust-libp2p", rev = "8ac9c744197faaadc0e2b64fed7470ac4e2a41ca" } +enr = { git = "https://github.com/SigP/rust-libp2p/", rev = "8ac9c744197faaadc0e2b64fed7470ac4e2a41ca", features = ["serde"] } types = { path = "../../eth2/types" } serde = "1.0" serde_derive = "1.0" From 8c5a8034b6a2a06b7d67d11d665a1aa12b9f3061 Mon Sep 17 00:00:00 2001 From: Paul Hauner Date: Tue, 10 Sep 2019 12:13:54 -0400 Subject: [PATCH 274/305] Add whiteblock script, CLI options to support it --- beacon_node/eth2-libp2p/Cargo.toml | 1 + beacon_node/eth2-libp2p/src/config.rs | 11 +++ 
beacon_node/eth2-libp2p/src/service.rs | 35 ++++++- beacon_node/src/config.rs | 23 ++--- beacon_node/src/main.rs | 13 ++- .../generate_deterministic_keypairs.rs | 14 ++- eth2/types/src/test_utils/mod.rs | 1 + eth2/utils/eth2_interop_keypairs/Cargo.toml | 7 +- .../specs/keygen_10_validators.yaml | 20 ++++ eth2/utils/eth2_interop_keypairs/src/lib.rs | 67 +++++++++++++ .../eth2_interop_keypairs/tests/from_file.rs | 23 +++++ .../tests/{test.rs => generation.rs} | 0 scripts/whiteblock_start.sh | 96 +++++++++++++++++++ validator_client/Cargo.toml | 1 + validator_client/src/config.rs | 15 ++- validator_client/src/main.rs | 43 +++++++-- 16 files changed, 342 insertions(+), 28 deletions(-) create mode 100644 eth2/utils/eth2_interop_keypairs/specs/keygen_10_validators.yaml create mode 100644 eth2/utils/eth2_interop_keypairs/tests/from_file.rs rename eth2/utils/eth2_interop_keypairs/tests/{test.rs => generation.rs} (100%) create mode 100755 scripts/whiteblock_start.sh diff --git a/beacon_node/eth2-libp2p/Cargo.toml b/beacon_node/eth2-libp2p/Cargo.toml index 7ad2f415d..f1f963362 100644 --- a/beacon_node/eth2-libp2p/Cargo.toml +++ b/beacon_node/eth2-libp2p/Cargo.toml @@ -6,6 +6,7 @@ edition = "2018" [dependencies] clap = "2.32.0" +hex = "0.3" #SigP repository libp2p = { git = "https://github.com/SigP/rust-libp2p", rev = "d4851ea3b564266aeb9d83d10148b972721999db" } enr = { git = "https://github.com/SigP/rust-libp2p/", rev = "d4851ea3b564266aeb9d83d10148b972721999db", features = ["serde"] } diff --git a/beacon_node/eth2-libp2p/src/config.rs b/beacon_node/eth2-libp2p/src/config.rs index fd44b99af..fa20d2cdd 100644 --- a/beacon_node/eth2-libp2p/src/config.rs +++ b/beacon_node/eth2-libp2p/src/config.rs @@ -40,6 +40,12 @@ pub struct Config { /// Target number of connected peers. pub max_peers: usize, + /// A secp256k1 secret key, as bytes in ASCII-encoded hex. + /// + /// With or without `0x` prefix. 
+ #[serde(skip)] + pub secret_key_hex: Option, + /// Gossipsub configuration parameters. #[serde(skip)] pub gs_config: GossipsubConfig, @@ -70,6 +76,7 @@ impl Default for Config { discovery_address: "127.0.0.1".parse().expect("valid ip address"), discovery_port: 9000, max_peers: 10, + secret_key_hex: None, // Note: The topics by default are sent as plain strings. Hashes are an optional // parameter. gs_config: GossipsubConfigBuilder::new() @@ -158,6 +165,10 @@ impl Config { .map_err(|_| format!("Invalid discovery port: {}", disc_port_str))?; } + if let Some(p2p_priv_key) = args.value_of("p2p-priv-key") { + self.secret_key_hex = Some(p2p_priv_key.to_string()); + } + Ok(()) } } diff --git a/beacon_node/eth2-libp2p/src/service.rs b/beacon_node/eth2-libp2p/src/service.rs index dac011752..bf1ed0123 100644 --- a/beacon_node/eth2-libp2p/src/service.rs +++ b/beacon_node/eth2-libp2p/src/service.rs @@ -42,16 +42,22 @@ impl Service { pub fn new(config: NetworkConfig, log: slog::Logger) -> error::Result { trace!(log, "Libp2p Service starting"); + let local_keypair = if let Some(hex_bytes) = &config.secret_key_hex { + keypair_from_hex(hex_bytes)? 
+ } else { + load_private_key(&config, &log) + }; + // load the private key from CLI flag, disk or generate a new one - let local_private_key = load_private_key(&config, &log); - let local_peer_id = PeerId::from(local_private_key.public()); + // let local_private_key = load_private_key(&config, &log); + let local_peer_id = PeerId::from(local_keypair.public()); info!(log, "Libp2p Service"; "peer_id" => format!("{:?}", local_peer_id)); let mut swarm = { // Set up the transport - tcp/ws with secio and mplex/yamux - let transport = build_transport(local_private_key.clone()); + let transport = build_transport(local_keypair.clone()); // Lighthouse network behaviour - let behaviour = Behaviour::new(&local_private_key, &config, &log)?; + let behaviour = Behaviour::new(&local_keypair, &config, &log)?; Swarm::new(transport, behaviour, local_peer_id.clone()) }; @@ -246,6 +252,27 @@ pub enum Libp2pEvent { }, } +fn keypair_from_hex(hex_bytes: &str) -> error::Result { + let hex_bytes = if hex_bytes.starts_with("0x") { + hex_bytes[2..].to_string() + } else { + hex_bytes.to_string() + }; + + hex::decode(&hex_bytes) + .map_err(|e| format!("Failed to parse p2p secret key bytes: {:?}", e).into()) + .and_then(keypair_from_bytes) +} + +fn keypair_from_bytes(mut bytes: Vec) -> error::Result { + libp2p::core::identity::secp256k1::SecretKey::from_bytes(&mut bytes) + .map(|secret| { + let keypair: libp2p::core::identity::secp256k1::Keypair = secret.into(); + Keypair::Secp256k1(keypair) + }) + .map_err(|e| format!("Unable to parse p2p secret key: {:?}", e).into()) +} + /// Loads a private key from disk. If this fails, a new key is /// generated and is then saved to disk. 
/// diff --git a/beacon_node/src/config.rs b/beacon_node/src/config.rs index cf5616938..978e029e7 100644 --- a/beacon_node/src/config.rs +++ b/beacon_node/src/config.rs @@ -13,7 +13,7 @@ pub const CLIENT_CONFIG_FILENAME: &str = "beacon-node.toml"; pub const ETH2_CONFIG_FILENAME: &str = "eth2-spec.toml"; type Result = std::result::Result; -type Config = (ClientConfig, Eth2Config); +type Config = (ClientConfig, Eth2Config, Logger); /// Gets the fully-initialized global client and eth2 configuration objects. /// @@ -22,8 +22,10 @@ type Config = (ClientConfig, Eth2Config); /// The output of this function depends primarily upon the given `cli_args`, however it's behaviour /// may be influenced by other external services like the contents of the file system or the /// response of some remote server. -pub fn get_configs(cli_args: &ArgMatches, log: &Logger) -> Result { - let mut builder = ConfigBuilder::new(cli_args, log)?; +pub fn get_configs(cli_args: &ArgMatches, core_log: Logger) -> Result { + let log = core_log.clone(); + + let mut builder = ConfigBuilder::new(cli_args, core_log)?; if let Some(server) = cli_args.value_of("eth1-server") { builder.set_eth1_backend_method(Eth1BackendMethod::Web3 { @@ -35,7 +37,7 @@ pub fn get_configs(cli_args: &ArgMatches, log: &Logger) -> Result { match cli_args.subcommand() { ("testnet", Some(sub_cmd_args)) => { - process_testnet_subcommand(&mut builder, sub_cmd_args, log)? + process_testnet_subcommand(&mut builder, sub_cmd_args, &log)? } // No sub-command assumes a resume operation. _ => { @@ -216,15 +218,15 @@ fn process_testnet_subcommand( } /// Allows for building a set of configurations based upon `clap` arguments. -struct ConfigBuilder<'a> { - log: &'a Logger, +struct ConfigBuilder { + log: Logger, eth2_config: Eth2Config, client_config: ClientConfig, } -impl<'a> ConfigBuilder<'a> { +impl ConfigBuilder { /// Create a new builder with default settings. 
- pub fn new(cli_args: &'a ArgMatches, log: &'a Logger) -> Result { + pub fn new(cli_args: &ArgMatches, log: Logger) -> Result { // Read the `--datadir` flag. // // If it's not present, try and find the home directory (`~`) and push the default data @@ -539,8 +541,7 @@ impl<'a> ConfigBuilder<'a> { /// cli_args). pub fn build(mut self, cli_args: &ArgMatches) -> Result { self.eth2_config.apply_cli_args(cli_args)?; - self.client_config - .apply_cli_args(cli_args, &mut self.log.clone())?; + self.client_config.apply_cli_args(cli_args, &mut self.log)?; if let Some(bump) = cli_args.value_of("port-bump") { let bump = bump @@ -561,7 +562,7 @@ impl<'a> ConfigBuilder<'a> { return Err("Specification constant mismatch".into()); } - Ok((self.client_config, self.eth2_config)) + Ok((self.client_config, self.eth2_config, self.log)) } } diff --git a/beacon_node/src/main.rs b/beacon_node/src/main.rs index 5d2388785..54e4529c4 100644 --- a/beacon_node/src/main.rs +++ b/beacon_node/src/main.rs @@ -116,6 +116,13 @@ fn main() { .help("One or more comma-delimited multiaddrs to manually connect to a libp2p peer without an ENR.") .takes_value(true), ) + .arg( + Arg::with_name("p2p-priv-key") + .long("p2p-priv-key") + .value_name("HEX") + .help("A secp256k1 secret key, represented as ASCII-encoded hex bytes (with or without 0x prefix).") + .takes_value(true), + ) /* * gRPC parameters. */ @@ -355,13 +362,15 @@ fn main() { "Ethereum 2.0 is pre-release. This software is experimental." ); + let log_clone = log.clone(); + // Load the process-wide configuration. // // May load this from disk or create a new configuration, depending on the CLI flags supplied. - let (client_config, eth2_config) = match get_configs(&matches, &log) { + let (client_config, eth2_config, log) = match get_configs(&matches, log) { Ok(configs) => configs, Err(e) => { - crit!(log, "Failed to load configuration"; "error" => e); + crit!(log_clone, "Failed to load configuration. 
Exiting"; "error" => e); return; } }; diff --git a/eth2/types/src/test_utils/generate_deterministic_keypairs.rs b/eth2/types/src/test_utils/generate_deterministic_keypairs.rs index a687eb978..188ce075d 100644 --- a/eth2/types/src/test_utils/generate_deterministic_keypairs.rs +++ b/eth2/types/src/test_utils/generate_deterministic_keypairs.rs @@ -1,7 +1,8 @@ use crate::*; -use eth2_interop_keypairs::keypair; +use eth2_interop_keypairs::{keypair, keypairs_from_yaml_file}; use log::debug; use rayon::prelude::*; +use std::path::PathBuf; /// Generates `validator_count` keypairs where the secret key is derived solely from the index of /// the validator. @@ -32,3 +33,14 @@ pub fn generate_deterministic_keypair(validator_index: usize) -> Keypair { sk: SecretKey::from_raw(raw.sk), } } + +/// Loads a list of keypairs from file. +pub fn load_keypairs_from_yaml(path: PathBuf) -> Result, String> { + Ok(keypairs_from_yaml_file(path)? + .into_iter() + .map(|raw| Keypair { + pk: PublicKey::from_raw(raw.pk), + sk: SecretKey::from_raw(raw.sk), + }) + .collect()) +} diff --git a/eth2/types/src/test_utils/mod.rs b/eth2/types/src/test_utils/mod.rs index 9ca9ca78a..b3ecb9089 100644 --- a/eth2/types/src/test_utils/mod.rs +++ b/eth2/types/src/test_utils/mod.rs @@ -8,6 +8,7 @@ mod test_random; pub use builders::*; pub use generate_deterministic_keypairs::generate_deterministic_keypair; pub use generate_deterministic_keypairs::generate_deterministic_keypairs; +pub use generate_deterministic_keypairs::load_keypairs_from_yaml; pub use keypairs_file::KeypairsFile; pub use rand::{ RngCore, diff --git a/eth2/utils/eth2_interop_keypairs/Cargo.toml b/eth2/utils/eth2_interop_keypairs/Cargo.toml index d8a111855..a1a851d1d 100644 --- a/eth2/utils/eth2_interop_keypairs/Cargo.toml +++ b/eth2/utils/eth2_interop_keypairs/Cargo.toml @@ -10,10 +10,11 @@ edition = "2018" lazy_static = "1.4" num-bigint = "0.2" eth2_hashing = "0.1" +hex = "0.3" milagro_bls = { git = 
"https://github.com/michaelsproul/milagro_bls", branch = "little-endian-v0.10" } +serde_yaml = "0.8" +serde = "1.0" +serde_derive = "1.0" [dev-dependencies] base64 = "0.10" -serde = "1.0" -serde_derive = "1.0" -serde_yaml = "0.8" diff --git a/eth2/utils/eth2_interop_keypairs/specs/keygen_10_validators.yaml b/eth2/utils/eth2_interop_keypairs/specs/keygen_10_validators.yaml new file mode 100644 index 000000000..b725ab2bd --- /dev/null +++ b/eth2/utils/eth2_interop_keypairs/specs/keygen_10_validators.yaml @@ -0,0 +1,20 @@ +- {privkey: '0x25295f0d1d592a90b333e26e85149708208e9f8e8bc18f6c77bd62f8ad7a6866', + pubkey: '0xa99a76ed7796f7be22d5b7e85deeb7c5677e88e511e0b337618f8c4eb61349b4bf2d153f649f7b53359fe8b94a38e44c'} +- {privkey: '0x51d0b65185db6989ab0b560d6deed19c7ead0e24b9b6372cbecb1f26bdfad000', + pubkey: '0xb89bebc699769726a318c8e9971bd3171297c61aea4a6578a7a4f94b547dcba5bac16a89108b6b6a1fe3695d1a874a0b'} +- {privkey: '0x315ed405fafe339603932eebe8dbfd650ce5dafa561f6928664c75db85f97857', + pubkey: '0xa3a32b0f8b4ddb83f1a0a853d81dd725dfe577d4f4c3db8ece52ce2b026eca84815c1a7e8e92a4de3d755733bf7e4a9b'} +- {privkey: '0x25b1166a43c109cb330af8945d364722757c65ed2bfed5444b5a2f057f82d391', + pubkey: '0x88c141df77cd9d8d7a71a75c826c41a9c9f03c6ee1b180f3e7852f6a280099ded351b58d66e653af8e42816a4d8f532e'} +- {privkey: '0x3f5615898238c4c4f906b507ee917e9ea1bb69b93f1dbd11a34d229c3b06784b', + pubkey: '0x81283b7a20e1ca460ebd9bbd77005d557370cabb1f9a44f530c4c4c66230f675f8df8b4c2818851aa7d77a80ca5a4a5e'} +- {privkey: '0x055794614bc85ed5436c1f5cab586aab6ca84835788621091f4f3b813761e7a8', + pubkey: '0xab0bdda0f85f842f431beaccf1250bf1fd7ba51b4100fd64364b6401fda85bb0069b3e715b58819684e7fc0b10a72a34'} +- {privkey: '0x1023c68852075965e0f7352dee3f76a84a83e7582c181c10179936c6d6348893', + pubkey: '0x9977f1c8b731a8d5558146bfb86caea26434f3c5878b589bf280a42c9159e700e9df0e4086296c20b011d2e78c27d373'} +- {privkey: '0x3a941600dc41e5d20e818473b817a28507c23cdfdb4b659c15461ee5c71e41f5', + pubkey: 
'0xa8d4c7c27795a725961317ef5953a7032ed6d83739db8b0e8a72353d1b8b4439427f7efa2c89caa03cc9f28f8cbab8ac'} +- {privkey: '0x066e3bdc0415530e5c7fed6382d5c822c192b620203cf669903e1810a8c67d06', + pubkey: '0xa6d310dbbfab9a22450f59993f87a4ce5db6223f3b5f1f30d2c4ec718922d400e0b3c7741de8e59960f72411a0ee10a7'} +- {privkey: '0x2b3b88a041168a1c4cd04bdd8de7964fd35238f95442dc678514f9dadb81ec34', + pubkey: '0x9893413c00283a3f9ed9fd9845dda1cea38228d22567f9541dccc357e54a2d6a6e204103c92564cbc05f4905ac7c493a'} diff --git a/eth2/utils/eth2_interop_keypairs/src/lib.rs b/eth2/utils/eth2_interop_keypairs/src/lib.rs index ac610ee77..cac7e7462 100644 --- a/eth2/utils/eth2_interop_keypairs/src/lib.rs +++ b/eth2/utils/eth2_interop_keypairs/src/lib.rs @@ -22,8 +22,13 @@ extern crate lazy_static; use eth2_hashing::hash; use milagro_bls::{Keypair, PublicKey, SecretKey}; use num_bigint::BigUint; +use serde_derive::{Deserialize, Serialize}; +use std::convert::TryInto; +use std::fs::File; +use std::path::PathBuf; pub const PRIVATE_KEY_BYTES: usize = 48; +pub const PUBLIC_KEY_BYTES: usize = 48; pub const HASH_BYTES: usize = 32; lazy_static! { @@ -63,3 +68,65 @@ pub fn keypair(validator_index: usize) -> Keypair { sk, } } + +#[derive(Serialize, Deserialize)] +struct YamlKeypair { + /// Big-endian. + privkey: String, + /// Big-endian. + pubkey: String, +} + +impl TryInto for YamlKeypair { + type Error = String; + + fn try_into(self) -> Result { + let privkey = string_to_bytes(&self.privkey)?; + let pubkey = string_to_bytes(&self.pubkey)?; + + if (privkey.len() > PRIVATE_KEY_BYTES) || (pubkey.len() > PUBLIC_KEY_BYTES) { + return Err("Public or private key is too long".into()); + } + + let sk = { + let mut bytes = vec![0; PRIVATE_KEY_BYTES - privkey.len()]; + bytes.extend_from_slice(&privkey); + SecretKey::from_bytes(&bytes) + .map_err(|e| format!("Failed to decode bytes into secret key: {:?}", e))? 
+ }; + + let pk = { + let mut bytes = vec![0; PUBLIC_KEY_BYTES - pubkey.len()]; + bytes.extend_from_slice(&pubkey); + PublicKey::from_bytes(&bytes) + .map_err(|e| format!("Failed to decode bytes into public key: {:?}", e))? + }; + + Ok(Keypair { pk, sk }) + } +} + +fn string_to_bytes(string: &str) -> Result, String> { + let string = if string.starts_with("0x") { + &string[2..] + } else { + string + }; + + hex::decode(string).map_err(|e| format!("Unable to decode public or private key: {}", e)) +} + +/// Loads keypairs from a YAML encoded file. +/// +/// Uses this as reference: +/// https://github.com/ethereum/eth2.0-pm/blob/9a9dbcd95e2b8e10287797bd768014ab3d842e99/interop/mocked_start/keygen_10_validators.yaml +pub fn keypairs_from_yaml_file(path: PathBuf) -> Result, String> { + let file = + File::open(path.clone()).map_err(|e| format!("Unable to open YAML key file: {}", e))?; + + serde_yaml::from_reader::<_, Vec>(file) + .map_err(|e| format!("Could not parse YAML: {:?}", e))? + .into_iter() + .map(TryInto::try_into) + .collect::, String>>() +} diff --git a/eth2/utils/eth2_interop_keypairs/tests/from_file.rs b/eth2/utils/eth2_interop_keypairs/tests/from_file.rs new file mode 100644 index 000000000..dd62d1f3e --- /dev/null +++ b/eth2/utils/eth2_interop_keypairs/tests/from_file.rs @@ -0,0 +1,23 @@ +#![cfg(test)] +use eth2_interop_keypairs::{keypair as reference_keypair, keypairs_from_yaml_file}; +use std::path::PathBuf; + +fn yaml_path() -> PathBuf { + PathBuf::from(env!("CARGO_MANIFEST_DIR")) + .join("specs") + .join("keygen_10_validators.yaml") +} + +#[test] +fn load_from_yaml() { + let keypairs = keypairs_from_yaml_file(yaml_path()).expect("should read keypairs from file"); + + keypairs.into_iter().enumerate().for_each(|(i, keypair)| { + assert_eq!( + keypair, + reference_keypair(i), + "Decoded key {} does not match generated key", + i + ) + }); +} diff --git a/eth2/utils/eth2_interop_keypairs/tests/test.rs b/eth2/utils/eth2_interop_keypairs/tests/generation.rs 
similarity index 100% rename from eth2/utils/eth2_interop_keypairs/tests/test.rs rename to eth2/utils/eth2_interop_keypairs/tests/generation.rs diff --git a/scripts/whiteblock_start.sh b/scripts/whiteblock_start.sh new file mode 100755 index 000000000..74bdd8cfa --- /dev/null +++ b/scripts/whiteblock_start.sh @@ -0,0 +1,96 @@ +#!/bin/bash + +<" + echo "--peers=" + echo "--validator-keys=" + echo "--gen-state=" + echo "--port=" +} + +while [ "$1" != "" ]; +do + PARAM=`echo $1 | awk -F= '{print $1}'` + VALUE=`echo $1 | sed 's/^[^=]*=//g'` + + case $PARAM in + --identity) + IDENTITY=$VALUE + ;; + --peers) + PEERS+=",$VALUE" + ;; + --validator-keys) + VALIDATOR_KEYS=$VALUE + ;; + --gen-state) + GEN_STATE=$VALUE + ;; + --port) + PORT=$VALUE + ;; + --help) + usage + exit + ;; + *) + echo "ERROR: unknown parameter \"$PARAM\"" + usage + exit 1 + ;; + esac + shift +done + +./beacon_node \ + --p2p-priv-key $IDENTITY \ + --logfile $BEACON_LOG_FILE \ + --libp2p-addresses $PEERS \ + --port $PORT \ + testnet \ + --force \ + file \ + ssz \ + $GEN_STATE \ + & \ + +./validator_client \ + --logfile $VALIDATOR_LOG_FILE \ + testnet \ + --bootstrap \ + interop-yaml \ + $YAML_KEY_FILE \ + +trap 'trap - SIGTERM && kill 0' SIGINT SIGTERM EXIT diff --git a/validator_client/Cargo.toml b/validator_client/Cargo.toml index 2000f5409..d360b93bd 100644 --- a/validator_client/Cargo.toml +++ b/validator_client/Cargo.toml @@ -19,6 +19,7 @@ eth2_config = { path = "../eth2/utils/eth2_config" } tree_hash = "0.1" clap = "2.32.0" lighthouse_bootstrap = { path = "../eth2/utils/lighthouse_bootstrap" } +eth2_interop_keypairs = { path = "../eth2/utils/eth2_interop_keypairs" } grpcio = { version = "0.4", default-features = false, features = ["protobuf-codec"] } protos = { path = "../protos" } slot_clock = { path = "../eth2/utils/slot_clock" } diff --git a/validator_client/src/config.rs b/validator_client/src/config.rs index 3e13de722..0b4f20ff6 100644 --- a/validator_client/src/config.rs +++ 
b/validator_client/src/config.rs @@ -8,7 +8,10 @@ use std::io::{Error, ErrorKind}; use std::ops::Range; use std::path::PathBuf; use std::sync::Mutex; -use types::{test_utils::generate_deterministic_keypair, EthSpec, MainnetEthSpec}; +use types::{ + test_utils::{generate_deterministic_keypair, load_keypairs_from_yaml}, + EthSpec, MainnetEthSpec, +}; pub const DEFAULT_SERVER: &str = "localhost"; pub const DEFAULT_SERVER_GRPC_PORT: &str = "5051"; @@ -20,6 +23,8 @@ pub enum KeySource { Disk, /// Generate the keypairs (insecure, generates predictable keys). TestingKeypairRange(Range), + /// Load testing keypairs from YAML + YamlKeypairs(PathBuf), } impl Default for KeySource { @@ -230,6 +235,14 @@ impl Config { warn!(log, "Using insecure private keys"); self.fetch_testing_keypairs(range.clone())? } + KeySource::YamlKeypairs(path) => { + warn!( + log, + "Private keys are stored insecurely (plain text). Testing use only." + ); + + load_keypairs_from_yaml(path.to_path_buf())? + } }; // Check if it's an empty vector, and return none. diff --git a/validator_client/src/main.rs b/validator_client/src/main.rs index 39b2e3eae..c0d6961f0 100644 --- a/validator_client/src/main.rs +++ b/validator_client/src/main.rs @@ -16,6 +16,7 @@ use eth2_config::Eth2Config; use lighthouse_bootstrap::Bootstrapper; use protos::services_grpc::ValidatorServiceClient; use slog::{crit, error, info, o, Drain, Level, Logger}; +use std::path::PathBuf; use types::{InteropEthSpec, Keypair, MainnetEthSpec, MinimalEthSpec}; pub const DEFAULT_SPEC: &str = "minimal"; @@ -131,6 +132,14 @@ fn main() { .required(true) .help("The number of validators.")) ) + .subcommand(SubCommand::with_name("interop-yaml") + .about("Loads plain-text secret keys from YAML files. 
Expects the interop format defined + in the ethereum/eth2.0-pm repo.") + .arg(Arg::with_name("path") + .value_name("PATH") + .required(true) + .help("Path to a YAML file.")) + ) ) .get_matches(); @@ -143,8 +152,8 @@ fn main() { Some("crit") => drain.filter_level(Level::Critical), _ => unreachable!("guarded by clap"), }; - let log = slog::Logger::root(drain.fuse(), o!()); - let (client_config, eth2_config) = match get_configs(&matches, &log) { + let mut log = slog::Logger::root(drain.fuse(), o!()); + let (client_config, eth2_config) = match get_configs(&matches, &mut log) { Ok(tuple) => tuple, Err(e) => { crit!( @@ -195,9 +204,14 @@ fn main() { /// Parses the CLI arguments and attempts to load the client and eth2 configuration. /// /// This is not a pure function, it reads from disk and may contact network servers. -pub fn get_configs(cli_args: &ArgMatches, log: &Logger) -> Result<(ClientConfig, Eth2Config)> { +pub fn get_configs( + cli_args: &ArgMatches, + mut log: &mut Logger, +) -> Result<(ClientConfig, Eth2Config)> { let mut client_config = ClientConfig::default(); + client_config.apply_cli_args(&cli_args, &mut log)?; + if let Some(server) = cli_args.value_of("server") { client_config.server = server.to_string(); } @@ -215,14 +229,14 @@ pub fn get_configs(cli_args: &ArgMatches, log: &Logger) -> Result<(ClientConfig, } info!( - log, + *log, "Beacon node connection info"; "grpc_port" => client_config.server_grpc_port, "http_port" => client_config.server_http_port, "server" => &client_config.server, ); - match cli_args.subcommand() { + let (client_config, eth2_config) = match cli_args.subcommand() { ("testnet", Some(sub_cli_args)) => { if cli_args.is_present("eth2-config") && sub_cli_args.is_present("bootstrap") { return Err( @@ -234,7 +248,9 @@ pub fn get_configs(cli_args: &ArgMatches, log: &Logger) -> Result<(ClientConfig, process_testnet_subcommand(sub_cli_args, client_config, log) } _ => return Err("You must use the testnet command. 
See '--help'.".into()), - } + }?; + + Ok((client_config, eth2_config)) } /// Parses the `testnet` CLI subcommand. @@ -296,6 +312,21 @@ fn process_testnet_subcommand( KeySource::TestingKeypairRange(first..first + count) } + ("interop-yaml", Some(sub_cli_args)) => { + let path = sub_cli_args + .value_of("path") + .ok_or_else(|| "No yaml path supplied")? + .parse::() + .map_err(|e| format!("Unable to parse yaml path: {:?}", e))?; + + info!( + log, + "Loading keypairs from interop YAML format"; + "path" => format!("{:?}", path), + ); + + KeySource::YamlKeypairs(path) + } _ => KeySource::Disk, }; From 3fe61f5044d90f337513c3a3d4034fc9a01a7785 Mon Sep 17 00:00:00 2001 From: Paul Hauner Date: Tue, 10 Sep 2019 17:40:21 -0400 Subject: [PATCH 275/305] Add additional logs to validator client --- beacon_node/beacon_chain/src/beacon_chain.rs | 4 ++++ beacon_node/rpc/src/validator.rs | 2 +- validator_client/Cargo.toml | 2 +- validator_client/src/config.rs | 6 +++++- validator_client/src/main.rs | 10 +++++++--- validator_client/src/service.rs | 20 +++++++++++++++++++- 6 files changed, 37 insertions(+), 7 deletions(-) diff --git a/beacon_node/beacon_chain/src/beacon_chain.rs b/beacon_node/beacon_chain/src/beacon_chain.rs index 064260cfc..79c241312 100644 --- a/beacon_node/beacon_chain/src/beacon_chain.rs +++ b/beacon_node/beacon_chain/src/beacon_chain.rs @@ -1386,10 +1386,14 @@ impl BeaconChain { new_head.beacon_state.build_all_caches(&self.spec)?; + trace!(self.log, "Taking write lock on head"); + // Update the checkpoint that stores the head of the chain at the time it received the // block. *self.canonical_head.write() = new_head; + trace!(self.log, "Dropping write lock on head"); + // Save `self` to `self.store`. 
self.persist()?; diff --git a/beacon_node/rpc/src/validator.rs b/beacon_node/rpc/src/validator.rs index abc1cffc5..0533e2558 100644 --- a/beacon_node/rpc/src/validator.rs +++ b/beacon_node/rpc/src/validator.rs @@ -25,8 +25,8 @@ impl ValidatorService for ValidatorServiceInstance { req: GetDutiesRequest, sink: UnarySink, ) { - let validators = req.get_validators(); trace!(self.log, "RPC request"; "endpoint" => "GetValidatorDuties", "epoch" => req.get_epoch()); + let validators = req.get_validators(); let epoch = Epoch::from(req.get_epoch()); let slot = epoch.start_slot(T::EthSpec::slots_per_epoch()); diff --git a/validator_client/Cargo.toml b/validator_client/Cargo.toml index d360b93bd..dae07d76c 100644 --- a/validator_client/Cargo.toml +++ b/validator_client/Cargo.toml @@ -26,7 +26,7 @@ slot_clock = { path = "../eth2/utils/slot_clock" } types = { path = "../eth2/types" } serde = "1.0" serde_derive = "1.0" -slog = "^2.2.3" +slog = { version = "^2.2.3" , features = ["max_level_trace", "release_max_level_trace"] } slog-async = "^2.3.0" slog-json = "^2.3" slog-term = "^2.4.0" diff --git a/validator_client/src/config.rs b/validator_client/src/config.rs index 0b4f20ff6..33e8addb6 100644 --- a/validator_client/src/config.rs +++ b/validator_client/src/config.rs @@ -232,7 +232,11 @@ impl Config { let keypairs = match &self.key_source { KeySource::Disk => self.fetch_keys_from_disk(log)?, KeySource::TestingKeypairRange(range) => { - warn!(log, "Using insecure private keys"); + warn!( + log, + "Using insecure interop private keys"; + "range" => format!("{:?}", range) + ); self.fetch_testing_keypairs(range.clone())? 
} KeySource::YamlKeypairs(path) => { diff --git a/validator_client/src/main.rs b/validator_client/src/main.rs index c0d6961f0..e445218eb 100644 --- a/validator_client/src/main.rs +++ b/validator_client/src/main.rs @@ -82,7 +82,8 @@ fn main() { ) .arg( Arg::with_name("server-grpc-port") - .long("g") + .long("server-grpc-port") + .short("g") .value_name("PORT") .help("Port to use for gRPC API connection to the server.") .default_value(DEFAULT_SERVER_GRPC_PORT) @@ -90,7 +91,8 @@ fn main() { ) .arg( Arg::with_name("server-http-port") - .long("h") + .long("server-http-port") + .short("h") .value_name("PORT") .help("Port to use for HTTP API connection to the server.") .default_value(DEFAULT_SERVER_HTTP_PORT) @@ -104,7 +106,7 @@ fn main() { .help("The title of the spec constants for chain config.") .takes_value(true) .possible_values(&["info", "debug", "trace", "warn", "error", "crit"]) - .default_value("info"), + .default_value("trace"), ) /* * The "testnet" sub-command. @@ -152,7 +154,9 @@ fn main() { Some("crit") => drain.filter_level(Level::Critical), _ => unreachable!("guarded by clap"), }; + let mut log = slog::Logger::root(drain.fuse(), o!()); + let (client_config, eth2_config) = match get_configs(&matches, &mut log) { Ok(tuple) => tuple, Err(e) => { diff --git a/validator_client/src/service.rs b/validator_client/src/service.rs index ba4f3c133..fd8de71ca 100644 --- a/validator_client/src/service.rs +++ b/validator_client/src/service.rs @@ -22,7 +22,7 @@ use protos::services_grpc::{ AttestationServiceClient, BeaconBlockServiceClient, BeaconNodeServiceClient, ValidatorServiceClient, }; -use slog::{crit, error, info, warn}; +use slog::{crit, error, info, trace, warn}; use slot_clock::{SlotClock, SystemTimeSlotClock}; use std::marker::PhantomData; use std::sync::Arc; @@ -289,6 +289,11 @@ impl Service Service current_epoch + ); + // spawn a new thread separate to the runtime // TODO: Handle thread termination/timeout // TODO: Add duties thread back in, with channel to 
process duties in duty change. @@ -345,6 +357,12 @@ impl Service work.len() + ); + for (signer_index, work_type) in work { if work_type.produce_block { // we need to produce a block From 5d91d5948115b1f7efc7fd7e403dc5dbbcae715f Mon Sep 17 00:00:00 2001 From: Paul Hauner Date: Tue, 10 Sep 2019 22:42:07 -0400 Subject: [PATCH 276/305] Fix deadlock on becaon chain head --- beacon_node/beacon_chain/src/beacon_chain.rs | 123 ++++++------------- 1 file changed, 40 insertions(+), 83 deletions(-) diff --git a/beacon_node/beacon_chain/src/beacon_chain.rs b/beacon_node/beacon_chain/src/beacon_chain.rs index 79c241312..48a012c36 100644 --- a/beacon_node/beacon_chain/src/beacon_chain.rs +++ b/beacon_node/beacon_chain/src/beacon_chain.rs @@ -8,7 +8,7 @@ use crate::persisted_beacon_chain::{PersistedBeaconChain, BEACON_CHAIN_DB_KEY}; use lmd_ghost::LmdGhost; use operation_pool::DepositInsertStatus; use operation_pool::{OperationPool, PersistedOperationPool}; -use parking_lot::{RwLock, RwLockReadGuard}; +use parking_lot::RwLock; use slog::{error, info, trace, warn, Logger}; use slot_clock::SlotClock; use ssz::Encode; @@ -89,35 +89,6 @@ pub enum AttestationProcessingOutcome { Invalid(AttestationValidationError), } -/// Effectively a `Cow`, however when it is `Borrowed` it holds a `RwLockReadGuard` (a -/// read-lock on some read/write-locked state). -/// -/// Only has a small subset of the functionality of a `std::borrow::Cow`. 
-pub enum BeaconStateCow<'a, T: EthSpec> { - Borrowed(RwLockReadGuard<'a, CheckPoint>), - Owned(BeaconState), -} - -impl<'a, T: EthSpec> BeaconStateCow<'a, T> { - pub fn maybe_as_mut_ref(&mut self) -> Option<&mut BeaconState> { - match self { - BeaconStateCow::Borrowed(_) => None, - BeaconStateCow::Owned(ref mut state) => Some(state), - } - } -} - -impl<'a, T: EthSpec> std::ops::Deref for BeaconStateCow<'a, T> { - type Target = BeaconState; - - fn deref(&self) -> &BeaconState { - match self { - BeaconStateCow::Borrowed(checkpoint) => &checkpoint.beacon_state, - BeaconStateCow::Owned(state) => &state, - } - } -} - pub trait BeaconChainTypes: Send + Sync + 'static { type Store: store::Store; type SlotClock: slot_clock::SlotClock; @@ -338,13 +309,11 @@ impl BeaconChain { /// - As this iterator starts at the `head` of the chain (viz., the best block), the first slot /// returned may be earlier than the wall-clock slot. pub fn rev_iter_block_roots(&self) -> ReverseBlockRootIterator { - let state = &self.head().beacon_state; - let block_root = self.head().beacon_block_root; - let block_slot = state.slot; + let head = self.head(); - let iter = BlockRootsIterator::owned(self.store.clone(), state.clone()); + let iter = BlockRootsIterator::owned(self.store.clone(), head.beacon_state); - ReverseBlockRootIterator::new((block_root, block_slot), iter) + ReverseBlockRootIterator::new((head.beacon_block_root, head.beacon_block.slot), iter) } /// Iterates across all `(state_root, slot)` pairs from the head of the chain (inclusive) to @@ -357,13 +326,12 @@ impl BeaconChain { /// - As this iterator starts at the `head` of the chain (viz., the best block), the first slot /// returned may be earlier than the wall-clock slot. 
pub fn rev_iter_state_roots(&self) -> ReverseStateRootIterator { - let state = &self.head().beacon_state; - let state_root = self.head().beacon_state_root; - let state_slot = state.slot; + let head = self.head(); + let slot = head.beacon_state.slot; - let iter = StateRootsIterator::owned(self.store.clone(), state.clone()); + let iter = StateRootsIterator::owned(self.store.clone(), head.beacon_state); - ReverseStateRootIterator::new((state_root, state_slot), iter) + ReverseStateRootIterator::new((head.beacon_state_root, slot), iter) } /// Returns the block at the given root, if any. @@ -378,32 +346,25 @@ impl BeaconChain { Ok(self.store.get(block_root)?) } - /// Returns a read-lock guarded `CheckPoint` struct for reading the head (as chosen by the - /// fork-choice rule). + /// Returns a `Checkpoint` representing the head block and state. Contains the "best block"; + /// the head of the canonical `BeaconChain`. /// /// It is important to note that the `beacon_state` returned may not match the present slot. It /// is the state as it was when the head block was received, which could be some slots prior to /// now. - pub fn head<'a>(&'a self) -> RwLockReadGuard<'a, CheckPoint> { - self.canonical_head.read() + pub fn head(&self) -> CheckPoint { + self.canonical_head.read().clone() } /// Returns the `BeaconState` at the given slot. /// - /// May return: - /// - /// - A new state loaded from the database (for states prior to the head) - /// - A reference to the head state (note: this keeps a read lock on the head, try to use - /// sparingly). - /// - The head state, but with skipped slots (for states later than the head). - /// /// Returns `None` when the state is not found in the database or there is an error skipping /// to a future state. 
- pub fn state_at_slot(&self, slot: Slot) -> Result, Error> { - let head_state = &self.head().beacon_state; + pub fn state_at_slot(&self, slot: Slot) -> Result, Error> { + let head_state = self.head().beacon_state; if slot == head_state.slot { - Ok(BeaconStateCow::Borrowed(self.head())) + Ok(head_state) } else if slot > head_state.slot { let head_state_slot = head_state.slot; let mut state = head_state.clone(); @@ -423,7 +384,7 @@ impl BeaconChain { } }; } - Ok(BeaconStateCow::Owned(state)) + Ok(state) } else { let state_root = self .rev_iter_state_roots() @@ -431,11 +392,10 @@ impl BeaconChain { .map(|(root, _slot)| root) .ok_or_else(|| Error::NoStateForSlot(slot))?; - Ok(BeaconStateCow::Owned( - self.store - .get(&state_root)? - .ok_or_else(|| Error::NoStateForSlot(slot))?, - )) + Ok(self + .store + .get(&state_root)? + .ok_or_else(|| Error::NoStateForSlot(slot))?) } } @@ -447,7 +407,7 @@ impl BeaconChain { /// /// Returns `None` when there is an error skipping to a future state or the slot clock cannot /// be read. - pub fn state_now(&self) -> Result, Error> { + pub fn wall_clock_state(&self) -> Result, Error> { self.state_at_slot(self.slot()?) } @@ -499,14 +459,12 @@ impl BeaconChain { let head_state = &self.head().beacon_state; let mut state = if epoch(slot) == epoch(head_state.slot) { - BeaconStateCow::Borrowed(self.head()) + self.head().beacon_state.clone() } else { self.state_at_slot(slot)? }; - if let Some(state) = state.maybe_as_mut_ref() { - state.build_committee_cache(RelativeEpoch::Current, &self.spec)?; - } + state.build_committee_cache(RelativeEpoch::Current, &self.spec)?; if epoch(state.slot) != epoch(slot) { return Err(Error::InvariantViolated(format!( @@ -534,14 +492,12 @@ impl BeaconChain { let head_state = &self.head().beacon_state; let mut state = if epoch == as_epoch(head_state.slot) { - BeaconStateCow::Borrowed(self.head()) + self.head().beacon_state.clone() } else { self.state_at_slot(epoch.start_slot(T::EthSpec::slots_per_epoch()))? 
}; - if let Some(state) = state.maybe_as_mut_ref() { - state.build_committee_cache(RelativeEpoch::Current, &self.spec)?; - } + state.build_committee_cache(RelativeEpoch::Current, &self.spec)?; if as_epoch(state.slot) != epoch { return Err(Error::InvariantViolated(format!( @@ -569,11 +525,14 @@ impl BeaconChain { slot: Slot, ) -> Result { let state = self.state_at_slot(slot)?; + let head = self.head(); - let head_block_root = self.head().beacon_block_root; - let head_block_slot = self.head().beacon_block.slot; - - self.produce_attestation_data_for_block(shard, head_block_root, head_block_slot, &*state) + self.produce_attestation_data_for_block( + shard, + head.beacon_block_root, + head.beacon_block.slot, + &state, + ) } /// Produce an `AttestationData` that attests to the chain denoted by `block_root` and `state`. @@ -900,10 +859,8 @@ impl BeaconChain { /// Accept some exit and queue it for inclusion in an appropriate block. pub fn process_voluntary_exit(&self, exit: VoluntaryExit) -> Result<(), ExitValidationError> { - match self.state_now() { - Ok(state) => self - .op_pool - .insert_voluntary_exit(exit, &*state, &self.spec), + match self.wall_clock_state() { + Ok(state) => self.op_pool.insert_voluntary_exit(exit, &state, &self.spec), Err(e) => { error!( &self.log, @@ -918,8 +875,8 @@ impl BeaconChain { /// Accept some transfer and queue it for inclusion in an appropriate block. 
pub fn process_transfer(&self, transfer: Transfer) -> Result<(), TransferValidationError> { - match self.state_now() { - Ok(state) => self.op_pool.insert_transfer(transfer, &*state, &self.spec), + match self.wall_clock_state() { + Ok(state) => self.op_pool.insert_transfer(transfer, &state, &self.spec), Err(e) => { error!( &self.log, @@ -937,10 +894,10 @@ impl BeaconChain { &self, proposer_slashing: ProposerSlashing, ) -> Result<(), ProposerSlashingValidationError> { - match self.state_now() { + match self.wall_clock_state() { Ok(state) => { self.op_pool - .insert_proposer_slashing(proposer_slashing, &*state, &self.spec) + .insert_proposer_slashing(proposer_slashing, &state, &self.spec) } Err(e) => { error!( @@ -959,10 +916,10 @@ impl BeaconChain { &self, attester_slashing: AttesterSlashing, ) -> Result<(), AttesterSlashingValidationError> { - match self.state_now() { + match self.wall_clock_state() { Ok(state) => { self.op_pool - .insert_attester_slashing(attester_slashing, &*state, &self.spec) + .insert_attester_slashing(attester_slashing, &state, &self.spec) } Err(e) => { error!( From 2739ee83f9f97508b2236f5648550ee8f4ff343f Mon Sep 17 00:00:00 2001 From: Luke Anderson Date: Wed, 11 Sep 2019 18:02:00 +1000 Subject: [PATCH 277/305] Further restructuring futures API. - Adding try_future! 
macros where necessary - Returning ApiResult and mapping it to future instead - Upgrading POST publish block to return a future directly --- beacon_node/rest_api/src/beacon.rs | 2 +- beacon_node/rest_api/src/helpers.rs | 14 +++-- beacon_node/rest_api/src/lib.rs | 13 +++-- beacon_node/rest_api/src/node.rs | 16 ++---- beacon_node/rest_api/src/validator.rs | 83 ++++++++++++++------------- 5 files changed, 68 insertions(+), 60 deletions(-) diff --git a/beacon_node/rest_api/src/beacon.rs b/beacon_node/rest_api/src/beacon.rs index f5abc51af..cf540c65a 100644 --- a/beacon_node/rest_api/src/beacon.rs +++ b/beacon_node/rest_api/src/beacon.rs @@ -203,7 +203,7 @@ pub fn get_state(req: Request) -> BoxFut { try_future!(parse_slot(&value)) )), ("root", value) => { - let root = &try_future!(parse_root(&value)); + let root: &Hash256 = &try_future!(parse_root(&value)); let state = try_future!(try_future!(beacon_chain.store.get(root)) .ok_or_else(|| ApiError::NotFound(format!("No state for root: {:?}", root)))); diff --git a/beacon_node/rest_api/src/helpers.rs b/beacon_node/rest_api/src/helpers.rs index 9eae4a00a..a3633cfec 100644 --- a/beacon_node/rest_api/src/helpers.rs +++ b/beacon_node/rest_api/src/helpers.rs @@ -23,6 +23,13 @@ pub fn success_response(req: Request, item: &T) -> }) } +pub fn success_response_2(req: Request, item: &T) -> ApiResult { + ResponseBuilder::new(&req).body(item) +} +pub fn success_response_2_json(req: Request, item: &T) -> ApiResult { + ResponseBuilder::new(&req).body_json(item) +} + pub fn success_response_json(req: Request, item: &T) -> BoxFut { if let Err(e) = check_content_type_for_json(&req) { return Box::new(futures::future::err(e)); @@ -213,11 +220,10 @@ pub fn state_root_at_slot( } } -pub fn implementation_pending_response(_req: Request) -> BoxFut { - ApiError::NotImplemented( +pub fn implementation_pending_response(_req: Request) -> ApiResult { + Err(ApiError::NotImplemented( "API endpoint has not yet been implemented, but is planned to be 
soon.".to_owned(), - ) - .into() + )) } pub fn get_beacon_chain_from_request( diff --git a/beacon_node/rest_api/src/lib.rs b/beacon_node/rest_api/src/lib.rs index e4f88caf3..bc3d3d159 100644 --- a/beacon_node/rest_api/src/lib.rs +++ b/beacon_node/rest_api/src/lib.rs @@ -33,6 +33,7 @@ use std::sync::Arc; use tokio::runtime::TaskExecutor; use tokio::sync::mpsc; use url_query::UrlQuery; +use futures::future::IntoFuture; pub use beacon::{BlockResponse, HeadResponse, StateResponse}; pub use config::Config as ApiConfig; @@ -81,9 +82,10 @@ impl Service for ApiService { // Route the request to the correct handler. let result = match (req.method(), path.as_ref()) { // Methods for Client - (&Method::GET, "/node/version") => node::get_version(req), - (&Method::GET, "/node/genesis_time") => node::get_genesis_time::(req), - (&Method::GET, "/node/syncing") => helpers::implementation_pending_response(req), + (&Method::GET, "/node/version") => Box::new(node::get_version(req).into_future()), + (&Method::GET, "/node/genesis_time") => Box::new(node::get_genesis_time::(req).into_future()), + (&Method::GET, "/node/syncing") => Box::new(helpers::implementation_pending_response(req).into_future()), + /* // Methods for Network (&Method::GET, "/network/enr") => network::get_enr::(req), @@ -112,11 +114,12 @@ impl Service for ApiService { helpers::implementation_pending_response(req) } - /* // Methods for Validator (&Method::GET, "/beacon/validator/duties") => validator::get_validator_duties::(req), (&Method::GET, "/beacon/validator/block") => validator::get_new_beacon_block::(req), - //(&Method::POST, "/beacon/validator/block") => validator::publish_beacon_block::(req), + */ + (&Method::POST, "/beacon/validator/block") => validator::publish_beacon_block::(req), + /* (&Method::GET, "/beacon/validator/attestation") => { validator::get_new_attestation::(req) } diff --git a/beacon_node/rest_api/src/node.rs b/beacon_node/rest_api/src/node.rs index 8ca7fb48a..33b8e055c 100644 --- 
a/beacon_node/rest_api/src/node.rs +++ b/beacon_node/rest_api/src/node.rs @@ -5,19 +5,13 @@ use hyper::{Body, Request}; use version; /// Read the version string from the current Lighthouse build. -pub fn get_version(req: Request) -> BoxFut { - success_response_json(req, &version::version()) +pub fn get_version(req: Request) -> ApiResult { + success_response_2_json(req, &version::version()) } /// Read the genesis time from the current beacon chain state. -pub fn get_genesis_time(req: Request) -> BoxFut { - let bc = get_beacon_chain_from_request::(&req); - let (_beacon_chain, head_state) = match bc { - Ok((bc, hs)) => (bc, hs), - Err(e) => { - return e.into(); - } - }; +pub fn get_genesis_time(req: Request) -> ApiResult { + let (_beacon_chain, head_state) = get_beacon_chain_from_request::(&req)?; let gen_time: u64 = head_state.genesis_time; - success_response(req, &gen_time) + success_response_2(req, &gen_time) } diff --git a/beacon_node/rest_api/src/validator.rs b/beacon_node/rest_api/src/validator.rs index 7d236e0cf..1de164704 100644 --- a/beacon_node/rest_api/src/validator.rs +++ b/beacon_node/rest_api/src/validator.rs @@ -1,5 +1,6 @@ use crate::helpers::*; -use crate::{ApiError, ApiResult, UrlQuery}; +use crate::response_builder::ResponseBuilder; +use crate::{ApiError, ApiResult, UrlQuery, BoxFut}; use beacon_chain::{BeaconChainTypes, BlockProcessingOutcome}; use bls::{AggregateSignature, PublicKey, Signature}; use futures::future::Future; @@ -204,10 +205,10 @@ pub fn get_new_beacon_block(req: Request) - } /// HTTP Handler to publish a BeaconBlock, which has been signed by a validator. 
-pub fn publish_beacon_block(req: Request) -> ApiResult { - let _ = check_content_type_for_json(&req)?; +pub fn publish_beacon_block(req: Request) -> BoxFut { + let _ = try_future!(check_content_type_for_json(&req)); let log = get_logger_from_request(&req); - let (beacon_chain, _head_state) = get_beacon_chain_from_request::(&req)?; + let (beacon_chain, _head_state) = try_future!(get_beacon_chain_from_request::(&req)); // Get the network sending channel from the request, for later transmission let network_chan = req .extensions() @@ -215,49 +216,53 @@ pub fn publish_beacon_block(req: Request) - .expect("Should always get the network channel from the request, since we put it in there.") .clone(); + let response_builder = ResponseBuilder::new(&req); + let body = req.into_body(); trace!( log, "Got the request body, now going to parse it into a block." ); - let block_future = body + Box::new(body .concat2() - .map(move |chunk| chunk.iter().cloned().collect::>()) - .map(|chunks| { - let block_result: Result, ApiError> = - serde_json::from_slice(&chunks.as_slice()).map_err(|e| { - ApiError::InvalidQueryParams(format!( - "Unable to deserialize JSON into a BeaconBlock: {:?}", + .map_err(|e| ApiError::ServerError(format!("Unable to get request body: {:?}",e))) + .map(|chunk| chunk.iter().cloned().collect::>()) + .and_then(|chunks| { + serde_json::from_slice(&chunks.as_slice()).map_err(|e| { + ApiError::InvalidQueryParams(format!( + "Unable to deserialize JSON into a BeaconBlock: {:?}", + e + )) + }) + }) + .and_then(move |block: BeaconBlock| { + let slot = block.slot; + match beacon_chain.process_block(block.clone()) { + Ok(BlockProcessingOutcome::Processed { block_root }) => { + // Block was processed, publish via gossipsub + info!(log, "Processed valid block from API, transmitting to network."; "block_slot" => slot, "block_root" => format!("{}", block_root)); + publish_beacon_block_to_network::(network_chan, block) + } + Ok(outcome) => { + warn!(log, "Block could not be 
processed, but is being sent to the network anyway."; "block_slot" => slot, "outcome" => format!("{:?}", outcome)); + //TODO need to send to network and return http 202 + Err(ApiError::InvalidQueryParams(format!( + "The BeaconBlock could not be processed: {:?}", + outcome + ))) + } + Err(e) => { + Err(ApiError::ServerError(format!( + "Unable to process block: {:?}", e - )) - }); - block_result - }); - let block = block_future.wait()??; - trace!(log, "BeaconBlock successfully parsed from JSON"); - match beacon_chain.process_block(block.clone()) { - Ok(BlockProcessingOutcome::Processed { block_root }) => { - // Block was processed, publish via gossipsub - info!(log, "Processed valid block from API, transmitting to network."; "block_slot" => block.slot, "block_root" => format!("{}", block_root)); - publish_beacon_block_to_network::(network_chan, block)?; - } - Ok(outcome) => { - warn!(log, "Block could not be processed, but is being sent to the network anyway."; "block_slot" => block.slot, "outcome" => format!("{:?}", outcome)); - //TODO need to send to network and return http 202 - return Err(ApiError::InvalidQueryParams(format!( - "The BeaconBlock could not be processed: {:?}", - outcome - ))); - } - Err(e) => { - return Err(ApiError::ServerError(format!( - "Unable to process block: {:?}", - e - ))); - } - } + ))) + } + } + }).and_then(|_| { + response_builder.body_json(&()) + })) + - Ok(success_response_old(Body::empty())) } /// HTTP Handler to produce a new Attestation from the current state, ready to be signed by a validator. From 0bd8187ff648e1eca0224a1f9d27792ce612db24 Mon Sep 17 00:00:00 2001 From: Luke Anderson Date: Wed, 11 Sep 2019 18:21:51 +1000 Subject: [PATCH 278/305] Revert most of commit ebd97730d572a80343f1cd8779d19e1b56e8a5eb. - Undoing making of the beacon chain read stuff return futures, instead dealing with it elsewhere. 
--- beacon_node/rest_api/src/beacon.rs | 160 +++++++++++++++------------- beacon_node/rest_api/src/helpers.rs | 2 +- beacon_node/rest_api/src/lib.rs | 1 + 3 files changed, 87 insertions(+), 76 deletions(-) diff --git a/beacon_node/rest_api/src/beacon.rs b/beacon_node/rest_api/src/beacon.rs index cf540c65a..850c77dc1 100644 --- a/beacon_node/rest_api/src/beacon.rs +++ b/beacon_node/rest_api/src/beacon.rs @@ -23,12 +23,11 @@ pub struct HeadResponse { } /// HTTP handler to return a `BeaconBlock` at a given `root` or `slot`. -pub fn get_head(req: Request) -> BoxFut { +pub fn get_head(req: Request) -> ApiResult { let beacon_chain = req .extensions() .get::>>() - .expect("BeaconChain extension must be there, because we put it there.") - .clone(); + .ok_or_else(|| ApiError::ServerError("Beacon chain extension missing".to_string()))?; let chain_head = beacon_chain.head(); @@ -56,7 +55,10 @@ pub fn get_head(req: Request) -> BoxFut { previous_justified_block_root: chain_head.beacon_state.previous_justified_checkpoint.root, }; - success_response(req, &head) + let json: String = serde_json::to_string(&head) + .map_err(|e| ApiError::ServerError(format!("Unable to serialize HeadResponse: {:?}", e)))?; + + Ok(success_response_old(Body::from(json))) } #[derive(Serialize, Encode)] @@ -67,83 +69,84 @@ pub struct BlockResponse { } /// HTTP handler to return a `BeaconBlock` at a given `root` or `slot`. 
-pub fn get_block(req: Request) -> BoxFut { +pub fn get_block(req: Request) -> ApiResult { let beacon_chain = req .extensions() .get::>>() - .expect("BeaconChain extension must be there, because we put it there.") - .clone(); + .ok_or_else(|| ApiError::ServerError("Beacon chain extension missing".to_string()))?; let query_params = ["root", "slot"]; - let query = try_future!(UrlQuery::from_request(&req)); - let (key, value) = try_future!(query.first_of(&query_params)); + let (key, value) = UrlQuery::from_request(&req)?.first_of(&query_params)?; let block_root = match (key.as_ref(), value) { ("slot", value) => { - let target = try_future!(parse_slot(&value)); + let target = parse_slot(&value)?; - try_future!(block_root_at_slot(&beacon_chain, target).ok_or_else(|| { + block_root_at_slot(&beacon_chain, target).ok_or_else(|| { ApiError::NotFound(format!("Unable to find BeaconBlock for slot {:?}", target)) - })) - } - ("root", value) => try_future!(parse_root(&value)), - _ => { - return Box::new(futures::future::err(ApiError::ServerError( - "Unexpected query parameter".into(), - ))) + })? } + ("root", value) => parse_root(&value)?, + _ => return Err(ApiError::ServerError("Unexpected query parameter".into())), }; - let block = try_future!(try_future!(beacon_chain + let block = beacon_chain .store - .get::>(&block_root)) - .ok_or_else(|| { - ApiError::NotFound(format!( - "Unable to find BeaconBlock for root {:?}", - block_root - )) - })); + .get::>(&block_root)? + .ok_or_else(|| { + ApiError::NotFound(format!( + "Unable to find BeaconBlock for root {:?}", + block_root + )) + })?; let response = BlockResponse { root: block_root, beacon_block: block, }; - success_response(req, &response) + ResponseBuilder::new(&req).body(&response) } /// HTTP handler to return a `BeaconBlock` root at a given `slot`. 
-pub fn get_block_root(req: Request) -> BoxFut { - let (beacon_chain, _head_state) = try_future!(get_beacon_chain_from_request::(&req)); +pub fn get_block_root(req: Request) -> ApiResult { + let (beacon_chain, _head_state) = get_beacon_chain_from_request::(&req)?; - let slot_string: String = - try_future!(try_future!(UrlQuery::from_request(&req)).only_one("slot")); - let target: Slot = try_future!(parse_slot(&slot_string)); + let slot_string = UrlQuery::from_request(&req)?.only_one("slot")?; + let target = parse_slot(&slot_string)?; - let root = try_future!(block_root_at_slot(&beacon_chain, target).ok_or_else(|| { + let root = block_root_at_slot(&beacon_chain, target).ok_or_else(|| { ApiError::NotFound(format!("Unable to find BeaconBlock for slot {:?}", target)) - })); + })?; - success_response(req, &root) + let json: String = serde_json::to_string(&root) + .map_err(|e| ApiError::ServerError(format!("Unable to serialize root: {:?}", e)))?; + + Ok(success_response_old(Body::from(json))) } /// HTTP handler to return the `Fork` of the current head. -pub fn get_fork(req: Request) -> BoxFut { - let (_beacon_chain, head_state) = try_future!(get_beacon_chain_from_request::(&req)); +pub fn get_fork(req: Request) -> ApiResult { + let (_beacon_chain, head_state) = get_beacon_chain_from_request::(&req)?; - success_response(req, &head_state.fork) + let json: String = serde_json::to_string(&head_state.fork).map_err(|e| { + ApiError::ServerError(format!("Unable to serialize BeaconState::Fork: {:?}", e)) + })?; + + Ok(success_response_old(Body::from(json))) } /// HTTP handler to return the set of validators for an `Epoch` /// /// The `Epoch` parameter can be any epoch number. If it is not specified, /// the current epoch is assumed. 
-pub fn get_validators(req: Request) -> BoxFut { - let (beacon_chain, _head_state) = try_future!(get_beacon_chain_from_request::(&req)); +pub fn get_validators(req: Request) -> ApiResult { + let (beacon_chain, _head_state) = get_beacon_chain_from_request::(&req)?; let epoch = match UrlQuery::from_request(&req) { // We have some parameters, so make sure it's the epoch one and parse it - Ok(query) => try_future!(try_future!(query.only_one("epoch")) + Ok(query) => query + .only_one("epoch")? .parse::() .map(Epoch::from) .map_err(|e| { @@ -151,11 +154,11 @@ pub fn get_validators(req: Request) -> BoxF "Invalid epoch parameter, must be a u64. {:?}", e )) - })), + })?, // In this case, our url query did not contain any parameters, so we take the default - Err(_) => try_future!(beacon_chain.epoch().map_err(|e| { + Err(_) => beacon_chain.epoch().map_err(|e| { ApiError::ServerError(format!("Unable to determine current epoch: {:?}", e)) - })), + })?, }; let all_validators = &beacon_chain.head().beacon_state.validators; @@ -165,7 +168,7 @@ pub fn get_validators(req: Request) -> BoxF .cloned() .collect(); - success_response(req, &active_vals) + ResponseBuilder::new(&req).body(&active_vals) } #[derive(Serialize, Encode)] @@ -179,42 +182,43 @@ pub struct StateResponse { /// /// Will not return a state if the request slot is in the future. Will return states higher than /// the current head by skipping slots. -pub fn get_state(req: Request) -> BoxFut { - let (beacon_chain, head_state) = try_future!(get_beacon_chain_from_request::(&req)); +pub fn get_state(req: Request) -> ApiResult { + let (beacon_chain, head_state) = get_beacon_chain_from_request::(&req)?; let (key, value) = match UrlQuery::from_request(&req) { Ok(query) => { // We have *some* parameters, just check them. 
let query_params = ["root", "slot"]; - try_future!(query.first_of(&query_params)) + match query.first_of(&query_params) { + Ok((k, v)) => (k, v), + Err(e) => { + // Wrong parameters provided, or another error, return the error. + return Err(e); + } + } } Err(ApiError::InvalidQueryParams(_)) => { // No parameters provided at all, use current slot. (String::from("slot"), head_state.slot.to_string()) } Err(e) => { - return Box::new(futures::future::err(e)); + return Err(e); } }; let (root, state): (Hash256, BeaconState) = match (key.as_ref(), value) { - ("slot", value) => try_future!(state_at_slot( - &beacon_chain, - try_future!(parse_slot(&value)) - )), + ("slot", value) => state_at_slot(&beacon_chain, parse_slot(&value)?)?, ("root", value) => { - let root: &Hash256 = &try_future!(parse_root(&value)); + let root = &parse_root(&value)?; - let state = try_future!(try_future!(beacon_chain.store.get(root)) - .ok_or_else(|| ApiError::NotFound(format!("No state for root: {:?}", root)))); + let state = beacon_chain + .store + .get(root)? + .ok_or_else(|| ApiError::NotFound(format!("No state for root: {:?}", root)))?; (*root, state) } - _ => { - return Box::new(futures::future::err(ApiError::ServerError( - "Unexpected query parameter".into(), - ))) - } + _ => return Err(ApiError::ServerError("Unexpected query parameter".into())), }; let response = StateResponse { @@ -222,29 +226,32 @@ pub fn get_state(req: Request) -> BoxFut { beacon_state: state, }; - success_response(req, &response) + ResponseBuilder::new(&req).body(&response) } /// HTTP handler to return a `BeaconState` root at a given `slot`. /// /// Will not return a state if the request slot is in the future. Will return states higher than /// the current head by skipping slots. 
-pub fn get_state_root(req: Request) -> BoxFut { - let (beacon_chain, _head_state) = try_future!(get_beacon_chain_from_request::(&req)); +pub fn get_state_root(req: Request) -> ApiResult { + let (beacon_chain, _head_state) = get_beacon_chain_from_request::(&req)?; - let slot_string = try_future!(try_future!(UrlQuery::from_request(&req)).only_one("slot")); - let slot = try_future!(parse_slot(&slot_string)); + let slot_string = UrlQuery::from_request(&req)?.only_one("slot")?; + let slot = parse_slot(&slot_string)?; - let root = try_future!(state_root_at_slot(&beacon_chain, slot)); + let root = state_root_at_slot(&beacon_chain, slot)?; - success_response(req, &root) + let json: String = serde_json::to_string(&root) + .map_err(|e| ApiError::ServerError(format!("Unable to serialize root: {:?}", e)))?; + + Ok(success_response_old(Body::from(json))) } /// HTTP handler to return the highest finalized slot. pub fn get_current_finalized_checkpoint( req: Request, -) -> BoxFut { - let (beacon_chain, _head_state) = try_future!(get_beacon_chain_from_request::(&req)); +) -> ApiResult { + let (beacon_chain, _head_state) = get_beacon_chain_from_request::(&req)?; let checkpoint = beacon_chain .head() @@ -252,14 +259,17 @@ pub fn get_current_finalized_checkpoint( .finalized_checkpoint .clone(); - success_response(req, &checkpoint) + let json: String = serde_json::to_string(&checkpoint) + .map_err(|e| ApiError::ServerError(format!("Unable to serialize checkpoint: {:?}", e)))?; + + Ok(success_response_old(Body::from(json))) } /// HTTP handler to return a `BeaconState` at the genesis block. 
-pub fn get_genesis_state(req: Request) -> BoxFut { - let (beacon_chain, _head_state) = try_future!(get_beacon_chain_from_request::(&req)); +pub fn get_genesis_state(req: Request) -> ApiResult { + let (beacon_chain, _head_state) = get_beacon_chain_from_request::(&req)?; - let (_root, state) = try_future!(state_at_slot(&beacon_chain, Slot::new(0))); + let (_root, state) = state_at_slot(&beacon_chain, Slot::new(0))?; - success_response(req, &state) + ResponseBuilder::new(&req).body(&state) } diff --git a/beacon_node/rest_api/src/helpers.rs b/beacon_node/rest_api/src/helpers.rs index a3633cfec..41915138f 100644 --- a/beacon_node/rest_api/src/helpers.rs +++ b/beacon_node/rest_api/src/helpers.rs @@ -233,7 +233,7 @@ pub fn get_beacon_chain_from_request( let beacon_chain = req .extensions() .get::>>() - .expect("BeaconChain extension must be there, because we put it there."); + .ok_or_else(|| ApiError::ServerError("Beacon chain extension missing".into()))?; let mut head_state = beacon_chain .state_now() .map_err(|e| ApiError::ServerError(format!("Unable to get current BeaconState {:?}", e)))?; diff --git a/beacon_node/rest_api/src/lib.rs b/beacon_node/rest_api/src/lib.rs index bc3d3d159..97ed971c2 100644 --- a/beacon_node/rest_api/src/lib.rs +++ b/beacon_node/rest_api/src/lib.rs @@ -95,6 +95,7 @@ impl Service for ApiService { (&Method::GET, "/network/listen_port") => network::get_listen_port::(req), (&Method::GET, "/network/listen_addresses") => network::get_listen_addresses::(req), + /* // Methods for Beacon Node (&Method::GET, "/beacon/head") => beacon::get_head::(req), (&Method::GET, "/beacon/block") => beacon::get_block::(req), From c254ac8c2ecd2816959d23a829dc628f4a713a0a Mon Sep 17 00:00:00 2001 From: Luke Anderson Date: Thu, 12 Sep 2019 01:44:45 +1000 Subject: [PATCH 279/305] Separated acquisition of BeaconChain and BeaconState. 
--- beacon_node/rest_api/src/beacon.rs | 23 +++++++++++------------ beacon_node/rest_api/src/helpers.rs | 19 +++++++++++-------- beacon_node/rest_api/src/metrics.rs | 2 +- beacon_node/rest_api/src/node.rs | 5 +++-- beacon_node/rest_api/src/spec.rs | 2 +- beacon_node/rest_api/src/validator.rs | 20 +++++--------------- 6 files changed, 32 insertions(+), 39 deletions(-) diff --git a/beacon_node/rest_api/src/beacon.rs b/beacon_node/rest_api/src/beacon.rs index 66f5e7731..3b9b2a008 100644 --- a/beacon_node/rest_api/src/beacon.rs +++ b/beacon_node/rest_api/src/beacon.rs @@ -109,7 +109,7 @@ pub fn get_block(req: Request) -> ApiResult /// HTTP handler to return a `BeaconBlock` root at a given `slot`. pub fn get_block_root(req: Request) -> ApiResult { - let (beacon_chain, _head_state) = get_beacon_chain_from_request::(&req)?; + let beacon_chain = get_beacon_chain_from_request::(&req)?; let slot_string = UrlQuery::from_request(&req)?.only_one("slot")?; let target = parse_slot(&slot_string)?; @@ -126,7 +126,8 @@ pub fn get_block_root(req: Request) -> ApiR /// HTTP handler to return the `Fork` of the current head. pub fn get_fork(req: Request) -> ApiResult { - let (_beacon_chain, head_state) = get_beacon_chain_from_request::(&req)?; + let beacon_chain = get_beacon_chain_from_request::(&req)?; + let head_state = get_head_state(beacon_chain)?; let json: String = serde_json::to_string(&head_state.fork).map_err(|e| { ApiError::ServerError(format!("Unable to serialize BeaconState::Fork: {:?}", e)) @@ -140,7 +141,7 @@ pub fn get_fork(req: Request) -> ApiResult /// The `Epoch` parameter can be any epoch number. If it is not specified, /// the current epoch is assumed. 
pub fn get_validators(req: Request) -> ApiResult { - let (beacon_chain, _head_state) = get_beacon_chain_from_request::(&req)?; + let beacon_chain = get_beacon_chain_from_request::(&req)?; let epoch = match UrlQuery::from_request(&req) { // We have some parameters, so make sure it's the epoch one and parse it @@ -182,7 +183,8 @@ pub struct StateResponse { /// Will not return a state if the request slot is in the future. Will return states higher than /// the current head by skipping slots. pub fn get_state(req: Request) -> ApiResult { - let (beacon_chain, head_state) = get_beacon_chain_from_request::(&req)?; + let beacon_chain = get_beacon_chain_from_request::(&req)?; + let head_state = get_head_state(beacon_chain.clone())?; let (key, value) = match UrlQuery::from_request(&req) { Ok(query) => { @@ -233,7 +235,7 @@ pub fn get_state(req: Request) -> ApiResult /// Will not return a state if the request slot is in the future. Will return states higher than /// the current head by skipping slots. 
pub fn get_state_root(req: Request) -> ApiResult { - let (beacon_chain, _head_state) = get_beacon_chain_from_request::(&req)?; + let beacon_chain = get_beacon_chain_from_request::(&req)?; let slot_string = UrlQuery::from_request(&req)?.only_one("slot")?; let slot = parse_slot(&slot_string)?; @@ -250,13 +252,10 @@ pub fn get_state_root(req: Request) -> ApiR pub fn get_current_finalized_checkpoint( req: Request, ) -> ApiResult { - let (beacon_chain, _head_state) = get_beacon_chain_from_request::(&req)?; + let beacon_chain = get_beacon_chain_from_request::(&req)?; + let head_state = get_head_state(beacon_chain)?; - let checkpoint = beacon_chain - .head() - .beacon_state - .finalized_checkpoint - .clone(); + let checkpoint = head_state.finalized_checkpoint.clone(); let json: String = serde_json::to_string(&checkpoint) .map_err(|e| ApiError::ServerError(format!("Unable to serialize checkpoint: {:?}", e)))?; @@ -266,7 +265,7 @@ pub fn get_current_finalized_checkpoint( /// HTTP handler to return a `BeaconState` at the genesis block. 
pub fn get_genesis_state(req: Request) -> ApiResult { - let (beacon_chain, _head_state) = get_beacon_chain_from_request::(&req)?; + let beacon_chain = get_beacon_chain_from_request::(&req)?; let (_root, state) = state_at_slot(&beacon_chain, Slot::new(0))?; diff --git a/beacon_node/rest_api/src/helpers.rs b/beacon_node/rest_api/src/helpers.rs index 76fc78750..4dd8a475d 100644 --- a/beacon_node/rest_api/src/helpers.rs +++ b/beacon_node/rest_api/src/helpers.rs @@ -172,21 +172,24 @@ pub fn implementation_pending_response(_req: Request) -> ApiResult { pub fn get_beacon_chain_from_request( req: &Request, -) -> Result<(Arc>, BeaconState), ApiError> { +) -> Result<(Arc>), ApiError> { // Get beacon state let beacon_chain = req .extensions() .get::>>() .ok_or_else(|| ApiError::ServerError("Beacon chain extension missing".into()))?; - let mut head_state = beacon_chain - .state_now() - .map_err(|e| ApiError::ServerError(format!("Unable to get current BeaconState {:?}", e)))?; - if let Some(s) = head_state.maybe_as_mut_ref() { - s.build_all_caches(&beacon_chain.spec).ok(); - } + Ok(beacon_chain.clone()) +} - Ok((beacon_chain.clone(), head_state.clone())) +pub fn get_head_state( + bc: Arc>, +) -> Result, ApiError> { + let mut head_state = bc.head().beacon_state; + head_state + .build_all_caches(&bc.spec) + .map_err(|e| ApiError::ServerError(format!("Unable to build state cache: {:?}", e)))?; + Ok(head_state) } pub fn get_logger_from_request(req: &Request) -> slog::Logger { diff --git a/beacon_node/rest_api/src/metrics.rs b/beacon_node/rest_api/src/metrics.rs index 62a769de1..01dc4d22d 100644 --- a/beacon_node/rest_api/src/metrics.rs +++ b/beacon_node/rest_api/src/metrics.rs @@ -30,7 +30,7 @@ pub fn get_prometheus(req: Request) -> ApiR let mut buffer = vec![]; let encoder = TextEncoder::new(); - let (beacon_chain, _head_state) = get_beacon_chain_from_request::(&req)?; + let beacon_chain = get_beacon_chain_from_request::(&req)?; let db_path = req .extensions() .get::() diff --git 
a/beacon_node/rest_api/src/node.rs b/beacon_node/rest_api/src/node.rs index c75d3ba20..4a9f11be0 100644 --- a/beacon_node/rest_api/src/node.rs +++ b/beacon_node/rest_api/src/node.rs @@ -1,4 +1,4 @@ -use crate::helpers::get_beacon_chain_from_request; +use crate::helpers::*; use crate::{success_response, ApiResult}; use beacon_chain::BeaconChainTypes; use hyper::{Body, Request}; @@ -15,7 +15,8 @@ pub fn get_version(_req: Request) -> ApiResult { /// Read the genesis time from the current beacon chain state. pub fn get_genesis_time(req: Request) -> ApiResult { - let (_beacon_chain, head_state) = get_beacon_chain_from_request::(&req)?; + let beacon_chain = get_beacon_chain_from_request::(&req)?; + let head_state = get_head_state(beacon_chain)?; let gen_time: u64 = head_state.genesis_time; let body = Body::from( serde_json::to_string(&gen_time) diff --git a/beacon_node/rest_api/src/spec.rs b/beacon_node/rest_api/src/spec.rs index ad168faf1..a353b3833 100644 --- a/beacon_node/rest_api/src/spec.rs +++ b/beacon_node/rest_api/src/spec.rs @@ -9,7 +9,7 @@ use types::EthSpec; /// HTTP handler to return the full spec object. 
pub fn get_spec(req: Request) -> ApiResult { - let (beacon_chain, _head_state) = get_beacon_chain_from_request::(&req)?; + let beacon_chain = get_beacon_chain_from_request::(&req)?; let json: String = serde_json::to_string(&beacon_chain.spec) .map_err(|e| ApiError::ServerError(format!("Unable to serialize spec: {:?}", e)))?; diff --git a/beacon_node/rest_api/src/validator.rs b/beacon_node/rest_api/src/validator.rs index 49b4c0441..84cb7a308 100644 --- a/beacon_node/rest_api/src/validator.rs +++ b/beacon_node/rest_api/src/validator.rs @@ -34,7 +34,8 @@ impl ValidatorDuty { pub fn get_validator_duties(req: Request) -> ApiResult { let log = get_logger_from_request(&req); slog::trace!(log, "Validator duties requested of API: {:?}", &req); - let (beacon_chain, head_state) = get_beacon_chain_from_request::(&req)?; + let beacon_chain = get_beacon_chain_from_request::(&req)?; + let head_state = get_head_state(beacon_chain.clone())?; slog::trace!(log, "Got head state from request."); // Parse and check query parameters @@ -71,18 +72,6 @@ pub fn get_validator_duties(req: Request) - .collect::, _>>()?; let mut duties: Vec = Vec::new(); - // Update the committee cache - // TODO: Do we need to update the cache on the state, for the epoch which has been specified? - beacon_chain - .state_now() - .map_err(|e| ApiError::ServerError(format!("Unable to get current BeaconState {:?}", e)))? - .maybe_as_mut_ref() - .ok_or(ApiError::ServerError( - "Unable to get mutable BeaconState".into(), - ))? - .build_committee_cache(relative_epoch, &beacon_chain.spec) - .map_err(|e| ApiError::ServerError(format!("Unable to build state caches: {:?}", e)))?; - // Get a list of all validators for this epoch let validator_proposers: Vec = epoch .slot_iter(T::EthSpec::slots_per_epoch()) @@ -157,7 +146,7 @@ pub fn get_validator_duties(req: Request) - /// HTTP Handler to produce a new BeaconBlock from the current state, ready to be signed by a validator. 
pub fn get_new_beacon_block(req: Request) -> ApiResult { - let (beacon_chain, _head_state) = get_beacon_chain_from_request::(&req)?; + let beacon_chain = get_beacon_chain_from_request::(&req)?; let query = UrlQuery::from_request(&req)?; let slot = query @@ -197,7 +186,8 @@ pub fn get_new_beacon_block(req: Request) - /// HTTP Handler to produce a new Attestation from the current state, ready to be signed by a validator. pub fn get_new_attestation(req: Request) -> ApiResult { - let (beacon_chain, head_state) = get_beacon_chain_from_request::(&req)?; + let beacon_chain = get_beacon_chain_from_request::(&req)?; + let head_state = get_head_state(beacon_chain.clone())?; let query = UrlQuery::from_request(&req)?; let val_pk_str = query From cd8f40b4b70d6b52d250e14fda8fcd6063c4f518 Mon Sep 17 00:00:00 2001 From: Luke Anderson Date: Thu, 12 Sep 2019 15:20:31 +1000 Subject: [PATCH 280/305] Getting regular endpoint functions to return futures. - Wrapped endpoint functions in new into_boxfut function - Undid changes to Network API service, now returning ApiResult again. - Cleaning up of functions, and removal of success_response functions in updated endpoints. - A bunch of other clean-ups. --- beacon_node/rest_api/src/beacon.rs | 8 +---- beacon_node/rest_api/src/error.rs | 2 ++ beacon_node/rest_api/src/lib.rs | 37 +++++++++++++------- beacon_node/rest_api/src/network.rs | 39 +++++++++------------- beacon_node/rest_api/src/node.rs | 6 ++-- beacon_node/rest_api/src/validator.rs | 6 ++-- validator_client/src/block_producer/mod.rs | 4 +-- 7 files changed, 50 insertions(+), 52 deletions(-) diff --git a/beacon_node/rest_api/src/beacon.rs b/beacon_node/rest_api/src/beacon.rs index 23ef12aa8..d74ab2ed9 100644 --- a/beacon_node/rest_api/src/beacon.rs +++ b/beacon_node/rest_api/src/beacon.rs @@ -191,13 +191,7 @@ pub fn get_state(req: Request) -> ApiResult Ok(query) => { // We have *some* parameters, just check them. 
let query_params = ["root", "slot"]; - match query.first_of(&query_params) { - Ok((k, v)) => (k, v), - Err(e) => { - // Wrong parameters provided, or another error, return the error. - return Err(e); - } - } + query.first_of(&query_params)? } Err(ApiError::InvalidQueryParams(_)) => { // No parameters provided at all, use current slot. diff --git a/beacon_node/rest_api/src/error.rs b/beacon_node/rest_api/src/error.rs index 138affae4..82dc73da0 100644 --- a/beacon_node/rest_api/src/error.rs +++ b/beacon_node/rest_api/src/error.rs @@ -1,4 +1,6 @@ use crate::BoxFut; +use futures::future::IntoFuture; +use futures::Future; use hyper::{Body, Method, Request, Response, Server, StatusCode}; use std::error::Error as StdError; diff --git a/beacon_node/rest_api/src/lib.rs b/beacon_node/rest_api/src/lib.rs index 97ed971c2..3903d0ea1 100644 --- a/beacon_node/rest_api/src/lib.rs +++ b/beacon_node/rest_api/src/lib.rs @@ -21,6 +21,7 @@ use client_network::NetworkMessage; use client_network::Service as NetworkService; use error::{ApiError, ApiResult}; use eth2_config::Eth2Config; +use futures::future::IntoFuture; use hyper::rt::Future; use hyper::server::conn::AddrStream; use hyper::service::{MakeService, Service}; @@ -33,7 +34,6 @@ use std::sync::Arc; use tokio::runtime::TaskExecutor; use tokio::sync::mpsc; use url_query::UrlQuery; -use futures::future::IntoFuture; pub use beacon::{BlockResponse, HeadResponse, StateResponse}; pub use config::Config as ApiConfig; @@ -51,6 +51,14 @@ pub struct ApiService { eth2_config: Arc, } +fn into_boxfut(item: F) -> BoxFut +where + F: IntoFuture, Error = ApiError>, + F::Future: Send, +{ + Box::new(item.into_future()) +} + impl Service for ApiService { type ReqBody = Body; type ResBody = Body; @@ -82,20 +90,25 @@ impl Service for ApiService { // Route the request to the correct handler. 
let result = match (req.method(), path.as_ref()) { // Methods for Client - (&Method::GET, "/node/version") => Box::new(node::get_version(req).into_future()), - (&Method::GET, "/node/genesis_time") => Box::new(node::get_genesis_time::(req).into_future()), - (&Method::GET, "/node/syncing") => Box::new(helpers::implementation_pending_response(req).into_future()), - /* + (&Method::GET, "/node/version") => into_boxfut(node::get_version(req)), + (&Method::GET, "/node/genesis_time") => into_boxfut(node::get_genesis_time::(req)), + (&Method::GET, "/node/syncing") => { + into_boxfut(helpers::implementation_pending_response(req)) + } // Methods for Network - (&Method::GET, "/network/enr") => network::get_enr::(req), - (&Method::GET, "/network/peer_count") => network::get_peer_count::(req), - (&Method::GET, "/network/peer_id") => network::get_peer_id::(req), - (&Method::GET, "/network/peers") => network::get_peer_list::(req), - (&Method::GET, "/network/listen_port") => network::get_listen_port::(req), - (&Method::GET, "/network/listen_addresses") => network::get_listen_addresses::(req), - + (&Method::GET, "/network/enr") => into_boxfut(network::get_enr::(req)), + (&Method::GET, "/network/peer_count") => into_boxfut(network::get_peer_count::(req)), + (&Method::GET, "/network/peer_id") => into_boxfut(network::get_peer_id::(req)), + (&Method::GET, "/network/peers") => into_boxfut(network::get_peer_list::(req)), + (&Method::GET, "/network/listen_port") => { + into_boxfut(network::get_listen_port::(req)) + } + (&Method::GET, "/network/listen_addresses") => { + into_boxfut(network::get_listen_addresses::(req)) + } /* + // Methods for Beacon Node (&Method::GET, "/beacon/head") => beacon::get_head::(req), (&Method::GET, "/beacon/block") => beacon::get_block::(req), diff --git a/beacon_node/rest_api/src/network.rs b/beacon_node/rest_api/src/network.rs index e037d43f0..26e5623c2 100644 --- a/beacon_node/rest_api/src/network.rs +++ b/beacon_node/rest_api/src/network.rs @@ -1,5 +1,7 @@ 
+use crate::error::{ApiError, ApiResult}; use crate::helpers::*; -use crate::{ApiError, BoxFut, NetworkService}; +use crate::response_builder::ResponseBuilder; +use crate::NetworkService; use beacon_chain::BeaconChainTypes; use eth2_libp2p::{Enr, Multiaddr, PeerId}; use hyper::{Body, Request}; @@ -8,81 +10,70 @@ use std::sync::Arc; /// HTTP handler to return the list of libp2p multiaddr the client is listening on. /// /// Returns a list of `Multiaddr`, serialized according to their `serde` impl. -pub fn get_listen_addresses(req: Request) -> BoxFut { +pub fn get_listen_addresses(req: Request) -> ApiResult { let network = req .extensions() .get::>>() .expect("The network service should always be there, we put it there"); let multiaddresses: Vec = network.listen_multiaddrs(); - success_response_json(req, &multiaddresses) + ResponseBuilder::new(&req).body_json(&multiaddresses) } /// HTTP handler to return the network port the client is listening on. /// /// Returns the TCP port number in its plain form (which is also valid JSON serialization) -pub fn get_listen_port(req: Request) -> BoxFut { +pub fn get_listen_port(req: Request) -> ApiResult { let network = req .extensions() .get::>>() .expect("The network service should always be there, we put it there") .clone(); - - success_response(req, &network.listen_port()) + ResponseBuilder::new(&req).body(&network.listen_port()) } /// HTTP handler to return the Discv5 ENR from the client's libp2p service. /// /// ENR is encoded as base64 string. -pub fn get_enr(req: Request) -> BoxFut { +pub fn get_enr(req: Request) -> ApiResult { let network = req .extensions() .get::>>() .expect("The network service should always be there, we put it there"); - - let enr: Enr = network.local_enr(); - success_response_json(req, &enr.to_base64()) + ResponseBuilder::new(&req).body_json(&network.local_enr().to_base64()) } /// HTTP handler to return the `PeerId` from the client's libp2p service. /// /// PeerId is encoded as base58 string. 
-pub fn get_peer_id(req: Request) -> BoxFut { +pub fn get_peer_id(req: Request) -> ApiResult { let network = req .extensions() .get::>>() .expect("The network service should always be there, we put it there"); - - let peer_id: PeerId = network.local_peer_id(); - - success_response_json(req, &peer_id.to_base58()) + ResponseBuilder::new(&req).body_json(&network.local_peer_id().to_base58()) } /// HTTP handler to return the number of peers connected in the client's libp2p service. -pub fn get_peer_count(req: Request) -> BoxFut { +pub fn get_peer_count(req: Request) -> ApiResult { let network = req .extensions() .get::>>() .expect("The network service should always be there, we put it there"); - - let connected_peers: usize = network.connected_peers(); - - success_response(req, &connected_peers) + ResponseBuilder::new(&req).body(&network.connected_peers()) } /// HTTP handler to return the list of peers connected to the client's libp2p service. /// /// Peers are presented as a list of `PeerId::to_string()`. -pub fn get_peer_list(req: Request) -> BoxFut { +pub fn get_peer_list(req: Request) -> ApiResult { let network = req .extensions() .get::>>() .expect("The network service should always be there, we put it there"); - let connected_peers: Vec = network .connected_peer_set() .iter() .map(PeerId::to_string) .collect(); - - success_response_json(req, &connected_peers) + ResponseBuilder::new(&req).body_json(&connected_peers) } diff --git a/beacon_node/rest_api/src/node.rs b/beacon_node/rest_api/src/node.rs index 433aae6cf..5ef35f9a0 100644 --- a/beacon_node/rest_api/src/node.rs +++ b/beacon_node/rest_api/src/node.rs @@ -1,4 +1,5 @@ use crate::helpers::*; +use crate::response_builder::ResponseBuilder; use crate::{ApiResult, BoxFut}; use beacon_chain::BeaconChainTypes; use hyper::{Body, Request}; @@ -6,13 +7,12 @@ use version; /// Read the version string from the current Lighthouse build. 
pub fn get_version(req: Request) -> ApiResult { - success_response_2_json(req, &version::version()) + ResponseBuilder::new(&req).body_json(&version::version()) } /// Read the genesis time from the current beacon chain state. pub fn get_genesis_time(req: Request) -> ApiResult { let beacon_chain = get_beacon_chain_from_request::(&req)?; let head_state = get_head_state(beacon_chain)?; - let gen_time: u64 = head_state.genesis_time; - success_response_2(req, &gen_time) + ResponseBuilder::new(&req).body(&head_state.genesis_time) } diff --git a/beacon_node/rest_api/src/validator.rs b/beacon_node/rest_api/src/validator.rs index dfa35e298..7d0fbdabd 100644 --- a/beacon_node/rest_api/src/validator.rs +++ b/beacon_node/rest_api/src/validator.rs @@ -1,6 +1,6 @@ use crate::helpers::*; use crate::response_builder::ResponseBuilder; -use crate::{ApiError, ApiResult, UrlQuery, BoxFut}; +use crate::{ApiError, ApiResult, BoxFut, UrlQuery}; use beacon_chain::{BeaconChainTypes, BlockProcessingOutcome}; use bls::{AggregateSignature, PublicKey, Signature}; use futures::future::Future; @@ -197,7 +197,7 @@ pub fn get_new_beacon_block(req: Request) - pub fn publish_beacon_block(req: Request) -> BoxFut { let _ = try_future!(check_content_type_for_json(&req)); let log = get_logger_from_request(&req); - let (beacon_chain, _head_state) = try_future!(get_beacon_chain_from_request::(&req)); + let beacon_chain = try_future!(get_beacon_chain_from_request::(&req)); // Get the network sending channel from the request, for later transmission let network_chan = req .extensions() @@ -250,8 +250,6 @@ pub fn publish_beacon_block(req: Request) - }).and_then(|_| { response_builder.body_json(&()) })) - - } /// HTTP Handler to produce a new Attestation from the current state, ready to be signed by a validator. 
diff --git a/validator_client/src/block_producer/mod.rs b/validator_client/src/block_producer/mod.rs index 0716d740c..bb9c5741d 100644 --- a/validator_client/src/block_producer/mod.rs +++ b/validator_client/src/block_producer/mod.rs @@ -63,12 +63,12 @@ impl<'a, B: BeaconNodeBlock, S: Signer, E: EthSpec> BlockProducer<'a, B, S, E> { pub fn handle_produce_block(&mut self) { match self.produce_block() { Ok(ValidatorEvent::BlockProduced(slot)) => info!( - log, + self.log, "Block produced"; "validator" => format!("{}", self.signer), "slot" => slot, ), - Err(e) => error!(log, "Block production error"; "Error" => format!("{:?}", e)), + Err(e) => error!(self.log, "Block production error"; "Error" => format!("{:?}", e)), Ok(ValidatorEvent::SignerRejection(_slot)) => { error!(self.log, "Block production error"; "Error" => "Signer Could not sign the block".to_string()) } From 7c211f379100fad3b44ae0ef91756d2b6b266bdb Mon Sep 17 00:00:00 2001 From: Paul Hauner Date: Thu, 12 Sep 2019 10:40:29 -0400 Subject: [PATCH 281/305] Add explicit fails for 32bit architecture --- beacon_node/src/main.rs | 8 ++++++++ validator_client/src/main.rs | 8 ++++++++ 2 files changed, 16 insertions(+) diff --git a/beacon_node/src/main.rs b/beacon_node/src/main.rs index 54e4529c4..bb88e8f92 100644 --- a/beacon_node/src/main.rs +++ b/beacon_node/src/main.rs @@ -357,6 +357,14 @@ fn main() { let log = slog::Logger::root(drain.fuse(), o!()); + if std::mem::size_of::() != 8 { + crit!( + log, + "Lighthouse only supports 64bit CPUs"; + "detected" => format!("{}bit", std::mem::size_of::() * 8) + ); + } + warn!( log, "Ethereum 2.0 is pre-release. This software is experimental." 
diff --git a/validator_client/src/main.rs b/validator_client/src/main.rs index e445218eb..58914a9a8 100644 --- a/validator_client/src/main.rs +++ b/validator_client/src/main.rs @@ -157,6 +157,14 @@ fn main() { let mut log = slog::Logger::root(drain.fuse(), o!()); + if std::mem::size_of::() != 8 { + crit!( + log, + "Lighthouse only supports 64bit CPUs"; + "detected" => format!("{}bit", std::mem::size_of::() * 8) + ); + } + let (client_config, eth2_config) = match get_configs(&matches, &mut log) { Ok(tuple) => tuple, Err(e) => { From 62b5f9c5a0ace22f6c2b173b3e45a6b7a4f60097 Mon Sep 17 00:00:00 2001 From: Paul Hauner Date: Thu, 12 Sep 2019 12:28:31 -0400 Subject: [PATCH 282/305] Move lcli out of the tests dir --- Cargo.toml | 2 +- {tests/lcli => lcli}/.gitignore | 0 {tests/lcli => lcli}/Cargo.toml | 6 +++--- {tests/lcli => lcli}/src/main.rs | 0 {tests/lcli => lcli}/src/parse_hex.rs | 0 {tests/lcli => lcli}/src/transition_blocks.rs | 0 6 files changed, 4 insertions(+), 4 deletions(-) rename {tests/lcli => lcli}/.gitignore (100%) rename {tests/lcli => lcli}/Cargo.toml (71%) rename {tests/lcli => lcli}/src/main.rs (100%) rename {tests/lcli => lcli}/src/parse_hex.rs (100%) rename {tests/lcli => lcli}/src/transition_blocks.rs (100%) diff --git a/Cargo.toml b/Cargo.toml index 0a98bb8dd..3600c90ca 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -34,7 +34,7 @@ members = [ "beacon_node/version", "beacon_node/beacon_chain", "tests/ef_tests", - "tests/lcli", + "lcli", "protos", "validator_client", "account_manager", diff --git a/tests/lcli/.gitignore b/lcli/.gitignore similarity index 100% rename from tests/lcli/.gitignore rename to lcli/.gitignore diff --git a/tests/lcli/Cargo.toml b/lcli/Cargo.toml similarity index 71% rename from tests/lcli/Cargo.toml rename to lcli/Cargo.toml index 3322d8cca..b774d4d12 100644 --- a/tests/lcli/Cargo.toml +++ b/lcli/Cargo.toml @@ -14,6 +14,6 @@ log = "0.4" serde = "1.0" serde_yaml = "0.8" simple_logger = "1.0" -types = { path = "../../eth2/types" 
} -state_processing = { path = "../../eth2/state_processing" } -eth2_ssz = { path = "../../eth2/utils/ssz" } +types = { path = "../eth2/types" } +state_processing = { path = "../eth2/state_processing" } +eth2_ssz = { path = "../eth2/utils/ssz" } diff --git a/tests/lcli/src/main.rs b/lcli/src/main.rs similarity index 100% rename from tests/lcli/src/main.rs rename to lcli/src/main.rs diff --git a/tests/lcli/src/parse_hex.rs b/lcli/src/parse_hex.rs similarity index 100% rename from tests/lcli/src/parse_hex.rs rename to lcli/src/parse_hex.rs diff --git a/tests/lcli/src/transition_blocks.rs b/lcli/src/transition_blocks.rs similarity index 100% rename from tests/lcli/src/transition_blocks.rs rename to lcli/src/transition_blocks.rs From b0e3ce78855c8823f1929a4205cc2e1577006a86 Mon Sep 17 00:00:00 2001 From: Luke Anderson Date: Fri, 13 Sep 2019 18:58:08 +1000 Subject: [PATCH 283/305] Addressed Paul's comments regarding head_state. --- beacon_node/rest_api/src/beacon.rs | 6 +++--- beacon_node/rest_api/src/helpers.rs | 10 ---------- beacon_node/rest_api/src/metrics.rs | 1 + beacon_node/rest_api/src/node.rs | 9 ++------- beacon_node/rest_api/src/validator.rs | 19 +++++++++++++++---- 5 files changed, 21 insertions(+), 24 deletions(-) diff --git a/beacon_node/rest_api/src/beacon.rs b/beacon_node/rest_api/src/beacon.rs index 3b9b2a008..c1f49c1fc 100644 --- a/beacon_node/rest_api/src/beacon.rs +++ b/beacon_node/rest_api/src/beacon.rs @@ -8,7 +8,7 @@ use std::sync::Arc; use store::Store; use types::{BeaconBlock, BeaconState, Epoch, EthSpec, Hash256, Slot, Validator}; -#[derive(Serialize)] +#[derive(Serialize, Encode)] pub struct HeadResponse { pub slot: Slot, pub block_root: Hash256, @@ -184,7 +184,7 @@ pub struct StateResponse { /// the current head by skipping slots. 
pub fn get_state(req: Request) -> ApiResult { let beacon_chain = get_beacon_chain_from_request::(&req)?; - let head_state = get_head_state(beacon_chain.clone())?; + let head_state = beacon_chain.head().beacon_state; let (key, value) = match UrlQuery::from_request(&req) { Ok(query) => { @@ -253,7 +253,7 @@ pub fn get_current_finalized_checkpoint( req: Request, ) -> ApiResult { let beacon_chain = get_beacon_chain_from_request::(&req)?; - let head_state = get_head_state(beacon_chain)?; + let head_state = beacon_chain.head().beacon_state; let checkpoint = head_state.finalized_checkpoint.clone(); diff --git a/beacon_node/rest_api/src/helpers.rs b/beacon_node/rest_api/src/helpers.rs index 4dd8a475d..c58bf1038 100644 --- a/beacon_node/rest_api/src/helpers.rs +++ b/beacon_node/rest_api/src/helpers.rs @@ -182,16 +182,6 @@ pub fn get_beacon_chain_from_request( Ok(beacon_chain.clone()) } -pub fn get_head_state( - bc: Arc>, -) -> Result, ApiError> { - let mut head_state = bc.head().beacon_state; - head_state - .build_all_caches(&bc.spec) - .map_err(|e| ApiError::ServerError(format!("Unable to build state cache: {:?}", e)))?; - Ok(head_state) -} - pub fn get_logger_from_request(req: &Request) -> slog::Logger { let log = req .extensions() diff --git a/beacon_node/rest_api/src/metrics.rs b/beacon_node/rest_api/src/metrics.rs index 01dc4d22d..8ce9cb6af 100644 --- a/beacon_node/rest_api/src/metrics.rs +++ b/beacon_node/rest_api/src/metrics.rs @@ -1,3 +1,4 @@ +use crate::response_builder::ResponseBuilder; use crate::{helpers::*, success_response, ApiError, ApiResult, DBPath}; use beacon_chain::BeaconChainTypes; use http::HeaderValue; diff --git a/beacon_node/rest_api/src/node.rs b/beacon_node/rest_api/src/node.rs index 4a9f11be0..9048cb0f7 100644 --- a/beacon_node/rest_api/src/node.rs +++ b/beacon_node/rest_api/src/node.rs @@ -1,4 +1,5 @@ use crate::helpers::*; +use crate::response_builder::ResponseBuilder; use crate::{success_response, ApiResult}; use 
beacon_chain::BeaconChainTypes; use hyper::{Body, Request}; @@ -16,11 +17,5 @@ pub fn get_version(_req: Request) -> ApiResult { /// Read the genesis time from the current beacon chain state. pub fn get_genesis_time(req: Request) -> ApiResult { let beacon_chain = get_beacon_chain_from_request::(&req)?; - let head_state = get_head_state(beacon_chain)?; - let gen_time: u64 = head_state.genesis_time; - let body = Body::from( - serde_json::to_string(&gen_time) - .expect("Genesis should time always have a valid JSON serialization."), - ); - Ok(success_response(body)) + ResponseBuilder::new(&req).body(&beacon_chain.head().beacon_state.genesis_time) } diff --git a/beacon_node/rest_api/src/validator.rs b/beacon_node/rest_api/src/validator.rs index 84cb7a308..e3075e623 100644 --- a/beacon_node/rest_api/src/validator.rs +++ b/beacon_node/rest_api/src/validator.rs @@ -1,4 +1,5 @@ use super::{success_response, ApiResult}; +use crate::response_builder::ResponseBuilder; use crate::{helpers::*, ApiError, UrlQuery}; use beacon_chain::BeaconChainTypes; use bls::{AggregateSignature, PublicKey, Signature}; @@ -35,7 +36,7 @@ pub fn get_validator_duties(req: Request) - let log = get_logger_from_request(&req); slog::trace!(log, "Validator duties requested of API: {:?}", &req); let beacon_chain = get_beacon_chain_from_request::(&req)?; - let head_state = get_head_state(beacon_chain.clone())?; + let mut head_state = beacon_chain.head().beacon_state; slog::trace!(log, "Got head state from request."); // Parse and check query parameters @@ -72,6 +73,10 @@ pub fn get_validator_duties(req: Request) - .collect::, _>>()?; let mut duties: Vec = Vec::new(); + // Build cache for the requested epoch + head_state + .build_committee_cache(relative_epoch, &beacon_chain.spec) + .map_err(|e| ApiError::ServerError(format!("Unable to build committee cache: {:?}", e)))?; // Get a list of all validators for this epoch let validator_proposers: Vec = epoch .slot_iter(T::EthSpec::slots_per_epoch()) @@ -79,7 
+84,6 @@ pub fn get_validator_duties(req: Request) - head_state .get_beacon_proposer_index(slot, relative_epoch, &beacon_chain.spec) .map_err(|e| { - // TODO: why are we getting an uninitialized state error here??? ApiError::ServerError(format!( "Unable to get proposer index for validator: {:?}", e @@ -181,13 +185,13 @@ pub fn get_new_beacon_block(req: Request) - serde_json::to_string(&new_block) .expect("We should always be able to serialize a new block that we produced."), ); - Ok(success_response(body)) + ResponseBuilder::new(&req).body(&new_block) } /// HTTP Handler to produce a new Attestation from the current state, ready to be signed by a validator. pub fn get_new_attestation(req: Request) -> ApiResult { let beacon_chain = get_beacon_chain_from_request::(&req)?; - let head_state = get_head_state(beacon_chain.clone())?; + let mut head_state = beacon_chain.head().beacon_state; let query = UrlQuery::from_request(&req)?; let val_pk_str = query @@ -195,6 +199,9 @@ pub fn get_new_attestation(req: Request) -> .map(|(_key, value)| value)?; let val_pk = parse_pubkey(val_pk_str.as_str())?; + head_state + .update_pubkey_cache() + .map_err(|e| ApiError::ServerError(format!("Unable to build pubkey cache: {:?}", e)))?; // Get the validator index from the supplied public key // If it does not exist in the index, we cannot continue. let val_index = head_state @@ -206,6 +213,10 @@ pub fn get_new_attestation(req: Request) -> "The provided validator public key does not correspond to a validator index.".into(), ))?; + // Build cache for the requested epoch + head_state + .build_committee_cache(RelativeEpoch::Current, &beacon_chain.spec) + .map_err(|e| ApiError::ServerError(format!("Unable to build committee cache: {:?}", e)))?; // Get the duties of the validator, to make sure they match up. 
// If they don't have duties this epoch, then return an error let val_duty = head_state From 0b2f3bbf00d74cc59b17ae84fb5aee3514ec7b3d Mon Sep 17 00:00:00 2001 From: Luke Anderson Date: Fri, 13 Sep 2019 18:59:01 +1000 Subject: [PATCH 284/305] Updated another function for head_state (missing from previous commit). --- beacon_node/rest_api/src/beacon.rs | 8 +------- 1 file changed, 1 insertion(+), 7 deletions(-) diff --git a/beacon_node/rest_api/src/beacon.rs b/beacon_node/rest_api/src/beacon.rs index c1f49c1fc..8f4c730f9 100644 --- a/beacon_node/rest_api/src/beacon.rs +++ b/beacon_node/rest_api/src/beacon.rs @@ -127,13 +127,7 @@ pub fn get_block_root(req: Request) -> ApiR /// HTTP handler to return the `Fork` of the current head. pub fn get_fork(req: Request) -> ApiResult { let beacon_chain = get_beacon_chain_from_request::(&req)?; - let head_state = get_head_state(beacon_chain)?; - - let json: String = serde_json::to_string(&head_state.fork).map_err(|e| { - ApiError::ServerError(format!("Unable to serialize BeaconState::Fork: {:?}", e)) - })?; - - Ok(success_response(Body::from(json))) + ResponseBuilder::new(&req).body(&beacon_chain.head().beacon_state) } /// HTTP handler to return the set of validators for an `Epoch` From 91f5f17566596e1c87c2042f1a3fec0a60a9fb92 Mon Sep 17 00:00:00 2001 From: Luke Anderson Date: Fri, 13 Sep 2019 19:10:11 +1000 Subject: [PATCH 285/305] Removing 'success_respons' in favour of 'ResponseBuilder' --- beacon_node/rest_api/src/beacon.rs | 20 ++++---------------- beacon_node/rest_api/src/metrics.rs | 2 +- beacon_node/rest_api/src/validator.rs | 16 ++-------------- 3 files changed, 7 insertions(+), 31 deletions(-) diff --git a/beacon_node/rest_api/src/beacon.rs b/beacon_node/rest_api/src/beacon.rs index 302061395..7828898e8 100644 --- a/beacon_node/rest_api/src/beacon.rs +++ b/beacon_node/rest_api/src/beacon.rs @@ -55,10 +55,7 @@ pub fn get_head(req: Request) -> ApiResult previous_justified_block_root: 
chain_head.beacon_state.previous_justified_checkpoint.root, }; - let json: String = serde_json::to_string(&head) - .map_err(|e| ApiError::ServerError(format!("Unable to serialize HeadResponse: {:?}", e)))?; - - Ok(success_response_old(Body::from(json))) + ResponseBuilder::new(&req).body(&head) } #[derive(Serialize, Encode)] @@ -119,10 +116,7 @@ pub fn get_block_root(req: Request) -> ApiR ApiError::NotFound(format!("Unable to find BeaconBlock for slot {:?}", target)) })?; - let json: String = serde_json::to_string(&root) - .map_err(|e| ApiError::ServerError(format!("Unable to serialize root: {:?}", e)))?; - - Ok(success_response_old(Body::from(json))) + ResponseBuilder::new(&req).body(&root) } /// HTTP handler to return the `Fork` of the current head. @@ -231,10 +225,7 @@ pub fn get_state_root(req: Request) -> ApiR let root = state_root_at_slot(&beacon_chain, slot)?; - let json: String = serde_json::to_string(&root) - .map_err(|e| ApiError::ServerError(format!("Unable to serialize root: {:?}", e)))?; - - Ok(success_response_old(Body::from(json))) + ResponseBuilder::new(&req).body(&root) } /// HTTP handler to return the highest finalized slot. @@ -246,10 +237,7 @@ pub fn get_current_finalized_checkpoint( let checkpoint = head_state.finalized_checkpoint.clone(); - let json: String = serde_json::to_string(&checkpoint) - .map_err(|e| ApiError::ServerError(format!("Unable to serialize checkpoint: {:?}", e)))?; - - Ok(success_response_old(Body::from(json))) + ResponseBuilder::new(&req).body(&checkpoint) } /// HTTP handler to return a `BeaconState` at the genesis block. 
diff --git a/beacon_node/rest_api/src/metrics.rs b/beacon_node/rest_api/src/metrics.rs index 09d361b8a..d50e4a98c 100644 --- a/beacon_node/rest_api/src/metrics.rs +++ b/beacon_node/rest_api/src/metrics.rs @@ -1,5 +1,5 @@ use crate::response_builder::ResponseBuilder; -use crate::{helpers::*, success_response, ApiError, ApiResult, DBPath}; +use crate::{helpers::*, ApiError, ApiResult, DBPath}; use beacon_chain::BeaconChainTypes; use http::HeaderValue; use hyper::{Body, Request}; diff --git a/beacon_node/rest_api/src/validator.rs b/beacon_node/rest_api/src/validator.rs index c665a0b1f..05ae6b8c9 100644 --- a/beacon_node/rest_api/src/validator.rs +++ b/beacon_node/rest_api/src/validator.rs @@ -149,11 +149,7 @@ pub fn get_validator_duties(req: Request) - duties.append(&mut vec![duty]); } - let body = Body::from( - serde_json::to_string(&duties) - .expect("We should always be able to serialize the duties we created."), - ); - Ok(success_response_old(body)) + ResponseBuilder::new(&req).body_json(&duties) } /// HTTP Handler to produce a new BeaconBlock from the current state, ready to be signed by a validator. 
@@ -189,10 +185,6 @@ pub fn get_new_beacon_block(req: Request) - )) })?; - let body = Body::from( - serde_json::to_string(&new_block) - .expect("We should always be able to serialize a new block that we produced."), - ); ResponseBuilder::new(&req).body(&new_block) } @@ -359,9 +351,5 @@ pub fn get_new_attestation(req: Request) -> signature: AggregateSignature::new(), }; - let body = Body::from( - serde_json::to_string(&attestation) - .expect("We should always be able to serialize a new attestation that we produced."), - ); - Ok(success_response_old(body)) + ResponseBuilder::new(&req).body(&attestation) } From 006350c0cdaf3032404c8c07bb299cef22bb4ceb Mon Sep 17 00:00:00 2001 From: Luke Anderson Date: Fri, 13 Sep 2019 19:14:09 +1000 Subject: [PATCH 286/305] Fixed small bug with get_fork function --- beacon_node/rest_api/src/beacon.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/beacon_node/rest_api/src/beacon.rs b/beacon_node/rest_api/src/beacon.rs index 7828898e8..fef3cbdf1 100644 --- a/beacon_node/rest_api/src/beacon.rs +++ b/beacon_node/rest_api/src/beacon.rs @@ -122,7 +122,7 @@ pub fn get_block_root(req: Request) -> ApiR /// HTTP handler to return the `Fork` of the current head. pub fn get_fork(req: Request) -> ApiResult { let beacon_chain = get_beacon_chain_from_request::(&req)?; - ResponseBuilder::new(&req).body(&beacon_chain.head().beacon_state) + ResponseBuilder::new(&req).body(&beacon_chain.head().beacon_state.fork) } /// HTTP handler to return the set of validators for an `Epoch` From 1dd86baf1abe0f15893124a9be9cc714841d7834 Mon Sep 17 00:00:00 2001 From: Luke Anderson Date: Fri, 13 Sep 2019 19:38:40 +1000 Subject: [PATCH 287/305] Cleaning up the rest of the API functions. - Removed all unused imports - Fixed random compiler errors - Removed all of the 'sucess_response' helpers. 
- Enabled all of the API endpoints again, wrapping in 'into_boxfut' - Tidied up /metrics endpoint - Added a 'body_text' part to ResponseBuilder, mainly for the Prometheus /metrics endpoint - Cleaned up the unnecessary helpers::* imports, to be more explicit. --- beacon_node/rest_api/src/beacon.rs | 2 +- beacon_node/rest_api/src/error.rs | 6 +- beacon_node/rest_api/src/helpers.rs | 38 +---------- beacon_node/rest_api/src/lib.rs | 71 +++++++++++--------- beacon_node/rest_api/src/metrics.rs | 16 ++--- beacon_node/rest_api/src/network.rs | 5 +- beacon_node/rest_api/src/node.rs | 4 +- beacon_node/rest_api/src/response_builder.rs | 8 +++ beacon_node/rest_api/src/spec.rs | 21 ++---- beacon_node/rest_api/src/validator.rs | 7 +- 10 files changed, 70 insertions(+), 108 deletions(-) diff --git a/beacon_node/rest_api/src/beacon.rs b/beacon_node/rest_api/src/beacon.rs index fef3cbdf1..159337de4 100644 --- a/beacon_node/rest_api/src/beacon.rs +++ b/beacon_node/rest_api/src/beacon.rs @@ -1,6 +1,6 @@ use crate::helpers::*; use crate::response_builder::ResponseBuilder; -use crate::{ApiError, ApiResult, BoxFut, NetworkService, UrlQuery}; +use crate::{ApiError, ApiResult, UrlQuery}; use beacon_chain::{BeaconChain, BeaconChainTypes}; use hyper::{Body, Request}; use serde::Serialize; diff --git a/beacon_node/rest_api/src/error.rs b/beacon_node/rest_api/src/error.rs index 82dc73da0..26cf7ba1f 100644 --- a/beacon_node/rest_api/src/error.rs +++ b/beacon_node/rest_api/src/error.rs @@ -1,7 +1,5 @@ use crate::BoxFut; -use futures::future::IntoFuture; -use futures::Future; -use hyper::{Body, Method, Request, Response, Server, StatusCode}; +use hyper::{Body, Response, StatusCode}; use std::error::Error as StdError; #[derive(PartialEq, Debug, Clone)] @@ -71,7 +69,7 @@ impl From for ApiError { } impl StdError for ApiError { - fn cause(&self) -> Option<&StdError> { + fn cause(&self) -> Option<&dyn StdError> { None } } diff --git a/beacon_node/rest_api/src/helpers.rs 
b/beacon_node/rest_api/src/helpers.rs index 99a8f1dd4..3385e2017 100644 --- a/beacon_node/rest_api/src/helpers.rs +++ b/beacon_node/rest_api/src/helpers.rs @@ -1,53 +1,19 @@ -use crate::response_builder::ResponseBuilder; -use crate::{ApiError, ApiResult, BoxFut}; +use crate::{ApiError, ApiResult}; use beacon_chain::{BeaconChain, BeaconChainTypes}; use bls::PublicKey; use eth2_libp2p::{PubsubMessage, Topic}; use eth2_libp2p::{BEACON_BLOCK_TOPIC, TOPIC_ENCODING_POSTFIX, TOPIC_PREFIX}; use hex; use http::header; -use hyper::{Body, Request, Response, StatusCode}; +use hyper::{Body, Request}; use network::NetworkMessage; use parking_lot::RwLock; -use serde::Serialize; use ssz::Encode; use std::sync::Arc; use store::{iter::AncestorIter, Store}; use tokio::sync::mpsc; use types::{BeaconBlock, BeaconState, EthSpec, Hash256, RelativeEpoch, Slot}; -pub fn success_response(req: Request, item: &T) -> BoxFut { - Box::new(match ResponseBuilder::new(&req).body(item) { - Ok(resp) => futures::future::ok(resp), - Err(e) => futures::future::err(e), - }) -} - -pub fn success_response_2(req: Request, item: &T) -> ApiResult { - ResponseBuilder::new(&req).body(item) -} -pub fn success_response_2_json(req: Request, item: &T) -> ApiResult { - ResponseBuilder::new(&req).body_json(item) -} - -pub fn success_response_json(req: Request, item: &T) -> BoxFut { - if let Err(e) = check_content_type_for_json(&req) { - return Box::new(futures::future::err(e)); - } - Box::new(match ResponseBuilder::new(&req).body_json(item) { - Ok(resp) => futures::future::ok(resp), - Err(e) => futures::future::err(e), - }) -} - -pub fn success_response_old(body: Body) -> Response { - Response::builder() - .status(StatusCode::OK) - .header("content-type", "application/json") - .body(body) - .expect("We should always be able to make response from the success body.") -} - /// Parse a slot from a `0x` preixed string. 
/// /// E.g., `"1234"` diff --git a/beacon_node/rest_api/src/lib.rs b/beacon_node/rest_api/src/lib.rs index 3903d0ea1..35678391a 100644 --- a/beacon_node/rest_api/src/lib.rs +++ b/beacon_node/rest_api/src/lib.rs @@ -23,9 +23,8 @@ use error::{ApiError, ApiResult}; use eth2_config::Eth2Config; use futures::future::IntoFuture; use hyper::rt::Future; -use hyper::server::conn::AddrStream; -use hyper::service::{MakeService, Service}; -use hyper::{Body, Method, Request, Response, Server, StatusCode}; +use hyper::service::Service; +use hyper::{Body, Method, Request, Response, Server}; use parking_lot::RwLock; use slog::{info, o, warn}; use std::ops::Deref; @@ -37,8 +36,6 @@ use url_query::UrlQuery; pub use beacon::{BlockResponse, HeadResponse, StateResponse}; pub use config::Config as ApiConfig; -use eth2_libp2p::rpc::RequestId; -use serde::export::PhantomData; type BoxFut = Box, Error = ApiError> + Send>; @@ -107,58 +104,66 @@ impl Service for ApiService { (&Method::GET, "/network/listen_addresses") => { into_boxfut(network::get_listen_addresses::(req)) } - /* // Methods for Beacon Node - (&Method::GET, "/beacon/head") => beacon::get_head::(req), - (&Method::GET, "/beacon/block") => beacon::get_block::(req), - (&Method::GET, "/beacon/block_root") => beacon::get_block_root::(req), - (&Method::GET, "/beacon/blocks") => helpers::implementation_pending_response(req), - (&Method::GET, "/beacon/fork") => beacon::get_fork::(req), - (&Method::GET, "/beacon/attestations") => helpers::implementation_pending_response(req), + (&Method::GET, "/beacon/head") => into_boxfut(beacon::get_head::(req)), + (&Method::GET, "/beacon/block") => into_boxfut(beacon::get_block::(req)), + (&Method::GET, "/beacon/block_root") => into_boxfut(beacon::get_block_root::(req)), + (&Method::GET, "/beacon/blocks") => { + into_boxfut(helpers::implementation_pending_response(req)) + } + (&Method::GET, "/beacon/fork") => into_boxfut(beacon::get_fork::(req)), + (&Method::GET, "/beacon/attestations") => { + 
into_boxfut(helpers::implementation_pending_response(req)) + } (&Method::GET, "/beacon/attestations/pending") => { - helpers::implementation_pending_response(req) + into_boxfut(helpers::implementation_pending_response(req)) } - (&Method::GET, "/beacon/validators") => beacon::get_validators::(req), + (&Method::GET, "/beacon/validators") => into_boxfut(beacon::get_validators::(req)), (&Method::GET, "/beacon/validators/indicies") => { - helpers::implementation_pending_response(req) + into_boxfut(helpers::implementation_pending_response(req)) } (&Method::GET, "/beacon/validators/pubkeys") => { - helpers::implementation_pending_response(req) + into_boxfut(helpers::implementation_pending_response(req)) } // Methods for Validator - (&Method::GET, "/beacon/validator/duties") => validator::get_validator_duties::(req), - (&Method::GET, "/beacon/validator/block") => validator::get_new_beacon_block::(req), - */ + (&Method::GET, "/beacon/validator/duties") => { + into_boxfut(validator::get_validator_duties::(req)) + } + (&Method::GET, "/beacon/validator/block") => { + into_boxfut(validator::get_new_beacon_block::(req)) + } (&Method::POST, "/beacon/validator/block") => validator::publish_beacon_block::(req), - /* (&Method::GET, "/beacon/validator/attestation") => { - validator::get_new_attestation::(req) + into_boxfut(validator::get_new_attestation::(req)) } (&Method::POST, "/beacon/validator/attestation") => { - helpers::implementation_pending_response(req) + into_boxfut(helpers::implementation_pending_response(req)) } - (&Method::GET, "/beacon/state") => beacon::get_state::(req), - (&Method::GET, "/beacon/state_root") => beacon::get_state_root::(req), + (&Method::GET, "/beacon/state") => into_boxfut(beacon::get_state::(req)), + (&Method::GET, "/beacon/state_root") => into_boxfut(beacon::get_state_root::(req)), (&Method::GET, "/beacon/state/current_finalized_checkpoint") => { - beacon::get_current_finalized_checkpoint::(req) + 
into_boxfut(beacon::get_current_finalized_checkpoint::(req)) + } + (&Method::GET, "/beacon/state/genesis") => { + into_boxfut(beacon::get_genesis_state::(req)) } - (&Method::GET, "/beacon/state/genesis") => beacon::get_genesis_state::(req), //TODO: Add aggreggate/filtered state lookups here, e.g. /beacon/validators/balances // Methods for bootstrap and checking configuration - (&Method::GET, "/spec") => spec::get_spec::(req), - (&Method::GET, "/spec/slots_per_epoch") => spec::get_slots_per_epoch::(req), - (&Method::GET, "/spec/deposit_contract") => { - helpers::implementation_pending_response(req) + (&Method::GET, "/spec") => into_boxfut(spec::get_spec::(req)), + (&Method::GET, "/spec/slots_per_epoch") => { + into_boxfut(spec::get_slots_per_epoch::(req)) } - (&Method::GET, "/spec/eth2_config") => spec::get_eth2_config::(req), + (&Method::GET, "/spec/deposit_contract") => { + into_boxfut(helpers::implementation_pending_response(req)) + } + (&Method::GET, "/spec/eth2_config") => into_boxfut(spec::get_eth2_config::(req)), - (&Method::GET, "/metrics") => metrics::get_prometheus::(req), - */ + (&Method::GET, "/metrics") => into_boxfut(metrics::get_prometheus::(req)), _ => Box::new(futures::future::err(ApiError::NotFound( "Request path and/or method not found.".to_owned(), ))), diff --git a/beacon_node/rest_api/src/metrics.rs b/beacon_node/rest_api/src/metrics.rs index d50e4a98c..33437a534 100644 --- a/beacon_node/rest_api/src/metrics.rs +++ b/beacon_node/rest_api/src/metrics.rs @@ -1,7 +1,7 @@ +use crate::helpers::get_beacon_chain_from_request; use crate::response_builder::ResponseBuilder; -use crate::{helpers::*, ApiError, ApiResult, DBPath}; +use crate::{ApiError, ApiResult, DBPath}; use beacon_chain::BeaconChainTypes; -use http::HeaderValue; use hyper::{Body, Request}; use prometheus::{Encoder, TextEncoder}; @@ -62,14 +62,6 @@ pub fn get_prometheus(req: Request) -> ApiR .unwrap(); String::from_utf8(buffer) - .map(|string| { - let mut response = 
success_response_old(Body::from(string)); - // Need to change the header to text/plain for prometheus - response.headers_mut().insert( - "content-type", - HeaderValue::from_static("text/plain; charset=utf-8"), - ); - response - }) - .map_err(|e| ApiError::ServerError(format!("Failed to encode prometheus info: {:?}", e))) + .map(|string| ResponseBuilder::new(&req).body_text(string)) + .map_err(|e| ApiError::ServerError(format!("Failed to encode prometheus info: {:?}", e)))? } diff --git a/beacon_node/rest_api/src/network.rs b/beacon_node/rest_api/src/network.rs index 26e5623c2..afbddde84 100644 --- a/beacon_node/rest_api/src/network.rs +++ b/beacon_node/rest_api/src/network.rs @@ -1,9 +1,8 @@ -use crate::error::{ApiError, ApiResult}; -use crate::helpers::*; +use crate::error::ApiResult; use crate::response_builder::ResponseBuilder; use crate::NetworkService; use beacon_chain::BeaconChainTypes; -use eth2_libp2p::{Enr, Multiaddr, PeerId}; +use eth2_libp2p::{Multiaddr, PeerId}; use hyper::{Body, Request}; use std::sync::Arc; diff --git a/beacon_node/rest_api/src/node.rs b/beacon_node/rest_api/src/node.rs index 3eb8e5594..cb1b28df7 100644 --- a/beacon_node/rest_api/src/node.rs +++ b/beacon_node/rest_api/src/node.rs @@ -1,6 +1,6 @@ -use crate::helpers::*; +use crate::helpers::get_beacon_chain_from_request; use crate::response_builder::ResponseBuilder; -use crate::{ApiResult, BoxFut}; +use crate::ApiResult; use beacon_chain::BeaconChainTypes; use hyper::{Body, Request}; use version; diff --git a/beacon_node/rest_api/src/response_builder.rs b/beacon_node/rest_api/src/response_builder.rs index b48b9e41a..31f717697 100644 --- a/beacon_node/rest_api/src/response_builder.rs +++ b/beacon_node/rest_api/src/response_builder.rs @@ -61,4 +61,12 @@ impl ResponseBuilder { })?)) .map_err(|e| ApiError::ServerError(format!("Failed to build response: {:?}", e))) } + + pub fn body_text(self, text: String) -> ApiResult { + Response::builder() + .status(StatusCode::OK) + 
.header("content-type", "text/plain; charset=utf-8") + .body(Body::from(text)) + .map_err(|e| ApiError::ServerError(format!("Failed to build response: {:?}", e))) + } } diff --git a/beacon_node/rest_api/src/spec.rs b/beacon_node/rest_api/src/spec.rs index 5ab518636..55a139f16 100644 --- a/beacon_node/rest_api/src/spec.rs +++ b/beacon_node/rest_api/src/spec.rs @@ -1,5 +1,6 @@ use super::ApiResult; -use crate::helpers::*; +use crate::helpers::get_beacon_chain_from_request; +use crate::response_builder::ResponseBuilder; use crate::ApiError; use beacon_chain::BeaconChainTypes; use eth2_config::Eth2Config; @@ -10,11 +11,7 @@ use types::EthSpec; /// HTTP handler to return the full spec object. pub fn get_spec(req: Request) -> ApiResult { let beacon_chain = get_beacon_chain_from_request::(&req)?; - - let json: String = serde_json::to_string(&beacon_chain.spec) - .map_err(|e| ApiError::ServerError(format!("Unable to serialize spec: {:?}", e)))?; - - Ok(success_response_old(Body::from(json))) + ResponseBuilder::new(&req).body_json(&beacon_chain.spec) } /// HTTP handler to return the full Eth2Config object. @@ -24,16 +21,10 @@ pub fn get_eth2_config(req: Request) -> Api .get::>() .ok_or_else(|| ApiError::ServerError("Eth2Config extension missing".to_string()))?; - let json: String = serde_json::to_string(eth2_config.as_ref()) - .map_err(|e| ApiError::ServerError(format!("Unable to serialize Eth2Config: {:?}", e)))?; - - Ok(success_response_old(Body::from(json))) + ResponseBuilder::new(&req).body_json(eth2_config.as_ref()) } /// HTTP handler to return the full spec object. 
-pub fn get_slots_per_epoch(_req: Request) -> ApiResult { - let json: String = serde_json::to_string(&T::EthSpec::slots_per_epoch()) - .map_err(|e| ApiError::ServerError(format!("Unable to serialize epoch: {:?}", e)))?; - - Ok(success_response_old(Body::from(json))) +pub fn get_slots_per_epoch(req: Request) -> ApiResult { + ResponseBuilder::new(&req).body(&T::EthSpec::slots_per_epoch()) } diff --git a/beacon_node/rest_api/src/validator.rs b/beacon_node/rest_api/src/validator.rs index 05ae6b8c9..d9d55bad4 100644 --- a/beacon_node/rest_api/src/validator.rs +++ b/beacon_node/rest_api/src/validator.rs @@ -1,11 +1,14 @@ -use crate::helpers::*; +use crate::helpers::{ + check_content_type_for_json, get_beacon_chain_from_request, get_logger_from_request, + parse_pubkey, publish_beacon_block_to_network, +}; use crate::response_builder::ResponseBuilder; use crate::{ApiError, ApiResult, BoxFut, UrlQuery}; use beacon_chain::{BeaconChainTypes, BlockProcessingOutcome}; use bls::{AggregateSignature, PublicKey, Signature}; use futures::future::Future; use futures::stream::Stream; -use hyper::{Body, Error, Request}; +use hyper::{Body, Request}; use network::NetworkMessage; use parking_lot::RwLock; use serde::{Deserialize, Serialize}; From f48311900ead67638ccddf09cb4b198c15589f7f Mon Sep 17 00:00:00 2001 From: Luke Anderson Date: Fri, 13 Sep 2019 20:42:56 +1000 Subject: [PATCH 288/305] Restructured response builder to give YAML or JSON when SSZ is not available, not just JSON. 
--- beacon_node/rest_api/src/beacon.rs | 18 ++--- beacon_node/rest_api/src/error.rs | 2 + beacon_node/rest_api/src/metrics.rs | 2 +- beacon_node/rest_api/src/network.rs | 12 ++-- beacon_node/rest_api/src/node.rs | 4 +- beacon_node/rest_api/src/response_builder.rs | 72 +++++++++++++------- beacon_node/rest_api/src/spec.rs | 6 +- beacon_node/rest_api/src/validator.rs | 8 +-- 8 files changed, 76 insertions(+), 48 deletions(-) diff --git a/beacon_node/rest_api/src/beacon.rs b/beacon_node/rest_api/src/beacon.rs index 159337de4..13f52dc9a 100644 --- a/beacon_node/rest_api/src/beacon.rs +++ b/beacon_node/rest_api/src/beacon.rs @@ -55,7 +55,7 @@ pub fn get_head(req: Request) -> ApiResult previous_justified_block_root: chain_head.beacon_state.previous_justified_checkpoint.root, }; - ResponseBuilder::new(&req).body(&head) + ResponseBuilder::new(&req)?.body(&head) } #[derive(Serialize, Encode)] @@ -102,7 +102,7 @@ pub fn get_block(req: Request) -> ApiResult beacon_block: block, }; - ResponseBuilder::new(&req).body(&response) + ResponseBuilder::new(&req)?.body(&response) } /// HTTP handler to return a `BeaconBlock` root at a given `slot`. @@ -116,13 +116,13 @@ pub fn get_block_root(req: Request) -> ApiR ApiError::NotFound(format!("Unable to find BeaconBlock for slot {:?}", target)) })?; - ResponseBuilder::new(&req).body(&root) + ResponseBuilder::new(&req)?.body(&root) } /// HTTP handler to return the `Fork` of the current head. 
pub fn get_fork(req: Request) -> ApiResult { let beacon_chain = get_beacon_chain_from_request::(&req)?; - ResponseBuilder::new(&req).body(&beacon_chain.head().beacon_state.fork) + ResponseBuilder::new(&req)?.body(&beacon_chain.head().beacon_state.fork) } /// HTTP handler to return the set of validators for an `Epoch` @@ -157,7 +157,7 @@ pub fn get_validators(req: Request) -> ApiR .cloned() .collect(); - ResponseBuilder::new(&req).body(&active_vals) + ResponseBuilder::new(&req)?.body(&active_vals) } #[derive(Serialize, Encode)] @@ -210,7 +210,7 @@ pub fn get_state(req: Request) -> ApiResult beacon_state: state, }; - ResponseBuilder::new(&req).body(&response) + ResponseBuilder::new(&req)?.body(&response) } /// HTTP handler to return a `BeaconState` root at a given `slot`. @@ -225,7 +225,7 @@ pub fn get_state_root(req: Request) -> ApiR let root = state_root_at_slot(&beacon_chain, slot)?; - ResponseBuilder::new(&req).body(&root) + ResponseBuilder::new(&req)?.body(&root) } /// HTTP handler to return the highest finalized slot. @@ -237,7 +237,7 @@ pub fn get_current_finalized_checkpoint( let checkpoint = head_state.finalized_checkpoint.clone(); - ResponseBuilder::new(&req).body(&checkpoint) + ResponseBuilder::new(&req)?.body(&checkpoint) } /// HTTP handler to return a `BeaconState` at the genesis block. @@ -246,5 +246,5 @@ pub fn get_genesis_state(req: Request) -> A let (_root, state) = state_at_slot(&beacon_chain, Slot::new(0))?; - ResponseBuilder::new(&req).body(&state) + ResponseBuilder::new(&req)?.body(&state) } diff --git a/beacon_node/rest_api/src/error.rs b/beacon_node/rest_api/src/error.rs index 26cf7ba1f..e52ba4af6 100644 --- a/beacon_node/rest_api/src/error.rs +++ b/beacon_node/rest_api/src/error.rs @@ -9,6 +9,7 @@ pub enum ApiError { NotImplemented(String), InvalidQueryParams(String), NotFound(String), + UnsupportedType(String), ImATeapot(String), // Just in case. 
} @@ -22,6 +23,7 @@ impl ApiError { ApiError::NotImplemented(desc) => (StatusCode::NOT_IMPLEMENTED, desc), ApiError::InvalidQueryParams(desc) => (StatusCode::BAD_REQUEST, desc), ApiError::NotFound(desc) => (StatusCode::NOT_FOUND, desc), + ApiError::UnsupportedType(desc) => (StatusCode::UNSUPPORTED_MEDIA_TYPE, desc), ApiError::ImATeapot(desc) => (StatusCode::IM_A_TEAPOT, desc), } } diff --git a/beacon_node/rest_api/src/metrics.rs b/beacon_node/rest_api/src/metrics.rs index 33437a534..e9d98434e 100644 --- a/beacon_node/rest_api/src/metrics.rs +++ b/beacon_node/rest_api/src/metrics.rs @@ -62,6 +62,6 @@ pub fn get_prometheus(req: Request) -> ApiR .unwrap(); String::from_utf8(buffer) - .map(|string| ResponseBuilder::new(&req).body_text(string)) + .map(|string| ResponseBuilder::new(&req)?.body_text(string)) .map_err(|e| ApiError::ServerError(format!("Failed to encode prometheus info: {:?}", e)))? } diff --git a/beacon_node/rest_api/src/network.rs b/beacon_node/rest_api/src/network.rs index afbddde84..f193ef8ea 100644 --- a/beacon_node/rest_api/src/network.rs +++ b/beacon_node/rest_api/src/network.rs @@ -15,7 +15,7 @@ pub fn get_listen_addresses(req: Request) -> ApiResul .get::>>() .expect("The network service should always be there, we put it there"); let multiaddresses: Vec = network.listen_multiaddrs(); - ResponseBuilder::new(&req).body_json(&multiaddresses) + ResponseBuilder::new(&req)?.body_no_ssz(&multiaddresses) } /// HTTP handler to return the network port the client is listening on. @@ -27,7 +27,7 @@ pub fn get_listen_port(req: Request) -> ApiResult { .get::>>() .expect("The network service should always be there, we put it there") .clone(); - ResponseBuilder::new(&req).body(&network.listen_port()) + ResponseBuilder::new(&req)?.body(&network.listen_port()) } /// HTTP handler to return the Discv5 ENR from the client's libp2p service. 
@@ -38,7 +38,7 @@ pub fn get_enr(req: Request) -> ApiResult { .extensions() .get::>>() .expect("The network service should always be there, we put it there"); - ResponseBuilder::new(&req).body_json(&network.local_enr().to_base64()) + ResponseBuilder::new(&req)?.body_no_ssz(&network.local_enr().to_base64()) } /// HTTP handler to return the `PeerId` from the client's libp2p service. @@ -49,7 +49,7 @@ pub fn get_peer_id(req: Request) -> ApiResult { .extensions() .get::>>() .expect("The network service should always be there, we put it there"); - ResponseBuilder::new(&req).body_json(&network.local_peer_id().to_base58()) + ResponseBuilder::new(&req)?.body_no_ssz(&network.local_peer_id().to_base58()) } /// HTTP handler to return the number of peers connected in the client's libp2p service. @@ -58,7 +58,7 @@ pub fn get_peer_count(req: Request) -> ApiResult { .extensions() .get::>>() .expect("The network service should always be there, we put it there"); - ResponseBuilder::new(&req).body(&network.connected_peers()) + ResponseBuilder::new(&req)?.body(&network.connected_peers()) } /// HTTP handler to return the list of peers connected to the client's libp2p service. @@ -74,5 +74,5 @@ pub fn get_peer_list(req: Request) -> ApiResult { .iter() .map(PeerId::to_string) .collect(); - ResponseBuilder::new(&req).body_json(&connected_peers) + ResponseBuilder::new(&req)?.body_no_ssz(&connected_peers) } diff --git a/beacon_node/rest_api/src/node.rs b/beacon_node/rest_api/src/node.rs index cb1b28df7..882edcfd5 100644 --- a/beacon_node/rest_api/src/node.rs +++ b/beacon_node/rest_api/src/node.rs @@ -7,11 +7,11 @@ use version; /// Read the version string from the current Lighthouse build. pub fn get_version(req: Request) -> ApiResult { - ResponseBuilder::new(&req).body_json(&version::version()) + ResponseBuilder::new(&req)?.body_no_ssz(&version::version()) } /// Read the genesis time from the current beacon chain state. 
pub fn get_genesis_time(req: Request) -> ApiResult { let beacon_chain = get_beacon_chain_from_request::(&req)?; - ResponseBuilder::new(&req).body(&beacon_chain.head().beacon_state.genesis_time) + ResponseBuilder::new(&req)?.body(&beacon_chain.head().beacon_state.genesis_time) } diff --git a/beacon_node/rest_api/src/response_builder.rs b/beacon_node/rest_api/src/response_builder.rs index 31f717697..793360cd2 100644 --- a/beacon_node/rest_api/src/response_builder.rs +++ b/beacon_node/rest_api/src/response_builder.rs @@ -8,6 +8,7 @@ pub enum Encoding { JSON, SSZ, YAML, + TEXT, } pub struct ResponseBuilder { @@ -15,22 +16,55 @@ pub struct ResponseBuilder { } impl ResponseBuilder { - pub fn new(req: &Request) -> Self { - let encoding = match req.headers().get(header::CONTENT_TYPE) { - Some(h) if h == "application/ssz" => Encoding::SSZ, - Some(h) if h == "application/yaml" => Encoding::YAML, + pub fn new(req: &Request) -> Result { + let content_header = req + .headers() + .get(header::CONTENT_TYPE) + .map_or(Ok(""), |h| h.to_str()) + .map_err(|e| { + ApiError::InvalidQueryParams(format!( + "The content-type header contains invalid characters: {:?}", + e + )) + }) + .map(|h| String::from(h))?; + + let encoding = match content_header { + ref h if h.starts_with("application/ssz") => Encoding::SSZ, + ref h if h.starts_with("application/yaml") => Encoding::YAML, + ref h if h.starts_with("text/plain") => Encoding::TEXT, _ => Encoding::JSON, }; - - Self { encoding } + Ok(Self { encoding }) } pub fn body(self, item: &T) -> ApiResult { + match self.encoding { + Encoding::SSZ => Response::builder() + .status(StatusCode::OK) + .header("content-type", "application/ssz") + .body(Body::from(item.as_ssz_bytes())) + .map_err(|e| ApiError::ServerError(format!("Failed to build response: {:?}", e))), + _ => self.body_no_ssz(item), + } + } + + pub fn body_no_ssz(self, item: &T) -> ApiResult { let (body, content_type) = match self.encoding { - Encoding::JSON => { - return 
self.body_json(item); + Encoding::JSON => ( + Body::from(serde_json::to_string(&item).map_err(|e| { + ApiError::ServerError(format!( + "Unable to serialize response body as JSON: {:?}", + e + )) + })?), + "application/json", + ), + Encoding::SSZ => { + return Err(ApiError::UnsupportedType( + "Response cannot be encoded as SSZ.".into(), + )); } - Encoding::SSZ => (Body::from(item.as_ssz_bytes()), "application/ssz"), Encoding::YAML => ( Body::from(serde_yaml::to_string(&item).map_err(|e| { ApiError::ServerError(format!( @@ -38,8 +72,13 @@ impl ResponseBuilder { e )) })?), - "application/ssz", + "application/yaml", ), + Encoding::TEXT => { + return Err(ApiError::UnsupportedType( + "Response cannot be encoded as plain text.".into(), + )); + } }; Response::builder() @@ -49,19 +88,6 @@ impl ResponseBuilder { .map_err(|e| ApiError::ServerError(format!("Failed to build response: {:?}", e))) } - pub fn body_json(self, item: &T) -> ApiResult { - Response::builder() - .status(StatusCode::OK) - .header("content-type", "application/json") - .body(Body::from(serde_json::to_string(&item).map_err(|e| { - ApiError::ServerError(format!( - "Unable to serialize response body as JSON: {:?}", - e - )) - })?)) - .map_err(|e| ApiError::ServerError(format!("Failed to build response: {:?}", e))) - } - pub fn body_text(self, text: String) -> ApiResult { Response::builder() .status(StatusCode::OK) diff --git a/beacon_node/rest_api/src/spec.rs b/beacon_node/rest_api/src/spec.rs index 55a139f16..083ff5ad4 100644 --- a/beacon_node/rest_api/src/spec.rs +++ b/beacon_node/rest_api/src/spec.rs @@ -11,7 +11,7 @@ use types::EthSpec; /// HTTP handler to return the full spec object. pub fn get_spec(req: Request) -> ApiResult { let beacon_chain = get_beacon_chain_from_request::(&req)?; - ResponseBuilder::new(&req).body_json(&beacon_chain.spec) + ResponseBuilder::new(&req)?.body_no_ssz(&beacon_chain.spec) } /// HTTP handler to return the full Eth2Config object. 
@@ -21,10 +21,10 @@ pub fn get_eth2_config(req: Request) -> Api .get::>() .ok_or_else(|| ApiError::ServerError("Eth2Config extension missing".to_string()))?; - ResponseBuilder::new(&req).body_json(eth2_config.as_ref()) + ResponseBuilder::new(&req)?.body_no_ssz(eth2_config.as_ref()) } /// HTTP handler to return the full spec object. pub fn get_slots_per_epoch(req: Request) -> ApiResult { - ResponseBuilder::new(&req).body(&T::EthSpec::slots_per_epoch()) + ResponseBuilder::new(&req)?.body(&T::EthSpec::slots_per_epoch()) } diff --git a/beacon_node/rest_api/src/validator.rs b/beacon_node/rest_api/src/validator.rs index d9d55bad4..b79466b4d 100644 --- a/beacon_node/rest_api/src/validator.rs +++ b/beacon_node/rest_api/src/validator.rs @@ -152,7 +152,7 @@ pub fn get_validator_duties(req: Request) - duties.append(&mut vec![duty]); } - ResponseBuilder::new(&req).body_json(&duties) + ResponseBuilder::new(&req)?.body_no_ssz(&duties) } /// HTTP Handler to produce a new BeaconBlock from the current state, ready to be signed by a validator. @@ -188,7 +188,7 @@ pub fn get_new_beacon_block(req: Request) - )) })?; - ResponseBuilder::new(&req).body(&new_block) + ResponseBuilder::new(&req)?.body(&new_block) } /// HTTP Handler to publish a BeaconBlock, which has been signed by a validator. @@ -246,7 +246,7 @@ pub fn publish_beacon_block(req: Request) - } } }).and_then(|_| { - response_builder.body_json(&()) + response_builder?.body_no_ssz(&()) })) } @@ -354,5 +354,5 @@ pub fn get_new_attestation(req: Request) -> signature: AggregateSignature::new(), }; - ResponseBuilder::new(&req).body(&attestation) + ResponseBuilder::new(&req)?.body(&attestation) } From d3ce182ddc80417fa091d3d8818c6a098cd00f15 Mon Sep 17 00:00:00 2001 From: Luke Anderson Date: Fri, 13 Sep 2019 20:52:12 +1000 Subject: [PATCH 289/305] Renamed 'InvalidQueryParams' to 'BadRequest', since it is a more general error that is returned in a number of cases. 
--- beacon_node/rest_api/src/beacon.rs | 7 ++--- beacon_node/rest_api/src/error.rs | 4 +-- beacon_node/rest_api/src/helpers.rs | 16 +++++----- beacon_node/rest_api/src/response_builder.rs | 7 +++-- beacon_node/rest_api/src/url_query.rs | 8 ++--- beacon_node/rest_api/src/validator.rs | 33 +++++++++----------- 6 files changed, 34 insertions(+), 41 deletions(-) diff --git a/beacon_node/rest_api/src/beacon.rs b/beacon_node/rest_api/src/beacon.rs index 13f52dc9a..c1a9da6ee 100644 --- a/beacon_node/rest_api/src/beacon.rs +++ b/beacon_node/rest_api/src/beacon.rs @@ -139,10 +139,7 @@ pub fn get_validators(req: Request) -> ApiR .parse::() .map(Epoch::from) .map_err(|e| { - ApiError::InvalidQueryParams(format!( - "Invalid epoch parameter, must be a u64. {:?}", - e - )) + ApiError::BadRequest(format!("Invalid epoch parameter, must be a u64. {:?}", e)) })?, // In this case, our url query did not contain any parameters, so we take the default Err(_) => beacon_chain.epoch().map_err(|e| { @@ -181,7 +178,7 @@ pub fn get_state(req: Request) -> ApiResult let query_params = ["root", "slot"]; query.first_of(&query_params)? } - Err(ApiError::InvalidQueryParams(_)) => { + Err(ApiError::BadRequest(_)) => { // No parameters provided at all, use current slot. (String::from("slot"), head_state.slot.to_string()) } diff --git a/beacon_node/rest_api/src/error.rs b/beacon_node/rest_api/src/error.rs index e52ba4af6..70384dce9 100644 --- a/beacon_node/rest_api/src/error.rs +++ b/beacon_node/rest_api/src/error.rs @@ -7,7 +7,7 @@ pub enum ApiError { MethodNotAllowed(String), ServerError(String), NotImplemented(String), - InvalidQueryParams(String), + BadRequest(String), NotFound(String), UnsupportedType(String), ImATeapot(String), // Just in case. 
@@ -21,7 +21,7 @@ impl ApiError { ApiError::MethodNotAllowed(desc) => (StatusCode::METHOD_NOT_ALLOWED, desc), ApiError::ServerError(desc) => (StatusCode::INTERNAL_SERVER_ERROR, desc), ApiError::NotImplemented(desc) => (StatusCode::NOT_IMPLEMENTED, desc), - ApiError::InvalidQueryParams(desc) => (StatusCode::BAD_REQUEST, desc), + ApiError::BadRequest(desc) => (StatusCode::BAD_REQUEST, desc), ApiError::NotFound(desc) => (StatusCode::NOT_FOUND, desc), ApiError::UnsupportedType(desc) => (StatusCode::UNSUPPORTED_MEDIA_TYPE, desc), ApiError::ImATeapot(desc) => (StatusCode::IM_A_TEAPOT, desc), diff --git a/beacon_node/rest_api/src/helpers.rs b/beacon_node/rest_api/src/helpers.rs index 3385e2017..3f76f4e25 100644 --- a/beacon_node/rest_api/src/helpers.rs +++ b/beacon_node/rest_api/src/helpers.rs @@ -21,7 +21,7 @@ pub fn parse_slot(string: &str) -> Result { string .parse::() .map(Slot::from) - .map_err(|e| ApiError::InvalidQueryParams(format!("Unable to parse slot: {:?}", e))) + .map_err(|e| ApiError::BadRequest(format!("Unable to parse slot: {:?}", e))) } /// Checks the provided request to ensure that the `content-type` header. 
@@ -31,7 +31,7 @@ pub fn parse_slot(string: &str) -> Result { pub fn check_content_type_for_json(req: &Request) -> Result<(), ApiError> { match req.headers().get(header::CONTENT_TYPE) { Some(h) if h == "application/json" => Ok(()), - Some(h) => Err(ApiError::InvalidQueryParams(format!( + Some(h) => Err(ApiError::BadRequest(format!( "The provided content-type {:?} is not available, this endpoint only supports json.", h ))), @@ -49,9 +49,9 @@ pub fn parse_root(string: &str) -> Result { let trimmed = string.trim_start_matches(PREFIX); trimmed .parse() - .map_err(|e| ApiError::InvalidQueryParams(format!("Unable to parse root: {:?}", e))) + .map_err(|e| ApiError::BadRequest(format!("Unable to parse root: {:?}", e))) } else { - Err(ApiError::InvalidQueryParams( + Err(ApiError::BadRequest( "Root must have a '0x' prefix".to_string(), )) } @@ -62,13 +62,13 @@ pub fn parse_pubkey(string: &str) -> Result { const PREFIX: &str = "0x"; if string.starts_with(PREFIX) { let pubkey_bytes = hex::decode(string.trim_start_matches(PREFIX)) - .map_err(|e| ApiError::InvalidQueryParams(format!("Invalid hex string: {:?}", e)))?; + .map_err(|e| ApiError::BadRequest(format!("Invalid hex string: {:?}", e)))?; let pubkey = PublicKey::from_bytes(pubkey_bytes.as_slice()).map_err(|e| { - ApiError::InvalidQueryParams(format!("Unable to deserialize public key: {:?}.", e)) + ApiError::BadRequest(format!("Unable to deserialize public key: {:?}.", e)) })?; return Ok(pubkey); } else { - return Err(ApiError::InvalidQueryParams( + return Err(ApiError::BadRequest( "Public key must have a '0x' prefix".to_string(), )); } @@ -145,7 +145,7 @@ pub fn state_root_at_slot( // // We could actually speculate about future state roots by skipping slots, however that's // likely to cause confusion for API users. 
- Err(ApiError::InvalidQueryParams(format!( + Err(ApiError::BadRequest(format!( "Requested slot {} is past the current slot {}", slot, current_slot ))) diff --git a/beacon_node/rest_api/src/response_builder.rs b/beacon_node/rest_api/src/response_builder.rs index 793360cd2..d5b530f8a 100644 --- a/beacon_node/rest_api/src/response_builder.rs +++ b/beacon_node/rest_api/src/response_builder.rs @@ -17,22 +17,23 @@ pub struct ResponseBuilder { impl ResponseBuilder { pub fn new(req: &Request) -> Result { - let content_header = req + let content_header: String = req .headers() .get(header::CONTENT_TYPE) .map_or(Ok(""), |h| h.to_str()) .map_err(|e| { - ApiError::InvalidQueryParams(format!( + ApiError::BadRequest(format!( "The content-type header contains invalid characters: {:?}", e )) }) .map(|h| String::from(h))?; + // JSON is our default encoding, unless something else is requested. let encoding = match content_header { ref h if h.starts_with("application/ssz") => Encoding::SSZ, ref h if h.starts_with("application/yaml") => Encoding::YAML, - ref h if h.starts_with("text/plain") => Encoding::TEXT, + ref h if h.starts_with("text/") => Encoding::TEXT, _ => Encoding::JSON, }; Ok(Self { encoding }) diff --git a/beacon_node/rest_api/src/url_query.rs b/beacon_node/rest_api/src/url_query.rs index 3802ff831..f0c587a32 100644 --- a/beacon_node/rest_api/src/url_query.rs +++ b/beacon_node/rest_api/src/url_query.rs @@ -12,7 +12,7 @@ impl<'a> UrlQuery<'a> { /// Returns `Err` if `req` does not contain any query parameters. 
pub fn from_request(req: &'a Request) -> Result { let query_str = req.uri().query().ok_or_else(|| { - ApiError::InvalidQueryParams( + ApiError::BadRequest( "URL query must be valid and contain at least one key.".to_string(), ) })?; @@ -28,7 +28,7 @@ impl<'a> UrlQuery<'a> { .find(|(key, _value)| keys.contains(&&**key)) .map(|(key, value)| (key.into_owned(), value.into_owned())) .ok_or_else(|| { - ApiError::InvalidQueryParams(format!( + ApiError::BadRequest(format!( "URL query must contain at least one of the following keys: {:?}", keys )) @@ -48,13 +48,13 @@ impl<'a> UrlQuery<'a> { if first_key == key { Ok(first_value.to_string()) } else { - Err(ApiError::InvalidQueryParams(format!( + Err(ApiError::BadRequest(format!( "Only the {} query parameter is supported", key ))) } } else { - Err(ApiError::InvalidQueryParams(format!( + Err(ApiError::BadRequest(format!( "Only one query parameter is allowed, {} supplied", queries.len() ))) diff --git a/beacon_node/rest_api/src/validator.rs b/beacon_node/rest_api/src/validator.rs index b79466b4d..53d9e8b8b 100644 --- a/beacon_node/rest_api/src/validator.rs +++ b/beacon_node/rest_api/src/validator.rs @@ -58,10 +58,7 @@ pub fn get_validator_duties(req: Request) - slog::trace!(log, "Requested epoch {:?}", v); Epoch::new(v.parse::().map_err(|e| { slog::info!(log, "Invalid epoch {:?}", e); - ApiError::InvalidQueryParams(format!( - "Invalid epoch parameter, must be a u64. {:?}", - e - )) + ApiError::BadRequest(format!("Invalid epoch parameter, must be a u64. {:?}", e)) })?) 
} Err(_) => { @@ -72,7 +69,7 @@ pub fn get_validator_duties(req: Request) - }; let relative_epoch = RelativeEpoch::from_epoch(current_epoch, epoch).map_err(|e| { slog::info!(log, "Requested epoch out of range."); - ApiError::InvalidQueryParams(format!( + ApiError::BadRequest(format!( "Cannot get RelativeEpoch, epoch out of range: {:?}", e )) @@ -166,17 +163,17 @@ pub fn get_new_beacon_block(req: Request) - .parse::() .map(Slot::from) .map_err(|e| { - ApiError::InvalidQueryParams(format!("Invalid slot parameter, must be a u64. {:?}", e)) + ApiError::BadRequest(format!("Invalid slot parameter, must be a u64. {:?}", e)) })?; let randao_bytes = query .first_of(&["randao_reveal"]) .map(|(_key, value)| value) .map(hex::decode)? .map_err(|e| { - ApiError::InvalidQueryParams(format!("Invalid hex string for randao_reveal: {:?}", e)) + ApiError::BadRequest(format!("Invalid hex string for randao_reveal: {:?}", e)) })?; let randao_reveal = Signature::from_bytes(randao_bytes.as_slice()).map_err(|e| { - ApiError::InvalidQueryParams(format!("randao_reveal is not a valid signature: {:?}", e)) + ApiError::BadRequest(format!("randao_reveal is not a valid signature: {:?}", e)) })?; let (new_block, _state) = beacon_chain @@ -216,7 +213,7 @@ pub fn publish_beacon_block(req: Request) - .map(|chunk| chunk.iter().cloned().collect::>()) .and_then(|chunks| { serde_json::from_slice(&chunks.as_slice()).map_err(|e| { - ApiError::InvalidQueryParams(format!( + ApiError::BadRequest(format!( "Unable to deserialize JSON into a BeaconBlock: {:?}", e )) @@ -233,7 +230,7 @@ pub fn publish_beacon_block(req: Request) - Ok(outcome) => { warn!(log, "Block could not be processed, but is being sent to the network anyway."; "block_slot" => slot, "outcome" => format!("{:?}", outcome)); //TODO need to send to network and return http 202 - Err(ApiError::InvalidQueryParams(format!( + Err(ApiError::BadRequest(format!( "The BeaconBlock could not be processed: {:?}", outcome ))) @@ -271,7 +268,7 @@ pub fn 
get_new_attestation(req: Request) -> .map_err(|e| { ApiError::ServerError(format!("Unable to read validator index cache. {:?}", e)) })? - .ok_or(ApiError::InvalidQueryParams( + .ok_or(ApiError::BadRequest( "The provided validator public key does not correspond to a validator index.".into(), ))?; @@ -289,14 +286,14 @@ pub fn get_new_attestation(req: Request) -> e )) })? - .ok_or(ApiError::InvalidQueryParams("No validator duties could be found for the requested validator. Cannot provide valid attestation.".into()))?; + .ok_or(ApiError::BadRequest("No validator duties could be found for the requested validator. Cannot provide valid attestation.".into()))?; // Check that we are requesting an attestation during the slot where it is relevant. let present_slot = beacon_chain.slot().map_err(|e| ApiError::ServerError( format!("Beacon node is unable to determine present slot, either the state isn't generated or the chain hasn't begun. {:?}", e) ))?; if val_duty.slot != present_slot { - return Err(ApiError::InvalidQueryParams(format!("Validator is only able to request an attestation during the slot they are allocated. Current slot: {:?}, allocated slot: {:?}", head_state.slot, val_duty.slot))); + return Err(ApiError::BadRequest(format!("Validator is only able to request an attestation during the slot they are allocated. Current slot: {:?}, allocated slot: {:?}", head_state.slot, val_duty.slot))); } // Parse the POC bit and insert it into the aggregation bits @@ -305,7 +302,7 @@ pub fn get_new_attestation(req: Request) -> .map(|(_key, value)| value)? .parse::() .map_err(|e| { - ApiError::InvalidQueryParams(format!("Invalid slot parameter, must be a u64. {:?}", e)) + ApiError::BadRequest(format!("Invalid slot parameter, must be a u64. 
{:?}", e)) })?; let mut aggregation_bits = BitList::with_capacity(val_duty.committee_len) @@ -327,20 +324,18 @@ pub fn get_new_attestation(req: Request) -> .parse::() .map(Slot::from) .map_err(|e| { - ApiError::InvalidQueryParams(format!("Invalid slot parameter, must be a u64. {:?}", e)) + ApiError::BadRequest(format!("Invalid slot parameter, must be a u64. {:?}", e)) })?; let current_slot = beacon_chain.head().beacon_state.slot.as_u64(); if requested_slot != current_slot { - return Err(ApiError::InvalidQueryParams(format!("Attestation data can only be requested for the current slot ({:?}), not your requested slot ({:?})", current_slot, requested_slot))); + return Err(ApiError::BadRequest(format!("Attestation data can only be requested for the current slot ({:?}), not your requested slot ({:?})", current_slot, requested_slot))); } let shard = query .first_of(&["shard"]) .map(|(_key, value)| value)? .parse::() - .map_err(|e| { - ApiError::InvalidQueryParams(format!("Shard is not a valid u64 value: {:?}", e)) - })?; + .map_err(|e| ApiError::BadRequest(format!("Shard is not a valid u64 value: {:?}", e)))?; let attestation_data = beacon_chain .produce_attestation_data(shard, current_slot.into()) From 23ce271b5fb06527c5b2f6e90c3d245ca1f75bdd Mon Sep 17 00:00:00 2001 From: Luke Anderson Date: Fri, 13 Sep 2019 21:22:32 +1000 Subject: [PATCH 290/305] Return HTTP 202 to indicate processing error. - A processing error of a validator's block or attestation should not prevent publishing. Now a 202 error is returned, to indicate that it has not been processed, but has still been published. - Added a publish_attestation function to the API, handling POST requests for /beacon/validator/attestation. 
--- beacon_node/rest_api/src/error.rs | 6 ++- beacon_node/rest_api/src/helpers.rs | 34 +++++++++++-- beacon_node/rest_api/src/lib.rs | 3 +- beacon_node/rest_api/src/validator.rs | 72 ++++++++++++++++++++++++--- 4 files changed, 102 insertions(+), 13 deletions(-) diff --git a/beacon_node/rest_api/src/error.rs b/beacon_node/rest_api/src/error.rs index 70384dce9..9f815a7d3 100644 --- a/beacon_node/rest_api/src/error.rs +++ b/beacon_node/rest_api/src/error.rs @@ -10,7 +10,8 @@ pub enum ApiError { BadRequest(String), NotFound(String), UnsupportedType(String), - ImATeapot(String), // Just in case. + ImATeapot(String), // Just in case. + ProcessingError(String), // A 202 error, for when a block/attestation cannot be processed, but still transmitted. } pub type ApiResult = Result, ApiError>; @@ -25,6 +26,7 @@ impl ApiError { ApiError::NotFound(desc) => (StatusCode::NOT_FOUND, desc), ApiError::UnsupportedType(desc) => (StatusCode::UNSUPPORTED_MEDIA_TYPE, desc), ApiError::ImATeapot(desc) => (StatusCode::IM_A_TEAPOT, desc), + ApiError::ProcessingError(desc) => (StatusCode::ACCEPTED, desc), } } } @@ -34,7 +36,7 @@ impl Into> for ApiError { let status_code = self.status_code(); Response::builder() .status(status_code.0) - .header("content-type", "text/plain") + .header("content-type", "text/plain; charset=utf-8") .body(Body::from(status_code.1)) .expect("Response should always be created.") } diff --git a/beacon_node/rest_api/src/helpers.rs b/beacon_node/rest_api/src/helpers.rs index 3f76f4e25..a711246b0 100644 --- a/beacon_node/rest_api/src/helpers.rs +++ b/beacon_node/rest_api/src/helpers.rs @@ -2,7 +2,9 @@ use crate::{ApiError, ApiResult}; use beacon_chain::{BeaconChain, BeaconChainTypes}; use bls::PublicKey; use eth2_libp2p::{PubsubMessage, Topic}; -use eth2_libp2p::{BEACON_BLOCK_TOPIC, TOPIC_ENCODING_POSTFIX, TOPIC_PREFIX}; +use eth2_libp2p::{ + BEACON_ATTESTATION_TOPIC, BEACON_BLOCK_TOPIC, TOPIC_ENCODING_POSTFIX, TOPIC_PREFIX, +}; use hex; use http::header; use 
hyper::{Body, Request}; @@ -12,7 +14,7 @@ use ssz::Encode; use std::sync::Arc; use store::{iter::AncestorIter, Store}; use tokio::sync::mpsc; -use types::{BeaconBlock, BeaconState, EthSpec, Hash256, RelativeEpoch, Slot}; +use types::{Attestation, BeaconBlock, BeaconState, EthSpec, Hash256, RelativeEpoch, Slot}; /// Parse a slot from a `0x` preixed string. /// @@ -227,7 +229,7 @@ pub fn publish_beacon_block_to_network( // Publish the block to the p2p network via gossipsub. if let Err(e) = chan.write().try_send(NetworkMessage::Publish { topics: vec![topic], - message: message, + message, }) { return Err(ApiError::ServerError(format!( "Unable to send new block to network: {:?}", @@ -238,6 +240,32 @@ pub fn publish_beacon_block_to_network( Ok(()) } +pub fn publish_attestation_to_network( + chan: Arc>>, + attestation: Attestation, +) -> Result<(), ApiError> { + // create the network topic to send on + let topic_string = format!( + "/{}/{}/{}", + TOPIC_PREFIX, BEACON_ATTESTATION_TOPIC, TOPIC_ENCODING_POSTFIX + ); + let topic = Topic::new(topic_string); + let message = PubsubMessage::Attestation(attestation.as_ssz_bytes()); + + // Publish the attestation to the p2p network via gossipsub. 
+ if let Err(e) = chan.write().try_send(NetworkMessage::Publish { + topics: vec![topic], + message, + }) { + return Err(ApiError::ServerError(format!( + "Unable to send new attestation to network: {:?}", + e + ))); + } + + Ok(()) +} + #[cfg(test)] mod test { use super::*; diff --git a/beacon_node/rest_api/src/lib.rs b/beacon_node/rest_api/src/lib.rs index 35678391a..133fc3a26 100644 --- a/beacon_node/rest_api/src/lib.rs +++ b/beacon_node/rest_api/src/lib.rs @@ -140,7 +140,7 @@ impl Service for ApiService { into_boxfut(validator::get_new_attestation::(req)) } (&Method::POST, "/beacon/validator/attestation") => { - into_boxfut(helpers::implementation_pending_response(req)) + validator::publish_attestation::(req) } (&Method::GET, "/beacon/state") => into_boxfut(beacon::get_state::(req)), @@ -164,6 +164,7 @@ impl Service for ApiService { (&Method::GET, "/spec/eth2_config") => into_boxfut(spec::get_eth2_config::(req)), (&Method::GET, "/metrics") => into_boxfut(metrics::get_prometheus::(req)), + _ => Box::new(futures::future::err(ApiError::NotFound( "Request path and/or method not found.".to_owned(), ))), diff --git a/beacon_node/rest_api/src/validator.rs b/beacon_node/rest_api/src/validator.rs index 53d9e8b8b..60c0eed06 100644 --- a/beacon_node/rest_api/src/validator.rs +++ b/beacon_node/rest_api/src/validator.rs @@ -1,10 +1,10 @@ use crate::helpers::{ check_content_type_for_json, get_beacon_chain_from_request, get_logger_from_request, - parse_pubkey, publish_beacon_block_to_network, + parse_pubkey, publish_attestation_to_network, publish_beacon_block_to_network, }; use crate::response_builder::ResponseBuilder; use crate::{ApiError, ApiResult, BoxFut, UrlQuery}; -use beacon_chain::{BeaconChainTypes, BlockProcessingOutcome}; +use beacon_chain::{AttestationProcessingOutcome, BeaconChainTypes, BlockProcessingOutcome}; use bls::{AggregateSignature, PublicKey, Signature}; use futures::future::Future; use futures::stream::Stream; @@ -228,16 +228,16 @@ pub fn 
publish_beacon_block(req: Request) - publish_beacon_block_to_network::(network_chan, block) } Ok(outcome) => { - warn!(log, "Block could not be processed, but is being sent to the network anyway."; "block_slot" => slot, "outcome" => format!("{:?}", outcome)); - //TODO need to send to network and return http 202 - Err(ApiError::BadRequest(format!( - "The BeaconBlock could not be processed: {:?}", + warn!(log, "BeaconBlock could not be processed, but is being sent to the network anyway."; "outcome" => format!("{:?}", outcome)); + publish_beacon_block_to_network::(network_chan, block)?; + Err(ApiError::ProcessingError(format!( + "The BeaconBlock could not be processed, but has still been published: {:?}", outcome ))) } Err(e) => { Err(ApiError::ServerError(format!( - "Unable to process block: {:?}", + "Error while processing block: {:?}", e ))) } @@ -351,3 +351,61 @@ pub fn get_new_attestation(req: Request) -> ResponseBuilder::new(&req)?.body(&attestation) } + +/// HTTP Handler to publish an Attestation, which has been signed by a validator. +pub fn publish_attestation(req: Request) -> BoxFut { + let _ = try_future!(check_content_type_for_json(&req)); + let log = get_logger_from_request(&req); + let beacon_chain = try_future!(get_beacon_chain_from_request::(&req)); + // Get the network sending channel from the request, for later transmission + let network_chan = req + .extensions() + .get::>>>() + .expect("Should always get the network channel from the request, since we put it in there.") + .clone(); + + let response_builder = ResponseBuilder::new(&req); + + let body = req.into_body(); + trace!( + log, + "Got the request body, now going to parse it into an attestation." 
+ ); + Box::new(body + .concat2() + .map_err(|e| ApiError::ServerError(format!("Unable to get request body: {:?}",e))) + .map(|chunk| chunk.iter().cloned().collect::>()) + .and_then(|chunks| { + serde_json::from_slice(&chunks.as_slice()).map_err(|e| { + ApiError::BadRequest(format!( + "Unable to deserialize JSON into an Attestation: {:?}", + e + )) + }) + }) + .and_then(move |attestation: Attestation| { + match beacon_chain.process_attestation(attestation.clone()) { + Ok(AttestationProcessingOutcome::Processed) => { + // Attestation was processed, publish via gossipsub + info!(log, "Processed valid attestation from API, transmitting to network."); + publish_attestation_to_network::(network_chan, attestation) + } + Ok(outcome) => { + warn!(log, "Attestation could not be processed, but is being sent to the network anyway."; "outcome" => format!("{:?}", outcome)); + publish_attestation_to_network::(network_chan, attestation)?; + Err(ApiError::ProcessingError(format!( + "The Attestation could not be processed, but has still been published: {:?}", + outcome + ))) + } + Err(e) => { + Err(ApiError::ServerError(format!( + "Error while processing attestation: {:?}", + e + ))) + } + } + }).and_then(|_| { + response_builder?.body_no_ssz(&()) + })) +} From 2676c8a62d56775ee4211e49e0d12bf39b34fe10 Mon Sep 17 00:00:00 2001 From: Paul Hauner Date: Fri, 13 Sep 2019 18:33:17 -0400 Subject: [PATCH 291/305] Remove some dust code from beacon chain --- beacon_node/beacon_chain/src/beacon_chain.rs | 7 ------- 1 file changed, 7 deletions(-) diff --git a/beacon_node/beacon_chain/src/beacon_chain.rs b/beacon_node/beacon_chain/src/beacon_chain.rs index 48a012c36..ad0627078 100644 --- a/beacon_node/beacon_chain/src/beacon_chain.rs +++ b/beacon_node/beacon_chain/src/beacon_chain.rs @@ -644,7 +644,6 @@ impl BeaconChain { // Attempt to process the attestation using the `self.head()` state. // // This is purely an effort to avoid loading a `BeaconState` unnecessarily from the DB. 
- // Take a read lock on the head beacon state. let state = &self.head().beacon_state; // If it turns out that the attestation was made using the head state, then there @@ -674,12 +673,6 @@ impl BeaconChain { ); } - // Ensure the read-lock from `self.head()` is dropped. - // - // This is likely unnecessary, however it remains as a reminder to ensure this lock - // isn't hogged. - std::mem::drop(state); - // Use the `data.beacon_block_root` to load the state from the latest non-skipped // slot preceding the attestation's creation. // From e1f6052d5eb34e837d7d08862291d9cd1829ea18 Mon Sep 17 00:00:00 2001 From: Paul Hauner Date: Fri, 13 Sep 2019 18:49:39 -0400 Subject: [PATCH 292/305] Add unfinished pycli integration --- lcli/Cargo.toml | 1 + lcli/src/main.rs | 21 ++++++++ lcli/src/pycli.rs | 123 ++++++++++++++++++++++++++++++++++++++++++++++ 3 files changed, 145 insertions(+) create mode 100644 lcli/src/pycli.rs diff --git a/lcli/Cargo.toml b/lcli/Cargo.toml index b774d4d12..55bfc1654 100644 --- a/lcli/Cargo.toml +++ b/lcli/Cargo.toml @@ -17,3 +17,4 @@ simple_logger = "1.0" types = { path = "../eth2/types" } state_processing = { path = "../eth2/state_processing" } eth2_ssz = { path = "../eth2/utils/ssz" } +regex = "1.3" diff --git a/lcli/src/main.rs b/lcli/src/main.rs index 63f01c671..87d670cb9 100644 --- a/lcli/src/main.rs +++ b/lcli/src/main.rs @@ -2,16 +2,20 @@ extern crate log; mod parse_hex; +mod pycli; mod transition_blocks; use clap::{App, Arg, SubCommand}; use parse_hex::run_parse_hex; +use pycli::run_pycli; use std::fs::File; use std::path::PathBuf; use std::time::{SystemTime, UNIX_EPOCH}; use transition_blocks::run_transition_blocks; use types::{test_utils::TestingBeaconStateBuilder, EthSpec, MainnetEthSpec, MinimalEthSpec}; +type LocalEthSpec = MinimalEthSpec; + fn main() { simple_logger::init().expect("logger should initialize"); @@ -111,6 +115,21 @@ fn main() { .help("SSZ encoded as 0x-prefixed hex"), ), ) + .subcommand( + SubCommand::with_name("pycli") 
+ .about("TODO") + .version("0.1.0") + .author("Paul Hauner ") + .arg( + Arg::with_name("pycli-path") + .long("pycli-path") + .short("p") + .value_name("PATH") + .takes_value(true) + .default_value("../../pycli") + .help("Path to the pycli repository."), + ), + ) .get_matches(); match matches.subcommand() { @@ -157,6 +176,8 @@ fn main() { ("pretty-hex", Some(matches)) => { run_parse_hex(matches).unwrap_or_else(|e| error!("Failed to pretty print hex: {}", e)) } + ("pycli", Some(matches)) => run_pycli::(matches) + .unwrap_or_else(|e| error!("Failed to run pycli: {}", e)), (other, _) => error!("Unknown subcommand {}. See --help.", other), } } diff --git a/lcli/src/pycli.rs b/lcli/src/pycli.rs new file mode 100644 index 000000000..4b1b32828 --- /dev/null +++ b/lcli/src/pycli.rs @@ -0,0 +1,123 @@ +use clap::ArgMatches; +use ssz::Decode; +use std::fs; +use std::path::PathBuf; +use std::process::Command; +use types::{BeaconState, EthSpec}; + +pub fn run_pycli(matches: &ArgMatches) -> Result<(), String> { + let cmd_path = matches + .value_of("pycli-path") + .ok_or_else(|| "No pycli-path supplied")?; + + let pycli = PyCli::new(cmd_path.to_string())?; + + let block_path = PathBuf::from("/tmp/trinity/block_16.ssz"); + let pre_state_path = PathBuf::from("/tmp/trinity/state_15.ssz"); + + pycli + .transition_blocks::(block_path, pre_state_path) + .map_err(|e| e.to_string())?; + + Ok(()) +} + +/* + * TODO: loading from file. + * +use regex::Regex; +use std::collections::HashMap; +use std::ffi::OsString; + +const BLOCK_PREFIX: &str = "block_"; +const PRE_PREFIX: &str = "state_pre_"; +const POST_PREFIX: &str = "state_post_"; + +struct Case { + pre: Option>, + post: Option>, + block: Option>, +} + +fn get_sets(dir: PathBuf) -> Result<(), String> { + let map: HashMap> = HashMap::new(); + + fs::read_dir(dir) + .map_err(|e| format!("Unable to read source directory: {:?}", e))? 
+ .filter_map(Result::ok) + .map(|f| f.file_name().into_string()) + .filter_map(Result::ok) + .try_for_each(|filename| { + if filename.starts_with(BLOCK_PREFIX) { + let regex = Regex::new(r".*root0x(.........)") + .map_err(|e| format!("Failed to compile block regex: {:?}", e))?; + let captures = regex.captures(&filename). + // block + } else if filename.starts_with(PRE_PREFIX) { + dbg!("pre state"); + } else if filename.starts_with(POST_PREFIX) { + dbg!("post state"); + } else { + dbg!("unknown file"); + } + + Ok(()) + }) +} +*/ + +/// A wrapper around Danny Ryan's `pycli` utility: +/// +/// https://github.com/djrtwo/pycli +/// +/// Provides functions for testing consensus logic against the executable Python spec. +pub struct PyCli { + cmd_path: PathBuf, +} + +impl PyCli { + /// Create a new instance, parsing the given `cmd_path` as a canonical path. + pub fn new(cmd_path: String) -> Result { + Ok(Self { + cmd_path: fs::canonicalize(cmd_path) + .map_err(|e| format!("Failed to canonicalize pycli path: {:?}", e))?, + }) + } + + /// Performs block processing on the state at the given `pre_state_path`, using the block at + /// `block_path`. + /// + /// Returns an SSZ-encoded `BeaconState` on success. + pub fn transition_blocks( + &self, + block_path: PathBuf, + pre_state_path: PathBuf, + ) -> Result, String> { + let output = Command::new("python") + .current_dir(self.cmd_path.clone()) + .arg("pycli.py") + .arg("transition") + .arg("blocks") + .arg(format!("--pre={}", path_string(pre_state_path)?)) + .arg(path_string(block_path)?) 
+ .output() + .map_err(|e| format!("Failed to run command: {:?}", e))?; + + if output.status.success() { + let state = BeaconState::from_ssz_bytes(&output.stdout) + .map_err(|e| format!("Failed to parse SSZ: {:?}", e))?; + Ok(state) + } else { + Err(format!("pycli returned an error: {:?}", output)) + } + } +} + +fn path_string(path: PathBuf) -> Result { + let path = + fs::canonicalize(path).map_err(|e| format!("Unable to canonicalize path: {:?}", e))?; + + path.into_os_string() + .into_string() + .map_err(|p| format!("Unable to stringify path: {:?}", p)) +} From 110e627d7b4b91bb55f5fdbfb54d38ccb313dce0 Mon Sep 17 00:00:00 2001 From: Paul Hauner Date: Sat, 14 Sep 2019 10:34:03 -0400 Subject: [PATCH 293/305] Add basic, not-useful websocket server --- Cargo.toml | 1 + beacon_node/client/Cargo.toml | 1 + beacon_node/client/src/config.rs | 2 + beacon_node/client/src/lib.rs | 5 ++ beacon_node/websocket_server/Cargo.toml | 17 ++++++ beacon_node/websocket_server/src/lib.rs | 75 +++++++++++++++++++++++++ 6 files changed, 101 insertions(+) create mode 100644 beacon_node/websocket_server/Cargo.toml create mode 100644 beacon_node/websocket_server/src/lib.rs diff --git a/Cargo.toml b/Cargo.toml index 3600c90ca..9b31060a2 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -33,6 +33,7 @@ members = [ "beacon_node/rpc", "beacon_node/version", "beacon_node/beacon_chain", + "beacon_node/websocket_server", "tests/ef_tests", "lcli", "protos", diff --git a/beacon_node/client/Cargo.toml b/beacon_node/client/Cargo.toml index 9b5a9cf42..383318b0d 100644 --- a/beacon_node/client/Cargo.toml +++ b/beacon_node/client/Cargo.toml @@ -10,6 +10,7 @@ network = { path = "../network" } eth2-libp2p = { path = "../eth2-libp2p" } rpc = { path = "../rpc" } rest_api = { path = "../rest_api" } +websocket_server = { path = "../websocket_server" } prometheus = "^0.6" types = { path = "../../eth2/types" } tree_hash = "0.1" diff --git a/beacon_node/client/src/config.rs b/beacon_node/client/src/config.rs index 
5b0553c5b..1e07e7cf2 100644 --- a/beacon_node/client/src/config.rs +++ b/beacon_node/client/src/config.rs @@ -27,6 +27,7 @@ pub struct Config { pub network: network::NetworkConfig, pub rpc: rpc::RPCConfig, pub rest_api: rest_api::ApiConfig, + pub websocket_server: websocket_server::Config, } /// Defines how the client should initialize a BeaconChain. @@ -96,6 +97,7 @@ impl Default for Config { network: NetworkConfig::new(), rpc: <_>::default(), rest_api: <_>::default(), + websocket_server: <_>::default(), spec_constants: TESTNET_SPEC_CONSTANTS.into(), beacon_chain_start_method: <_>::default(), eth1_backend_method: <_>::default(), diff --git a/beacon_node/client/src/lib.rs b/beacon_node/client/src/lib.rs index afcd538b5..08674166d 100644 --- a/beacon_node/client/src/lib.rs +++ b/beacon_node/client/src/lib.rs @@ -229,6 +229,11 @@ where None }; + // Start the websocket server + let _websocket_sender = if client_config.websocket_server.enabled { + websocket_server::start_server::(&client_config.websocket_server, &log)?; + }; + let (slot_timer_exit_signal, exit) = exit_future::signal(); if let Some(duration_to_next_slot) = beacon_chain.slot_clock.duration_to_next_slot() { // set up the validator work interval - start at next slot and proceed every slot diff --git a/beacon_node/websocket_server/Cargo.toml b/beacon_node/websocket_server/Cargo.toml new file mode 100644 index 000000000..f846f62b7 --- /dev/null +++ b/beacon_node/websocket_server/Cargo.toml @@ -0,0 +1,17 @@ +[package] +name = "websocket_server" +version = "0.1.0" +authors = ["Paul Hauner "] +edition = "2018" + +# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html + +[dependencies] +exit-future = "0.1.3" +futures = "0.1.25" +serde = "1.0" +serde_derive = "1.0" +slog = "^2.2.3" +tokio = "0.1.16" +types = { path = "../../eth2/types" } +ws = "0.9" diff --git a/beacon_node/websocket_server/src/lib.rs b/beacon_node/websocket_server/src/lib.rs new file mode 100644 index 
000000000..eb28b10be --- /dev/null +++ b/beacon_node/websocket_server/src/lib.rs @@ -0,0 +1,75 @@ +use serde_derive::{Deserialize, Serialize}; +use slog::{error, info, Logger}; +use std::net::Ipv4Addr; +use std::thread; +use types::EthSpec; +use ws::{Sender, WebSocket}; + +/// The core configuration of a Lighthouse beacon node. +#[derive(Debug, Clone, Serialize, Deserialize)] +pub struct Config { + pub enabled: bool, + /// The IPv4 address the REST API HTTP server will listen on. + pub listen_address: Ipv4Addr, + /// The port the REST API HTTP server will listen on. + pub port: u16, +} + +impl Default for Config { + fn default() -> Self { + Config { + enabled: true, + listen_address: Ipv4Addr::new(127, 0, 0, 1), + port: 5053, + } + } +} + +pub struct WebSocketSender { + sender: Sender, +} + +impl WebSocketSender { + pub fn send_string(&self, string: String) -> Result<(), String> { + self.sender + .send(string) + .map_err(|e| format!("Unable to broadcast to websocket clients: {:?}", e)) + } +} + +pub fn start_server(config: &Config, log: &Logger) -> Result { + let server_string = format!("{}:{}", config.listen_address, config.port); + + info!( + log, + "Websocket server starting"; + "listen_address" => &server_string + ); + + // Create a server that simply ignores any incoming messages. 
+ let server = WebSocket::new(|_| |_| Ok(())) + .map_err(|e| format!("Failed to initialize websocket server: {:?}", e))?; + + let broadcaster = server.broadcaster(); + + let log_inner = log.clone(); + let _handle = thread::spawn(move || match server.listen(server_string) { + Ok(_) => { + info!( + log_inner, + "Websocket server stopped"; + ); + } + Err(e) => { + error!( + log_inner, + "Websocket server failed to start"; + "error" => format!("{:?}", e) + ); + } + }); + + Ok(WebSocketSender { + sender: broadcaster, + }) +} From 07990e0e922e4bd917780a963b4fb9a78e386d9f Mon Sep 17 00:00:00 2001 From: Paul Hauner Date: Sat, 14 Sep 2019 15:41:35 -0400 Subject: [PATCH 294/305] Adds beacon chain events, websocket event handler --- beacon_node/beacon_chain/src/beacon_chain.rs | 112 ++++++++++++++++++ .../beacon_chain/src/beacon_chain_builder.rs | 4 +- beacon_node/beacon_chain/src/events.rs | 51 ++++++++ beacon_node/beacon_chain/src/lib.rs | 1 + beacon_node/beacon_chain/src/test_utils.rs | 11 +- beacon_node/client/Cargo.toml | 1 + beacon_node/client/src/lib.rs | 49 +++++--- beacon_node/client/src/notifier.rs | 9 +- beacon_node/src/run.rs | 28 ++--- beacon_node/websocket_server/Cargo.toml | 2 + beacon_node/websocket_server/src/lib.rs | 46 +++++-- 11 files changed, 262 insertions(+), 52 deletions(-) create mode 100644 beacon_node/beacon_chain/src/events.rs diff --git a/beacon_node/beacon_chain/src/beacon_chain.rs b/beacon_node/beacon_chain/src/beacon_chain.rs index ad0627078..33205e97c 100644 --- a/beacon_node/beacon_chain/src/beacon_chain.rs +++ b/beacon_node/beacon_chain/src/beacon_chain.rs @@ -1,6 +1,7 @@ use crate::checkpoint::CheckPoint; use crate::errors::{BeaconChainError as Error, BlockProductionError}; use crate::eth1_chain::{Eth1Chain, Eth1ChainBackend}; +use crate::events::{EventHandler, EventKind}; use crate::fork_choice::{Error as ForkChoiceError, ForkChoice}; use crate::iter::{ReverseBlockRootIterator, ReverseStateRootIterator}; use crate::metrics; @@ -95,6 +96,7 
@@ pub trait BeaconChainTypes: Send + Sync + 'static { type LmdGhost: LmdGhost; type Eth1Chain: Eth1ChainBackend; type EthSpec: types::EthSpec; + type EventHandler: EventHandler; } /// Represents the "Beacon Chain" component of Ethereum 2.0. Allows import of blocks and block @@ -117,6 +119,8 @@ pub struct BeaconChain { /// A state-machine that is updated with information from the network and chooses a canonical /// head block. pub fork_choice: ForkChoice, + /// A handler for events generated by the beacon chain. + pub event_handler: T::EventHandler, /// Logging to CLI, etc. log: Logger, } @@ -126,6 +130,7 @@ impl BeaconChain { pub fn from_genesis( store: Arc, eth1_backend: T::Eth1Chain, + event_handler: T::EventHandler, mut genesis_state: BeaconState, mut genesis_block: BeaconBlock, spec: ChainSpec, @@ -174,6 +179,7 @@ impl BeaconChain { canonical_head, genesis_block_root, fork_choice: ForkChoice::new(store.clone(), &genesis_block, genesis_block_root), + event_handler, store, log, }) @@ -183,6 +189,7 @@ impl BeaconChain { pub fn from_store( store: Arc, eth1_backend: T::Eth1Chain, + event_handler: T::EventHandler, spec: ChainSpec, log: Logger, ) -> Result>, Error> { @@ -219,6 +226,7 @@ impl BeaconChain { slot_clock, fork_choice: ForkChoice::new(store.clone(), last_finalized_block, last_finalized_root), op_pool, + event_handler, eth1_chain: Eth1Chain::new(eth1_backend), canonical_head: RwLock::new(p.canonical_head), genesis_block_root: p.genesis_block_root, @@ -629,6 +637,59 @@ impl BeaconChain { pub fn process_attestation( &self, attestation: Attestation, + ) -> Result { + let outcome = self.process_attestation_internal(attestation.clone()); + + match &outcome { + Ok(outcome) => match outcome { + AttestationProcessingOutcome::Processed => { + trace!( + self.log, + "Beacon attestation imported"; + "shard" => attestation.data.crosslink.shard, + "target_epoch" => attestation.data.target.epoch, + ); + let _ = self + .event_handler + 
.register(EventKind::BeaconAttestationImported { + attestation: Box::new(attestation), + }); + } + other => { + warn!( + self.log, + "Beacon attestation rejected"; + "reason" => format!("{:?}", other), + ); + let _ = self + .event_handler + .register(EventKind::BeaconAttestationRejected { + reason: format!("Invalid attestation: {:?}", other), + attestation: Box::new(attestation), + }); + } + }, + Err(e) => { + error!( + self.log, + "Beacon attestation processing error"; + "error" => format!("{:?}", e), + ); + let _ = self + .event_handler + .register(EventKind::BeaconAttestationRejected { + reason: format!("Internal error: {:?}", e), + attestation: Box::new(attestation), + }); + } + } + + outcome + } + + pub fn process_attestation_internal( + &self, + attestation: Attestation, ) -> Result { metrics::inc_counter(&metrics::ATTESTATION_PROCESSING_REQUESTS); let timer = metrics::start_timer(&metrics::ATTESTATION_PROCESSING_TIMES); @@ -932,6 +993,57 @@ impl BeaconChain { pub fn process_block( &self, block: BeaconBlock, + ) -> Result { + let outcome = self.process_block_internal(block.clone()); + + match &outcome { + Ok(outcome) => match outcome { + BlockProcessingOutcome::Processed { block_root } => { + trace!( + self.log, + "Beacon block imported"; + "block_root" => format!("{:?}", block_root), + "block_slot" => format!("{:?}", block_root), + ); + let _ = self.event_handler.register(EventKind::BeaconBlockImported { + block_root: *block_root, + block: Box::new(block), + }); + } + other => { + warn!( + self.log, + "Beacon block rejected"; + "reason" => format!("{:?}", other), + ); + let _ = self.event_handler.register(EventKind::BeaconBlockRejected { + reason: format!("Invalid block: {:?}", other), + block: Box::new(block), + }); + } + }, + Err(e) => { + error!( + self.log, + "Beacon block processing error"; + "error" => format!("{:?}", e), + ); + let _ = self.event_handler.register(EventKind::BeaconBlockRejected { + reason: format!("Internal error: {:?}", e), + block: 
Box::new(block), + }); + } + } + + outcome + } + + /// Accept some block and attempt to add it to block DAG. + /// + /// Will accept blocks from prior slots, however it will reject any block from a future slot. + fn process_block_internal( + &self, + block: BeaconBlock, ) -> Result { metrics::inc_counter(&metrics::BLOCK_PROCESSING_REQUESTS); let full_timer = metrics::start_timer(&metrics::BLOCK_PROCESSING_TIMES); diff --git a/beacon_node/beacon_chain/src/beacon_chain_builder.rs b/beacon_node/beacon_chain/src/beacon_chain_builder.rs index 2a3537020..357644a2d 100644 --- a/beacon_node/beacon_chain/src/beacon_chain_builder.rs +++ b/beacon_node/beacon_chain/src/beacon_chain_builder.rs @@ -131,10 +131,11 @@ impl BeaconChainBuilder { self, store: Arc, eth1_backend: T::Eth1Chain, + event_handler: T::EventHandler, ) -> Result, String> { Ok(match self.build_strategy { BuildStrategy::LoadFromStore => { - BeaconChain::from_store(store, eth1_backend, self.spec, self.log) + BeaconChain::from_store(store, eth1_backend, event_handler, self.spec, self.log) .map_err(|e| format!("Error loading BeaconChain from database: {:?}", e))? .ok_or_else(|| format!("Unable to find exising BeaconChain in database."))? 
} @@ -144,6 +145,7 @@ impl BeaconChainBuilder { } => BeaconChain::from_genesis( store, eth1_backend, + event_handler, genesis_state.as_ref().clone(), genesis_block.as_ref().clone(), self.spec, diff --git a/beacon_node/beacon_chain/src/events.rs b/beacon_node/beacon_chain/src/events.rs new file mode 100644 index 000000000..d690eabf1 --- /dev/null +++ b/beacon_node/beacon_chain/src/events.rs @@ -0,0 +1,51 @@ +use serde_derive::{Deserialize, Serialize}; +use std::marker::PhantomData; +use types::{Attestation, BeaconBlock, EthSpec, Hash256}; + +pub trait EventHandler: Sized + Send + Sync { + fn register(&self, kind: EventKind) -> Result<(), String>; +} + +pub struct NullEventHandler(PhantomData); + +impl EventHandler for NullEventHandler { + fn register(&self, _kind: EventKind) -> Result<(), String> { + Ok(()) + } +} + +impl Default for NullEventHandler { + fn default() -> Self { + NullEventHandler(PhantomData) + } +} + +#[derive(Debug, Serialize, Deserialize)] +#[serde( + bound = "T: EthSpec", + rename_all = "snake_case", + tag = "event", + content = "data" +)] +pub enum EventKind { + BeaconHeadChanged { + reorg: bool, + current_head_beacon_block_root: Hash256, + previous_head_beacon_block_root: Hash256, + }, + BeaconBlockImported { + block_root: Hash256, + block: Box>, + }, + BeaconBlockRejected { + reason: String, + block: Box>, + }, + BeaconAttestationImported { + attestation: Box>, + }, + BeaconAttestationRejected { + reason: String, + attestation: Box>, + }, +} diff --git a/beacon_node/beacon_chain/src/lib.rs b/beacon_node/beacon_chain/src/lib.rs index 036172348..7f7e4ec2b 100644 --- a/beacon_node/beacon_chain/src/lib.rs +++ b/beacon_node/beacon_chain/src/lib.rs @@ -7,6 +7,7 @@ mod beacon_chain_builder; mod checkpoint; mod errors; mod eth1_chain; +pub mod events; mod fork_choice; mod iter; mod metrics; diff --git a/beacon_node/beacon_chain/src/test_utils.rs b/beacon_node/beacon_chain/src/test_utils.rs index 7670ac74e..97b802ddf 100644 --- 
a/beacon_node/beacon_chain/src/test_utils.rs +++ b/beacon_node/beacon_chain/src/test_utils.rs @@ -1,6 +1,6 @@ use crate::{ - AttestationProcessingOutcome, BeaconChain, BeaconChainBuilder, BeaconChainTypes, - BlockProcessingOutcome, InteropEth1ChainBackend, + events::NullEventHandler, AttestationProcessingOutcome, BeaconChain, BeaconChainBuilder, + BeaconChainTypes, BlockProcessingOutcome, InteropEth1ChainBackend, }; use lmd_ghost::LmdGhost; use rayon::prelude::*; @@ -68,6 +68,7 @@ where type LmdGhost = L; type Eth1Chain = InteropEth1ChainBackend; type EthSpec = E; + type EventHandler = NullEventHandler; } /// A testing harness which can instantiate a `BeaconChain` and populate it with blocks and @@ -103,7 +104,11 @@ where let chain = BeaconChainBuilder::quick_start(HARNESS_GENESIS_TIME, &keypairs, spec.clone(), log) .unwrap_or_else(|e| panic!("Failed to create beacon chain builder: {}", e)) - .build(store.clone(), InteropEth1ChainBackend::default()) + .build( + store.clone(), + InteropEth1ChainBackend::default(), + NullEventHandler::default(), + ) .unwrap_or_else(|e| panic!("Failed to build beacon chain: {}", e)); Self { diff --git a/beacon_node/client/Cargo.toml b/beacon_node/client/Cargo.toml index 383318b0d..e55721793 100644 --- a/beacon_node/client/Cargo.toml +++ b/beacon_node/client/Cargo.toml @@ -6,6 +6,7 @@ edition = "2018" [dependencies] beacon_chain = { path = "../beacon_chain" } +store = { path = "../store" } network = { path = "../network" } eth2-libp2p = { path = "../eth2-libp2p" } rpc = { path = "../rpc" } diff --git a/beacon_node/client/src/lib.rs b/beacon_node/client/src/lib.rs index 08674166d..b4c7c9347 100644 --- a/beacon_node/client/src/lib.rs +++ b/beacon_node/client/src/lib.rs @@ -20,18 +20,19 @@ use std::time::{Duration, Instant, SystemTime, UNIX_EPOCH}; use tokio::runtime::TaskExecutor; use tokio::timer::Interval; use types::EthSpec; +use websocket_server::WebSocketSender; pub use beacon_chain::{BeaconChainTypes, Eth1ChainBackend, 
InteropEth1ChainBackend}; pub use config::{BeaconChainStartMethod, Config as ClientConfig, Eth1BackendMethod}; pub use eth2_config::Eth2Config; #[derive(Clone)] -pub struct ClientType { +pub struct RuntimeBeaconChainTypes { _phantom_s: PhantomData, _phantom_e: PhantomData, } -impl BeaconChainTypes for ClientType +impl BeaconChainTypes for RuntimeBeaconChainTypes where S: Store + 'static, E: EthSpec, @@ -41,17 +42,22 @@ where type LmdGhost = ThreadSafeReducedTree; type Eth1Chain = InteropEth1ChainBackend; type EthSpec = E; + type EventHandler = WebSocketSender; } /// Main beacon node client service. This provides the connection and initialisation of the clients /// sub-services in multiple threads. -pub struct Client { +pub struct Client +where + S: Store + Clone + 'static, + E: EthSpec, +{ /// Configuration for the lighthouse client. _client_config: ClientConfig, /// The beacon chain for the running client. - beacon_chain: Arc>, + beacon_chain: Arc>>, /// Reference to the network service. - pub network: Arc>, + pub network: Arc>>, /// Signal to terminate the RPC server. pub rpc_exit_signal: Option, /// Signal to terminate the slot timer. @@ -60,19 +66,22 @@ pub struct Client { pub api_exit_signal: Option, /// The clients logger. log: slog::Logger, + /* /// Marker to pin the beacon chain generics. - phantom: PhantomData, + phantom: PhantomData, + */ } -impl Client +impl Client where - T: BeaconChainTypes + Clone, + S: Store + Clone + 'static, + E: EthSpec, { /// Generate an instance of the client. Spawn and link all internal sub-processes. 
pub fn new( client_config: ClientConfig, eth2_config: Eth2Config, - store: T::Store, + store: S, log: slog::Logger, executor: &TaskExecutor, ) -> error::Result { @@ -169,11 +178,19 @@ where } }; - let eth1_backend = T::Eth1Chain::new(String::new()).map_err(|e| format!("{:?}", e))?; + let eth1_backend = + InteropEth1ChainBackend::new(String::new()).map_err(|e| format!("{:?}", e))?; - let beacon_chain: Arc> = Arc::new( + // Start the websocket server. + let websocket_sender: WebSocketSender = if client_config.websocket_server.enabled { + websocket_server::start_server(&client_config.websocket_server, &log)? + } else { + WebSocketSender::dummy() + }; + + let beacon_chain: Arc>> = Arc::new( beacon_chain_builder - .build(store, eth1_backend) + .build(store, eth1_backend, websocket_sender) .map_err(error::Error::from)?, ); @@ -229,11 +246,6 @@ where None }; - // Start the websocket server - let _websocket_sender = if client_config.websocket_server.enabled { - websocket_server::start_server::(&client_config.websocket_server, &log)?; - }; - let (slot_timer_exit_signal, exit) = exit_future::signal(); if let Some(duration_to_next_slot) = beacon_chain.slot_clock.duration_to_next_slot() { // set up the validator work interval - start at next slot and proceed every slot @@ -268,12 +280,11 @@ where api_exit_signal, log, network, - phantom: PhantomData, }) } } -impl Drop for Client { +impl Drop for Client { fn drop(&mut self) { // Save the beacon chain to it's store before dropping. 
let _result = self.beacon_chain.persist(); diff --git a/beacon_node/client/src/notifier.rs b/beacon_node/client/src/notifier.rs index 343918d4d..20da963ec 100644 --- a/beacon_node/client/src/notifier.rs +++ b/beacon_node/client/src/notifier.rs @@ -1,11 +1,12 @@ use crate::Client; -use beacon_chain::BeaconChainTypes; use exit_future::Exit; use futures::{Future, Stream}; use slog::{debug, o, warn}; use std::time::{Duration, Instant}; +use store::Store; use tokio::runtime::TaskExecutor; use tokio::timer::Interval; +use types::EthSpec; /// The interval between heartbeat events. pub const HEARTBEAT_INTERVAL_SECONDS: u64 = 15; @@ -17,7 +18,11 @@ pub const WARN_PEER_COUNT: usize = 1; /// durations. /// /// Presently unused, but remains for future use. -pub fn run(client: &Client, executor: TaskExecutor, exit: Exit) { +pub fn run(client: &Client, executor: TaskExecutor, exit: Exit) +where + S: Store + Clone + 'static, + E: EthSpec, +{ // notification heartbeat let interval = Interval::new( Instant::now(), diff --git a/beacon_node/src/run.rs b/beacon_node/src/run.rs index d036ef0c4..3d6607552 100644 --- a/beacon_node/src/run.rs +++ b/beacon_node/src/run.rs @@ -1,19 +1,17 @@ -use client::{ - error, notifier, BeaconChainTypes, Client, ClientConfig, ClientType, Eth1BackendMethod, - Eth2Config, -}; +use client::{error, notifier, Client, ClientConfig, Eth1BackendMethod, Eth2Config}; use futures::sync::oneshot; use futures::Future; use slog::{error, info}; use std::cell::RefCell; use std::path::Path; use std::path::PathBuf; +use store::Store; use store::{DiskStore, MemoryStore}; use tokio::runtime::Builder; use tokio::runtime::Runtime; use tokio::runtime::TaskExecutor; use tokio_timer::clock::Clock; -use types::{InteropEthSpec, MainnetEthSpec, MinimalEthSpec}; +use types::{EthSpec, InteropEthSpec, MainnetEthSpec, MinimalEthSpec}; /// Reads the configuration and initializes a `BeaconChain` with the required types and parameters. 
/// @@ -52,14 +50,7 @@ pub fn run_beacon_node( macro_rules! run_client { ($store: ty, $eth_spec: ty) => { - run::>( - &db_path, - client_config, - eth2_config, - executor, - runtime, - log, - ) + run::<$store, $eth_spec>(&db_path, client_config, eth2_config, executor, runtime, log) }; } @@ -82,7 +73,7 @@ pub fn run_beacon_node( } /// Performs the type-generic parts of launching a `BeaconChain`. -fn run( +fn run( db_path: &Path, client_config: ClientConfig, eth2_config: Eth2Config, @@ -91,12 +82,13 @@ fn run( log: &slog::Logger, ) -> error::Result<()> where - T: BeaconChainTypes + Clone, - T::Store: OpenDatabase, + S: Store + Clone + 'static + OpenDatabase, + E: EthSpec, { - let store = T::Store::open_database(&db_path)?; + let store = S::open_database(&db_path)?; - let client: Client = Client::new(client_config, eth2_config, store, log.clone(), &executor)?; + let client: Client = + Client::new(client_config, eth2_config, store, log.clone(), &executor)?; // run service until ctrl-c let (ctrlc_send, ctrlc_oneshot) = oneshot::channel(); diff --git a/beacon_node/websocket_server/Cargo.toml b/beacon_node/websocket_server/Cargo.toml index f846f62b7..a7bf85b12 100644 --- a/beacon_node/websocket_server/Cargo.toml +++ b/beacon_node/websocket_server/Cargo.toml @@ -7,10 +7,12 @@ edition = "2018" # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html [dependencies] +beacon_chain = { path = "../beacon_chain" } exit-future = "0.1.3" futures = "0.1.25" serde = "1.0" serde_derive = "1.0" +serde_json = "^1.0" slog = "^2.2.3" tokio = "0.1.16" types = { path = "../../eth2/types" } diff --git a/beacon_node/websocket_server/src/lib.rs b/beacon_node/websocket_server/src/lib.rs index eb28b10be..1c7d9ddb9 100644 --- a/beacon_node/websocket_server/src/lib.rs +++ b/beacon_node/websocket_server/src/lib.rs @@ -1,5 +1,7 @@ -use serde_derive::{Deserialize, Serialize}; +use beacon_chain::events::{EventHandler, EventKind}; +use serde::{Deserialize, 
Serialize}; use slog::{error, info, Logger}; +use std::marker::PhantomData; use std::net::Ipv4Addr; use std::thread; use types::EthSpec; @@ -25,19 +27,44 @@ impl Default for Config { } } -pub struct WebSocketSender { - sender: Sender, +pub struct WebSocketSender { + sender: Option, + _phantom: PhantomData, } -impl WebSocketSender { +impl WebSocketSender { + /// Creates a dummy websocket server that never starts and where all future calls are no-ops. + pub fn dummy() -> Self { + Self { + sender: None, + _phantom: PhantomData, + } + } + pub fn send_string(&self, string: String) -> Result<(), String> { - self.sender - .send(string) - .map_err(|e| format!("Unable to broadcast to websocket clients: {:?}", e)) + if let Some(sender) = &self.sender { + sender + .send(string) + .map_err(|e| format!("Unable to broadcast to websocket clients: {:?}", e)) + } else { + Ok(()) + } } } -pub fn start_server(config: &Config, log: &Logger) -> Result { +impl EventHandler for WebSocketSender { + fn register(&self, kind: EventKind) -> Result<(), String> { + self.send_string( + serde_json::to_string(&kind) + .map_err(|e| format!("Unable to serialize event: {:?}", e))?, + ) + } +} + +pub fn start_server( + config: &Config, + log: &Logger, +) -> Result, String> { let server_string = format!("{}:{}", config.listen_address, config.port); info!( @@ -70,6 +97,7 @@ pub fn start_server(config: &Config, log: &Logger) -> Result Date: Sat, 14 Sep 2019 22:25:53 -0400 Subject: [PATCH 295/305] Fix signature serialization bug --- eth2/utils/bls/src/signature.rs | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/eth2/utils/bls/src/signature.rs b/eth2/utils/bls/src/signature.rs index 7c7f677d7..7a2bc6051 100644 --- a/eth2/utils/bls/src/signature.rs +++ b/eth2/utils/bls/src/signature.rs @@ -1,9 +1,8 @@ use super::{PublicKey, SecretKey, BLS_SIG_BYTE_SIZE}; -use hex::encode as hex_encode; use milagro_bls::Signature as RawSignature; use serde::de::{Deserialize, Deserializer}; use 
serde::ser::{Serialize, Serializer}; -use serde_hex::HexVisitor; +use serde_hex::{encode as hex_encode, HexVisitor}; use ssz::{ssz_encode, Decode, DecodeError, Encode}; /// A single BLS signature. From da26341011eca01064ec1579e7cdc345be433204 Mon Sep 17 00:00:00 2001 From: Paul Hauner Date: Sat, 14 Sep 2019 22:26:09 -0400 Subject: [PATCH 296/305] Add beacon chain event for finalization --- beacon_node/beacon_chain/src/beacon_chain.rs | 57 +++++++++++--------- beacon_node/beacon_chain/src/events.rs | 6 ++- 2 files changed, 37 insertions(+), 26 deletions(-) diff --git a/beacon_node/beacon_chain/src/beacon_chain.rs b/beacon_node/beacon_chain/src/beacon_chain.rs index 33205e97c..d3f5e9d7d 100644 --- a/beacon_node/beacon_chain/src/beacon_chain.rs +++ b/beacon_node/beacon_chain/src/beacon_chain.rs @@ -1379,8 +1379,10 @@ impl BeaconChain { let previous_slot = self.head().beacon_block.slot; let new_slot = beacon_block.slot; + let is_reorg = self.head().beacon_block_root != beacon_block.parent_root; + // If we switched to a new chain (instead of building atop the present chain). - if self.head().beacon_block_root != beacon_block.parent_root { + if is_reorg { metrics::inc_counter(&metrics::FORK_CHOICE_REORG_COUNT); warn!( self.log, @@ -1415,12 +1417,34 @@ impl BeaconChain { new_epoch: new_finalized_epoch, }) } else { - self.update_canonical_head(CheckPoint { + let previous_head_beacon_block_root = self.canonical_head.read().beacon_block_root; + let current_head_beacon_block_root = beacon_block_root; + + let mut new_head = CheckPoint { beacon_block, beacon_block_root, beacon_state, beacon_state_root, - })?; + }; + + new_head.beacon_state.build_all_caches(&self.spec)?; + + let timer = metrics::start_timer(&metrics::UPDATE_HEAD_TIMES); + + // Update the checkpoint that stores the head of the chain at the time it received the + // block. + *self.canonical_head.write() = new_head; + + metrics::stop_timer(timer); + + // Save `self` to `self.store`. 
+ self.persist()?; + + let _ = self.event_handler.register(EventKind::BeaconHeadChanged { + reorg: is_reorg, + previous_head_beacon_block_root, + current_head_beacon_block_root, + }); if new_finalized_epoch != old_finalized_epoch { self.after_finalization(old_finalized_epoch, finalized_root)?; @@ -1442,28 +1466,6 @@ impl BeaconChain { result } - /// Update the canonical head to `new_head`. - fn update_canonical_head(&self, mut new_head: CheckPoint) -> Result<(), Error> { - let timer = metrics::start_timer(&metrics::UPDATE_HEAD_TIMES); - - new_head.beacon_state.build_all_caches(&self.spec)?; - - trace!(self.log, "Taking write lock on head"); - - // Update the checkpoint that stores the head of the chain at the time it received the - // block. - *self.canonical_head.write() = new_head; - - trace!(self.log, "Dropping write lock on head"); - - // Save `self` to `self.store`. - self.persist()?; - - metrics::stop_timer(timer); - - Ok(()) - } - /// Called after `self` has had a new block finalized. /// /// Performs pruning and finality-based optimizations. 
@@ -1495,6 +1497,11 @@ impl BeaconChain { self.op_pool.prune_all(&finalized_state, &self.spec); + let _ = self.event_handler.register(EventKind::BeaconFinalization { + epoch: new_finalized_epoch, + root: finalized_block_root, + }); + Ok(()) } } diff --git a/beacon_node/beacon_chain/src/events.rs b/beacon_node/beacon_chain/src/events.rs index d690eabf1..c93a13c8a 100644 --- a/beacon_node/beacon_chain/src/events.rs +++ b/beacon_node/beacon_chain/src/events.rs @@ -1,6 +1,6 @@ use serde_derive::{Deserialize, Serialize}; use std::marker::PhantomData; -use types::{Attestation, BeaconBlock, EthSpec, Hash256}; +use types::{Attestation, BeaconBlock, Epoch, EthSpec, Hash256}; pub trait EventHandler: Sized + Send + Sync { fn register(&self, kind: EventKind) -> Result<(), String>; @@ -33,6 +33,10 @@ pub enum EventKind { current_head_beacon_block_root: Hash256, previous_head_beacon_block_root: Hash256, }, + BeaconFinalization { + epoch: Epoch, + root: Hash256, + }, BeaconBlockImported { block_root: Hash256, block: Box>, From 9c5eded1ab2f003d5833f025dfc2363bcbf4eacc Mon Sep 17 00:00:00 2001 From: Paul Hauner Date: Sat, 14 Sep 2019 22:57:46 -0400 Subject: [PATCH 297/305] Add websocket config to CLI --- beacon_node/client/src/config.rs | 1 + beacon_node/src/config.rs | 1 + beacon_node/src/main.rs | 25 +++++++++++- beacon_node/websocket_server/Cargo.toml | 1 + beacon_node/websocket_server/src/config.rs | 45 ++++++++++++++++++++++ beacon_node/websocket_server/src/lib.rs | 22 +---------- 6 files changed, 74 insertions(+), 21 deletions(-) create mode 100644 beacon_node/websocket_server/src/config.rs diff --git a/beacon_node/client/src/config.rs b/beacon_node/client/src/config.rs index 1e07e7cf2..997808cb4 100644 --- a/beacon_node/client/src/config.rs +++ b/beacon_node/client/src/config.rs @@ -173,6 +173,7 @@ impl Config { self.network.apply_cli_args(args)?; self.rpc.apply_cli_args(args)?; self.rest_api.apply_cli_args(args)?; + self.websocket_server.apply_cli_args(args)?; if let 
Some(log_file) = args.value_of("logfile") { self.log_file = PathBuf::from(log_file); diff --git a/beacon_node/src/config.rs b/beacon_node/src/config.rs index 978e029e7..5cfb45287 100644 --- a/beacon_node/src/config.rs +++ b/beacon_node/src/config.rs @@ -552,6 +552,7 @@ impl ConfigBuilder { self.client_config.network.discovery_port += bump; self.client_config.rpc.port += bump; self.client_config.rest_api.port += bump; + self.client_config.websocket_server.port += bump; } if self.eth2_config.spec_constants != self.client_config.spec_constants { diff --git a/beacon_node/src/main.rs b/beacon_node/src/main.rs index bb88e8f92..7bc7e8abe 100644 --- a/beacon_node/src/main.rs +++ b/beacon_node/src/main.rs @@ -147,7 +147,7 @@ fn main() { .conflicts_with("port-bump") .takes_value(true), ) - /* Client related arguments */ + /* REST API related arguments */ .arg( Arg::with_name("no-api") .long("no-api") @@ -169,6 +169,29 @@ fn main() { .conflicts_with("port-bump") .takes_value(true), ) + /* Websocket related arguments */ + .arg( + Arg::with_name("no-ws") + .long("no-ws") + .help("Disable websocket server.") + .takes_value(false), + ) + .arg( + Arg::with_name("ws-address") + .long("ws-address") + .value_name("ADDRESS") + .help("Set the listen address for the websocket server.") + .conflicts_with_all(&["no-ws"]) + .takes_value(true), + ) + .arg( + Arg::with_name("ws-port") + .long("ws-port") + .value_name("PORT") + .help("Set the listen TCP port for the websocket server.") + .conflicts_with_all(&["no-ws", "port-bump"]) + .takes_value(true), + ) /* * Eth1 Integration diff --git a/beacon_node/websocket_server/Cargo.toml b/beacon_node/websocket_server/Cargo.toml index a7bf85b12..48f046e07 100644 --- a/beacon_node/websocket_server/Cargo.toml +++ b/beacon_node/websocket_server/Cargo.toml @@ -8,6 +8,7 @@ edition = "2018" [dependencies] beacon_chain = { path = "../beacon_chain" } +clap = "2.32.0" exit-future = "0.1.3" futures = "0.1.25" serde = "1.0" diff --git 
a/beacon_node/websocket_server/src/config.rs b/beacon_node/websocket_server/src/config.rs new file mode 100644 index 000000000..c07f0da83 --- /dev/null +++ b/beacon_node/websocket_server/src/config.rs @@ -0,0 +1,45 @@ +use clap::ArgMatches; +use serde::{Deserialize, Serialize}; +use std::net::Ipv4Addr; + +/// The core configuration of a Lighthouse beacon node. +#[derive(Debug, Clone, Serialize, Deserialize)] +pub struct Config { + pub enabled: bool, + /// The IPv4 address the REST API HTTP server will listen on. + pub listen_address: Ipv4Addr, + /// The port the REST API HTTP server will listen on. + pub port: u16, +} + +impl Default for Config { + fn default() -> Self { + Config { + enabled: true, + listen_address: Ipv4Addr::new(127, 0, 0, 1), + port: 5053, + } + } +} + +impl Config { + pub fn apply_cli_args(&mut self, args: &ArgMatches) -> Result<(), &'static str> { + if args.is_present("no-ws") { + self.enabled = false; + } + + if let Some(rpc_address) = args.value_of("ws-address") { + self.listen_address = rpc_address + .parse::() + .map_err(|_| "ws-address is not a valid IPv4 address.")?; + } + + if let Some(rpc_port) = args.value_of("ws-port") { + self.port = rpc_port + .parse::() + .map_err(|_| "ws-port is not a valid u16.")?; + } + + Ok(()) + } +} diff --git a/beacon_node/websocket_server/src/lib.rs b/beacon_node/websocket_server/src/lib.rs index 1c7d9ddb9..ad9cabf4a 100644 --- a/beacon_node/websocket_server/src/lib.rs +++ b/beacon_node/websocket_server/src/lib.rs @@ -1,31 +1,13 @@ use beacon_chain::events::{EventHandler, EventKind}; -use serde::{Deserialize, Serialize}; use slog::{error, info, Logger}; use std::marker::PhantomData; -use std::net::Ipv4Addr; use std::thread; use types::EthSpec; use ws::{Sender, WebSocket}; -/// The core configuration of a Lighthouse beacon node. -#[derive(Debug, Clone, Serialize, Deserialize)] -pub struct Config { - pub enabled: bool, - /// The IPv4 address the REST API HTTP server will listen on. 
- pub listen_address: Ipv4Addr, - /// The port the REST API HTTP server will listen on. - pub port: u16, -} +mod config; -impl Default for Config { - fn default() -> Self { - Config { - enabled: true, - listen_address: Ipv4Addr::new(127, 0, 0, 1), - port: 5053, - } - } -} +pub use config::Config; pub struct WebSocketSender { sender: Option, From 33e62fb84340b65f9dc509252420386a638c8047 Mon Sep 17 00:00:00 2001 From: Paul Hauner Date: Sat, 14 Sep 2019 23:36:56 -0400 Subject: [PATCH 298/305] Add websockets page to book --- book/src/SUMMARY.md | 1 + book/src/websockets.md | 108 +++++++++++++++++++++++++++++++++++++++++ 2 files changed, 109 insertions(+) create mode 100644 book/src/websockets.md diff --git a/book/src/SUMMARY.md b/book/src/SUMMARY.md index 4ffa694cd..01613f9fd 100644 --- a/book/src/SUMMARY.md +++ b/book/src/SUMMARY.md @@ -2,6 +2,7 @@ * [Introduction](./intro.md) * [Development Environment](./setup.md) +* [Websocket Interface](./websockets.md) * [Simple Local Testnet](./simple-testnet.md) * [Interop](./interop.md) * [Environment](./interop-environment.md) diff --git a/book/src/websockets.md b/book/src/websockets.md new file mode 100644 index 000000000..2b91bd88f --- /dev/null +++ b/book/src/websockets.md @@ -0,0 +1,108 @@ +# Websocket Interface + +By default, a Lighthouse `beacon_node` exposes a websocket server on `localhost:5053`. + +The following CLI flags control the websocket server: + +- `--no-ws`: disable the websocket server. +- `--ws-port`: specify the listen port of the server. +- `--ws-address`: specify the listen address of the server. + +All clients connected to the websocket server will receive the same stream of events, all triggered +by the `BeaconChain`. Each event is a JSON object with the following schema: + +```json +{ + "event": "string", + "data": "object" +} +``` + +## Events + +The following events may be emitted: + +### Beacon Head Changed + +Occurs whenever the canonical head of the beacon chain changes. 
+ +```json +{ + "event": "beacon_head_changed", + "data": { + "reorg": "boolean", + "current_head_beacon_block_root": "string", + "previous_head_beacon_block_root": "string" + } +} +``` + +### Beacon Finalization + +Occurs whenever the finalized checkpoint of the canonical head changes. + +```json +{ + "event": "beacon_finalization", + "data": { + "epoch": "number", + "root": "string" + } +} +``` + +### Beacon Block Imported + +Occurs whenever the beacon node imports a valid block. + +```json +{ + "event": "beacon_block_imported", + "data": { + "block": "object" + } +} +``` + +### Beacon Block Rejected + +Occurs whenever the beacon node rejects a block because it is invalid or an +error occurred during validation. + +```json +{ + "event": "beacon_block_rejected", + "data": { + "reason": "string", + "block": "object" + } +} +``` + +### Beacon Attestation Imported + +Occurs whenever the beacon node imports a valid attestation. + +```json +{ + "event": "beacon_attestation_imported", + "data": { + "attestation": "object" + } +} +``` + +### Beacon Attestation Rejected + +Occurs whenever the beacon node rejects an attestation because it is invalid or +an error occurred during validation. 
+ +```json +{ + "event": "beacon_attestation_rejected", + "data": { + "reason": "string", + "attestation": "object" + } +} +``` From 1b497e2e24269c73e8db5f2c3ccd3cd5b78efe93 Mon Sep 17 00:00:00 2001 From: Paul Hauner Date: Sun, 15 Sep 2019 09:32:27 -0400 Subject: [PATCH 299/305] Gracefully shutdown the websocket server --- beacon_node/client/src/lib.rs | 19 +++++++--- beacon_node/websocket_server/src/lib.rs | 48 ++++++++++++++++++++----- 2 files changed, 54 insertions(+), 13 deletions(-) diff --git a/beacon_node/client/src/lib.rs b/beacon_node/client/src/lib.rs index b4c7c9347..73b0e5aed 100644 --- a/beacon_node/client/src/lib.rs +++ b/beacon_node/client/src/lib.rs @@ -64,6 +64,8 @@ where pub slot_timer_exit_signal: Option, /// Signal to terminate the API pub api_exit_signal: Option, + /// Signal to terminate the websocket server + pub websocket_exit_signal: Option, /// The clients logger. log: slog::Logger, /* @@ -182,11 +184,17 @@ where InteropEth1ChainBackend::new(String::new()).map_err(|e| format!("{:?}", e))?; // Start the websocket server. - let websocket_sender: WebSocketSender = if client_config.websocket_server.enabled { - websocket_server::start_server(&client_config.websocket_server, &log)? 
- } else { - WebSocketSender::dummy() - }; + let (websocket_sender, websocket_exit_signal): (WebSocketSender, Option<_>) = + if client_config.websocket_server.enabled { + let (sender, exit) = websocket_server::start_server( + &client_config.websocket_server, + executor, + &log, + )?; + (sender, Some(exit)) + } else { + (WebSocketSender::dummy(), None) + }; let beacon_chain: Arc>> = Arc::new( beacon_chain_builder @@ -278,6 +286,7 @@ where rpc_exit_signal, slot_timer_exit_signal: Some(slot_timer_exit_signal), api_exit_signal, + websocket_exit_signal, log, network, }) diff --git a/beacon_node/websocket_server/src/lib.rs b/beacon_node/websocket_server/src/lib.rs index ad9cabf4a..c161224c7 100644 --- a/beacon_node/websocket_server/src/lib.rs +++ b/beacon_node/websocket_server/src/lib.rs @@ -1,7 +1,9 @@ use beacon_chain::events::{EventHandler, EventKind}; -use slog::{error, info, Logger}; +use futures::Future; +use slog::{debug, error, info, warn, Logger}; use std::marker::PhantomData; use std::thread; +use tokio::runtime::TaskExecutor; use types::EthSpec; use ws::{Sender, WebSocket}; @@ -45,8 +47,9 @@ impl EventHandler for WebSocketSender { pub fn start_server( config: &Config, + executor: &TaskExecutor, log: &Logger, -) -> Result, String> { +) -> Result<(WebSocketSender, exit_future::Signal), String> { let server_string = format!("{}:{}", config.listen_address, config.port); info!( @@ -61,12 +64,38 @@ pub fn start_server( let broadcaster = server.broadcaster(); + // Produce a signal/channel that can gracefully shutdown the websocket server. 
+ let exit_signal = { + let (exit_signal, exit) = exit_future::signal(); + + let log_inner = log.clone(); + let broadcaster_inner = server.broadcaster(); + let exit_future = exit.and_then(move |_| { + if let Err(e) = broadcaster_inner.shutdown() { + warn!( + log_inner, + "Websocket server errored on shutdown"; + "error" => format!("{:?}", e) + ); + } else { + info!(log_inner, "Websocket server shutdown"); + } + Ok(()) + }); + + // Place a future on the executor that will shutdown the websocket server when the + // application exits. + executor.spawn(exit_future); + + exit_signal + }; + let log_inner = log.clone(); let _handle = thread::spawn(move || match server.listen(server_string) { Ok(_) => { - info!( + debug!( log_inner, - "Websocket server stopped"; + "Websocket server thread stopped"; ); } Err(e) => { @@ -78,8 +107,11 @@ pub fn start_server( } }); - Ok(WebSocketSender { - sender: Some(broadcaster), - _phantom: PhantomData, - }) + Ok(( + WebSocketSender { + sender: Some(broadcaster), + _phantom: PhantomData, + }, + exit_signal, + )) } From 8ceb2e3d9533e60b684905cb8f7526b6e1359c8c Mon Sep 17 00:00:00 2001 From: Paul Hauner Date: Sat, 21 Sep 2019 11:21:47 +1000 Subject: [PATCH 300/305] Refactor slot clock to remove underflow Previously I had used `Instant` to refer to the genesis time. 
--- beacon_node/beacon_chain/src/beacon_chain.rs | 14 ++- eth2/utils/slot_clock/src/lib.rs | 37 +++---- .../slot_clock/src/system_time_slot_clock.rs | 97 +++++++++++-------- .../slot_clock/src/testing_slot_clock.rs | 8 +- validator_client/src/service.rs | 9 +- 5 files changed, 81 insertions(+), 84 deletions(-) diff --git a/beacon_node/beacon_chain/src/beacon_chain.rs b/beacon_node/beacon_chain/src/beacon_chain.rs index d3f5e9d7d..9fa907796 100644 --- a/beacon_node/beacon_chain/src/beacon_chain.rs +++ b/beacon_node/beacon_chain/src/beacon_chain.rs @@ -158,12 +158,11 @@ impl BeaconChain { )); // Slot clock - let slot_clock = T::SlotClock::from_eth2_genesis( + let slot_clock = T::SlotClock::new( spec.genesis_slot, - genesis_state.genesis_time, + Duration::from_secs(genesis_state.genesis_time), Duration::from_millis(spec.milliseconds_per_slot), - ) - .map_err(|_| Error::SlotClockDidNotStart)?; + ); info!(log, "Beacon chain initialized from genesis"; "validator_count" => genesis_state.validators.len(), @@ -202,12 +201,11 @@ impl BeaconChain { let state = &p.canonical_head.beacon_state; - let slot_clock = T::SlotClock::from_eth2_genesis( + let slot_clock = T::SlotClock::new( spec.genesis_slot, - state.genesis_time, + Duration::from_secs(state.genesis_time), Duration::from_millis(spec.milliseconds_per_slot), - ) - .map_err(|_| Error::SlotClockDidNotStart)?; + ); let last_finalized_root = p.canonical_head.beacon_state.finalized_checkpoint.root; let last_finalized_block = &p.canonical_head.beacon_block; diff --git a/eth2/utils/slot_clock/src/lib.rs b/eth2/utils/slot_clock/src/lib.rs index 6192d1b6f..d31a1dc82 100644 --- a/eth2/utils/slot_clock/src/lib.rs +++ b/eth2/utils/slot_clock/src/lib.rs @@ -5,40 +5,27 @@ mod metrics; mod system_time_slot_clock; mod testing_slot_clock; -use std::time::{Duration, Instant, SystemTime, SystemTimeError, UNIX_EPOCH}; +use std::time::Duration; pub use crate::system_time_slot_clock::SystemTimeSlotClock; pub use 
crate::testing_slot_clock::TestingSlotClock; pub use metrics::scrape_for_metrics; pub use types::Slot; +/// A clock that reports the current slot. +/// +/// The clock is not required to be monotonically increasing and may go backwards. pub trait SlotClock: Send + Sync + Sized { - fn from_eth2_genesis( - genesis_slot: Slot, - genesis_seconds: u64, - slot_duration: Duration, - ) -> Result { - let duration_between_now_and_unix_epoch = SystemTime::now().duration_since(UNIX_EPOCH)?; - let duration_between_unix_epoch_and_genesis = Duration::from_secs(genesis_seconds); - - let genesis_instant = if duration_between_now_and_unix_epoch - < duration_between_unix_epoch_and_genesis - { - Instant::now() - + (duration_between_unix_epoch_and_genesis - duration_between_now_and_unix_epoch) - } else { - Instant::now() - - (duration_between_now_and_unix_epoch - duration_between_unix_epoch_and_genesis) - }; - - Ok(Self::new(genesis_slot, genesis_instant, slot_duration)) - } - - fn new(genesis_slot: Slot, genesis: Instant, slot_duration: Duration) -> Self; + /// Creates a new slot clock where the first slot is `genesis_slot`, genesis occurred + /// `genesis_duration` after the `UNIX_EPOCH` and each slot is `slot_duration` apart. + fn new(genesis_slot: Slot, genesis_duration: Duration, slot_duration: Duration) -> Self; + /// Returns the slot at this present time. fn now(&self) -> Option; - fn duration_to_next_slot(&self) -> Option; - + /// Returns the duration between slots. fn slot_duration(&self) -> Duration; + + /// Returns the duration until the next slot. 
+ fn duration_to_next_slot(&self) -> Option; } diff --git a/eth2/utils/slot_clock/src/system_time_slot_clock.rs b/eth2/utils/slot_clock/src/system_time_slot_clock.rs index aae12c18c..d2ebd42ea 100644 --- a/eth2/utils/slot_clock/src/system_time_slot_clock.rs +++ b/eth2/utils/slot_clock/src/system_time_slot_clock.rs @@ -1,5 +1,5 @@ use super::SlotClock; -use std::time::{Duration, Instant}; +use std::time::{Duration, SystemTime, UNIX_EPOCH}; use types::Slot; pub use std::time::SystemTimeError; @@ -8,53 +8,60 @@ pub use std::time::SystemTimeError; #[derive(Clone)] pub struct SystemTimeSlotClock { genesis_slot: Slot, - genesis: Instant, + genesis_duration: Duration, slot_duration: Duration, } impl SlotClock for SystemTimeSlotClock { - fn new(genesis_slot: Slot, genesis: Instant, slot_duration: Duration) -> Self { + fn new(genesis_slot: Slot, genesis_duration: Duration, slot_duration: Duration) -> Self { if slot_duration.as_millis() == 0 { panic!("SystemTimeSlotClock cannot have a < 1ms slot duration."); } Self { genesis_slot, - genesis, + genesis_duration, slot_duration, } } fn now(&self) -> Option { - let now = Instant::now(); + let now = SystemTime::now().duration_since(UNIX_EPOCH).ok()?; + let genesis = self.genesis_duration; - if now < self.genesis { - None - } else { - let slot = Slot::from( - (now.duration_since(self.genesis).as_millis() / self.slot_duration.as_millis()) - as u64, - ); + if now > genesis { + let since_genesis = now + .checked_sub(genesis) + .expect("Control flow ensures now is greater than genesis"); + let slot = + Slot::from((since_genesis.as_millis() / self.slot_duration.as_millis()) as u64); Some(slot + self.genesis_slot) + } else { + None } } fn duration_to_next_slot(&self) -> Option { - let now = Instant::now(); - if now < self.genesis { - Some(self.genesis - now) + let now = SystemTime::now().duration_since(UNIX_EPOCH).ok()?; + let genesis = self.genesis_duration; + + let slot_start = |slot: Slot| -> Duration { + let slot = slot.as_u64() as 
u32; + genesis + slot * self.slot_duration + }; + + if now > genesis { + Some( + slot_start(self.now()? + 1) + .checked_sub(now) + .expect("The next slot cannot start before now"), + ) } else { - let duration_since_genesis = now - self.genesis; - let millis_since_genesis = duration_since_genesis.as_millis(); - let millis_per_slot = self.slot_duration.as_millis(); - - let current_slot = millis_since_genesis / millis_per_slot; - let next_slot = current_slot + 1; - - let next_slot = - self.genesis + Duration::from_millis((next_slot * millis_per_slot) as u64); - - Some(next_slot.duration_since(now)) + Some( + genesis + .checked_sub(now) + .expect("Control flow ensures genesis is greater than or equal to now"), + ) } } @@ -75,30 +82,28 @@ mod tests { fn test_slot_now() { let genesis_slot = Slot::new(0); - let prior_genesis = - |seconds_prior: u64| Instant::now() - Duration::from_secs(seconds_prior); + let prior_genesis = |milliseconds_prior: u64| { + SystemTime::now() + .duration_since(UNIX_EPOCH) + .expect("should get system time") + - Duration::from_millis(milliseconds_prior) + }; let clock = SystemTimeSlotClock::new(genesis_slot, prior_genesis(0), Duration::from_secs(1)); assert_eq!(clock.now(), Some(Slot::new(0))); let clock = - SystemTimeSlotClock::new(genesis_slot, prior_genesis(5), Duration::from_secs(1)); + SystemTimeSlotClock::new(genesis_slot, prior_genesis(5_000), Duration::from_secs(1)); assert_eq!(clock.now(), Some(Slot::new(5))); - let clock = SystemTimeSlotClock::new( - genesis_slot, - Instant::now() - Duration::from_millis(500), - Duration::from_secs(1), - ); + let clock = + SystemTimeSlotClock::new(genesis_slot, prior_genesis(500), Duration::from_secs(1)); assert_eq!(clock.now(), Some(Slot::new(0))); assert!(clock.duration_to_next_slot().unwrap() < Duration::from_millis(500)); - let clock = SystemTimeSlotClock::new( - genesis_slot, - Instant::now() - Duration::from_millis(1_500), - Duration::from_secs(1), - ); + let clock = + 
SystemTimeSlotClock::new(genesis_slot, prior_genesis(1_500), Duration::from_secs(1)); assert_eq!(clock.now(), Some(Slot::new(1))); assert!(clock.duration_to_next_slot().unwrap() < Duration::from_millis(500)); } @@ -106,18 +111,26 @@ mod tests { #[test] #[should_panic] fn zero_seconds() { - SystemTimeSlotClock::new(Slot::new(0), Instant::now(), Duration::from_secs(0)); + SystemTimeSlotClock::new(Slot::new(0), Duration::from_secs(0), Duration::from_secs(0)); } #[test] #[should_panic] fn zero_millis() { - SystemTimeSlotClock::new(Slot::new(0), Instant::now(), Duration::from_millis(0)); + SystemTimeSlotClock::new( + Slot::new(0), + Duration::from_secs(0), + Duration::from_millis(0), + ); } #[test] #[should_panic] fn less_than_one_millis() { - SystemTimeSlotClock::new(Slot::new(0), Instant::now(), Duration::from_nanos(999)); + SystemTimeSlotClock::new( + Slot::new(0), + Duration::from_secs(0), + Duration::from_nanos(999), + ); } } diff --git a/eth2/utils/slot_clock/src/testing_slot_clock.rs b/eth2/utils/slot_clock/src/testing_slot_clock.rs index d90cb157a..0697ec2bc 100644 --- a/eth2/utils/slot_clock/src/testing_slot_clock.rs +++ b/eth2/utils/slot_clock/src/testing_slot_clock.rs @@ -1,6 +1,6 @@ use super::SlotClock; use std::sync::RwLock; -use std::time::{Duration, Instant}; +use std::time::Duration; use types::Slot; /// A slot clock where the slot is manually set instead of being determined by the system time. 
@@ -21,7 +21,7 @@ impl TestingSlotClock { } impl SlotClock for TestingSlotClock { - fn new(genesis_slot: Slot, _genesis: Instant, _slot_duration: Duration) -> Self { + fn new(genesis_slot: Slot, _genesis_duration: Duration, _slot_duration: Duration) -> Self { TestingSlotClock { slot: RwLock::new(genesis_slot), } @@ -49,7 +49,9 @@ mod tests { #[test] fn test_slot_now() { - let clock = TestingSlotClock::new(Slot::new(10), Instant::now(), Duration::from_secs(0)); + let null = Duration::from_secs(0); + + let clock = TestingSlotClock::new(Slot::new(10), null, null); assert_eq!(clock.now(), Some(Slot::new(10))); clock.set_slot(123); assert_eq!(clock.now(), Some(Slot::new(123))); diff --git a/validator_client/src/service.rs b/validator_client/src/service.rs index fd8de71ca..ec6a6a9f1 100644 --- a/validator_client/src/service.rs +++ b/validator_client/src/service.rs @@ -159,14 +159,11 @@ impl Service(|e| { - format!("Unable to start slot clock: {}.", e).into() - })?; + ); /* Generate the duties manager */ From b316086be0e17207be068fbd17d8df111d6a4803 Mon Sep 17 00:00:00 2001 From: Paul Hauner Date: Mon, 23 Sep 2019 22:20:47 +1000 Subject: [PATCH 301/305] Add write_ssz_files feature to beacon_chain crate --- beacon_node/beacon_chain/Cargo.toml | 4 ++++ beacon_node/beacon_chain/src/beacon_chain.rs | 2 +- 2 files changed, 5 insertions(+), 1 deletion(-) diff --git a/beacon_node/beacon_chain/Cargo.toml b/beacon_node/beacon_chain/Cargo.toml index ae89ac1e1..02a45d137 100644 --- a/beacon_node/beacon_chain/Cargo.toml +++ b/beacon_node/beacon_chain/Cargo.toml @@ -4,6 +4,10 @@ version = "0.1.0" authors = ["Paul Hauner ", "Age Manning "] edition = "2018" +[features] + +write_ssz_files = [] # Writes debugging .ssz files to /tmp during block processing. 
+ [dependencies] eth2_config = { path = "../../eth2/utils/eth2_config" } merkle_proof = { path = "../../eth2/utils/merkle_proof" } diff --git a/beacon_node/beacon_chain/src/beacon_chain.rs b/beacon_node/beacon_chain/src/beacon_chain.rs index 9fa907796..d9403cec9 100644 --- a/beacon_node/beacon_chain/src/beacon_chain.rs +++ b/beacon_node/beacon_chain/src/beacon_chain.rs @@ -42,7 +42,7 @@ pub const GRAFFITI: &str = "sigp/lighthouse-0.0.0-prerelease"; /// files in the temp directory. /// /// Only useful for testing. -const WRITE_BLOCK_PROCESSING_SSZ: bool = true; +const WRITE_BLOCK_PROCESSING_SSZ: bool = cfg!(feature = "write_ssz_files"); #[derive(Debug, PartialEq)] pub enum BlockProcessingOutcome { From e7a580393c1054086dc7fd2ab13307e27ce13eb8 Mon Sep 17 00:00:00 2001 From: Paul Hauner Date: Mon, 23 Sep 2019 22:21:34 +1000 Subject: [PATCH 302/305] Remove unnecessary drop() --- beacon_node/beacon_chain/src/beacon_chain.rs | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/beacon_node/beacon_chain/src/beacon_chain.rs b/beacon_node/beacon_chain/src/beacon_chain.rs index d9403cec9..f0d158964 100644 --- a/beacon_node/beacon_chain/src/beacon_chain.rs +++ b/beacon_node/beacon_chain/src/beacon_chain.rs @@ -373,8 +373,7 @@ impl BeaconChain { Ok(head_state) } else if slot > head_state.slot { let head_state_slot = head_state.slot; - let mut state = head_state.clone(); - drop(head_state); + let mut state = head_state; while state.slot < slot { match per_slot_processing(&mut state, &self.spec) { Ok(()) => (), From 4fccec158aaf2e63b779ddee268633d59f4e16c8 Mon Sep 17 00:00:00 2001 From: Paul Hauner Date: Mon, 23 Sep 2019 22:22:19 +1000 Subject: [PATCH 303/305] Fix wasted iterations when getting previous state --- beacon_node/beacon_chain/src/beacon_chain.rs | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/beacon_node/beacon_chain/src/beacon_chain.rs b/beacon_node/beacon_chain/src/beacon_chain.rs index f0d158964..731165f81 100644 --- 
a/beacon_node/beacon_chain/src/beacon_chain.rs +++ b/beacon_node/beacon_chain/src/beacon_chain.rs @@ -393,7 +393,8 @@ impl BeaconChain { } else { let state_root = self .rev_iter_state_roots() - .find(|(_root, s)| *s == slot) + .take_while(|(_root, current_slot)| *current_slot >= slot) + .find(|(_root, current_slot)| *current_slot == slot) .map(|(root, _slot)| root) .ok_or_else(|| Error::NoStateForSlot(slot))?; From b4806d27eb576064402a81c39444b096698fc640 Mon Sep 17 00:00:00 2001 From: Paul Hauner Date: Thu, 26 Sep 2019 10:46:56 +1000 Subject: [PATCH 304/305] Fix comments from Michael --- beacon_node/client/src/lib.rs | 4 --- beacon_node/eth2-libp2p/src/service.rs | 1 - beacon_node/network/src/message_handler.rs | 6 +--- beacon_node/network/src/sync/manager.rs | 36 ++++++++++------------ 4 files changed, 18 insertions(+), 29 deletions(-) diff --git a/beacon_node/client/src/lib.rs b/beacon_node/client/src/lib.rs index d7df3e0d6..fc5e9f860 100644 --- a/beacon_node/client/src/lib.rs +++ b/beacon_node/client/src/lib.rs @@ -68,10 +68,6 @@ where pub websocket_exit_signal: Option, /// The clients logger. log: slog::Logger, - /* - /// Marker to pin the beacon chain generics. 
- phantom: PhantomData, - */ } impl Client diff --git a/beacon_node/eth2-libp2p/src/service.rs b/beacon_node/eth2-libp2p/src/service.rs index bf1ed0123..f9c06a532 100644 --- a/beacon_node/eth2-libp2p/src/service.rs +++ b/beacon_node/eth2-libp2p/src/service.rs @@ -49,7 +49,6 @@ impl Service { }; // load the private key from CLI flag, disk or generate a new one - // let local_private_key = load_private_key(&config, &log); let local_peer_id = PeerId::from(local_keypair.public()); info!(log, "Libp2p Service"; "peer_id" => format!("{:?}", local_peer_id)); diff --git a/beacon_node/network/src/message_handler.rs b/beacon_node/network/src/message_handler.rs index 782d2129e..898304272 100644 --- a/beacon_node/network/src/message_handler.rs +++ b/beacon_node/network/src/message_handler.rs @@ -73,11 +73,7 @@ impl MessageHandler { .for_each(move |msg| Ok(handler.handle_message(msg))) .map_err(move |_| { debug!(log, "Network message handler terminated."); - }), /* - .then(move |_| { - debug!(log.clone(), "Message handler shutdown"); - }), - */ + }), ); Ok(handler_send) diff --git a/beacon_node/network/src/sync/manager.rs b/beacon_node/network/src/sync/manager.rs index 171d0fdf0..9e92ade76 100644 --- a/beacon_node/network/src/sync/manager.rs +++ b/beacon_node/network/src/sync/manager.rs @@ -648,26 +648,24 @@ impl SyncManager { // process queued block requests for (peer_id, block_requests) in self.import_queue.iter_mut() { - { - if block_requests.state == BlockRequestsState::Queued { - let request_id = self.current_req_id; - block_requests.state = BlockRequestsState::Pending(request_id); - self.current_req_id += 1; + if block_requests.state == BlockRequestsState::Queued { + let request_id = self.current_req_id; + block_requests.state = BlockRequestsState::Pending(request_id); + self.current_req_id += 1; - let request = BeaconBlocksRequest { - head_block_root: block_requests.target_head_root, - start_slot: block_requests.current_start_slot.as_u64(), - count: 
MAX_BLOCKS_PER_REQUEST, - step: 0, - }; - request_blocks( - &mut self.network, - &self.log, - peer_id.clone(), - request_id, - request, - ); - } + let request = BeaconBlocksRequest { + head_block_root: block_requests.target_head_root, + start_slot: block_requests.current_start_slot.as_u64(), + count: MAX_BLOCKS_PER_REQUEST, + step: 0, + }; + request_blocks( + &mut self.network, + &self.log, + peer_id.clone(), + request_id, + request, + ); } } } From 97a45cdcb8af1c79f114c20e0d2c9b095f0794ab Mon Sep 17 00:00:00 2001 From: Paul Hauner Date: Thu, 26 Sep 2019 13:00:31 +1000 Subject: [PATCH 305/305] Address Michael's comments --- eth2/utils/ssz/src/encode.rs | 4 ++++ lcli/src/pycli.rs | 44 ----------------------------------- lcli/src/transition_blocks.rs | 7 ------ 3 files changed, 4 insertions(+), 51 deletions(-) diff --git a/eth2/utils/ssz/src/encode.rs b/eth2/utils/ssz/src/encode.rs index 5113fb71a..52b3d9bfd 100644 --- a/eth2/utils/ssz/src/encode.rs +++ b/eth2/utils/ssz/src/encode.rs @@ -27,6 +27,10 @@ pub trait Encode { BYTES_PER_LENGTH_OFFSET } + /// Returns the size (in bytes) when `self` is serialized. + /// + /// Returns the same value as `self.as_ssz_bytes().len()` but this method is significantly more + /// efficient. fn ssz_bytes_len(&self) -> usize; /// Returns the full-form encoding of this object. diff --git a/lcli/src/pycli.rs b/lcli/src/pycli.rs index 4b1b32828..dda61dd72 100644 --- a/lcli/src/pycli.rs +++ b/lcli/src/pycli.rs @@ -22,50 +22,6 @@ pub fn run_pycli(matches: &ArgMatches) -> Result<(), String> { Ok(()) } -/* - * TODO: loading from file. 
- * -use regex::Regex; -use std::collections::HashMap; -use std::ffi::OsString; - -const BLOCK_PREFIX: &str = "block_"; -const PRE_PREFIX: &str = "state_pre_"; -const POST_PREFIX: &str = "state_post_"; - -struct Case { - pre: Option>, - post: Option>, - block: Option>, -} - -fn get_sets(dir: PathBuf) -> Result<(), String> { - let map: HashMap> = HashMap::new(); - - fs::read_dir(dir) - .map_err(|e| format!("Unable to read source directory: {:?}", e))? - .filter_map(Result::ok) - .map(|f| f.file_name().into_string()) - .filter_map(Result::ok) - .try_for_each(|filename| { - if filename.starts_with(BLOCK_PREFIX) { - let regex = Regex::new(r".*root0x(.........)") - .map_err(|e| format!("Failed to compile block regex: {:?}", e))?; - let captures = regex.captures(&filename). - // block - } else if filename.starts_with(PRE_PREFIX) { - dbg!("pre state"); - } else if filename.starts_with(POST_PREFIX) { - dbg!("post state"); - } else { - dbg!("unknown file"); - } - - Ok(()) - }) -} -*/ - /// A wrapper around Danny Ryan's `pycli` utility: /// /// https://github.com/djrtwo/pycli diff --git a/lcli/src/transition_blocks.rs b/lcli/src/transition_blocks.rs index d8b0974b4..01e639db8 100644 --- a/lcli/src/transition_blocks.rs +++ b/lcli/src/transition_blocks.rs @@ -41,13 +41,6 @@ pub fn run_transition_blocks(matches: &ArgMatches) -> Result<(), String> { .write_all(&post_state.as_ssz_bytes()) .map_err(|e| format!("Unable to write to output file: {:?}", e))?; - /* - println!( - "{}", - serde_yaml::to_string(&post_state).expect("Should serialize state") - ); - */ - Ok(()) }