Commit 72cf50d904: Update to latest master
@ -17,6 +17,7 @@ rust:
- nightly
matrix:
allow_failures:
- rust: beta
- rust: nightly
fast_finish: true
install:
@ -11,6 +11,7 @@ members = [
"eth2/utils/eth2_interop_keypairs",
"eth2/utils/logging",
"eth2/utils/eth2_hashing",
"eth2/utils/lighthouse_metrics",
"eth2/utils/merkle_proof",
"eth2/utils/int_to_bytes",
"eth2/utils/serde_hex",
@ -25,7 +26,6 @@ members = [
"beacon_node",
"beacon_node/store",
"beacon_node/client",
"beacon_node/http_server",
"beacon_node/rest_api",
"beacon_node/network",
"beacon_node/eth2-libp2p",
@ -7,7 +7,8 @@ edition = "2018"
[dependencies]
store = { path = "../store" }
parking_lot = "0.7"
prometheus = "^0.6"
lazy_static = "1.3.0"
lighthouse_metrics = { path = "../../eth2/utils/lighthouse_metrics" }
log = "0.4"
operation_pool = { path = "../../eth2/operation_pool" }
serde = "1.0"
@ -2,7 +2,7 @@ use crate::checkpoint::CheckPoint;
use crate::errors::{BeaconChainError as Error, BlockProductionError};
use crate::fork_choice::{Error as ForkChoiceError, ForkChoice};
use crate::iter::{ReverseBlockRootIterator, ReverseStateRootIterator};
use crate::metrics::Metrics;
use crate::metrics;
use crate::persisted_beacon_chain::{PersistedBeaconChain, BEACON_CHAIN_DB_KEY};
use lmd_ghost::LmdGhost;
use log::trace;
@ -77,7 +77,7 @@ pub enum AttestationProcessingOutcome {
Invalid(AttestationValidationError),
}

pub trait BeaconChainTypes {
pub trait BeaconChainTypes: Send + Sync + 'static {
type Store: store::Store;
type SlotClock: slot_clock::SlotClock;
type LmdGhost: LmdGhost<Self::Store, Self::EthSpec>;
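For context, a minimal sketch of what the stricter `Send + Sync + 'static` bound enables: a `BeaconChain<T>` can be placed behind an `Arc` and handed to other threads. The helper below is illustrative only and not part of this diff; it assumes the `BeaconChain`, `BeaconChainTypes` and `persist` items shown elsewhere in this commit.

use beacon_chain::{BeaconChain, BeaconChainTypes};
use std::sync::Arc;
use std::thread;

// Hypothetical helper: moving the Arc into a spawned thread relies on
// `BeaconChainTypes` (and therefore the chain's type parameter) being
// `Send + Sync + 'static`.
fn spawn_persist_task<T: BeaconChainTypes>(chain: Arc<BeaconChain<T>>) {
    thread::spawn(move || {
        // For example, periodically save the chain to its store.
        let _ = chain.persist();
    });
}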
@ -106,8 +106,6 @@ pub struct BeaconChain<T: BeaconChainTypes> {
/// A state-machine that is updated with information from the network and chooses a canonical
/// head block.
pub fork_choice: ForkChoice<T>,
/// Stores metrics about this `BeaconChain`.
pub metrics: Metrics,
/// Logging to CLI, etc.
log: Logger,
}
@ -157,7 +155,6 @@ impl<T: BeaconChainTypes> BeaconChain<T> {
canonical_head,
genesis_block_root,
fork_choice: ForkChoice::new(store.clone(), &genesis_block, genesis_block_root),
metrics: Metrics::new()?,
store,
log,
})
@ -195,7 +192,6 @@ impl<T: BeaconChainTypes> BeaconChain<T> {
canonical_head: RwLock::new(p.canonical_head),
state: RwLock::new(p.state),
genesis_block_root: p.genesis_block_root,
metrics: Metrics::new()?,
store,
log,
}))
@ -203,6 +199,8 @@ impl<T: BeaconChainTypes> BeaconChain<T> {

/// Attempt to save this instance to `self.store`.
pub fn persist(&self) -> Result<(), Error> {
let timer = metrics::start_timer(&metrics::PERSIST_CHAIN);

let p: PersistedBeaconChain<T> = PersistedBeaconChain {
canonical_head: self.canonical_head.read().clone(),
op_pool: PersistedOperationPool::from_operation_pool(&self.op_pool),
@ -213,6 +211,8 @@ impl<T: BeaconChainTypes> BeaconChain<T> {
let key = Hash256::from_slice(&BEACON_CHAIN_DB_KEY.as_bytes());
self.store.put(&key, &p)?;

metrics::stop_timer(timer);

Ok(())
}
@ -472,8 +472,8 @@ impl<T: BeaconChainTypes> BeaconChain<T> {
state: &BeaconState<T::EthSpec>,
) -> Result<AttestationData, Error> {
// Collect some metrics.
self.metrics.attestation_production_requests.inc();
let timer = self.metrics.attestation_production_times.start_timer();
metrics::inc_counter(&metrics::ATTESTATION_PRODUCTION_REQUESTS);
let timer = metrics::start_timer(&metrics::ATTESTATION_PRODUCTION_TIMES);

let slots_per_epoch = T::EthSpec::slots_per_epoch();
let current_epoch_start_slot = state.current_epoch().start_slot(slots_per_epoch);
@ -520,8 +520,8 @@ impl<T: BeaconChainTypes> BeaconChain<T> {
};

// Collect some metrics.
self.metrics.attestation_production_successes.inc();
timer.observe_duration();
metrics::inc_counter(&metrics::ATTESTATION_PRODUCTION_SUCCESSES);
metrics::stop_timer(timer);

Ok(AttestationData {
beacon_block_root: head_block_root,
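The hunks above show the pattern this commit applies throughout: the per-instance `self.metrics` struct is replaced with global, lazily initialised metrics plus free functions from `lighthouse_metrics`. A minimal sketch of that usage, assuming the `try_create_*`, `inc_counter`, `start_timer` and `stop_timer` helpers behave as they are used elsewhere in this diff; the metric names below are illustrative, not from the commit.

#[macro_use]
extern crate lazy_static;
use lighthouse_metrics::*;

lazy_static! {
    // `Result<...>` because metric creation can fail; the free functions tolerate that.
    pub static ref EXAMPLE_REQUESTS: Result<IntCounter> =
        try_create_int_counter("example_requests_total", "Count of example requests");
    pub static ref EXAMPLE_TIMES: Result<Histogram> =
        try_create_histogram("example_seconds", "Time spent on the example operation");
}

fn example_operation() {
    inc_counter(&EXAMPLE_REQUESTS);
    let timer = start_timer(&EXAMPLE_TIMES);
    // ... do the work being measured ...
    stop_timer(timer);
}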
@ -536,7 +536,7 @@ impl<T: BeaconChainTypes> BeaconChain<T> {
/// If valid, the attestation is added to `self.op_pool` and `self.fork_choice`.
///
/// Returns an `Ok(AttestationProcessingOutcome)` if the chain was able to make a determination
/// about the `attestation` (wether it was invalid or not). Returns an `Err` if the was an
/// about the `attestation` (whether it was invalid or not). Returns an `Err` if there was an
/// error during this process and no determination was able to be made.
///
/// ## Notes
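A sketch of how a caller might act on that distinction, using only outcome variants that appear in this diff; the surrounding match is illustrative and not part of this commit.

// Hypothetical caller of `process_attestation`.
match chain.process_attestation(attestation) {
    // A determination was made: the attestation was valid and has been stored.
    Ok(AttestationProcessingOutcome::Processed) => (),
    // A determination was made: the referenced head block is unknown, so the caller
    // might queue the attestation and request the block from the network.
    Ok(AttestationProcessingOutcome::UnknownHeadBlock { beacon_block_root: _ }) => (),
    // Any other determination (e.g., `Invalid`, `FinalizedSlot`) also arrives as `Ok`.
    Ok(_other) => (),
    // An `Err` means no determination could be made (e.g., a store failure); the
    // attestation should not be treated as invalid.
    Err(_e) => (),
}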
@ -547,125 +547,105 @@ impl<T: BeaconChainTypes> BeaconChain<T> {
|
||||
&self,
|
||||
attestation: Attestation<T::EthSpec>,
|
||||
) -> Result<AttestationProcessingOutcome, Error> {
|
||||
metrics::inc_counter(&metrics::ATTESTATION_PROCESSING_REQUESTS);
|
||||
let timer = metrics::start_timer(&metrics::ATTESTATION_PROCESSING_TIMES);
|
||||
|
||||
// From the store, load the attestation's "head block".
|
||||
//
|
||||
// An honest validator would have set this block to be the head of the chain (i.e., the
|
||||
// result of running fork choice).
|
||||
if let Some(attestation_head_block) = self
|
||||
let result = if let Some(attestation_head_block) = self
|
||||
.store
|
||||
.get::<BeaconBlock<T::EthSpec>>(&attestation.data.beacon_block_root)?
|
||||
{
|
||||
let finalized_epoch = self.head().beacon_state.finalized_checkpoint.epoch;
|
||||
|
||||
if attestation_head_block.slot
|
||||
<= finalized_epoch.start_slot(T::EthSpec::slots_per_epoch())
|
||||
{
|
||||
// Ignore any attestation where the slot of `data.beacon_block_root` is equal to or
|
||||
// prior to the finalized epoch.
|
||||
//
|
||||
// For any valid attestation if the `beacon_block_root` is prior to finalization, then
|
||||
// all other parameters (source, target, etc) must all be prior to finalization and
|
||||
// therefore no longer interesting.
|
||||
return Ok(AttestationProcessingOutcome::FinalizedSlot {
|
||||
attestation: attestation_head_block.epoch(),
|
||||
finalized: finalized_epoch,
|
||||
});
|
||||
}
|
||||
|
||||
// Attempt to process the attestation using the `self.head()` state.
|
||||
//
|
||||
// This is purely an effort to avoid loading a `BeaconState` unnecessarily from the DB.
|
||||
let optional_outcome: Option<Result<AttestationProcessingOutcome, Error>> = {
|
||||
// Take a read lock on the head beacon state.
|
||||
//
|
||||
// The purpose of this whole `let processed ...` block is to ensure that the read
|
||||
// lock is dropped if we don't end up using the head beacon state.
|
||||
let state = &self.head().beacon_state;
|
||||
// Take a read lock on the head beacon state.
|
||||
let state = &self.head().beacon_state;
|
||||
|
||||
// If it turns out that the attestation was made using the head state, then there
|
||||
// is no need to load a state from the database to process the attestation.
|
||||
//
|
||||
// Note: use the epoch of the target because it indicates which epoch the
|
||||
// attestation was created in. You cannot use the epoch of the head block, because
|
||||
// the block doesn't necessarily need to be in the same epoch as the attestation
|
||||
// (e.g., if there are skip slots between the epoch the block was created in and
|
||||
// the epoch for the attestation).
|
||||
//
|
||||
// This check also ensures that the slot for `data.beacon_block_root` is not higher
|
||||
// than `state.root` by ensuring that the block is in the history of `state`.
|
||||
if state.current_epoch() == attestation.data.target.epoch
|
||||
&& (attestation.data.beacon_block_root == self.head().beacon_block_root
|
||||
|| state
|
||||
.get_block_root(attestation_head_block.slot)
|
||||
.map(|root| *root == attestation.data.beacon_block_root)
|
||||
.unwrap_or_else(|_| false))
|
||||
{
|
||||
// The head state is able to be used to validate this attestation. No need to load
|
||||
// anything from the database.
|
||||
Some(self.process_attestation_for_state_and_block(
|
||||
attestation.clone(),
|
||||
state,
|
||||
&attestation_head_block,
|
||||
))
|
||||
} else {
|
||||
None
|
||||
}
|
||||
};
|
||||
// If it turns out that the attestation was made using the head state, then there
|
||||
// is no need to load a state from the database to process the attestation.
|
||||
//
|
||||
// Note: use the epoch of the target because it indicates which epoch the
|
||||
// attestation was created in. You cannot use the epoch of the head block, because
|
||||
// the block doesn't necessarily need to be in the same epoch as the attestation
|
||||
// (e.g., if there are skip slots between the epoch the block was created in and
|
||||
// the epoch for the attestation).
|
||||
//
|
||||
// This check also ensures that the slot for `data.beacon_block_root` is not higher
|
||||
// than `state.root` by ensuring that the block is in the history of `state`.
|
||||
if state.current_epoch() == attestation.data.target.epoch
|
||||
&& (attestation.data.beacon_block_root == self.head().beacon_block_root
|
||||
|| state
|
||||
.get_block_root(attestation_head_block.slot)
|
||||
.map(|root| *root == attestation.data.beacon_block_root)
|
||||
.unwrap_or_else(|_| false))
|
||||
{
|
||||
// The head state is able to be used to validate this attestation. No need to load
|
||||
// anything from the database.
|
||||
return self.process_attestation_for_state_and_block(
|
||||
attestation.clone(),
|
||||
state,
|
||||
&attestation_head_block,
|
||||
);
|
||||
}
|
||||
|
||||
if let Some(outcome) = optional_outcome {
|
||||
// Verification was already completed with an in-memory state. Return that result.
|
||||
outcome
|
||||
// Ensure the read-lock from `self.head()` is dropped.
|
||||
//
|
||||
// This is likely unnecessary, however it remains as a reminder to ensure this lock
|
||||
// isn't hogged.
|
||||
std::mem::drop(state);
|
||||
|
||||
// Use the `data.beacon_block_root` to load the state from the latest non-skipped
|
||||
// slot preceding the attestation's creation.
|
||||
//
|
||||
// This state is guaranteed to be in the same chain as the attestation, but it's
|
||||
// not guaranteed to be from the same slot or epoch as the attestation.
|
||||
let mut state: BeaconState<T::EthSpec> = self
|
||||
.store
|
||||
.get(&attestation_head_block.state_root)?
|
||||
.ok_or_else(|| Error::MissingBeaconState(attestation_head_block.state_root))?;
|
||||
|
||||
// Ensure the state loaded from the database matches the state of the attestation
|
||||
// head block.
|
||||
//
|
||||
// The state needs to be advanced from the current slot through to the epoch in
|
||||
// which the attestation was created in. It would be an error to try and use
|
||||
// `state.get_attestation_data_slot(..)` because the state matching the
|
||||
// `data.beacon_block_root` isn't necessarily in a nearby epoch to the attestation
|
||||
// (e.g., if there were lots of skip slots since the head of the chain and the
|
||||
// epoch creation epoch).
|
||||
for _ in state.slot.as_u64()
|
||||
..attestation
|
||||
.data
|
||||
.target
|
||||
.epoch
|
||||
.start_slot(T::EthSpec::slots_per_epoch())
|
||||
.as_u64()
|
||||
{
|
||||
per_slot_processing(&mut state, &self.spec)?;
|
||||
}
|
||||
|
||||
state.build_committee_cache(RelativeEpoch::Current, &self.spec)?;
|
||||
|
||||
let attestation_slot = state.get_attestation_data_slot(&attestation.data)?;
|
||||
|
||||
// Reject any attestation where the `state` loaded from `data.beacon_block_root`
|
||||
// has a higher slot than the attestation.
|
||||
//
|
||||
// Permitting this would allow for attesters to vote on _future_ slots.
|
||||
if attestation_slot > state.slot {
|
||||
Ok(AttestationProcessingOutcome::AttestsToFutureState {
|
||||
state: state.slot,
|
||||
attestation: attestation_slot,
|
||||
})
|
||||
} else {
|
||||
// Use the `data.beacon_block_root` to load the state from the latest non-skipped
|
||||
// slot preceding the attestations creation.
|
||||
//
|
||||
// This state is guaranteed to be in the same chain as the attestation, but it's
|
||||
// not guaranteed to be from the same slot or epoch as the attestation.
|
||||
let mut state: BeaconState<T::EthSpec> = self
|
||||
.store
|
||||
.get(&attestation_head_block.state_root)?
|
||||
.ok_or_else(|| Error::MissingBeaconState(attestation_head_block.state_root))?;
|
||||
|
||||
// Ensure the state loaded from the database matches the state of the attestation
|
||||
// head block.
|
||||
//
|
||||
// The state needs to be advanced from the current slot through to the epoch in
|
||||
// which the attestation was created in. It would be an error to try and use
|
||||
// `state.get_attestation_data_slot(..)` because the state matching the
|
||||
// `data.beacon_block_root` isn't necessarily in a nearby epoch to the attestation
|
||||
// (e.g., if there were lots of skip slots since the head of the chain and the
|
||||
// epoch creation epoch).
|
||||
for _ in state.slot.as_u64()
|
||||
..attestation
|
||||
.data
|
||||
.target
|
||||
.epoch
|
||||
.start_slot(T::EthSpec::slots_per_epoch())
|
||||
.as_u64()
|
||||
{
|
||||
per_slot_processing(&mut state, &self.spec)?;
|
||||
}
|
||||
|
||||
state.build_committee_cache(RelativeEpoch::Current, &self.spec)?;
|
||||
|
||||
let attestation_slot = state.get_attestation_data_slot(&attestation.data)?;
|
||||
|
||||
// Reject any attestation where the `state` loaded from `data.beacon_block_root`
|
||||
// has a higher slot than the attestation.
|
||||
//
|
||||
// Permitting this would allow for attesters to vote on _future_ slots.
|
||||
if attestation_slot > state.slot {
|
||||
Ok(AttestationProcessingOutcome::AttestsToFutureState {
|
||||
state: state.slot,
|
||||
attestation: attestation_slot,
|
||||
})
|
||||
} else {
|
||||
self.process_attestation_for_state_and_block(
|
||||
attestation,
|
||||
&state,
|
||||
&attestation_head_block,
|
||||
)
|
||||
}
|
||||
self.process_attestation_for_state_and_block(
|
||||
attestation,
|
||||
&state,
|
||||
&attestation_head_block,
|
||||
)
|
||||
}
|
||||
} else {
|
||||
// Drop any attestation where we have not processed `attestation.data.beacon_block_root`.
|
||||
@ -680,7 +660,15 @@ impl<T: BeaconChainTypes> BeaconChain<T> {
Ok(AttestationProcessingOutcome::UnknownHeadBlock {
beacon_block_root: attestation.data.beacon_block_root,
})
};

metrics::stop_timer(timer);

if let Ok(AttestationProcessingOutcome::Processed) = &result {
metrics::inc_counter(&metrics::ATTESTATION_PROCESSING_SUCCESSES);
}

result
}

/// Verifies the `attestation` against the `state` to which it is attesting.
@ -692,7 +680,7 @@ impl<T: BeaconChainTypes> BeaconChain<T> {
/// The given `state` must fulfil one of the following conditions:
///
/// - `state` corresponds to the `block.state_root` identified by
/// `attestation.data.beacon_block_root`. (Viz., `attestation` was created using `state`.
/// `attestation.data.beacon_block_root`. (Viz., `attestation` was created using `state`).
/// - `state.slot` is in the same epoch as `data.target.epoch` and
/// `attestation.data.beacon_block_root` is in the history of `state`.
///
@ -707,9 +695,6 @@ impl<T: BeaconChainTypes> BeaconChain<T> {
state: &BeaconState<T::EthSpec>,
block: &BeaconBlock<T::EthSpec>,
) -> Result<AttestationProcessingOutcome, Error> {
self.metrics.attestation_processing_requests.inc();
let timer = self.metrics.attestation_processing_times.start_timer();

// Find the highest between:
//
// - The highest valid finalized epoch we've ever seen (i.e., the head).
@ -719,7 +704,17 @@ impl<T: BeaconChainTypes> BeaconChain<T> {
|
||||
state.finalized_checkpoint.epoch,
|
||||
);
|
||||
|
||||
let result = if block.slot <= finalized_epoch.start_slot(T::EthSpec::slots_per_epoch()) {
|
||||
// A helper function to allow attestation processing to be metered.
|
||||
let verify_attestation_for_state = |state, attestation, spec, verify_signatures| {
|
||||
let timer = metrics::start_timer(&metrics::ATTESTATION_PROCESSING_CORE);
|
||||
|
||||
let result = verify_attestation_for_state(state, attestation, spec, verify_signatures);
|
||||
|
||||
metrics::stop_timer(timer);
|
||||
result
|
||||
};
|
||||
|
||||
if block.slot <= finalized_epoch.start_slot(T::EthSpec::slots_per_epoch()) {
|
||||
// Ignore any attestation where the slot of `data.beacon_block_root` is equal to or
|
||||
// prior to the finalized epoch.
|
||||
//
|
||||
@ -753,14 +748,10 @@ impl<T: BeaconChainTypes> BeaconChain<T> {
|
||||
.insert_attestation(attestation, state, &self.spec)?;
|
||||
|
||||
// Update the metrics.
|
||||
self.metrics.attestation_processing_successes.inc();
|
||||
metrics::inc_counter(&metrics::ATTESTATION_PROCESSING_SUCCESSES);
|
||||
|
||||
Ok(AttestationProcessingOutcome::Processed)
|
||||
};
|
||||
|
||||
timer.observe_duration();
|
||||
|
||||
result
|
||||
}
|
||||
}
|
||||
|
||||
/// Accept some deposit and queue it for inclusion in an appropriate block.
|
||||
@ -809,8 +800,8 @@ impl<T: BeaconChainTypes> BeaconChain<T> {
|
||||
&self,
|
||||
block: BeaconBlock<T::EthSpec>,
|
||||
) -> Result<BlockProcessingOutcome, Error> {
|
||||
self.metrics.block_processing_requests.inc();
|
||||
let timer = self.metrics.block_processing_times.start_timer();
|
||||
metrics::inc_counter(&metrics::BLOCK_PROCESSING_REQUESTS);
|
||||
let full_timer = metrics::start_timer(&metrics::BLOCK_PROCESSING_TIMES);
|
||||
|
||||
let finalized_slot = self
|
||||
.state
|
||||
@ -827,8 +818,12 @@ impl<T: BeaconChainTypes> BeaconChain<T> {
|
||||
return Ok(BlockProcessingOutcome::GenesisBlock);
|
||||
}
|
||||
|
||||
let block_root_timer = metrics::start_timer(&metrics::BLOCK_PROCESSING_BLOCK_ROOT);
|
||||
|
||||
let block_root = block.canonical_root();
|
||||
|
||||
metrics::stop_timer(block_root_timer);
|
||||
|
||||
if block_root == self.genesis_block_root {
|
||||
return Ok(BlockProcessingOutcome::GenesisBlock);
|
||||
}
|
||||
@ -848,6 +843,10 @@ impl<T: BeaconChainTypes> BeaconChain<T> {
|
||||
return Ok(BlockProcessingOutcome::BlockIsAlreadyKnown);
|
||||
}
|
||||
|
||||
// Records the time taken to load the block and state from the database during block
|
||||
// processing.
|
||||
let db_read_timer = metrics::start_timer(&metrics::BLOCK_PROCESSING_DB_READ);
|
||||
|
||||
// Load the blocks parent block from the database, returning invalid if that block is not
|
||||
// found.
|
||||
let parent_block: BeaconBlock<T::EthSpec> = match self.store.get(&block.parent_root)? {
|
||||
@ -867,15 +866,34 @@ impl<T: BeaconChainTypes> BeaconChain<T> {
|
||||
.get(&parent_state_root)?
|
||||
.ok_or_else(|| Error::DBInconsistent(format!("Missing state {}", parent_state_root)))?;
|
||||
|
||||
metrics::stop_timer(db_read_timer);
|
||||
|
||||
let catchup_timer = metrics::start_timer(&metrics::BLOCK_PROCESSING_CATCHUP_STATE);
|
||||
|
||||
// Keep a list of any states that were "skipped" (block-less) in between the parent state
|
||||
// slot and the block slot. These will need to be stored in the database.
|
||||
let mut intermediate_states = vec![];
|
||||
|
||||
// Transition the parent state to the block slot.
|
||||
let mut state: BeaconState<T::EthSpec> = parent_state;
|
||||
for _ in state.slot.as_u64()..block.slot.as_u64() {
|
||||
for i in state.slot.as_u64()..block.slot.as_u64() {
|
||||
if i > 0 {
|
||||
intermediate_states.push(state.clone());
|
||||
}
|
||||
per_slot_processing(&mut state, &self.spec)?;
|
||||
}
|
||||
|
||||
metrics::stop_timer(catchup_timer);
|
||||
|
||||
let committee_timer = metrics::start_timer(&metrics::BLOCK_PROCESSING_COMMITTEE);
|
||||
|
||||
state.build_committee_cache(RelativeEpoch::Previous, &self.spec)?;
|
||||
state.build_committee_cache(RelativeEpoch::Current, &self.spec)?;
|
||||
|
||||
metrics::stop_timer(committee_timer);
|
||||
|
||||
let core_timer = metrics::start_timer(&metrics::BLOCK_PROCESSING_CORE);
|
||||
|
||||
// Apply the received block to its parent state (which has been transitioned into this
|
||||
// slot).
|
||||
match per_block_processing(&mut state, &block, &self.spec) {
|
||||
@ -886,16 +904,45 @@ impl<T: BeaconChainTypes> BeaconChain<T> {
|
||||
_ => {}
|
||||
}
|
||||
|
||||
metrics::stop_timer(core_timer);
|
||||
|
||||
let state_root_timer = metrics::start_timer(&metrics::BLOCK_PROCESSING_STATE_ROOT);
|
||||
|
||||
let state_root = state.canonical_root();
|
||||
|
||||
if block.state_root != state_root {
|
||||
return Ok(BlockProcessingOutcome::StateRootMismatch);
|
||||
}
|
||||
|
||||
metrics::stop_timer(state_root_timer);
|
||||
|
||||
let db_write_timer = metrics::start_timer(&metrics::BLOCK_PROCESSING_DB_WRITE);
|
||||
|
||||
// Store all the states between the parent block state and this blocks slot before storing
|
||||
// the final state.
|
||||
for (i, intermediate_state) in intermediate_states.iter().enumerate() {
|
||||
// To avoid doing an unnecessary tree hash, use the following (slot + 1) state's
|
||||
// state_roots field to find the root.
|
||||
let following_state = match intermediate_states.get(i + 1) {
|
||||
Some(following_state) => following_state,
|
||||
None => &state,
|
||||
};
|
||||
let intermediate_state_root =
|
||||
following_state.get_state_root(intermediate_state.slot)?;
|
||||
|
||||
self.store
|
||||
.put(&intermediate_state_root, intermediate_state)?;
|
||||
}
|
||||
|
||||
// Store the block and state.
|
||||
self.store.put(&block_root, &block)?;
|
||||
self.store.put(&state_root, &state)?;
|
||||
|
||||
metrics::stop_timer(db_write_timer);
|
||||
|
||||
let fork_choice_register_timer =
|
||||
metrics::start_timer(&metrics::BLOCK_PROCESSING_FORK_CHOICE_REGISTER);
|
||||
|
||||
// Register the new block with the fork choice service.
|
||||
if let Err(e) = self.fork_choice.process_block(&state, &block, block_root) {
|
||||
error!(
|
||||
@ -907,6 +954,11 @@ impl<T: BeaconChainTypes> BeaconChain<T> {
|
||||
)
|
||||
}
|
||||
|
||||
metrics::stop_timer(fork_choice_register_timer);
|
||||
|
||||
let find_head_timer =
|
||||
metrics::start_timer(&metrics::BLOCK_PROCESSING_FORK_CHOICE_FIND_HEAD);
|
||||
|
||||
// Execute the fork choice algorithm, enthroning a new head if discovered.
|
||||
//
|
||||
// Note: in the future we may choose to run fork-choice less often, potentially based upon
|
||||
@ -919,11 +971,14 @@ impl<T: BeaconChainTypes> BeaconChain<T> {
|
||||
)
|
||||
};
|
||||
|
||||
self.metrics.block_processing_successes.inc();
|
||||
self.metrics
|
||||
.operations_per_block_attestation
|
||||
.observe(block.body.attestations.len() as f64);
|
||||
timer.observe_duration();
|
||||
metrics::stop_timer(find_head_timer);
|
||||
|
||||
metrics::inc_counter(&metrics::BLOCK_PROCESSING_SUCCESSES);
|
||||
metrics::observe(
|
||||
&metrics::OPERATIONS_PER_BLOCK_ATTESTATION,
|
||||
block.body.attestations.len() as f64,
|
||||
);
|
||||
metrics::stop_timer(full_timer);
|
||||
|
||||
Ok(BlockProcessingOutcome::Processed { block_root })
|
||||
}
|
||||
@ -958,8 +1013,8 @@ impl<T: BeaconChainTypes> BeaconChain<T> {
|
||||
produce_at_slot: Slot,
|
||||
randao_reveal: Signature,
|
||||
) -> Result<(BeaconBlock<T::EthSpec>, BeaconState<T::EthSpec>), BlockProductionError> {
|
||||
self.metrics.block_production_requests.inc();
|
||||
let timer = self.metrics.block_production_times.start_timer();
|
||||
metrics::inc_counter(&metrics::BLOCK_PRODUCTION_REQUESTS);
|
||||
let timer = metrics::start_timer(&metrics::BLOCK_PRODUCTION_TIMES);
|
||||
|
||||
// If required, transition the new state to the present slot.
|
||||
while state.slot < produce_at_slot {
|
||||
@ -1011,28 +1066,25 @@ impl<T: BeaconChainTypes> BeaconChain<T> {
|
||||
|
||||
block.state_root = state_root;
|
||||
|
||||
self.metrics.block_production_successes.inc();
|
||||
timer.observe_duration();
|
||||
metrics::inc_counter(&metrics::BLOCK_PRODUCTION_SUCCESSES);
|
||||
metrics::stop_timer(timer);
|
||||
|
||||
Ok((block, state))
|
||||
}
|
||||
|
||||
/// Execute the fork choice algorithm and enthrone the result as the canonical head.
|
||||
pub fn fork_choice(&self) -> Result<(), Error> {
|
||||
self.metrics.fork_choice_requests.inc();
|
||||
metrics::inc_counter(&metrics::FORK_CHOICE_REQUESTS);
|
||||
|
||||
// Start fork choice metrics timer.
|
||||
let timer = self.metrics.fork_choice_times.start_timer();
|
||||
let timer = metrics::start_timer(&metrics::FORK_CHOICE_TIMES);
|
||||
|
||||
// Determine the root of the block that is the head of the chain.
|
||||
let beacon_block_root = self.fork_choice.find_head(&self)?;
|
||||
|
||||
// End fork choice metrics timer.
|
||||
timer.observe_duration();
|
||||
|
||||
// If a new head was chosen.
|
||||
if beacon_block_root != self.head().beacon_block_root {
|
||||
self.metrics.fork_choice_changed_head.inc();
|
||||
let result = if beacon_block_root != self.head().beacon_block_root {
|
||||
metrics::inc_counter(&metrics::FORK_CHOICE_CHANGED_HEAD);
|
||||
|
||||
let beacon_block: BeaconBlock<T::EthSpec> = self
|
||||
.store
|
||||
@ -1050,7 +1102,7 @@ impl<T: BeaconChainTypes> BeaconChain<T> {
|
||||
|
||||
// If we switched to a new chain (instead of building atop the present chain).
|
||||
if self.head().beacon_block_root != beacon_block.parent_root {
|
||||
self.metrics.fork_choice_reorg_count.inc();
|
||||
metrics::inc_counter(&metrics::FORK_CHOICE_REORG_COUNT);
|
||||
warn!(
|
||||
self.log,
|
||||
"Beacon chain re-org";
|
||||
@ -1094,11 +1146,22 @@ impl<T: BeaconChainTypes> BeaconChain<T> {
|
||||
}
|
||||
} else {
|
||||
Ok(())
|
||||
};
|
||||
|
||||
// End fork choice metrics timer.
|
||||
metrics::stop_timer(timer);
|
||||
|
||||
if let Err(_) = result {
|
||||
metrics::inc_counter(&metrics::FORK_CHOICE_ERRORS);
|
||||
}
|
||||
|
||||
result
|
||||
}
|
||||
|
||||
/// Update the canonical head to `new_head`.
|
||||
fn update_canonical_head(&self, new_head: CheckPoint<T::EthSpec>) -> Result<(), Error> {
|
||||
let timer = metrics::start_timer(&metrics::UPDATE_HEAD_TIMES);
|
||||
|
||||
// Update the checkpoint that stores the head of the chain at the time it received the
|
||||
// block.
|
||||
*self.canonical_head.write() = new_head;
|
||||
@ -1125,6 +1188,8 @@ impl<T: BeaconChainTypes> BeaconChain<T> {
// Save `self` to `self.store`.
self.persist()?;

metrics::stop_timer(timer);

Ok(())
}

@ -1152,6 +1217,13 @@ impl<T: BeaconChainTypes> BeaconChain<T> {
self.fork_choice
.process_finalization(&finalized_block, finalized_block_root)?;

let finalized_state = self
.store
.get::<BeaconState<T::EthSpec>>(&finalized_block.state_root)?
.ok_or_else(|| Error::MissingBeaconState(finalized_block.state_root))?;

self.op_pool.prune_all(&finalized_state, &self.spec);

Ok(())
}
}
@ -1,5 +1,4 @@
use crate::fork_choice::Error as ForkChoiceError;
use crate::metrics::Error as MetricsError;
use state_processing::per_block_processing::errors::{
AttestationValidationError, IndexedAttestationValidationError,
};
@ -34,7 +33,6 @@ pub enum BeaconChainError {
MissingBeaconBlock(Hash256),
MissingBeaconState(Hash256),
SlotProcessingError(SlotProcessingError),
MetricsError(String),
NoStateForAttestation {
beacon_block_root: Hash256,
},
@ -44,12 +42,6 @@

easy_from_to!(SlotProcessingError, BeaconChainError);

impl From<MetricsError> for BeaconChainError {
fn from(e: MetricsError) -> BeaconChainError {
BeaconChainError::MetricsError(format!("{:?}", e))
}
}

#[derive(Debug, PartialEq)]
pub enum BlockProductionError {
UnableToGetBlockRootFromState,
@ -1,4 +1,4 @@
|
||||
use crate::{BeaconChain, BeaconChainTypes};
|
||||
use crate::{metrics, BeaconChain, BeaconChainTypes};
|
||||
use lmd_ghost::LmdGhost;
|
||||
use state_processing::common::get_attesting_indices;
|
||||
use std::sync::Arc;
|
||||
@ -46,6 +46,8 @@ impl<T: BeaconChainTypes> ForkChoice<T> {
|
||||
}
|
||||
|
||||
pub fn find_head(&self, chain: &BeaconChain<T>) -> Result<Hash256> {
|
||||
let timer = metrics::start_timer(&metrics::FORK_CHOICE_FIND_HEAD_TIMES);
|
||||
|
||||
let start_slot = |epoch: Epoch| epoch.start_slot(T::EthSpec::slots_per_epoch());
|
||||
|
||||
// From the specification:
|
||||
@ -97,9 +99,14 @@ impl<T: BeaconChainTypes> ForkChoice<T> {
|
||||
.map(|v| v.effective_balance)
|
||||
};
|
||||
|
||||
self.backend
|
||||
let result = self
|
||||
.backend
|
||||
.find_head(start_block_slot, start_block_root, weight)
|
||||
.map_err(Into::into)
|
||||
.map_err(Into::into);
|
||||
|
||||
metrics::stop_timer(timer);
|
||||
|
||||
result
|
||||
}
|
||||
|
||||
/// Process all attestations in the given `block`.
|
||||
@ -112,6 +119,7 @@ impl<T: BeaconChainTypes> ForkChoice<T> {
|
||||
block: &BeaconBlock<T::EthSpec>,
|
||||
block_root: Hash256,
|
||||
) -> Result<()> {
|
||||
let timer = metrics::start_timer(&metrics::FORK_CHOICE_PROCESS_BLOCK_TIMES);
|
||||
// Note: we never count the block as a latest message, only attestations.
|
||||
//
|
||||
// I (Paul H) do not have an explicit reference to this, but I derive it from this
|
||||
@ -136,6 +144,8 @@ impl<T: BeaconChainTypes> ForkChoice<T> {
|
||||
// a block that has the majority of votes applied to it.
|
||||
self.backend.process_block(block, block_root)?;
|
||||
|
||||
metrics::stop_timer(timer);
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
@ -148,6 +158,8 @@ impl<T: BeaconChainTypes> ForkChoice<T> {
|
||||
attestation: &Attestation<T::EthSpec>,
|
||||
block: &BeaconBlock<T::EthSpec>,
|
||||
) -> Result<()> {
|
||||
let timer = metrics::start_timer(&metrics::FORK_CHOICE_PROCESS_ATTESTATION_TIMES);
|
||||
|
||||
let block_hash = attestation.data.beacon_block_root;
|
||||
|
||||
// Ignore any attestations to the zero hash.
|
||||
@ -175,6 +187,8 @@ impl<T: BeaconChainTypes> ForkChoice<T> {
|
||||
}
|
||||
}
|
||||
|
||||
metrics::stop_timer(timer);
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
|
@ -1,3 +1,7 @@
#![recursion_limit = "128"] // For lazy-static
#[macro_use]
extern crate lazy_static;

mod beacon_chain;
mod checkpoint;
mod errors;
@ -13,6 +17,7 @@ pub use self::beacon_chain::{
pub use self::checkpoint::CheckPoint;
pub use self::errors::{BeaconChainError, BlockProductionError};
pub use lmd_ghost;
pub use metrics::scrape_for_metrics;
pub use parking_lot;
pub use slot_clock;
pub use state_processing::per_block_processing::errors::{
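Because `scrape_for_metrics` is now re-exported from this crate, a metrics HTTP endpoint can refresh the head-state gauges and then render everything registered with the default registry. A minimal sketch, assuming the standard `prometheus` 0.6 `gather`/`TextEncoder` API; the handler function itself is hypothetical and not part of this diff.

use beacon_chain::{scrape_for_metrics, BeaconChain, BeaconChainTypes};
use prometheus::{Encoder, TextEncoder};

// Hypothetical endpoint handler: update the head-state gauges, then encode all metrics
// from the default Prometheus registry into the text exposition format.
fn metrics_text<T: BeaconChainTypes>(chain: &BeaconChain<T>) -> String {
    scrape_for_metrics(chain);

    let mut buffer = vec![];
    TextEncoder::new()
        .encode(&prometheus::gather(), &mut buffer)
        .expect("should encode metrics");
    String::from_utf8(buffer).unwrap_or_default()
}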
@ -1,143 +1,276 @@
|
||||
pub use prometheus::Error;
|
||||
use prometheus::{Histogram, HistogramOpts, IntCounter, Opts, Registry};
|
||||
use crate::{BeaconChain, BeaconChainTypes};
|
||||
pub use lighthouse_metrics::*;
|
||||
use types::{BeaconState, Epoch, Hash256, Slot};
|
||||
|
||||
pub struct Metrics {
|
||||
pub block_processing_requests: IntCounter,
|
||||
pub block_processing_successes: IntCounter,
|
||||
pub block_processing_times: Histogram,
|
||||
pub block_production_requests: IntCounter,
|
||||
pub block_production_successes: IntCounter,
|
||||
pub block_production_times: Histogram,
|
||||
pub attestation_production_requests: IntCounter,
|
||||
pub attestation_production_successes: IntCounter,
|
||||
pub attestation_production_times: Histogram,
|
||||
pub attestation_processing_requests: IntCounter,
|
||||
pub attestation_processing_successes: IntCounter,
|
||||
pub attestation_processing_times: Histogram,
|
||||
pub fork_choice_requests: IntCounter,
|
||||
pub fork_choice_changed_head: IntCounter,
|
||||
pub fork_choice_reorg_count: IntCounter,
|
||||
pub fork_choice_times: Histogram,
|
||||
pub operations_per_block_attestation: Histogram,
|
||||
lazy_static! {
|
||||
/*
|
||||
* Block Processing
|
||||
*/
|
||||
pub static ref BLOCK_PROCESSING_REQUESTS: Result<IntCounter> = try_create_int_counter(
|
||||
"beacon_block_processing_requests_total",
|
||||
"Count of blocks submitted for processing"
|
||||
);
|
||||
pub static ref BLOCK_PROCESSING_SUCCESSES: Result<IntCounter> = try_create_int_counter(
|
||||
"beacon_block_processing_successes_total",
|
||||
"Count of blocks processed without error"
|
||||
);
|
||||
pub static ref BLOCK_PROCESSING_TIMES: Result<Histogram> =
|
||||
try_create_histogram("beacon_block_processing_seconds", "Full runtime of block processing");
|
||||
pub static ref BLOCK_PROCESSING_BLOCK_ROOT: Result<Histogram> = try_create_histogram(
|
||||
"beacon_block_processing_block_root_seconds",
|
||||
"Time spent calculating the block root when processing a block."
|
||||
);
|
||||
pub static ref BLOCK_PROCESSING_DB_READ: Result<Histogram> = try_create_histogram(
|
||||
"beacon_block_processing_db_read_seconds",
|
||||
"Time spent loading block and state from DB for block processing"
|
||||
);
|
||||
pub static ref BLOCK_PROCESSING_CATCHUP_STATE: Result<Histogram> = try_create_histogram(
|
||||
"beacon_block_processing_catch_up_state_seconds",
|
||||
"Time spent skipping slots on a state before processing a block."
|
||||
);
|
||||
pub static ref BLOCK_PROCESSING_COMMITTEE: Result<Histogram> = try_create_histogram(
|
||||
"beacon_block_processing_committee_building_seconds",
|
||||
"Time spent building/obtaining committees for block processing."
|
||||
);
|
||||
pub static ref BLOCK_PROCESSING_CORE: Result<Histogram> = try_create_histogram(
|
||||
"beacon_block_processing_core_seconds",
|
||||
"Time spent doing the core per_block_processing state processing."
|
||||
);
|
||||
pub static ref BLOCK_PROCESSING_STATE_ROOT: Result<Histogram> = try_create_histogram(
|
||||
"beacon_block_processing_state_root_seconds",
|
||||
"Time spent calculating the state root when processing a block."
|
||||
);
|
||||
pub static ref BLOCK_PROCESSING_DB_WRITE: Result<Histogram> = try_create_histogram(
|
||||
"beacon_block_processing_db_write_seconds",
|
||||
"Time spent writing a newly processed block and state to DB"
|
||||
);
|
||||
pub static ref BLOCK_PROCESSING_FORK_CHOICE_REGISTER: Result<Histogram> = try_create_histogram(
|
||||
"beacon_block_processing_fork_choice_register_seconds",
|
||||
"Time spent registering the new block with fork choice (but not finding head)"
|
||||
);
|
||||
pub static ref BLOCK_PROCESSING_FORK_CHOICE_FIND_HEAD: Result<Histogram> = try_create_histogram(
|
||||
"beacon_block_processing_fork_choice_find_head_seconds",
|
||||
"Time spent finding the new head after processing a new block"
|
||||
);
|
||||
|
||||
/*
|
||||
* Block Production
|
||||
*/
|
||||
pub static ref BLOCK_PRODUCTION_REQUESTS: Result<IntCounter> = try_create_int_counter(
|
||||
"beacon_block_production_requests_total",
|
||||
"Count of all block production requests"
|
||||
);
|
||||
pub static ref BLOCK_PRODUCTION_SUCCESSES: Result<IntCounter> = try_create_int_counter(
|
||||
"beacon_block_production_successes_total",
|
||||
"Count of blocks successfully produced."
|
||||
);
|
||||
pub static ref BLOCK_PRODUCTION_TIMES: Result<Histogram> =
|
||||
try_create_histogram("beacon_block_production_seconds", "Full runtime of block production");
|
||||
|
||||
/*
|
||||
* Block Statistics
|
||||
*/
|
||||
pub static ref OPERATIONS_PER_BLOCK_ATTESTATION: Result<Histogram> = try_create_histogram(
|
||||
"beacon_operations_per_block_attestation_total",
|
||||
"Number of attestations in a block"
|
||||
);
|
||||
|
||||
/*
|
||||
* Attestation Processing
|
||||
*/
|
||||
pub static ref ATTESTATION_PROCESSING_REQUESTS: Result<IntCounter> = try_create_int_counter(
|
||||
"beacon_attestation_processing_requests_total",
|
||||
"Count of all attestations submitted for processing"
|
||||
);
|
||||
pub static ref ATTESTATION_PROCESSING_SUCCESSES: Result<IntCounter> = try_create_int_counter(
|
||||
"beacon_attestation_processing_successes_total",
|
||||
"total_attestation_processing_successes"
|
||||
);
|
||||
pub static ref ATTESTATION_PROCESSING_TIMES: Result<Histogram> = try_create_histogram(
|
||||
"beacon_attestation_processing_seconds",
|
||||
"Full runtime of attestation processing"
|
||||
);
|
||||
pub static ref ATTESTATION_PROCESSING_CORE: Result<Histogram> = try_create_histogram(
|
||||
"beacon_attestation_processing_core_seconds",
|
||||
"Time spent on the core spec processing of attestation processing"
|
||||
);
|
||||
|
||||
/*
|
||||
* Attestation Production
|
||||
*/
|
||||
pub static ref ATTESTATION_PRODUCTION_REQUESTS: Result<IntCounter> = try_create_int_counter(
|
||||
"beacon_attestation_production_requests_total",
|
||||
"Count of all attestation production requests"
|
||||
);
|
||||
pub static ref ATTESTATION_PRODUCTION_SUCCESSES: Result<IntCounter> = try_create_int_counter(
|
||||
"beacon_attestation_production_successes_total",
|
||||
"Count of attestations processed without error"
|
||||
);
|
||||
pub static ref ATTESTATION_PRODUCTION_TIMES: Result<Histogram> = try_create_histogram(
|
||||
"beacon_attestation_production_seconds",
|
||||
"Full runtime of attestation production"
|
||||
);
|
||||
|
||||
/*
|
||||
* Fork Choice
|
||||
*/
|
||||
pub static ref FORK_CHOICE_REQUESTS: Result<IntCounter> = try_create_int_counter(
|
||||
"beacon_fork_choice_requests_total",
|
||||
"Count of occasions where fork choice has tried to find a head"
|
||||
);
|
||||
pub static ref FORK_CHOICE_ERRORS: Result<IntCounter> = try_create_int_counter(
|
||||
"beacon_fork_choice_errors_total",
|
||||
"Count of occasions where fork choice has returned an error when trying to find a head"
|
||||
);
|
||||
pub static ref FORK_CHOICE_CHANGED_HEAD: Result<IntCounter> = try_create_int_counter(
|
||||
"beacon_fork_choice_changed_head_total",
|
||||
"Count of occasions fork choice has found a new head"
|
||||
);
|
||||
pub static ref FORK_CHOICE_REORG_COUNT: Result<IntCounter> = try_create_int_counter(
|
||||
"beacon_fork_choice_reorg_total",
|
||||
"Count of occasions fork choice has switched to a different chain"
|
||||
);
|
||||
pub static ref FORK_CHOICE_TIMES: Result<Histogram> =
|
||||
try_create_histogram("beacon_fork_choice_seconds", "Full runtime of fork choice");
|
||||
pub static ref FORK_CHOICE_FIND_HEAD_TIMES: Result<Histogram> =
|
||||
try_create_histogram("beacon_fork_choice_find_head_seconds", "Full runtime of fork choice find_head function");
|
||||
pub static ref FORK_CHOICE_PROCESS_BLOCK_TIMES: Result<Histogram> = try_create_histogram(
|
||||
"beacon_fork_choice_process_block_seconds",
|
||||
"Time taken to add a block and all attestations to fork choice"
|
||||
);
|
||||
pub static ref FORK_CHOICE_PROCESS_ATTESTATION_TIMES: Result<Histogram> = try_create_histogram(
|
||||
"beacon_fork_choice_process_attestation_seconds",
|
||||
"Time taken to add an attestation to fork choice"
|
||||
);
|
||||
|
||||
/*
|
||||
* Persisting BeaconChain to disk
|
||||
*/
|
||||
pub static ref PERSIST_CHAIN: Result<Histogram> =
|
||||
try_create_histogram("beacon_persist_chain", "Time taken to update the canonical head");
|
||||
|
||||
/*
|
||||
* Chain Head
|
||||
*/
|
||||
pub static ref UPDATE_HEAD_TIMES: Result<Histogram> =
|
||||
try_create_histogram("beacon_update_head_seconds", "Time taken to update the canonical head");
|
||||
pub static ref HEAD_STATE_SLOT: Result<IntGauge> =
|
||||
try_create_int_gauge("beacon_head_state_slot", "Slot of the block at the head of the chain");
|
||||
pub static ref HEAD_STATE_ROOT: Result<IntGauge> =
|
||||
try_create_int_gauge("beacon_head_state_root", "Root of the block at the head of the chain");
|
||||
pub static ref HEAD_STATE_LATEST_BLOCK_SLOT: Result<IntGauge> =
|
||||
try_create_int_gauge("beacon_head_state_latest_block_slot", "Latest block slot at the head of the chain");
|
||||
pub static ref HEAD_STATE_CURRENT_JUSTIFIED_ROOT: Result<IntGauge> =
|
||||
try_create_int_gauge("beacon_head_state_current_justified_root", "Current justified root at the head of the chain");
|
||||
pub static ref HEAD_STATE_CURRENT_JUSTIFIED_EPOCH: Result<IntGauge> =
|
||||
try_create_int_gauge("beacon_head_state_current_justified_epoch", "Current justified epoch at the head of the chain");
|
||||
pub static ref HEAD_STATE_PREVIOUS_JUSTIFIED_ROOT: Result<IntGauge> =
|
||||
try_create_int_gauge("beacon_head_state_previous_justified_root", "Previous justified root at the head of the chain");
|
||||
pub static ref HEAD_STATE_PREVIOUS_JUSTIFIED_EPOCH: Result<IntGauge> =
|
||||
try_create_int_gauge("beacon_head_state_previous_justified_epoch", "Previous justified epoch at the head of the chain");
|
||||
pub static ref HEAD_STATE_FINALIZED_ROOT: Result<IntGauge> =
|
||||
try_create_int_gauge("beacon_head_state_finalized_root", "Finalized root at the head of the chain");
|
||||
pub static ref HEAD_STATE_FINALIZED_EPOCH: Result<IntGauge> =
|
||||
try_create_int_gauge("beacon_head_state_finalized_epoch", "Finalized epoch at the head of the chain");
|
||||
pub static ref HEAD_STATE_SHARDS: Result<IntGauge> =
|
||||
try_create_int_gauge("beacon_head_state_shard_total", "Count of shards in the beacon chain");
|
||||
pub static ref HEAD_STATE_TOTAL_VALIDATORS: Result<IntGauge> =
|
||||
try_create_int_gauge("beacon_head_state_total_validators_total", "Count of validators at the head of the chain");
|
||||
pub static ref HEAD_STATE_ACTIVE_VALIDATORS: Result<IntGauge> =
|
||||
try_create_int_gauge("beacon_head_state_active_validators_total", "Count of active validators at the head of the chain");
|
||||
pub static ref HEAD_STATE_VALIDATOR_BALANCES: Result<IntGauge> =
|
||||
try_create_int_gauge("beacon_head_state_validator_balances_total", "Sum of all validator balances at the head of the chain");
|
||||
pub static ref HEAD_STATE_SLASHED_VALIDATORS: Result<IntGauge> =
|
||||
try_create_int_gauge("beacon_head_state_slashed_validators_total", "Count of all slashed validators at the head of the chain");
|
||||
pub static ref HEAD_STATE_WITHDRAWN_VALIDATORS: Result<IntGauge> =
|
||||
try_create_int_gauge("beacon_head_state_withdrawn_validators_total", "Sum of all validator balances at the head of the chain");
|
||||
pub static ref HEAD_STATE_ETH1_DEPOSIT_INDEX: Result<IntGauge> =
|
||||
try_create_int_gauge("beacon_head_state_eth1_deposit_index", "Eth1 deposit index at the head of the chain");
|
||||
}
|
||||
|
||||
impl Metrics {
|
||||
pub fn new() -> Result<Self, Error> {
|
||||
Ok(Self {
|
||||
block_processing_requests: {
|
||||
let opts = Opts::new("block_processing_requests", "total_blocks_processed");
|
||||
IntCounter::with_opts(opts)?
|
||||
},
|
||||
block_processing_successes: {
|
||||
let opts = Opts::new("block_processing_successes", "total_valid_blocks_processed");
|
||||
IntCounter::with_opts(opts)?
|
||||
},
|
||||
block_processing_times: {
|
||||
let opts = HistogramOpts::new("block_processing_times", "block_processing_time");
|
||||
Histogram::with_opts(opts)?
|
||||
},
|
||||
block_production_requests: {
|
||||
let opts = Opts::new("block_production_requests", "attempts_to_produce_new_block");
|
||||
IntCounter::with_opts(opts)?
|
||||
},
|
||||
block_production_successes: {
|
||||
let opts = Opts::new("block_production_successes", "blocks_successfully_produced");
|
||||
IntCounter::with_opts(opts)?
|
||||
},
|
||||
block_production_times: {
|
||||
let opts = HistogramOpts::new("block_production_times", "block_production_time");
|
||||
Histogram::with_opts(opts)?
|
||||
},
|
||||
attestation_production_requests: {
|
||||
let opts = Opts::new(
|
||||
"attestation_production_requests",
|
||||
"total_attestation_production_requests",
|
||||
);
|
||||
IntCounter::with_opts(opts)?
|
||||
},
|
||||
attestation_production_successes: {
|
||||
let opts = Opts::new(
|
||||
"attestation_production_successes",
|
||||
"total_attestation_production_successes",
|
||||
);
|
||||
IntCounter::with_opts(opts)?
|
||||
},
|
||||
attestation_production_times: {
|
||||
let opts = HistogramOpts::new(
|
||||
"attestation_production_times",
|
||||
"attestation_production_time",
|
||||
);
|
||||
Histogram::with_opts(opts)?
|
||||
},
|
||||
attestation_processing_requests: {
|
||||
let opts = Opts::new(
|
||||
"attestation_processing_requests",
|
||||
"total_attestation_processing_requests",
|
||||
);
|
||||
IntCounter::with_opts(opts)?
|
||||
},
|
||||
attestation_processing_successes: {
|
||||
let opts = Opts::new(
|
||||
"attestation_processing_successes",
|
||||
"total_attestation_processing_successes",
|
||||
);
|
||||
IntCounter::with_opts(opts)?
|
||||
},
|
||||
attestation_processing_times: {
|
||||
let opts = HistogramOpts::new(
|
||||
"attestation_processing_times",
|
||||
"attestation_processing_time",
|
||||
);
|
||||
Histogram::with_opts(opts)?
|
||||
},
|
||||
fork_choice_requests: {
|
||||
let opts = Opts::new("fork_choice_requests", "total_times_fork_choice_called");
|
||||
IntCounter::with_opts(opts)?
|
||||
},
|
||||
fork_choice_changed_head: {
|
||||
let opts = Opts::new(
|
||||
"fork_choice_changed_head",
|
||||
"total_times_fork_choice_chose_a_new_head",
|
||||
);
|
||||
IntCounter::with_opts(opts)?
|
||||
},
|
||||
fork_choice_reorg_count: {
|
||||
let opts = Opts::new("fork_choice_reorg_count", "number_of_reorgs");
|
||||
IntCounter::with_opts(opts)?
|
||||
},
|
||||
fork_choice_times: {
|
||||
let opts = HistogramOpts::new("fork_choice_time", "total_time_to_run_fork_choice");
|
||||
Histogram::with_opts(opts)?
|
||||
},
|
||||
operations_per_block_attestation: {
|
||||
let opts = HistogramOpts::new(
|
||||
"operations_per_block_attestation",
|
||||
"count_of_attestations_per_block",
|
||||
);
|
||||
Histogram::with_opts(opts)?
|
||||
},
|
||||
})
|
||||
}
|
||||
|
||||
pub fn register(&self, registry: &Registry) -> Result<(), Error> {
|
||||
registry.register(Box::new(self.block_processing_requests.clone()))?;
|
||||
registry.register(Box::new(self.block_processing_successes.clone()))?;
|
||||
registry.register(Box::new(self.block_processing_times.clone()))?;
|
||||
registry.register(Box::new(self.block_production_requests.clone()))?;
|
||||
registry.register(Box::new(self.block_production_successes.clone()))?;
|
||||
registry.register(Box::new(self.block_production_times.clone()))?;
|
||||
registry.register(Box::new(self.attestation_production_requests.clone()))?;
|
||||
registry.register(Box::new(self.attestation_production_successes.clone()))?;
|
||||
registry.register(Box::new(self.attestation_production_times.clone()))?;
|
||||
registry.register(Box::new(self.attestation_processing_requests.clone()))?;
|
||||
registry.register(Box::new(self.attestation_processing_successes.clone()))?;
|
||||
registry.register(Box::new(self.attestation_processing_times.clone()))?;
|
||||
registry.register(Box::new(self.fork_choice_requests.clone()))?;
|
||||
registry.register(Box::new(self.fork_choice_changed_head.clone()))?;
|
||||
registry.register(Box::new(self.fork_choice_reorg_count.clone()))?;
|
||||
registry.register(Box::new(self.fork_choice_times.clone()))?;
|
||||
registry.register(Box::new(self.operations_per_block_attestation.clone()))?;
|
||||
|
||||
Ok(())
|
||||
}
|
||||
/// Scrape the `beacon_chain` for metrics that are not constantly updated (e.g., the present slot,
|
||||
/// head state info, etc) and update the Prometheus `DEFAULT_REGISTRY`.
|
||||
pub fn scrape_for_metrics<T: BeaconChainTypes>(beacon_chain: &BeaconChain<T>) {
|
||||
scrape_head_state::<T>(
|
||||
&beacon_chain.head().beacon_state,
|
||||
beacon_chain.head().beacon_state_root,
|
||||
);
|
||||
}
|
||||
|
||||
/// Scrape the given `state` assuming it's the head state, updating the `DEFAULT_REGISTRY`.
|
||||
fn scrape_head_state<T: BeaconChainTypes>(state: &BeaconState<T::EthSpec>, state_root: Hash256) {
|
||||
set_gauge_by_slot(&HEAD_STATE_SLOT, state.slot);
|
||||
set_gauge_by_hash(&HEAD_STATE_ROOT, state_root);
|
||||
set_gauge_by_slot(
|
||||
&HEAD_STATE_LATEST_BLOCK_SLOT,
|
||||
state.latest_block_header.slot,
|
||||
);
|
||||
set_gauge_by_hash(
|
||||
&HEAD_STATE_CURRENT_JUSTIFIED_ROOT,
|
||||
state.current_justified_checkpoint.root,
|
||||
);
|
||||
set_gauge_by_epoch(
|
||||
&HEAD_STATE_CURRENT_JUSTIFIED_EPOCH,
|
||||
state.current_justified_checkpoint.epoch,
|
||||
);
|
||||
set_gauge_by_hash(
|
||||
&HEAD_STATE_PREVIOUS_JUSTIFIED_ROOT,
|
||||
state.previous_justified_checkpoint.root,
|
||||
);
|
||||
set_gauge_by_epoch(
|
||||
&HEAD_STATE_PREVIOUS_JUSTIFIED_EPOCH,
|
||||
state.previous_justified_checkpoint.epoch,
|
||||
);
|
||||
set_gauge_by_hash(&HEAD_STATE_FINALIZED_ROOT, state.finalized_checkpoint.root);
|
||||
set_gauge_by_epoch(
|
||||
&HEAD_STATE_FINALIZED_EPOCH,
|
||||
state.finalized_checkpoint.epoch,
|
||||
);
|
||||
set_gauge_by_usize(&HEAD_STATE_SHARDS, state.previous_crosslinks.len());
|
||||
set_gauge_by_usize(&HEAD_STATE_TOTAL_VALIDATORS, state.validators.len());
|
||||
set_gauge_by_u64(
|
||||
&HEAD_STATE_VALIDATOR_BALANCES,
|
||||
state.balances.iter().fold(0_u64, |acc, i| acc + i),
|
||||
);
|
||||
set_gauge_by_usize(
|
||||
&HEAD_STATE_ACTIVE_VALIDATORS,
|
||||
state
|
||||
.validators
|
||||
.iter()
|
||||
.filter(|v| v.is_active_at(state.current_epoch()))
|
||||
.count(),
|
||||
);
|
||||
set_gauge_by_usize(
|
||||
&HEAD_STATE_SLASHED_VALIDATORS,
|
||||
state.validators.iter().filter(|v| v.slashed).count(),
|
||||
);
|
||||
set_gauge_by_usize(
|
||||
&HEAD_STATE_WITHDRAWN_VALIDATORS,
|
||||
state
|
||||
.validators
|
||||
.iter()
|
||||
.filter(|v| v.is_withdrawable_at(state.current_epoch()))
|
||||
.count(),
|
||||
);
|
||||
set_gauge_by_u64(&HEAD_STATE_ETH1_DEPOSIT_INDEX, state.eth1_deposit_index);
|
||||
}
|
||||
|
||||
fn set_gauge_by_slot(gauge: &Result<IntGauge>, value: Slot) {
|
||||
set_gauge(gauge, value.as_u64() as i64);
|
||||
}
|
||||
|
||||
fn set_gauge_by_epoch(gauge: &Result<IntGauge>, value: Epoch) {
|
||||
set_gauge(gauge, value.as_u64() as i64);
|
||||
}
|
||||
|
||||
fn set_gauge_by_hash(gauge: &Result<IntGauge>, value: Hash256) {
|
||||
set_gauge(gauge, value.to_low_u64_le() as i64);
|
||||
}
|
||||
|
||||
fn set_gauge_by_usize(gauge: &Result<IntGauge>, value: usize) {
|
||||
set_gauge(gauge, value as i64);
|
||||
}
|
||||
|
||||
fn set_gauge_by_u64(gauge: &Result<IntGauge>, value: u64) {
|
||||
set_gauge(gauge, value as i64);
|
||||
}
|
||||
|
@ -54,7 +54,7 @@ where

impl<L, E> BeaconChainTypes for CommonTypes<L, E>
where
L: LmdGhost<MemoryStore, E>,
L: LmdGhost<MemoryStore, E> + 'static,
E: EthSpec,
{
type Store = MemoryStore;
@ -69,7 +69,7 @@ where
/// Used for testing.
pub struct BeaconChainHarness<L, E>
where
L: LmdGhost<MemoryStore, E>,
L: LmdGhost<MemoryStore, E> + 'static,
E: EthSpec,
{
pub chain: BeaconChain<CommonTypes<L, E>>,
@ -7,7 +7,7 @@ edition = "2018"
[dependencies]
beacon_chain = { path = "../beacon_chain" }
network = { path = "../network" }
http_server = { path = "../http_server" }
eth2-libp2p = { path = "../eth2-libp2p" }
rpc = { path = "../rpc" }
rest_api = { path = "../rest_api" }
prometheus = "^0.6"
@ -27,3 +27,5 @@ clap = "2.32.0"
dirs = "1.0.3"
exit-future = "0.1.3"
futures = "0.1.25"
reqwest = "0.9"
url = "1.2"
@ -1,3 +1,4 @@
use crate::bootstrapper::Bootstrapper;
use crate::error::Result;
use crate::{config::GenesisState, ClientConfig};
use beacon_chain::{
@ -35,7 +36,11 @@ pub struct ClientType<S: Store, E: EthSpec> {
_phantom_u: PhantomData<E>,
}

impl<S: Store, E: EthSpec + Clone> BeaconChainTypes for ClientType<S, E> {
impl<S, E> BeaconChainTypes for ClientType<S, E>
where
S: Store + 'static,
E: EthSpec,
{
type Store = S;
type SlotClock = SystemTimeSlotClock;
type LmdGhost = ThreadSafeReducedTree<S, E>;
@ -74,6 +79,16 @@ where
serde_yaml::from_reader(file)
.map_err(|e| format!("Unable to parse YAML genesis state file: {:?}", e))?
}
GenesisState::HttpBootstrap { server } => {
let bootstrapper = Bootstrapper::from_server_string(server.to_string())
.map_err(|e| format!("Failed to initialize bootstrap client: {}", e))?;

let (state, _block) = bootstrapper
.genesis()
.map_err(|e| format!("Failed to bootstrap genesis state: {}", e))?;

state
}
};

let mut genesis_block = BeaconBlock::empty(&spec);
beacon_node/client/src/bootstrapper.rs (new file, 210 lines)
@ -0,0 +1,210 @@
|
||||
use eth2_libp2p::{
|
||||
multiaddr::{Multiaddr, Protocol},
|
||||
Enr,
|
||||
};
|
||||
use reqwest::{Error as HttpError, Url};
|
||||
use serde::Deserialize;
|
||||
use std::borrow::Cow;
|
||||
use std::net::Ipv4Addr;
|
||||
use types::{BeaconBlock, BeaconState, Checkpoint, EthSpec, Hash256, Slot};
|
||||
use url::Host;
|
||||
|
||||
#[derive(Debug)]
|
||||
enum Error {
|
||||
InvalidUrl,
|
||||
HttpError(HttpError),
|
||||
}
|
||||
|
||||
impl From<HttpError> for Error {
|
||||
fn from(e: HttpError) -> Error {
|
||||
Error::HttpError(e)
|
||||
}
|
||||
}
|
||||
|
||||
/// Used to load "bootstrap" information from the HTTP API of another Lighthouse beacon node.
|
||||
///
|
||||
/// Bootstrapping information includes things like genesis and finalized states and blocks, and
|
||||
/// libp2p connection details.
|
||||
pub struct Bootstrapper {
|
||||
url: Url,
|
||||
}
|
||||
|
||||
impl Bootstrapper {
|
||||
/// Parses the given `server` as a URL, instantiating `Self`.
|
||||
pub fn from_server_string(server: String) -> Result<Self, String> {
|
||||
Ok(Self {
|
||||
url: Url::parse(&server).map_err(|e| format!("Invalid bootstrap server url: {}", e))?,
|
||||
})
|
||||
}
|
||||
|
||||
/// Build a multiaddr using the HTTP server URL that is not guaranteed to be correct.
|
||||
///
|
||||
/// The address is created by querying the HTTP server for its listening libp2p addresses.
|
||||
/// Then, we find the first TCP port in those addresses and combine the port with the URL of
|
||||
/// the server.
|
||||
///
|
||||
/// For example, the server `http://192.168.0.1` might end up with a `best_effort_multiaddr` of
|
||||
/// `/ipv4/192.168.0.1/tcp/9000` if the server advertises a listening address of
|
||||
/// `/ipv4/172.0.0.1/tcp/9000`.
|
||||
pub fn best_effort_multiaddr(&self) -> Option<Multiaddr> {
|
||||
let tcp_port = self.listen_port().ok()?;
|
||||
|
||||
let mut multiaddr = Multiaddr::with_capacity(2);
|
||||
|
||||
match self.url.host()? {
|
||||
Host::Ipv4(addr) => multiaddr.push(Protocol::Ip4(addr)),
|
||||
Host::Domain(s) => multiaddr.push(Protocol::Dns4(Cow::Borrowed(s))),
|
||||
_ => return None,
|
||||
};
|
||||
|
||||
multiaddr.push(Protocol::Tcp(tcp_port));
|
||||
|
||||
Some(multiaddr)
|
||||
}
|
||||
|
||||
/// Returns the IPv4 address of the server URL, unless it contains a FQDN.
|
||||
pub fn server_ipv4_addr(&self) -> Option<Ipv4Addr> {
|
||||
match self.url.host()? {
|
||||
Host::Ipv4(addr) => Some(addr),
|
||||
_ => None,
|
||||
}
|
||||
}
|
||||
|
||||
/// Returns the servers ENR address.
|
||||
pub fn enr(&self) -> Result<Enr, String> {
|
||||
get_enr(self.url.clone()).map_err(|e| format!("Unable to get ENR: {:?}", e))
|
||||
}
|
||||
|
||||
/// Returns the servers listening libp2p addresses.
|
||||
pub fn listen_port(&self) -> Result<u16, String> {
|
||||
get_listen_port(self.url.clone()).map_err(|e| format!("Unable to get listen port: {:?}", e))
|
||||
}
|
||||
|
||||
/// Returns the genesis block and state.
|
||||
pub fn genesis<T: EthSpec>(&self) -> Result<(BeaconState<T>, BeaconBlock<T>), String> {
|
||||
let genesis_slot = Slot::new(0);
|
||||
|
||||
let block = get_block(self.url.clone(), genesis_slot)
|
||||
.map_err(|e| format!("Unable to get genesis block: {:?}", e))?
|
||||
.beacon_block;
|
||||
let state = get_state(self.url.clone(), genesis_slot)
|
||||
.map_err(|e| format!("Unable to get genesis state: {:?}", e))?
|
||||
.beacon_state;
|
||||
|
||||
Ok((state, block))
|
||||
}
|
||||
|
||||
/// Returns the most recent finalized state and block.
|
||||
pub fn finalized<T: EthSpec>(&self) -> Result<(BeaconState<T>, BeaconBlock<T>), String> {
|
||||
let slots_per_epoch = get_slots_per_epoch(self.url.clone())
|
||||
.map_err(|e| format!("Unable to get slots per epoch: {:?}", e))?;
|
||||
let finalized_slot = get_finalized_slot(self.url.clone(), slots_per_epoch.as_u64())
|
||||
.map_err(|e| format!("Unable to get finalized slot: {:?}", e))?;
|
||||
|
||||
let block = get_block(self.url.clone(), finalized_slot)
|
||||
.map_err(|e| format!("Unable to get finalized block: {:?}", e))?
|
||||
.beacon_block;
|
||||
let state = get_state(self.url.clone(), finalized_slot)
|
||||
.map_err(|e| format!("Unable to get finalized state: {:?}", e))?
|
||||
.beacon_state;
|
||||
|
||||
Ok((state, block))
|
||||
}
|
||||
}
|
||||
|
||||
fn get_slots_per_epoch(mut url: Url) -> Result<Slot, Error> {
|
||||
url.path_segments_mut()
|
||||
.map(|mut url| {
|
||||
url.push("spec").push("slots_per_epoch");
|
||||
})
|
||||
.map_err(|_| Error::InvalidUrl)?;
|
||||
|
||||
reqwest::get(url)?
|
||||
.error_for_status()?
|
||||
.json()
|
||||
.map_err(Into::into)
|
||||
}
|
||||
|
||||
fn get_finalized_slot(mut url: Url, slots_per_epoch: u64) -> Result<Slot, Error> {
|
||||
url.path_segments_mut()
|
||||
.map(|mut url| {
|
||||
url.push("beacon").push("latest_finalized_checkpoint");
|
||||
})
|
||||
.map_err(|_| Error::InvalidUrl)?;
|
||||
|
||||
let checkpoint: Checkpoint = reqwest::get(url)?.error_for_status()?.json()?;
|
||||
|
||||
Ok(checkpoint.epoch.start_slot(slots_per_epoch))
|
||||
}
|
||||
|
||||
#[derive(Deserialize)]
|
||||
#[serde(bound = "T: EthSpec")]
|
||||
pub struct StateResponse<T: EthSpec> {
|
||||
pub root: Hash256,
|
||||
pub beacon_state: BeaconState<T>,
|
||||
}
|
||||
|
||||
fn get_state<T: EthSpec>(mut url: Url, slot: Slot) -> Result<StateResponse<T>, Error> {
|
||||
url.path_segments_mut()
|
||||
.map(|mut url| {
|
||||
url.push("beacon").push("state");
|
||||
})
|
||||
.map_err(|_| Error::InvalidUrl)?;
|
||||
|
||||
url.query_pairs_mut()
|
||||
.append_pair("slot", &format!("{}", slot.as_u64()));
|
||||
|
||||
reqwest::get(url)?
|
||||
.error_for_status()?
|
||||
.json()
|
||||
.map_err(Into::into)
|
||||
}
|
||||
|
||||
#[derive(Deserialize)]
|
||||
#[serde(bound = "T: EthSpec")]
|
||||
pub struct BlockResponse<T: EthSpec> {
|
||||
pub root: Hash256,
|
||||
pub beacon_block: BeaconBlock<T>,
|
||||
}
|
||||
|
||||
fn get_block<T: EthSpec>(mut url: Url, slot: Slot) -> Result<BlockResponse<T>, Error> {
|
||||
url.path_segments_mut()
|
||||
.map(|mut url| {
|
||||
url.push("beacon").push("block");
|
||||
})
|
||||
.map_err(|_| Error::InvalidUrl)?;
|
||||
|
||||
url.query_pairs_mut()
|
||||
.append_pair("slot", &format!("{}", slot.as_u64()));
|
||||
|
||||
reqwest::get(url)?
|
||||
.error_for_status()?
|
||||
.json()
|
||||
.map_err(Into::into)
|
||||
}
|
||||
|
||||
fn get_enr(mut url: Url) -> Result<Enr, Error> {
|
||||
url.path_segments_mut()
|
||||
.map(|mut url| {
|
||||
url.push("network").push("enr");
|
||||
})
|
||||
.map_err(|_| Error::InvalidUrl)?;
|
||||
|
||||
reqwest::get(url)?
|
||||
.error_for_status()?
|
||||
.json()
|
||||
.map_err(Into::into)
|
||||
}
|
||||
|
||||
fn get_listen_port(mut url: Url) -> Result<u16, Error> {
|
||||
url.path_segments_mut()
|
||||
.map(|mut url| {
|
||||
url.push("network").push("listen_port");
|
||||
})
|
||||
.map_err(|_| Error::InvalidUrl)?;
|
||||
|
||||
reqwest::get(url)?
|
||||
.error_for_status()?
|
||||
.json()
|
||||
.map_err(Into::into)
|
||||
}
|
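For orientation, a hedged usage sketch of the `Bootstrapper` defined above; the server URL and the `MainnetEthSpec` type parameter are placeholders chosen for illustration and are not part of this commit.

use types::MainnetEthSpec;

fn bootstrap_example() -> Result<(), String> {
    // Point the bootstrapper at another node's REST API (placeholder address).
    let bootstrapper = Bootstrapper::from_server_string("http://127.0.0.1:5052".to_string())?;

    // Best-effort libp2p address: the URL host combined with the advertised TCP port.
    if let Some(multiaddr) = bootstrapper.best_effort_multiaddr() {
        println!("Dialing bootstrap node at {}", multiaddr);
    }

    // Genesis state and block, fetched via /beacon/state and /beacon/block at slot 0.
    let (_genesis_state, _genesis_block) = bootstrapper.genesis::<MainnetEthSpec>()?;

    Ok(())
}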
@ -1,9 +1,8 @@
|
||||
use crate::Eth2Config;
|
||||
use crate::{Bootstrapper, Eth2Config};
|
||||
use clap::ArgMatches;
|
||||
use http_server::HttpServerConfig;
|
||||
use network::NetworkConfig;
|
||||
use serde_derive::{Deserialize, Serialize};
|
||||
use slog::{info, o, Drain};
|
||||
use slog::{info, o, warn, Drain};
|
||||
use std::fs::{self, OpenOptions};
|
||||
use std::path::PathBuf;
|
||||
use std::sync::Mutex;
|
||||
@ -25,7 +24,6 @@ pub struct Config {
|
||||
pub genesis_state: GenesisState,
|
||||
pub network: network::NetworkConfig,
|
||||
pub rpc: rpc::RPCConfig,
|
||||
pub http: HttpServerConfig,
|
||||
pub rest_api: rest_api::ApiConfig,
|
||||
}
|
||||
|
||||
@ -48,6 +46,8 @@ pub enum GenesisState {
|
||||
},
|
||||
/// Load a YAML-encoded genesis state from a file.
|
||||
Yaml { file: PathBuf },
|
||||
/// Use a HTTP server (running our REST-API) to load genesis and finalized states and blocks.
|
||||
HttpBootstrap { server: String },
|
||||
}
|
||||
|
||||
impl Default for Config {
|
||||
@ -59,7 +59,6 @@ impl Default for Config {
|
||||
db_name: "chain_db".to_string(),
|
||||
network: NetworkConfig::new(),
|
||||
rpc: rpc::RPCConfig::default(),
|
||||
http: HttpServerConfig::default(),
|
||||
rest_api: rest_api::ApiConfig::default(),
|
||||
spec_constants: TESTNET_SPEC_CONSTANTS.into(),
|
||||
genesis_state: GenesisState::RecentGenesis {
|
||||
@ -143,7 +142,6 @@ impl Config {
|
||||
|
||||
self.network.apply_cli_args(args)?;
|
||||
self.rpc.apply_cli_args(args)?;
|
||||
self.http.apply_cli_args(args)?;
|
||||
self.rest_api.apply_cli_args(args)?;
|
||||
|
||||
if let Some(log_file) = args.value_of("logfile") {
|
||||
@ -151,6 +149,40 @@ impl Config {
|
||||
self.update_logger(log)?;
|
||||
};
|
||||
|
||||
// If the `--bootstrap` flag is provided, overwrite the default configuration.
|
||||
if let Some(server) = args.value_of("bootstrap") {
|
||||
do_bootstrapping(self, server.to_string(), &log)?;
|
||||
}
|
||||
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
|
||||
/// Perform the HTTP bootstrapping procedure, reading an ENR and multiaddr from the HTTP server and
|
||||
/// adding them to the `config`.
|
||||
fn do_bootstrapping(config: &mut Config, server: String, log: &slog::Logger) -> Result<(), String> {
|
||||
// Set the genesis state source.
|
||||
config.genesis_state = GenesisState::HttpBootstrap {
|
||||
server: server.to_string(),
|
||||
};
|
||||
|
||||
let bootstrapper = Bootstrapper::from_server_string(server.to_string())?;
|
||||
|
||||
config.network.boot_nodes.push(bootstrapper.enr()?);
|
||||
|
||||
if let Some(server_multiaddr) = bootstrapper.best_effort_multiaddr() {
|
||||
info!(
|
||||
log,
|
||||
"Estimated bootstrapper libp2p address";
|
||||
"multiaddr" => format!("{:?}", server_multiaddr)
|
||||
);
|
||||
config.network.libp2p_nodes.push(server_multiaddr);
|
||||
} else {
|
||||
warn!(
|
||||
log,
|
||||
"Unable to estimate a bootstrapper libp2p address, this node may not find any peers."
|
||||
);
|
||||
}
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
@ -1,6 +1,7 @@
|
||||
extern crate slog;
|
||||
|
||||
mod beacon_chain_types;
|
||||
mod bootstrapper;
|
||||
mod config;
|
||||
|
||||
pub mod error;
|
||||
@ -10,7 +11,6 @@ use beacon_chain::BeaconChain;
|
||||
use exit_future::Signal;
|
||||
use futures::{future::Future, Stream};
|
||||
use network::Service as NetworkService;
|
||||
use prometheus::Registry;
|
||||
use slog::{error, info, o};
|
||||
use slot_clock::SlotClock;
|
||||
use std::marker::PhantomData;
|
||||
@ -22,7 +22,8 @@ use tokio::timer::Interval;
|
||||
pub use beacon_chain::BeaconChainTypes;
|
||||
pub use beacon_chain_types::ClientType;
|
||||
pub use beacon_chain_types::InitialiseBeaconChain;
|
||||
pub use config::Config as ClientConfig;
|
||||
pub use bootstrapper::Bootstrapper;
|
||||
pub use config::{Config as ClientConfig, GenesisState};
|
||||
pub use eth2_config::Eth2Config;
|
||||
|
||||
/// Main beacon node client service. This provides the connection and initialisation of the clients
|
||||
@ -36,8 +37,6 @@ pub struct Client<T: BeaconChainTypes> {
|
||||
pub network: Arc<NetworkService<T>>,
|
||||
/// Signal to terminate the RPC server.
|
||||
pub rpc_exit_signal: Option<Signal>,
|
||||
/// Signal to terminate the HTTP server.
|
||||
pub http_exit_signal: Option<Signal>,
|
||||
/// Signal to terminate the slot timer.
|
||||
pub slot_timer_exit_signal: Option<Signal>,
|
||||
/// Signal to terminate the API
|
||||
@ -50,7 +49,7 @@ pub struct Client<T: BeaconChainTypes> {
|
||||
|
||||
impl<T> Client<T>
|
||||
where
|
||||
T: BeaconChainTypes + InitialiseBeaconChain<T> + Clone + 'static,
|
||||
T: BeaconChainTypes + InitialiseBeaconChain<T> + Clone,
|
||||
{
|
||||
/// Generate an instance of the client. Spawn and link all internal sub-processes.
|
||||
pub fn new(
|
||||
@ -60,7 +59,6 @@ where
|
||||
log: slog::Logger,
|
||||
executor: &TaskExecutor,
|
||||
) -> error::Result<Self> {
|
||||
let metrics_registry = Registry::new();
|
||||
let store = Arc::new(store);
|
||||
let seconds_per_slot = eth2_config.spec.seconds_per_slot;
|
||||
|
||||
@ -71,11 +69,6 @@ where
|
||||
eth2_config.spec.clone(),
|
||||
log.clone(),
|
||||
)?);
|
||||
// Registry all beacon chain metrics with the global registry.
|
||||
beacon_chain
|
||||
.metrics
|
||||
.register(&metrics_registry)
|
||||
.expect("Failed to registry metrics");
|
||||
|
||||
if beacon_chain.read_slot_clock().is_none() {
|
||||
panic!("Cannot start client before genesis!")
|
||||
@ -117,29 +110,14 @@ where
|
||||
None
|
||||
};
|
||||
|
||||
// Start the `http_server` service.
|
||||
//
|
||||
// Note: presently we are ignoring the config and _always_ starting a HTTP server.
|
||||
let http_exit_signal = if client_config.http.enabled {
|
||||
Some(http_server::start_service(
|
||||
&client_config.http,
|
||||
executor,
|
||||
network_send,
|
||||
beacon_chain.clone(),
|
||||
client_config.db_path().expect("unable to read datadir"),
|
||||
metrics_registry,
|
||||
&log,
|
||||
))
|
||||
} else {
|
||||
None
|
||||
};
|
||||
|
||||
// Start the `rest_api` service
|
||||
let api_exit_signal = if client_config.rest_api.enabled {
|
||||
match rest_api::start_server(
|
||||
&client_config.rest_api,
|
||||
executor,
|
||||
beacon_chain.clone(),
|
||||
network.clone(),
|
||||
client_config.db_path().expect("unable to read datadir"),
|
||||
&log,
|
||||
) {
|
||||
Ok(s) => Some(s),
|
||||
@ -181,7 +159,6 @@ where
|
||||
Ok(Client {
|
||||
_client_config: client_config,
|
||||
beacon_chain,
|
||||
http_exit_signal,
|
||||
rpc_exit_signal,
|
||||
slot_timer_exit_signal: Some(slot_timer_exit_signal),
|
||||
api_exit_signal,
|
||||
|
@ -17,11 +17,7 @@ pub const WARN_PEER_COUNT: usize = 1;
|
||||
/// durations.
|
||||
///
|
||||
/// Presently unused, but remains for future use.
|
||||
pub fn run<T: BeaconChainTypes + Send + Sync + 'static>(
|
||||
client: &Client<T>,
|
||||
executor: TaskExecutor,
|
||||
exit: Exit,
|
||||
) {
|
||||
pub fn run<T: BeaconChainTypes>(client: &Client<T>, executor: TaskExecutor, exit: Exit) {
|
||||
// notification heartbeat
|
||||
let interval = Interval::new(
|
||||
Instant::now(),
|
||||
|
@ -27,3 +27,5 @@ fnv = "1.0.6"
|
||||
unsigned-varint = "0.2.2"
|
||||
bytes = "0.4.12"
|
||||
tokio-io-timeout = "0.3.1"
|
||||
lazy_static = "1.3.0"
|
||||
lighthouse_metrics = { path = "../../eth2/utils/lighthouse_metrics" }
|
||||
|
@ -78,6 +78,10 @@ impl<TSubstream: AsyncRead + AsyncWrite> Behaviour<TSubstream> {
|
||||
log: behaviour_log,
|
||||
})
|
||||
}
|
||||
|
||||
pub fn discovery(&self) -> &Discovery<TSubstream> {
|
||||
&self.discovery
|
||||
}
|
||||
}
|
||||
|
||||
// Implement the NetworkBehaviourEventProcess trait so that we can derive NetworkBehaviour for Behaviour
|
||||
@ -87,7 +91,7 @@ impl<TSubstream: AsyncRead + AsyncWrite> NetworkBehaviourEventProcess<GossipsubE
|
||||
fn inject_event(&mut self, event: GossipsubEvent) {
|
||||
match event {
|
||||
GossipsubEvent::Message(gs_msg) => {
|
||||
trace!(self.log, "Received GossipEvent"; "msg" => format!("{:?}", gs_msg));
|
||||
trace!(self.log, "Received GossipEvent");
|
||||
|
||||
let msg = PubsubMessage::from_topics(&gs_msg.topics, gs_msg.data);
|
||||
|
||||
|
@ -1,3 +1,4 @@
|
||||
use crate::metrics;
|
||||
use crate::{error, NetworkConfig};
|
||||
/// This manages the discovery and management of peers.
|
||||
///
|
||||
@ -102,6 +103,10 @@ impl<TSubstream> Discovery<TSubstream> {
|
||||
})
|
||||
}
|
||||
|
||||
pub fn local_enr(&self) -> &Enr {
|
||||
self.discovery.local_enr()
|
||||
}
|
||||
|
||||
/// Manually search for peers. This restarts the discovery round, sparking multiple rapid
|
||||
/// queries.
|
||||
pub fn discover_peers(&mut self) {
|
||||
@ -119,6 +124,11 @@ impl<TSubstream> Discovery<TSubstream> {
|
||||
self.connected_peers.len()
|
||||
}
|
||||
|
||||
/// The set of libp2p peers that are currently connected.
|
||||
pub fn connected_peer_set(&self) -> &HashSet<PeerId> {
|
||||
&self.connected_peers
|
||||
}
|
||||
|
||||
/// Search for new peers using the underlying discovery mechanism.
|
||||
fn find_peers(&mut self) {
|
||||
// pick a random NodeId
|
||||
@ -159,10 +169,16 @@ where
|
||||
|
||||
fn inject_connected(&mut self, peer_id: PeerId, _endpoint: ConnectedPoint) {
|
||||
self.connected_peers.insert(peer_id);
|
||||
|
||||
metrics::inc_counter(&metrics::PEER_CONNECT_EVENT_COUNT);
|
||||
metrics::set_gauge(&metrics::PEERS_CONNECTED, self.connected_peers() as i64);
|
||||
}
|
||||
|
||||
fn inject_disconnected(&mut self, peer_id: &PeerId, _endpoint: ConnectedPoint) {
|
||||
self.connected_peers.remove(peer_id);
|
||||
|
||||
metrics::inc_counter(&metrics::PEER_DISCONNECT_EVENT_COUNT);
|
||||
metrics::set_gauge(&metrics::PEERS_CONNECTED, self.connected_peers() as i64);
|
||||
}
|
||||
|
||||
fn inject_replaced(
|
||||
@ -217,6 +233,7 @@ where
|
||||
}
|
||||
Discv5Event::SocketUpdated(socket) => {
|
||||
info!(self.log, "Address updated"; "IP" => format!("{}",socket.ip()));
|
||||
metrics::inc_counter(&metrics::ADDRESS_UPDATE_COUNT);
|
||||
let mut address = Multiaddr::from(socket.ip());
|
||||
address.push(Protocol::Tcp(self.tcp_port));
|
||||
let enr = self.discovery.local_enr();
|
||||
|
@ -2,21 +2,29 @@
|
||||
/// all required libp2p functionality.
|
||||
///
|
||||
/// This crate builds and manages the libp2p services required by the beacon node.
|
||||
#[macro_use]
|
||||
extern crate lazy_static;
|
||||
|
||||
pub mod behaviour;
|
||||
mod config;
|
||||
mod discovery;
|
||||
pub mod error;
|
||||
mod metrics;
|
||||
pub mod rpc;
|
||||
mod service;
|
||||
|
||||
pub use behaviour::PubsubMessage;
|
||||
pub use config::{Config as NetworkConfig, *};
|
||||
pub use config::{
|
||||
Config as NetworkConfig, BEACON_ATTESTATION_TOPIC, BEACON_BLOCK_TOPIC, SHARD_TOPIC_PREFIX,
|
||||
TOPIC_ENCODING_POSTFIX, TOPIC_PREFIX,
|
||||
};
|
||||
pub use libp2p::enr::Enr;
|
||||
pub use libp2p::gossipsub::{Topic, TopicHash};
|
||||
pub use libp2p::multiaddr;
|
||||
pub use libp2p::Multiaddr;
|
||||
pub use libp2p::{
|
||||
gossipsub::{GossipsubConfig, GossipsubConfigBuilder},
|
||||
PeerId,
|
||||
PeerId, Swarm,
|
||||
};
|
||||
pub use rpc::RPCEvent;
|
||||
pub use service::Libp2pEvent;
|
||||
|
20
beacon_node/eth2-libp2p/src/metrics.rs
Normal file
@ -0,0 +1,20 @@
|
||||
pub use lighthouse_metrics::*;
|
||||
|
||||
lazy_static! {
|
||||
pub static ref ADDRESS_UPDATE_COUNT: Result<IntCounter> = try_create_int_counter(
|
||||
"libp2p_address_update_total",
|
||||
"Count of libp2p socked updated events (when our view of our IP address has changed)"
|
||||
);
|
||||
pub static ref PEERS_CONNECTED: Result<IntGauge> = try_create_int_gauge(
|
||||
"libp2p_peer_connected_peers_total",
|
||||
"Count of libp2p peers currently connected"
|
||||
);
|
||||
pub static ref PEER_CONNECT_EVENT_COUNT: Result<IntCounter> = try_create_int_counter(
|
||||
"libp2p_peer_connect_event_total",
|
||||
"Count of libp2p peer connect events (not the current number of connected peers)"
|
||||
);
|
||||
pub static ref PEER_DISCONNECT_EVENT_COUNT: Result<IntCounter> = try_create_int_counter(
|
||||
"libp2p_peer_disconnect_event_total",
|
||||
"Count of libp2p peer disconnect events"
|
||||
);
|
||||
}
|
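For context, a minimal sketch (not part of this commit) of how these metrics are driven through the `lighthouse_metrics` helpers, mirroring the calls added to `discovery.rs` above; the wrapper function name is illustrative only.

// Record a connect event and refresh the connected-peer gauge.
fn record_peer_connected(current_peer_count: usize) {
    inc_counter(&PEER_CONNECT_EVENT_COUNT);
    set_gauge(&PEERS_CONNECTED, current_peer_count as i64);
}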
@ -16,7 +16,7 @@ use libp2p::core::{
|
||||
upgrade::{InboundUpgradeExt, OutboundUpgradeExt},
|
||||
};
|
||||
use libp2p::{core, secio, PeerId, Swarm, Transport};
|
||||
use slog::{debug, info, trace, warn};
|
||||
use slog::{crit, debug, info, trace, warn};
|
||||
use std::fs::File;
|
||||
use std::io::prelude::*;
|
||||
use std::io::{Error, ErrorKind};
|
||||
@ -33,7 +33,7 @@ pub struct Service {
|
||||
//TODO: Make this private
|
||||
pub swarm: Swarm<Libp2pStream, Libp2pBehaviour>,
|
||||
/// This node's PeerId.
|
||||
_local_peer_id: PeerId,
|
||||
pub local_peer_id: PeerId,
|
||||
/// The libp2p logger handle.
|
||||
pub log: slog::Logger,
|
||||
}
|
||||
@ -68,10 +68,15 @@ impl Service {
|
||||
log_address.push(Protocol::P2p(local_peer_id.clone().into()));
|
||||
info!(log, "Listening established"; "Address" => format!("{}", log_address));
|
||||
}
|
||||
Err(err) => warn!(
|
||||
log,
|
||||
"Failed to listen on address"; "Address" => format!("{}", listen_multiaddr), "Error" => format!("{:?}", err)
|
||||
),
|
||||
Err(err) => {
|
||||
crit!(
|
||||
log,
|
||||
"Unable to listen on libp2p address";
|
||||
"error" => format!("{:?}", err),
|
||||
"listen_multiaddr" => format!("{}", listen_multiaddr),
|
||||
);
|
||||
return Err("Libp2p was unable to listen on the given listen address.".into());
|
||||
}
|
||||
};
|
||||
|
||||
// attempt to connect to user-input libp2p nodes
|
||||
@ -126,7 +131,7 @@ impl Service {
|
||||
info!(log, "Subscribed to topics"; "Topics" => format!("{:?}", subscribed_topics.iter().map(|t| format!("{}", t)).collect::<Vec<String>>()));
|
||||
|
||||
Ok(Service {
|
||||
_local_peer_id: local_peer_id,
|
||||
local_peer_id,
|
||||
swarm,
|
||||
log,
|
||||
})
|
||||
|
@ -1,23 +0,0 @@
|
||||
[package]
|
||||
name = "http_server"
|
||||
version = "0.1.0"
|
||||
authors = ["Paul Hauner <paul@paulhauner.com>"]
|
||||
edition = "2018"
|
||||
|
||||
[dependencies]
|
||||
beacon_chain = { path = "../beacon_chain" }
|
||||
iron = "^0.6"
|
||||
router = "^0.6"
|
||||
network = { path = "../network" }
|
||||
types = { path = "../../eth2/types" }
|
||||
slot_clock = { path = "../../eth2/utils/slot_clock" }
|
||||
persistent = "^0.4"
|
||||
prometheus = { version = "^0.6", features = ["process"] }
|
||||
clap = "2.32.0"
|
||||
futures = "0.1.23"
|
||||
serde = "1.0"
|
||||
serde_derive = "1.0"
|
||||
serde_json = "1.0"
|
||||
slog = { version = "^2.2.3" , features = ["max_level_trace"] }
|
||||
tokio = "0.1.17"
|
||||
exit-future = "0.1.4"
|
@ -1,71 +0,0 @@
|
||||
use crate::{key::BeaconChainKey, map_persistent_err_to_500};
|
||||
use beacon_chain::{BeaconChain, BeaconChainTypes};
|
||||
use iron::prelude::*;
|
||||
use iron::{
|
||||
headers::{CacheControl, CacheDirective, ContentType},
|
||||
status::Status,
|
||||
AfterMiddleware, Handler, IronResult, Request, Response,
|
||||
};
|
||||
use persistent::Read;
|
||||
use router::Router;
|
||||
use serde_json::json;
|
||||
use std::sync::Arc;
|
||||
|
||||
/// Yields a handler for the HTTP API.
|
||||
pub fn build_handler<T: BeaconChainTypes + 'static>(
|
||||
beacon_chain: Arc<BeaconChain<T>>,
|
||||
) -> impl Handler {
|
||||
let mut router = Router::new();
|
||||
|
||||
router.get("/node/fork", handle_fork::<T>, "fork");
|
||||
|
||||
let mut chain = Chain::new(router);
|
||||
|
||||
// Insert `BeaconChain` so it may be accessed in a request.
|
||||
chain.link(Read::<BeaconChainKey<T>>::both(beacon_chain.clone()));
|
||||
// Set the content-type headers.
|
||||
chain.link_after(SetJsonContentType);
|
||||
// Set the cache headers.
|
||||
chain.link_after(SetCacheDirectives);
|
||||
|
||||
chain
|
||||
}
|
||||
|
||||
/// Sets the `cache-control` headers on _all_ responses, unless they are already set.
|
||||
struct SetCacheDirectives;
|
||||
impl AfterMiddleware for SetCacheDirectives {
|
||||
fn after(&self, _req: &mut Request, mut resp: Response) -> IronResult<Response> {
|
||||
// This is run for every requests, AFTER all handlers have been executed
|
||||
if resp.headers.get::<CacheControl>() == None {
|
||||
resp.headers.set(CacheControl(vec![
|
||||
CacheDirective::NoCache,
|
||||
CacheDirective::NoStore,
|
||||
]));
|
||||
}
|
||||
Ok(resp)
|
||||
}
|
||||
}
|
||||
|
||||
/// Sets the `content-type` headers on _all_ responses, unless they are already set.
|
||||
struct SetJsonContentType;
|
||||
impl AfterMiddleware for SetJsonContentType {
|
||||
fn after(&self, _req: &mut Request, mut resp: Response) -> IronResult<Response> {
|
||||
if resp.headers.get::<ContentType>() == None {
|
||||
resp.headers.set(ContentType::json());
|
||||
}
|
||||
Ok(resp)
|
||||
}
|
||||
}
|
||||
|
||||
fn handle_fork<T: BeaconChainTypes + 'static>(req: &mut Request) -> IronResult<Response> {
|
||||
let beacon_chain = req
|
||||
.get::<Read<BeaconChainKey<T>>>()
|
||||
.map_err(map_persistent_err_to_500)?;
|
||||
|
||||
let response = json!({
|
||||
"fork": beacon_chain.head().beacon_state.fork,
|
||||
"network_id": beacon_chain.spec.network_id
|
||||
});
|
||||
|
||||
Ok(Response::with((Status::Ok, response.to_string())))
|
||||
}
|
@ -1,33 +0,0 @@
|
||||
use crate::metrics::LocalMetrics;
|
||||
use beacon_chain::{BeaconChain, BeaconChainTypes};
|
||||
use iron::typemap::Key;
|
||||
use prometheus::Registry;
|
||||
use std::marker::PhantomData;
|
||||
use std::path::PathBuf;
|
||||
use std::sync::Arc;
|
||||
|
||||
pub struct BeaconChainKey<T> {
|
||||
_phantom: PhantomData<T>,
|
||||
}
|
||||
|
||||
impl<T: BeaconChainTypes + 'static> Key for BeaconChainKey<T> {
|
||||
type Value = Arc<BeaconChain<T>>;
|
||||
}
|
||||
|
||||
pub struct MetricsRegistryKey;
|
||||
|
||||
impl Key for MetricsRegistryKey {
|
||||
type Value = Registry;
|
||||
}
|
||||
|
||||
pub struct LocalMetricsKey;
|
||||
|
||||
impl Key for LocalMetricsKey {
|
||||
type Value = LocalMetrics;
|
||||
}
|
||||
|
||||
pub struct DBPathKey;
|
||||
|
||||
impl Key for DBPathKey {
|
||||
type Value = PathBuf;
|
||||
}
|
@ -1,145 +0,0 @@
|
||||
mod api;
|
||||
mod key;
|
||||
mod metrics;
|
||||
|
||||
use beacon_chain::{BeaconChain, BeaconChainTypes};
|
||||
use clap::ArgMatches;
|
||||
use futures::Future;
|
||||
use iron::prelude::*;
|
||||
use network::NetworkMessage;
|
||||
use prometheus::Registry;
|
||||
use router::Router;
|
||||
use serde_derive::{Deserialize, Serialize};
|
||||
use slog::{info, o, warn};
|
||||
use std::path::PathBuf;
|
||||
use std::sync::Arc;
|
||||
use tokio::runtime::TaskExecutor;
|
||||
use tokio::sync::mpsc;
|
||||
|
||||
#[derive(PartialEq, Clone, Debug, Serialize, Deserialize)]
|
||||
pub struct HttpServerConfig {
|
||||
pub enabled: bool,
|
||||
pub listen_address: String,
|
||||
pub listen_port: String,
|
||||
}
|
||||
|
||||
impl Default for HttpServerConfig {
|
||||
fn default() -> Self {
|
||||
Self {
|
||||
enabled: false,
|
||||
listen_address: "127.0.0.1".to_string(),
|
||||
listen_port: "5052".to_string(),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl HttpServerConfig {
|
||||
pub fn apply_cli_args(&mut self, args: &ArgMatches) -> Result<(), &'static str> {
|
||||
if args.is_present("http") {
|
||||
self.enabled = true;
|
||||
}
|
||||
|
||||
if let Some(listen_address) = args.value_of("http-address") {
|
||||
self.listen_address = listen_address.to_string();
|
||||
}
|
||||
|
||||
if let Some(listen_port) = args.value_of("http-port") {
|
||||
self.listen_port = listen_port.to_string();
|
||||
}
|
||||
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
|
||||
/// Build the `iron` HTTP server, defining the core routes.
|
||||
pub fn create_iron_http_server<T: BeaconChainTypes + 'static>(
|
||||
beacon_chain: Arc<BeaconChain<T>>,
|
||||
db_path: PathBuf,
|
||||
metrics_registry: Registry,
|
||||
) -> Iron<Router> {
|
||||
let mut router = Router::new();
|
||||
|
||||
// A `GET` request to `/metrics` is handled by the `metrics` module.
|
||||
router.get(
|
||||
"/metrics",
|
||||
metrics::build_handler(beacon_chain.clone(), db_path, metrics_registry),
|
||||
"metrics",
|
||||
);
|
||||
|
||||
// Any request to all other endpoints is handled by the `api` module.
|
||||
router.any("/*", api::build_handler(beacon_chain.clone()), "api");
|
||||
|
||||
Iron::new(router)
|
||||
}
|
||||
|
||||
/// Start the HTTP service on the tokio `TaskExecutor`.
|
||||
pub fn start_service<T: BeaconChainTypes + 'static>(
|
||||
config: &HttpServerConfig,
|
||||
executor: &TaskExecutor,
|
||||
_network_chan: mpsc::UnboundedSender<NetworkMessage>,
|
||||
beacon_chain: Arc<BeaconChain<T>>,
|
||||
db_path: PathBuf,
|
||||
metrics_registry: Registry,
|
||||
log: &slog::Logger,
|
||||
) -> exit_future::Signal {
|
||||
let log = log.new(o!("Service"=>"HTTP"));
|
||||
|
||||
// Create:
|
||||
// - `shutdown_trigger` a one-shot to shut down this service.
|
||||
// - `wait_for_shutdown` a future that will wait until someone calls shutdown.
|
||||
let (shutdown_trigger, wait_for_shutdown) = exit_future::signal();
|
||||
|
||||
// Create an `iron` http, without starting it yet.
|
||||
let iron = create_iron_http_server(beacon_chain, db_path, metrics_registry);
|
||||
|
||||
// Create a HTTP server future.
|
||||
//
|
||||
// 1. Start the HTTP server
|
||||
// 2. Build an exit future that will shutdown the server when requested.
|
||||
// 3. Return the exit future, so the caller may shutdown the service when desired.
|
||||
let http_service = {
|
||||
let listen_address = format!("{}:{}", config.listen_address, config.listen_port);
|
||||
// Start the HTTP server
|
||||
let server_start_result = iron.http(listen_address.clone());
|
||||
|
||||
if server_start_result.is_ok() {
|
||||
info!(log, "HTTP server running on {}", listen_address);
|
||||
} else {
|
||||
warn!(log, "HTTP server failed to start on {}", listen_address);
|
||||
}
|
||||
|
||||
// Build a future that will shutdown the HTTP server when the `shutdown_trigger` is
|
||||
// triggered.
|
||||
wait_for_shutdown.and_then(move |_| {
|
||||
info!(log, "HTTP server shutting down");
|
||||
|
||||
if let Ok(mut server) = server_start_result {
|
||||
// According to the documentation, `server.close()` "doesn't work" and the server
|
||||
// keeps listening.
|
||||
//
|
||||
// It is being called anyway, because it seems like the right thing to do. If you
|
||||
// know this has negative side-effects, please create an issue to discuss.
|
||||
//
|
||||
// See: https://docs.rs/iron/0.6.0/iron/struct.Listening.html#impl
|
||||
match server.close() {
|
||||
_ => (),
|
||||
};
|
||||
}
|
||||
info!(log, "HTTP server shutdown complete.");
|
||||
Ok(())
|
||||
})
|
||||
};
|
||||
|
||||
// Attach the HTTP server to the executor.
|
||||
executor.spawn(http_service);
|
||||
|
||||
shutdown_trigger
|
||||
}
|
||||
|
||||
/// Helper function for mapping a failure to read state to a 500 server error.
|
||||
fn map_persistent_err_to_500(e: persistent::PersistentError) -> iron::error::IronError {
|
||||
iron::error::IronError {
|
||||
error: Box::new(e),
|
||||
response: iron::Response::with(iron::status::Status::InternalServerError),
|
||||
}
|
||||
}
|
@ -1,72 +0,0 @@
|
||||
use crate::{
|
||||
key::{BeaconChainKey, DBPathKey, LocalMetricsKey, MetricsRegistryKey},
|
||||
map_persistent_err_to_500,
|
||||
};
|
||||
use beacon_chain::{BeaconChain, BeaconChainTypes};
|
||||
use iron::prelude::*;
|
||||
use iron::{status::Status, Handler, IronResult, Request, Response};
|
||||
use persistent::Read;
|
||||
use prometheus::{Encoder, Registry, TextEncoder};
|
||||
use std::path::PathBuf;
|
||||
use std::sync::Arc;
|
||||
|
||||
pub use local_metrics::LocalMetrics;
|
||||
|
||||
mod local_metrics;
|
||||
|
||||
/// Yields a handler for the metrics endpoint.
|
||||
pub fn build_handler<T: BeaconChainTypes + 'static>(
|
||||
beacon_chain: Arc<BeaconChain<T>>,
|
||||
db_path: PathBuf,
|
||||
metrics_registry: Registry,
|
||||
) -> impl Handler {
|
||||
let mut chain = Chain::new(handle_metrics::<T>);
|
||||
|
||||
let local_metrics = LocalMetrics::new().unwrap();
|
||||
local_metrics.register(&metrics_registry).unwrap();
|
||||
|
||||
chain.link(Read::<BeaconChainKey<T>>::both(beacon_chain));
|
||||
chain.link(Read::<MetricsRegistryKey>::both(metrics_registry));
|
||||
chain.link(Read::<LocalMetricsKey>::both(local_metrics));
|
||||
chain.link(Read::<DBPathKey>::both(db_path));
|
||||
|
||||
chain
|
||||
}
|
||||
|
||||
/// Handle a request for Prometheus metrics.
|
||||
///
|
||||
/// Returns a text string containing all metrics.
|
||||
fn handle_metrics<T: BeaconChainTypes + 'static>(req: &mut Request) -> IronResult<Response> {
|
||||
let beacon_chain = req
|
||||
.get::<Read<BeaconChainKey<T>>>()
|
||||
.map_err(map_persistent_err_to_500)?;
|
||||
|
||||
let r = req
|
||||
.get::<Read<MetricsRegistryKey>>()
|
||||
.map_err(map_persistent_err_to_500)?;
|
||||
|
||||
let local_metrics = req
|
||||
.get::<Read<LocalMetricsKey>>()
|
||||
.map_err(map_persistent_err_to_500)?;
|
||||
|
||||
let db_path = req
|
||||
.get::<Read<DBPathKey>>()
|
||||
.map_err(map_persistent_err_to_500)?;
|
||||
|
||||
// Update metrics that are calculated on each scrape.
|
||||
local_metrics.update(&beacon_chain, &db_path);
|
||||
|
||||
let mut buffer = vec![];
|
||||
let encoder = TextEncoder::new();
|
||||
|
||||
// Gather `DEFAULT_REGISTRY` metrics.
|
||||
encoder.encode(&prometheus::gather(), &mut buffer).unwrap();
|
||||
|
||||
// Gather metrics from our registry.
|
||||
let metric_families = r.gather();
|
||||
encoder.encode(&metric_families, &mut buffer).unwrap();
|
||||
|
||||
let prom_string = String::from_utf8(buffer).unwrap();
|
||||
|
||||
Ok(Response::with((Status::Ok, prom_string)))
|
||||
}
|
@ -1,154 +0,0 @@
|
||||
use beacon_chain::{BeaconChain, BeaconChainTypes};
|
||||
use prometheus::{IntGauge, Opts, Registry};
|
||||
use slot_clock::SlotClock;
|
||||
use std::fs;
|
||||
use std::path::PathBuf;
|
||||
use types::{EthSpec, Slot};
|
||||
|
||||
// If set to `true` will iterate and sum the balances of all validators in the state for each
|
||||
// scrape.
|
||||
const SHOULD_SUM_VALIDATOR_BALANCES: bool = true;
|
||||
|
||||
pub struct LocalMetrics {
|
||||
present_slot: IntGauge,
|
||||
present_epoch: IntGauge,
|
||||
best_slot: IntGauge,
|
||||
best_beacon_block_root: IntGauge,
|
||||
justified_beacon_block_root: IntGauge,
|
||||
finalized_beacon_block_root: IntGauge,
|
||||
validator_count: IntGauge,
|
||||
justified_epoch: IntGauge,
|
||||
finalized_epoch: IntGauge,
|
||||
validator_balances_sum: IntGauge,
|
||||
database_size: IntGauge,
|
||||
}
|
||||
|
||||
impl LocalMetrics {
|
||||
/// Create a new instance.
|
||||
pub fn new() -> Result<Self, prometheus::Error> {
|
||||
Ok(Self {
|
||||
present_slot: {
|
||||
let opts = Opts::new("present_slot", "slot_at_time_of_scrape");
|
||||
IntGauge::with_opts(opts)?
|
||||
},
|
||||
present_epoch: {
|
||||
let opts = Opts::new("present_epoch", "epoch_at_time_of_scrape");
|
||||
IntGauge::with_opts(opts)?
|
||||
},
|
||||
best_slot: {
|
||||
let opts = Opts::new("best_slot", "slot_of_block_at_chain_head");
|
||||
IntGauge::with_opts(opts)?
|
||||
},
|
||||
best_beacon_block_root: {
|
||||
let opts = Opts::new("best_beacon_block_root", "root_of_block_at_chain_head");
|
||||
IntGauge::with_opts(opts)?
|
||||
},
|
||||
justified_beacon_block_root: {
|
||||
let opts = Opts::new(
|
||||
"justified_beacon_block_root",
|
||||
"root_of_block_at_justified_head",
|
||||
);
|
||||
IntGauge::with_opts(opts)?
|
||||
},
|
||||
finalized_beacon_block_root: {
|
||||
let opts = Opts::new(
|
||||
"finalized_beacon_block_root",
|
||||
"root_of_block_at_finalized_head",
|
||||
);
|
||||
IntGauge::with_opts(opts)?
|
||||
},
|
||||
validator_count: {
|
||||
let opts = Opts::new("validator_count", "number_of_validators");
|
||||
IntGauge::with_opts(opts)?
|
||||
},
|
||||
justified_epoch: {
|
||||
let opts = Opts::new("justified_epoch", "state_justified_epoch");
|
||||
IntGauge::with_opts(opts)?
|
||||
},
|
||||
finalized_epoch: {
|
||||
let opts = Opts::new("finalized_epoch", "state_finalized_epoch");
|
||||
IntGauge::with_opts(opts)?
|
||||
},
|
||||
validator_balances_sum: {
|
||||
let opts = Opts::new("validator_balances_sum", "sum_of_all_validator_balances");
|
||||
IntGauge::with_opts(opts)?
|
||||
},
|
||||
database_size: {
|
||||
let opts = Opts::new("database_size", "size_of_on_disk_db_in_mb");
|
||||
IntGauge::with_opts(opts)?
|
||||
},
|
||||
})
|
||||
}
|
||||
|
||||
/// Registry this instance with the `registry`.
|
||||
pub fn register(&self, registry: &Registry) -> Result<(), prometheus::Error> {
|
||||
registry.register(Box::new(self.present_slot.clone()))?;
|
||||
registry.register(Box::new(self.present_epoch.clone()))?;
|
||||
registry.register(Box::new(self.best_slot.clone()))?;
|
||||
registry.register(Box::new(self.best_beacon_block_root.clone()))?;
|
||||
registry.register(Box::new(self.justified_beacon_block_root.clone()))?;
|
||||
registry.register(Box::new(self.finalized_beacon_block_root.clone()))?;
|
||||
registry.register(Box::new(self.validator_count.clone()))?;
|
||||
registry.register(Box::new(self.finalized_epoch.clone()))?;
|
||||
registry.register(Box::new(self.justified_epoch.clone()))?;
|
||||
registry.register(Box::new(self.validator_balances_sum.clone()))?;
|
||||
registry.register(Box::new(self.database_size.clone()))?;
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
/// Update the metrics in `self` to the latest values.
|
||||
pub fn update<T: BeaconChainTypes>(&self, beacon_chain: &BeaconChain<T>, db_path: &PathBuf) {
|
||||
let state = &beacon_chain.head().beacon_state;
|
||||
|
||||
let present_slot = beacon_chain
|
||||
.slot_clock
|
||||
.present_slot()
|
||||
.unwrap_or_else(|_| None)
|
||||
.unwrap_or_else(|| Slot::new(0));
|
||||
self.present_slot.set(present_slot.as_u64() as i64);
|
||||
self.present_epoch
|
||||
.set(present_slot.epoch(T::EthSpec::slots_per_epoch()).as_u64() as i64);
|
||||
|
||||
self.best_slot.set(state.slot.as_u64() as i64);
|
||||
self.best_beacon_block_root
|
||||
.set(beacon_chain.head().beacon_block_root.to_low_u64_le() as i64);
|
||||
self.justified_beacon_block_root.set(
|
||||
beacon_chain
|
||||
.head()
|
||||
.beacon_state
|
||||
.current_justified_checkpoint
|
||||
.root
|
||||
.to_low_u64_le() as i64,
|
||||
);
|
||||
self.finalized_beacon_block_root.set(
|
||||
beacon_chain
|
||||
.head()
|
||||
.beacon_state
|
||||
.finalized_checkpoint
|
||||
.root
|
||||
.to_low_u64_le() as i64,
|
||||
);
|
||||
self.validator_count.set(state.validators.len() as i64);
|
||||
self.justified_epoch
|
||||
.set(state.current_justified_checkpoint.epoch.as_u64() as i64);
|
||||
self.finalized_epoch
|
||||
.set(state.finalized_checkpoint.epoch.as_u64() as i64);
|
||||
if SHOULD_SUM_VALIDATOR_BALANCES {
|
||||
self.validator_balances_sum
|
||||
.set(state.balances.iter().sum::<u64>() as i64);
|
||||
}
|
||||
let db_size = if let Ok(iter) = fs::read_dir(db_path) {
|
||||
iter.filter_map(Result::ok)
|
||||
.map(size_of_dir_entry)
|
||||
.fold(0_u64, |sum, val| sum + val)
|
||||
} else {
|
||||
0
|
||||
};
|
||||
self.database_size.set(db_size as i64);
|
||||
}
|
||||
}
|
||||
|
||||
fn size_of_dir_entry(dir: fs::DirEntry) -> u64 {
|
||||
dir.metadata().map(|m| m.len()).unwrap_or(0)
|
||||
}
|
@ -5,7 +5,7 @@ use beacon_chain::{BeaconChain, BeaconChainTypes};
|
||||
use core::marker::PhantomData;
|
||||
use eth2_libp2p::Service as LibP2PService;
|
||||
use eth2_libp2p::Topic;
|
||||
use eth2_libp2p::{Libp2pEvent, PeerId};
|
||||
use eth2_libp2p::{Enr, Libp2pEvent, Multiaddr, PeerId, Swarm};
|
||||
use eth2_libp2p::{PubsubMessage, RPCEvent};
|
||||
use futures::prelude::*;
|
||||
use futures::Stream;
|
||||
@ -18,6 +18,7 @@ use tokio::sync::{mpsc, oneshot};
|
||||
/// Service that handles communication between internal services and the eth2_libp2p network service.
|
||||
pub struct Service<T: BeaconChainTypes> {
|
||||
libp2p_service: Arc<Mutex<LibP2PService>>,
|
||||
libp2p_port: u16,
|
||||
_libp2p_exit: oneshot::Sender<()>,
|
||||
_network_send: mpsc::UnboundedSender<NetworkMessage>,
|
||||
_phantom: PhantomData<T>,
|
||||
@ -57,6 +58,7 @@ impl<T: BeaconChainTypes + 'static> Service<T> {
|
||||
)?;
|
||||
let network_service = Service {
|
||||
libp2p_service,
|
||||
libp2p_port: config.libp2p_port,
|
||||
_libp2p_exit: libp2p_exit,
|
||||
_network_send: network_send.clone(),
|
||||
_phantom: PhantomData,
|
||||
@ -65,6 +67,52 @@ impl<T: BeaconChainTypes + 'static> Service<T> {
|
||||
Ok((Arc::new(network_service), network_send))
|
||||
}
|
||||
|
||||
/// Returns the local ENR from the underlying Discv5 behaviour that external peers may connect
|
||||
/// to.
|
||||
pub fn local_enr(&self) -> Enr {
|
||||
self.libp2p_service
|
||||
.lock()
|
||||
.swarm
|
||||
.discovery()
|
||||
.local_enr()
|
||||
.clone()
|
||||
}
|
||||
|
||||
/// Returns the local libp2p PeerID.
|
||||
pub fn local_peer_id(&self) -> PeerId {
|
||||
self.libp2p_service.lock().local_peer_id.clone()
|
||||
}
|
||||
|
||||
/// Returns the list of `Multiaddr` that the underlying libp2p instance is listening on.
|
||||
pub fn listen_multiaddrs(&self) -> Vec<Multiaddr> {
|
||||
Swarm::listeners(&self.libp2p_service.lock().swarm)
|
||||
.cloned()
|
||||
.collect()
|
||||
}
|
||||
|
||||
/// Returns the libp2p port that this node has been configured to listen on.
|
||||
pub fn listen_port(&self) -> u16 {
|
||||
self.libp2p_port
|
||||
}
|
||||
|
||||
/// Returns the number of libp2p connected peers.
|
||||
pub fn connected_peers(&self) -> usize {
|
||||
self.libp2p_service.lock().swarm.connected_peers()
|
||||
}
|
||||
|
||||
/// Returns the set of `PeerId` that are connected via libp2p.
|
||||
pub fn connected_peer_set(&self) -> Vec<PeerId> {
|
||||
self.libp2p_service
|
||||
.lock()
|
||||
.swarm
|
||||
.discovery()
|
||||
.connected_peer_set()
|
||||
.iter()
|
||||
.cloned()
|
||||
.collect()
|
||||
}
|
||||
|
||||
/// Provides a reference to the underlying libp2p service.
|
||||
pub fn libp2p_service(&self) -> Arc<Mutex<LibP2PService>> {
|
||||
self.libp2p_service.clone()
|
||||
}
|
||||
|
@ -377,23 +377,6 @@ impl<T: BeaconChainTypes> SimpleSync<T> {
|
||||
.filter(|block| block.slot >= req.start_slot)
|
||||
.collect();
|
||||
|
||||
// TODO: Again find a more elegant way to include genesis if needed
|
||||
// if the genesis is requested, add it in
|
||||
if req.start_slot == 0 {
|
||||
if let Ok(Some(genesis)) = self
|
||||
.chain
|
||||
.store
|
||||
.get::<BeaconBlock<T::EthSpec>>(&self.chain.genesis_block_root)
|
||||
{
|
||||
blocks.push(genesis);
|
||||
} else {
|
||||
warn!(
|
||||
self.log,
|
||||
"Requested genesis, which is not in the chain store";
|
||||
);
|
||||
}
|
||||
}
|
||||
|
||||
blocks.reverse();
|
||||
blocks.dedup_by_key(|brs| brs.slot);
|
||||
|
||||
|
@ -7,6 +7,8 @@ edition = "2018"
|
||||
# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html
|
||||
[dependencies]
|
||||
beacon_chain = { path = "../beacon_chain" }
|
||||
network = { path = "../network" }
|
||||
eth2-libp2p = { path = "../eth2-libp2p" }
|
||||
store = { path = "../store" }
|
||||
version = { path = "../version" }
|
||||
serde = { version = "1.0", features = ["derive"] }
|
||||
@ -18,8 +20,12 @@ state_processing = { path = "../../eth2/state_processing" }
|
||||
types = { path = "../../eth2/types" }
|
||||
clap = "2.32.0"
|
||||
http = "^0.1.17"
|
||||
prometheus = { version = "^0.6", features = ["process"] }
|
||||
hyper = "0.12.32"
|
||||
futures = "0.1"
|
||||
exit-future = "0.1.3"
|
||||
tokio = "0.1.17"
|
||||
url = "2.0"
|
||||
lazy_static = "1.3.0"
|
||||
lighthouse_metrics = { path = "../../eth2/utils/lighthouse_metrics" }
|
||||
slot_clock = { path = "../../eth2/utils/slot_clock" }
|
||||
|
@ -2,9 +2,114 @@ use super::{success_response, ApiResult};
|
||||
use crate::{helpers::*, ApiError, UrlQuery};
|
||||
use beacon_chain::{BeaconChain, BeaconChainTypes};
|
||||
use hyper::{Body, Request};
|
||||
use serde::Serialize;
|
||||
use std::sync::Arc;
|
||||
use store::Store;
|
||||
use types::BeaconState;
|
||||
use types::{BeaconBlock, BeaconState, EthSpec, Hash256, Slot};
|
||||
|
||||
#[derive(Serialize)]
|
||||
pub struct HeadResponse {
|
||||
pub slot: Slot,
|
||||
pub block_root: Hash256,
|
||||
pub state_root: Hash256,
|
||||
}
|
||||
|
||||
/// HTTP handler to return the current head of the beacon chain (slot, block root and state root).
|
||||
pub fn get_head<T: BeaconChainTypes + 'static>(req: Request<Body>) -> ApiResult {
|
||||
let beacon_chain = req
|
||||
.extensions()
|
||||
.get::<Arc<BeaconChain<T>>>()
|
||||
.ok_or_else(|| ApiError::ServerError("Beacon chain extension missing".to_string()))?;
|
||||
|
||||
let head = HeadResponse {
|
||||
slot: beacon_chain.head().beacon_state.slot,
|
||||
block_root: beacon_chain.head().beacon_block_root,
|
||||
state_root: beacon_chain.head().beacon_state_root,
|
||||
};
|
||||
|
||||
let json: String = serde_json::to_string(&head)
|
||||
.map_err(|e| ApiError::ServerError(format!("Unable to serialize HeadResponse: {:?}", e)))?;
|
||||
|
||||
Ok(success_response(Body::from(json)))
|
||||
}
|
||||
|
||||
#[derive(Serialize)]
|
||||
#[serde(bound = "T: EthSpec")]
|
||||
pub struct BlockResponse<T: EthSpec> {
|
||||
pub root: Hash256,
|
||||
pub beacon_block: BeaconBlock<T>,
|
||||
}
|
||||
|
||||
/// HTTP handler to return a `BeaconBlock` at a given `root` or `slot`.
|
||||
pub fn get_block<T: BeaconChainTypes + 'static>(req: Request<Body>) -> ApiResult {
|
||||
let beacon_chain = req
|
||||
.extensions()
|
||||
.get::<Arc<BeaconChain<T>>>()
|
||||
.ok_or_else(|| ApiError::ServerError("Beacon chain extension missing".to_string()))?;
|
||||
|
||||
let query_params = ["root", "slot"];
|
||||
let (key, value) = UrlQuery::from_request(&req)?.first_of(&query_params)?;
|
||||
|
||||
let block_root = match (key.as_ref(), value) {
|
||||
("slot", value) => {
|
||||
let target = parse_slot(&value)?;
|
||||
|
||||
block_root_at_slot(&beacon_chain, target).ok_or_else(|| {
|
||||
ApiError::NotFound(format!("Unable to find BeaconBlock for slot {}", target))
|
||||
})?
|
||||
}
|
||||
("root", value) => parse_root(&value)?,
|
||||
_ => return Err(ApiError::ServerError("Unexpected query parameter".into())),
|
||||
};
|
||||
|
||||
let block = beacon_chain
|
||||
.store
|
||||
.get::<BeaconBlock<T::EthSpec>>(&block_root)?
|
||||
.ok_or_else(|| {
|
||||
ApiError::NotFound(format!(
|
||||
"Unable to find BeaconBlock for root {}",
|
||||
block_root
|
||||
))
|
||||
})?;
|
||||
|
||||
let response = BlockResponse {
|
||||
root: block_root,
|
||||
beacon_block: block,
|
||||
};
|
||||
|
||||
let json: String = serde_json::to_string(&response).map_err(|e| {
|
||||
ApiError::ServerError(format!("Unable to serialize BlockResponse: {:?}", e))
|
||||
})?;
|
||||
|
||||
Ok(success_response(Body::from(json)))
|
||||
}
|
||||
|
||||
/// HTTP handler to return a `BeaconBlock` root at a given `slot`.
|
||||
pub fn get_block_root<T: BeaconChainTypes + 'static>(req: Request<Body>) -> ApiResult {
|
||||
let beacon_chain = req
|
||||
.extensions()
|
||||
.get::<Arc<BeaconChain<T>>>()
|
||||
.ok_or_else(|| ApiError::ServerError("Beacon chain extension missing".to_string()))?;
|
||||
|
||||
let slot_string = UrlQuery::from_request(&req)?.only_one("slot")?;
|
||||
let target = parse_slot(&slot_string)?;
|
||||
|
||||
let root = block_root_at_slot(&beacon_chain, target).ok_or_else(|| {
|
||||
ApiError::NotFound(format!("Unable to find BeaconBlock for slot {}", target))
|
||||
})?;
|
||||
|
||||
let json: String = serde_json::to_string(&root)
|
||||
.map_err(|e| ApiError::ServerError(format!("Unable to serialize root: {:?}", e)))?;
|
||||
|
||||
Ok(success_response(Body::from(json)))
|
||||
}
|
||||
|
||||
#[derive(Serialize)]
|
||||
#[serde(bound = "T: EthSpec")]
|
||||
pub struct StateResponse<T: EthSpec> {
|
||||
pub root: Hash256,
|
||||
pub beacon_state: BeaconState<T>,
|
||||
}
|
||||
|
||||
/// HTTP handler to return a `BeaconState` at a given `root` or `slot`.
|
||||
///
|
||||
@ -19,26 +124,34 @@ pub fn get_state<T: BeaconChainTypes + 'static>(req: Request<Body>) -> ApiResult
|
||||
let query_params = ["root", "slot"];
|
||||
let (key, value) = UrlQuery::from_request(&req)?.first_of(&query_params)?;
|
||||
|
||||
let state: BeaconState<T::EthSpec> = match (key.as_ref(), value) {
|
||||
let (root, state): (Hash256, BeaconState<T::EthSpec>) = match (key.as_ref(), value) {
|
||||
("slot", value) => state_at_slot(&beacon_chain, parse_slot(&value)?)?,
|
||||
("root", value) => {
|
||||
let root = &parse_root(&value)?;
|
||||
|
||||
beacon_chain
|
||||
let state = beacon_chain
|
||||
.store
|
||||
.get(root)?
|
||||
.ok_or_else(|| ApiError::NotFound(format!("No state for root: {}", root)))?
|
||||
.ok_or_else(|| ApiError::NotFound(format!("No state for root: {}", root)))?;
|
||||
|
||||
(*root, state)
|
||||
}
|
||||
_ => unreachable!("Guarded by UrlQuery::from_request()"),
|
||||
_ => return Err(ApiError::ServerError("Unexpected query parameter".into())),
|
||||
};
|
||||
|
||||
let json: String = serde_json::to_string(&state)
|
||||
.map_err(|e| ApiError::ServerError(format!("Unable to serialize BeaconState: {:?}", e)))?;
|
||||
let response = StateResponse {
|
||||
root,
|
||||
beacon_state: state,
|
||||
};
|
||||
|
||||
let json: String = serde_json::to_string(&response).map_err(|e| {
|
||||
ApiError::ServerError(format!("Unable to serialize StateResponse: {:?}", e))
|
||||
})?;
|
||||
|
||||
Ok(success_response(Body::from(json)))
|
||||
}
|
||||
|
||||
/// HTTP handler to return a `BeaconState` root at a given or `slot`.
|
||||
/// HTTP handler to return a `BeaconState` root at a given `slot`.
|
||||
///
|
||||
/// Will not return a state if the request slot is in the future. Will return states higher than
|
||||
/// the current head by skipping slots.
|
||||
@ -58,3 +171,24 @@ pub fn get_state_root<T: BeaconChainTypes + 'static>(req: Request<Body>) -> ApiR
|
||||
|
||||
Ok(success_response(Body::from(json)))
|
||||
}
|
||||
|
||||
/// HTTP handler to return the latest finalized checkpoint from the head of the chain.
|
||||
pub fn get_latest_finalized_checkpoint<T: BeaconChainTypes + 'static>(
|
||||
req: Request<Body>,
|
||||
) -> ApiResult {
|
||||
let beacon_chain = req
|
||||
.extensions()
|
||||
.get::<Arc<BeaconChain<T>>>()
|
||||
.ok_or_else(|| ApiError::ServerError("Beacon chain extension missing".to_string()))?;
|
||||
|
||||
let checkpoint = beacon_chain
|
||||
.head()
|
||||
.beacon_state
|
||||
.finalized_checkpoint
|
||||
.clone();
|
||||
|
||||
let json: String = serde_json::to_string(&checkpoint)
|
||||
.map_err(|e| ApiError::ServerError(format!("Unable to serialize checkpoint: {:?}", e)))?;
|
||||
|
||||
Ok(success_response(Body::from(json)))
|
||||
}
|
||||
|
@ -18,7 +18,7 @@ impl Default for Config {
|
||||
Config {
|
||||
enabled: true, // rest_api enabled by default
|
||||
listen_address: Ipv4Addr::new(127, 0, 0, 1),
|
||||
port: 1248,
|
||||
port: 5052,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
@ -31,22 +31,40 @@ pub fn parse_root(string: &str) -> Result<Hash256, ApiError> {
|
||||
}
|
||||
}
|
||||
|
||||
/// Returns a `BeaconState` in the canonical chain of `beacon_chain` at the given `slot`, if
|
||||
/// possible.
|
||||
/// Returns the root of the `BeaconBlock` in the canonical chain of `beacon_chain` at the given
|
||||
/// `slot`, if possible.
|
||||
///
|
||||
/// May return a root for a previous slot, in the case of skip slots.
|
||||
pub fn block_root_at_slot<T: BeaconChainTypes>(
|
||||
beacon_chain: &BeaconChain<T>,
|
||||
target: Slot,
|
||||
) -> Option<Hash256> {
|
||||
beacon_chain
|
||||
.rev_iter_block_roots()
|
||||
.take_while(|(_root, slot)| *slot >= target)
|
||||
.find(|(_root, slot)| *slot == target)
|
||||
.map(|(root, _slot)| root)
|
||||
}
|
||||
|
||||
/// Returns a `BeaconState` and its root in the canonical chain of `beacon_chain` at the given
|
||||
/// `slot`, if possible.
|
||||
///
|
||||
/// Will not return a state if the request slot is in the future. Will return states higher than
|
||||
/// the current head by skipping slots.
|
||||
pub fn state_at_slot<T: BeaconChainTypes>(
|
||||
beacon_chain: &BeaconChain<T>,
|
||||
slot: Slot,
|
||||
) -> Result<BeaconState<T::EthSpec>, ApiError> {
|
||||
) -> Result<(Hash256, BeaconState<T::EthSpec>), ApiError> {
|
||||
let head_state = &beacon_chain.head().beacon_state;
|
||||
|
||||
if head_state.slot == slot {
|
||||
// The request slot is the same as the best block (head) slot.
|
||||
|
||||
// I'm not sure if this `.clone()` will be optimized out. If not, it seems unnecessary.
|
||||
Ok(beacon_chain.head().beacon_state.clone())
|
||||
Ok((
|
||||
beacon_chain.head().beacon_state_root,
|
||||
beacon_chain.head().beacon_state.clone(),
|
||||
))
|
||||
} else {
|
||||
let root = state_root_at_slot(beacon_chain, slot)?;
|
||||
|
||||
@ -55,7 +73,7 @@ pub fn state_at_slot<T: BeaconChainTypes>(
|
||||
.get(&root)?
|
||||
.ok_or_else(|| ApiError::NotFound(format!("Unable to find state at root {}", root)))?;
|
||||
|
||||
Ok(state)
|
||||
Ok((root, state))
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -1,21 +1,31 @@
|
||||
extern crate futures;
|
||||
extern crate hyper;
|
||||
#[macro_use]
|
||||
extern crate lazy_static;
|
||||
extern crate network as client_network;
|
||||
|
||||
mod beacon;
|
||||
mod config;
|
||||
mod helpers;
|
||||
mod metrics;
|
||||
mod network;
|
||||
mod node;
|
||||
mod spec;
|
||||
mod url_query;
|
||||
|
||||
use beacon_chain::{BeaconChain, BeaconChainTypes};
|
||||
pub use config::Config as ApiConfig;
|
||||
use client_network::Service as NetworkService;
|
||||
use hyper::rt::Future;
|
||||
use hyper::service::service_fn_ok;
|
||||
use hyper::{Body, Method, Response, Server, StatusCode};
|
||||
use slog::{info, o, warn};
|
||||
use std::ops::Deref;
|
||||
use std::path::PathBuf;
|
||||
use std::sync::Arc;
|
||||
use tokio::runtime::TaskExecutor;
|
||||
use url_query::UrlQuery;
|
||||
|
||||
pub use beacon::{BlockResponse, HeadResponse, StateResponse};
|
||||
pub use config::Config as ApiConfig;
|
||||
|
||||
#[derive(PartialEq, Debug)]
|
||||
pub enum ApiError {
|
||||
MethodNotAllowed(String),
|
||||
@ -63,10 +73,12 @@ impl From<state_processing::per_slot_processing::Error> for ApiError {
|
||||
}
|
||||
}
|
||||
|
||||
pub fn start_server<T: BeaconChainTypes + Clone + 'static>(
|
||||
pub fn start_server<T: BeaconChainTypes>(
|
||||
config: &ApiConfig,
|
||||
executor: &TaskExecutor,
|
||||
beacon_chain: Arc<BeaconChain<T>>,
|
||||
network_service: Arc<NetworkService<T>>,
|
||||
db_path: PathBuf,
|
||||
log: &slog::Logger,
|
||||
) -> Result<exit_future::Signal, hyper::Error> {
|
||||
let log = log.new(o!("Service" => "Api"));
|
||||
@ -80,6 +92,8 @@ pub fn start_server<T: BeaconChainTypes + Clone + 'static>(
|
||||
Ok(())
|
||||
});
|
||||
|
||||
let db_path = DBPath(db_path);
|
||||
|
||||
// Get the address to bind to
|
||||
let bind_addr = (config.listen_address, config.port).into();
|
||||
|
||||
@ -90,27 +104,53 @@ pub fn start_server<T: BeaconChainTypes + Clone + 'static>(
|
||||
let service = move || {
|
||||
let log = server_log.clone();
|
||||
let beacon_chain = server_bc.clone();
|
||||
let db_path = db_path.clone();
|
||||
let network_service = network_service.clone();
|
||||
|
||||
// Create a simple handler for the router, inject our stateful objects into the request.
|
||||
service_fn_ok(move |mut req| {
|
||||
metrics::inc_counter(&metrics::REQUEST_COUNT);
|
||||
let timer = metrics::start_timer(&metrics::REQUEST_RESPONSE_TIME);
|
||||
|
||||
req.extensions_mut().insert::<slog::Logger>(log.clone());
|
||||
req.extensions_mut()
|
||||
.insert::<Arc<BeaconChain<T>>>(beacon_chain.clone());
|
||||
req.extensions_mut().insert::<DBPath>(db_path.clone());
|
||||
req.extensions_mut()
|
||||
.insert::<Arc<NetworkService<T>>>(network_service.clone());
|
||||
|
||||
let path = req.uri().path().to_string();
|
||||
|
||||
// Route the request to the correct handler.
|
||||
let result = match (req.method(), path.as_ref()) {
|
||||
(&Method::GET, "/beacon/head") => beacon::get_head::<T>(req),
|
||||
(&Method::GET, "/beacon/block") => beacon::get_block::<T>(req),
|
||||
(&Method::GET, "/beacon/block_root") => beacon::get_block_root::<T>(req),
|
||||
(&Method::GET, "/beacon/latest_finalized_checkpoint") => {
|
||||
beacon::get_latest_finalized_checkpoint::<T>(req)
|
||||
}
|
||||
(&Method::GET, "/beacon/state") => beacon::get_state::<T>(req),
|
||||
(&Method::GET, "/beacon/state_root") => beacon::get_state_root::<T>(req),
|
||||
(&Method::GET, "/metrics") => metrics::get_prometheus::<T>(req),
|
||||
(&Method::GET, "/network/enr") => network::get_enr::<T>(req),
|
||||
(&Method::GET, "/network/peer_count") => network::get_peer_count::<T>(req),
|
||||
(&Method::GET, "/network/peer_id") => network::get_peer_id::<T>(req),
|
||||
(&Method::GET, "/network/peers") => network::get_peer_list::<T>(req),
|
||||
(&Method::GET, "/network/listen_port") => network::get_listen_port::<T>(req),
|
||||
(&Method::GET, "/network/listen_addresses") => {
|
||||
network::get_listen_addresses::<T>(req)
|
||||
}
|
||||
(&Method::GET, "/node/version") => node::get_version(req),
|
||||
(&Method::GET, "/node/genesis_time") => node::get_genesis_time::<T>(req),
|
||||
(&Method::GET, "/spec") => spec::get_spec::<T>(req),
|
||||
(&Method::GET, "/spec/slots_per_epoch") => spec::get_slots_per_epoch::<T>(req),
|
||||
_ => Err(ApiError::MethodNotAllowed(path.clone())),
|
||||
};
|
||||
|
||||
match result {
|
||||
let response = match result {
|
||||
// Return the `hyper::Response`.
|
||||
Ok(response) => {
|
||||
metrics::inc_counter(&metrics::SUCCESS_COUNT);
|
||||
slog::debug!(log, "Request successful: {:?}", path);
|
||||
response
|
||||
}
|
||||
@ -119,7 +159,11 @@ pub fn start_server<T: BeaconChainTypes + Clone + 'static>(
|
||||
slog::debug!(log, "Request failure: {:?}", path);
|
||||
e.into()
|
||||
}
|
||||
}
|
||||
};
|
||||
|
||||
metrics::stop_timer(timer);
|
||||
|
||||
response
|
||||
})
|
||||
};
|
||||
|
||||
@ -152,3 +196,14 @@ fn success_response(body: Body) -> Response<Body> {
|
||||
.body(body)
|
||||
.expect("We should always be able to make response from the success body.")
|
||||
}
|
||||
|
||||
#[derive(Clone)]
|
||||
pub struct DBPath(PathBuf);
|
||||
|
||||
impl Deref for DBPath {
|
||||
type Target = PathBuf;
|
||||
|
||||
fn deref(&self) -> &Self::Target {
|
||||
&self.0
|
||||
}
|
||||
}
|
||||
|
69
beacon_node/rest_api/src/metrics.rs
Normal file
@ -0,0 +1,69 @@
|
||||
use crate::{success_response, ApiError, ApiResult, DBPath};
|
||||
use beacon_chain::{BeaconChain, BeaconChainTypes};
|
||||
use hyper::{Body, Request};
|
||||
use prometheus::{Encoder, TextEncoder};
|
||||
use std::sync::Arc;
|
||||
|
||||
pub use lighthouse_metrics::*;
|
||||
|
||||
lazy_static! {
|
||||
pub static ref REQUEST_RESPONSE_TIME: Result<Histogram> = try_create_histogram(
|
||||
"http_server_request_duration_seconds",
|
||||
"Time taken to build a response to a HTTP request"
|
||||
);
|
||||
pub static ref REQUEST_COUNT: Result<IntCounter> = try_create_int_counter(
|
||||
"http_server_request_total",
|
||||
"Total count of HTTP requests received"
|
||||
);
|
||||
pub static ref SUCCESS_COUNT: Result<IntCounter> = try_create_int_counter(
|
||||
"http_server_success_total",
|
||||
"Total count of HTTP 200 responses sent"
|
||||
);
|
||||
}
|
||||
|
||||
/// Returns the full set of Prometheus metrics for the Beacon Node application.
|
||||
///
|
||||
/// # Note
|
||||
///
|
||||
/// This is a HTTP handler method.
|
||||
pub fn get_prometheus<T: BeaconChainTypes + 'static>(req: Request<Body>) -> ApiResult {
|
||||
let mut buffer = vec![];
|
||||
let encoder = TextEncoder::new();
|
||||
|
||||
let beacon_chain = req
|
||||
.extensions()
|
||||
.get::<Arc<BeaconChain<T>>>()
|
||||
.ok_or_else(|| ApiError::ServerError("Beacon chain extension missing".to_string()))?;
|
||||
let db_path = req
|
||||
.extensions()
|
||||
.get::<DBPath>()
|
||||
.ok_or_else(|| ApiError::ServerError("DBPath extension missing".to_string()))?;
|
||||
|
||||
// There are two categories of metrics:
|
||||
//
|
||||
// - Dynamically updated: things like histograms and event counters that are updated on the
|
||||
// fly.
|
||||
// - Statically updated: things which are only updated at the time of the scrape (used where we
|
||||
// can avoid cluttering up code with metrics calls).
|
||||
//
|
||||
// The `lighthouse_metrics` crate has a `DEFAULT_REGISTRY` global singleton (via `lazy_static`)
|
||||
// which keeps the state of all the metrics. Dynamically updated things will already be
|
||||
// up-to-date in the registry (because they update themselves) however statically updated
|
||||
// things need to be "scraped".
|
||||
//
|
||||
// We proceed by, first updating all the static metrics using `scrape_for_metrics(..)`. Then,
|
||||
// using `lighthouse_metrics::gather(..)` to collect the global `DEFAULT_REGISTRY` metrics into
|
||||
// a string that can be returned via HTTP.
|
||||
|
||||
slot_clock::scrape_for_metrics::<T::EthSpec, T::SlotClock>(&beacon_chain.slot_clock);
|
||||
store::scrape_for_metrics(&db_path);
|
||||
beacon_chain::scrape_for_metrics(&beacon_chain);
|
||||
|
||||
encoder
|
||||
.encode(&lighthouse_metrics::gather(), &mut buffer)
|
||||
.unwrap();
|
||||
|
||||
String::from_utf8(buffer)
|
||||
.map(|string| success_response(Body::from(string)))
|
||||
.map_err(|e| ApiError::ServerError(format!("Failed to encode prometheus info: {:?}", e)))
|
||||
}
|
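The comment block in `get_prometheus` above draws a line between dynamically updated metrics (bumped wherever the event happens) and statically updated metrics (only refreshed at scrape time). A minimal sketch of that split, using the `lighthouse_metrics` helpers added later in this commit; the metric names and functions below are illustrative only and not part of the commit:

#[macro_use]
extern crate lazy_static;
use lighthouse_metrics::*;

lazy_static! {
    // Dynamically updated: incremented at the call site, wherever the event occurs.
    pub static ref WIDGET_REQUESTS: Result<IntCounter> =
        try_create_int_counter("example_widget_requests_total", "Illustrative counter");
    // Statically updated: only set when the metrics endpoint is scraped.
    pub static ref WIDGET_QUEUE_LENGTH: Result<IntGauge> =
        try_create_int_gauge("example_widget_queue_length", "Illustrative gauge");
}

fn handle_widget_request() {
    inc_counter(&WIDGET_REQUESTS);
}

fn scrape_for_metrics(queue_len: usize) {
    // Called by the HTTP handler immediately before `lighthouse_metrics::gather()`.
    set_gauge(&WIDGET_QUEUE_LENGTH, queue_len as i64);
}

fn main() {
    handle_widget_request();
    scrape_for_metrics(3);
    // Both metrics are now registered and appear in the gathered families.
    assert!(!gather().is_empty());
}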
108
beacon_node/rest_api/src/network.rs
Normal file
@ -0,0 +1,108 @@
|
||||
use crate::{success_response, ApiError, ApiResult, NetworkService};
|
||||
use beacon_chain::BeaconChainTypes;
|
||||
use eth2_libp2p::{Enr, Multiaddr, PeerId};
|
||||
use hyper::{Body, Request};
|
||||
use std::sync::Arc;
|
||||
|
||||
/// HTTP handler to return the list of libp2p multiaddrs the client is listening on.
|
||||
///
|
||||
/// Returns a list of `Multiaddr`, serialized according to their `serde` impl.
|
||||
pub fn get_listen_addresses<T: BeaconChainTypes>(req: Request<Body>) -> ApiResult {
|
||||
let network = req
|
||||
.extensions()
|
||||
.get::<Arc<NetworkService<T>>>()
|
||||
.ok_or_else(|| ApiError::ServerError("NetworkService extension missing".to_string()))?;
|
||||
|
||||
let multiaddresses: Vec<Multiaddr> = network.listen_multiaddrs();
|
||||
|
||||
Ok(success_response(Body::from(
|
||||
serde_json::to_string(&multiaddresses)
|
||||
.map_err(|e| ApiError::ServerError(format!("Unable to serialize multiaddresses: {:?}", e)))?,
|
||||
)))
|
||||
}
|
||||
|
||||
/// HTTP handler to return the TCP port the client's libp2p service is listening on.
|
||||
///
|
||||
/// Returns the port number, serialized via `serde_json`.
|
||||
pub fn get_listen_port<T: BeaconChainTypes>(req: Request<Body>) -> ApiResult {
|
||||
let network = req
|
||||
.extensions()
|
||||
.get::<Arc<NetworkService<T>>>()
|
||||
.ok_or_else(|| ApiError::ServerError("NetworkService extension missing".to_string()))?;
|
||||
|
||||
Ok(success_response(Body::from(
|
||||
serde_json::to_string(&network.listen_port())
|
||||
.map_err(|e| ApiError::ServerError(format!("Unable to serialize port: {:?}", e)))?,
|
||||
)))
|
||||
}
|
||||
|
||||
/// HTTP handler to return the Discv5 ENR from the client's libp2p service.
|
||||
///
|
||||
/// The ENR is encoded as a base64 string.
|
||||
pub fn get_enr<T: BeaconChainTypes>(req: Request<Body>) -> ApiResult {
|
||||
let network = req
|
||||
.extensions()
|
||||
.get::<Arc<NetworkService<T>>>()
|
||||
.ok_or_else(|| ApiError::ServerError("NetworkService extension missing".to_string()))?;
|
||||
|
||||
let enr: Enr = network.local_enr();
|
||||
|
||||
Ok(success_response(Body::from(
|
||||
serde_json::to_string(&enr.to_base64())
|
||||
.map_err(|e| ApiError::ServerError(format!("Unable to serialize Enr: {:?}", e)))?,
|
||||
)))
|
||||
}
|
||||
|
||||
/// HTTP handler to return the `PeerId` from the client's libp2p service.
|
||||
///
|
||||
/// The `PeerId` is encoded as a base58 string.
|
||||
pub fn get_peer_id<T: BeaconChainTypes>(req: Request<Body>) -> ApiResult {
|
||||
let network = req
|
||||
.extensions()
|
||||
.get::<Arc<NetworkService<T>>>()
|
||||
.ok_or_else(|| ApiError::ServerError("NetworkService extension missing".to_string()))?;
|
||||
|
||||
let peer_id: PeerId = network.local_peer_id();
|
||||
|
||||
Ok(success_response(Body::from(
|
||||
serde_json::to_string(&peer_id.to_base58())
|
||||
.map_err(|e| ApiError::ServerError(format!("Unable to serialize PeerId: {:?}", e)))?,
|
||||
)))
|
||||
}
|
||||
|
||||
/// HTTP handler to return the number of peers connected to the client's libp2p service.
|
||||
pub fn get_peer_count<T: BeaconChainTypes>(req: Request<Body>) -> ApiResult {
|
||||
let network = req
|
||||
.extensions()
|
||||
.get::<Arc<NetworkService<T>>>()
|
||||
.ok_or_else(|| ApiError::ServerError("NetworkService extension missing".to_string()))?;
|
||||
|
||||
let connected_peers: usize = network.connected_peers();
|
||||
|
||||
Ok(success_response(Body::from(
|
||||
serde_json::to_string(&connected_peers)
|
||||
.map_err(|e| ApiError::ServerError(format!("Unable to serialize peer count: {:?}", e)))?,
|
||||
)))
|
||||
}
|
||||
|
||||
/// HTTP handler to return the list of peers connected to the client's libp2p service.
|
||||
///
|
||||
/// Peers are presented as a list of `PeerId::to_string()`.
|
||||
pub fn get_peer_list<T: BeaconChainTypes>(req: Request<Body>) -> ApiResult {
|
||||
let network = req
|
||||
.extensions()
|
||||
.get::<Arc<NetworkService<T>>>()
|
||||
.ok_or_else(|| ApiError::ServerError("NetworkService extension missing".to_string()))?;
|
||||
|
||||
let connected_peers: Vec<String> = network
|
||||
.connected_peer_set()
|
||||
.iter()
|
||||
.map(PeerId::to_string)
|
||||
.collect();
|
||||
|
||||
Ok(success_response(Body::from(
|
||||
serde_json::to_string(&connected_peers).map_err(|e| {
|
||||
ApiError::ServerError(format!("Unable to serialize Vec<PeerId>: {:?}", e))
|
||||
})?,
|
||||
)))
|
||||
}
|
27
beacon_node/rest_api/src/spec.rs
Normal file
@ -0,0 +1,27 @@
|
||||
use super::{success_response, ApiResult};
|
||||
use crate::ApiError;
|
||||
use beacon_chain::{BeaconChain, BeaconChainTypes};
|
||||
use hyper::{Body, Request};
|
||||
use std::sync::Arc;
|
||||
use types::EthSpec;
|
||||
|
||||
/// HTTP handler to return the full spec object.
|
||||
pub fn get_spec<T: BeaconChainTypes + 'static>(req: Request<Body>) -> ApiResult {
|
||||
let beacon_chain = req
|
||||
.extensions()
|
||||
.get::<Arc<BeaconChain<T>>>()
|
||||
.ok_or_else(|| ApiError::ServerError("Beacon chain extension missing".to_string()))?;
|
||||
|
||||
let json: String = serde_json::to_string(&beacon_chain.spec)
|
||||
.map_err(|e| ApiError::ServerError(format!("Unable to serialize spec: {:?}", e)))?;
|
||||
|
||||
Ok(success_response(Body::from(json)))
|
||||
}
|
||||
|
||||
/// HTTP handler to return the number of slots per epoch.
|
||||
pub fn get_slots_per_epoch<T: BeaconChainTypes + 'static>(_req: Request<Body>) -> ApiResult {
|
||||
let json: String = serde_json::to_string(&T::EthSpec::slots_per_epoch())
|
||||
.map_err(|e| ApiError::ServerError(format!("Unable to serialize epoch: {:?}", e)))?;
|
||||
|
||||
Ok(success_response(Body::from(json)))
|
||||
}
|
@ -127,28 +127,6 @@ fn main() {
|
||||
.help("Listen port for RPC endpoint.")
|
||||
.takes_value(true),
|
||||
)
|
||||
/*
|
||||
* HTTP server parameters.
|
||||
*/
|
||||
.arg(
|
||||
Arg::with_name("http")
|
||||
.long("http")
|
||||
.help("Enable the HTTP server.")
|
||||
.takes_value(false),
|
||||
)
|
||||
.arg(
|
||||
Arg::with_name("http-address")
|
||||
.long("http-address")
|
||||
.value_name("Address")
|
||||
.help("Listen address for the HTTP server.")
|
||||
.takes_value(true),
|
||||
)
|
||||
.arg(
|
||||
Arg::with_name("http-port")
|
||||
.long("http-port")
|
||||
.help("Listen port for the HTTP server.")
|
||||
.takes_value(true),
|
||||
)
|
||||
/* Client related arguments */
|
||||
.arg(
|
||||
Arg::with_name("api")
|
||||
@ -214,6 +192,23 @@ fn main() {
|
||||
.possible_values(&["info", "debug", "trace", "warn", "error", "crit"])
|
||||
.default_value("trace"),
|
||||
)
|
||||
.arg(
|
||||
Arg::with_name("verbosity")
|
||||
.short("v")
|
||||
.multiple(true)
|
||||
.help("Sets the verbosity level")
|
||||
.takes_value(true),
|
||||
)
|
||||
/*
|
||||
* Bootstrap.
|
||||
*/
|
||||
.arg(
|
||||
Arg::with_name("bootstrap")
|
||||
.long("bootstrap")
|
||||
.value_name("HTTP_SERVER")
|
||||
.help("Load the genesis state and libp2p address from the HTTP API of another Lighthouse node.")
|
||||
.takes_value(true)
|
||||
)
|
||||
.get_matches();
|
||||
|
||||
// build the initial logger
|
||||
@ -234,6 +229,11 @@ fn main() {
|
||||
|
||||
let mut log = slog::Logger::root(drain.fuse(), o!());
|
||||
|
||||
warn!(
|
||||
log,
|
||||
"Ethereum 2.0 is pre-release. This software is experimental."
|
||||
);
|
||||
|
||||
let data_dir = match matches
|
||||
.value_of("datadir")
|
||||
.and_then(|v| Some(PathBuf::from(v)))
|
||||
|
@ -4,7 +4,7 @@ use client::{
|
||||
};
|
||||
use futures::sync::oneshot;
|
||||
use futures::Future;
|
||||
use slog::{error, info, warn};
|
||||
use slog::{error, info};
|
||||
use std::cell::RefCell;
|
||||
use std::path::Path;
|
||||
use std::path::PathBuf;
|
||||
@ -42,11 +42,6 @@ pub fn run_beacon_node(
|
||||
|
||||
let other_client_config = client_config.clone();
|
||||
|
||||
warn!(
|
||||
log,
|
||||
"Ethereum 2.0 is pre-release. This software is experimental."
|
||||
);
|
||||
|
||||
info!(
|
||||
log,
|
||||
"BeaconNode init";
|
||||
@ -123,7 +118,7 @@ fn run<T>(
|
||||
log: &slog::Logger,
|
||||
) -> error::Result<()>
|
||||
where
|
||||
T: BeaconChainTypes + InitialiseBeaconChain<T> + Clone + Send + Sync + 'static,
|
||||
T: BeaconChainTypes + InitialiseBeaconChain<T> + Clone,
|
||||
T::Store: OpenDatabase,
|
||||
{
|
||||
let store = T::Store::open_database(&db_path)?;
|
||||
|
@ -15,3 +15,5 @@ eth2_ssz = "0.1"
|
||||
eth2_ssz_derive = "0.1"
|
||||
tree_hash = "0.1"
|
||||
types = { path = "../../eth2/types" }
|
||||
lazy_static = "1.3.0"
|
||||
lighthouse_metrics = { path = "../../eth2/utils/lighthouse_metrics" }
|
||||
|
@ -9,10 +9,26 @@ impl<T: EthSpec> StoreItem for BeaconBlock<T> {
|
||||
}
|
||||
|
||||
fn as_store_bytes(&self) -> Vec<u8> {
|
||||
self.as_ssz_bytes()
|
||||
let timer = metrics::start_timer(&metrics::BEACON_BLOCK_WRITE_TIMES);
|
||||
let bytes = self.as_ssz_bytes();
|
||||
|
||||
metrics::stop_timer(timer);
|
||||
metrics::inc_counter(&metrics::BEACON_BLOCK_WRITE_COUNT);
|
||||
metrics::inc_counter_by(&metrics::BEACON_BLOCK_WRITE_BYTES, bytes.len() as i64);
|
||||
|
||||
bytes
|
||||
}
|
||||
|
||||
fn from_store_bytes(bytes: &mut [u8]) -> Result<Self, Error> {
|
||||
Self::from_ssz_bytes(bytes).map_err(Into::into)
|
||||
let timer = metrics::start_timer(&metrics::BEACON_BLOCK_READ_TIMES);
|
||||
|
||||
let len = bytes.len();
|
||||
let result = Self::from_ssz_bytes(bytes).map_err(Into::into);
|
||||
|
||||
metrics::stop_timer(timer);
|
||||
metrics::inc_counter(&metrics::BEACON_BLOCK_READ_COUNT);
|
||||
metrics::inc_counter_by(&metrics::BEACON_BLOCK_READ_BYTES, len as i64);
|
||||
|
||||
result
|
||||
}
|
||||
}
|
||||
|
@ -53,12 +53,29 @@ impl<T: EthSpec> StoreItem for BeaconState<T> {
|
||||
}
|
||||
|
||||
fn as_store_bytes(&self) -> Vec<u8> {
|
||||
let timer = metrics::start_timer(&metrics::BEACON_STATE_WRITE_TIMES);
|
||||
|
||||
let container = StorageContainer::new(self);
|
||||
container.as_ssz_bytes()
|
||||
let bytes = container.as_ssz_bytes();
|
||||
|
||||
metrics::stop_timer(timer);
|
||||
metrics::inc_counter(&metrics::BEACON_STATE_WRITE_COUNT);
|
||||
metrics::inc_counter_by(&metrics::BEACON_STATE_WRITE_BYTES, bytes.len() as i64);
|
||||
|
||||
bytes
|
||||
}
|
||||
|
||||
fn from_store_bytes(bytes: &mut [u8]) -> Result<Self, Error> {
|
||||
let timer = metrics::start_timer(&metrics::BEACON_STATE_READ_TIMES);
|
||||
|
||||
let len = bytes.len();
|
||||
let container = StorageContainer::from_ssz_bytes(bytes)?;
|
||||
container.try_into()
|
||||
let result = container.try_into();
|
||||
|
||||
metrics::stop_timer(timer);
|
||||
metrics::inc_counter(&metrics::BEACON_STATE_READ_COUNT);
|
||||
metrics::inc_counter_by(&metrics::BEACON_STATE_READ_BYTES, len as i64);
|
||||
|
||||
result
|
||||
}
|
||||
}
|
||||
|
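The two `StoreItem` impls above (`BeaconBlock` and `BeaconState`) repeat the same instrumentation pattern: time the (de)serialization, then record one operation and the number of bytes involved. A hedged sketch of how that pattern could be factored into a helper; `record_io` is hypothetical and not part of the commit:

use lighthouse_metrics::*;

/// Times `f`, then records one operation and the number of bytes it reports.
fn record_io<T>(
    timer: &Result<Histogram>,
    count: &Result<IntCounter>,
    bytes: &Result<IntCounter>,
    f: impl FnOnce() -> (T, usize),
) -> T {
    let t = start_timer(timer);
    let (value, len) = f();
    stop_timer(t);
    inc_counter(count);
    inc_counter_by(bytes, len as i64);
    value
}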
@ -1,4 +1,5 @@
|
||||
use super::*;
|
||||
use crate::metrics;
|
||||
use db_key::Key;
|
||||
use leveldb::database::kv::KV;
|
||||
use leveldb::database::Database;
|
||||
@ -62,15 +63,27 @@ impl Store for LevelDB {
|
||||
fn get_bytes(&self, col: &str, key: &[u8]) -> Result<Option<Vec<u8>>, Error> {
|
||||
let column_key = Self::get_key_for_col(col, key);
|
||||
|
||||
self.db
|
||||
metrics::inc_counter(&metrics::DISK_DB_READ_COUNT);
|
||||
|
||||
let result = self
|
||||
.db
|
||||
.get(self.read_options(), column_key)
|
||||
.map_err(Into::into)
|
||||
.map_err(Into::into);
|
||||
|
||||
if let Ok(Some(bytes)) = &result {
|
||||
metrics::inc_counter_by(&metrics::DISK_DB_READ_BYTES, bytes.len() as i64)
|
||||
}
|
||||
|
||||
result
|
||||
}
|
||||
|
||||
/// Store some `value` in `column`, indexed with `key`.
|
||||
fn put_bytes(&self, col: &str, key: &[u8], val: &[u8]) -> Result<(), Error> {
|
||||
let column_key = Self::get_key_for_col(col, key);
|
||||
|
||||
metrics::inc_counter(&metrics::DISK_DB_WRITE_COUNT);
|
||||
metrics::inc_counter_by(&metrics::DISK_DB_WRITE_BYTES, val.len() as i64);
|
||||
|
||||
self.db
|
||||
.put(self.write_options(), column_key, val)
|
||||
.map_err(Into::into)
|
||||
@ -80,6 +93,8 @@ impl Store for LevelDB {
|
||||
fn key_exists(&self, col: &str, key: &[u8]) -> Result<bool, Error> {
|
||||
let column_key = Self::get_key_for_col(col, key);
|
||||
|
||||
metrics::inc_counter(&metrics::DISK_DB_EXISTS_COUNT);
|
||||
|
||||
self.db
|
||||
.get(self.read_options(), column_key)
|
||||
.map_err(Into::into)
|
||||
@ -89,6 +104,9 @@ impl Store for LevelDB {
|
||||
/// Removes `key` from `column`.
|
||||
fn key_delete(&self, col: &str, key: &[u8]) -> Result<(), Error> {
|
||||
let column_key = Self::get_key_for_col(col, key);
|
||||
|
||||
metrics::inc_counter(&metrics::DISK_DB_DELETE_COUNT);
|
||||
|
||||
self.db
|
||||
.delete(self.write_options(), column_key)
|
||||
.map_err(Into::into)
|
||||
|
@ -7,18 +7,22 @@
|
||||
//!
|
||||
//! Provides a simple API for storing/retrieving all types that sometimes needs type-hints. See
|
||||
//! tests for implementation examples.
|
||||
#[macro_use]
|
||||
extern crate lazy_static;
|
||||
|
||||
mod block_at_slot;
|
||||
mod errors;
|
||||
mod impls;
|
||||
mod leveldb_store;
|
||||
mod memory_store;
|
||||
mod metrics;
|
||||
|
||||
pub mod iter;
|
||||
|
||||
pub use self::leveldb_store::LevelDB as DiskStore;
|
||||
pub use self::memory_store::MemoryStore;
|
||||
pub use errors::Error;
|
||||
pub use metrics::scrape_for_metrics;
|
||||
pub use types::*;
|
||||
|
||||
/// An object capable of storing and retrieving objects implementing `StoreItem`.
|
||||
|
106
beacon_node/store/src/metrics.rs
Normal file
@ -0,0 +1,106 @@
|
||||
pub use lighthouse_metrics::{set_gauge, try_create_int_gauge, *};
|
||||
|
||||
use std::fs;
|
||||
use std::path::PathBuf;
|
||||
|
||||
lazy_static! {
|
||||
/*
|
||||
* General
|
||||
*/
|
||||
pub static ref DISK_DB_SIZE: Result<IntGauge> =
|
||||
try_create_int_gauge("store_disk_db_size", "Size of the on-disk database (bytes)");
|
||||
pub static ref DISK_DB_WRITE_BYTES: Result<IntCounter> = try_create_int_counter(
|
||||
"store_disk_db_write_bytes_total",
|
||||
"Number of bytes attempted to be written to the on-disk DB"
|
||||
);
|
||||
pub static ref DISK_DB_READ_BYTES: Result<IntCounter> = try_create_int_counter(
|
||||
"store_disk_db_read_bytes_total",
|
||||
"Number of bytes read from the on-disk DB"
|
||||
);
|
||||
pub static ref DISK_DB_READ_COUNT: Result<IntCounter> = try_create_int_counter(
|
||||
"store_disk_db_read_count_total",
|
||||
"Total number of reads to the on-disk DB"
|
||||
);
|
||||
pub static ref DISK_DB_WRITE_COUNT: Result<IntCounter> = try_create_int_counter(
|
||||
"store_disk_db_write_count_total",
|
||||
"Total number of writes to the on-disk DB"
|
||||
);
|
||||
pub static ref DISK_DB_EXISTS_COUNT: Result<IntCounter> = try_create_int_counter(
|
||||
"store_disk_db_exists_count_total",
|
||||
"Total number of checks if a key is in the on-disk DB"
|
||||
);
|
||||
pub static ref DISK_DB_DELETE_COUNT: Result<IntCounter> = try_create_int_counter(
|
||||
"store_disk_db_delete_count_total",
|
||||
"Total number of deletions from the on-disk DB"
|
||||
);
|
||||
/*
|
||||
* Beacon State
|
||||
*/
|
||||
pub static ref BEACON_STATE_READ_TIMES: Result<Histogram> = try_create_histogram(
|
||||
"store_beacon_state_read_overhead_seconds",
|
||||
"Overhead on reading a beacon state from the DB (e.g., decoding)"
|
||||
);
|
||||
pub static ref BEACON_STATE_READ_COUNT: Result<IntCounter> = try_create_int_counter(
|
||||
"store_beacon_state_read_total",
|
||||
"Total number of beacon state reads from the DB"
|
||||
);
|
||||
pub static ref BEACON_STATE_READ_BYTES: Result<IntCounter> = try_create_int_counter(
|
||||
"store_beacon_state_read_bytes_total",
|
||||
"Total number of beacon state bytes read from the DB"
|
||||
);
|
||||
pub static ref BEACON_STATE_WRITE_TIMES: Result<Histogram> = try_create_histogram(
|
||||
"store_beacon_state_write_overhead_seconds",
|
||||
"Overhead on writing a beacon state to the DB (e.g., encoding)"
|
||||
);
|
||||
pub static ref BEACON_STATE_WRITE_COUNT: Result<IntCounter> = try_create_int_counter(
|
||||
"store_beacon_state_write_total",
|
||||
"Total number of beacon state writes the DB"
|
||||
);
|
||||
pub static ref BEACON_STATE_WRITE_BYTES: Result<IntCounter> = try_create_int_counter(
|
||||
"store_beacon_state_write_bytes_total",
|
||||
"Total number of beacon state bytes written to the DB"
|
||||
);
|
||||
/*
|
||||
* Beacon Block
|
||||
*/
|
||||
pub static ref BEACON_BLOCK_READ_TIMES: Result<Histogram> = try_create_histogram(
|
||||
"store_beacon_block_read_overhead_seconds",
|
||||
"Overhead on reading a beacon block from the DB (e.g., decoding)"
|
||||
);
|
||||
pub static ref BEACON_BLOCK_READ_COUNT: Result<IntCounter> = try_create_int_counter(
|
||||
"store_beacon_block_read_total",
|
||||
"Total number of beacon block reads from the DB"
|
||||
);
|
||||
pub static ref BEACON_BLOCK_READ_BYTES: Result<IntCounter> = try_create_int_counter(
|
||||
"store_beacon_block_read_bytes_total",
|
||||
"Total number of beacon block bytes read from the DB"
|
||||
);
|
||||
pub static ref BEACON_BLOCK_WRITE_TIMES: Result<Histogram> = try_create_histogram(
|
||||
"store_beacon_block_write_overhead_seconds",
|
||||
"Overhead on writing a beacon block to the DB (e.g., encoding)"
|
||||
);
|
||||
pub static ref BEACON_BLOCK_WRITE_COUNT: Result<IntCounter> = try_create_int_counter(
|
||||
"store_beacon_block_write_total",
|
||||
"Total number of beacon block writes the DB"
|
||||
);
|
||||
pub static ref BEACON_BLOCK_WRITE_BYTES: Result<IntCounter> = try_create_int_counter(
|
||||
"store_beacon_block_write_bytes_total",
|
||||
"Total number of beacon block bytes written to the DB"
|
||||
);
|
||||
}
|
||||
|
||||
/// Updates the global metrics registry with store-related information.
|
||||
pub fn scrape_for_metrics(db_path: &PathBuf) {
|
||||
let db_size = if let Ok(iter) = fs::read_dir(db_path) {
|
||||
iter.filter_map(std::result::Result::ok)
|
||||
.map(size_of_dir_entry)
|
||||
.fold(0_u64, |sum, val| sum + val)
|
||||
} else {
|
||||
0
|
||||
};
|
||||
set_gauge(&DISK_DB_SIZE, db_size as i64);
|
||||
}
|
||||
|
||||
fn size_of_dir_entry(dir: fs::DirEntry) -> u64 {
|
||||
dir.metadata().map(|m| m.len()).unwrap_or(0)
|
||||
}
|
@ -78,14 +78,6 @@ enabled = false
|
||||
listen_address = "127.0.0.1"
|
||||
port = 5051
|
||||
|
||||
#
|
||||
# Legacy HTTP server configuration. To be removed.
|
||||
#
|
||||
[http]
|
||||
enabled = false
|
||||
listen_address = "127.0.0.1"
|
||||
listen_port = "5052"
|
||||
|
||||
#
|
||||
# RESTful HTTP API server configuration.
|
||||
#
|
||||
@ -95,4 +87,4 @@ enabled = true
|
||||
# The listen address for the HTTP server.
|
||||
listen_address = "127.0.0.1"
|
||||
# The listen port for the HTTP server.
|
||||
port = 1248
|
||||
port = 5052
|
||||
|
@ -777,11 +777,7 @@ where
|
||||
}
|
||||
|
||||
pub fn get_ref(&self, i: usize) -> Option<&T> {
|
||||
if i < self.0.len() {
|
||||
Some(&self.0[i])
|
||||
} else {
|
||||
None
|
||||
}
|
||||
self.0.get(i)
|
||||
}
|
||||
|
||||
pub fn insert(&mut self, i: usize, element: T) {
|
||||
|
@ -1,5 +1,5 @@
|
||||
use crate::test_utils::TestRandom;
|
||||
use crate::utils::graffiti_from_hex_str;
|
||||
use crate::utils::{graffiti_from_hex_str, graffiti_to_hex_str};
|
||||
use crate::*;
|
||||
|
||||
use serde_derive::{Deserialize, Serialize};
|
||||
@ -16,7 +16,10 @@ use tree_hash_derive::TreeHash;
|
||||
pub struct BeaconBlockBody<T: EthSpec> {
|
||||
pub randao_reveal: Signature,
|
||||
pub eth1_data: Eth1Data,
|
||||
#[serde(deserialize_with = "graffiti_from_hex_str")]
|
||||
#[serde(
|
||||
serialize_with = "graffiti_to_hex_str",
|
||||
deserialize_with = "graffiti_from_hex_str"
|
||||
)]
|
||||
pub graffiti: [u8; 32],
|
||||
pub proposer_slashings: VariableList<ProposerSlashing, T::MaxProposerSlashings>,
|
||||
pub attester_slashings: VariableList<AttesterSlashing<T>, T::MaxAttesterSlashings>,
|
||||
|
@ -46,8 +46,20 @@ where
|
||||
Ok(array)
|
||||
}
|
||||
|
||||
// #[allow(clippy::trivially_copy_pass_by_ref)] // Serde requires the `byte` to be a ref.
|
||||
pub fn fork_to_hex_str<S>(bytes: &[u8; 4], serializer: S) -> Result<S::Ok, S::Error>
|
||||
pub fn fork_to_hex_str<S>(bytes: &[u8; FORK_BYTES_LEN], serializer: S) -> Result<S::Ok, S::Error>
|
||||
where
|
||||
S: Serializer,
|
||||
{
|
||||
let mut hex_string: String = "0x".to_string();
|
||||
hex_string.push_str(&hex::encode(&bytes));
|
||||
|
||||
serializer.serialize_str(&hex_string)
|
||||
}
|
||||
|
||||
pub fn graffiti_to_hex_str<S>(
|
||||
bytes: &[u8; GRAFFITI_BYTES_LEN],
|
||||
serializer: S,
|
||||
) -> Result<S::Ok, S::Error>
|
||||
where
|
||||
S: Serializer,
|
||||
{
|
||||
|
11
eth2/utils/lighthouse_metrics/Cargo.toml
Normal file
@ -0,0 +1,11 @@
|
||||
[package]
|
||||
name = "lighthouse_metrics"
|
||||
version = "0.1.0"
|
||||
authors = ["Paul Hauner <paul@paulhauner.com>"]
|
||||
edition = "2018"
|
||||
|
||||
# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html
|
||||
|
||||
[dependencies]
|
||||
lazy_static = "1.3.0"
|
||||
prometheus = "^0.6"
|
129
eth2/utils/lighthouse_metrics/src/lib.rs
Normal file
@ -0,0 +1,129 @@
|
||||
//! A wrapper around the `prometheus` crate that provides a global, `lazy_static` metrics registry
|
||||
//! and functions to add and use the following components (more info at
|
||||
//! [Prometheus docs](https://prometheus.io/docs/concepts/metric_types/)):
|
||||
//!
|
||||
//! - `Histogram`: used with `start_timer(..)` and `stop_timer(..)` to record durations (e.g.,
|
||||
//! block processing time).
|
||||
//! - `IntCounter`: used to represent an ideally ever-growing, never-shrinking integer (e.g.,
|
||||
//! number of block processing requests).
|
||||
//! - `IntGauge`: used to represent a varying integer (e.g., number of attestations per block).
|
||||
//!
|
||||
//! ## Important
|
||||
//!
|
||||
//! Metrics will fail if two items have the same `name`. All metrics must have a unique `name`.
|
||||
//! Because we use a global registry there is no per-crate namespace; it is one big global space.
|
||||
//!
|
||||
//! See the [Prometheus naming best practices](https://prometheus.io/docs/practices/naming/) when
|
||||
//! choosing metric names.
|
||||
//!
|
||||
//! ## Example
|
||||
//!
|
||||
//! ```rust
|
||||
//! #[macro_use]
|
||||
//! extern crate lazy_static;
|
||||
//! use lighthouse_metrics::*;
|
||||
//!
|
||||
//! // These metrics are "magically" linked to the global registry defined in `lighthouse_metrics`.
|
||||
//! lazy_static! {
|
||||
//! pub static ref RUN_COUNT: Result<IntCounter> = try_create_int_counter(
|
||||
//! "runs_total",
|
||||
//! "Total number of runs"
|
||||
//! );
|
||||
//! pub static ref CURRENT_VALUE: Result<IntGauge> = try_create_int_gauge(
|
||||
//! "current_value",
|
||||
//! "The current value"
|
||||
//! );
|
||||
//! pub static ref RUN_TIME: Result<Histogram> =
|
||||
//! try_create_histogram("run_seconds", "Time taken (measured to high precision)");
|
||||
//! }
|
||||
//!
|
||||
//!
|
||||
//! fn main() {
|
||||
//! for i in 0..100 {
|
||||
//! inc_counter(&RUN_COUNT);
|
||||
//! let timer = start_timer(&RUN_TIME);
|
||||
//!
|
||||
//! for j in 0..10 {
|
||||
//! set_gauge(&CURRENT_VALUE, j);
|
||||
//! println!("Howdy partner");
|
||||
//! }
|
||||
//!
|
||||
//! stop_timer(timer);
|
||||
//! }
|
||||
//! }
|
||||
//! ```
|
||||
|
||||
use prometheus::{HistogramOpts, HistogramTimer, Opts};
|
||||
|
||||
pub use prometheus::{Histogram, IntCounter, IntGauge, Result};
|
||||
|
||||
/// Collect all the metrics for reporting.
|
||||
pub fn gather() -> Vec<prometheus::proto::MetricFamily> {
|
||||
prometheus::gather()
|
||||
}
|
||||
|
||||
/// Attempts to create an `IntCounter`, returning `Err` if the registry does not accept the counter
|
||||
/// (potentially due to naming conflict).
|
||||
pub fn try_create_int_counter(name: &str, help: &str) -> Result<IntCounter> {
|
||||
let opts = Opts::new(name, help);
|
||||
let counter = IntCounter::with_opts(opts)?;
|
||||
prometheus::register(Box::new(counter.clone()))?;
|
||||
Ok(counter)
|
||||
}
|
||||
|
||||
/// Attempts to create an `IntGauge`, returning `Err` if the registry does not accept the gauge
|
||||
/// (potentially due to naming conflict).
|
||||
pub fn try_create_int_gauge(name: &str, help: &str) -> Result<IntGauge> {
|
||||
let opts = Opts::new(name, help);
|
||||
let gauge = IntGauge::with_opts(opts)?;
|
||||
prometheus::register(Box::new(gauge.clone()))?;
|
||||
Ok(gauge)
|
||||
}
|
||||
|
||||
/// Attempts to create a `Histogram`, returning `Err` if the registry does not accept the histogram
|
||||
/// (potentially due to naming conflict).
|
||||
pub fn try_create_histogram(name: &str, help: &str) -> Result<Histogram> {
|
||||
let opts = HistogramOpts::new(name, help);
|
||||
let histogram = Histogram::with_opts(opts)?;
|
||||
prometheus::register(Box::new(histogram.clone()))?;
|
||||
Ok(histogram)
|
||||
}
|
||||
|
||||
/// Starts a timer for the given `Histogram`, stopping when it gets dropped or given to `stop_timer(..)`.
|
||||
pub fn start_timer(histogram: &Result<Histogram>) -> Option<HistogramTimer> {
|
||||
if let Ok(histogram) = histogram {
|
||||
Some(histogram.start_timer())
|
||||
} else {
|
||||
None
|
||||
}
|
||||
}
|
||||
|
||||
/// Stops a timer created with `start_timer(..)`.
|
||||
pub fn stop_timer(timer: Option<HistogramTimer>) {
|
||||
timer.map(|t| t.observe_duration());
|
||||
}
|
||||
|
||||
pub fn inc_counter(counter: &Result<IntCounter>) {
|
||||
if let Ok(counter) = counter {
|
||||
counter.inc();
|
||||
}
|
||||
}
|
||||
|
||||
pub fn inc_counter_by(counter: &Result<IntCounter>, value: i64) {
|
||||
if let Ok(counter) = counter {
|
||||
counter.inc_by(value);
|
||||
}
|
||||
}
|
||||
|
||||
pub fn set_gauge(gauge: &Result<IntGauge>, value: i64) {
|
||||
if let Ok(gauge) = gauge {
|
||||
gauge.set(value);
|
||||
}
|
||||
}
|
||||
|
||||
/// Records a single observation in a `Histogram` manually (without using a timer).
|
||||
pub fn observe(histogram: &Result<Histogram>, value: f64) {
|
||||
if let Ok(histogram) = histogram {
|
||||
histogram.observe(value);
|
||||
}
|
||||
}
|
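Because every helper in `lighthouse_metrics` takes a `&Result<...>`, a metric whose registration failed (e.g., due to a duplicate name) simply becomes a no-op rather than a panic. A small sketch of that behaviour; the names below are illustrative and not part of the commit:

#[macro_use]
extern crate lazy_static;
use lighthouse_metrics::*;

lazy_static! {
    pub static ref FIRST: Result<IntCounter> =
        try_create_int_counter("example_duplicate_total", "First registration succeeds");
    // Registering the same name again fails, so this item holds an `Err`.
    pub static ref SECOND: Result<IntCounter> =
        try_create_int_counter("example_duplicate_total", "Second registration fails");
}

fn main() {
    inc_counter(&FIRST);  // increments the registered counter
    inc_counter(&SECOND); // silently does nothing
    assert!(SECOND.is_err());
}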
@ -6,3 +6,5 @@ edition = "2018"
|
||||
|
||||
[dependencies]
|
||||
types = { path = "../../types" }
|
||||
lazy_static = "1.3.0"
|
||||
lighthouse_metrics = { path = "../lighthouse_metrics" }
|
||||
|
@ -1,9 +1,15 @@
|
||||
#[macro_use]
|
||||
extern crate lazy_static;
|
||||
|
||||
mod metrics;
|
||||
mod system_time_slot_clock;
|
||||
mod testing_slot_clock;
|
||||
|
||||
use std::time::Duration;
|
||||
|
||||
pub use crate::system_time_slot_clock::{Error as SystemTimeSlotClockError, SystemTimeSlotClock};
|
||||
pub use crate::testing_slot_clock::{Error as TestingSlotClockError, TestingSlotClock};
|
||||
use std::time::Duration;
|
||||
pub use metrics::scrape_for_metrics;
|
||||
pub use types::Slot;
|
||||
|
||||
pub trait SlotClock: Send + Sync + Sized {
|
||||
@ -17,4 +23,6 @@ pub trait SlotClock: Send + Sync + Sized {
|
||||
fn present_slot(&self) -> Result<Option<Slot>, Self::Error>;
|
||||
|
||||
fn duration_to_next_slot(&self) -> Result<Option<Duration>, Self::Error>;
|
||||
|
||||
fn slot_duration_millis(&self) -> u64;
|
||||
}
|
||||
|
32
eth2/utils/slot_clock/src/metrics.rs
Normal file
@ -0,0 +1,32 @@
|
||||
use crate::SlotClock;
|
||||
pub use lighthouse_metrics::*;
|
||||
use types::{EthSpec, Slot};
|
||||
|
||||
lazy_static! {
|
||||
pub static ref PRESENT_SLOT: Result<IntGauge> =
|
||||
try_create_int_gauge("slotclock_present_slot", "The present wall-clock slot");
|
||||
pub static ref PRESENT_EPOCH: Result<IntGauge> =
|
||||
try_create_int_gauge("slotclock_present_epoch", "The present wall-clock epoch");
|
||||
pub static ref SLOTS_PER_EPOCH: Result<IntGauge> =
|
||||
try_create_int_gauge("slotclock_slots_per_epoch", "Slots per epoch (constant)");
|
||||
pub static ref MILLISECONDS_PER_SLOT: Result<IntGauge> = try_create_int_gauge(
|
||||
"slotclock_slot_time_milliseconds",
|
||||
"The duration in milliseconds between each slot"
|
||||
);
|
||||
}
|
||||
|
||||
/// Update the global metrics `DEFAULT_REGISTRY` with info from the slot clock.
|
||||
pub fn scrape_for_metrics<T: EthSpec, U: SlotClock>(clock: &U) {
|
||||
let present_slot = match clock.present_slot() {
|
||||
Ok(Some(slot)) => slot,
|
||||
_ => Slot::new(0),
|
||||
};
|
||||
|
||||
set_gauge(&PRESENT_SLOT, present_slot.as_u64() as i64);
|
||||
set_gauge(
|
||||
&PRESENT_EPOCH,
|
||||
present_slot.epoch(T::slots_per_epoch()).as_u64() as i64,
|
||||
);
|
||||
set_gauge(&SLOTS_PER_EPOCH, T::slots_per_epoch() as i64);
|
||||
set_gauge(&MILLISECONDS_PER_SLOT, clock.slot_duration_millis() as i64);
|
||||
}
|
@ -52,6 +52,10 @@ impl SlotClock for SystemTimeSlotClock {
|
||||
fn duration_to_next_slot(&self) -> Result<Option<Duration>, Error> {
|
||||
duration_to_next_slot(self.genesis_seconds, self.slot_duration_seconds)
|
||||
}
|
||||
|
||||
fn slot_duration_millis(&self) -> u64 {
|
||||
self.slot_duration_seconds * 1000
|
||||
}
|
||||
}
|
||||
|
||||
impl From<SystemTimeError> for Error {
|
||||
|
@ -40,6 +40,10 @@ impl SlotClock for TestingSlotClock {
|
||||
fn duration_to_next_slot(&self) -> Result<Option<Duration>, Error> {
|
||||
Ok(Some(Duration::from_secs(1)))
|
||||
}
|
||||
|
||||
fn slot_duration_millis(&self) -> u64 {
|
||||
0
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
|