Merge branch 'eip4844' into deneb-free-blobs

commit 911a63559b

Cargo.lock (generated, 648 lines changed): file diff suppressed because it is too large.
@@ -89,6 +89,8 @@ members = [
     "validator_client",
     "validator_client/slashing_protection",
+    "watch",
 ]
 resolver = "2"
@@ -102,7 +104,7 @@ eth2_hashing = { path = "crypto/eth2_hashing" }
 tree_hash = { path = "consensus/tree_hash" }
 tree_hash_derive = { path = "consensus/tree_hash_derive" }
 eth2_serde_utils = { path = "consensus/serde_utils" }
-arbitrary = { git = "https://github.com/michaelsproul/arbitrary", rev="a572fd8743012a4f1ada5ee5968b1b3619c427ba" }
+arbitrary = { git = "https://github.com/michaelsproul/arbitrary", rev="f002b99989b561ddce62e4cf2887b0f8860ae991" }

 [patch."https://github.com/ralexstokes/mev-rs"]
 mev-rs = { git = "https://github.com/ralexstokes//mev-rs", rev = "7813d4a4a564e0754e9aaab2d95520ba437c3889" }
@@ -1,6 +1,6 @@
 [package]
 name = "beacon_node"
-version = "4.0.1-rc.0"
+version = "4.0.1"
 authors = ["Paul Hauner <paul@paulhauner.com>", "Age Manning <Age@AgeManning.com"]
 edition = "2021"
@@ -476,6 +476,46 @@ pub struct BeaconChain<T: BeaconChainTypes> {
 type BeaconBlockAndState<T, Payload> = (BeaconBlock<T, Payload>, BeaconState<T>);

 impl<T: BeaconChainTypes> BeaconChain<T> {
+    /// Checks if a block is finalized.
+    /// The finalization check is done with the block slot. The block root is used to verify that
+    /// the finalized slot is in the canonical chain.
+    pub fn is_finalized_block(
+        &self,
+        block_root: &Hash256,
+        block_slot: Slot,
+    ) -> Result<bool, Error> {
+        let finalized_slot = self
+            .canonical_head
+            .cached_head()
+            .finalized_checkpoint()
+            .epoch
+            .start_slot(T::EthSpec::slots_per_epoch());
+        let is_canonical = self
+            .block_root_at_slot(block_slot, WhenSlotSkipped::None)?
+            .map_or(false, |canonical_root| block_root == &canonical_root);
+        Ok(block_slot <= finalized_slot && is_canonical)
+    }
+
+    /// Checks if a state is finalized.
+    /// The finalization check is done with the slot. The state root is used to verify that
+    /// the finalized state is in the canonical chain.
+    pub fn is_finalized_state(
+        &self,
+        state_root: &Hash256,
+        state_slot: Slot,
+    ) -> Result<bool, Error> {
+        let finalized_slot = self
+            .canonical_head
+            .cached_head()
+            .finalized_checkpoint()
+            .epoch
+            .start_slot(T::EthSpec::slots_per_epoch());
+        let is_canonical = self
+            .state_root_at_slot(state_slot)?
+            .map_or(false, |canonical_root| state_root == &canonical_root);
+        Ok(state_slot <= finalized_slot && is_canonical)
+    }
+
     /// Persists the head tracker and fork choice.
     ///
     /// We do it atomically even though no guarantees need to be made about blocks from
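The two helpers above share one predicate: an object is finalized when its slot is at or before the first slot of the finalized epoch AND its root is the canonical root at that slot (ruling out roots on pruned forks below the finalization point). A standalone sketch of that predicate, with simplified stand-ins for Lighthouse's `Slot`/`Hash256` and the canonical-root lookup stubbed out (everything here is illustrative, not the real API):

    type Slot = u64;
    type Root = [u8; 32];

    /// Mirrors the shape of the check added above: `slot <= finalized_slot`
    /// AND the root matches the canonical chain at that slot.
    fn is_finalized(
        root: &Root,
        slot: Slot,
        finalized_slot: Slot,
        canonical_root_at_slot: impl Fn(Slot) -> Option<Root>,
    ) -> bool {
        let is_canonical = canonical_root_at_slot(slot)
            .map_or(false, |canonical| *root == canonical);
        slot <= finalized_slot && is_canonical
    }

    fn main() {
        let root = [1u8; 32];
        // A slot at or below the finalized slot with a matching canonical root
        // is finalized; a later slot or a mismatched root is not.
        assert!(is_finalized(&root, 10, 32, |_| Some([1u8; 32])));
        assert!(!is_finalized(&root, 40, 32, |_| Some([1u8; 32])));
        assert!(!is_finalized(&root, 10, 32, |_| Some([2u8; 32])));
        println!("finalization predicate behaves as expected");
    }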
@@ -3014,7 +3054,7 @@ impl<T: BeaconChainTypes> BeaconChain<T> {
         metrics::start_timer(&metrics::FORK_CHOICE_PROCESS_BLOCK_TIMES);
         let block_delay = self
             .slot_clock
-            .seconds_from_current_slot_start(self.spec.seconds_per_slot)
+            .seconds_from_current_slot_start()
             .ok_or(Error::UnableToComputeTimeAtSlot)?;

         fork_choice
@@ -3881,7 +3921,7 @@ impl<T: BeaconChainTypes> BeaconChain<T> {

         let slot_delay = self
             .slot_clock
-            .seconds_from_current_slot_start(self.spec.seconds_per_slot)
+            .seconds_from_current_slot_start()
             .or_else(|| {
                 warn!(
                     self.log,
@@ -68,6 +68,8 @@ pub struct ChainConfig {
     ///
     /// This is useful for block builders and testing.
     pub always_prepare_payload: bool,
+    /// Whether backfill sync processing should be rate-limited.
+    pub enable_backfill_rate_limiting: bool,
 }

 impl Default for ChainConfig {
@@ -94,6 +96,7 @@ impl Default for ChainConfig {
             optimistic_finalized_sync: true,
             shuffling_cache_size: crate::shuffling_cache::DEFAULT_CACHE_SIZE,
             always_prepare_payload: false,
+            enable_backfill_rate_limiting: true,
         }
     }
 }
@@ -355,12 +355,6 @@ where
     while block.slot() % slots_per_epoch != 0 {
         block_slot = (block_slot / slots_per_epoch - 1) * slots_per_epoch;

-        debug!(
-            context.log(),
-            "Searching for aligned checkpoint block";
-            "block_slot" => block_slot,
-        );
-
         debug!(
             context.log(),
             "Searching for aligned checkpoint block";
@@ -38,15 +38,15 @@ system_health = { path = "../../common/system_health" }
 directory = { path = "../../common/directory" }
 eth2_serde_utils = "0.1.1"
 operation_pool = { path = "../operation_pool" }
+sensitive_url = { path = "../../common/sensitive_url" }
+unused_port = {path = "../../common/unused_port"}
+logging = { path = "../../common/logging" }
+store = { path = "../store" }

 [dev-dependencies]
-store = { path = "../store" }
 environment = { path = "../../lighthouse/environment" }
-sensitive_url = { path = "../../common/sensitive_url" }
-logging = { path = "../../common/logging" }
 serde_json = "1.0.58"
 proto_array = { path = "../../consensus/proto_array" }
-unused_port = {path = "../../common/unused_port"}
 genesis = { path = "../genesis" }

 [[test]]
@@ -77,8 +77,8 @@ pub fn get_attestation_performance<T: BeaconChainTypes>(
     // query is within permitted bounds to prevent potential OOM errors.
     if (end_epoch - start_epoch).as_usize() > MAX_REQUEST_RANGE_EPOCHS {
         return Err(custom_bad_request(format!(
-            "end_epoch must not exceed start_epoch by more than 100 epochs. start: {}, end: {}",
-            query.start_epoch, query.end_epoch
+            "end_epoch must not exceed start_epoch by more than {} epochs. start: {}, end: {}",
+            MAX_REQUEST_RANGE_EPOCHS, query.start_epoch, query.end_epoch
         )));
     }
@@ -114,8 +114,10 @@ fn compute_historic_attester_duties<T: BeaconChainTypes>(
         )?;
         (state, execution_optimistic)
     } else {
-        StateId::from_slot(request_epoch.start_slot(T::EthSpec::slots_per_epoch()))
-            .state(chain)?
+        let (state, execution_optimistic, _finalized) =
+            StateId::from_slot(request_epoch.start_slot(T::EthSpec::slots_per_epoch()))
+                .state(chain)?;
+        (state, execution_optimistic)
     };

     // Sanity-check the state lookup.
@@ -4,13 +4,15 @@ use eth2::types::BlockId as CoreBlockId;
 use std::fmt;
 use std::str::FromStr;
 use std::sync::Arc;
-use types::{BlobSidecarList, Hash256, SignedBeaconBlock, SignedBlindedBeaconBlock, Slot};
+use types::{BlobSidecarList, EthSpec, Hash256, SignedBeaconBlock, SignedBlindedBeaconBlock, Slot};

 /// Wraps `eth2::types::BlockId` and provides a simple way to obtain a block or root for a given
 /// `BlockId`.
 #[derive(Debug)]
 pub struct BlockId(pub CoreBlockId);

+type Finalized = bool;
+
 impl BlockId {
     pub fn from_slot(slot: Slot) -> Self {
         Self(CoreBlockId::Slot(slot))
@@ -24,7 +26,7 @@ impl BlockId {
     pub fn root<T: BeaconChainTypes>(
         &self,
         chain: &BeaconChain<T>,
-    ) -> Result<(Hash256, ExecutionOptimistic), warp::Rejection> {
+    ) -> Result<(Hash256, ExecutionOptimistic, Finalized), warp::Rejection> {
         match &self.0 {
             CoreBlockId::Head => {
                 let (cached_head, execution_status) = chain
@@ -34,22 +36,23 @@ impl BlockId {
                 Ok((
                     cached_head.head_block_root(),
                     execution_status.is_optimistic_or_invalid(),
+                    false,
                 ))
             }
-            CoreBlockId::Genesis => Ok((chain.genesis_block_root, false)),
+            CoreBlockId::Genesis => Ok((chain.genesis_block_root, false, true)),
             CoreBlockId::Finalized => {
                 let finalized_checkpoint =
                     chain.canonical_head.cached_head().finalized_checkpoint();
                 let (_slot, execution_optimistic) =
                     checkpoint_slot_and_execution_optimistic(chain, finalized_checkpoint)?;
-                Ok((finalized_checkpoint.root, execution_optimistic))
+                Ok((finalized_checkpoint.root, execution_optimistic, true))
             }
             CoreBlockId::Justified => {
                 let justified_checkpoint =
                     chain.canonical_head.cached_head().justified_checkpoint();
                 let (_slot, execution_optimistic) =
                     checkpoint_slot_and_execution_optimistic(chain, justified_checkpoint)?;
-                Ok((justified_checkpoint.root, execution_optimistic))
+                Ok((justified_checkpoint.root, execution_optimistic, false))
             }
             CoreBlockId::Slot(slot) => {
                 let execution_optimistic = chain
@@ -66,7 +69,14 @@ impl BlockId {
                         ))
                     })
                 })?;
-                Ok((root, execution_optimistic))
+                let finalized = *slot
+                    <= chain
+                        .canonical_head
+                        .cached_head()
+                        .finalized_checkpoint()
+                        .epoch
+                        .start_slot(T::EthSpec::slots_per_epoch());
+                Ok((root, execution_optimistic, finalized))
             }
             CoreBlockId::Root(root) => {
                 // This matches the behaviour of other consensus clients (e.g. Teku).
@@ -88,7 +98,20 @@ impl BlockId {
                         .is_optimistic_or_invalid_block(root)
                         .map_err(BeaconChainError::ForkChoiceError)
                         .map_err(warp_utils::reject::beacon_chain_error)?;
-                    Ok((*root, execution_optimistic))
+                    let blinded_block = chain
+                        .get_blinded_block(root)
+                        .map_err(warp_utils::reject::beacon_chain_error)?
+                        .ok_or_else(|| {
+                            warp_utils::reject::custom_not_found(format!(
+                                "beacon block with root {}",
+                                root
+                            ))
+                        })?;
+                    let block_slot = blinded_block.slot();
+                    let finalized = chain
+                        .is_finalized_block(root, block_slot)
+                        .map_err(warp_utils::reject::beacon_chain_error)?;
+                    Ok((*root, execution_optimistic, finalized))
                 } else {
                     Err(warp_utils::reject::custom_not_found(format!(
                         "beacon block with root {}",
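For a lookup by root the slot isn't known up front, so the code above first loads the blinded block to learn its slot and only then applies the finalization check. A self-contained sketch of that flow, with the chain and storage stubbed out (all names here are illustrative stand-ins, not Lighthouse's actual API):

    type Slot = u64;
    type Root = [u8; 32];

    struct Block { slot: Slot }

    /// Stand-in for `chain.get_blinded_block`: resolve a block by root.
    fn get_block(_root: &Root) -> Option<Block> {
        Some(Block { slot: 10 })
    }

    /// Stand-in for `chain.is_finalized_block` (canonical-root check elided).
    fn is_finalized_block(_root: &Root, slot: Slot, finalized_slot: Slot) -> bool {
        slot <= finalized_slot
    }

    fn finalized_status_by_root(root: &Root, finalized_slot: Slot) -> Result<bool, String> {
        // Load the block first: a root alone doesn't tell us the slot.
        let block = get_block(root).ok_or_else(|| "beacon block not found".to_string())?;
        Ok(is_finalized_block(root, block.slot, finalized_slot))
    }

    fn main() {
        let root = [0u8; 32];
        assert_eq!(finalized_status_by_root(&root, 32), Ok(true));
        println!("root lookup resolves the slot before the finality check");
    }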
@@ -103,7 +126,14 @@ impl BlockId {
     pub fn blinded_block<T: BeaconChainTypes>(
         &self,
         chain: &BeaconChain<T>,
-    ) -> Result<(SignedBlindedBeaconBlock<T::EthSpec>, ExecutionOptimistic), warp::Rejection> {
+    ) -> Result<
+        (
+            SignedBlindedBeaconBlock<T::EthSpec>,
+            ExecutionOptimistic,
+            Finalized,
+        ),
+        warp::Rejection,
+    > {
         match &self.0 {
             CoreBlockId::Head => {
                 let (cached_head, execution_status) = chain
@@ -113,10 +143,11 @@ impl BlockId {
                 Ok((
                     cached_head.snapshot.beacon_block.clone_as_blinded(),
                     execution_status.is_optimistic_or_invalid(),
+                    false,
                 ))
             }
             CoreBlockId::Slot(slot) => {
-                let (root, execution_optimistic) = self.root(chain)?;
+                let (root, execution_optimistic, finalized) = self.root(chain)?;
                 chain
                     .get_blinded_block(&root)
                     .map_err(warp_utils::reject::beacon_chain_error)
@@ -128,7 +159,7 @@ impl BlockId {
                             slot
                         )));
                     }
-                    Ok((block, execution_optimistic))
+                    Ok((block, execution_optimistic, finalized))
                 }
                 None => Err(warp_utils::reject::custom_not_found(format!(
                     "beacon block with root {}",
@@ -137,7 +168,7 @@ impl BlockId {
                 })
             }
             _ => {
-                let (root, execution_optimistic) = self.root(chain)?;
+                let (root, execution_optimistic, finalized) = self.root(chain)?;
                 let block = chain
                     .get_blinded_block(&root)
                     .map_err(warp_utils::reject::beacon_chain_error)
@@ -149,7 +180,7 @@ impl BlockId {
                         ))
                     })
                 })?;
-                Ok((block, execution_optimistic))
+                Ok((block, execution_optimistic, finalized))
             }
         }
     }
@@ -158,7 +189,14 @@ impl BlockId {
     pub async fn full_block<T: BeaconChainTypes>(
         &self,
         chain: &BeaconChain<T>,
-    ) -> Result<(Arc<SignedBeaconBlock<T::EthSpec>>, ExecutionOptimistic), warp::Rejection> {
+    ) -> Result<
+        (
+            Arc<SignedBeaconBlock<T::EthSpec>>,
+            ExecutionOptimistic,
+            Finalized,
+        ),
+        warp::Rejection,
+    > {
         match &self.0 {
             CoreBlockId::Head => {
                 let (cached_head, execution_status) = chain
@@ -168,10 +206,11 @@ impl BlockId {
                 Ok((
                     cached_head.snapshot.beacon_block.clone(),
                     execution_status.is_optimistic_or_invalid(),
+                    false,
                 ))
             }
             CoreBlockId::Slot(slot) => {
-                let (root, execution_optimistic) = self.root(chain)?;
+                let (root, execution_optimistic, finalized) = self.root(chain)?;
                 chain
                     .get_block(&root)
                     .await
@@ -184,7 +223,7 @@ impl BlockId {
                             slot
                         )));
                     }
-                    Ok((Arc::new(block), execution_optimistic))
+                    Ok((Arc::new(block), execution_optimistic, finalized))
                 }
                 None => Err(warp_utils::reject::custom_not_found(format!(
                     "beacon block with root {}",
@@ -193,14 +232,14 @@ impl BlockId {
                 })
             }
             _ => {
-                let (root, execution_optimistic) = self.root(chain)?;
+                let (root, execution_optimistic, finalized) = self.root(chain)?;
                 chain
                     .get_block(&root)
                     .await
                     .map_err(warp_utils::reject::beacon_chain_error)
                     .and_then(|block_opt| {
                         block_opt
-                            .map(|block| (Arc::new(block), execution_optimistic))
+                            .map(|block| (Arc::new(block), execution_optimistic, finalized))
                            .ok_or_else(|| {
                                 warp_utils::reject::custom_not_found(format!(
                                     "beacon block with root {}",
@@ -19,6 +19,7 @@ mod standard_block_rewards;
 mod state_id;
 mod sync_committee_rewards;
 mod sync_committees;
+pub mod test_utils;
 mod ui;
 mod validator_inclusion;
 mod version;
@@ -31,8 +32,8 @@ use beacon_chain::{
 pub use block_id::BlockId;
 use directory::DEFAULT_ROOT_DIR;
 use eth2::types::{
-    self as api_types, EndpointVersion, SignedBlockContents, SkipRandaoVerification, ValidatorId,
-    ValidatorStatus,
+    self as api_types, EndpointVersion, SignedBlockContents, ForkChoice, ForkChoiceNode, SkipRandaoVerification,
+    ValidatorId, ValidatorStatus,
 };
 use lighthouse_network::{types::SyncState, EnrExt, NetworkGlobals, PeerId, PubsubMessage};
 use lighthouse_version::version_with_platform;
@@ -64,7 +65,7 @@ use types::{
     SyncCommitteeMessage, SyncContributionData,
 };
 use version::{
-    add_consensus_version_header, execution_optimistic_fork_versioned_response,
+    add_consensus_version_header, execution_optimistic_finalized_fork_versioned_response,
     fork_versioned_response, inconsistent_fork_rejection, unsupported_version_rejection, V1, V2,
 };
 use warp::http::StatusCode;
@@ -523,12 +524,13 @@ pub fn serve<T: BeaconChainTypes>(
         .and(warp::path::end())
         .and_then(|state_id: StateId, chain: Arc<BeaconChain<T>>| {
             blocking_json_task(move || {
-                let (root, execution_optimistic) = state_id.root(&chain)?;
-
+                let (root, execution_optimistic, finalized) = state_id.root(&chain)?;
                 Ok(root)
                     .map(api_types::RootData::from)
                     .map(api_types::GenericResponse::from)
-                    .map(|resp| resp.add_execution_optimistic(execution_optimistic))
+                    .map(|resp| {
+                        resp.add_execution_optimistic_finalized(execution_optimistic, finalized)
+                    })
             })
         });
@@ -539,11 +541,12 @@ pub fn serve<T: BeaconChainTypes>(
         .and(warp::path::end())
         .and_then(|state_id: StateId, chain: Arc<BeaconChain<T>>| {
             blocking_json_task(move || {
-                let (fork, execution_optimistic) =
-                    state_id.fork_and_execution_optimistic(&chain)?;
-                Ok(api_types::ExecutionOptimisticResponse {
+                let (fork, execution_optimistic, finalized) =
+                    state_id.fork_and_execution_optimistic_and_finalized(&chain)?;
+                Ok(api_types::ExecutionOptimisticFinalizedResponse {
                     data: fork,
                     execution_optimistic: Some(execution_optimistic),
+                    finalized: Some(finalized),
                 })
             })
         });
@@ -555,23 +558,26 @@ pub fn serve<T: BeaconChainTypes>(
         .and(warp::path::end())
         .and_then(|state_id: StateId, chain: Arc<BeaconChain<T>>| {
             blocking_json_task(move || {
-                let (data, execution_optimistic) = state_id.map_state_and_execution_optimistic(
-                    &chain,
-                    |state, execution_optimistic| {
-                        Ok((
-                            api_types::FinalityCheckpointsData {
-                                previous_justified: state.previous_justified_checkpoint(),
-                                current_justified: state.current_justified_checkpoint(),
-                                finalized: state.finalized_checkpoint(),
-                            },
-                            execution_optimistic,
-                        ))
-                    },
-                )?;
+                let (data, execution_optimistic, finalized) = state_id
+                    .map_state_and_execution_optimistic_and_finalized(
+                        &chain,
+                        |state, execution_optimistic, finalized| {
+                            Ok((
+                                api_types::FinalityCheckpointsData {
+                                    previous_justified: state.previous_justified_checkpoint(),
+                                    current_justified: state.current_justified_checkpoint(),
+                                    finalized: state.finalized_checkpoint(),
+                                },
+                                execution_optimistic,
+                                finalized,
+                            ))
+                        },
+                    )?;

-                Ok(api_types::ExecutionOptimisticResponse {
+                Ok(api_types::ExecutionOptimisticFinalizedResponse {
                     data,
                     execution_optimistic: Some(execution_optimistic),
+                    finalized: Some(finalized),
                 })
             })
         });
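Each of these endpoints now serialises both status flags alongside `data`. A self-contained sketch of the resulting wire shape (the struct is defined locally for illustration; the real type lives in Lighthouse's `eth2` crate, and this assumes the `serde`/`serde_json` crates):

    use serde::Serialize; // serde = { version = "1", features = ["derive"] }

    #[derive(Serialize)]
    struct ExecutionOptimisticFinalizedResponse<T> {
        #[serde(skip_serializing_if = "Option::is_none")]
        execution_optimistic: Option<bool>,
        #[serde(skip_serializing_if = "Option::is_none")]
        finalized: Option<bool>,
        data: T,
    }

    fn main() {
        let resp = ExecutionOptimisticFinalizedResponse {
            execution_optimistic: Some(false),
            finalized: Some(true),
            data: "0xabc", // stand-in payload
        };
        // Prints: {"execution_optimistic":false,"finalized":true,"data":"0xabc"}
        println!("{}", serde_json::to_string(&resp).unwrap());
    }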
@@ -588,10 +594,10 @@ pub fn serve<T: BeaconChainTypes>(
              query_res: Result<api_types::ValidatorBalancesQuery, warp::Rejection>| {
                 blocking_json_task(move || {
                     let query = query_res?;
-                    let (data, execution_optimistic) = state_id
-                        .map_state_and_execution_optimistic(
+                    let (data, execution_optimistic, finalized) = state_id
+                        .map_state_and_execution_optimistic_and_finalized(
                             &chain,
-                            |state, execution_optimistic| {
+                            |state, execution_optimistic, finalized| {
                                 Ok((
                                     state
                                         .validators()
@@ -619,13 +625,15 @@ pub fn serve<T: BeaconChainTypes>(
                                         })
                                         .collect::<Vec<_>>(),
                                     execution_optimistic,
+                                    finalized,
                                 ))
                             },
                         )?;

-                    Ok(api_types::ExecutionOptimisticResponse {
+                    Ok(api_types::ExecutionOptimisticFinalizedResponse {
                         data,
                         execution_optimistic: Some(execution_optimistic),
+                        finalized: Some(finalized),
                     })
                 })
             },
@@ -643,10 +651,10 @@ pub fn serve<T: BeaconChainTypes>(
              query_res: Result<api_types::ValidatorsQuery, warp::Rejection>| {
                 blocking_json_task(move || {
                     let query = query_res?;
-                    let (data, execution_optimistic) = state_id
-                        .map_state_and_execution_optimistic(
+                    let (data, execution_optimistic, finalized) = state_id
+                        .map_state_and_execution_optimistic_and_finalized(
                             &chain,
-                            |state, execution_optimistic| {
+                            |state, execution_optimistic, finalized| {
                                 let epoch = state.current_epoch();
                                 let far_future_epoch = chain.spec.far_future_epoch;
@@ -696,13 +704,15 @@ pub fn serve<T: BeaconChainTypes>(
                                         })
                                         .collect::<Vec<_>>(),
                                     execution_optimistic,
+                                    finalized,
                                 ))
                             },
                         )?;

-                    Ok(api_types::ExecutionOptimisticResponse {
+                    Ok(api_types::ExecutionOptimisticFinalizedResponse {
                         data,
                         execution_optimistic: Some(execution_optimistic),
+                        finalized: Some(finalized),
                     })
                 })
             },
@@ -721,10 +731,10 @@ pub fn serve<T: BeaconChainTypes>(
         .and_then(
             |state_id: StateId, chain: Arc<BeaconChain<T>>, validator_id: ValidatorId| {
                 blocking_json_task(move || {
-                    let (data, execution_optimistic) = state_id
-                        .map_state_and_execution_optimistic(
+                    let (data, execution_optimistic, finalized) = state_id
+                        .map_state_and_execution_optimistic_and_finalized(
                             &chain,
-                            |state, execution_optimistic| {
+                            |state, execution_optimistic, finalized| {
                                 let index_opt = match &validator_id {
                                     ValidatorId::PublicKey(pubkey) => {
                                         state.validators().iter().position(|v| v.pubkey == *pubkey)
@@ -758,13 +768,15 @@ pub fn serve<T: BeaconChainTypes>(
                                         ))
                                     })?,
                                     execution_optimistic,
+                                    finalized,
                                 ))
                             },
                         )?;

-                    Ok(api_types::ExecutionOptimisticResponse {
+                    Ok(api_types::ExecutionOptimisticFinalizedResponse {
                         data,
                         execution_optimistic: Some(execution_optimistic),
+                        finalized: Some(finalized),
                     })
                 })
             },
@@ -779,10 +791,10 @@ pub fn serve<T: BeaconChainTypes>(
         .and_then(
             |state_id: StateId, chain: Arc<BeaconChain<T>>, query: api_types::CommitteesQuery| {
                 blocking_json_task(move || {
-                    let (data, execution_optimistic) = state_id
-                        .map_state_and_execution_optimistic(
+                    let (data, execution_optimistic, finalized) = state_id
+                        .map_state_and_execution_optimistic_and_finalized(
                             &chain,
-                            |state, execution_optimistic| {
+                            |state, execution_optimistic, finalized| {
                                 let current_epoch = state.current_epoch();
                                 let epoch = query.epoch.unwrap_or(current_epoch);
@@ -938,12 +950,13 @@ pub fn serve<T: BeaconChainTypes>(
                                     }
                                 }

-                                Ok((response, execution_optimistic))
+                                Ok((response, execution_optimistic, finalized))
                             },
                         )?;
-                    Ok(api_types::ExecutionOptimisticResponse {
+                    Ok(api_types::ExecutionOptimisticFinalizedResponse {
                         data,
                         execution_optimistic: Some(execution_optimistic),
+                        finalized: Some(finalized),
                     })
                 })
             },
@@ -960,10 +973,10 @@ pub fn serve<T: BeaconChainTypes>(
              chain: Arc<BeaconChain<T>>,
              query: api_types::SyncCommitteesQuery| {
                 blocking_json_task(move || {
-                    let (sync_committee, execution_optimistic) = state_id
-                        .map_state_and_execution_optimistic(
+                    let (sync_committee, execution_optimistic, finalized) = state_id
+                        .map_state_and_execution_optimistic_and_finalized(
                             &chain,
-                            |state, execution_optimistic| {
+                            |state, execution_optimistic, finalized| {
                                 let current_epoch = state.current_epoch();
                                 let epoch = query.epoch.unwrap_or(current_epoch);
                                 Ok((
@@ -973,9 +986,10 @@ pub fn serve<T: BeaconChainTypes>(
                                         .map_err(|e| match e {
                                             BeaconStateError::SyncCommitteeNotKnown { .. } => {
                                                 warp_utils::reject::custom_bad_request(format!(
-                                                    "state at epoch {} has no sync committee for epoch {}",
-                                                    current_epoch, epoch
-                                                ))
+                                                    "state at epoch {} has no \
+                                                     sync committee for epoch {}",
+                                                    current_epoch, epoch
+                                                ))
                                             }
                                             BeaconStateError::IncorrectStateVariant => {
                                                 warp_utils::reject::custom_bad_request(format!(
@@ -986,6 +1000,7 @@ pub fn serve<T: BeaconChainTypes>(
                                             e => warp_utils::reject::beacon_state_error(e),
                                         })?,
                                     execution_optimistic,
+                                    finalized,
                                 ))
                             },
                         )?;
@@ -1007,7 +1022,7 @@ pub fn serve<T: BeaconChainTypes>(
                     };

                     Ok(api_types::GenericResponse::from(response)
-                        .add_execution_optimistic(execution_optimistic))
+                        .add_execution_optimistic_finalized(execution_optimistic, finalized))
                 })
             },
         );
@@ -1021,23 +1036,23 @@ pub fn serve<T: BeaconChainTypes>(
         .and_then(
             |state_id: StateId, chain: Arc<BeaconChain<T>>, query: api_types::RandaoQuery| {
                 blocking_json_task(move || {
-                    let (randao, execution_optimistic) = state_id
-                        .map_state_and_execution_optimistic(
+                    let (randao, execution_optimistic, finalized) = state_id
+                        .map_state_and_execution_optimistic_and_finalized(
                             &chain,
-                            |state, execution_optimistic| {
+                            |state, execution_optimistic, finalized| {
                                 let epoch = query.epoch.unwrap_or_else(|| state.current_epoch());
                                 let randao = *state.get_randao_mix(epoch).map_err(|e| {
                                     warp_utils::reject::custom_bad_request(format!(
                                         "epoch out of range: {e:?}"
                                     ))
                                 })?;
-                                Ok((randao, execution_optimistic))
+                                Ok((randao, execution_optimistic, finalized))
                             },
                         )?;

                     Ok(
                         api_types::GenericResponse::from(api_types::RandaoMix { randao })
-                            .add_execution_optimistic(execution_optimistic),
+                            .add_execution_optimistic_finalized(execution_optimistic, finalized),
                     )
                 })
             },
@@ -1059,72 +1074,73 @@ pub fn serve<T: BeaconChainTypes>(
         .and_then(
             |query: api_types::HeadersQuery, chain: Arc<BeaconChain<T>>| {
                 blocking_json_task(move || {
-                    let (root, block, execution_optimistic) = match (query.slot, query.parent_root)
-                    {
-                        // No query parameters, return the canonical head block.
-                        (None, None) => {
-                            let (cached_head, execution_status) = chain
-                                .canonical_head
-                                .head_and_execution_status()
-                                .map_err(warp_utils::reject::beacon_chain_error)?;
-                            (
-                                cached_head.head_block_root(),
-                                cached_head.snapshot.beacon_block.clone_as_blinded(),
-                                execution_status.is_optimistic_or_invalid(),
-                            )
-                        }
-                        // Only the parent root parameter, do a forwards-iterator lookup.
-                        (None, Some(parent_root)) => {
-                            let (parent, execution_optimistic) =
-                                BlockId::from_root(parent_root).blinded_block(&chain)?;
-                            let (root, _slot) = chain
-                                .forwards_iter_block_roots(parent.slot())
-                                .map_err(warp_utils::reject::beacon_chain_error)?
-                                // Ignore any skip-slots immediately following the parent.
-                                .find(|res| {
-                                    res.as_ref().map_or(false, |(root, _)| *root != parent_root)
-                                })
-                                .transpose()
-                                .map_err(warp_utils::reject::beacon_chain_error)?
-                                .ok_or_else(|| {
-                                    warp_utils::reject::custom_not_found(format!(
-                                        "child of block with root {}",
-                                        parent_root
-                                    ))
-                                })?;
-
-                            BlockId::from_root(root)
-                                .blinded_block(&chain)
-                                // Ignore this `execution_optimistic` since the first value has
-                                // more information about the original request.
-                                .map(|(block, _execution_optimistic)| {
-                                    (root, block, execution_optimistic)
-                                })?
-                        }
-                        // Slot is supplied, search by slot and optionally filter by
-                        // parent root.
-                        (Some(slot), parent_root_opt) => {
-                            let (root, execution_optimistic) =
-                                BlockId::from_slot(slot).root(&chain)?;
-                            // Ignore the second `execution_optimistic`, the first one is the
-                            // most relevant since it knows that we queried by slot.
-                            let (block, _execution_optimistic) =
-                                BlockId::from_root(root).blinded_block(&chain)?;
-
-                            // If the parent root was supplied, check that it matches the block
-                            // obtained via a slot lookup.
-                            if let Some(parent_root) = parent_root_opt {
-                                if block.parent_root() != parent_root {
-                                    return Err(warp_utils::reject::custom_not_found(format!(
-                                        "no canonical block at slot {} with parent root {}",
-                                        slot, parent_root
-                                    )));
-                                }
-                            }
-
-                            (root, block, execution_optimistic)
-                        }
-                    };
+                    let (root, block, execution_optimistic, finalized) =
+                        match (query.slot, query.parent_root) {
+                            // No query parameters, return the canonical head block.
+                            (None, None) => {
+                                let (cached_head, execution_status) = chain
+                                    .canonical_head
+                                    .head_and_execution_status()
+                                    .map_err(warp_utils::reject::beacon_chain_error)?;
+                                (
+                                    cached_head.head_block_root(),
+                                    cached_head.snapshot.beacon_block.clone_as_blinded(),
+                                    execution_status.is_optimistic_or_invalid(),
+                                    false,
+                                )
+                            }
+                            // Only the parent root parameter, do a forwards-iterator lookup.
+                            (None, Some(parent_root)) => {
+                                let (parent, execution_optimistic, _parent_finalized) =
+                                    BlockId::from_root(parent_root).blinded_block(&chain)?;
+                                let (root, _slot) = chain
+                                    .forwards_iter_block_roots(parent.slot())
+                                    .map_err(warp_utils::reject::beacon_chain_error)?
+                                    // Ignore any skip-slots immediately following the parent.
+                                    .find(|res| {
+                                        res.as_ref().map_or(false, |(root, _)| *root != parent_root)
+                                    })
+                                    .transpose()
+                                    .map_err(warp_utils::reject::beacon_chain_error)?
+                                    .ok_or_else(|| {
+                                        warp_utils::reject::custom_not_found(format!(
+                                            "child of block with root {}",
+                                            parent_root
+                                        ))
+                                    })?;
+
+                                BlockId::from_root(root)
+                                    .blinded_block(&chain)
+                                    // Ignore this `execution_optimistic` since the first value has
+                                    // more information about the original request.
+                                    .map(|(block, _execution_optimistic, finalized)| {
+                                        (root, block, execution_optimistic, finalized)
+                                    })?
+                            }
+                            // Slot is supplied, search by slot and optionally filter by
+                            // parent root.
+                            (Some(slot), parent_root_opt) => {
+                                let (root, execution_optimistic, finalized) =
+                                    BlockId::from_slot(slot).root(&chain)?;
+                                // Ignore the second `execution_optimistic`, the first one is the
+                                // most relevant since it knows that we queried by slot.
+                                let (block, _execution_optimistic, _finalized) =
+                                    BlockId::from_root(root).blinded_block(&chain)?;
+
+                                // If the parent root was supplied, check that it matches the block
+                                // obtained via a slot lookup.
+                                if let Some(parent_root) = parent_root_opt {
+                                    if block.parent_root() != parent_root {
+                                        return Err(warp_utils::reject::custom_not_found(format!(
+                                            "no canonical block at slot {} with parent root {}",
+                                            slot, parent_root
+                                        )));
+                                    }
+                                }
+
+                                (root, block, execution_optimistic, finalized)
+                            }
+                        };

                     let data = api_types::BlockHeaderData {
                         root,
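The headers handler dispatches on the pair of optional query parameters, and each arm yields the same four-element tuple. A self-contained sketch of just that dispatch (strategies reduced to labels; not the real handler):

    /// Sketch of the dispatch above: the pair of optional query parameters
    /// selects one of three lookup strategies.
    fn select_strategy(slot: Option<u64>, parent_root: Option<u64>) -> &'static str {
        match (slot, parent_root) {
            // No query parameters: return the canonical head block.
            (None, None) => "head",
            // Only a parent root: forwards-iterate from the parent's slot.
            (None, Some(_parent_root)) => "child-of-parent",
            // A slot (with an optional parent-root filter): look up by slot.
            (Some(_slot), _maybe_parent) => "by-slot",
        }
    }

    fn main() {
        assert_eq!(select_strategy(None, None), "head");
        assert_eq!(select_strategy(None, Some(7)), "child-of-parent");
        assert_eq!(select_strategy(Some(3), Some(7)), "by-slot");
    }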
@@ -1136,7 +1152,7 @@ pub fn serve<T: BeaconChainTypes>(
                     };

                     Ok(api_types::GenericResponse::from(vec![data])
-                        .add_execution_optimistic(execution_optimistic))
+                        .add_execution_optimistic_finalized(execution_optimistic, finalized))
                 })
             },
         );
@@ -1154,10 +1170,10 @@ pub fn serve<T: BeaconChainTypes>(
         .and(chain_filter.clone())
         .and_then(|block_id: BlockId, chain: Arc<BeaconChain<T>>| {
             blocking_json_task(move || {
-                let (root, execution_optimistic) = block_id.root(&chain)?;
+                let (root, execution_optimistic, finalized) = block_id.root(&chain)?;
                 // Ignore the second `execution_optimistic` since the first one has more
                 // information about the original request.
-                let (block, _execution_optimistic) =
+                let (block, _execution_optimistic, _finalized) =
                     BlockId::from_root(root).blinded_block(&chain)?;

                 let canonical = chain
@@ -1174,8 +1190,9 @@ pub fn serve<T: BeaconChainTypes>(
                     },
                 };

-                Ok(api_types::ExecutionOptimisticResponse {
+                Ok(api_types::ExecutionOptimisticFinalizedResponse {
                     execution_optimistic: Some(execution_optimistic),
+                    finalized: Some(finalized),
                     data,
                 })
             })
@@ -1260,7 +1277,8 @@ pub fn serve<T: BeaconChainTypes>(
              chain: Arc<BeaconChain<T>>,
              accept_header: Option<api_types::Accept>| {
                 async move {
-                    let (block, execution_optimistic) = block_id.full_block(&chain).await?;
+                    let (block, execution_optimistic, finalized) =
+                        block_id.full_block(&chain).await?;
                     let fork_name = block
                         .fork_name(&chain.spec)
                         .map_err(inconsistent_fork_rejection)?;
@@ -1276,10 +1294,11 @@ pub fn serve<T: BeaconChainTypes>(
                                 e
                             ))
                         }),
-                        _ => execution_optimistic_fork_versioned_response(
+                        _ => execution_optimistic_finalized_fork_versioned_response(
                             endpoint_version,
                             fork_name,
                             execution_optimistic,
+                            finalized,
                             block,
                         )
                         .map(|res| warp::reply::json(&res).into_response()),
@@ -1296,12 +1315,11 @@ pub fn serve<T: BeaconChainTypes>(
         .and(warp::path::end())
         .and_then(|block_id: BlockId, chain: Arc<BeaconChain<T>>| {
             blocking_json_task(move || {
-                let (block, execution_optimistic) = block_id.blinded_block(&chain)?;
-
+                let (block, execution_optimistic, finalized) = block_id.blinded_block(&chain)?;
                 Ok(api_types::GenericResponse::from(api_types::RootData::from(
                     block.canonical_root(),
                 ))
-                .add_execution_optimistic(execution_optimistic))
+                .add_execution_optimistic_finalized(execution_optimistic, finalized))
             })
         });
@@ -1312,11 +1330,10 @@ pub fn serve<T: BeaconChainTypes>(
         .and(warp::path::end())
         .and_then(|block_id: BlockId, chain: Arc<BeaconChain<T>>| {
             blocking_json_task(move || {
-                let (block, execution_optimistic) = block_id.blinded_block(&chain)?;
-
+                let (block, execution_optimistic, finalized) = block_id.blinded_block(&chain)?;
                 Ok(
                     api_types::GenericResponse::from(block.message().body().attestations().clone())
-                        .add_execution_optimistic(execution_optimistic),
+                        .add_execution_optimistic_finalized(execution_optimistic, finalized),
                 )
             })
         });
@@ -1334,7 +1351,8 @@ pub fn serve<T: BeaconChainTypes>(
              chain: Arc<BeaconChain<T>>,
              accept_header: Option<api_types::Accept>| {
                 blocking_response_task(move || {
-                    let (block, execution_optimistic) = block_id.blinded_block(&chain)?;
+                    let (block, execution_optimistic, finalized) =
+                        block_id.blinded_block(&chain)?;
                     let fork_name = block
                         .fork_name(&chain.spec)
                         .map_err(inconsistent_fork_rejection)?;
@@ -1352,10 +1370,11 @@ pub fn serve<T: BeaconChainTypes>(
                         }),
                         _ => {
                             // Post as a V2 endpoint so we return the fork version.
-                            execution_optimistic_fork_versioned_response(
+                            execution_optimistic_finalized_fork_versioned_response(
                                 V2,
                                 fork_name,
                                 execution_optimistic,
+                                finalized,
                                 block,
                             )
                             .map(|res| warp::reply::json(&res).into_response())
@@ -1935,11 +1954,13 @@ pub fn serve<T: BeaconChainTypes>(
         .and(warp::path::end())
         .and_then(|chain: Arc<BeaconChain<T>>, block_id: BlockId| {
             blocking_json_task(move || {
-                let (rewards, execution_optimistic) =
+                let (rewards, execution_optimistic, finalized) =
                     standard_block_rewards::compute_beacon_block_rewards(chain, block_id)?;
                 Ok(rewards)
                     .map(api_types::GenericResponse::from)
-                    .map(|resp| resp.add_execution_optimistic(execution_optimistic))
+                    .map(|resp| {
+                        resp.add_execution_optimistic_finalized(execution_optimistic, finalized)
+                    })
             })
         });
@@ -2018,14 +2039,16 @@ pub fn serve<T: BeaconChainTypes>(
              validators: Vec<ValidatorId>,
              log: Logger| {
                 blocking_json_task(move || {
-                    let (rewards, execution_optimistic) =
+                    let (rewards, execution_optimistic, finalized) =
                         sync_committee_rewards::compute_sync_committee_rewards(
                             chain, block_id, validators, log,
                         )?;

                     Ok(rewards)
                         .map(api_types::GenericResponse::from)
-                        .map(|resp| resp.add_execution_optimistic(execution_optimistic))
+                        .map(|resp| {
+                            resp.add_execution_optimistic_finalized(execution_optimistic, finalized)
+                        })
                 })
             },
         );
@@ -2108,7 +2131,7 @@ pub fn serve<T: BeaconChainTypes>(
                     // We can ignore the optimistic status for the "fork" since it's a
                     // specification constant that doesn't change across competing heads of the
                     // beacon chain.
-                    let (state, _execution_optimistic) = state_id.state(&chain)?;
+                    let (state, _execution_optimistic, _finalized) = state_id.state(&chain)?;
                     let fork_name = state
                         .fork_name(&chain.spec)
                         .map_err(inconsistent_fork_rejection)?;
@@ -2126,16 +2149,17 @@ pub fn serve<T: BeaconChainTypes>(
                             ))
                         })
                     }
-                    _ => state_id.map_state_and_execution_optimistic(
+                    _ => state_id.map_state_and_execution_optimistic_and_finalized(
                         &chain,
-                        |state, execution_optimistic| {
+                        |state, execution_optimistic, finalized| {
                             let fork_name = state
                                 .fork_name(&chain.spec)
                                 .map_err(inconsistent_fork_rejection)?;
-                            let res = execution_optimistic_fork_versioned_response(
+                            let res = execution_optimistic_finalized_fork_versioned_response(
                                 endpoint_version,
                                 fork_name,
                                 execution_optimistic,
+                                finalized,
                                 &state,
                             )?;
                             Ok(add_consensus_version_header(
@@ -2185,6 +2209,58 @@ pub fn serve<T: BeaconChainTypes>(
             },
         );

+    // GET debug/fork_choice
+    let get_debug_fork_choice = eth_v1
+        .and(warp::path("debug"))
+        .and(warp::path("fork_choice"))
+        .and(warp::path::end())
+        .and(chain_filter.clone())
+        .and_then(|chain: Arc<BeaconChain<T>>| {
+            blocking_json_task(move || {
+                let beacon_fork_choice = chain.canonical_head.fork_choice_read_lock();
+
+                let proto_array = beacon_fork_choice.proto_array().core_proto_array();
+
+                let fork_choice_nodes = proto_array
+                    .nodes
+                    .iter()
+                    .map(|node| {
+                        let execution_status = if node.execution_status.is_execution_enabled() {
+                            Some(node.execution_status.to_string())
+                        } else {
+                            None
+                        };
+
+                        ForkChoiceNode {
+                            slot: node.slot,
+                            block_root: node.root,
+                            parent_root: node
+                                .parent
+                                .and_then(|index| proto_array.nodes.get(index))
+                                .map(|parent| parent.root),
+                            justified_epoch: node
+                                .justified_checkpoint
+                                .map(|checkpoint| checkpoint.epoch),
+                            finalized_epoch: node
+                                .finalized_checkpoint
+                                .map(|checkpoint| checkpoint.epoch),
+                            weight: node.weight,
+                            validity: execution_status,
+                            execution_block_hash: node
+                                .execution_status
+                                .block_hash()
+                                .map(|block_hash| block_hash.into_root()),
+                        }
+                    })
+                    .collect::<Vec<_>>();
+                Ok(ForkChoice {
+                    justified_checkpoint: proto_array.justified_checkpoint,
+                    finalized_checkpoint: proto_array.finalized_checkpoint,
+                    fork_choice_nodes,
+                })
+            })
+        });
+
     /*
      * node
      */
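The core of the new endpoint is a per-node projection from the proto-array's internal representation to the API shape, resolving the stored parent *index* to the parent's *root*. A self-contained sketch of that mapping with simplified stand-in structs (field set reduced; not the real `ProtoNode`/`ForkChoiceNode` types):

    /// Simplified stand-in for a proto-array node: the parent is an index
    /// into the nodes vec, not a root.
    #[derive(Debug)]
    struct ProtoNode {
        slot: u64,
        root: u64,
        parent: Option<usize>,
        weight: u64,
    }

    /// Simplified stand-in for the API node: the parent is reported by root.
    #[derive(Debug)]
    struct ApiForkChoiceNode {
        slot: u64,
        block_root: u64,
        parent_root: Option<u64>,
        weight: u64,
    }

    fn to_api_nodes(nodes: &[ProtoNode]) -> Vec<ApiForkChoiceNode> {
        nodes
            .iter()
            .map(|node| ApiForkChoiceNode {
                slot: node.slot,
                block_root: node.root,
                // Resolve the parent index to the parent's root, as above.
                parent_root: node
                    .parent
                    .and_then(|index| nodes.get(index))
                    .map(|parent| parent.root),
                weight: node.weight,
            })
            .collect()
    }

    fn main() {
        let nodes = vec![
            ProtoNode { slot: 1, root: 100, parent: None, weight: 7 },
            ProtoNode { slot: 2, root: 200, parent: Some(0), weight: 3 },
        ];
        println!("{:?}", to_api_nodes(&nodes));
    }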
@@ -3470,7 +3546,7 @@ pub fn serve<T: BeaconChainTypes>(
         .and_then(|state_id: StateId, chain: Arc<BeaconChain<T>>| {
             blocking_response_task(move || {
                 // This debug endpoint provides no indication of optimistic status.
-                let (state, _execution_optimistic) = state_id.state(&chain)?;
+                let (state, _execution_optimistic, _finalized) = state_id.state(&chain)?;
                 Response::builder()
                     .status(200)
                     .header("Content-Type", "application/ssz")
@@ -3717,6 +3793,7 @@ pub fn serve<T: BeaconChainTypes>(
         .uor(get_config_deposit_contract)
         .uor(get_debug_beacon_states)
         .uor(get_debug_beacon_heads)
+        .uor(get_debug_fork_choice)
         .uor(get_node_identity)
         .uor(get_node_version)
         .uor(get_node_syncing)
@@ -209,7 +209,9 @@ fn compute_historic_proposer_duties<T: BeaconChainTypes>(
             .map_err(warp_utils::reject::beacon_chain_error)?;
         (state, execution_optimistic)
     } else {
-        StateId::from_slot(epoch.start_slot(T::EthSpec::slots_per_epoch())).state(chain)?
+        let (state, execution_optimistic, _finalized) =
+            StateId::from_slot(epoch.start_slot(T::EthSpec::slots_per_epoch())).state(chain)?;
+        (state, execution_optimistic)
     };

     // Ensure the state lookup was correct.
@@ -10,8 +10,8 @@ use warp_utils::reject::beacon_chain_error;
 pub fn compute_beacon_block_rewards<T: BeaconChainTypes>(
     chain: Arc<BeaconChain<T>>,
     block_id: BlockId,
-) -> Result<(StandardBlockReward, ExecutionOptimistic), warp::Rejection> {
-    let (block, execution_optimistic) = block_id.blinded_block(&chain)?;
+) -> Result<(StandardBlockReward, ExecutionOptimistic, bool), warp::Rejection> {
+    let (block, execution_optimistic, finalized) = block_id.blinded_block(&chain)?;

     let block_ref = block.message();
@@ -23,5 +23,5 @@ pub fn compute_beacon_block_rewards<T: BeaconChainTypes>(
         .compute_beacon_block_reward(block_ref, block_root, &mut state)
         .map_err(beacon_chain_error)?;

-    Ok((rewards, execution_optimistic))
+    Ok((rewards, execution_optimistic, finalized))
 }
@@ -10,6 +10,9 @@ use types::{BeaconState, Checkpoint, EthSpec, Fork, Hash256, Slot};
 #[derive(Debug)]
 pub struct StateId(pub CoreStateId);

+// More clarity when returning if the state is finalized or not in the root function.
+type Finalized = bool;
+
 impl StateId {
     pub fn from_slot(slot: Slot) -> Self {
         Self(CoreStateId::Slot(slot))
@@ -19,8 +22,8 @@ impl StateId {
     pub fn root<T: BeaconChainTypes>(
         &self,
         chain: &BeaconChain<T>,
-    ) -> Result<(Hash256, ExecutionOptimistic), warp::Rejection> {
-        let (slot, execution_optimistic) = match &self.0 {
+    ) -> Result<(Hash256, ExecutionOptimistic, Finalized), warp::Rejection> {
+        let (slot, execution_optimistic, finalized) = match &self.0 {
             CoreStateId::Head => {
                 let (cached_head, execution_status) = chain
                     .canonical_head
@@ -29,24 +32,36 @@ impl StateId {
                 return Ok((
                     cached_head.head_state_root(),
                     execution_status.is_optimistic_or_invalid(),
+                    false,
                 ));
             }
-            CoreStateId::Genesis => return Ok((chain.genesis_state_root, false)),
+            CoreStateId::Genesis => return Ok((chain.genesis_state_root, false, true)),
             CoreStateId::Finalized => {
                 let finalized_checkpoint =
                     chain.canonical_head.cached_head().finalized_checkpoint();
-                checkpoint_slot_and_execution_optimistic(chain, finalized_checkpoint)?
+                let (slot, execution_optimistic) =
+                    checkpoint_slot_and_execution_optimistic(chain, finalized_checkpoint)?;
+                (slot, execution_optimistic, true)
             }
             CoreStateId::Justified => {
                 let justified_checkpoint =
                     chain.canonical_head.cached_head().justified_checkpoint();
-                checkpoint_slot_and_execution_optimistic(chain, justified_checkpoint)?
+                let (slot, execution_optimistic) =
+                    checkpoint_slot_and_execution_optimistic(chain, justified_checkpoint)?;
+                (slot, execution_optimistic, false)
             }
             CoreStateId::Slot(slot) => (
                 *slot,
                 chain
                     .is_optimistic_or_invalid_head()
                     .map_err(warp_utils::reject::beacon_chain_error)?,
+                *slot
+                    <= chain
+                        .canonical_head
+                        .cached_head()
+                        .finalized_checkpoint()
+                        .epoch
+                        .start_slot(T::EthSpec::slots_per_epoch()),
             ),
             CoreStateId::Root(root) => {
                 if let Some(hot_summary) = chain
@@ -61,7 +76,10 @@ impl StateId {
                         .is_optimistic_or_invalid_block_no_fallback(&hot_summary.latest_block_root)
                         .map_err(BeaconChainError::ForkChoiceError)
                         .map_err(warp_utils::reject::beacon_chain_error)?;
-                    return Ok((*root, execution_optimistic));
+                    let finalized = chain
+                        .is_finalized_state(root, hot_summary.slot)
+                        .map_err(warp_utils::reject::beacon_chain_error)?;
+                    return Ok((*root, execution_optimistic, finalized));
                 } else if let Some(_cold_state_slot) = chain
                     .store
                     .load_cold_state_slot(root)
@@ -77,7 +95,7 @@ impl StateId {
                         .is_optimistic_or_invalid_block_no_fallback(&finalized_root)
                         .map_err(BeaconChainError::ForkChoiceError)
                         .map_err(warp_utils::reject::beacon_chain_error)?;
-                    return Ok((*root, execution_optimistic));
+                    return Ok((*root, execution_optimistic, true));
                 } else {
                     return Err(warp_utils::reject::custom_not_found(format!(
                         "beacon state for state root {}",
@@ -94,7 +112,7 @@ impl StateId {
                 warp_utils::reject::custom_not_found(format!("beacon state at slot {}", slot))
             })?;

-        Ok((root, execution_optimistic))
+        Ok((root, execution_optimistic, finalized))
     }

     /// Return the `fork` field of the state identified by `self`.
@@ -103,9 +121,25 @@ impl StateId {
         &self,
         chain: &BeaconChain<T>,
     ) -> Result<(Fork, bool), warp::Rejection> {
-        self.map_state_and_execution_optimistic(chain, |state, execution_optimistic| {
-            Ok((state.fork(), execution_optimistic))
-        })
+        self.map_state_and_execution_optimistic_and_finalized(
+            chain,
+            |state, execution_optimistic, _finalized| Ok((state.fork(), execution_optimistic)),
+        )
+    }
+
+    /// Return the `fork` field of the state identified by `self`.
+    /// Also returns the `execution_optimistic` value of the state.
+    /// Also returns the `finalized` value of the state.
+    pub fn fork_and_execution_optimistic_and_finalized<T: BeaconChainTypes>(
+        &self,
+        chain: &BeaconChain<T>,
+    ) -> Result<(Fork, bool, bool), warp::Rejection> {
+        self.map_state_and_execution_optimistic_and_finalized(
+            chain,
+            |state, execution_optimistic, finalized| {
+                Ok((state.fork(), execution_optimistic, finalized))
+            },
+        )
     }

     /// Convenience function to compute `fork` when `execution_optimistic` isn't desired.
@@ -121,8 +155,8 @@ impl StateId {
     pub fn state<T: BeaconChainTypes>(
         &self,
         chain: &BeaconChain<T>,
-    ) -> Result<(BeaconState<T::EthSpec>, ExecutionOptimistic), warp::Rejection> {
-        let ((state_root, execution_optimistic), slot_opt) = match &self.0 {
+    ) -> Result<(BeaconState<T::EthSpec>, ExecutionOptimistic, Finalized), warp::Rejection> {
+        let ((state_root, execution_optimistic, finalized), slot_opt) = match &self.0 {
             CoreStateId::Head => {
                 let (cached_head, execution_status) = chain
                     .canonical_head
@@ -134,6 +168,7 @@ impl StateId {
                         .beacon_state
                         .clone_with_only_committee_caches(),
                     execution_status.is_optimistic_or_invalid(),
+                    false,
                 ));
             }
             CoreStateId::Slot(slot) => (self.root(chain)?, Some(*slot)),
@@ -152,24 +187,25 @@ impl StateId {
                 })
             })?;

-        Ok((state, execution_optimistic))
+        Ok((state, execution_optimistic, finalized))
     }

     /// Map a function across the `BeaconState` identified by `self`.
     ///
-    /// The optimistic status of the requested state is also provided to the `func` closure.
+    /// The optimistic and finalization status of the requested state is also provided to the `func`
+    /// closure.
     ///
     /// This function will avoid instantiating/copying a new state when `self` points to the head
     /// of the chain.
-    pub fn map_state_and_execution_optimistic<T: BeaconChainTypes, F, U>(
+    pub fn map_state_and_execution_optimistic_and_finalized<T: BeaconChainTypes, F, U>(
         &self,
         chain: &BeaconChain<T>,
         func: F,
     ) -> Result<U, warp::Rejection>
     where
-        F: Fn(&BeaconState<T::EthSpec>, bool) -> Result<U, warp::Rejection>,
+        F: Fn(&BeaconState<T::EthSpec>, bool, bool) -> Result<U, warp::Rejection>,
     {
-        let (state, execution_optimistic) = match &self.0 {
+        let (state, execution_optimistic, finalized) = match &self.0 {
             CoreStateId::Head => {
                 let (head, execution_status) = chain
                     .canonical_head
@@ -178,12 +214,13 @@ impl StateId {
                 return func(
                     &head.snapshot.beacon_state,
                     execution_status.is_optimistic_or_invalid(),
+                    false,
                 );
             }
             _ => self.state(chain)?,
         };

-        func(&state, execution_optimistic)
+        func(&state, execution_optimistic, finalized)
     }
 }
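The mapping helper threads both flags into a caller-supplied closure so the head path can avoid cloning the state. A self-contained sketch of that closure-passing shape, generic over the closure's return type, with a toy state type standing in for `BeaconState` (the flag values are hard-coded for the sketch):

    struct State { slot: u64 }

    /// Sketch of the `map_state_and_execution_optimistic_and_finalized` shape:
    /// the closure receives the state by reference plus both status flags, and
    /// the helper decides whether the cheap head path or a full lookup is used.
    fn map_state<F, U>(head: &State, is_head: bool, func: F) -> Result<U, String>
    where
        F: Fn(&State, bool, bool) -> Result<U, String>,
    {
        if is_head {
            // Head path: no clone; the head is never finalized.
            return func(head, false, false);
        }
        // Non-head path: a real implementation would load the state here.
        let state = State { slot: head.slot - 1 };
        func(&state, false, true)
    }

    fn main() -> Result<(), String> {
        let head = State { slot: 64 };
        let (slot, finalized) =
            map_state(&head, true, |state, _opt, fin| Ok((state.slot, fin)))?;
        println!("slot={slot}, finalized={finalized}");
        Ok(())
    }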
@@ -13,8 +13,8 @@ pub fn compute_sync_committee_rewards<T: BeaconChainTypes>(
     block_id: BlockId,
     validators: Vec<ValidatorId>,
     log: Logger,
-) -> Result<(Option<Vec<SyncCommitteeReward>>, ExecutionOptimistic), warp::Rejection> {
-    let (block, execution_optimistic) = block_id.blinded_block(&chain)?;
+) -> Result<(Option<Vec<SyncCommitteeReward>>, ExecutionOptimistic, bool), warp::Rejection> {
+    let (block, execution_optimistic, finalized) = block_id.blinded_block(&chain)?;

     let mut state = get_state_before_applying_block(chain.clone(), &block)?;

@@ -44,7 +44,7 @@ pub fn compute_sync_committee_rewards<T: BeaconChainTypes>(
         )
     };

-    Ok((data, execution_optimistic))
+    Ok((data, execution_optimistic, finalized))
 }

 pub fn get_state_before_applying_block<T: BeaconChainTypes>(
@@ -1,10 +1,10 @@
+use crate::{Config, Context};
 use beacon_chain::{
     test_utils::{BeaconChainHarness, BoxedMutator, Builder, EphemeralHarnessType},
     BeaconChain, BeaconChainTypes,
 };
 use directory::DEFAULT_ROOT_DIR;
 use eth2::{BeaconNodeHttpClient, Timeouts};
-use http_api::{Config, Context};
 use lighthouse_network::{
     discv5::enr::{CombinedKey, EnrBuilder},
     libp2p::{
@@ -179,7 +179,7 @@ pub async fn create_api_server_on_port<T: BeaconChainTypes>(
     let eth1_service =
         eth1::Service::new(eth1::Config::default(), log.clone(), chain.spec.clone()).unwrap();

-    let context = Arc::new(Context {
+    let ctx = Arc::new(Context {
         config: Config {
             enabled: true,
             listen_addr: IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)),
@@ -190,19 +190,19 @@ pub async fn create_api_server_on_port<T: BeaconChainTypes>(
             data_dir: std::path::PathBuf::from(DEFAULT_ROOT_DIR),
             spec_fork_name: None,
         },
-        chain: Some(chain.clone()),
+        chain: Some(chain),
         network_senders: Some(network_senders),
         network_globals: Some(network_globals),
         eth1_service: Some(eth1_service),
         log,
     });
-    let ctx = context.clone();

     let (shutdown_tx, shutdown_rx) = oneshot::channel();
     let server_shutdown = async {
         // It's not really interesting why this triggered, just that it happened.
         let _ = shutdown_rx.await;
     };
-    let (listening_socket, server) = http_api::serve(ctx, server_shutdown).unwrap();
+    let (listening_socket, server) = crate::serve(ctx, server_shutdown).unwrap();

     ApiServer {
         server,
@@ -18,7 +18,7 @@ fn end_of_epoch_state<T: BeaconChainTypes>(
     let target_slot = epoch.end_slot(T::EthSpec::slots_per_epoch());
     // The execution status is not returned, any functions which rely upon this method might return
     // optimistic information without explicitly declaring so.
-    let (state, _execution_status) = StateId::from_slot(target_slot).state(chain)?;
+    let (state, _execution_status, _finalized) = StateId::from_slot(target_slot).state(chain)?;
     Ok(state)
 }
@@ -1,9 +1,8 @@
+use crate::api_types::fork_versioned_response::ExecutionOptimisticFinalizedForkVersionedResponse;
 use crate::api_types::EndpointVersion;
 use eth2::CONSENSUS_VERSION_HEADER;
 use serde::Serialize;
-use types::{
-    ExecutionOptimisticForkVersionedResponse, ForkName, ForkVersionedResponse, InconsistentFork,
-};
+use types::{ForkName, ForkVersionedResponse, InconsistentFork};
 use warp::reply::{self, Reply, Response};

 pub const V1: EndpointVersion = EndpointVersion(1);
@@ -27,12 +26,13 @@ pub fn fork_versioned_response<T: Serialize>(
     })
 }

-pub fn execution_optimistic_fork_versioned_response<T: Serialize>(
+pub fn execution_optimistic_finalized_fork_versioned_response<T: Serialize>(
     endpoint_version: EndpointVersion,
     fork_name: ForkName,
     execution_optimistic: bool,
+    finalized: bool,
     data: T,
-) -> Result<ExecutionOptimisticForkVersionedResponse<T>, warp::reject::Rejection> {
+) -> Result<ExecutionOptimisticFinalizedForkVersionedResponse<T>, warp::reject::Rejection> {
     let fork_name = if endpoint_version == V1 {
         None
     } else if endpoint_version == V2 {
@@ -40,9 +40,10 @@ pub fn execution_optimistic_finalized_fork_versioned_response<T: Serialize>(
     } else {
         return Err(unsupported_version_rejection(endpoint_version));
     };
-    Ok(ExecutionOptimisticForkVersionedResponse {
+    Ok(ExecutionOptimisticFinalizedForkVersionedResponse {
         version: fork_name,
         execution_optimistic: Some(execution_optimistic),
+        finalized: Some(finalized),
         data,
     })
 }
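The helper gates the `version` field on the endpoint version: V1 responses omit it, V2 responses carry the fork name, anything else is rejected. A self-contained sketch of that gating with local stand-in types (not the real `EndpointVersion` or response struct):

    #[derive(Debug, PartialEq)]
    struct EndpointVersion(u64);

    #[derive(Debug)]
    struct VersionedResponse<T> {
        version: Option<&'static str>,
        execution_optimistic: Option<bool>,
        finalized: Option<bool>,
        data: T,
    }

    /// Sketch of the version gating above: V1 omits the fork version,
    /// V2 includes it, and any other version is an error.
    fn build_response<T>(
        endpoint_version: EndpointVersion,
        fork_name: &'static str,
        execution_optimistic: bool,
        finalized: bool,
        data: T,
    ) -> Result<VersionedResponse<T>, String> {
        let version = if endpoint_version == EndpointVersion(1) {
            None
        } else if endpoint_version == EndpointVersion(2) {
            Some(fork_name)
        } else {
            return Err(format!("unsupported endpoint version {:?}", endpoint_version));
        };
        Ok(VersionedResponse {
            version,
            execution_optimistic: Some(execution_optimistic),
            finalized: Some(finalized),
            data,
        })
    }

    fn main() {
        let resp = build_response(EndpointVersion(2), "capella", false, true, 42).unwrap();
        println!("{:?}", resp);
    }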
@@ -1,11 +1,11 @@
 //! Tests for API behaviour across fork boundaries.
-use crate::common::*;
 use beacon_chain::{
     test_utils::{RelativeSyncCommittee, DEFAULT_ETH1_BLOCK_HASH, HARNESS_GENESIS_TIME},
     StateSkipConfig,
 };
 use eth2::types::{IndexedErrorMessage, StateId, SyncSubcommittee};
 use genesis::{bls_withdrawal_credentials, interop_genesis_state_with_withdrawal_credentials};
+use http_api::test_utils::*;
 use std::collections::HashSet;
 use types::{
     test_utils::{generate_deterministic_keypair, generate_deterministic_keypairs},
@@ -1,11 +1,11 @@
 //! Generic tests that make use of the (newer) `InteractiveApiTester`
-use crate::common::*;
 use beacon_chain::{
     chain_config::ReOrgThreshold,
     test_utils::{AttestationStrategy, BlockStrategy, SyncCommitteeStrategy},
 };
 use eth2::types::DepositContractData;
 use execution_layer::{ForkchoiceState, PayloadAttributes};
+use http_api::test_utils::InteractiveTester;
 use parking_lot::Mutex;
 use slot_clock::SlotClock;
 use state_processing::{
@@ -1,6 +1,5 @@
 #![cfg(not(debug_assertions))] // Tests are too slow in debug.

-pub mod common;
 pub mod fork_tests;
 pub mod interactive_tests;
 pub mod tests;
@@ -1,4 +1,3 @@
-use crate::common::{create_api_server, create_api_server_on_port, ApiServer};
 use beacon_chain::test_utils::RelativeSyncCommittee;
 use beacon_chain::{
     test_utils::{AttestationStrategy, BeaconChainHarness, BlockStrategy, EphemeralHarnessType},
@@ -8,7 +7,7 @@ use environment::null_logger;
 use eth2::{
     mixin::{RequestAccept, ResponseForkName, ResponseOptional},
     reqwest::RequestBuilder,
-    types::{BlockId as CoreBlockId, StateId as CoreStateId, *},
+    types::{BlockId as CoreBlockId, ForkChoiceNode, StateId as CoreStateId, *},
     BeaconNodeHttpClient, Error, StatusCode, Timeouts,
 };
 use execution_layer::test_utils::TestingBuilder;
@@ -18,7 +17,10 @@ use execution_layer::test_utils::{
 };
 use futures::stream::{Stream, StreamExt};
 use futures::FutureExt;
-use http_api::{BlockId, StateId};
+use http_api::{
+    test_utils::{create_api_server, create_api_server_on_port, ApiServer},
+    BlockId, StateId,
+};
 use lighthouse_network::{Enr, EnrExt, PeerId};
 use network::NetworkReceivers;
 use proto_array::ExecutionStatus;
@@ -466,6 +468,264 @@ impl ApiTester {
        self
    }

    // finalization tests
    pub async fn test_beacon_states_root_finalized(self) -> Self {
        for state_id in self.interesting_state_ids() {
            let state_root = state_id.root(&self.chain);
            let state = state_id.state(&self.chain);

            // If .root or .state fail, skip the test. Those would be errors outside the
            // scope of this test; here we're testing the finalized field assuming the call to
            // .is_finalized_state occurs after the state_root and state calls, and that those
            // calls were correct.
            if state_root.is_err() || state.is_err() {
                continue;
            }

            // Now that we know the state is valid, we can unwrap() everything we need.
            let result = self
                .client
                .get_beacon_states_root(state_id.0)
                .await
                .unwrap()
                .unwrap()
                .finalized
                .unwrap();

            let (state_root, _, _) = state_root.unwrap();
            let (state, _, _) = state.unwrap();
            let state_slot = state.slot();
            let expected = self
                .chain
                .is_finalized_state(&state_root, state_slot)
                .unwrap();

            assert_eq!(result, expected, "{:?}", state_id);
        }

        self
    }

    pub async fn test_beacon_states_fork_finalized(self) -> Self {
        for state_id in self.interesting_state_ids() {
            let state_root = state_id.root(&self.chain);
            let state = state_id.state(&self.chain);

            // If .root or .state fail, skip the test. Those would be errors outside the
            // scope of this test; here we're testing the finalized field assuming the call to
            // .is_finalized_state occurs after the state_root and state calls, and that those
            // calls were correct.
            if state_root.is_err() || state.is_err() {
                continue;
            }

            // Now that we know the state is valid, we can unwrap() everything we need.
            let result = self
                .client
                .get_beacon_states_fork(state_id.0)
                .await
                .unwrap()
                .unwrap()
                .finalized
                .unwrap();

            let (state_root, _, _) = state_root.unwrap();
            let (state, _, _) = state.unwrap();
            let state_slot = state.slot();
            let expected = self
                .chain
                .is_finalized_state(&state_root, state_slot)
                .unwrap();

            assert_eq!(result, expected, "{:?}", state_id);
        }

        self
    }

    pub async fn test_beacon_states_finality_checkpoints_finalized(self) -> Self {
        for state_id in self.interesting_state_ids() {
            let state_root = state_id.root(&self.chain);
            let state = state_id.state(&self.chain);

            // If .root or .state fail, skip the test. Those would be errors outside the
            // scope of this test; here we're testing the finalized field assuming the call to
            // .is_finalized_state occurs after the state_root and state calls, and that those
            // calls were correct.
            if state_root.is_err() || state.is_err() {
                continue;
            }

            // Now that we know the state is valid, we can unwrap() everything we need.
            let result = self
                .client
                .get_beacon_states_finality_checkpoints(state_id.0)
                .await
                .unwrap()
                .unwrap()
                .finalized
                .unwrap();

            let (state_root, _, _) = state_root.unwrap();
            let (state, _, _) = state.unwrap();
            let state_slot = state.slot();
            let expected = self
                .chain
                .is_finalized_state(&state_root, state_slot)
                .unwrap();

            assert_eq!(result, expected, "{:?}", state_id);
        }

        self
    }

    pub async fn test_beacon_headers_block_id_finalized(self) -> Self {
        for block_id in self.interesting_block_ids() {
            let block_root = block_id.root(&self.chain);
            let block = block_id.full_block(&self.chain).await;

            // If .root or .full_block fail, skip the test. Those would be errors outside the
            // scope of this test; here we're testing the finalized field assuming the call to
            // .is_finalized_block occurs after those calls, and that they were correct.
            if block_root.is_err() || block.is_err() {
                continue;
            }

            // Now that we know the block is valid, we can unwrap() everything we need.
            let result = self
                .client
                .get_beacon_headers_block_id(block_id.0)
                .await
                .unwrap()
                .unwrap()
                .finalized
                .unwrap();

            let (block_root, _, _) = block_root.unwrap();
            let (block, _, _) = block.unwrap();
            let block_slot = block.slot();
            let expected = self
                .chain
                .is_finalized_block(&block_root, block_slot)
                .unwrap();

            assert_eq!(result, expected, "{:?}", block_id);
        }

        self
    }

    pub async fn test_beacon_blocks_finalized<T: EthSpec>(self) -> Self {
        for block_id in self.interesting_block_ids() {
            let block_root = block_id.root(&self.chain);
            let block = block_id.full_block(&self.chain).await;

            // If .root or .full_block fail, skip the test. Those would be errors outside the
            // scope of this test; here we're testing the finalized field assuming the call to
            // .is_finalized_block occurs after those calls, and that they were correct.
            if block_root.is_err() || block.is_err() {
                continue;
            }

            // Now that we know the block is valid, we can unwrap() everything we need.
            let result = self
                .client
                .get_beacon_blocks::<MainnetEthSpec>(block_id.0)
                .await
                .unwrap()
                .unwrap()
                .finalized
                .unwrap();

            let (block_root, _, _) = block_root.unwrap();
            let (block, _, _) = block.unwrap();
            let block_slot = block.slot();
            let expected = self
                .chain
                .is_finalized_block(&block_root, block_slot)
                .unwrap();

            assert_eq!(result, expected, "{:?}", block_id);
        }

        self
    }

    pub async fn test_beacon_blinded_blocks_finalized<T: EthSpec>(self) -> Self {
        for block_id in self.interesting_block_ids() {
            let block_root = block_id.root(&self.chain);
            let block = block_id.full_block(&self.chain).await;

            // If .root or .full_block fail, skip the test. Those would be errors outside the
            // scope of this test; here we're testing the finalized field assuming the call to
            // .is_finalized_block occurs after those calls, and that they were correct.
            if block_root.is_err() || block.is_err() {
                continue;
            }

            // Now that we know the block is valid, we can unwrap() everything we need.
            let result = self
                .client
                .get_beacon_blinded_blocks::<MainnetEthSpec>(block_id.0)
                .await
                .unwrap()
                .unwrap()
                .finalized
                .unwrap();

            let (block_root, _, _) = block_root.unwrap();
            let (block, _, _) = block.unwrap();
            let block_slot = block.slot();
            let expected = self
                .chain
                .is_finalized_block(&block_root, block_slot)
                .unwrap();

            assert_eq!(result, expected, "{:?}", block_id);
        }

        self
    }

    pub async fn test_debug_beacon_states_finalized(self) -> Self {
        for state_id in self.interesting_state_ids() {
            let state_root = state_id.root(&self.chain);
            let state = state_id.state(&self.chain);

            // If .root or .state fail, skip the test. Those would be errors outside the
            // scope of this test; here we're testing the finalized field assuming the call to
            // .is_finalized_state occurs after the state_root and state calls, and that those
            // calls were correct.
            if state_root.is_err() || state.is_err() {
                continue;
            }

            // Now that we know the state is valid, we can unwrap() everything we need.
            let result = self
                .client
                .get_debug_beacon_states::<MainnetEthSpec>(state_id.0)
                .await
                .unwrap()
                .unwrap()
                .finalized
                .unwrap();

            let (state_root, _, _) = state_root.unwrap();
            let (state, _, _) = state.unwrap();
            let state_slot = state.slot();
            let expected = self
                .chain
                .is_finalized_state(&state_root, state_slot)
                .unwrap();

            assert_eq!(result, expected, "{:?}", state_id);
        }

        self
    }

    pub async fn test_beacon_states_root(self) -> Self {
        for state_id in self.interesting_state_ids() {
            let result = self
@@ -478,7 +738,7 @@
            let expected = state_id
                .root(&self.chain)
                .ok()
-                .map(|(root, _execution_optimistic)| root);
+                .map(|(root, _execution_optimistic, _finalized)| root);

            assert_eq!(result, expected, "{:?}", state_id);
        }
@@ -512,15 +772,13 @@
                .unwrap()
                .map(|res| res.data);

-            let expected =
-                state_id
-                    .state(&self.chain)
-                    .ok()
-                    .map(|(state, _execution_optimistic)| FinalityCheckpointsData {
-                        previous_justified: state.previous_justified_checkpoint(),
-                        current_justified: state.current_justified_checkpoint(),
-                        finalized: state.finalized_checkpoint(),
-                    });
+            let expected = state_id.state(&self.chain).ok().map(
+                |(state, _execution_optimistic, _finalized)| FinalityCheckpointsData {
+                    previous_justified: state.previous_justified_checkpoint(),
+                    current_justified: state.current_justified_checkpoint(),
+                    finalized: state.finalized_checkpoint(),
+                },
+            );

            assert_eq!(result, expected, "{:?}", state_id);
        }
@@ -533,7 +791,9 @@
            for validator_indices in self.interesting_validator_indices() {
                let state_opt = state_id.state(&self.chain).ok();
                let validators: Vec<Validator> = match state_opt.as_ref() {
-                    Some((state, _execution_optimistic)) => state.validators().clone().into(),
+                    Some((state, _execution_optimistic, _finalized)) => {
+                        state.validators().clone().into()
+                    }
                    None => vec![],
                };
                let validator_index_ids = validator_indices
@@ -572,7 +832,7 @@
                    .unwrap()
                    .map(|res| res.data);

-                let expected = state_opt.map(|(state, _execution_optimistic)| {
+                let expected = state_opt.map(|(state, _execution_optimistic, _finalized)| {
                    let mut validators = Vec::with_capacity(validator_indices.len());

                    for i in validator_indices {
@@ -602,7 +862,7 @@
            let state_opt = state_id
                .state(&self.chain)
                .ok()
-                .map(|(state, _execution_optimistic)| state);
+                .map(|(state, _execution_optimistic, _finalized)| state);
            let validators: Vec<Validator> = match state_opt.as_ref() {
                Some(state) => state.validators().clone().into(),
                None => vec![],
@@ -692,7 +952,7 @@
            let state_opt = state_id
                .state(&self.chain)
                .ok()
-                .map(|(state, _execution_optimistic)| state);
+                .map(|(state, _execution_optimistic, _finalized)| state);
            let validators = match state_opt.as_ref() {
                Some(state) => state.validators().clone().into(),
                None => vec![],
@@ -747,7 +1007,7 @@
            let mut state_opt = state_id
                .state(&self.chain)
                .ok()
-                .map(|(state, _execution_optimistic)| state);
+                .map(|(state, _execution_optimistic, _finalized)| state);

            let epoch_opt = state_opt.as_ref().map(|state| state.current_epoch());
            let results = self
@@ -794,7 +1054,7 @@
            let mut state_opt = state_id
                .state(&self.chain)
                .ok()
-                .map(|(state, _execution_optimistic)| state);
+                .map(|(state, _execution_optimistic, _finalized)| state);

            let epoch_opt = state_opt.as_ref().map(|state| state.current_epoch());
            let result = self
@@ -904,7 +1164,7 @@
            let block_root_opt = block_id
                .root(&self.chain)
                .ok()
-                .map(|(root, _execution_optimistic)| root);
+                .map(|(root, _execution_optimistic, _finalized)| root);

            if let CoreBlockId::Slot(slot) = block_id.0 {
                if block_root_opt.is_none() {
@@ -918,7 +1178,7 @@
                .full_block(&self.chain)
                .await
                .ok()
-                .map(|(block, _execution_optimistic)| block);
+                .map(|(block, _execution_optimistic, _finalized)| block);

            if block_opt.is_none() && result.is_none() {
                continue;
@@ -964,7 +1224,7 @@
            let expected = block_id
                .root(&self.chain)
                .ok()
-                .map(|(root, _execution_optimistic)| root);
+                .map(|(root, _execution_optimistic, _finalized)| root);
            if let CoreBlockId::Slot(slot) = block_id.0 {
                if expected.is_none() {
                    assert!(SKIPPED_SLOTS.contains(&slot.as_u64()));
@@ -1015,7 +1275,7 @@
                .full_block(&self.chain)
                .await
                .ok()
-                .map(|(block, _execution_optimistic)| block);
+                .map(|(block, _execution_optimistic, _finalized)| block);

            if let CoreBlockId::Slot(slot) = block_id.0 {
                if expected.is_none() {
@@ -1099,7 +1359,7 @@
            let expected = block_id
                .blinded_block(&self.chain)
                .ok()
-                .map(|(block, _execution_optimistic)| block);
+                .map(|(block, _execution_optimistic, _finalized)| block);

            if let CoreBlockId::Slot(slot) = block_id.0 {
                if expected.is_none() {
@@ -1180,7 +1440,7 @@
                .map(|res| res.data);

            let expected = block_id.full_block(&self.chain).await.ok().map(
-                |(block, _execution_optimistic)| {
+                |(block, _execution_optimistic, _finalized)| {
                    block.message().body().attestations().clone().into()
                },
            );
@@ -1601,7 +1861,7 @@
            let mut expected = state_id
                .state(&self.chain)
                .ok()
-                .map(|(state, _execution_optimistic)| state);
+                .map(|(state, _execution_optimistic, _finalized)| state);
            expected.as_mut().map(|state| state.drop_all_caches());

            if let (Some(json), Some(expected)) = (&result_json, &expected) {
@@ -1687,6 +1947,59 @@ impl ApiTester {
        self
    }

    pub async fn test_get_debug_fork_choice(self) -> Self {
        let result = self.client.get_debug_fork_choice().await.unwrap();

        let beacon_fork_choice = self.chain.canonical_head.fork_choice_read_lock();

        let expected_proto_array = beacon_fork_choice.proto_array().core_proto_array();

        assert_eq!(
            result.justified_checkpoint,
            expected_proto_array.justified_checkpoint
        );
        assert_eq!(
            result.finalized_checkpoint,
            expected_proto_array.finalized_checkpoint
        );

        let expected_fork_choice_nodes: Vec<ForkChoiceNode> = expected_proto_array
            .nodes
            .iter()
            .map(|node| {
                let execution_status = if node.execution_status.is_execution_enabled() {
                    Some(node.execution_status.to_string())
                } else {
                    None
                };
                ForkChoiceNode {
                    slot: node.slot,
                    block_root: node.root,
                    parent_root: node
                        .parent
                        .and_then(|index| expected_proto_array.nodes.get(index))
                        .map(|parent| parent.root),
                    justified_epoch: node.justified_checkpoint.map(|checkpoint| checkpoint.epoch),
                    finalized_epoch: node.finalized_checkpoint.map(|checkpoint| checkpoint.epoch),
                    weight: node.weight,
                    validity: execution_status,
                    execution_block_hash: node
                        .execution_status
                        .block_hash()
                        .map(|block_hash| block_hash.into_root()),
                }
            })
            .collect();

        assert_eq!(result.fork_choice_nodes, expected_fork_choice_nodes);

        // We need to drop `beacon_fork_choice` here, otherwise the borrow checker complains
        // that `self` cannot be moved out while `beacon_fork_choice` (which borrows
        // `self.chain`) is still alive.
        drop(beacon_fork_choice);
        self
    }

    fn validator_count(&self) -> usize {
        self.chain.head_snapshot().beacon_state.validators().len()
    }
@@ -3620,7 +3933,7 @@
        let mut expected = state_id
            .state(&self.chain)
            .ok()
-            .map(|(state, _execution_optimistic)| state);
+            .map(|(state, _execution_optimistic, _finalized)| state);
        expected.as_mut().map(|state| state.drop_all_caches());

        assert_eq!(result, expected, "{:?}", state_id);
@@ -4032,6 +4345,20 @@ async fn beacon_get() {
        .await
        .test_beacon_genesis()
        .await
        .test_beacon_states_root_finalized()
        .await
        .test_beacon_states_fork_finalized()
        .await
        .test_beacon_states_finality_checkpoints_finalized()
        .await
        .test_beacon_headers_block_id_finalized()
        .await
        .test_beacon_blocks_finalized::<MainnetEthSpec>()
        .await
        .test_beacon_blinded_blocks_finalized::<MainnetEthSpec>()
        .await
        .test_debug_beacon_states_finalized()
        .await
        .test_beacon_states_root()
        .await
        .test_beacon_states_fork()
@@ -4168,6 +4495,8 @@ async fn debug_get() {
        .test_get_debug_beacon_states()
        .await
        .test_get_debug_beacon_heads()
        .await
        .test_get_debug_fork_choice()
        .await;
}

@@ -207,7 +207,7 @@ impl<TSpec: EthSpec> Discovery<TSpec> {
        let local_node_id = local_enr.node_id();

        info!(log, "ENR Initialised"; "enr" => local_enr.to_base64(), "seq" => local_enr.seq(), "id"=> %local_enr.node_id(),
-            "ip4" => ?local_enr.ip4(), "udp4"=> ?local_enr.udp4(), "tcp4" => ?local_enr.tcp6()
+            "ip4" => ?local_enr.ip4(), "udp4"=> ?local_enr.udp4(), "tcp4" => ?local_enr.tcp4(), "tcp6" => ?local_enr.tcp6(), "udp6" => ?local_enr.udp6()
        );
        let listen_socket = match config.listen_addrs() {
            crate::listen_addr::ListenAddress::V4(v4_addr) => v4_addr.udp_socket_addr(),

@@ -290,11 +290,20 @@ impl<TSpec: EthSpec> PeerManager<TSpec> {

            // If a peer is being banned, this trumps any temporary ban the peer might be
            // under. We no longer track it in the temporary ban list.
-            self.temporary_banned_peers.raw_remove(peer_id);
-
-            // Inform the Swarm to ban the peer
-            self.events
-                .push(PeerManagerEvent::Banned(*peer_id, banned_ips));
+            if !self.temporary_banned_peers.raw_remove(peer_id) {
+                // If the peer is not already banned, inform the Swarm to ban the peer
+                self.events
+                    .push(PeerManagerEvent::Banned(*peer_id, banned_ips));
+                // If the peer was in the process of being un-banned, remove it (a rare race
+                // condition)
+                self.events.retain(|event| {
+                    if let PeerManagerEvent::UnBanned(unbanned_peer_id, _) = event {
+                        unbanned_peer_id != peer_id // Remove matching peer ids
+                    } else {
+                        true
+                    }
+                });
+            }
        }
    }
}
@@ -562,8 +571,8 @@ impl<TSpec: EthSpec> PeerManager<TSpec> {
                Protocol::BlobsByRoot => return,
                Protocol::Goodbye => return,
                Protocol::LightClientBootstrap => return,
-                Protocol::MetaData => PeerAction::LowToleranceError,
-                Protocol::Status => PeerAction::LowToleranceError,
+                Protocol::MetaData => PeerAction::Fatal,
+                Protocol::Status => PeerAction::Fatal,
            }
        }
        RPCError::StreamTimeout => match direction {

@@ -156,8 +156,10 @@ impl<TSpec: EthSpec> PeerManager<TSpec> {
            BanResult::BadScore => {
                // This is a faulty state
                error!(self.log, "Connected to a banned peer. Re-banning"; "peer_id" => %peer_id);
-                // Reban the peer
+                // Disconnect the peer.
+                self.goodbye_peer(&peer_id, GoodbyeReason::Banned, ReportSource::PeerManager);
+                // Re-ban the peer to prevent repeated errors.
                self.events.push(PeerManagerEvent::Banned(peer_id, vec![]));
                return;
            }
            BanResult::BannedIp(ip_addr) => {

@@ -1128,7 +1128,7 @@ impl<AppReqId: ReqId, TSpec: EthSpec> Network<AppReqId, TSpec> {
                    debug!(self.log, "Peer does not support gossipsub"; "peer_id" => %peer_id);
                    self.peer_manager_mut().report_peer(
                        &peer_id,
-                        PeerAction::LowToleranceError,
+                        PeerAction::Fatal,
                        ReportSource::Gossipsub,
                        Some(GoodbyeReason::Unknown),
                        "does_not_support_gossipsub",

@@ -63,6 +63,7 @@ use std::time::Duration;
use std::{cmp, collections::HashSet};
use task_executor::TaskExecutor;
use tokio::sync::mpsc;
+use tokio::sync::mpsc::error::TrySendError;
use types::{
    Attestation, AttesterSlashing, Hash256, LightClientFinalityUpdate, LightClientOptimisticUpdate,
    ProposerSlashing, SignedAggregateAndProof, SignedBeaconBlock, SignedBlobSidecar,
@@ -80,7 +81,9 @@ mod tests;
mod work_reprocessing_queue;
mod worker;

-use crate::beacon_processor::work_reprocessing_queue::QueuedGossipBlock;
+use crate::beacon_processor::work_reprocessing_queue::{
+    QueuedBackfillBatch, QueuedGossipBlock, ReprocessQueueMessage,
+};
pub use worker::{ChainSegmentProcessId, GossipAggregatePackage, GossipAttestationPackage};

/// The maximum size of the channel for work events to the `BeaconProcessor`.
@@ -230,6 +233,7 @@ pub const GOSSIP_LIGHT_CLIENT_FINALITY_UPDATE: &str = "light_client_finality_upd
pub const GOSSIP_LIGHT_CLIENT_OPTIMISTIC_UPDATE: &str = "light_client_optimistic_update";
pub const RPC_BLOCK: &str = "rpc_block";
pub const CHAIN_SEGMENT: &str = "chain_segment";
+pub const CHAIN_SEGMENT_BACKFILL: &str = "chain_segment_backfill";
pub const STATUS_PROCESSING: &str = "status_processing";
pub const BLOCKS_BY_RANGE_REQUEST: &str = "blocks_by_range_request";
pub const BLOCKS_BY_ROOTS_REQUEST: &str = "blocks_by_roots_request";
@@ -804,6 +808,9 @@ impl<T: BeaconChainTypes> std::convert::From<ReadyWork<T>> for WorkEvent<T> {
                seen_timestamp,
            },
        },
+            ReadyWork::BackfillSync(QueuedBackfillBatch { process_id, blocks }) => {
+                WorkEvent::chain_segment(process_id, blocks)
+            }
        }
    }
}
@@ -978,6 +985,10 @@ impl<T: BeaconChainTypes> Work<T> {
            Work::GossipLightClientFinalityUpdate { .. } => GOSSIP_LIGHT_CLIENT_FINALITY_UPDATE,
            Work::GossipLightClientOptimisticUpdate { .. } => GOSSIP_LIGHT_CLIENT_OPTIMISTIC_UPDATE,
            Work::RpcBlock { .. } => RPC_BLOCK,
+            Work::ChainSegment {
+                process_id: ChainSegmentProcessId::BackSyncBatchId { .. },
+                ..
+            } => CHAIN_SEGMENT_BACKFILL,
            Work::ChainSegment { .. } => CHAIN_SEGMENT,
            Work::Status { .. } => STATUS_PROCESSING,
            Work::BlocksByRangeRequest { .. } => BLOCKS_BY_RANGE_REQUEST,
@@ -1145,23 +1156,23 @@ impl<T: BeaconChainTypes> BeaconProcessor<T> {
            FifoQueue::new(MAX_BLS_TO_EXECUTION_CHANGE_QUEUE_LEN);

        let mut lcbootstrap_queue = FifoQueue::new(MAX_LIGHT_CLIENT_BOOTSTRAP_QUEUE_LEN);

+        let chain = match self.beacon_chain.upgrade() {
+            Some(chain) => chain,
+            // No need to proceed any further if the beacon chain has been dropped, the client
+            // is shutting down.
+            None => return,
+        };

        // Channels for sending work to the re-process scheduler (`work_reprocessing_tx`) and to
        // receive them back once they are ready (`ready_work_rx`).
        let (ready_work_tx, ready_work_rx) = mpsc::channel(MAX_SCHEDULED_WORK_QUEUE_LEN);
-        let work_reprocessing_tx = {
-            if let Some(chain) = self.beacon_chain.upgrade() {
-                spawn_reprocess_scheduler(
-                    ready_work_tx,
-                    &self.executor,
-                    chain.slot_clock.clone(),
-                    self.log.clone(),
-                )
-            } else {
-                // No need to proceed any further if the beacon chain has been dropped, the client
-                // is shutting down.
-                return;
-            }
-        };
+        let work_reprocessing_tx = spawn_reprocess_scheduler(
+            ready_work_tx,
+            &self.executor,
+            chain.slot_clock.clone(),
+            self.log.clone(),
+        );

        let executor = self.executor.clone();

@@ -1174,12 +1185,55 @@ impl<T: BeaconChainTypes> BeaconProcessor<T> {
            reprocess_work_rx: ready_work_rx,
        };

        let enable_backfill_rate_limiting = chain.config.enable_backfill_rate_limiting;

        loop {
            let work_event = match inbound_events.next().await {
                Some(InboundEvent::WorkerIdle) => {
                    self.current_workers = self.current_workers.saturating_sub(1);
                    None
                }
                Some(InboundEvent::WorkEvent(event)) if enable_backfill_rate_limiting => {
                    match QueuedBackfillBatch::try_from(event) {
                        Ok(backfill_batch) => {
                            match work_reprocessing_tx
                                .try_send(ReprocessQueueMessage::BackfillSync(backfill_batch))
                            {
                                Err(e) => {
                                    warn!(
                                        self.log,
                                        "Unable to queue backfill work event. Will try to process now.";
                                        "error" => %e
                                    );
                                    match e {
                                        TrySendError::Full(reprocess_queue_message)
                                        | TrySendError::Closed(reprocess_queue_message) => {
                                            match reprocess_queue_message {
                                                ReprocessQueueMessage::BackfillSync(
                                                    backfill_batch,
                                                ) => Some(backfill_batch.into()),
                                                other => {
                                                    crit!(
                                                        self.log,
                                                        "Unexpected queue message type";
                                                        "message_type" => other.as_ref()
                                                    );
                                                    // This is an unhandled exception, drop the message.
                                                    continue;
                                                }
                                            }
                                        }
                                    }
                                }
                                Ok(..) => {
                                    // backfill work sent to "reprocessing" queue. Process the next event.
                                    continue;
                                }
                            }
                        }
                        Err(event) => Some(event),
                    }
                }
                Some(InboundEvent::WorkEvent(event))
                | Some(InboundEvent::ReprocessingWork(event)) => Some(event),
                None => {

@@ -9,7 +9,7 @@ use crate::{service::NetworkMessage, sync::SyncMessage};
use beacon_chain::test_utils::{
    AttestationStrategy, BeaconChainHarness, BlockStrategy, EphemeralHarnessType,
};
-use beacon_chain::{BeaconChain, MAXIMUM_GOSSIP_CLOCK_DISPARITY};
+use beacon_chain::{BeaconChain, ChainConfig, MAXIMUM_GOSSIP_CLOCK_DISPARITY};
use lighthouse_network::{
    discv5::enr::{CombinedKey, EnrBuilder},
    rpc::methods::{MetaData, MetaDataV2},
@@ -23,8 +23,8 @@ use std::sync::Arc;
use std::time::Duration;
use tokio::sync::mpsc;
use types::{
-    Attestation, AttesterSlashing, EthSpec, MainnetEthSpec, ProposerSlashing, SignedBeaconBlock,
-    SignedVoluntaryExit, SubnetId,
+    Attestation, AttesterSlashing, Epoch, EthSpec, MainnetEthSpec, ProposerSlashing,
+    SignedBeaconBlock, SignedVoluntaryExit, SubnetId,
};

type E = MainnetEthSpec;
@@ -70,6 +70,10 @@ impl Drop for TestRig {

impl TestRig {
    pub async fn new(chain_length: u64) -> Self {
        Self::new_with_chain_config(chain_length, ChainConfig::default()).await
    }

    pub async fn new_with_chain_config(chain_length: u64, chain_config: ChainConfig) -> Self {
        // This allows for testing voluntary exits without building out a massive chain.
        let mut spec = E::default_spec();
        spec.shard_committee_period = 2;
@@ -78,6 +82,7 @@ impl TestRig {
            .spec(spec)
            .deterministic_keypairs(VALIDATOR_COUNT)
            .fresh_ephemeral_store()
            .chain_config(chain_config)
            .build();

        harness.advance_slot();
@@ -261,6 +266,14 @@ impl TestRig {
        self.beacon_processor_tx.try_send(event).unwrap();
    }

    pub fn enqueue_backfill_batch(&self) {
        let event = WorkEvent::chain_segment(
            ChainSegmentProcessId::BackSyncBatchId(Epoch::default()),
            Vec::default(),
        );
        self.beacon_processor_tx.try_send(event).unwrap();
    }

    pub fn enqueue_unaggregated_attestation(&self) {
        let (attestation, subnet_id) = self.attestations.first().unwrap().clone();
        self.beacon_processor_tx
@@ -873,3 +886,49 @@ async fn test_rpc_block_reprocessing() {
    // cache handle was dropped.
    assert_eq!(next_block_root, rig.head_root());
}

/// Ensure that backfill batches get rate-limited and processing is scheduled at specified intervals.
#[tokio::test]
async fn test_backfill_sync_processing() {
    let mut rig = TestRig::new(SMALL_CHAIN).await;
    // Note: verifying the exact event times in an integration test is not straightforward
    // here (it is not straightforward to manipulate `TestingSlotClock` due to cloning of
    // `SlotClock` in the code) and would make the test very slow, hence the timing
    // calculation is unit tested separately in `work_reprocessing_queue`.
    for _ in 0..1 {
        rig.enqueue_backfill_batch();
        // Ensure the queued batch is not processed until later.
        rig.assert_no_events_for(Duration::from_millis(100)).await;
        // A new batch should be processed within a slot.
        rig.assert_event_journal_with_timeout(
            &[CHAIN_SEGMENT_BACKFILL, WORKER_FREED, NOTHING_TO_DO],
            rig.chain.slot_clock.slot_duration(),
        )
        .await;
    }
}

/// Ensure that backfill batches get processed as fast as they can when rate-limiting is disabled.
#[tokio::test]
async fn test_backfill_sync_processing_rate_limiting_disabled() {
    let chain_config = ChainConfig {
        enable_backfill_rate_limiting: false,
        ..Default::default()
    };
    let mut rig = TestRig::new_with_chain_config(SMALL_CHAIN, chain_config).await;

    for _ in 0..3 {
        rig.enqueue_backfill_batch();
    }

    // Ensure all batches are processed.
    rig.assert_event_journal_with_timeout(
        &[
            CHAIN_SEGMENT_BACKFILL,
            CHAIN_SEGMENT_BACKFILL,
            CHAIN_SEGMENT_BACKFILL,
        ],
        Duration::from_millis(100),
    )
    .await;
}

@@ -11,6 +11,7 @@
//! Aggregated and unaggregated attestations that failed verification due to referencing an unknown
//! block will be re-queued until their block is imported, or until they expire.
use super::MAX_SCHEDULED_WORK_QUEUE_LEN;
+use crate::beacon_processor::{ChainSegmentProcessId, Work, WorkEvent};
use crate::metrics;
use crate::sync::manager::BlockProcessType;
use beacon_chain::blob_verification::AsBlock;
@@ -19,14 +20,17 @@ use beacon_chain::{BeaconChainTypes, GossipVerifiedBlock, MAXIMUM_GOSSIP_CLOCK_D
use fnv::FnvHashMap;
use futures::task::Poll;
use futures::{Stream, StreamExt};
+use itertools::Itertools;
use lighthouse_network::{MessageId, PeerId};
use logging::TimeLatch;
use slog::{crit, debug, error, trace, warn, Logger};
use slot_clock::SlotClock;
use std::collections::{HashMap, HashSet};
+use std::future::Future;
use std::pin::Pin;
use std::task::Context;
use std::time::Duration;
+use strum::AsRefStr;
use task_executor::TaskExecutor;
use tokio::sync::mpsc::{self, Receiver, Sender};
use tokio::time::error::Error as TimeError;
@@ -65,7 +69,21 @@ const MAXIMUM_QUEUED_ATTESTATIONS: usize = 16_384;
/// How many light client updates we keep before new ones get dropped.
const MAXIMUM_QUEUED_LIGHT_CLIENT_UPDATES: usize = 128;

// Process backfill batch 50%, 60%, 80% through each slot.
//
// Note: use caution to set these fractions in a way that won't cause panic-y
// arithmetic.
pub const BACKFILL_SCHEDULE_IN_SLOT: [(u32, u32); 3] = [
    // One half: 6s on mainnet, 2.5s on Gnosis.
    (1, 2),
    // Three fifths: 7.2s on mainnet, 3s on Gnosis.
    (3, 5),
    // Four fifths: 9.6s on mainnet, 4s on Gnosis.
    (4, 5),
];
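A standalone sketch, not part of this diff, of how these fractions translate to in-slot offsets; it assumes mainnet's 12-second slot duration and mirrors the divide-then-multiply order used by the scheduler:

    use std::time::Duration;

    fn main() {
        let slot_duration = Duration::from_secs(12); // assumption: mainnet timing
        for (multiplier, divisor) in [(1u32, 2u32), (3, 5), (4, 5)] {
            // Divide first, then multiply, matching the scheduler's integer Duration math.
            let offset = (slot_duration / divisor) * multiplier;
            println!("{multiplier}/{divisor} of the slot => {offset:?}"); // 6s, 7.2s, 9.6s
        }
    }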
/// Messages that the scheduler can receive.
#[derive(AsRefStr)]
pub enum ReprocessQueueMessage<T: BeaconChainTypes> {
    /// A block that has been received early and we should queue for later processing.
    EarlyBlock(QueuedGossipBlock<T>),
@@ -84,6 +102,8 @@ pub enum ReprocessQueueMessage<T: BeaconChainTypes> {
    UnknownBlockAggregate(QueuedAggregate<T::EthSpec>),
    /// A light client optimistic update that references a parent root that has not been seen as a parent.
    UnknownLightClientOptimisticUpdate(QueuedLightClientUpdate<T::EthSpec>),
+    /// A new backfill batch that needs to be scheduled for processing.
+    BackfillSync(QueuedBackfillBatch<T::EthSpec>),
}

/// Events sent by the scheduler once they are ready for re-processing.
@@ -93,6 +113,7 @@ pub enum ReadyWork<T: BeaconChainTypes> {
    Unaggregate(QueuedUnaggregate<T::EthSpec>),
    Aggregate(QueuedAggregate<T::EthSpec>),
    LightClientUpdate(QueuedLightClientUpdate<T::EthSpec>),
+    BackfillSync(QueuedBackfillBatch<T::EthSpec>),
}

/// An Attestation for which the corresponding block was not seen while processing, queued for
@@ -144,6 +165,40 @@ pub struct QueuedRpcBlock<T: EthSpec> {
    pub should_process: bool,
}

/// A backfill batch that has been queued for processing later.
#[derive(Clone)]
pub struct QueuedBackfillBatch<E: EthSpec> {
    pub process_id: ChainSegmentProcessId,
    pub blocks: Vec<BlockWrapper<E>>,
}

impl<T: BeaconChainTypes> TryFrom<WorkEvent<T>> for QueuedBackfillBatch<T::EthSpec> {
    type Error = WorkEvent<T>;

    fn try_from(event: WorkEvent<T>) -> Result<Self, WorkEvent<T>> {
        match event {
            WorkEvent {
                work:
                    Work::ChainSegment {
                        process_id: process_id @ ChainSegmentProcessId::BackSyncBatchId(_),
                        blocks,
                    },
                ..
            } => Ok(QueuedBackfillBatch { process_id, blocks }),
            _ => Err(event),
        }
    }
}

impl<T: BeaconChainTypes> From<QueuedBackfillBatch<T::EthSpec>> for WorkEvent<T> {
    fn from(queued_backfill_batch: QueuedBackfillBatch<T::EthSpec>) -> WorkEvent<T> {
        WorkEvent::chain_segment(
            queued_backfill_batch.process_id,
            queued_backfill_batch.blocks,
        )
    }
}
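A sketch, not part of this diff, of the round trip these two impls support: a backfill `WorkEvent` converts into a `QueuedBackfillBatch` and back without losing the process id or blocks, while any non-backfill event is handed back unchanged through `Err`. The `epoch` and `blocks` values here are placeholders:

    let event = WorkEvent::chain_segment(ChainSegmentProcessId::BackSyncBatchId(epoch), blocks);
    match QueuedBackfillBatch::try_from(event) {
        Ok(batch) => {
            // Re-materialise the original event once the batch is due for processing.
            let event_again: WorkEvent<T> = batch.into();
        }
        Err(other_event) => { /* not a backfill batch; process normally */ }
    }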
/// Unifies the different messages processed by the block delay queue.
enum InboundEvent<T: BeaconChainTypes> {
    /// A gossip block that was queued for later processing and is ready for import.
@@ -155,6 +210,8 @@ enum InboundEvent<T: BeaconChainTypes> {
    ReadyAttestation(QueuedAttestationId),
    /// A light client update that is ready for re-processing.
    ReadyLightClientUpdate(QueuedLightClientUpdateId),
+    /// A backfill batch that was queued and is now ready for processing.
+    ReadyBackfillSync(QueuedBackfillBatch<T::EthSpec>),
    /// A `DelayQueue` returned an error.
    DelayQueueError(TimeError, &'static str),
    /// A message sent to the `ReprocessQueue`
@@ -191,6 +248,8 @@ struct ReprocessQueue<T: BeaconChainTypes> {
    queued_lc_updates: FnvHashMap<usize, (QueuedLightClientUpdate<T::EthSpec>, DelayKey)>,
    /// Light Client Updates per parent_root.
    awaiting_lc_updates_per_parent_root: HashMap<Hash256, Vec<QueuedLightClientUpdateId>>,
+    /// Queued backfill batches.
+    queued_backfill_batches: Vec<QueuedBackfillBatch<T::EthSpec>>,

    /* Aux */
    /// Next attestation id, used for both aggregated and unaggregated attestations
@@ -200,6 +259,8 @@ struct ReprocessQueue<T: BeaconChainTypes> {
    rpc_block_debounce: TimeLatch,
    attestation_delay_debounce: TimeLatch,
    lc_update_delay_debounce: TimeLatch,
+    next_backfill_batch_event: Option<Pin<Box<tokio::time::Sleep>>>,
+    slot_clock: Pin<Box<T::SlotClock>>,
}

pub type QueuedLightClientUpdateId = usize;
@@ -287,6 +348,20 @@ impl<T: BeaconChainTypes> Stream for ReprocessQueue<T> {
            Poll::Ready(None) | Poll::Pending => (),
        }

        if let Some(next_backfill_batch_event) = self.next_backfill_batch_event.as_mut() {
            match next_backfill_batch_event.as_mut().poll(cx) {
                Poll::Ready(_) => {
                    let maybe_batch = self.queued_backfill_batches.pop();
                    self.recompute_next_backfill_batch_event();

                    if let Some(batch) = maybe_batch {
                        return Poll::Ready(Some(InboundEvent::ReadyBackfillSync(batch)));
                    }
                }
                Poll::Pending => (),
            }
        }

        // Last empty the messages channel.
        match self.work_reprocessing_rx.poll_recv(cx) {
            Poll::Ready(Some(message)) => return Poll::Ready(Some(InboundEvent::Msg(message))),
@@ -323,12 +398,15 @@ pub fn spawn_reprocess_scheduler<T: BeaconChainTypes>(
        queued_unaggregates: FnvHashMap::default(),
        awaiting_attestations_per_root: HashMap::new(),
        awaiting_lc_updates_per_parent_root: HashMap::new(),
+        queued_backfill_batches: Vec::new(),
        next_attestation: 0,
        next_lc_update: 0,
        early_block_debounce: TimeLatch::default(),
        rpc_block_debounce: TimeLatch::default(),
        attestation_delay_debounce: TimeLatch::default(),
        lc_update_delay_debounce: TimeLatch::default(),
+        next_backfill_batch_event: None,
+        slot_clock: Box::pin(slot_clock.clone()),
    };

    executor.spawn(
@@ -679,6 +757,14 @@ impl<T: BeaconChainTypes> ReprocessQueue<T> {
                    }
                }
            }
            InboundEvent::Msg(BackfillSync(queued_backfill_batch)) => {
                self.queued_backfill_batches
                    .insert(0, queued_backfill_batch);
                // only recompute if there is no `next_backfill_batch_event` already scheduled
                if self.next_backfill_batch_event.is_none() {
                    self.recompute_next_backfill_batch_event();
                }
            }
            // A block that was queued for later processing is now ready to be processed.
            InboundEvent::ReadyGossipBlock(ready_block) => {
                let block_root = ready_block.block.block_root;
@@ -786,6 +872,33 @@ impl<T: BeaconChainTypes> ReprocessQueue<T> {
                    }
                }
            }
            InboundEvent::ReadyBackfillSync(queued_backfill_batch) => {
                let millis_from_slot_start = slot_clock
                    .millis_from_current_slot_start()
                    .map_or("null".to_string(), |duration| {
                        duration.as_millis().to_string()
                    });

                debug!(
                    log,
                    "Sending scheduled backfill work";
                    "millis_from_slot_start" => millis_from_slot_start
                );

                if self
                    .ready_work_tx
                    .try_send(ReadyWork::BackfillSync(queued_backfill_batch.clone()))
                    .is_err()
                {
                    error!(
                        log,
                        "Failed to send scheduled backfill work";
                        "info" => "sending work back to queue"
                    );
                    self.queued_backfill_batches
                        .insert(0, queued_backfill_batch);
                }
            }
        }

        metrics::set_gauge_vec(
@@ -809,4 +922,95 @@ impl<T: BeaconChainTypes> ReprocessQueue<T> {
            self.lc_updates_delay_queue.len() as i64,
        );
    }

    fn recompute_next_backfill_batch_event(&mut self) {
        // Only recompute the `next_backfill_batch_event` if there are backfill batches in the queue.
        if !self.queued_backfill_batches.is_empty() {
            self.next_backfill_batch_event = Some(Box::pin(tokio::time::sleep(
                ReprocessQueue::<T>::duration_until_next_backfill_batch_event(&self.slot_clock),
            )));
        } else {
            self.next_backfill_batch_event = None
        }
    }

    /// Returns the duration until the next scheduled processing time. The schedule ensures that
    /// backfill processing is done in windows of time that aren't critical.
    fn duration_until_next_backfill_batch_event(slot_clock: &T::SlotClock) -> Duration {
        let slot_duration = slot_clock.slot_duration();
        slot_clock
            .millis_from_current_slot_start()
            .and_then(|duration_from_slot_start| {
                BACKFILL_SCHEDULE_IN_SLOT
                    .into_iter()
                    // Convert fractions to seconds from slot start.
                    .map(|(multiplier, divisor)| (slot_duration / divisor) * multiplier)
                    .find_or_first(|&event_duration_from_slot_start| {
                        event_duration_from_slot_start > duration_from_slot_start
                    })
                    .map(|next_event_time| {
                        if duration_from_slot_start >= next_event_time {
                            // The event is in the next slot; add the remainder of this slot.
                            let duration_to_next_slot = slot_duration - duration_from_slot_start;
                            duration_to_next_slot + next_event_time
                        } else {
                            next_event_time - duration_from_slot_start
                        }
                    })
            })
            // If we can't read the slot clock, just wait another slot.
            .unwrap_or(slot_duration)
    }
}

#[cfg(test)]
mod tests {
    use super::*;
    use beacon_chain::builder::Witness;
    use beacon_chain::eth1_chain::CachingEth1Backend;
    use slot_clock::TestingSlotClock;
    use store::MemoryStore;
    use types::MainnetEthSpec as E;
    use types::Slot;

    type TestBeaconChainType =
        Witness<TestingSlotClock, CachingEth1Backend<E>, E, MemoryStore<E>, MemoryStore<E>>;

    #[test]
    fn backfill_processing_schedule_calculation() {
        let slot_duration = Duration::from_secs(12);
        let slot_clock = TestingSlotClock::new(Slot::new(0), Duration::from_secs(0), slot_duration);
        let current_slot_start = slot_clock.start_of(Slot::new(100)).unwrap();
        slot_clock.set_current_time(current_slot_start);

        let event_times = BACKFILL_SCHEDULE_IN_SLOT
            .map(|(multiplier, divisor)| (slot_duration / divisor) * multiplier);

        for &event_duration_from_slot_start in event_times.iter() {
            let duration_to_next_event =
                ReprocessQueue::<TestBeaconChainType>::duration_until_next_backfill_batch_event(
                    &slot_clock,
                );

            let current_time = slot_clock.millis_from_current_slot_start().unwrap();

            assert_eq!(
                duration_to_next_event,
                event_duration_from_slot_start - current_time
            );

            slot_clock.set_current_time(current_slot_start + event_duration_from_slot_start)
        }

        // Check for the next event beyond the current slot.
        let duration_to_next_slot = slot_clock.duration_to_next_slot().unwrap();
        let duration_to_next_event =
            ReprocessQueue::<TestBeaconChainType>::duration_until_next_backfill_batch_event(
                &slot_clock,
            );
        assert_eq!(
            duration_to_next_event,
            duration_to_next_slot + event_times[0]
        );
    }
}

@@ -268,6 +268,14 @@ pub fn cli_app<'a, 'b>() -> App<'a, 'b> {
            .min_values(0)
            .hidden(true)
    )
    .arg(
        Arg::with_name("disable-backfill-rate-limiting")
            .long("disable-backfill-rate-limiting")
            .help("Disable the backfill sync rate-limiting. This allows users to just sync the entire chain as fast \
                as possible; however, it can result in resource contention which degrades staking performance. Stakers \
                should generally choose to avoid this flag since backfill sync is not required for staking.")
            .takes_value(false),
    )
    /* REST API related arguments */
    .arg(
        Arg::with_name("http")
@@ -796,6 +796,10 @@ pub fn get_config<E: EthSpec>(
        client_config.always_prefer_builder_payload = true;
    }

    // Backfill sync rate-limiting
    client_config.chain.enable_backfill_rate_limiting =
        !cli_args.is_present("disable-backfill-rate-limiting");

    Ok(client_config)
}

@@ -2180,7 +2180,7 @@ fn no_state_root_iter() -> Option<std::iter::Empty<Result<(Hash256, Slot), Error
/// Allows full reconstruction by replaying blocks.
#[derive(Debug, Clone, Copy, Default, Encode, Decode)]
pub struct HotStateSummary {
-    slot: Slot,
+    pub slot: Slot,
    pub latest_block_root: Hash256,
    epoch_boundary_state_root: Hash256,
}

@@ -28,6 +28,7 @@ validator client or the slasher**.
| v3.3.0 | Nov 2022 | v13 | yes |
| v3.4.0 | Jan 2023 | v13 | yes |
| v3.5.0 | Feb 2023 | v15 | yes before Capella |
+| v4.0.1 | Mar 2023 | v16 | yes before Capella |

> **Note**: All point releases (e.g. v2.3.1) are schema-compatible with the prior minor release
> (e.g. v2.3.0).

@@ -1,6 +1,6 @@
[package]
name = "boot_node"
-version = "4.0.1-rc.0"
+version = "4.0.1"
authors = ["Sigma Prime <contact@sigmaprime.io>"]
edition = "2021"

@@ -22,13 +22,14 @@ use lighthouse_network::PeerId;
pub use reqwest;
use reqwest::{IntoUrl, RequestBuilder, Response};
pub use reqwest::{StatusCode, Url};
-pub use sensitive_url::SensitiveUrl;
+pub use sensitive_url::{SensitiveError, SensitiveUrl};
use serde::{de::DeserializeOwned, Serialize};
use std::convert::TryFrom;
use std::fmt;
use std::iter::Iterator;
use std::path::PathBuf;
use std::time::Duration;
use store::fork_versioned_response::ExecutionOptimisticFinalizedForkVersionedResponse;

pub const V1: EndpointVersion = EndpointVersion(1);
pub const V2: EndpointVersion = EndpointVersion(2);
@@ -338,7 +339,7 @@ impl BeaconNodeHttpClient {
    pub async fn get_beacon_states_root(
        &self,
        state_id: StateId,
-    ) -> Result<Option<ExecutionOptimisticResponse<RootData>>, Error> {
+    ) -> Result<Option<ExecutionOptimisticFinalizedResponse<RootData>>, Error> {
        let mut path = self.eth_path(V1)?;

        path.path_segments_mut()
@@ -357,7 +358,7 @@ impl BeaconNodeHttpClient {
    pub async fn get_beacon_states_fork(
        &self,
        state_id: StateId,
-    ) -> Result<Option<ExecutionOptimisticResponse<Fork>>, Error> {
+    ) -> Result<Option<ExecutionOptimisticFinalizedResponse<Fork>>, Error> {
        let mut path = self.eth_path(V1)?;

        path.path_segments_mut()
@@ -376,7 +377,7 @@ impl BeaconNodeHttpClient {
    pub async fn get_beacon_states_finality_checkpoints(
        &self,
        state_id: StateId,
-    ) -> Result<Option<ExecutionOptimisticResponse<FinalityCheckpointsData>>, Error> {
+    ) -> Result<Option<ExecutionOptimisticFinalizedResponse<FinalityCheckpointsData>>, Error> {
        let mut path = self.eth_path(V1)?;

        path.path_segments_mut()
@@ -396,7 +397,8 @@ impl BeaconNodeHttpClient {
        &self,
        state_id: StateId,
        ids: Option<&[ValidatorId]>,
-    ) -> Result<Option<ExecutionOptimisticResponse<Vec<ValidatorBalanceData>>>, Error> {
+    ) -> Result<Option<ExecutionOptimisticFinalizedResponse<Vec<ValidatorBalanceData>>>, Error>
+    {
        let mut path = self.eth_path(V1)?;

        path.path_segments_mut()
@@ -426,7 +428,7 @@ impl BeaconNodeHttpClient {
        state_id: StateId,
        ids: Option<&[ValidatorId]>,
        statuses: Option<&[ValidatorStatus]>,
-    ) -> Result<Option<ExecutionOptimisticResponse<Vec<ValidatorData>>>, Error> {
+    ) -> Result<Option<ExecutionOptimisticFinalizedResponse<Vec<ValidatorData>>>, Error> {
        let mut path = self.eth_path(V1)?;

        path.path_segments_mut()
@@ -466,7 +468,7 @@ impl BeaconNodeHttpClient {
        slot: Option<Slot>,
        index: Option<u64>,
        epoch: Option<Epoch>,
-    ) -> Result<Option<ExecutionOptimisticResponse<Vec<CommitteeData>>>, Error> {
+    ) -> Result<Option<ExecutionOptimisticFinalizedResponse<Vec<CommitteeData>>>, Error> {
        let mut path = self.eth_path(V1)?;

        path.path_segments_mut()
@@ -499,7 +501,7 @@ impl BeaconNodeHttpClient {
        &self,
        state_id: StateId,
        epoch: Option<Epoch>,
-    ) -> Result<ExecutionOptimisticResponse<SyncCommitteeByValidatorIndices>, Error> {
+    ) -> Result<ExecutionOptimisticFinalizedResponse<SyncCommitteeByValidatorIndices>, Error> {
        let mut path = self.eth_path(V1)?;

        path.path_segments_mut()
@@ -522,7 +524,7 @@ impl BeaconNodeHttpClient {
        &self,
        state_id: StateId,
        epoch: Option<Epoch>,
-    ) -> Result<Option<ExecutionOptimisticResponse<RandaoMix>>, Error> {
+    ) -> Result<Option<ExecutionOptimisticFinalizedResponse<RandaoMix>>, Error> {
        let mut path = self.eth_path(V1)?;

        path.path_segments_mut()
@@ -547,7 +549,7 @@ impl BeaconNodeHttpClient {
        &self,
        state_id: StateId,
        validator_id: &ValidatorId,
-    ) -> Result<Option<ExecutionOptimisticResponse<ValidatorData>>, Error> {
+    ) -> Result<Option<ExecutionOptimisticFinalizedResponse<ValidatorData>>, Error> {
        let mut path = self.eth_path(V1)?;

        path.path_segments_mut()
@@ -568,7 +570,7 @@ impl BeaconNodeHttpClient {
        &self,
        slot: Option<Slot>,
        parent_root: Option<Hash256>,
-    ) -> Result<Option<ExecutionOptimisticResponse<Vec<BlockHeaderData>>>, Error> {
+    ) -> Result<Option<ExecutionOptimisticFinalizedResponse<Vec<BlockHeaderData>>>, Error> {
        let mut path = self.eth_path(V1)?;

        path.path_segments_mut()
@@ -595,7 +597,7 @@ impl BeaconNodeHttpClient {
    pub async fn get_beacon_headers_block_id(
        &self,
        block_id: BlockId,
-    ) -> Result<Option<ExecutionOptimisticResponse<BlockHeaderData>>, Error> {
+    ) -> Result<Option<ExecutionOptimisticFinalizedResponse<BlockHeaderData>>, Error> {
        let mut path = self.eth_path(V1)?;

        path.path_segments_mut()
@@ -686,7 +688,10 @@ impl BeaconNodeHttpClient {
    pub async fn get_beacon_blocks<T: EthSpec>(
        &self,
        block_id: BlockId,
-    ) -> Result<Option<ExecutionOptimisticForkVersionedResponse<SignedBeaconBlock<T>>>, Error> {
+    ) -> Result<
+        Option<ExecutionOptimisticFinalizedForkVersionedResponse<SignedBeaconBlock<T>>>,
+        Error,
+    > {
        let path = self.get_beacon_blocks_path(block_id)?;
        let response = match self.get_response(path, |b| b).await.optional()? {
            Some(res) => res,
@@ -719,8 +724,10 @@ impl BeaconNodeHttpClient {
    pub async fn get_beacon_blinded_blocks<T: EthSpec>(
        &self,
        block_id: BlockId,
-    ) -> Result<Option<ExecutionOptimisticForkVersionedResponse<SignedBlindedBeaconBlock<T>>>, Error>
-    {
+    ) -> Result<
+        Option<ExecutionOptimisticFinalizedForkVersionedResponse<SignedBlindedBeaconBlock<T>>>,
+        Error,
+    > {
        let path = self.get_beacon_blinded_blocks_path(block_id)?;
        let response = match self.get_response(path, |b| b).await.optional()? {
            Some(res) => res,
@@ -788,7 +795,7 @@ impl BeaconNodeHttpClient {
    pub async fn get_beacon_blocks_root(
        &self,
        block_id: BlockId,
-    ) -> Result<Option<ExecutionOptimisticResponse<RootData>>, Error> {
+    ) -> Result<Option<ExecutionOptimisticFinalizedResponse<RootData>>, Error> {
        let mut path = self.eth_path(V1)?;

        path.path_segments_mut()
@@ -807,7 +814,7 @@ impl BeaconNodeHttpClient {
    pub async fn get_beacon_blocks_attestations<T: EthSpec>(
        &self,
        block_id: BlockId,
-    ) -> Result<Option<ExecutionOptimisticResponse<Vec<Attestation<T>>>>, Error> {
+    ) -> Result<Option<ExecutionOptimisticFinalizedResponse<Vec<Attestation<T>>>>, Error> {
        let mut path = self.eth_path(V1)?;

        path.path_segments_mut()
@@ -1295,7 +1302,8 @@ impl BeaconNodeHttpClient {
    pub async fn get_debug_beacon_states<T: EthSpec>(
        &self,
        state_id: StateId,
-    ) -> Result<Option<ExecutionOptimisticForkVersionedResponse<BeaconState<T>>>, Error> {
+    ) -> Result<Option<ExecutionOptimisticFinalizedForkVersionedResponse<BeaconState<T>>>, Error>
+    {
        let path = self.get_debug_beacon_states_path(state_id)?;
        self.get_opt(path).await
    }
@@ -1362,6 +1370,18 @@ impl BeaconNodeHttpClient {
        self.get(path).await
    }

    /// `GET v1/debug/fork_choice`
    pub async fn get_debug_fork_choice(&self) -> Result<ForkChoice, Error> {
        let mut path = self.eth_path(V1)?;

        path.path_segments_mut()
            .map_err(|()| Error::InvalidUrl(self.server.clone()))?
            .push("debug")
            .push("fork_choice");

        self.get(path).await
    }
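A minimal sketch, not part of this diff, of driving the new endpoint from the client; the URL and timeout values are illustrative:

    let client = BeaconNodeHttpClient::new(
        SensitiveUrl::parse("http://localhost:5052")?, // assumption: local beacon node
        Timeouts::set_all(Duration::from_secs(12)),
    );
    let fork_choice = client.get_debug_fork_choice().await?;
    println!("{} fork choice nodes", fork_choice.fork_choice_nodes.len());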
    /// `GET validator/duties/proposer/{epoch}`
    pub async fn get_validator_duties_proposer(
        &self,
@@ -1703,7 +1723,7 @@ impl BeaconNodeHttpClient {
        &self,
        epoch: Epoch,
        indices: &[u64],
-    ) -> Result<ExecutionOptimisticResponse<Vec<SyncDuty>>, Error> {
+    ) -> Result<ExecutionOptimisticFinalizedResponse<Vec<SyncDuty>>, Error> {
        let mut path = self.eth_path(V1)?;

        path.path_segments_mut()

@@ -13,7 +13,7 @@ use crate::{
        BeaconState, ChainSpec, DepositTreeSnapshot, Epoch, EthSpec, FinalizedExecutionBlock,
        GenericResponse, ValidatorId,
    },
-    BeaconNodeHttpClient, DepositData, Error, Eth1Data, Hash256, StateId, StatusCode,
+    BeaconNodeHttpClient, DepositData, Error, Eth1Data, Hash256, Slot, StateId, StatusCode,
};
use proto_array::core::ProtoArray;
use reqwest::IntoUrl;
@@ -566,4 +566,73 @@ impl BeaconNodeHttpClient {

        self.post_with_response(path, &()).await
    }

    ///
    /// Analysis endpoints.
    ///

    /// `GET` lighthouse/analysis/block_rewards?start_slot,end_slot
    pub async fn get_lighthouse_analysis_block_rewards(
        &self,
        start_slot: Slot,
        end_slot: Slot,
    ) -> Result<Vec<BlockReward>, Error> {
        let mut path = self.server.full.clone();

        path.path_segments_mut()
            .map_err(|()| Error::InvalidUrl(self.server.clone()))?
            .push("lighthouse")
            .push("analysis")
            .push("block_rewards");

        path.query_pairs_mut()
            .append_pair("start_slot", &start_slot.to_string())
            .append_pair("end_slot", &end_slot.to_string());

        self.get(path).await
    }

    /// `GET` lighthouse/analysis/block_packing?start_epoch,end_epoch
    pub async fn get_lighthouse_analysis_block_packing(
        &self,
        start_epoch: Epoch,
        end_epoch: Epoch,
    ) -> Result<Vec<BlockPackingEfficiency>, Error> {
        let mut path = self.server.full.clone();

        path.path_segments_mut()
            .map_err(|()| Error::InvalidUrl(self.server.clone()))?
            .push("lighthouse")
            .push("analysis")
            .push("block_packing_efficiency");

        path.query_pairs_mut()
            .append_pair("start_epoch", &start_epoch.to_string())
            .append_pair("end_epoch", &end_epoch.to_string());

        self.get(path).await
    }

    /// `GET` lighthouse/analysis/attestation_performance/{index}?start_epoch,end_epoch
    pub async fn get_lighthouse_analysis_attestation_performance(
        &self,
        start_epoch: Epoch,
        end_epoch: Epoch,
        target: String,
    ) -> Result<Vec<AttestationPerformance>, Error> {
        let mut path = self.server.full.clone();

        path.path_segments_mut()
            .map_err(|()| Error::InvalidUrl(self.server.clone()))?
            .push("lighthouse")
            .push("analysis")
            .push("attestation_performance")
            .push(&target);

        path.query_pairs_mut()
            .append_pair("start_epoch", &start_epoch.to_string())
            .append_pair("end_epoch", &end_epoch.to_string());

        self.get(path).await
    }
}
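A sketch, not part of this diff, of calling the analysis endpoints from the same client; the slot and epoch ranges are illustrative:

    let rewards = client
        .get_lighthouse_analysis_block_rewards(Slot::new(100), Slot::new(200))
        .await?;
    let packing = client
        .get_lighthouse_analysis_block_packing(Epoch::new(10), Epoch::new(20))
        .await?;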
@@ -642,6 +642,30 @@ impl ValidatorClientHttpClient {
        let url = self.make_gas_limit_url(pubkey)?;
        self.delete_with_raw_response(url, &()).await
    }

    /// `POST /eth/v1/validator/{pubkey}/voluntary_exit`
    pub async fn post_validator_voluntary_exit(
        &self,
        pubkey: &PublicKeyBytes,
        epoch: Option<Epoch>,
    ) -> Result<SignedVoluntaryExit, Error> {
        let mut path = self.server.full.clone();

        path.path_segments_mut()
            .map_err(|()| Error::InvalidUrl(self.server.clone()))?
            .push("eth")
            .push("v1")
            .push("validator")
            .push(&pubkey.to_string())
            .push("voluntary_exit");

        if let Some(epoch) = epoch {
            path.query_pairs_mut()
                .append_pair("epoch", &epoch.to_string());
        }

        self.post(path, &()).await
    }
}
/// Returns `Ok(response)` if the response is a `200 OK` response or a
|
||||
|
@ -144,3 +144,8 @@ pub struct UpdateGasLimitRequest {
|
||||
#[serde(with = "eth2_serde_utils::quoted_u64")]
|
||||
pub gas_limit: u64,
|
||||
}
|
||||
|
||||
#[derive(Deserialize)]
|
||||
pub struct VoluntaryExitQuery {
|
||||
pub epoch: Option<Epoch>,
|
||||
}
@ -201,6 +201,14 @@ pub struct ExecutionOptimisticResponse<T: Serialize + serde::de::DeserializeOwne
    pub data: T,
}

#[derive(Debug, PartialEq, Clone, Serialize, Deserialize)]
#[serde(bound = "T: Serialize + serde::de::DeserializeOwned")]
pub struct ExecutionOptimisticFinalizedResponse<T: Serialize + serde::de::DeserializeOwned> {
    pub execution_optimistic: Option<bool>,
    pub finalized: Option<bool>,
    pub data: T,
}

#[derive(Debug, PartialEq, Clone, Serialize, Deserialize)]
#[serde(bound = "T: Serialize + serde::de::DeserializeOwned")]
pub struct GenericResponse<T: Serialize + serde::de::DeserializeOwned> {
@ -223,6 +231,18 @@ impl<T: Serialize + serde::de::DeserializeOwned> GenericResponse<T> {
            data: self.data,
        }
    }

    pub fn add_execution_optimistic_finalized(
        self,
        execution_optimistic: bool,
        finalized: bool,
    ) -> ExecutionOptimisticFinalizedResponse<T> {
        ExecutionOptimisticFinalizedResponse {
            execution_optimistic: Some(execution_optimistic),
            finalized: Some(finalized),
            data: self.data,
        }
    }
}
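For illustration, a handler composes the wrapper like this; a minimal sketch, assuming the `eth2` crate's `GenericResponse::from` (which exists for any serializable payload) and `serde_json` as dependencies.

```rust
use eth2::types::GenericResponse;

fn main() {
    // Wrap an API payload, then annotate it with the new finalization flag.
    let response = GenericResponse::from(vec![1u64, 2, 3])
        .add_execution_optimistic_finalized(false, true);

    // Serializes to {"execution_optimistic":false,"finalized":true,"data":[1,2,3]}.
    println!("{}", serde_json::to_string(&response).unwrap());
}
```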
#[derive(Debug, PartialEq, Clone, Serialize)]
@ -1229,6 +1249,25 @@ impl<T: EthSpec, Payload: AbstractExecPayload<T>> ForkVersionDeserialize
        })
    }
}
#[derive(Debug, Serialize, Deserialize)]
pub struct ForkChoice {
    pub justified_checkpoint: Checkpoint,
    pub finalized_checkpoint: Checkpoint,
    pub fork_choice_nodes: Vec<ForkChoiceNode>,
}

#[derive(Debug, Serialize, Deserialize, PartialEq, Eq)]
pub struct ForkChoiceNode {
    pub slot: Slot,
    pub block_root: Hash256,
    pub parent_root: Option<Hash256>,
    pub justified_epoch: Option<Epoch>,
    pub finalized_epoch: Option<Epoch>,
    #[serde(with = "eth2_serde_utils::quoted_u64")]
    pub weight: u64,
    pub validity: Option<String>,
    pub execution_block_hash: Option<Hash256>,
}

#[cfg(test)]
mod tests {
@ -17,8 +17,8 @@ pub const VERSION: &str = git_version!(
        // NOTE: using --match instead of --exclude for compatibility with old Git
        "--match=thiswillnevermatchlol"
    ],
    prefix = "Lighthouse/v4.0.1-rc.0-",
    fallback = "Lighthouse/v4.0.1-rc.0"
    prefix = "Lighthouse/v4.0.1-",
    fallback = "Lighthouse/v4.0.1"
);

/// Returns `VERSION`, but with platform information appended to the end.
@ -104,12 +104,23 @@ pub trait SlotClock: Send + Sync + Sized + Clone {
        self.slot_duration() * 2 / INTERVALS_PER_SLOT as u32
    }

    /// Returns the `Duration` since the start of the current `Slot`. Useful in determining whether to apply proposer boosts.
    fn seconds_from_current_slot_start(&self, seconds_per_slot: u64) -> Option<Duration> {
    /// Returns the `Duration` since the start of the current `Slot` at seconds precision. Useful in determining whether to apply proposer boosts.
    fn seconds_from_current_slot_start(&self) -> Option<Duration> {
        self.now_duration()
            .and_then(|now| now.checked_sub(self.genesis_duration()))
            .map(|duration_into_slot| {
                Duration::from_secs(duration_into_slot.as_secs() % seconds_per_slot)
                Duration::from_secs(duration_into_slot.as_secs() % self.slot_duration().as_secs())
            })
    }

    /// Returns the `Duration` since the start of the current `Slot` at milliseconds precision.
    fn millis_from_current_slot_start(&self) -> Option<Duration> {
        self.now_duration()
            .and_then(|now| now.checked_sub(self.genesis_duration()))
            .map(|duration_into_slot| {
                Duration::from_millis(
                    (duration_into_slot.as_millis() % self.slot_duration().as_millis()) as u64,
                )
            })
    }
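Both helpers are pure modulo arithmetic over the time elapsed since genesis; the refactor just sources the slot duration from the clock itself instead of threading `seconds_per_slot` through every caller. A standalone toy example with made-up numbers:

```rust
use std::time::Duration;

fn main() {
    // Toy numbers: genesis at t=0, 12-second slots, "now" is 100s after genesis.
    let slot_duration = Duration::from_secs(12);
    let since_genesis = Duration::from_secs(100);

    // 100 % 12 = 4, so we are 4 seconds into the current slot.
    let into_slot = Duration::from_secs(since_genesis.as_secs() % slot_duration.as_secs());
    assert_eq!(into_slot, Duration::from_secs(4));

    // The millisecond variant keeps sub-second detail; at 100.5s it yields 4.5s.
    let since_genesis_ms = Duration::from_millis(100_500);
    let into_slot_ms = Duration::from_millis(
        (since_genesis_ms.as_millis() % slot_duration.as_millis()) as u64,
    );
    assert_eq!(into_slot_ms, Duration::from_millis(4_500));
}
```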
@ -6,3 +6,6 @@ edition = "2021"
# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html

[dependencies]
lru_cache = { path = "../lru_cache" }
lazy_static = "1.4.0"
parking_lot = "0.12.0"
@ -1,4 +1,8 @@
use std::net::{TcpListener, UdpSocket};
use lazy_static::lazy_static;
use lru_cache::LRUTimeCache;
use parking_lot::Mutex;
use std::net::{SocketAddr, TcpListener, UdpSocket};
use std::time::Duration;

#[derive(Copy, Clone)]
pub enum Transport {
@ -12,6 +16,13 @@ pub enum IpVersion {
    Ipv6,
}

pub const CACHED_PORTS_TTL: Duration = Duration::from_secs(300);

lazy_static! {
    static ref FOUND_PORTS_CACHE: Mutex<LRUTimeCache<u16>> =
        Mutex::new(LRUTimeCache::new(CACHED_PORTS_TTL));
}

/// A convenience wrapper over [`zero_port`].
pub fn unused_tcp4_port() -> Result<u16, String> {
    zero_port(Transport::Tcp, IpVersion::Ipv4)
@ -48,6 +59,20 @@ pub fn zero_port(transport: Transport, ipv: IpVersion) -> Result<u16, String> {
        IpVersion::Ipv6 => std::net::Ipv6Addr::LOCALHOST.into(),
    };
    let socket_addr = std::net::SocketAddr::new(localhost, 0);
    let mut unused_port: u16;
    loop {
        unused_port = find_unused_port(transport, socket_addr)?;
        let mut cache_lock = FOUND_PORTS_CACHE.lock();
        if !cache_lock.contains(&unused_port) {
            cache_lock.insert(unused_port);
            break;
        }
    }

    Ok(unused_port)
}
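The loop's effect is that repeated calls within the five-minute TTL never hand out the same port twice, even though the OS might happily rebind a port it just released. A usage sketch, assuming this `unused_port` crate as a dependency:

```rust
use unused_port::unused_tcp4_port;

fn main() -> Result<(), String> {
    // Two consecutive requests: the cache guarantees distinct ports, which
    // protects parallel tests from racing onto the same port.
    let a = unused_tcp4_port()?;
    let b = unused_tcp4_port()?;
    assert_ne!(a, b);
    println!("got ports {a} and {b}");
    Ok(())
}
```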
fn find_unused_port(transport: Transport, socket_addr: SocketAddr) -> Result<u16, String> {
    let local_addr = match transport {
        Transport::Tcp => {
            let listener = TcpListener::bind(socket_addr).map_err(|e| {
@ -10,7 +10,10 @@ use crate::{
use serde_derive::{Deserialize, Serialize};
use ssz::{Decode, Encode};
use ssz_derive::{Decode, Encode};
use std::collections::{BTreeSet, HashMap};
use std::{
    collections::{BTreeSet, HashMap},
    fmt,
};
use types::{
    AttestationShufflingId, ChainSpec, Checkpoint, Epoch, EthSpec, ExecutionBlockHash, Hash256,
    Slot,
@ -125,6 +128,17 @@ impl ExecutionStatus {
    }
}

impl fmt::Display for ExecutionStatus {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        match self {
            ExecutionStatus::Valid(_) => write!(f, "valid"),
            ExecutionStatus::Invalid(_) => write!(f, "invalid"),
            ExecutionStatus::Optimistic(_) => write!(f, "optimistic"),
            ExecutionStatus::Irrelevant(_) => write!(f, "irrelevant"),
        }
    }
}

/// A block that is to be applied to the fork choice.
///
/// A simplified version of `types::BeaconBlock`.
@ -5,6 +5,46 @@ use serde_json::value::Value;
use std::sync::Arc;

// Deserialize is only implemented for types that implement ForkVersionDeserialize
#[derive(Debug, PartialEq, Clone, Serialize)]
pub struct ExecutionOptimisticFinalizedForkVersionedResponse<T> {
    #[serde(skip_serializing_if = "Option::is_none")]
    pub version: Option<ForkName>,
    pub execution_optimistic: Option<bool>,
    pub finalized: Option<bool>,
    pub data: T,
}

impl<'de, F> serde::Deserialize<'de> for ExecutionOptimisticFinalizedForkVersionedResponse<F>
where
    F: ForkVersionDeserialize,
{
    fn deserialize<D>(deserializer: D) -> Result<Self, D::Error>
    where
        D: Deserializer<'de>,
    {
        #[derive(Deserialize)]
        struct Helper {
            version: Option<ForkName>,
            execution_optimistic: Option<bool>,
            finalized: Option<bool>,
            data: serde_json::Value,
        }

        let helper = Helper::deserialize(deserializer)?;
        let data = match helper.version {
            Some(fork_name) => F::deserialize_by_fork::<'de, D>(helper.data, fork_name)?,
            None => serde_json::from_value(helper.data).map_err(serde::de::Error::custom)?,
        };

        Ok(ExecutionOptimisticFinalizedForkVersionedResponse {
            version: helper.version,
            execution_optimistic: helper.execution_optimistic,
            finalized: helper.finalized,
            data,
        })
    }
}
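The impl buffers `data` as a raw `serde_json::Value` and only afterwards picks a decoder based on `version`. A standalone toy version of that two-stage decode; the `Payload` struct and `decode` helper here are hypothetical stand-ins, not repo types:

```rust
use serde::Deserialize;
use serde_json::{json, Value};

#[derive(Debug, Deserialize)]
struct Payload {
    slot: String,
}

fn decode(raw: Value) -> Result<(Option<String>, Payload), serde_json::Error> {
    #[derive(Deserialize)]
    struct Helper {
        version: Option<String>,
        data: Value,
    }
    let helper: Helper = serde_json::from_value(raw)?;
    // With a real ForkVersionDeserialize impl the version would select a
    // fork-specific decoder here; the fallback path is plain serde.
    let data: Payload = serde_json::from_value(helper.data)?;
    Ok((helper.version, data))
}

fn main() -> Result<(), serde_json::Error> {
    let (version, data) = decode(json!({
        "version": "capella",
        "execution_optimistic": false,
        "finalized": true,
        "data": { "slot": "4635296" }
    }))?;
    println!("version: {version:?}, slot: {}", data.slot);
    Ok(())
}
```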
#[derive(Debug, PartialEq, Clone, Serialize)]
pub struct ExecutionOptimisticForkVersionedResponse<T> {
    #[serde(skip_serializing_if = "Option::is_none")]
@ -1,7 +1,7 @@
[package]
name = "lcli"
description = "Lighthouse CLI (modeled after zcli)"
version = "4.0.1-rc.0"
version = "4.0.1"
authors = ["Paul Hauner <paul@paulhauner.com>"]
edition = "2021"
@ -1,7 +1,7 @@
# `lcli` requires the full project to be in scope, so this should be built either:
# - from the `lighthouse` dir with the command: `docker build -f ./lcli/Dockerfile .`
# - from the current directory with the command: `docker build -f ./Dockerfile ../`
FROM rust:1.65.0-bullseye AS builder
FROM rust:1.66.0-bullseye AS builder
RUN apt-get update && apt-get -y upgrade && apt-get install -y cmake clang libclang-dev protobuf-compiler
COPY . lighthouse
ARG PORTABLE
@ -1,6 +1,6 @@
[package]
name = "lighthouse"
version = "4.0.1-rc.0"
version = "4.0.1"
authors = ["Sigma Prime <contact@sigmaprime.io>"]
edition = "2021"
autotests = false
@ -1052,6 +1052,19 @@ fn disable_upnp_flag() {
        .with_config(|config| assert!(!config.network.upnp_enabled));
}
#[test]
fn disable_backfill_rate_limiting_flag() {
    CommandLineTest::new()
        .flag("disable-backfill-rate-limiting", None)
        .run_with_zero_port()
        .with_config(|config| assert!(!config.chain.enable_backfill_rate_limiting));
}
#[test]
fn default_backfill_rate_limiting_flag() {
    CommandLineTest::new()
        .run_with_zero_port()
        .with_config(|config| assert!(config.chain.enable_backfill_rate_limiting));
}
#[test]
fn default_boot_nodes() {
    let mainnet = vec![
        // Lighthouse Team (Sigma Prime)
@ -20,7 +20,7 @@ Modify `vars.env` as desired.
Start a local eth1 ganache server plus boot node along with `BN_COUNT`
number of beacon nodes and `VC_COUNT` validator clients.

The `start_local_testnet.sh` script takes three options `-v VC_COUNT`, `-d DEBUG_LEVEL` and `-h` for help.
The `start_local_testnet.sh` script takes four options `-v VC_COUNT`, `-d DEBUG_LEVEL`, `-p` to enable builder proposals and `-h` for help.
The options may be in any order or absent in which case they take the default value specified.
- VC_COUNT: the number of validator clients to create, default: `BN_COUNT`
- DEBUG_LEVEL: one of { error, warn, info, debug, trace }, default: `info`
@ -28,7 +28,7 @@ while getopts "v:d:ph" flag; do
        echo "Options:"
        echo " -v: VC_COUNT    default: $VC_COUNT"
        echo " -d: DEBUG_LEVEL default: info"
        echo " -p: enable private tx proposals"
        echo " -p: enable builder proposals"
        echo " -h: this help"
        exit
        ;;
@ -425,7 +425,7 @@ impl<E: EthSpec> Tester<E> {
            .harness
            .chain
            .slot_clock
            .seconds_from_current_slot_start(self.spec.seconds_per_slot)
            .seconds_from_current_slot_start()
            .unwrap();

        let result = self
@ -0,0 +1,69 @@
use crate::validator_store::ValidatorStore;
use bls::{PublicKey, PublicKeyBytes};
use slog::{info, Logger};
use slot_clock::SlotClock;
use std::sync::Arc;
use types::{Epoch, EthSpec, SignedVoluntaryExit, VoluntaryExit};

pub async fn create_signed_voluntary_exit<T: 'static + SlotClock + Clone, E: EthSpec>(
    pubkey: PublicKey,
    maybe_epoch: Option<Epoch>,
    validator_store: Arc<ValidatorStore<T, E>>,
    slot_clock: T,
    log: Logger,
) -> Result<SignedVoluntaryExit, warp::Rejection> {
    let epoch = match maybe_epoch {
        Some(epoch) => epoch,
        None => get_current_epoch::<T, E>(slot_clock).ok_or_else(|| {
            warp_utils::reject::custom_server_error("Unable to determine current epoch".to_string())
        })?,
    };

    let pubkey_bytes = PublicKeyBytes::from(pubkey);
    if !validator_store.has_validator(&pubkey_bytes) {
        return Err(warp_utils::reject::custom_not_found(format!(
            "{} is disabled or not managed by this validator client",
            pubkey_bytes.as_hex_string()
        )));
    }

    let validator_index = validator_store
        .validator_index(&pubkey_bytes)
        .ok_or_else(|| {
            warp_utils::reject::custom_not_found(format!(
                "The validator index for {} is not known. The validator client \
                may still be initializing or the validator has not yet had a \
                deposit processed.",
                pubkey_bytes.as_hex_string()
            ))
        })?;

    let voluntary_exit = VoluntaryExit {
        epoch,
        validator_index,
    };

    info!(
        log,
        "Signing voluntary exit";
        "validator" => pubkey_bytes.as_hex_string(),
        "epoch" => epoch
    );

    let signed_voluntary_exit = validator_store
        .sign_voluntary_exit(pubkey_bytes, voluntary_exit)
        .await
        .map_err(|e| {
            warp_utils::reject::custom_server_error(format!(
                "Failed to sign voluntary exit: {:?}",
                e
            ))
        })?;

    Ok(signed_voluntary_exit)
}

/// Calculates the current epoch from the genesis time and current time.
fn get_current_epoch<T: 'static + SlotClock + Clone, E: EthSpec>(slot_clock: T) -> Option<Epoch> {
    slot_clock.now().map(|s| s.epoch(E::slots_per_epoch()))
}
@ -1,9 +1,11 @@
mod api_secret;
mod create_signed_voluntary_exit;
mod create_validator;
mod keystores;
mod remotekeys;
mod tests;

use crate::http_api::create_signed_voluntary_exit::create_signed_voluntary_exit;
use crate::{determine_graffiti, GraffitiFile, ValidatorStore};
use account_utils::{
    mnemonic_from_phrase,
@ -71,6 +73,7 @@ pub struct Context<T: SlotClock, E: EthSpec> {
    pub spec: ChainSpec,
    pub config: Config,
    pub log: Logger,
    pub slot_clock: T,
    pub _phantom: PhantomData<E>,
}

@ -189,6 +192,9 @@ pub fn serve<T: 'static + SlotClock + Clone, E: EthSpec>(
    let inner_ctx = ctx.clone();
    let log_filter = warp::any().map(move || inner_ctx.log.clone());

    let inner_slot_clock = ctx.slot_clock.clone();
    let slot_clock_filter = warp::any().map(move || inner_slot_clock.clone());

    let inner_spec = Arc::new(ctx.spec.clone());
    let spec_filter = warp::any().map(move || inner_spec.clone());
@ -904,6 +910,46 @@ pub fn serve<T: 'static + SlotClock + Clone, E: EthSpec>(
        )
        .map(|reply| warp::reply::with_status(reply, warp::http::StatusCode::NO_CONTENT));

    // POST /eth/v1/validator/{pubkey}/voluntary_exit
    let post_validators_voluntary_exits = eth_v1
        .and(warp::path("validator"))
        .and(warp::path::param::<PublicKey>())
        .and(warp::path("voluntary_exit"))
        .and(warp::query::<api_types::VoluntaryExitQuery>())
        .and(warp::path::end())
        .and(validator_store_filter.clone())
        .and(slot_clock_filter)
        .and(log_filter.clone())
        .and(signer.clone())
        .and(task_executor_filter.clone())
        .and_then(
            |pubkey: PublicKey,
             query: api_types::VoluntaryExitQuery,
             validator_store: Arc<ValidatorStore<T, E>>,
             slot_clock: T,
             log,
             signer,
             task_executor: TaskExecutor| {
                blocking_signed_json_task(signer, move || {
                    if let Some(handle) = task_executor.handle() {
                        let signed_voluntary_exit =
                            handle.block_on(create_signed_voluntary_exit(
                                pubkey,
                                query.epoch,
                                validator_store,
                                slot_clock,
                                log,
                            ))?;
                        Ok(signed_voluntary_exit)
                    } else {
                        Err(warp_utils::reject::custom_server_error(
                            "Lighthouse shutting down".into(),
                        ))
                    }
                })
            },
        );

    // GET /eth/v1/keystores
    let get_std_keystores = std_keystores
        .and(signer.clone())
@ -1001,6 +1047,7 @@ pub fn serve<T: 'static + SlotClock + Clone, E: EthSpec>(
        .or(post_validators_keystore)
        .or(post_validators_mnemonic)
        .or(post_validators_web3signer)
        .or(post_validators_voluntary_exits)
        .or(post_fee_recipient)
        .or(post_gas_limit)
        .or(post_std_keystores)
@ -45,6 +45,7 @@ struct ApiTester {
    initialized_validators: Arc<RwLock<InitializedValidators>>,
    validator_store: Arc<ValidatorStore<TestingSlotClock, E>>,
    url: SensitiveUrl,
    slot_clock: TestingSlotClock,
    _server_shutdown: oneshot::Sender<()>,
    _validator_dir: TempDir,
    _runtime_shutdown: exit_future::Signal,
@ -90,8 +91,12 @@ impl ApiTester {
        let slashing_db_path = config.validator_dir.join(SLASHING_PROTECTION_FILENAME);
        let slashing_protection = SlashingDatabase::open_or_create(&slashing_db_path).unwrap();

        let slot_clock =
            TestingSlotClock::new(Slot::new(0), Duration::from_secs(0), Duration::from_secs(1));
        let genesis_time: u64 = 0;
        let slot_clock = TestingSlotClock::new(
            Slot::new(0),
            Duration::from_secs(genesis_time),
            Duration::from_secs(1),
        );

        let (runtime_shutdown, exit) = exit_future::signal();
        let (shutdown_tx, _) = futures::channel::mpsc::channel(1);
@ -101,9 +106,9 @@ impl ApiTester {
            initialized_validators,
            slashing_protection,
            Hash256::repeat_byte(42),
            spec,
            spec.clone(),
            Some(Arc::new(DoppelgangerService::new(log.clone()))),
            slot_clock,
            slot_clock.clone(),
            &config,
            executor.clone(),
            log.clone(),
@ -129,7 +134,8 @@ impl ApiTester {
                listen_port: 0,
                allow_origin: None,
            },
            log,
            log: log.clone(),
            slot_clock: slot_clock.clone(),
            _phantom: PhantomData,
        });
        let ctx = context.clone();
@ -156,6 +162,7 @@ impl ApiTester {
            initialized_validators,
            validator_store,
            url,
            slot_clock,
            _server_shutdown: shutdown_tx,
            _validator_dir: validator_dir,
            _runtime_shutdown: runtime_shutdown,
@ -494,6 +501,33 @@ impl ApiTester {
        self
    }

    pub async fn test_sign_voluntary_exits(self, index: usize, maybe_epoch: Option<Epoch>) -> Self {
        let validator = &self.client.get_lighthouse_validators().await.unwrap().data[index];
        // manually setting validator index in `ValidatorStore`
        self.initialized_validators
            .write()
            .set_index(&validator.voting_pubkey, 0);

        let expected_exit_epoch = maybe_epoch.unwrap_or_else(|| self.get_current_epoch());

        let resp = self
            .client
            .post_validator_voluntary_exit(&validator.voting_pubkey, maybe_epoch)
            .await;

        assert!(resp.is_ok());
        assert_eq!(resp.unwrap().message.epoch, expected_exit_epoch);

        self
    }

    fn get_current_epoch(&self) -> Epoch {
        self.slot_clock
            .now()
            .map(|s| s.epoch(E::slots_per_epoch()))
            .unwrap()
    }

    pub async fn set_validator_enabled(self, index: usize, enabled: bool) -> Self {
        let validator = &self.client.get_lighthouse_validators().await.unwrap().data[index];

@ -778,6 +812,29 @@ fn hd_validator_creation() {
    });
}

#[test]
fn validator_exit() {
    let runtime = build_runtime();
    let weak_runtime = Arc::downgrade(&runtime);
    runtime.block_on(async {
        ApiTester::new(weak_runtime)
            .await
            .create_hd_validators(HdValidatorScenario {
                count: 2,
                specify_mnemonic: false,
                key_derivation_path_offset: 0,
                disabled: vec![],
            })
            .await
            .assert_enabled_validators_count(2)
            .assert_validators_count(2)
            .test_sign_voluntary_exits(0, None)
            .await
            .test_sign_voluntary_exits(0, Some(Epoch::new(256)))
            .await;
    });
}

#[test]
fn validator_enabling() {
    let runtime = build_runtime();
@ -93,6 +93,11 @@ lazy_static::lazy_static! {
        "Total count of attempted SyncSelectionProof signings",
        &["status"]
    );
    pub static ref SIGNED_VOLUNTARY_EXITS_TOTAL: Result<IntCounterVec> = try_create_int_counter_vec(
        "vc_signed_voluntary_exits_total",
        "Total count of VoluntaryExit signings",
        &["status"]
    );
    pub static ref SIGNED_VALIDATOR_REGISTRATIONS_TOTAL: Result<IntCounterVec> = try_create_int_counter_vec(
        "builder_validator_registrations_total",
        "Total count of ValidatorRegistrationData signings",
@ -989,7 +989,23 @@ impl InitializedValidators {

        let cache =
            KeyCache::open_or_create(&self.validators_dir).map_err(Error::UnableToOpenKeyCache)?;
        let mut key_cache = self.decrypt_key_cache(cache, &mut key_stores).await?;

        // Check if there is at least one local definition.
        let has_local_definitions = self.definitions.as_slice().iter().any(|def| {
            matches!(
                def.signing_definition,
                SigningDefinition::LocalKeystore { .. }
            )
        });

        // Only decrypt cache when there is at least one local definition.
        // Decrypting cache is a very expensive operation which is never used for web3signer.
        let mut key_cache = if has_local_definitions {
            self.decrypt_key_cache(cache, &mut key_stores).await?
        } else {
            // Assign an empty KeyCache if all definitions are of the Web3Signer type.
            KeyCache::new()
        };

        let mut disabled_uuids = HashSet::new();
        for def in self.definitions.as_slice() {
@ -1115,13 +1131,16 @@ impl InitializedValidators {
                );
            }
        }
        for uuid in disabled_uuids {
            key_cache.remove(&uuid);

        if has_local_definitions {
            for uuid in disabled_uuids {
                key_cache.remove(&uuid);
            }
        }

        let validators_dir = self.validators_dir.clone();
        let log = self.log.clone();
        if key_cache.is_modified() {
        if has_local_definitions && key_cache.is_modified() {
            tokio::task::spawn_blocking(move || {
                match key_cache.save(validators_dir) {
                    Err(e) => warn!(
@ -94,6 +94,7 @@ pub struct ProductionValidatorClient<T: EthSpec> {
    doppelganger_service: Option<Arc<DoppelgangerService>>,
    preparation_service: PreparationService<SystemTimeSlotClock, T>,
    validator_store: Arc<ValidatorStore<SystemTimeSlotClock, T>>,
    slot_clock: SystemTimeSlotClock,
    http_api_listen_addr: Option<SocketAddr>,
    config: Config,
}
@ -461,7 +462,7 @@ impl<T: EthSpec> ProductionValidatorClient<T> {
        let sync_committee_service = SyncCommitteeService::new(
            duties_service.clone(),
            validator_store.clone(),
            slot_clock,
            slot_clock.clone(),
            beacon_nodes.clone(),
            context.service_context("sync_committee".into()),
        );
@ -482,6 +483,7 @@ impl<T: EthSpec> ProductionValidatorClient<T> {
            preparation_service,
            validator_store,
            config,
            slot_clock,
            http_api_listen_addr: None,
        })
    }
@ -544,6 +546,7 @@ impl<T: EthSpec> ProductionValidatorClient<T> {
            graffiti_flag: self.config.graffiti,
            spec: self.context.eth2_config.spec.clone(),
            config: self.config.http_api.clone(),
            slot_clock: self.slot_clock.clone(),
            log: log.clone(),
            _phantom: PhantomData,
        });
@ -48,6 +48,7 @@ pub enum SignableMessage<'a, T: EthSpec, Payload: AbstractExecPayload<T> = FullP
    },
    SignedContributionAndProof(&'a ContributionAndProof<T>),
    ValidatorRegistration(&'a ValidatorRegistrationData),
    VoluntaryExit(&'a VoluntaryExit),
}

impl<'a, T: EthSpec, Payload: AbstractExecPayload<T>> SignableMessage<'a, T, Payload> {
@ -69,6 +70,7 @@ impl<'a, T: EthSpec, Payload: AbstractExecPayload<T>> SignableMessage<'a, T, Pay
            } => beacon_block_root.signing_root(domain),
            SignableMessage::SignedContributionAndProof(c) => c.signing_root(domain),
            SignableMessage::ValidatorRegistration(v) => v.signing_root(domain),
            SignableMessage::VoluntaryExit(exit) => exit.signing_root(domain),
        }
    }
}
@ -209,6 +211,7 @@ impl SigningMethod {
            SignableMessage::ValidatorRegistration(v) => {
                Web3SignerObject::ValidatorRegistration(v)
            }
            SignableMessage::VoluntaryExit(e) => Web3SignerObject::VoluntaryExit(e),
        };

        // Determine the Web3Signer message type.

@ -63,7 +63,6 @@ pub enum Web3SignerObject<'a, T: EthSpec, Payload: AbstractExecPayload<T>> {
    RandaoReveal {
        epoch: Epoch,
    },
    #[allow(dead_code)]
    VoluntaryExit(&'a VoluntaryExit),
    SyncCommitteeMessage {
        beacon_block_root: Hash256,
@ -25,7 +25,7 @@ use types::{
    SelectionProof, Signature, SignedAggregateAndProof, SignedBeaconBlock, SignedBlobSidecar,
    SignedBlobSidecarList, SignedContributionAndProof, SignedRoot, SignedValidatorRegistrationData,
    Slot, SyncAggregatorSelectionData, SyncCommitteeContribution, SyncCommitteeMessage,
    SyncSelectionProof, SyncSubnetId, ValidatorRegistrationData,
    SyncSelectionProof, SyncSubnetId, ValidatorRegistrationData, SignedVoluntaryExit, VoluntaryExit,
};
use validator_dir::ValidatorDir;

@ -157,6 +157,14 @@ impl<T: SlotClock + 'static, E: EthSpec> ValidatorStore<T, E> {
        self.validators.clone()
    }

    /// Indicates if the `voting_public_key` exists in self and is enabled.
    pub fn has_validator(&self, voting_public_key: &PublicKeyBytes) -> bool {
        self.validators
            .read()
            .validator(voting_public_key)
            .is_some()
    }

    /// Insert a new validator to `self`, where the validator is represented by an EIP-2335
    /// keystore on the filesystem.
    #[allow(clippy::too_many_arguments)]
@ -651,6 +659,32 @@ impl<T: SlotClock + 'static, E: EthSpec> ValidatorStore<T, E> {
        }
    }

    pub async fn sign_voluntary_exit(
        &self,
        validator_pubkey: PublicKeyBytes,
        voluntary_exit: VoluntaryExit,
    ) -> Result<SignedVoluntaryExit, Error> {
        let signing_epoch = voluntary_exit.epoch;
        let signing_context = self.signing_context(Domain::VoluntaryExit, signing_epoch);
        let signing_method = self.doppelganger_bypassed_signing_method(validator_pubkey)?;

        let signature = signing_method
            .get_signature::<E, BlindedPayload<E>>(
                SignableMessage::VoluntaryExit(&voluntary_exit),
                signing_context,
                &self.spec,
                &self.task_executor,
            )
            .await?;

        metrics::inc_counter_vec(&metrics::SIGNED_VOLUNTARY_EXITS_TOTAL, &[metrics::SUCCESS]);

        Ok(SignedVoluntaryExit {
            message: voluntary_exit,
            signature,
        })
    }
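What actually gets signed here is small and fixed: an exit epoch plus the validator's index, hashed into a signing root under the voluntary-exit domain. A toy sketch assuming only the `types` crate; the zero domain is a placeholder for the value that `signing_context(Domain::VoluntaryExit, ...)` derives in the validator client.

```rust
use types::{Epoch, Hash256, SignedRoot, VoluntaryExit};

fn main() {
    // The message is tiny: just the exit epoch and the validator's index.
    let exit = VoluntaryExit {
        epoch: Epoch::new(256),
        validator_index: 42,
    };

    // Placeholder domain; the real one mixes in the fork version and
    // genesis validators root.
    let domain = Hash256::zero();
    let root = exit.signing_root(domain);
    println!("signing root: {root:?}");
}
```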
    pub async fn sign_validator_registration_data(
        &self,
        validator_registration_data: ValidatorRegistrationData,
1
watch/.gitignore
vendored
Normal file
@ -0,0 +1 @@
config.yaml

45
watch/Cargo.toml
Normal file
@ -0,0 +1,45 @@
[package]
name = "watch"
version = "0.1.0"
edition = "2018"

[lib]
name = "watch"
path = "src/lib.rs"

[[bin]]
name = "watch"
path = "src/main.rs"

# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html

[dependencies]
clap = "2.33.3"
log = "0.4.14"
env_logger = "0.9.0"
types = { path = "../consensus/types" }
eth2 = { path = "../common/eth2" }
beacon_node = { path = "../beacon_node" }
tokio = { version = "1.14.0", features = ["time"] }
axum = "0.5.15"
hyper = "0.14.20"
serde = "1.0.116"
serde_json = "1.0.58"
reqwest = { version = "0.11.0", features = ["json", "stream"] }
url = "2.2.2"
rand = "0.7.3"
diesel = { version = "2.0.2", features = ["postgres", "r2d2"] }
diesel_migrations = { version = "2.0.0", features = ["postgres"] }
byteorder = "1.4.3"
bls = { path = "../crypto/bls" }
hex = "0.4.2"
r2d2 = "0.8.9"
serde_yaml = "0.8.24"

[dev-dependencies]
tokio-postgres = "0.7.5"
http_api = { path = "../beacon_node/http_api" }
beacon_chain = { path = "../beacon_node/beacon_chain" }
network = { path = "../beacon_node/network" }
testcontainers = "0.14.0"
unused_port = { path = "../common/unused_port" }
460
watch/README.md
Normal file
@ -0,0 +1,460 @@
## beacon.watch

>beacon.watch is pre-MVP and still under active development and subject to change.

beacon.watch is an Ethereum Beacon Chain monitoring platform whose goal is to provide fast access to
data which is:
1. Not already stored natively in the Beacon Chain
2. Too specialized for Block Explorers
3. Too sensitive for public Block Explorers


### Requirements
- `git`
- `rust` : https://rustup.rs/
- `libpq` : https://www.postgresql.org/download/
- `diesel_cli` :
```
cargo install diesel_cli --no-default-features --features postgres
```
- `docker` : https://docs.docker.com/engine/install/
- `docker-compose` : https://docs.docker.com/compose/install/

### Setup
1. Setup the database:
```
cd postgres_docker_compose
docker-compose up
```

1. Ensure the tests pass:
```
cargo test --release
```

1. Drop the database (if it already exists) and run the required migrations:
```
diesel database reset --database-url postgres://postgres:postgres@localhost/dev
```

1. Ensure a synced Lighthouse beacon node with historical states is available
at `localhost:5052`.
The smaller the value of `--slots-per-restore-point` the faster beacon.watch
will be able to sync to the beacon node.

1. Run the updater daemon:
```
cargo run --release -- run-updater
```

1. Start the HTTP API server:
```
cargo run --release -- serve
```

1. Ensure connectivity:
```
curl "http://localhost:5059/v1/slots/highest"
```

> Functionality on MacOS has not been tested. Windows is not supported.


### Configuration
beacon.watch can be configured through the use of a config file.
Available options can be seen in `config.yaml.default`.

You can specify a config file during runtime:
```
cargo run -- run-updater --config path/to/config.yaml
cargo run -- serve --config path/to/config.yaml
```

You can specify only the parts of the config file which you need changed.
Missing values will remain as their defaults.

For example, if you wish to run with default settings but only wish to alter `log_level`
your config file would be:
```yaml
# config.yaml
log_level: "info"
```
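One way this partial-override behaviour falls out is `#[serde(default)]` on the config structs, so missing YAML keys fall back to hand-written defaults. A minimal, self-contained sketch; the `Config` fields here are illustrative stand-ins for the real ones in watch's source, not the actual struct.

```rust
use serde::Deserialize;

// A cut-down stand-in for watch's config; `#[serde(default)]` is what lets
// missing keys fall back to the `Default` impl below.
#[derive(Debug, Deserialize)]
#[serde(default)]
struct Config {
    log_level: String,
    beacon_node_url: String,
}

impl Default for Config {
    fn default() -> Self {
        Self {
            log_level: "debug".into(),
            beacon_node_url: "http://localhost:5052".into(),
        }
    }
}

fn main() {
    // Only `log_level` is supplied; `beacon_node_url` keeps its default.
    let partial = "log_level: \"info\"\n";
    let config: Config = serde_yaml::from_str(partial).unwrap();
    assert_eq!(config.log_level, "info");
    assert_eq!(config.beacon_node_url, "http://localhost:5052");
}
```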
### Available Endpoints
As beacon.watch continues to develop, more endpoints will be added.

> In these examples any data containing information from blockprint has either been redacted or fabricated.

#### `/v1/slots/{slot}`
```bash
curl "http://localhost:5059/v1/slots/4635296"
```
```json
{
    "slot": "4635296",
    "root": "0xf7063a9d6c663682e59bd0b41d29ce80c3ff0b089049ff8676d6f9ee79622c62",
    "skipped": false,
    "beacon_block": "0xf7063a9d6c663682e59bd0b41d29ce80c3ff0b089049ff8676d6f9ee79622c62"
}
```

#### `/v1/slots?start_slot={}&end_slot={}`
```bash
curl "http://localhost:5059/v1/slots?start_slot=4635296&end_slot=4635297"
```
```json
[
    {
        "slot": "4635297",
        "root": "0x04ad2e963811207e344bebeba5b1217805bcc3a9e2ed9fcf2205d491778c6182",
        "skipped": false,
        "beacon_block": "0x04ad2e963811207e344bebeba5b1217805bcc3a9e2ed9fcf2205d491778c6182"
    },
    {
        "slot": "4635296",
        "root": "0xf7063a9d6c663682e59bd0b41d29ce80c3ff0b089049ff8676d6f9ee79622c62",
        "skipped": false,
        "beacon_block": "0xf7063a9d6c663682e59bd0b41d29ce80c3ff0b089049ff8676d6f9ee79622c62"
    }
]
```

#### `/v1/slots/lowest`
```bash
curl "http://localhost:5059/v1/slots/lowest"
```
```json
{
    "slot": "4635296",
    "root": "0xf7063a9d6c663682e59bd0b41d29ce80c3ff0b089049ff8676d6f9ee79622c62",
    "skipped": false,
    "beacon_block": "0xf7063a9d6c663682e59bd0b41d29ce80c3ff0b089049ff8676d6f9ee79622c62"
}
```

#### `/v1/slots/highest`
```bash
curl "http://localhost:5059/v1/slots/highest"
```
```json
{
    "slot": "4635358",
    "root": "0xe9eff13560688f1bf15cf07b60c84963d4d04a4a885ed0eb19ceb8450011894b",
    "skipped": false,
    "beacon_block": "0xe9eff13560688f1bf15cf07b60c84963d4d04a4a885ed0eb19ceb8450011894b"
}
```

#### `/v1/slots/{slot}/block`
```bash
curl "http://localhost:5059/v1/slots/4635296/block"
```
```json
{
    "slot": "4635296",
    "root": "0xf7063a9d6c663682e59bd0b41d29ce80c3ff0b089049ff8676d6f9ee79622c62",
    "parent_root": "0x7c4860b420a23de9d126da71f9043b3744af98c847efd9e1440f2da8fbf7f31b"
}
```

#### `/v1/blocks/{block_id}`
```bash
curl "http://localhost:5059/v1/blocks/4635296"
# OR
curl "http://localhost:5059/v1/blocks/0xf7063a9d6c663682e59bd0b41d29ce80c3ff0b089049ff8676d6f9ee79622c62"
```
```json
{
    "slot": "4635296",
    "root": "0xf7063a9d6c663682e59bd0b41d29ce80c3ff0b089049ff8676d6f9ee79622c62",
    "parent_root": "0x7c4860b420a23de9d126da71f9043b3744af98c847efd9e1440f2da8fbf7f31b"
}
```

#### `/v1/blocks?start_slot={}&end_slot={}`
```bash
curl "http://localhost:5059/v1/blocks?start_slot=4635296&end_slot=4635297"
```
```json
[
    {
        "slot": "4635297",
        "root": "0x04ad2e963811207e344bebeba5b1217805bcc3a9e2ed9fcf2205d491778c6182",
        "parent_root": "0xf7063a9d6c663682e59bd0b41d29ce80c3ff0b089049ff8676d6f9ee79622c62"
    },
    {
        "slot": "4635296",
        "root": "0xf7063a9d6c663682e59bd0b41d29ce80c3ff0b089049ff8676d6f9ee79622c62",
        "parent_root": "0x7c4860b420a23de9d126da71f9043b3744af98c847efd9e1440f2da8fbf7f31b"
    }
]
```

#### `/v1/blocks/{block_id}/previous`
```bash
curl "http://localhost:5059/v1/blocks/4635297/previous"
# OR
curl "http://localhost:5059/v1/blocks/0x04ad2e963811207e344bebeba5b1217805bcc3a9e2ed9fcf2205d491778c6182/previous"
```
```json
{
    "slot": "4635296",
    "root": "0xf7063a9d6c663682e59bd0b41d29ce80c3ff0b089049ff8676d6f9ee79622c62",
    "parent_root": "0x7c4860b420a23de9d126da71f9043b3744af98c847efd9e1440f2da8fbf7f31b"
}
```

#### `/v1/blocks/{block_id}/next`
```bash
curl "http://localhost:5059/v1/blocks/4635296/next"
# OR
curl "http://localhost:5059/v1/blocks/0xf7063a9d6c663682e59bd0b41d29ce80c3ff0b089049ff8676d6f9ee79622c62/next"
```
```json
{
    "slot": "4635297",
    "root": "0x04ad2e963811207e344bebeba5b1217805bcc3a9e2ed9fcf2205d491778c6182",
    "parent_root": "0xf7063a9d6c663682e59bd0b41d29ce80c3ff0b089049ff8676d6f9ee79622c62"
}
```

#### `/v1/blocks/lowest`
```bash
curl "http://localhost:5059/v1/blocks/lowest"
```
```json
{
    "slot": "4635296",
    "root": "0xf7063a9d6c663682e59bd0b41d29ce80c3ff0b089049ff8676d6f9ee79622c62",
    "parent_root": "0x7c4860b420a23de9d126da71f9043b3744af98c847efd9e1440f2da8fbf7f31b"
}
```

#### `/v1/blocks/highest`
```bash
curl "http://localhost:5059/v1/blocks/highest"
```
```json
{
    "slot": "4635358",
    "root": "0xe9eff13560688f1bf15cf07b60c84963d4d04a4a885ed0eb19ceb8450011894b",
    "parent_root": "0xb66e05418bb5b1d4a965c994e1f0e5b5f0d7b780e0df12f3f6321510654fa1d2"
}
```

#### `/v1/blocks/{block_id}/proposer`
```bash
curl "http://localhost:5059/v1/blocks/4635296/proposer"
# OR
curl "http://localhost:5059/v1/blocks/0xf7063a9d6c663682e59bd0b41d29ce80c3ff0b089049ff8676d6f9ee79622c62/proposer"
```
```json
{
    "slot": "4635296",
    "proposer_index": 223126,
    "graffiti": ""
}
```

#### `/v1/blocks/{block_id}/rewards`
```bash
curl "http://localhost:5059/v1/blocks/4635296/reward"
# OR
curl "http://localhost:5059/v1/blocks/0xf7063a9d6c663682e59bd0b41d29ce80c3ff0b089049ff8676d6f9ee79622c62/reward"
```
```json
{
    "slot": "4635296",
    "total": 25380059,
    "attestation_reward": 24351867,
    "sync_committee_reward": 1028192
}
```

#### `/v1/blocks/{block_id}/packing`
```bash
curl "http://localhost:5059/v1/blocks/4635296/packing"
# OR
curl "http://localhost:5059/v1/blocks/0xf7063a9d6c663682e59bd0b41d29ce80c3ff0b089049ff8676d6f9ee79622c62/packing"
```
```json
{
    "slot": "4635296",
    "available": 16152,
    "included": 13101,
    "prior_skip_slots": 0
}
```

#### `/v1/validators/{validator}`
```bash
curl "http://localhost:5059/v1/validators/1"
# OR
curl "http://localhost:5059/v1/validators/0xa1d1ad0714035353258038e964ae9675dc0252ee22cea896825c01458e1807bfad2f9969338798548d9858a571f7425c"
```
```json
{
    "index": 1,
    "public_key": "0xa1d1ad0714035353258038e964ae9675dc0252ee22cea896825c01458e1807bfad2f9969338798548d9858a571f7425c",
    "status": "active_ongoing",
    "client": null,
    "activation_epoch": 0,
    "exit_epoch": null
}
```

#### `/v1/validators/{validator}/attestation/{epoch}`
```bash
curl "http://localhost:5059/v1/validators/1/attestation/144853"
# OR
curl "http://localhost:5059/v1/validators/0xa1d1ad0714035353258038e964ae9675dc0252ee22cea896825c01458e1807bfad2f9969338798548d9858a571f7425c/attestation/144853"
```
```json
{
    "index": 1,
    "epoch": "144853",
    "source": true,
    "head": true,
    "target": true
}
```

#### `/v1/validators/missed/{vote}/{epoch}`
```bash
curl "http://localhost:5059/v1/validators/missed/head/144853"
```
```json
[
    63,
    67,
    98,
    ...
]
```

#### `/v1/validators/missed/{vote}/{epoch}/graffiti`
```bash
curl "http://localhost:5059/v1/validators/missed/head/144853/graffiti"
```
```json
{
    "Mr F was here": 3,
    "Lighthouse/v3.1.0-aa022f4": 5,
    ...
}
```

#### `/v1/clients/missed/{vote}/{epoch}`
```bash
curl "http://localhost:5059/v1/clients/missed/source/144853"
```
```json
{
    "Lighthouse": 100,
    "Lodestar": 100,
    "Nimbus": 100,
    "Prysm": 100,
    "Teku": 100,
    "Unknown": 100
}
```

#### `/v1/clients/missed/{vote}/{epoch}/percentages`
Note that this endpoint expresses the following:
```
What percentage of each client implementation missed this vote?
```

```bash
curl "http://localhost:5059/v1/clients/missed/target/144853/percentages"
```
```json
{
    "Lighthouse": 0.51234567890,
    "Lodestar": 0.51234567890,
    "Nimbus": 0.51234567890,
    "Prysm": 0.09876543210,
    "Teku": 0.09876543210,
    "Unknown": 0.05647382910
}
```

#### `/v1/clients/missed/{vote}/{epoch}/percentages/relative`
Note that this endpoint expresses the following:
```
For the validators which did miss this vote, what percentage of them were from each client implementation?
```
You can check these values against the output of `/v1/clients/percentages` to see any discrepancies.

```bash
curl "http://localhost:5059/v1/clients/missed/target/144853/percentages/relative"
```
```json
{
    "Lighthouse": 11.11111111111111,
    "Lodestar": 11.11111111111111,
    "Nimbus": 11.11111111111111,
    "Prysm": 16.66666666666667,
    "Teku": 16.66666666666667,
    "Unknown": 33.33333333333333
}
```
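The difference between the two percentage endpoints is just the denominator: per-client totals for `/percentages`, the missed-vote total for `/percentages/relative`. A toy calculation with made-up counts:

```rust
use std::collections::HashMap;

fn main() {
    // Hypothetical counts: total validators per client, and how many of each
    // client missed the vote in some epoch.
    let totals = HashMap::from([("Lighthouse", 5000u64), ("Prysm", 3000u64)]);
    let missed = HashMap::from([("Lighthouse", 50u64), ("Prysm", 100u64)]);
    let missed_total: u64 = missed.values().sum();

    for (client, count) in &missed {
        // `/percentages`: share of *that client's* validators which missed.
        let absolute = 100.0 * *count as f64 / totals[client] as f64;
        // `/percentages/relative`: share of *all* missed votes from that client.
        let relative = 100.0 * *count as f64 / missed_total as f64;
        println!("{client}: absolute {absolute:.2}%, relative {relative:.2}%");
    }
}
```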
#### `/v1/clients`
```bash
curl "http://localhost:5059/v1/clients"
```
```json
{
    "Lighthouse": 5000,
    "Lodestar": 5000,
    "Nimbus": 5000,
    "Prysm": 5000,
    "Teku": 5000,
    "Unknown": 5000
}
```

#### `/v1/clients/percentages`
```bash
curl "http://localhost:5059/v1/clients/percentages"
```
```json
{
    "Lighthouse": 16.66666666666667,
    "Lodestar": 16.66666666666667,
    "Nimbus": 16.66666666666667,
    "Prysm": 16.66666666666667,
    "Teku": 16.66666666666667,
    "Unknown": 16.66666666666667
}
```

### Future work
- New tables
  - `skip_slots`?

- More API endpoints
  - `/v1/proposers?start_epoch={}&end_epoch={}` and similar
  - `/v1/validators/{status}/count`

- Concurrently backfill and forwards fill, so forwards fill is not bottlenecked by large backfills.

- Better/prettier (async?) logging.

- Connect to a range of beacon_nodes to sync different components concurrently.
  Generally, processing certain api queries such as `block_packing` and `attestation_performance` takes the longest to sync.

### Architecture
Connection Pooling:
- 1 Pool for Updater (read and write)
- 1 Pool for HTTP Server (should be read only, although not sure if we can enforce this)
49
watch/config.yaml.default
Normal file
@ -0,0 +1,49 @@
---
database:
    user: "postgres"
    password: "postgres"
    dbname: "dev"
    default_dbname: "postgres"
    host: "localhost"
    port: 5432
    connect_timeout_millis: 2000

server:
    listen_addr: "127.0.0.1"
    listen_port: 5059

updater:
    # The URL of the Beacon Node to perform sync tasks with.
    # Cannot yet accept multiple beacon nodes.
    beacon_node_url: "http://localhost:5052"
    # The number of epochs to backfill. Must be below 100.
    max_backfill_size_epochs: 2
    # The epoch at which to stop backfilling.
    backfill_stop_epoch: 0
    # Whether to sync the attestations table.
    attestations: true
    # Whether to sync the proposer_info table.
    proposer_info: true
    # Whether to sync the block_rewards table.
    block_rewards: true
    # Whether to sync the block_packing table.
    block_packing: true

blockprint:
    # Whether to sync client information from blockprint.
    enabled: false
    # The URL of the blockprint server.
    url: ""
    # The username used to authenticate to the blockprint server.
    username: ""
    # The password used to authenticate to the blockprint server.
    password: ""

# Log level.
# Valid options are:
# - "trace"
# - "debug"
# - "info"
# - "warn"
# - "error"
log_level: "debug"
5
watch/diesel.toml
Normal file
@ -0,0 +1,5 @@
# For documentation on how to configure this file,
# see diesel.rs/guides/configuring-diesel-cli

[print_schema]
file = "src/database/schema.rs"

0
watch/migrations/.gitkeep
Normal file
@ -0,0 +1,6 @@
-- This file was automatically created by Diesel to setup helper functions
-- and other internal bookkeeping. This file is safe to edit, any future
-- changes will be added to existing projects as new migrations.

DROP FUNCTION IF EXISTS diesel_manage_updated_at(_tbl regclass);
DROP FUNCTION IF EXISTS diesel_set_updated_at();

36
watch/migrations/00000000000000_diesel_initial_setup/up.sql
Normal file
@ -0,0 +1,36 @@
-- This file was automatically created by Diesel to setup helper functions
-- and other internal bookkeeping. This file is safe to edit, any future
-- changes will be added to existing projects as new migrations.

-- Sets up a trigger for the given table to automatically set a column called
-- `updated_at` whenever the row is modified (unless `updated_at` was included
-- in the modified columns)
--
-- # Example
--
-- ```sql
-- CREATE TABLE users (id SERIAL PRIMARY KEY, updated_at TIMESTAMP NOT NULL DEFAULT NOW());
--
-- SELECT diesel_manage_updated_at('users');
-- ```
CREATE OR REPLACE FUNCTION diesel_manage_updated_at(_tbl regclass) RETURNS VOID AS $$
BEGIN
    EXECUTE format('CREATE TRIGGER set_updated_at BEFORE UPDATE ON %s
                    FOR EACH ROW EXECUTE PROCEDURE diesel_set_updated_at()', _tbl);
END;
$$ LANGUAGE plpgsql;

CREATE OR REPLACE FUNCTION diesel_set_updated_at() RETURNS trigger AS $$
BEGIN
    IF (
        NEW IS DISTINCT FROM OLD AND
        NEW.updated_at IS NOT DISTINCT FROM OLD.updated_at
    ) THEN
        NEW.updated_at := current_timestamp;
    END IF;
    RETURN NEW;
END;
$$ LANGUAGE plpgsql;
@ -0,0 +1 @@
DROP TABLE canonical_slots

@ -0,0 +1,6 @@
CREATE TABLE canonical_slots (
    slot integer PRIMARY KEY,
    root bytea NOT NULL,
    skipped boolean NOT NULL,
    beacon_block bytea UNIQUE
)

@ -0,0 +1 @@
DROP TABLE beacon_blocks

7
watch/migrations/2022-01-01-000001_beacon_blocks/up.sql
Normal file
@ -0,0 +1,7 @@
CREATE TABLE beacon_blocks (
    slot integer PRIMARY KEY REFERENCES canonical_slots(slot) ON DELETE CASCADE,
    root bytea REFERENCES canonical_slots(beacon_block) NOT NULL,
    parent_root bytea NOT NULL,
    attestation_count integer NOT NULL,
    transaction_count integer
)

1
watch/migrations/2022-01-01-000002_validators/down.sql
Normal file
@ -0,0 +1 @@
DROP TABLE validators

7
watch/migrations/2022-01-01-000002_validators/up.sql
Normal file
@ -0,0 +1,7 @@
CREATE TABLE validators (
    index integer PRIMARY KEY,
    public_key bytea NOT NULL,
    status text NOT NULL,
    activation_epoch integer,
    exit_epoch integer
)

@ -0,0 +1 @@
DROP TABLE proposer_info

5
watch/migrations/2022-01-01-000003_proposer_info/up.sql
Normal file
@ -0,0 +1,5 @@
CREATE TABLE proposer_info (
    slot integer PRIMARY KEY REFERENCES beacon_blocks(slot) ON DELETE CASCADE,
    proposer_index integer REFERENCES validators(index) ON DELETE CASCADE NOT NULL,
    graffiti text NOT NULL
)

@ -0,0 +1 @@
DROP TABLE active_config

5
watch/migrations/2022-01-01-000004_active_config/up.sql
Normal file
@ -0,0 +1,5 @@
CREATE TABLE active_config (
    id integer PRIMARY KEY CHECK (id=1),
    config_name text NOT NULL,
    slots_per_epoch integer NOT NULL
)

1
watch/migrations/2022-01-01-000010_blockprint/down.sql
Normal file
@ -0,0 +1 @@
DROP TABLE blockprint

4
watch/migrations/2022-01-01-000010_blockprint/up.sql
Normal file
@ -0,0 +1,4 @@
CREATE TABLE blockprint (
    slot integer PRIMARY KEY REFERENCES beacon_blocks(slot) ON DELETE CASCADE,
    best_guess text NOT NULL
)

@ -0,0 +1 @@
DROP TABLE block_rewards

6
watch/migrations/2022-01-01-000011_block_rewards/up.sql
Normal file
@ -0,0 +1,6 @@
CREATE TABLE block_rewards (
    slot integer PRIMARY KEY REFERENCES beacon_blocks(slot) ON DELETE CASCADE,
    total integer NOT NULL,
    attestation_reward integer NOT NULL,
    sync_committee_reward integer NOT NULL
)

@ -0,0 +1 @@
DROP TABLE block_packing

6
watch/migrations/2022-01-01-000012_block_packing/up.sql
Normal file
@ -0,0 +1,6 @@
CREATE TABLE block_packing (
    slot integer PRIMARY KEY REFERENCES beacon_blocks(slot) ON DELETE CASCADE,
    available integer NOT NULL,
    included integer NOT NULL,
    prior_skip_slots integer NOT NULL
)

@ -0,0 +1 @@
DROP TABLE suboptimal_attestations

@ -0,0 +1,8 @@
CREATE TABLE suboptimal_attestations (
    epoch_start_slot integer CHECK (epoch_start_slot % 32 = 0) REFERENCES canonical_slots(slot) ON DELETE CASCADE,
    index integer NOT NULL REFERENCES validators(index) ON DELETE CASCADE,
    source boolean NOT NULL,
    head boolean NOT NULL,
    target boolean NOT NULL,
    PRIMARY KEY(epoch_start_slot, index)
)

2
watch/migrations/2022-01-01-000020_capella/down.sql
Normal file
@ -0,0 +1,2 @@
ALTER TABLE beacon_blocks
DROP COLUMN withdrawal_count;

3
watch/migrations/2022-01-01-000020_capella/up.sql
Normal file
@ -0,0 +1,3 @@
ALTER TABLE beacon_blocks
ADD COLUMN withdrawal_count integer;
16
watch/postgres_docker_compose/compose.yml
Normal file
@ -0,0 +1,16 @@
version: "3"

services:
    postgres:
        image: postgres:12.3-alpine
        restart: always
        environment:
            POSTGRES_PASSWORD: postgres
            POSTGRES_USER: postgres
        volumes:
            - postgres:/var/lib/postgresql/data
        ports:
            - 127.0.0.1:5432:5432

volumes:
    postgres:
140
watch/src/block_packing/database.rs
Normal file
@ -0,0 +1,140 @@
use crate::database::{
    schema::{beacon_blocks, block_packing},
    watch_types::{WatchHash, WatchSlot},
    Error, PgConn, MAX_SIZE_BATCH_INSERT,
};

use diesel::prelude::*;
use diesel::{Insertable, Queryable};
use log::debug;
use serde::{Deserialize, Serialize};
use std::time::Instant;

#[derive(Debug, Queryable, Insertable, Serialize, Deserialize)]
#[diesel(table_name = block_packing)]
pub struct WatchBlockPacking {
    pub slot: WatchSlot,
    pub available: i32,
    pub included: i32,
    pub prior_skip_slots: i32,
}

/// Insert a batch of values into the `block_packing` table.
///
/// On a conflict, it will do nothing, leaving the old value.
pub fn insert_batch_block_packing(
    conn: &mut PgConn,
    packing: Vec<WatchBlockPacking>,
) -> Result<(), Error> {
    use self::block_packing::dsl::*;

    let mut count = 0;
    let timer = Instant::now();

    for chunk in packing.chunks(MAX_SIZE_BATCH_INSERT) {
        count += diesel::insert_into(block_packing)
            .values(chunk)
            .on_conflict_do_nothing()
            .execute(conn)?;
    }

    let time_taken = timer.elapsed();
    debug!("Block packing inserted, count: {count}, time taken: {time_taken:?}");
    Ok(())
}

/// Selects the row from the `block_packing` table where `slot` is minimum.
pub fn get_lowest_block_packing(conn: &mut PgConn) -> Result<Option<WatchBlockPacking>, Error> {
    use self::block_packing::dsl::*;
    let timer = Instant::now();

    let result = block_packing
        .order_by(slot.asc())
        .limit(1)
        .first::<WatchBlockPacking>(conn)
        .optional()?;

    let time_taken = timer.elapsed();
    debug!("Block packing requested: lowest, time_taken: {time_taken:?}");
    Ok(result)
}

/// Selects the row from the `block_packing` table where `slot` is maximum.
pub fn get_highest_block_packing(conn: &mut PgConn) -> Result<Option<WatchBlockPacking>, Error> {
    use self::block_packing::dsl::*;
    let timer = Instant::now();

    let result = block_packing
        .order_by(slot.desc())
        .limit(1)
        .first::<WatchBlockPacking>(conn)
        .optional()?;

    let time_taken = timer.elapsed();
    debug!("Block packing requested: highest, time_taken: {time_taken:?}");
    Ok(result)
}

/// Selects a single row of the `block_packing` table corresponding to a given `root_query`.
pub fn get_block_packing_by_root(
    conn: &mut PgConn,
    root_query: WatchHash,
) -> Result<Option<WatchBlockPacking>, Error> {
    use self::beacon_blocks::dsl::{beacon_blocks, root};
    use self::block_packing::dsl::*;
    let timer = Instant::now();

    let join = beacon_blocks.inner_join(block_packing);

    let result = join
        .select((slot, available, included, prior_skip_slots))
        .filter(root.eq(root_query))
        .first::<WatchBlockPacking>(conn)
        .optional()?;

    let time_taken = timer.elapsed();
    debug!("Block packing requested: {root_query}, time_taken: {time_taken:?}");
    Ok(result)
}

/// Selects a single row of the `block_packing` table corresponding to a given `slot_query`.
pub fn get_block_packing_by_slot(
    conn: &mut PgConn,
    slot_query: WatchSlot,
) -> Result<Option<WatchBlockPacking>, Error> {
    use self::block_packing::dsl::*;
    let timer = Instant::now();

    let result = block_packing
        .filter(slot.eq(slot_query))
        .first::<WatchBlockPacking>(conn)
        .optional()?;

    let time_taken = timer.elapsed();
    debug!("Block packing requested: {slot_query}, time_taken: {time_taken:?}");
    Ok(result)
}

/// Selects `slot` from all rows of the `beacon_blocks` table which do not have a corresponding
/// row in `block_packing`.
#[allow(dead_code)]
pub fn get_unknown_block_packing(
    conn: &mut PgConn,
    slots_per_epoch: u64,
) -> Result<Vec<Option<WatchSlot>>, Error> {
    use self::beacon_blocks::dsl::{beacon_blocks, root, slot};
    use self::block_packing::dsl::block_packing;

    let join = beacon_blocks.left_join(block_packing);

    let result = join
        .select(slot)
        .filter(root.is_null())
        // Block packing cannot be retrieved for epoch 0 so we need to exclude them.
        .filter(slot.ge(slots_per_epoch as i32))
        .order_by(slot.desc())
        .nullable()
        .load::<Option<WatchSlot>>(conn)?;

    Ok(result)
}
38
watch/src/block_packing/mod.rs
Normal file
@ -0,0 +1,38 @@
pub mod database;
pub mod server;
pub mod updater;

use crate::database::watch_types::WatchSlot;
use crate::updater::error::Error;

pub use database::{
    get_block_packing_by_root, get_block_packing_by_slot, get_highest_block_packing,
    get_lowest_block_packing, get_unknown_block_packing, insert_batch_block_packing,
    WatchBlockPacking,
};
pub use server::block_packing_routes;

use eth2::BeaconNodeHttpClient;
use types::Epoch;

/// Sends a request to `lighthouse/analysis/block_packing`.
/// Formats the response into a vector of `WatchBlockPacking`.
///
/// Will fail if `start_epoch == 0`.
pub async fn get_block_packing(
    bn: &BeaconNodeHttpClient,
    start_epoch: Epoch,
    end_epoch: Epoch,
) -> Result<Vec<WatchBlockPacking>, Error> {
    Ok(bn
        .get_lighthouse_analysis_block_packing(start_epoch, end_epoch)
        .await?
        .into_iter()
        .map(|data| WatchBlockPacking {
            slot: WatchSlot::from_slot(data.slot),
            available: data.available_attestations as i32,
            included: data.included_attestations as i32,
            prior_skip_slots: data.prior_skip_slots as i32,
        })
        .collect())
}
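
A hedged call-site sketch (it assumes an already-constructed `BeaconNodeHttpClient` named `bn`); the range starts at epoch 1 because the endpoint rejects `start_epoch == 0`:

    let packing = get_block_packing(&bn, Epoch::new(1), Epoch::new(2)).await?;
    println!("fetched {} rows of packing data", packing.len());
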
31
watch/src/block_packing/server.rs
Normal file
@ -0,0 +1,31 @@
use crate::block_packing::database::{
    get_block_packing_by_root, get_block_packing_by_slot, WatchBlockPacking,
};
use crate::database::{get_connection, PgPool, WatchHash, WatchSlot};
use crate::server::Error;

use axum::{extract::Path, routing::get, Extension, Json, Router};
use eth2::types::BlockId;
use std::str::FromStr;

pub async fn get_block_packing(
    Path(block_query): Path<String>,
    Extension(pool): Extension<PgPool>,
) -> Result<Json<Option<WatchBlockPacking>>, Error> {
    let mut conn = get_connection(&pool).map_err(Error::Database)?;
    match BlockId::from_str(&block_query).map_err(|_| Error::BadRequest)? {
        BlockId::Root(root) => Ok(Json(get_block_packing_by_root(
            &mut conn,
            WatchHash::from_hash(root),
        )?)),
        BlockId::Slot(slot) => Ok(Json(get_block_packing_by_slot(
            &mut conn,
            WatchSlot::from_slot(slot),
        )?)),
        _ => Err(Error::BadRequest),
    }
}

pub fn block_packing_routes() -> Router {
    Router::new().route("/v1/blocks/:block/packing", get(get_block_packing))
}
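
These routes only become reachable once merged into a served `Router`; a sketch under assumed names (the `pool`, bind address, and port are illustrative, not taken from this diff):

    let addr: std::net::SocketAddr = "127.0.0.1:5059".parse()?;
    let app = Router::new()
        .merge(block_packing_routes())
        // The handler extracts the pool via `Extension<PgPool>`.
        .layer(Extension(pool));
    axum::Server::bind(&addr)
        .serve(app.into_make_service())
        .await?;
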
211
watch/src/block_packing/updater.rs
Normal file
@ -0,0 +1,211 @@
use crate::database::{self, Error as DbError};
use crate::updater::{Error, UpdateHandler};

use crate::block_packing::get_block_packing;

use eth2::types::{Epoch, EthSpec};
use log::{debug, error, warn};

const MAX_SIZE_SINGLE_REQUEST_BLOCK_PACKING: u64 = 50;

impl<T: EthSpec> UpdateHandler<T> {
    /// Forward fills the `block_packing` table starting from the entry with the
    /// highest slot.
    ///
    /// It constructs a request to the `get_block_packing` API with:
    /// `start_epoch` -> highest completely filled epoch + 1 (or epoch of lowest beacon block)
    /// `end_epoch` -> epoch of highest beacon block
    ///
    /// It will resync the latest epoch if it is not fully filled.
    /// That is, `if highest_filled_slot % slots_per_epoch != 31`.
    /// This means that if the last slot of an epoch is a skip slot, the whole epoch will be
    /// resynced during the next head update.
    ///
    /// Request range will not exceed `MAX_SIZE_SINGLE_REQUEST_BLOCK_PACKING`.
    pub async fn fill_block_packing(&mut self) -> Result<(), Error> {
        let mut conn = database::get_connection(&self.pool)?;

        // Get the slot of the highest entry in the `block_packing` table.
        let highest_filled_slot_opt = if self.config.block_packing {
            database::get_highest_block_packing(&mut conn)?.map(|packing| packing.slot)
        } else {
            return Err(Error::NotEnabled("block_packing".to_string()));
        };

        let mut start_epoch = if let Some(highest_filled_slot) = highest_filled_slot_opt {
            if highest_filled_slot.as_slot() % self.slots_per_epoch
                == self.slots_per_epoch.saturating_sub(1)
            {
                // The whole epoch is filled so we can begin syncing the next one.
                highest_filled_slot.as_slot().epoch(self.slots_per_epoch) + 1
            } else {
                // The epoch is only partially synced. Try to sync it fully.
                highest_filled_slot.as_slot().epoch(self.slots_per_epoch)
            }
        } else {
            // No entries in the `block_packing` table. Use `beacon_blocks` instead.
            if let Some(lowest_beacon_block) = database::get_lowest_beacon_block(&mut conn)? {
                lowest_beacon_block
                    .slot
                    .as_slot()
                    .epoch(self.slots_per_epoch)
            } else {
                // There are no blocks in the database, do not fill the `block_packing` table.
                warn!("Refusing to fill block packing as there are no blocks in the database");
                return Ok(());
            }
        };

        // The `get_block_packing` API endpoint cannot accept `start_epoch == 0`.
        if start_epoch == 0 {
            start_epoch += 1
        }

        if let Some(highest_block_slot) =
            database::get_highest_beacon_block(&mut conn)?.map(|block| block.slot.as_slot())
        {
            let mut end_epoch = highest_block_slot.epoch(self.slots_per_epoch);

            if start_epoch > end_epoch {
                debug!("Block packing is up to date with the head of the database");
                return Ok(());
            }

            // Ensure the size of the request does not exceed the maximum allowed value.
            if start_epoch < end_epoch.saturating_sub(MAX_SIZE_SINGLE_REQUEST_BLOCK_PACKING) {
                end_epoch = start_epoch + MAX_SIZE_SINGLE_REQUEST_BLOCK_PACKING
            }

            if let Some(lowest_block_slot) =
                database::get_lowest_beacon_block(&mut conn)?.map(|block| block.slot.as_slot())
            {
                let mut packing = get_block_packing(&self.bn, start_epoch, end_epoch).await?;

                // Since we pull a full epoch of data but are not guaranteed to have all blocks of
                // that epoch available, only insert blocks with corresponding `beacon_block`s.
                packing.retain(|packing| {
                    packing.slot.as_slot() >= lowest_block_slot
                        && packing.slot.as_slot() <= highest_block_slot
                });
                database::insert_batch_block_packing(&mut conn, packing)?;
            } else {
                return Err(Error::Database(DbError::Other(
                    "Database did not return a lowest block when one exists".to_string(),
                )));
            }
        } else {
            // There are no blocks in the `beacon_blocks` database, but there are entries in the
            // `block_packing` table. This is a critical failure. It usually means someone has
            // manually tampered with the database tables and should not occur during normal
            // operation.
            error!("Database is corrupted. Please re-sync the database");
            return Err(Error::Database(DbError::DatabaseCorrupted));
        }

        Ok(())
    }
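
    // A worked sketch of the `start_epoch` selection above (illustrative
    // values, 32 slots per epoch as on mainnet):
    //
    //   highest_filled_slot = 95 -> 95 % 32 == 31: epoch 2 is complete,
    //                               so start_epoch = epoch(95) + 1 = 3.
    //   highest_filled_slot = 90 -> 90 % 32 == 26: epoch 2 is partial,
    //                               so start_epoch = 2 and epoch 2 is resynced.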

    /// Backfill the `block_packing` table starting from the entry with the lowest slot.
    ///
    /// It constructs a request to the `get_block_packing` function with:
    /// `start_epoch` -> epoch of lowest beacon block
    /// `end_epoch` -> epoch of lowest filled `block_packing` - 1 (or epoch of highest beacon block)
    ///
    /// It will resync the lowest epoch if it is not fully filled.
    /// That is, `if lowest_filled_slot % slots_per_epoch != 0`.
    /// This means that if the first slot of an epoch is a skip slot, the whole epoch will be
    /// resynced during the next head update.
    ///
    /// Request range will not exceed `MAX_SIZE_SINGLE_REQUEST_BLOCK_PACKING`.
    pub async fn backfill_block_packing(&mut self) -> Result<(), Error> {
        let mut conn = database::get_connection(&self.pool)?;
        let max_block_packing_backfill = self.config.max_backfill_size_epochs;

        // Get the slot of the lowest entry in the `block_packing` table.
        let lowest_filled_slot_opt = if self.config.block_packing {
            database::get_lowest_block_packing(&mut conn)?.map(|packing| packing.slot)
        } else {
            return Err(Error::NotEnabled("block_packing".to_string()));
        };

        let end_epoch = if let Some(lowest_filled_slot) = lowest_filled_slot_opt {
            if lowest_filled_slot.as_slot() % self.slots_per_epoch == 0 {
                lowest_filled_slot
                    .as_slot()
                    .epoch(self.slots_per_epoch)
                    .saturating_sub(Epoch::new(1))
            } else {
                // The epoch is only partially synced. Try to sync it fully.
                lowest_filled_slot.as_slot().epoch(self.slots_per_epoch)
            }
        } else {
            // No entries in the `block_packing` table. Use `beacon_blocks` instead.
            if let Some(highest_beacon_block) =
                database::get_highest_beacon_block(&mut conn)?.map(|block| block.slot)
            {
                highest_beacon_block.as_slot().epoch(self.slots_per_epoch)
            } else {
                // There are no blocks in the database, do not backfill the `block_packing` table.
                warn!("Refusing to backfill block packing as there are no blocks in the database");
                return Ok(());
            }
        };

        if end_epoch <= 1 {
            debug!("Block packing backfill is complete");
            return Ok(());
        }

        if let Some(lowest_block_slot) =
            database::get_lowest_beacon_block(&mut conn)?.map(|block| block.slot.as_slot())
        {
            let mut start_epoch = lowest_block_slot.epoch(self.slots_per_epoch);

            if start_epoch >= end_epoch {
                debug!("Block packing is up to date with the base of the database");
                return Ok(());
            }

            // Ensure that the request range does not exceed `max_block_packing_backfill` or
            // `MAX_SIZE_SINGLE_REQUEST_BLOCK_PACKING`.
            if start_epoch < end_epoch.saturating_sub(max_block_packing_backfill) {
                start_epoch = end_epoch.saturating_sub(max_block_packing_backfill)
            }
            if start_epoch < end_epoch.saturating_sub(MAX_SIZE_SINGLE_REQUEST_BLOCK_PACKING) {
                start_epoch = end_epoch.saturating_sub(MAX_SIZE_SINGLE_REQUEST_BLOCK_PACKING)
            }

            // The `block_packing` API cannot accept `start_epoch == 0`.
            if start_epoch == 0 {
                start_epoch += 1
            }

            if let Some(highest_block_slot) =
                database::get_highest_beacon_block(&mut conn)?.map(|block| block.slot.as_slot())
            {
                let mut packing = get_block_packing(&self.bn, start_epoch, end_epoch).await?;

                // Only insert blocks with corresponding `beacon_block`s.
                packing.retain(|packing| {
                    packing.slot.as_slot() >= lowest_block_slot
                        && packing.slot.as_slot() <= highest_block_slot
                });

                database::insert_batch_block_packing(&mut conn, packing)?;
            } else {
                return Err(Error::Database(DbError::Other(
                    "Database did not return a lowest block when one exists".to_string(),
                )));
            }
        } else {
            // There are no blocks in the `beacon_blocks` database, but there are entries in the
            // `block_packing` table. This is a critical failure. It usually means someone has
            // manually tampered with the database tables and should not occur during normal
            // operation.
            error!("Database is corrupted. Please re-sync the database");
            return Err(Error::Database(DbError::DatabaseCorrupted));
        }

        Ok(())
    }
}
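
    // And the mirror-image arithmetic for the backfill bounds (again
    // illustrative, 32 slots per epoch):
    //
    //   lowest_filled_slot = 64 -> 64 % 32 == 0: epoch 2 starts cleanly, so
    //                              end_epoch = 2 - 1 = 1 and the backfill
    //                              halts via the `end_epoch <= 1` check.
    //   lowest_filled_slot = 70 -> 70 % 32 == 6: epoch 2 is partial, so
    //                              end_epoch = 2 and epoch 2 is resynced.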
137
watch/src/block_rewards/database.rs
Normal file
@ -0,0 +1,137 @@
use crate::database::{
    schema::{beacon_blocks, block_rewards},
    watch_types::{WatchHash, WatchSlot},
    Error, PgConn, MAX_SIZE_BATCH_INSERT,
};

use diesel::prelude::*;
use diesel::{Insertable, Queryable};
use log::debug;
use serde::{Deserialize, Serialize};
use std::time::Instant;

#[derive(Debug, Queryable, Insertable, Serialize, Deserialize)]
#[diesel(table_name = block_rewards)]
pub struct WatchBlockRewards {
    pub slot: WatchSlot,
    pub total: i32,
    pub attestation_reward: i32,
    pub sync_committee_reward: i32,
}

/// Insert a batch of values into the `block_rewards` table.
///
/// On a conflict, it will do nothing, leaving the old value.
pub fn insert_batch_block_rewards(
    conn: &mut PgConn,
    rewards: Vec<WatchBlockRewards>,
) -> Result<(), Error> {
    use self::block_rewards::dsl::*;

    let mut count = 0;
    let timer = Instant::now();

    for chunk in rewards.chunks(MAX_SIZE_BATCH_INSERT) {
        count += diesel::insert_into(block_rewards)
            .values(chunk)
            .on_conflict_do_nothing()
            .execute(conn)?;
    }

    let time_taken = timer.elapsed();
    debug!("Block rewards inserted, count: {count}, time_taken: {time_taken:?}");
    Ok(())
}

/// Selects the row from the `block_rewards` table where `slot` is minimum.
pub fn get_lowest_block_rewards(conn: &mut PgConn) -> Result<Option<WatchBlockRewards>, Error> {
    use self::block_rewards::dsl::*;
    let timer = Instant::now();

    let result = block_rewards
        .order_by(slot.asc())
        .limit(1)
        .first::<WatchBlockRewards>(conn)
        .optional()?;

    let time_taken = timer.elapsed();
    debug!("Block rewards requested: lowest, time_taken: {time_taken:?}");
    Ok(result)
}

/// Selects the row from the `block_rewards` table where `slot` is maximum.
pub fn get_highest_block_rewards(conn: &mut PgConn) -> Result<Option<WatchBlockRewards>, Error> {
    use self::block_rewards::dsl::*;
    let timer = Instant::now();

    let result = block_rewards
        .order_by(slot.desc())
        .limit(1)
        .first::<WatchBlockRewards>(conn)
        .optional()?;

    let time_taken = timer.elapsed();
    debug!("Block rewards requested: highest, time_taken: {time_taken:?}");
    Ok(result)
}

/// Selects a single row of the `block_rewards` table corresponding to a given `root_query`.
pub fn get_block_rewards_by_root(
    conn: &mut PgConn,
    root_query: WatchHash,
) -> Result<Option<WatchBlockRewards>, Error> {
    use self::beacon_blocks::dsl::{beacon_blocks, root};
    use self::block_rewards::dsl::*;
    let timer = Instant::now();

    let join = beacon_blocks.inner_join(block_rewards);

    let result = join
        .select((slot, total, attestation_reward, sync_committee_reward))
        .filter(root.eq(root_query))
        .first::<WatchBlockRewards>(conn)
        .optional()?;

    let time_taken = timer.elapsed();
    debug!("Block rewards requested: {root_query}, time_taken: {time_taken:?}");
    Ok(result)
}

/// Selects a single row of the `block_rewards` table corresponding to a given `slot_query`.
pub fn get_block_rewards_by_slot(
    conn: &mut PgConn,
    slot_query: WatchSlot,
) -> Result<Option<WatchBlockRewards>, Error> {
    use self::block_rewards::dsl::*;
    let timer = Instant::now();

    let result = block_rewards
        .filter(slot.eq(slot_query))
        .first::<WatchBlockRewards>(conn)
        .optional()?;

    let time_taken = timer.elapsed();
    debug!("Block rewards requested: {slot_query}, time_taken: {time_taken:?}");
    Ok(result)
}

/// Selects `slot` from all rows of the `beacon_blocks` table which do not have a corresponding
/// row in `block_rewards`.
#[allow(dead_code)]
pub fn get_unknown_block_rewards(conn: &mut PgConn) -> Result<Vec<Option<WatchSlot>>, Error> {
    use self::beacon_blocks::dsl::{beacon_blocks, root, slot};
    use self::block_rewards::dsl::block_rewards;

    let join = beacon_blocks.left_join(block_rewards);

    let result = join
        .select(slot)
        .filter(root.is_null())
        // Block rewards cannot be retrieved for `slot == 0` so we need to exclude it.
        .filter(slot.ne(0))
        .order_by(slot.desc())
        .nullable()
        .load::<Option<WatchSlot>>(conn)?;

    Ok(result)
}
38
watch/src/block_rewards/mod.rs
Normal file
@ -0,0 +1,38 @@
pub mod database;
mod server;
mod updater;

use crate::database::watch_types::WatchSlot;
use crate::updater::error::Error;

pub use database::{
    get_block_rewards_by_root, get_block_rewards_by_slot, get_highest_block_rewards,
    get_lowest_block_rewards, get_unknown_block_rewards, insert_batch_block_rewards,
    WatchBlockRewards,
};
pub use server::block_rewards_routes;

use eth2::BeaconNodeHttpClient;
use types::Slot;

/// Sends a request to `lighthouse/analysis/block_rewards`.
/// Formats the response into a vector of `WatchBlockRewards`.
///
/// Will fail if `start_slot == 0`.
pub async fn get_block_rewards(
    bn: &BeaconNodeHttpClient,
    start_slot: Slot,
    end_slot: Slot,
) -> Result<Vec<WatchBlockRewards>, Error> {
    Ok(bn
        .get_lighthouse_analysis_block_rewards(start_slot, end_slot)
        .await?
        .into_iter()
        .map(|data| WatchBlockRewards {
            slot: WatchSlot::from_slot(data.meta.slot),
            total: data.total as i32,
            attestation_reward: data.attestation_rewards.total as i32,
            sync_committee_reward: data.sync_committee_rewards as i32,
        })
        .collect())
}
31
watch/src/block_rewards/server.rs
Normal file
@ -0,0 +1,31 @@
use crate::block_rewards::database::{
    get_block_rewards_by_root, get_block_rewards_by_slot, WatchBlockRewards,
};
use crate::database::{get_connection, PgPool, WatchHash, WatchSlot};
use crate::server::Error;

use axum::{extract::Path, routing::get, Extension, Json, Router};
use eth2::types::BlockId;
use std::str::FromStr;

pub async fn get_block_rewards(
    Path(block_query): Path<String>,
    Extension(pool): Extension<PgPool>,
) -> Result<Json<Option<WatchBlockRewards>>, Error> {
    let mut conn = get_connection(&pool).map_err(Error::Database)?;
    match BlockId::from_str(&block_query).map_err(|_| Error::BadRequest)? {
        BlockId::Root(root) => Ok(Json(get_block_rewards_by_root(
            &mut conn,
            WatchHash::from_hash(root),
        )?)),
        BlockId::Slot(slot) => Ok(Json(get_block_rewards_by_slot(
            &mut conn,
            WatchSlot::from_slot(slot),
        )?)),
        _ => Err(Error::BadRequest),
    }
}

pub fn block_rewards_routes() -> Router {
    Router::new().route("/v1/blocks/:block/rewards", get(get_block_rewards))
}
157
watch/src/block_rewards/updater.rs
Normal file
@ -0,0 +1,157 @@
use crate::database::{self, Error as DbError};
use crate::updater::{Error, UpdateHandler};

use crate::block_rewards::get_block_rewards;

use eth2::types::EthSpec;
use log::{debug, error, warn};

const MAX_SIZE_SINGLE_REQUEST_BLOCK_REWARDS: u64 = 1600;

impl<T: EthSpec> UpdateHandler<T> {
    /// Forward fills the `block_rewards` table starting from the entry with the
    /// highest slot.
    ///
    /// It constructs a request to the `get_block_rewards` API with:
    /// `start_slot` -> highest filled `block_rewards` + 1 (or lowest beacon block)
    /// `end_slot` -> highest beacon block
    ///
    /// Request range will not exceed `MAX_SIZE_SINGLE_REQUEST_BLOCK_REWARDS`.
    pub async fn fill_block_rewards(&mut self) -> Result<(), Error> {
        let mut conn = database::get_connection(&self.pool)?;

        // Get the slot of the highest entry in the `block_rewards` table.
        let highest_filled_slot_opt = if self.config.block_rewards {
            database::get_highest_block_rewards(&mut conn)?.map(|reward| reward.slot)
        } else {
            return Err(Error::NotEnabled("block_rewards".to_string()));
        };

        let mut start_slot = if let Some(highest_filled_slot) = highest_filled_slot_opt {
            highest_filled_slot.as_slot() + 1
        } else {
            // No entries in the `block_rewards` table. Use `beacon_blocks` instead.
            if let Some(lowest_beacon_block) =
                database::get_lowest_beacon_block(&mut conn)?.map(|block| block.slot)
            {
                lowest_beacon_block.as_slot()
            } else {
                // There are no blocks in the database, do not fill the `block_rewards` table.
                warn!("Refusing to fill block rewards as there are no blocks in the database");
                return Ok(());
            }
        };

        // The `block_rewards` API cannot accept `start_slot == 0`.
        if start_slot == 0 {
            start_slot += 1;
        }

        if let Some(highest_beacon_block) =
            database::get_highest_beacon_block(&mut conn)?.map(|block| block.slot)
        {
            let mut end_slot = highest_beacon_block.as_slot();

            if start_slot > end_slot {
                debug!("Block rewards are up to date with the head of the database");
                return Ok(());
            }

            // Ensure the size of the request does not exceed the maximum allowed value.
            if start_slot < end_slot.saturating_sub(MAX_SIZE_SINGLE_REQUEST_BLOCK_REWARDS) {
                end_slot = start_slot + MAX_SIZE_SINGLE_REQUEST_BLOCK_REWARDS
            }

            let rewards = get_block_rewards(&self.bn, start_slot, end_slot).await?;
            database::insert_batch_block_rewards(&mut conn, rewards)?;
        } else {
            // There are no blocks in the `beacon_blocks` database, but there are entries in the
            // `block_rewards` table. This is a critical failure. It usually means someone has
            // manually tampered with the database tables and should not occur during normal
            // operation.
            error!("Database is corrupted. Please re-sync the database");
            return Err(Error::Database(DbError::DatabaseCorrupted));
        }

        Ok(())
    }

    /// Backfill the `block_rewards` table starting from the entry with the
    /// lowest slot.
    ///
    /// It constructs a request to the `get_block_rewards` API with:
    /// `start_slot` -> lowest beacon block
    /// `end_slot` -> lowest filled `block_rewards` - 1 (or highest beacon block)
    ///
    /// Request range will not exceed `MAX_SIZE_SINGLE_REQUEST_BLOCK_REWARDS`.
    pub async fn backfill_block_rewards(&mut self) -> Result<(), Error> {
        let mut conn = database::get_connection(&self.pool)?;
        let max_block_reward_backfill = self.config.max_backfill_size_epochs * self.slots_per_epoch;

        // Get the slot of the lowest entry in the `block_rewards` table.
        let lowest_filled_slot_opt = if self.config.block_rewards {
            database::get_lowest_block_rewards(&mut conn)?.map(|reward| reward.slot)
        } else {
            return Err(Error::NotEnabled("block_rewards".to_string()));
        };

        let end_slot = if let Some(lowest_filled_slot) = lowest_filled_slot_opt {
            lowest_filled_slot.as_slot().saturating_sub(1_u64)
        } else {
            // No entries in the `block_rewards` table. Use `beacon_blocks` instead.
            if let Some(highest_beacon_block) =
                database::get_highest_beacon_block(&mut conn)?.map(|block| block.slot)
            {
                highest_beacon_block.as_slot()
            } else {
                // There are no blocks in the database, do not backfill the `block_rewards` table.
                warn!("Refusing to backfill block rewards as there are no blocks in the database");
                return Ok(());
            }
        };

        if end_slot <= 1 {
            debug!("Block rewards backfill is complete");
            return Ok(());
        }

        if let Some(lowest_block_slot) = database::get_lowest_beacon_block(&mut conn)? {
            let mut start_slot = lowest_block_slot.slot.as_slot();

            if start_slot >= end_slot {
                debug!("Block rewards are up to date with the base of the database");
                return Ok(());
            }

            // Ensure that the request range does not exceed `max_block_reward_backfill` or
            // `MAX_SIZE_SINGLE_REQUEST_BLOCK_REWARDS`.
            if start_slot < end_slot.saturating_sub(max_block_reward_backfill) {
                start_slot = end_slot.saturating_sub(max_block_reward_backfill)
            }

            if start_slot < end_slot.saturating_sub(MAX_SIZE_SINGLE_REQUEST_BLOCK_REWARDS) {
                start_slot = end_slot.saturating_sub(MAX_SIZE_SINGLE_REQUEST_BLOCK_REWARDS)
            }

            // The `block_rewards` API cannot accept `start_slot == 0`.
            if start_slot == 0 {
                start_slot += 1
            }

            let rewards = get_block_rewards(&self.bn, start_slot, end_slot).await?;

            if self.config.block_rewards {
                database::insert_batch_block_rewards(&mut conn, rewards)?;
            }
        } else {
            // There are no blocks in the `beacon_blocks` database, but there are entries in the
            // `block_rewards` table. This is a critical failure. It usually means someone has
            // manually tampered with the database tables and should not occur during normal
            // operation.
            error!("Database is corrupted. Please re-sync the database");
            return Err(Error::Database(DbError::DatabaseCorrupted));
        }

        Ok(())
    }
}
40
watch/src/blockprint/config.rs
Normal file
@ -0,0 +1,40 @@
use serde::{Deserialize, Serialize};

pub const fn enabled() -> bool {
    false
}

pub const fn url() -> Option<String> {
    None
}

pub const fn username() -> Option<String> {
    None
}

pub const fn password() -> Option<String> {
    None
}

#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct Config {
    #[serde(default = "enabled")]
    pub enabled: bool,
    #[serde(default = "url")]
    pub url: Option<String>,
    #[serde(default = "username")]
    pub username: Option<String>,
    #[serde(default = "password")]
    pub password: Option<String>,
}

impl Default for Config {
    fn default() -> Self {
        Config {
            enabled: enabled(),
            url: url(),
            username: username(),
            password: password(),
        }
    }
}
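
A sketch of how the field-level defaults compose when deserializing a partial config (assuming a YAML front end such as `serde_yaml`, which is not part of this diff):

    // Only `enabled` is supplied; the other fields fall back to the
    // `default = "..."` functions above and come out as `None`.
    let config: Config = serde_yaml::from_str("enabled: true")?;
    assert!(config.enabled);
    assert!(config.url.is_none());
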
224
watch/src/blockprint/database.rs
Normal file
@ -0,0 +1,224 @@
use crate::database::{
    self,
    schema::{beacon_blocks, blockprint},
    watch_types::{WatchHash, WatchSlot},
    Error, PgConn, MAX_SIZE_BATCH_INSERT,
};

use diesel::prelude::*;
use diesel::sql_types::{Integer, Text};
use diesel::{Insertable, Queryable};
use log::debug;
use serde::{Deserialize, Serialize};
use std::collections::HashMap;
use std::time::Instant;

type WatchConsensusClient = String;
pub fn list_consensus_clients() -> Vec<WatchConsensusClient> {
    vec![
        "Lighthouse".to_string(),
        "Lodestar".to_string(),
        "Nimbus".to_string(),
        "Prysm".to_string(),
        "Teku".to_string(),
        "Unknown".to_string(),
    ]
}

#[derive(Debug, Queryable, Insertable, Serialize, Deserialize)]
#[diesel(table_name = blockprint)]
pub struct WatchBlockprint {
    pub slot: WatchSlot,
    pub best_guess: WatchConsensusClient,
}

#[derive(Debug, QueryableByName, diesel::FromSqlRow)]
pub struct WatchValidatorBlockprint {
    #[diesel(sql_type = Integer)]
    pub proposer_index: i32,
    #[diesel(sql_type = Text)]
    pub best_guess: WatchConsensusClient,
    #[diesel(sql_type = Integer)]
    pub slot: WatchSlot,
}

/// Insert a batch of values into the `blockprint` table.
///
/// On a conflict, it will do nothing, leaving the old value.
pub fn insert_batch_blockprint(
    conn: &mut PgConn,
    prints: Vec<WatchBlockprint>,
) -> Result<(), Error> {
    use self::blockprint::dsl::*;

    let mut count = 0;
    let timer = Instant::now();

    for chunk in prints.chunks(MAX_SIZE_BATCH_INSERT) {
        count += diesel::insert_into(blockprint)
            .values(chunk)
            .on_conflict_do_nothing()
            .execute(conn)?;
    }

    let time_taken = timer.elapsed();
    debug!("Blockprint inserted, count: {count}, time_taken: {time_taken:?}");
    Ok(())
}

/// Selects the row from the `blockprint` table where `slot` is minimum.
pub fn get_lowest_blockprint(conn: &mut PgConn) -> Result<Option<WatchBlockprint>, Error> {
    use self::blockprint::dsl::*;
    let timer = Instant::now();

    let result = blockprint
        .order_by(slot.asc())
        .limit(1)
        .first::<WatchBlockprint>(conn)
        .optional()?;

    let time_taken = timer.elapsed();
    debug!("Blockprint requested: lowest, time_taken: {time_taken:?}");
    Ok(result)
}

/// Selects the row from the `blockprint` table where `slot` is maximum.
pub fn get_highest_blockprint(conn: &mut PgConn) -> Result<Option<WatchBlockprint>, Error> {
    use self::blockprint::dsl::*;
    let timer = Instant::now();

    let result = blockprint
        .order_by(slot.desc())
        .limit(1)
        .first::<WatchBlockprint>(conn)
        .optional()?;

    let time_taken = timer.elapsed();
    debug!("Blockprint requested: highest, time_taken: {time_taken:?}");
    Ok(result)
}

/// Selects a single row of the `blockprint` table corresponding to a given `root_query`.
pub fn get_blockprint_by_root(
    conn: &mut PgConn,
    root_query: WatchHash,
) -> Result<Option<WatchBlockprint>, Error> {
    use self::beacon_blocks::dsl::{beacon_blocks, root};
    use self::blockprint::dsl::*;
    let timer = Instant::now();

    let join = beacon_blocks.inner_join(blockprint);

    let result = join
        .select((slot, best_guess))
        .filter(root.eq(root_query))
        .first::<WatchBlockprint>(conn)
        .optional()?;

    let time_taken = timer.elapsed();
    debug!("Blockprint requested: {root_query}, time_taken: {time_taken:?}");
    Ok(result)
}

/// Selects a single row of the `blockprint` table corresponding to a given `slot_query`.
pub fn get_blockprint_by_slot(
    conn: &mut PgConn,
    slot_query: WatchSlot,
) -> Result<Option<WatchBlockprint>, Error> {
    use self::blockprint::dsl::*;
    let timer = Instant::now();

    let result = blockprint
        .filter(slot.eq(slot_query))
        .first::<WatchBlockprint>(conn)
        .optional()?;

    let time_taken = timer.elapsed();
    debug!("Blockprint requested: {slot_query}, time_taken: {time_taken:?}");
    Ok(result)
}

/// Selects `slot` from all rows of the `beacon_blocks` table which do not have a corresponding
/// row in `blockprint`.
#[allow(dead_code)]
pub fn get_unknown_blockprint(conn: &mut PgConn) -> Result<Vec<Option<WatchSlot>>, Error> {
    use self::beacon_blocks::dsl::{beacon_blocks, root, slot};
    use self::blockprint::dsl::blockprint;

    let join = beacon_blocks.left_join(blockprint);

    let result = join
        .select(slot)
        .filter(root.is_null())
        .order_by(slot.desc())
        .nullable()
        .load::<Option<WatchSlot>>(conn)?;

    Ok(result)
}

/// Constructs a HashMap of `index` -> `best_guess` for each validator's latest proposal at or
/// before `target_slot`.
/// Inserts `"Unknown"` if no prior proposals exist.
pub fn construct_validator_blockprints_at_slot(
    conn: &mut PgConn,
    target_slot: WatchSlot,
    slots_per_epoch: u64,
) -> Result<HashMap<i32, WatchConsensusClient>, Error> {
    use self::blockprint::dsl::{blockprint, slot};

    let total_validators =
        database::count_validators_activated_before_slot(conn, target_slot, slots_per_epoch)?
            as usize;

    let mut blockprint_map = HashMap::with_capacity(total_validators);

    let latest_proposals =
        database::get_all_validators_latest_proposer_info_at_slot(conn, target_slot)?;

    let latest_proposal_slots: Vec<WatchSlot> = latest_proposals.clone().into_keys().collect();

    let result = blockprint
        .filter(slot.eq_any(latest_proposal_slots))
        .load::<WatchBlockprint>(conn)?;

    // Insert the validators which have available blockprints.
    for print in result {
        if let Some(proposer) = latest_proposals.get(&print.slot) {
            blockprint_map.insert(*proposer, print.best_guess);
        }
    }

    // Insert the rest of the unknown validators.
    for validator_index in 0..total_validators {
        blockprint_map
            .entry(validator_index as i32)
            .or_insert_with(|| "Unknown".to_string());
    }

    Ok(blockprint_map)
}

/// Counts the number of occurrences of each `client` present in the `validators` table at or
/// before some `target_slot`.
pub fn get_validators_clients_at_slot(
    conn: &mut PgConn,
    target_slot: WatchSlot,
    slots_per_epoch: u64,
) -> Result<HashMap<WatchConsensusClient, usize>, Error> {
    let mut client_map: HashMap<WatchConsensusClient, usize> = HashMap::new();

    // This includes all validators which were activated at or before `target_slot`.
    let validator_blockprints =
        construct_validator_blockprints_at_slot(conn, target_slot, slots_per_epoch)?;

    for client in list_consensus_clients() {
        let count = validator_blockprints
            .iter()
            .filter(|(_, v)| (*v).clone() == client)
            .count();
        client_map.insert(client, count);
    }

    Ok(client_map)
}
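
A hedged usage sketch for the client-diversity tally (the `pool`, target slot, and 32-slot epoch are assumptions, not taken from this diff):

    let mut conn = database::get_connection(&pool)?;
    let target = WatchSlot::from_slot(Slot::new(1_000));
    // Maps each known client name (plus "Unknown") to its validator count.
    let counts = get_validators_clients_at_slot(&mut conn, target, 32)?;
    for (client, count) in counts {
        println!("{client}: {count}");
    }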