//! This crate contains an HTTP server which serves the endpoints listed here:
//!
//! https://github.com/ethereum/beacon-APIs
//!
//! There are also some additional, non-standard endpoints behind the `/lighthouse/` path which are
//! used for development.
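//!
//! A minimal usage sketch (illustrative only; in Lighthouse the `Context` is assembled by the
//! beacon node client, and `log`/`shutdown_future` are assumed to exist here):
//!
//! ```ignore
//! let ctx = std::sync::Arc::new(Context {
//!     config: Config { enabled: true, ..Config::default() },
//!     chain: None,
//!     network_senders: None,
//!     network_globals: None,
//!     eth1_service: None,
//!     sse_logging_components: None,
//!     log,
//! });
//! // `serve` binds the listener and returns the bound address plus the server future.
//! let (listen_addr, server) = serve(ctx, shutdown_future)?;
//! tokio::spawn(server);
//! ```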

mod attestation_performance;
mod attester_duties;
mod block_id;
mod block_packing_efficiency;
mod block_rewards;
mod database;
mod metrics;
mod proposer_duties;
mod publish_blocks;
mod standard_block_rewards;
mod state_id;
mod sync_committee_rewards;
mod sync_committees;
pub mod test_utils;
mod ui;
mod validator_inclusion;
mod version;

use beacon_chain::{
    attestation_verification::VerifiedAttestation, observed_operations::ObservationOutcome,
    validator_monitor::timestamp_now, AttestationError as AttnError, BeaconChain, BeaconChainError,
    BeaconChainTypes, ProduceBlockVerification, WhenSlotSkipped,
};
pub use block_id::BlockId;
use bytes::Bytes;
use directory::DEFAULT_ROOT_DIR;
use eth2::types::{
    self as api_types, BroadcastValidation, EndpointVersion, ForkChoice, ForkChoiceNode,
    SkipRandaoVerification, ValidatorId, ValidatorStatus,
};
use lighthouse_network::{types::SyncState, EnrExt, NetworkGlobals, PeerId, PubsubMessage};
use lighthouse_version::version_with_platform;
use logging::SSELoggingComponents;
use network::{NetworkMessage, NetworkSenders, ValidatorSubscriptionMessage};
use operation_pool::ReceivedPreCapella;
use parking_lot::RwLock;
pub use publish_blocks::{
    publish_blinded_block, publish_block, reconstruct_block, ProvenancedBlock,
};
use serde::{Deserialize, Serialize};
use slog::{crit, debug, error, info, warn, Logger};
use slot_clock::SlotClock;
use ssz::Encode;
pub use state_id::StateId;
use std::borrow::Cow;
use std::future::Future;
use std::net::{IpAddr, Ipv4Addr, SocketAddr};
use std::path::PathBuf;
use std::pin::Pin;
use std::sync::Arc;
use sysinfo::{System, SystemExt};
use system_health::observe_system_health_bn;
use tokio::sync::mpsc::{Sender, UnboundedSender};
use tokio_stream::{wrappers::BroadcastStream, StreamExt};
use types::{
    Attestation, AttestationData, AttestationShufflingId, AttesterSlashing, BeaconStateError,
    BlindedPayload, CommitteeCache, ConfigAndPreset, Epoch, EthSpec, ForkName, FullPayload,
    ProposerPreparationData, ProposerSlashing, RelativeEpoch, SignedAggregateAndProof,
    SignedBeaconBlock, SignedBlindedBeaconBlock, SignedBlsToExecutionChange,
    SignedContributionAndProof, SignedValidatorRegistrationData, SignedVoluntaryExit, Slot,
    SyncCommitteeMessage, SyncContributionData,
};
use version::{
    add_consensus_version_header, execution_optimistic_finalized_fork_versioned_response,
    fork_versioned_response, inconsistent_fork_rejection, unsupported_version_rejection, V1, V2,
};
use warp::http::StatusCode;
use warp::sse::Event;
use warp::Reply;
use warp::{http::Response, Filter};
use warp_utils::{
    query::multi_key_query,
    task::{blocking_json_task, blocking_response_task},
    uor::UnifyingOrFilter,
};

const API_PREFIX: &str = "eth";

/// If the node is within this many epochs from the head, we declare it to be synced regardless of
/// the network sync state.
///
/// This helps prevent attacks where nodes can convince us that we're syncing some non-existent
/// finalized head.
const SYNC_TOLERANCE_EPOCHS: u64 = 8;
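// For a rough sense of scale (illustrative): with mainnet parameters this tolerance is
// 8 epochs * 32 slots/epoch = 256 slots, i.e. roughly 51 minutes at 12 seconds per slot.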

/// A custom type which allows for both unsecured and TLS-enabled HTTP servers.
type HttpServer = (SocketAddr, Pin<Box<dyn Future<Output = ()> + Send>>);

/// Alias for readability.
pub type ExecutionOptimistic = bool;

/// Configuration used when serving the HTTP server over TLS.
#[derive(PartialEq, Debug, Clone, Serialize, Deserialize)]
pub struct TlsConfig {
    pub cert: PathBuf,
    pub key: PathBuf,
}
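
// A sketch (paths are placeholders, not real defaults): TLS is enabled by supplying both a
// certificate and its private key via `Config::tls_config`.
// let tls_config = Some(TlsConfig {
//     cert: PathBuf::from("/path/to/cert.pem"),
//     key: PathBuf::from("/path/to/key.pem"),
// });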

/// A wrapper around all the items required to spawn the HTTP server.
///
/// The server will gracefully handle the case where any fields are `None`.
pub struct Context<T: BeaconChainTypes> {
    pub config: Config,
    pub chain: Option<Arc<BeaconChain<T>>>,
    pub network_senders: Option<NetworkSenders<T::EthSpec>>,
    pub network_globals: Option<Arc<NetworkGlobals<T::EthSpec>>>,
    pub eth1_service: Option<eth1::Service>,
    pub sse_logging_components: Option<SSELoggingComponents>,
    pub log: Logger,
}

/// Configuration for the HTTP server.
#[derive(PartialEq, Debug, Clone, Serialize, Deserialize)]
pub struct Config {
    pub enabled: bool,
    pub listen_addr: IpAddr,
    pub listen_port: u16,
    pub allow_origin: Option<String>,
    pub tls_config: Option<TlsConfig>,
    pub allow_sync_stalled: bool,
    pub spec_fork_name: Option<ForkName>,
    pub data_dir: PathBuf,
}

impl Default for Config {
    fn default() -> Self {
        Self {
            enabled: false,
            listen_addr: IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)),
            listen_port: 5052,
            allow_origin: None,
            tls_config: None,
            allow_sync_stalled: false,
            spec_fork_name: None,
            data_dir: PathBuf::from(DEFAULT_ROOT_DIR),
        }
    }
}
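
// A sketch of typical construction (values are illustrative): the server is disabled by default,
// so callers flip `enabled` and override the listen address while keeping the other defaults.
// let config = Config {
//     enabled: true,
//     listen_addr: IpAddr::V4(Ipv4Addr::new(0, 0, 0, 0)),
//     ..Config::default()
// };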

#[derive(Debug)]
pub enum Error {
    Warp(warp::Error),
    Other(String),
}

impl From<warp::Error> for Error {
    fn from(e: warp::Error) -> Self {
        Error::Warp(e)
    }
}

impl From<String> for Error {
    fn from(e: String) -> Self {
        Error::Other(e)
    }
}

/// Creates a `warp` logging wrapper which we use to create `slog` logs.
pub fn slog_logging(
    log: Logger,
) -> warp::filters::log::Log<impl Fn(warp::filters::log::Info) + Clone> {
    warp::log::custom(move |info| {
        match info.status() {
            status
                if status == StatusCode::OK
                    || status == StatusCode::NOT_FOUND
                    || status == StatusCode::PARTIAL_CONTENT =>
            {
                debug!(
                    log,
                    "Processed HTTP API request";
                    "elapsed" => format!("{:?}", info.elapsed()),
                    "status" => status.to_string(),
                    "path" => info.path(),
                    "method" => info.method().to_string(),
                );
            }
            status => {
                warn!(
                    log,
                    "Error processing HTTP API request";
                    "elapsed" => format!("{:?}", info.elapsed()),
                    "status" => status.to_string(),
                    "path" => info.path(),
                    "method" => info.method().to_string(),
                );
            }
        };
    })
}

/// Creates a `warp` logging wrapper which we use for Prometheus metrics (not necessarily logging,
/// per se).
pub fn prometheus_metrics() -> warp::filters::log::Log<impl Fn(warp::filters::log::Info) + Clone> {
    warp::log::custom(move |info| {
        // Here we restrict the `info.path()` value to some predefined values. Without this, we end
        // up with a new metric type each time someone includes something unique in the path (e.g.,
        // a block hash).
        let path = {
            let equals = |s: &'static str| -> Option<&'static str> {
                if info.path() == format!("/{}/{}", API_PREFIX, s) {
                    Some(s)
                } else {
                    None
                }
            };

            let starts_with = |s: &'static str| -> Option<&'static str> {
                if info.path().starts_with(&format!("/{}/{}", API_PREFIX, s)) {
                    Some(s)
                } else {
                    None
                }
            };

            // First line covers `POST /v1/beacon/blocks` only
            equals("v1/beacon/blocks")
                .or_else(|| starts_with("v1/validator/blocks"))
                .or_else(|| starts_with("v2/validator/blocks"))
                .or_else(|| starts_with("v1/validator/blinded_blocks"))
                .or_else(|| starts_with("v1/validator/duties/attester"))
                .or_else(|| starts_with("v1/validator/duties/proposer"))
                .or_else(|| starts_with("v1/validator/duties/sync"))
                .or_else(|| starts_with("v1/validator/attestation_data"))
                .or_else(|| starts_with("v1/validator/aggregate_attestation"))
                .or_else(|| starts_with("v1/validator/aggregate_and_proofs"))
                .or_else(|| starts_with("v1/validator/sync_committee_contribution"))
                .or_else(|| starts_with("v1/validator/contribution_and_proofs"))
                .or_else(|| starts_with("v1/validator/beacon_committee_subscriptions"))
                .or_else(|| starts_with("v1/validator/sync_committee_subscriptions"))
                .or_else(|| starts_with("v1/beacon/pool/attestations"))
                .or_else(|| starts_with("v1/beacon/pool/sync_committees"))
                .or_else(|| starts_with("v1/beacon/blocks/head/root"))
                .or_else(|| starts_with("v1/validator/prepare_beacon_proposer"))
                .or_else(|| starts_with("v1/validator/register_validator"))
                .or_else(|| starts_with("v1/beacon/"))
                .or_else(|| starts_with("v2/beacon/"))
                .or_else(|| starts_with("v1/config/"))
                .or_else(|| starts_with("v1/debug/"))
                .or_else(|| starts_with("v2/debug/"))
                .or_else(|| starts_with("v1/events/"))
                .or_else(|| starts_with("v1/node/"))
                .or_else(|| starts_with("v1/validator/"))
                .unwrap_or("other")
        };

        metrics::inc_counter_vec(&metrics::HTTP_API_PATHS_TOTAL, &[path]);
        metrics::inc_counter_vec(
            &metrics::HTTP_API_STATUS_CODES_TOTAL,
            &[&info.status().to_string()],
        );
        metrics::observe_timer_vec(&metrics::HTTP_API_PATHS_TIMES, &[path], info.elapsed());
    })
}
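
// Sketch of how these two wrappers are attached (illustrative; the actual wiring happens at the
// end of `serve` when the full route set is assembled):
// let routes = routes
//     .with(slog_logging(log.clone()))
//     .with(prometheus_metrics());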

/// Creates a server that will serve requests using information from `ctx`.
///
/// The server will shut down gracefully when the `shutdown` future resolves.
///
/// ## Returns
///
/// This function will bind the server to the provided address and then return a tuple of:
///
/// - `SocketAddr`: the address that the HTTP server will listen on.
/// - `Future`: the actual server future that will need to be awaited.
///
/// ## Errors
///
/// Returns an error if the server is unable to bind or there is another error during
/// configuration.
pub fn serve<T: BeaconChainTypes>(
    ctx: Arc<Context<T>>,
    shutdown: impl Future<Output = ()> + Send + Sync + 'static,
) -> Result<HttpServer, Error> {
    let config = ctx.config.clone();
    let allow_sync_stalled = config.allow_sync_stalled;
    let log = ctx.log.clone();

    // Configure CORS.
    let cors_builder = {
        let builder = warp::cors()
            .allow_methods(vec!["GET", "POST"])
            .allow_headers(vec!["Content-Type"]);

        warp_utils::cors::set_builder_origins(
            builder,
            config.allow_origin.as_deref(),
            (config.listen_addr, config.listen_port),
        )?
    };

    // Sanity check.
    if !config.enabled {
        crit!(log, "Cannot start disabled HTTP server");
        return Err(Error::Other(
            "A disabled server should not be started".to_string(),
        ));
    }

    // Create a filter that extracts the endpoint version.
    let any_version = warp::path(API_PREFIX).and(warp::path::param::<EndpointVersion>().or_else(
        |_| async move {
            Err(warp_utils::reject::custom_bad_request(
                "Invalid version identifier".to_string(),
            ))
        },
    ));

    // Filter that enforces a single endpoint version and then discards the `EndpointVersion`.
    let single_version = |reqd: EndpointVersion| {
        any_version
            .and_then(move |version| async move {
                if version == reqd {
                    Ok(())
                } else {
                    Err(unsupported_version_rejection(version))
                }
            })
            .untuple_one()
    };

    let eth_v1 = single_version(V1);
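
    // Illustrative sketch: versioned routes are built by composing these filters with further
    // path segments, e.g.
    // let example_route = eth_v1
    //     .and(warp::path("node"))
    //     .and(warp::path("version"))
    //     .and(warp::path::end());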

    let eth_v2 = single_version(V2);

    // Create a `warp` filter that provides access to the network globals.
    let inner_network_globals = ctx.network_globals.clone();
    let network_globals = warp::any()
        .map(move || inner_network_globals.clone())
        .and_then(|network_globals| async move {
            match network_globals {
                Some(globals) => Ok(globals),
                None => Err(warp_utils::reject::custom_not_found(
                    "network globals are not initialized.".to_string(),
                )),
            }
        });

    // Create a `warp` filter for the data_dir.
    let inner_data_dir = ctx.config.data_dir.clone();
    let data_dir_filter = warp::any().map(move || inner_data_dir.clone());

    // Create a `warp` filter that provides access to the beacon chain.
    let inner_ctx = ctx.clone();
    let chain_filter =
        warp::any()
            .map(move || inner_ctx.chain.clone())
            .and_then(|chain| async move {
                match chain {
                    Some(chain) => Ok(chain),
                    None => Err(warp_utils::reject::custom_not_found(
                        "Beacon chain genesis has not yet been observed.".to_string(),
                    )),
                }
            });

    // Create a `warp` filter that provides access to the network sender channel.
    let network_tx = ctx
        .network_senders
        .as_ref()
        .map(|senders| senders.network_send());
    let network_tx_filter =
        warp::any()
            .map(move || network_tx.clone())
            .and_then(|network_tx| async move {
                match network_tx {
                    Some(network_tx) => Ok(network_tx),
                    None => Err(warp_utils::reject::custom_not_found(
                        "The networking stack has not yet started (network_tx).".to_string(),
                    )),
                }
            });

    // Create a `warp` filter that provides access to the network attestation subscription channel.
    let validator_subscriptions_tx = ctx
        .network_senders
        .as_ref()
        .map(|senders| senders.validator_subscription_send());
    let validator_subscription_tx_filter = warp::any()
        .map(move || validator_subscriptions_tx.clone())
        .and_then(|validator_subscriptions_tx| async move {
            match validator_subscriptions_tx {
                Some(validator_subscriptions_tx) => Ok(validator_subscriptions_tx),
                None => Err(warp_utils::reject::custom_not_found(
                    "The networking stack has not yet started (validator_subscription_tx)."
                        .to_string(),
                )),
            }
        });

    // Create a `warp` filter that provides access to the Eth1 service.
    let inner_ctx = ctx.clone();
    let eth1_service_filter = warp::any()
        .map(move || inner_ctx.eth1_service.clone())
        .and_then(|eth1_service| async move {
            match eth1_service {
                Some(eth1_service) => Ok(eth1_service),
                None => Err(warp_utils::reject::custom_not_found(
                    "The Eth1 service is not started. Use --eth1 on the CLI.".to_string(),
                )),
            }
        });

    // Create a `warp` filter that rejects requests whilst the node is syncing.
    let not_while_syncing_filter =
        warp::any()
            .and(network_globals.clone())
            .and(chain_filter.clone())
            .and_then(
                move |network_globals: Arc<NetworkGlobals<T::EthSpec>>,
                      chain: Arc<BeaconChain<T>>| async move {
                    match *network_globals.sync_state.read() {
                        SyncState::SyncingFinalized { .. } => {
                            let head_slot = chain.canonical_head.cached_head().head_slot();

                            let current_slot =
                                chain.slot_clock.now_or_genesis().ok_or_else(|| {
                                    warp_utils::reject::custom_server_error(
                                        "unable to read slot clock".to_string(),
                                    )
                                })?;

                            let tolerance = SYNC_TOLERANCE_EPOCHS * T::EthSpec::slots_per_epoch();

                            if head_slot + tolerance >= current_slot {
                                Ok(())
                            } else {
                                Err(warp_utils::reject::not_synced(format!(
                                    "head slot is {}, current slot is {}",
                                    head_slot, current_slot
                                )))
                            }
                        }
                        SyncState::SyncingHead { .. }
                        | SyncState::SyncTransition
                        | SyncState::BackFillSyncing { .. } => Ok(()),
                        SyncState::Synced => Ok(()),
                        SyncState::Stalled if allow_sync_stalled => Ok(()),
                        SyncState::Stalled => Err(warp_utils::reject::not_synced(
                            "sync is stalled".to_string(),
                        )),
                    }
                },
            )
            .untuple_one();
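
    // Illustrative sketch of how this guard composes into a route (the handlers below follow
    // this shape; `example` is not a real endpoint):
    // let guarded = eth_v1
    //     .and(warp::path("example"))
    //     .and(warp::path::end())
    //     .and(not_while_syncing_filter.clone())
    //     .and(chain_filter.clone());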

    // Create a `warp` filter that provides access to the logger.
    let inner_ctx = ctx.clone();
    let log_filter = warp::any().map(move || inner_ctx.log.clone());

    let inner_components = ctx.sse_logging_components.clone();
    let sse_component_filter = warp::any().map(move || inner_components.clone());

    // Create a `warp` filter that provides access to local system information.
    let system_info = Arc::new(RwLock::new(sysinfo::System::new()));
    {
        // grab write access for initialisation
        let mut system_info = system_info.write();
        system_info.refresh_disks_list();
        system_info.refresh_networks_list();
        system_info.refresh_cpu_specifics(sysinfo::CpuRefreshKind::everything());
        system_info.refresh_cpu();
    } // end lock

    let system_info_filter =
        warp::any()
            .map(move || system_info.clone())
            .map(|sysinfo: Arc<RwLock<System>>| {
                {
                    // refresh stats
                    let mut sysinfo_lock = sysinfo.write();
                    sysinfo_lock.refresh_memory();
                    sysinfo_lock.refresh_cpu_specifics(sysinfo::CpuRefreshKind::everything());
                    sysinfo_lock.refresh_cpu();
                    sysinfo_lock.refresh_system();
                    sysinfo_lock.refresh_networks();
                    sysinfo_lock.refresh_disks();
                } // end lock
                sysinfo
            });

    let app_start = std::time::Instant::now();
    let app_start_filter = warp::any().map(move || app_start);

    /*
     *
     * Start of HTTP method definitions.
     *
     */

    // GET beacon/genesis
    let get_beacon_genesis = eth_v1
        .and(warp::path("beacon"))
        .and(warp::path("genesis"))
        .and(warp::path::end())
        .and(chain_filter.clone())
        .and_then(|chain: Arc<BeaconChain<T>>| {
            blocking_json_task(move || {
                let genesis_data = api_types::GenesisData {
                    genesis_time: chain.genesis_time,
                    genesis_validators_root: chain.genesis_validators_root,
                    genesis_fork_version: chain.spec.genesis_fork_version,
                };
                Ok(api_types::GenericResponse::from(genesis_data))
            })
        });
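
    // Illustrative request/response shape for the route above (values elided):
    //   GET /eth/v1/beacon/genesis
    //   -> {"data": {"genesis_time": ..., "genesis_validators_root": ..., "genesis_fork_version": ...}}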

    /*
     * beacon/states/{state_id}
     */

    let beacon_states_path = eth_v1
        .and(warp::path("beacon"))
        .and(warp::path("states"))
        .and(warp::path::param::<StateId>().or_else(|_| async {
            Err(warp_utils::reject::custom_bad_request(
                "Invalid state ID".to_string(),
            ))
        }))
        .and(chain_filter.clone());

    // GET beacon/states/{state_id}/root
    let get_beacon_state_root = beacon_states_path
        .clone()
        .and(warp::path("root"))
        .and(warp::path::end())
        .and_then(|state_id: StateId, chain: Arc<BeaconChain<T>>| {
            blocking_json_task(move || {
                let (root, execution_optimistic, finalized) = state_id.root(&chain)?;
                Ok(root)
                    .map(api_types::RootData::from)
                    .map(api_types::GenericResponse::from)
                    .map(|resp| {
                        resp.add_execution_optimistic_finalized(execution_optimistic, finalized)
                    })
            })
        });

    // GET beacon/states/{state_id}/fork
    let get_beacon_state_fork = beacon_states_path
        .clone()
        .and(warp::path("fork"))
        .and(warp::path::end())
        .and_then(|state_id: StateId, chain: Arc<BeaconChain<T>>| {
            blocking_json_task(move || {
                let (fork, execution_optimistic, finalized) =
                    state_id.fork_and_execution_optimistic_and_finalized(&chain)?;
                Ok(api_types::ExecutionOptimisticFinalizedResponse {
                    data: fork,
                    execution_optimistic: Some(execution_optimistic),
                    finalized: Some(finalized),
                })
            })
        });

    // GET beacon/states/{state_id}/finality_checkpoints
    let get_beacon_state_finality_checkpoints = beacon_states_path
        .clone()
        .and(warp::path("finality_checkpoints"))
        .and(warp::path::end())
        .and_then(|state_id: StateId, chain: Arc<BeaconChain<T>>| {
            blocking_json_task(move || {
                let (data, execution_optimistic, finalized) = state_id
                    .map_state_and_execution_optimistic_and_finalized(
                        &chain,
                        |state, execution_optimistic, finalized| {
                            Ok((
                                api_types::FinalityCheckpointsData {
                                    previous_justified: state.previous_justified_checkpoint(),
                                    current_justified: state.current_justified_checkpoint(),
                                    finalized: state.finalized_checkpoint(),
                                },
                                execution_optimistic,
                                finalized,
                            ))
                        },
                    )?;

                Ok(api_types::ExecutionOptimisticFinalizedResponse {
                    data,
                    execution_optimistic: Some(execution_optimistic),
                    finalized: Some(finalized),
                })
            })
        });

    // GET beacon/states/{state_id}/validator_balances?id
    let get_beacon_state_validator_balances = beacon_states_path
        .clone()
        .and(warp::path("validator_balances"))
        .and(warp::path::end())
        .and(multi_key_query::<api_types::ValidatorBalancesQuery>())
        .and_then(
            |state_id: StateId,
             chain: Arc<BeaconChain<T>>,
             query_res: Result<api_types::ValidatorBalancesQuery, warp::Rejection>| {
                blocking_json_task(move || {
                    let query = query_res?;
                    let (data, execution_optimistic, finalized) = state_id
                        .map_state_and_execution_optimistic_and_finalized(
                            &chain,
                            |state, execution_optimistic, finalized| {
                                Ok((
                                    state
                                        .validators()
                                        .iter()
                                        .zip(state.balances().iter())
                                        .enumerate()
                                        // filter by validator id(s) if provided
                                        .filter(|(index, (validator, _))| {
                                            query.id.as_ref().map_or(true, |ids| {
                                                ids.iter().any(|id| match id {
                                                    ValidatorId::PublicKey(pubkey) => {
                                                        &validator.pubkey == pubkey
                                                    }
                                                    ValidatorId::Index(param_index) => {
                                                        *param_index == *index as u64
                                                    }
                                                })
                                            })
                                        })
                                        .map(|(index, (_, balance))| {
                                            Some(api_types::ValidatorBalanceData {
                                                index: index as u64,
                                                balance: *balance,
                                            })
                                        })
                                        .collect::<Vec<_>>(),
                                    execution_optimistic,
                                    finalized,
                                ))
                            },
                        )?;

                    Ok(api_types::ExecutionOptimisticFinalizedResponse {
                        data,
                        execution_optimistic: Some(execution_optimistic),
                        finalized: Some(finalized),
                    })
                })
            },
        );

    // GET beacon/states/{state_id}/validators?id,status
    let get_beacon_state_validators = beacon_states_path
        .clone()
        .and(warp::path("validators"))
        .and(warp::path::end())
        .and(multi_key_query::<api_types::ValidatorsQuery>())
        .and_then(
            |state_id: StateId,
             chain: Arc<BeaconChain<T>>,
             query_res: Result<api_types::ValidatorsQuery, warp::Rejection>| {
                blocking_json_task(move || {
                    let query = query_res?;
                    let (data, execution_optimistic, finalized) = state_id
                        .map_state_and_execution_optimistic_and_finalized(
                            &chain,
                            |state, execution_optimistic, finalized| {
                                let epoch = state.current_epoch();
                                let far_future_epoch = chain.spec.far_future_epoch;

                                Ok((
                                    state
                                        .validators()
                                        .iter()
                                        .zip(state.balances().iter())
                                        .enumerate()
                                        // filter by validator id(s) if provided
                                        .filter(|(index, (validator, _))| {
                                            query.id.as_ref().map_or(true, |ids| {
                                                ids.iter().any(|id| match id {
                                                    ValidatorId::PublicKey(pubkey) => {
                                                        &validator.pubkey == pubkey
                                                    }
                                                    ValidatorId::Index(param_index) => {
                                                        *param_index == *index as u64
                                                    }
                                                })
                                            })
                                        })
                                        // filter by status(es) if provided and map the result
                                        .filter_map(|(index, (validator, balance))| {
                                            let status = api_types::ValidatorStatus::from_validator(
                                                validator,
                                                epoch,
                                                far_future_epoch,
                                            );

                                            let status_matches =
                                                query.status.as_ref().map_or(true, |statuses| {
                                                    statuses.contains(&status)
                                                        || statuses.contains(&status.superstatus())
                                                });

                                            if status_matches {
                                                Some(api_types::ValidatorData {
                                                    index: index as u64,
                                                    balance: *balance,
                                                    status,
                                                    validator: validator.clone(),
                                                })
                                            } else {
                                                None
                                            }
                                        })
                                        .collect::<Vec<_>>(),
                                    execution_optimistic,
                                    finalized,
                                ))
                            },
                        )?;

                    Ok(api_types::ExecutionOptimisticFinalizedResponse {
                        data,
                        execution_optimistic: Some(execution_optimistic),
                        finalized: Some(finalized),
                    })
                })
            },
        );
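
    // Illustrative request shape for the route above (query values are examples only):
    //   GET /eth/v1/beacon/states/head/validators?id=0,1&status=active_ongoing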

    // GET beacon/states/{state_id}/validators/{validator_id}
    let get_beacon_state_validators_id = beacon_states_path
        .clone()
        .and(warp::path("validators"))
        .and(warp::path::param::<ValidatorId>().or_else(|_| async {
            Err(warp_utils::reject::custom_bad_request(
                "Invalid validator ID".to_string(),
            ))
        }))
        .and(warp::path::end())
        .and_then(
            |state_id: StateId, chain: Arc<BeaconChain<T>>, validator_id: ValidatorId| {
                blocking_json_task(move || {
                    let (data, execution_optimistic, finalized) = state_id
                        .map_state_and_execution_optimistic_and_finalized(
                            &chain,
                            |state, execution_optimistic, finalized| {
                                let index_opt = match &validator_id {
                                    ValidatorId::PublicKey(pubkey) => {
                                        state.validators().iter().position(|v| v.pubkey == *pubkey)
                                    }
                                    ValidatorId::Index(index) => Some(*index as usize),
                                };

                                Ok((
                                    index_opt
                                        .and_then(|index| {
                                            let validator = state.validators().get(index)?;
                                            let balance = *state.balances().get(index)?;
                                            let epoch = state.current_epoch();
                                            let far_future_epoch = chain.spec.far_future_epoch;

                                            Some(api_types::ValidatorData {
                                                index: index as u64,
                                                balance,
                                                status: api_types::ValidatorStatus::from_validator(
                                                    validator,
                                                    epoch,
                                                    far_future_epoch,
                                                ),
                                                validator: validator.clone(),
                                            })
                                        })
                                        .ok_or_else(|| {
                                            warp_utils::reject::custom_not_found(format!(
                                                "unknown validator: {}",
                                                validator_id
                                            ))
                                        })?,
                                    execution_optimistic,
                                    finalized,
                                ))
                            },
                        )?;

                    Ok(api_types::ExecutionOptimisticFinalizedResponse {
                        data,
                        execution_optimistic: Some(execution_optimistic),
                        finalized: Some(finalized),
                    })
                })
            },
        );
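
    // Illustrative request for the single-validator variant above; `head` and the numeric index
    // are placeholders (a 0x-prefixed pubkey is also accepted via `ValidatorId::PublicKey`), and
    // the host/port are assumptions as in the earlier example:
    //
    //   GET http://localhost:5052/eth/v1/beacon/states/head/validators/0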

    // GET beacon/states/{state_id}/committees?slot,index,epoch
    let get_beacon_state_committees = beacon_states_path
        .clone()
        .and(warp::path("committees"))
        .and(warp::query::<api_types::CommitteesQuery>())
        .and(warp::path::end())
        .and_then(
            |state_id: StateId, chain: Arc<BeaconChain<T>>, query: api_types::CommitteesQuery| {
                blocking_json_task(move || {
                    let (data, execution_optimistic, finalized) = state_id
                        .map_state_and_execution_optimistic_and_finalized(
                            &chain,
                            |state, execution_optimistic, finalized| {
                                let current_epoch = state.current_epoch();
                                let epoch = query.epoch.unwrap_or(current_epoch);

                                // Attempt to obtain the committee_cache from the beacon chain
                                let decision_slot = (epoch.saturating_sub(2u64))
                                    .end_slot(T::EthSpec::slots_per_epoch());
                                // Find the decision block and skip to another method on any kind
                                // of failure
                                let shuffling_id = if let Ok(Some(shuffling_decision_block)) =
                                    chain.block_root_at_slot(decision_slot, WhenSlotSkipped::Prev)
                                {
                                    Some(AttestationShufflingId {
                                        shuffling_epoch: epoch,
                                        shuffling_decision_block,
                                    })
                                } else {
                                    None
                                };

                                // Attempt to read from the chain cache if there exists a
                                // shuffling_id
                                let maybe_cached_shuffling = if let Some(shuffling_id) =
                                    shuffling_id.as_ref()
                                {
                                    chain
                                        .shuffling_cache
                                        .try_write_for(std::time::Duration::from_secs(1))
                                        .and_then(|mut cache_write| cache_write.get(shuffling_id))
                                        .and_then(|cache_item| cache_item.wait().ok())
                                } else {
                                    None
                                };

                                let committee_cache = if let Some(ref shuffling) =
                                    maybe_cached_shuffling
                                {
                                    Cow::Borrowed(&**shuffling)
                                } else {
                                    let possibly_built_cache =
                                        match RelativeEpoch::from_epoch(current_epoch, epoch) {
                                            Ok(relative_epoch)
                                                if state.committee_cache_is_initialized(
                                                    relative_epoch,
                                                ) =>
                                            {
                                                state
                                                    .committee_cache(relative_epoch)
                                                    .map(Cow::Borrowed)
                                            }
                                            _ => CommitteeCache::initialized(
                                                state,
                                                epoch,
                                                &chain.spec,
                                            )
                                            .map(Cow::Owned),
                                        }
                                        .map_err(|e| {
                                            match e {
                                                BeaconStateError::EpochOutOfBounds => {
                                                    let max_sprp =
                                                        T::EthSpec::slots_per_historical_root()
                                                            as u64;
                                                    let first_subsequent_restore_point_slot =
                                                        ((epoch.start_slot(
                                                            T::EthSpec::slots_per_epoch(),
                                                        ) / max_sprp)
                                                            + 1)
                                                            * max_sprp;
                                                    if epoch < current_epoch {
                                                        warp_utils::reject::custom_bad_request(
                                                            format!(
                                                                "epoch out of bounds, \
                                                                 try state at slot {}",
                                                                first_subsequent_restore_point_slot,
                                                            ),
                                                        )
                                                    } else {
                                                        warp_utils::reject::custom_bad_request(
                                                            "epoch out of bounds, \
                                                             too far in future"
                                                                .into(),
                                                        )
                                                    }
                                                }
                                                _ => {
                                                    warp_utils::reject::beacon_chain_error(e.into())
                                                }
                                            }
                                        })?;

                                    // Attempt to write to the beacon cache (only if the cache
                                    // size is not the default value).
                                    if chain.config.shuffling_cache_size
                                        != beacon_chain::shuffling_cache::DEFAULT_CACHE_SIZE
                                    {
                                        if let Some(shuffling_id) = shuffling_id {
                                            if let Some(mut cache_write) = chain
                                                .shuffling_cache
                                                .try_write_for(std::time::Duration::from_secs(1))
                                            {
                                                cache_write.insert_committee_cache(
                                                    shuffling_id,
                                                    &*possibly_built_cache,
                                                );
                                            }
                                        }
                                    }
                                    possibly_built_cache
                                };

                                // Use either the supplied slot or all slots in the epoch.
                                let slots =
                                    query.slot.map(|slot| vec![slot]).unwrap_or_else(|| {
                                        epoch.slot_iter(T::EthSpec::slots_per_epoch()).collect()
                                    });

                                // Use either the supplied committee index or all available indices.
                                let indices =
                                    query.index.map(|index| vec![index]).unwrap_or_else(|| {
                                        (0..committee_cache.committees_per_slot()).collect()
                                    });

                                let mut response = Vec::with_capacity(slots.len() * indices.len());

                                for slot in slots {
                                    // It is not acceptable to query with a slot that is not within the
                                    // specified epoch.
                                    if slot.epoch(T::EthSpec::slots_per_epoch()) != epoch {
                                        return Err(warp_utils::reject::custom_bad_request(
                                            format!("{} is not in epoch {}", slot, epoch),
                                        ));
                                    }

                                    for &index in &indices {
                                        let committee = committee_cache
                                            .get_beacon_committee(slot, index)
                                            .ok_or_else(|| {
                                                warp_utils::reject::custom_bad_request(format!(
                                                    "committee index {} does not exist in epoch {}",
                                                    index, epoch
                                                ))
                                            })?;

                                        response.push(api_types::CommitteeData {
                                            index,
                                            slot,
                                            validators: committee
                                                .committee
                                                .iter()
                                                .map(|i| *i as u64)
                                                .collect(),
                                        });
                                    }
                                }

                                Ok((response, execution_optimistic, finalized))
                            },
                        )?;
                    Ok(api_types::ExecutionOptimisticFinalizedResponse {
                        data,
                        execution_optimistic: Some(execution_optimistic),
                        finalized: Some(finalized),
                    })
                })
            },
        );
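
    // Illustrative request for the committees endpoint above; all three query parameters are
    // optional and correspond to `query.epoch`, `query.index` and `query.slot`. Note that the
    // handler rejects a `slot` that does not fall within the requested `epoch`:
    //
    //   GET http://localhost:5052/eth/v1/beacon/states/head/committees?epoch=100&index=0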

    // GET beacon/states/{state_id}/sync_committees?epoch
    let get_beacon_state_sync_committees = beacon_states_path
        .clone()
        .and(warp::path("sync_committees"))
        .and(warp::query::<api_types::SyncCommitteesQuery>())
        .and(warp::path::end())
        .and_then(
            |state_id: StateId,
             chain: Arc<BeaconChain<T>>,
             query: api_types::SyncCommitteesQuery| {
                blocking_json_task(move || {
                    let (sync_committee, execution_optimistic, finalized) = state_id
                        .map_state_and_execution_optimistic_and_finalized(
                            &chain,
                            |state, execution_optimistic, finalized| {
                                let current_epoch = state.current_epoch();
                                let epoch = query.epoch.unwrap_or(current_epoch);
                                Ok((
                                    state
                                        .get_built_sync_committee(epoch, &chain.spec)
                                        .map(|committee| committee.clone())
                                        .map_err(|e| match e {
                                            BeaconStateError::SyncCommitteeNotKnown { .. } => {
                                                warp_utils::reject::custom_bad_request(format!(
                                                    "state at epoch {} has no \
                                                     sync committee for epoch {}",
                                                    current_epoch, epoch
                                                ))
                                            }
                                            BeaconStateError::IncorrectStateVariant => {
                                                warp_utils::reject::custom_bad_request(format!(
                                                    "state at epoch {} is not activated for Altair",
                                                    current_epoch,
                                                ))
                                            }
                                            e => warp_utils::reject::beacon_state_error(e),
                                        })?,
                                    execution_optimistic,
                                    finalized,
                                ))
                            },
                        )?;

                    let validators = chain
                        .validator_indices(sync_committee.pubkeys.iter())
                        .map_err(warp_utils::reject::beacon_chain_error)?;

                    let validator_aggregates = validators
                        .chunks_exact(T::EthSpec::sync_subcommittee_size())
                        .map(|indices| api_types::SyncSubcommittee {
                            indices: indices.to_vec(),
                        })
                        .collect();

                    let response = api_types::SyncCommitteeByValidatorIndices {
                        validators,
                        validator_aggregates,
                    };

                    Ok(api_types::GenericResponse::from(response)
                        .add_execution_optimistic_finalized(execution_optimistic, finalized))
                })
            },
        );
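
    // Illustrative request for the sync committee endpoint above; `epoch` is optional and
    // defaults to the state's current epoch, mirroring `query.epoch.unwrap_or(current_epoch)`:
    //
    //   GET http://localhost:5052/eth/v1/beacon/states/head/sync_committees?epoch=100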

    // GET beacon/states/{state_id}/randao?epoch
    let get_beacon_state_randao = beacon_states_path
        .clone()
        .and(warp::path("randao"))
        .and(warp::query::<api_types::RandaoQuery>())
        .and(warp::path::end())
        .and_then(
            |state_id: StateId, chain: Arc<BeaconChain<T>>, query: api_types::RandaoQuery| {
                blocking_json_task(move || {
                    let (randao, execution_optimistic, finalized) = state_id
                        .map_state_and_execution_optimistic_and_finalized(
                            &chain,
                            |state, execution_optimistic, finalized| {
                                let epoch = query.epoch.unwrap_or_else(|| state.current_epoch());
                                let randao = *state.get_randao_mix(epoch).map_err(|e| {
                                    warp_utils::reject::custom_bad_request(format!(
                                        "epoch out of range: {e:?}"
                                    ))
                                })?;
                                Ok((randao, execution_optimistic, finalized))
                            },
                        )?;

                    Ok(
                        api_types::GenericResponse::from(api_types::RandaoMix { randao })
                            .add_execution_optimistic_finalized(execution_optimistic, finalized),
                    )
                })
            },
        );
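
    // Illustrative request for the randao endpoint above; an out-of-range epoch is turned into a
    // 400 via `custom_bad_request`, as in the handler:
    //
    //   GET http://localhost:5052/eth/v1/beacon/states/head/randao?epoch=100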

    // GET beacon/headers
    //
    // Note: this endpoint only returns information about blocks in the canonical chain. Given
    // that there's a `canonical` flag on the response, I assume it should also return
    // non-canonical blocks. Returning non-canonical blocks is hard for us since we don't already
    // have a mechanism for arbitrary forwards block iteration; we only support iterating forwards
    // along the canonical chain.
    let get_beacon_headers = eth_v1
        .and(warp::path("beacon"))
        .and(warp::path("headers"))
        .and(warp::query::<api_types::HeadersQuery>())
        .and(warp::path::end())
        .and(chain_filter.clone())
        .and_then(
            |query: api_types::HeadersQuery, chain: Arc<BeaconChain<T>>| {
                blocking_json_task(move || {
                    let (root, block, execution_optimistic, finalized) =
                        match (query.slot, query.parent_root) {
                            // No query parameters, return the canonical head block.
                            (None, None) => {
                                let (cached_head, execution_status) = chain
                                    .canonical_head
                                    .head_and_execution_status()
                                    .map_err(warp_utils::reject::beacon_chain_error)?;
                                (
                                    cached_head.head_block_root(),
                                    cached_head.snapshot.beacon_block.clone_as_blinded(),
                                    execution_status.is_optimistic_or_invalid(),
                                    false,
                                )
                            }
                            // Only the parent root parameter, do a forwards-iterator lookup.
                            (None, Some(parent_root)) => {
                                let (parent, execution_optimistic, _parent_finalized) =
                                    BlockId::from_root(parent_root).blinded_block(&chain)?;
                                let (root, _slot) = chain
                                    .forwards_iter_block_roots(parent.slot())
                                    .map_err(warp_utils::reject::beacon_chain_error)?
                                    // Ignore any skip-slots immediately following the parent.
                                    .find(|res| {
                                        res.as_ref().map_or(false, |(root, _)| *root != parent_root)
                                    })
                                    .transpose()
                                    .map_err(warp_utils::reject::beacon_chain_error)?
                                    .ok_or_else(|| {
                                        warp_utils::reject::custom_not_found(format!(
                                            "child of block with root {}",
                                            parent_root
                                        ))
                                    })?;

                                BlockId::from_root(root)
                                    .blinded_block(&chain)
                                    // Ignore this `execution_optimistic` since the first value has
                                    // more information about the original request.
                                    .map(|(block, _execution_optimistic, finalized)| {
                                        (root, block, execution_optimistic, finalized)
                                    })?
                            }
                            // Slot is supplied, search by slot and optionally filter by
                            // parent root.
                            (Some(slot), parent_root_opt) => {
                                let (root, execution_optimistic, finalized) =
                                    BlockId::from_slot(slot).root(&chain)?;
                                // Ignore the second `execution_optimistic`, the first one is the
                                // most relevant since it knows that we queried by slot.
                                let (block, _execution_optimistic, _finalized) =
                                    BlockId::from_root(root).blinded_block(&chain)?;

                                // If the parent root was supplied, check that it matches the block
                                // obtained via a slot lookup.
                                if let Some(parent_root) = parent_root_opt {
                                    if block.parent_root() != parent_root {
                                        return Err(warp_utils::reject::custom_not_found(format!(
                                            "no canonical block at slot {} with parent root {}",
                                            slot, parent_root
                                        )));
                                    }
                                }

                                (root, block, execution_optimistic, finalized)
                            }
                        };

                    let data = api_types::BlockHeaderData {
                        root,
                        canonical: true,
                        header: api_types::BlockHeaderAndSignature {
                            message: block.message().block_header(),
                            signature: block.signature().clone().into(),
                        },
                    };

                    Ok(api_types::GenericResponse::from(vec![data])
                        .add_execution_optimistic_finalized(execution_optimistic, finalized))
                })
            },
        );
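
    // Illustrative requests for the headers endpoint above, matching the three arms of the
    // `(query.slot, query.parent_root)` match (no parameters, parent-root only, and slot with an
    // optional parent-root filter); host/port are placeholders:
    //
    //   GET http://localhost:5052/eth/v1/beacon/headers
    //   GET http://localhost:5052/eth/v1/beacon/headers?parent_root=0x...
    //   GET http://localhost:5052/eth/v1/beacon/headers?slot=3200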

    // GET beacon/headers/{block_id}
    let get_beacon_headers_block_id = eth_v1
        .and(warp::path("beacon"))
        .and(warp::path("headers"))
        .and(warp::path::param::<BlockId>().or_else(|_| async {
            Err(warp_utils::reject::custom_bad_request(
                "Invalid block ID".to_string(),
            ))
        }))
        .and(warp::path::end())
        .and(chain_filter.clone())
        .and_then(|block_id: BlockId, chain: Arc<BeaconChain<T>>| {
            blocking_json_task(move || {
                let (root, execution_optimistic, finalized) = block_id.root(&chain)?;
                // Ignore the second `execution_optimistic` since the first one has more
                // information about the original request.
                let (block, _execution_optimistic, _finalized) =
                    BlockId::from_root(root).blinded_block(&chain)?;

                let canonical = chain
                    .block_root_at_slot(block.slot(), WhenSlotSkipped::None)
                    .map_err(warp_utils::reject::beacon_chain_error)?
                    .map_or(false, |canonical| root == canonical);

                let data = api_types::BlockHeaderData {
                    root,
                    canonical,
                    header: api_types::BlockHeaderAndSignature {
                        message: block.message().block_header(),
                        signature: block.signature().clone().into(),
                    },
                };

                Ok(api_types::ExecutionOptimisticFinalizedResponse {
                    execution_optimistic: Some(execution_optimistic),
                    finalized: Some(finalized),
                    data,
                })
            })
        });
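
    // Illustrative request for the single-header endpoint above; the path parameter accepts the
    // same `BlockId` forms used elsewhere (a block root, a slot, or a named identifier such as
    // `head`):
    //
    //   GET http://localhost:5052/eth/v1/beacon/headers/head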

    /*
     * beacon/blocks
     */

    // POST beacon/blocks
    let post_beacon_blocks = eth_v1
        .and(warp::path("beacon"))
        .and(warp::path("blocks"))
        .and(warp::path::end())
        .and(warp::body::json())
        .and(chain_filter.clone())
        .and(network_tx_filter.clone())
        .and(log_filter.clone())
        .and_then(
            |block: Arc<SignedBeaconBlock<T::EthSpec>>,
             chain: Arc<BeaconChain<T>>,
             network_tx: UnboundedSender<NetworkMessage<T::EthSpec>>,
             log: Logger| async move {
                publish_blocks::publish_block(
                    None,
                    ProvenancedBlock::local(block),
                    chain,
                    &network_tx,
                    log,
                    BroadcastValidation::default(),
                )
                .await
                .map(|()| warp::reply().into_response())
            },
        );
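
    // Illustrative usage of the JSON publish route above (an SSZ variant follows below). The
    // request body is a signed beacon block in its JSON representation and is published with the
    // default `BroadcastValidation` level; host/port and the exact Content-Type handling are
    // assumptions about the surrounding server setup:
    //
    //   POST http://localhost:5052/eth/v1/beacon/blocks
    //   Content-Type: application/json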

    let post_beacon_blocks_ssz = eth_v1
        .and(warp::path("beacon"))
        .and(warp::path("blocks"))
        .and(warp::path::end())
        .and(warp::body::bytes())
        .and(chain_filter.clone())
        .and(network_tx_filter.clone())
        .and(log_filter.clone())
        .and_then(
            |block_bytes: Bytes,
             chain: Arc<BeaconChain<T>>,
             network_tx: UnboundedSender<NetworkMessage<T::EthSpec>>,
             log: Logger| async move {
                let block = match SignedBeaconBlock::<T::EthSpec>::from_ssz_bytes(
                    &block_bytes,
                    &chain.spec,
                ) {
                    Ok(data) => data,
                    Err(e) => {
                        return Err(warp_utils::reject::custom_bad_request(format!("{:?}", e)))
                    }
                };
                publish_blocks::publish_block(
                    None,
                    ProvenancedBlock::local(Arc::new(block)),
                    chain,
                    &network_tx,
                    log,
                    BroadcastValidation::default(),
                )
                .await
                .map(|()| warp::reply().into_response())
            },
        );
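
    // Note on the SSZ variant above: it shares the `beacon/blocks` path with the JSON route but
    // takes the raw request body as `Bytes` and decodes it with
    // `SignedBeaconBlock::from_ssz_bytes`, so the body must be the SSZ encoding of a signed
    // beacon block; decode failures are returned as a 400 via `custom_bad_request`.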

    let post_beacon_blocks_v2 = eth_v2
        .and(warp::path("beacon"))
        .and(warp::path("blocks"))
        .and(warp::query::<api_types::BroadcastValidationQuery>())
        .and(warp::path::end())
        .and(warp::body::json())
        .and(chain_filter.clone())
        .and(network_tx_filter.clone())
        .and(log_filter.clone())
        .then(
            |validation_level: api_types::BroadcastValidationQuery,
             block: Arc<SignedBeaconBlock<T::EthSpec>>,
             chain: Arc<BeaconChain<T>>,
             network_tx: UnboundedSender<NetworkMessage<T::EthSpec>>,
             log: Logger| async move {
                match publish_blocks::publish_block(
                    None,
                    ProvenancedBlock::local(block),
                    chain,
                    &network_tx,
                    log,
                    validation_level.broadcast_validation,
                )
                .await
                {
                    Ok(()) => warp::reply().into_response(),
                    Err(e) => match warp_utils::reject::handle_rejection(e).await {
                        Ok(reply) => reply.into_response(),
                        Err(_) => warp::reply::with_status(
                            StatusCode::INTERNAL_SERVER_ERROR,
                            eth2::StatusCode::INTERNAL_SERVER_ERROR,
                        )
                        .into_response(),
                    },
                }
            },
        );
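
    // Illustrative usage of the v2 publish route above; `broadcast_validation` maps to
    // `BroadcastValidationQuery::broadcast_validation` and controls how much validation is
    // applied before broadcast (values such as `gossip`, `consensus` and
    // `consensus_and_equivocation`, assuming the standard query-string serialization):
    //
    //   POST http://localhost:5052/eth/v2/beacon/blocks?broadcast_validation=consensus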

    let post_beacon_blocks_v2_ssz = eth_v2
        .and(warp::path("beacon"))
        .and(warp::path("blocks"))
        .and(warp::query::<api_types::BroadcastValidationQuery>())
        .and(warp::path::end())
        .and(warp::body::bytes())
        .and(chain_filter.clone())
        .and(network_tx_filter.clone())
        .and(log_filter.clone())
        .then(
            |validation_level: api_types::BroadcastValidationQuery,
             block_bytes: Bytes,
             chain: Arc<BeaconChain<T>>,
             network_tx: UnboundedSender<NetworkMessage<T::EthSpec>>,
             log: Logger| async move {
                let block = match SignedBeaconBlock::<T::EthSpec>::from_ssz_bytes(
                    &block_bytes,
                    &chain.spec,
                ) {
                    Ok(data) => data,
                    Err(_) => {
                        return warp::reply::with_status(
                            StatusCode::BAD_REQUEST,
                            eth2::StatusCode::BAD_REQUEST,
                        )
                        .into_response();
                    }
                };
                match publish_blocks::publish_block(
                    None,
                    ProvenancedBlock::local(Arc::new(block)),
                    chain,
                    &network_tx,
                    log,
                    validation_level.broadcast_validation,
                )
                .await
                {
                    Ok(()) => warp::reply().into_response(),
                    Err(e) => match warp_utils::reject::handle_rejection(e).await {
                        Ok(reply) => reply.into_response(),
                        Err(_) => warp::reply::with_status(
                            StatusCode::INTERNAL_SERVER_ERROR,
                            eth2::StatusCode::INTERNAL_SERVER_ERROR,
                        )
                        .into_response(),
                    },
                }
            },
        );
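
    // Note on error handling in the v2 routes above: they are registered with `Filter::then`
    // rather than `and_then`, so failures are not propagated as warp rejections. Instead,
    // `warp_utils::reject::handle_rejection` converts the error into a response directly, falling
    // back to a plain 500 if even that conversion fails; presumably this keeps the
    // broadcast-validation status codes under the handler's control rather than warp's global
    // rejection handling.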

    /*
     * beacon/blinded_blocks
     */

    // POST beacon/blinded_blocks
    let post_beacon_blinded_blocks = eth_v1
        .and(warp::path("beacon"))
        .and(warp::path("blinded_blocks"))
        .and(warp::path::end())
        .and(warp::body::json())
        .and(chain_filter.clone())
        .and(network_tx_filter.clone())
        .and(log_filter.clone())
        .and_then(
            |block: SignedBeaconBlock<T::EthSpec, BlindedPayload<_>>,
             chain: Arc<BeaconChain<T>>,
             network_tx: UnboundedSender<NetworkMessage<T::EthSpec>>,
             log: Logger| async move {
                publish_blocks::publish_blinded_block(
                    block,
                    chain,
                    &network_tx,
                    log,
                    BroadcastValidation::default(),
                )
                .await
                .map(|()| warp::reply().into_response())
            },
        );
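
    // Illustrative usage of the blinded-block publish route above; the body is a
    // `SignedBeaconBlock` with a `BlindedPayload`, i.e. carrying the execution payload header
    // rather than the full payload:
    //
    //   POST http://localhost:5052/eth/v1/beacon/blinded_blocks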

    let post_beacon_blinded_blocks_v2 = eth_v2
        .and(warp::path("beacon"))
        .and(warp::path("blinded_blocks"))
        .and(warp::query::<api_types::BroadcastValidationQuery>())
        .and(warp::path::end())
        .and(warp::body::json())
        .and(chain_filter.clone())
        .and(network_tx_filter.clone())
        .and(log_filter.clone())
        .then(
            |validation_level: api_types::BroadcastValidationQuery,
             block: SignedBeaconBlock<T::EthSpec, BlindedPayload<_>>,
             chain: Arc<BeaconChain<T>>,
             network_tx: UnboundedSender<NetworkMessage<T::EthSpec>>,
             log: Logger| async move {
                match publish_blocks::publish_blinded_block(
                    block,
                    chain,
                    &network_tx,
                    log,
                    validation_level.broadcast_validation,
                )
                .await
                {
                    Ok(()) => warp::reply().into_response(),
                    Err(e) => match warp_utils::reject::handle_rejection(e).await {
                        Ok(reply) => reply.into_response(),
                        Err(_) => warp::reply::with_status(
                            StatusCode::INTERNAL_SERVER_ERROR,
                            eth2::StatusCode::INTERNAL_SERVER_ERROR,
                        )
                        .into_response(),
                    },
                }
            },
        );

    let block_id_or_err = warp::path::param::<BlockId>().or_else(|_| async {
        Err(warp_utils::reject::custom_bad_request(
            "Invalid block ID".to_string(),
        ))
    });

    let beacon_blocks_path_v1 = eth_v1
        .and(warp::path("beacon"))
        .and(warp::path("blocks"))
        .and(block_id_or_err)
        .and(chain_filter.clone());

    let beacon_blocks_path_any = any_version
        .and(warp::path("beacon"))
        .and(warp::path("blocks"))
        .and(block_id_or_err)
        .and(chain_filter.clone());

    // GET beacon/blocks/{block_id}
    let get_beacon_block = beacon_blocks_path_any
        .clone()
        .and(warp::path::end())
        .and(warp::header::optional::<api_types::Accept>("accept"))
        .and_then(
            |endpoint_version: EndpointVersion,
             block_id: BlockId,
             chain: Arc<BeaconChain<T>>,
             accept_header: Option<api_types::Accept>| {
                async move {
                    let (block, execution_optimistic, finalized) =
                        block_id.full_block(&chain).await?;
                    let fork_name = block
                        .fork_name(&chain.spec)
                        .map_err(inconsistent_fork_rejection)?;

                    match accept_header {
                        Some(api_types::Accept::Ssz) => Response::builder()
                            .status(200)
                            .header("Content-Type", "application/octet-stream")
                            .body(block.as_ssz_bytes().into())
                            .map_err(|e| {
                                warp_utils::reject::custom_server_error(format!(
                                    "failed to create response: {}",
                                    e
                                ))
                            }),
                        _ => execution_optimistic_finalized_fork_versioned_response(
                            endpoint_version,
                            fork_name,
                            execution_optimistic,
                            finalized,
                            block,
                        )
                        .map(|res| warp::reply::json(&res).into_response()),
                    }
                    .map(|resp| add_consensus_version_header(resp, fork_name))
                }
            },
        );
|
2020-09-29 03:46:54 +00:00
|
|
|
|
|
|
|
// GET beacon/blocks/{block_id}/root
|
2021-08-06 00:47:31 +00:00
|
|
|
let get_beacon_block_root = beacon_blocks_path_v1
|
2020-09-29 03:46:54 +00:00
|
|
|
.clone()
|
|
|
|
.and(warp::path("root"))
|
|
|
|
.and(warp::path::end())
|
|
|
|
.and_then(|block_id: BlockId, chain: Arc<BeaconChain<T>>| {
|
|
|
|
blocking_json_task(move || {
|
2023-03-30 06:08:37 +00:00
|
|
|
let (block, execution_optimistic, finalized) = block_id.blinded_block(&chain)?;
|
2022-07-25 08:23:00 +00:00
|
|
|
Ok(api_types::GenericResponse::from(api_types::RootData::from(
|
|
|
|
block.canonical_root(),
|
|
|
|
))
|
2023-03-30 06:08:37 +00:00
|
|
|
.add_execution_optimistic_finalized(execution_optimistic, finalized))
|
2020-09-29 03:46:54 +00:00
|
|
|
})
|
|
|
|
});
|
|
|
|
|
|
|
|
// GET beacon/blocks/{block_id}/attestations
|
2021-08-06 00:47:31 +00:00
|
|
|
let get_beacon_block_attestations = beacon_blocks_path_v1
|
2020-09-29 03:46:54 +00:00
|
|
|
.clone()
|
|
|
|
.and(warp::path("attestations"))
|
|
|
|
.and(warp::path::end())
|
|
|
|
.and_then(|block_id: BlockId, chain: Arc<BeaconChain<T>>| {
|
|
|
|
blocking_json_task(move || {
|
2023-03-30 06:08:37 +00:00
|
|
|
let (block, execution_optimistic, finalized) = block_id.blinded_block(&chain)?;
|
2022-07-25 08:23:00 +00:00
|
|
|
Ok(
|
|
|
|
api_types::GenericResponse::from(block.message().body().attestations().clone())
|
2023-03-30 06:08:37 +00:00
|
|
|
.add_execution_optimistic_finalized(execution_optimistic, finalized),
|
2022-07-25 08:23:00 +00:00
|
|
|
)
|
2020-09-29 03:46:54 +00:00
|
|
|
})
|
|
|
|
});
|
|
|
|
|
2022-11-11 00:38:27 +00:00
|
|
|
// GET beacon/blinded_blocks/{block_id}
|
|
|
|
let get_beacon_blinded_block = eth_v1
|
|
|
|
.and(warp::path("beacon"))
|
|
|
|
.and(warp::path("blinded_blocks"))
|
|
|
|
.and(block_id_or_err)
|
|
|
|
.and(chain_filter.clone())
|
|
|
|
.and(warp::path::end())
|
|
|
|
.and(warp::header::optional::<api_types::Accept>("accept"))
|
|
|
|
.and_then(
|
|
|
|
|block_id: BlockId,
|
|
|
|
chain: Arc<BeaconChain<T>>,
|
|
|
|
accept_header: Option<api_types::Accept>| {
|
2023-03-13 01:40:03 +00:00
|
|
|
blocking_response_task(move || {
|
2023-03-30 06:08:37 +00:00
|
|
|
let (block, execution_optimistic, finalized) =
|
|
|
|
block_id.blinded_block(&chain)?;
|
2022-11-11 00:38:27 +00:00
|
|
|
let fork_name = block
|
|
|
|
.fork_name(&chain.spec)
|
|
|
|
.map_err(inconsistent_fork_rejection)?;
|
|
|
|
|
|
|
|
match accept_header {
|
|
|
|
Some(api_types::Accept::Ssz) => Response::builder()
|
|
|
|
.status(200)
|
|
|
|
.header("Content-Type", "application/octet-stream")
|
|
|
|
.body(block.as_ssz_bytes().into())
|
|
|
|
.map_err(|e| {
|
|
|
|
warp_utils::reject::custom_server_error(format!(
|
|
|
|
"failed to create response: {}",
|
|
|
|
e
|
|
|
|
))
|
|
|
|
}),
|
|
|
|
_ => {
|
|
|
|
// Post as a V2 endpoint so we return the fork version.
|
2023-03-30 06:08:37 +00:00
|
|
|
execution_optimistic_finalized_fork_versioned_response(
|
2022-11-11 00:38:27 +00:00
|
|
|
V2,
|
|
|
|
fork_name,
|
|
|
|
execution_optimistic,
|
2023-03-30 06:08:37 +00:00
|
|
|
finalized,
|
2022-11-11 00:38:27 +00:00
|
|
|
block,
|
|
|
|
)
|
|
|
|
.map(|res| warp::reply::json(&res).into_response())
|
|
|
|
}
|
|
|
|
}
|
|
|
|
.map(|resp| add_consensus_version_header(resp, fork_name))
|
|
|
|
})
|
|
|
|
},
|
|
|
|
);
|
|
|
|
|
2020-09-29 03:46:54 +00:00
|
|
|
/*
|
|
|
|
* beacon/pool
|
|
|
|
*/
|
|
|
|
|
2022-07-25 08:23:00 +00:00
|
|
|
let beacon_pool_path = eth_v1
|
2020-09-29 03:46:54 +00:00
|
|
|
.and(warp::path("beacon"))
|
|
|
|
.and(warp::path("pool"))
|
|
|
|
.and(chain_filter.clone());
|
|
|
|
|
|
|
|
// POST beacon/pool/attestations
|
|
|
|
let post_beacon_pool_attestations = beacon_pool_path
|
|
|
|
.clone()
|
|
|
|
.and(warp::path("attestations"))
|
|
|
|
.and(warp::path::end())
|
|
|
|
.and(warp::body::json())
|
|
|
|
.and(network_tx_filter.clone())
|
2020-11-18 23:31:39 +00:00
|
|
|
.and(log_filter.clone())
|
2020-09-29 03:46:54 +00:00
|
|
|
.and_then(
|
|
|
|
|chain: Arc<BeaconChain<T>>,
|
2020-11-18 23:31:39 +00:00
|
|
|
attestations: Vec<Attestation<T::EthSpec>>,
|
|
|
|
network_tx: UnboundedSender<NetworkMessage<T::EthSpec>>,
|
|
|
|
log: Logger| {
|
2020-09-29 03:46:54 +00:00
|
|
|
blocking_json_task(move || {
|
2021-01-20 19:19:38 +00:00
|
|
|
let seen_timestamp = timestamp_now();
|
2020-11-18 23:31:39 +00:00
|
|
|
let mut failures = Vec::new();
|
2022-08-10 07:52:57 +00:00
|
|
|
let mut num_already_known = 0;
|
2020-09-29 03:46:54 +00:00
|
|
|
|
2020-11-18 23:31:39 +00:00
|
|
|
for (index, attestation) in attestations.as_slice().iter().enumerate() {
|
|
|
|
let attestation = match chain
|
Batch BLS verification for attestations (#2399)
## Issue Addressed
NA
## Proposed Changes
Adds the ability to verify batches of aggregated/unaggregated attestations from the network.
When the `BeaconProcessor` finds there are messages in the aggregated or unaggregated attestation queues, it will first check the length of the queue:
- `== 1` verify the attestation individually.
- `>= 2` take up to 64 of those attestations and verify them in a batch.
Notably, we only perform batch verification if the queue has a backlog. We don't apply any artificial delays to attestations to try and force them into batches.
### Batching Details
To assist with implementing batches we modify `beacon_chain::attestation_verification` to have two distinct categories for attestations:
- *Indexed* attestations: those which have passed initial validation and were valid enough for us to derive an `IndexedAttestation`.
- *Verified* attestations: those attestations which were indexed *and also* passed signature verification. These are well-formed, interesting messages which were signed by validators.
The batching functions accept `n` attestations and then return `n` attestation verification `Result`s, where those `Result`s can be any combination of `Ok` or `Err`. In other words, we attempt to verify as many attestations as possible and return specific per-attestation results so peer scores can be updated, if required.
When we batch verify attestations, we first try to map all those attestations to *indexed* attestations. If any of those attestations were able to be indexed, we then perform batch BLS verification on those indexed attestations. If the batch verification succeeds, we convert them into *verified* attestations, disabling individual signature checking. If the batch fails, we convert to verified attestations with individual signature checking enabled.
Ultimately, we optimistically try to do a batch verification of attestation signatures and fall back to individual verification if it fails. This opens an attack vector for "poisoning" the attestations and causing us to waste a batch verification. I argue that peer scoring should do a good-enough job of defending against this and the typical-case gains massively outweigh the worst-case losses.
## Additional Info
Before this PR, attestation verification took the attestations by value (instead of by reference). It turns out that this was unnecessary and, in my opinion, resulted in some undesirable ergonomics (e.g., we had to pass the attestation back in the `Err` variant to avoid clones). In this PR I've modified attestation verification so that it now takes a reference.
I refactored the `beacon_chain/tests/attestation_verification.rs` tests so they use a builder-esque "tester" struct instead of a weird macro. It made it easier for me to test individual/batch with the same set of tests and I think it was a nice tidy-up. Notably, I did this last to try and make sure my new refactors to *actual* production code would pass under the existing test suite.
2021-09-22 08:49:41 +00:00
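A rough sketch of the batch-or-individual decision described above, assuming hypothetical `verify_one`/`verify_batch` closures; only the control flow is the point here, not the real verification types.
```rust
/// Upper bound on how many attestations are verified in one batch.
const MAX_BATCH: usize = 64;

fn process_queue<A>(
    queue: &[A],
    verify_one: impl Fn(&A) -> Result<(), String>,
    verify_batch: impl Fn(&[A]) -> Option<Vec<Result<(), String>>>,
) -> Vec<Result<(), String>> {
    match queue.len() {
        0 => Vec::new(),
        // No backlog: verify the single attestation individually.
        1 => vec![verify_one(&queue[0])],
        _ => {
            // Backlog: take up to 64 attestations and try one batch BLS check.
            let n = queue.len().min(MAX_BATCH);
            let items = &queue[..n];
            match verify_batch(items) {
                // Batch signature check passed: per-attestation results come
                // back with individual signature checking already skipped.
                Some(results) => results,
                // Batch failed (e.g. one "poisoned" attestation): fall back to
                // checking each signature individually so peers can be scored.
                None => items.iter().map(|a| verify_one(a)).collect(),
            }
        }
    }
}
```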
|
|
|
.verify_unaggregated_attestation_for_gossip(attestation, None)
|
2020-11-18 23:31:39 +00:00
|
|
|
{
|
|
|
|
Ok(attestation) => attestation,
|
2022-08-10 07:52:57 +00:00
|
|
|
Err(AttnError::PriorAttestationKnown { .. }) => {
|
|
|
|
num_already_known += 1;
|
|
|
|
|
|
|
|
// Skip to the next attestation since an attestation for this
|
|
|
|
// validator is already known in this epoch.
|
|
|
|
//
|
|
|
|
// There's little value for the network in validating a second
|
|
|
|
// attestation for another validator since it is either:
|
|
|
|
//
|
|
|
|
// 1. A duplicate.
|
|
|
|
// 2. Slashable.
|
|
|
|
// 3. Invalid.
|
|
|
|
//
|
|
|
|
// We are likely to get duplicates in the case where a VC is using
|
|
|
|
// fallback BNs. If the first BN actually publishes some/all of a
|
|
|
|
// batch of attestations but fails to respond in a timely fashion,
|
|
|
|
// the VC is likely to try publishing the attestations on another
|
|
|
|
// BN. That second BN may have already seen the attestations from
|
|
|
|
// the first BN and therefore indicate that the attestations are
|
|
|
|
// "already seen". An attestation that has already been seen has
|
|
|
|
// been published on the network so there's no actual error from
|
|
|
|
// the perspective of the user.
|
|
|
|
//
|
|
|
|
// It's better to prevent slashable attestations from ever
|
|
|
|
// appearing on the network than trying to slash validators,
|
|
|
|
// especially those validators connected to the local API.
|
|
|
|
//
|
|
|
|
// There might be *some* value in determining that this attestation
|
|
|
|
// is invalid, but since a valid attestation already exists it
|
|
|
|
// appears that this validator is capable of producing valid
|
|
|
|
// attestations and there's no immediate cause for concern.
|
|
|
|
continue;
|
|
|
|
}
|
2020-11-18 23:31:39 +00:00
|
|
|
Err(e) => {
|
|
|
|
error!(log,
|
|
|
|
"Failure verifying attestation for gossip";
|
|
|
|
"error" => ?e,
|
|
|
|
"request_index" => index,
|
|
|
|
"committee_index" => attestation.data.index,
|
|
|
|
"attestation_slot" => attestation.data.slot,
|
|
|
|
);
|
|
|
|
failures.push(api_types::Failure::new(
|
|
|
|
index,
|
|
|
|
format!("Verification: {:?}", e),
|
|
|
|
));
|
|
|
|
// skip to the next attestation so we do not publish this one to gossip
|
|
|
|
continue;
|
|
|
|
}
|
|
|
|
};
|
|
|
|
|
2021-01-20 19:19:38 +00:00
|
|
|
// Notify the validator monitor.
|
|
|
|
chain
|
|
|
|
.validator_monitor
|
|
|
|
.read()
|
|
|
|
.register_api_unaggregated_attestation(
|
|
|
|
seen_timestamp,
|
|
|
|
attestation.indexed_attestation(),
|
|
|
|
&chain.slot_clock,
|
|
|
|
);
|
|
|
|
|
2020-11-18 23:31:39 +00:00
|
|
|
publish_pubsub_message(
|
|
|
|
&network_tx,
|
|
|
|
PubsubMessage::Attestation(Box::new((
|
|
|
|
attestation.subnet_id(),
|
|
|
|
attestation.attestation().clone(),
|
|
|
|
))),
|
|
|
|
)?;
|
|
|
|
|
|
|
|
let committee_index = attestation.attestation().data.index;
|
|
|
|
let slot = attestation.attestation().data.slot;
|
|
|
|
|
|
|
|
if let Err(e) = chain.apply_attestation_to_fork_choice(&attestation) {
|
|
|
|
error!(log,
|
|
|
|
"Failure applying verified attestation to fork choice";
|
|
|
|
"error" => ?e,
|
|
|
|
"request_index" => index,
|
|
|
|
"committee_index" => committee_index,
|
|
|
|
"slot" => slot,
|
|
|
|
);
|
|
|
|
failures.push(api_types::Failure::new(
|
|
|
|
index,
|
|
|
|
format!("Fork choice: {:?}", e),
|
|
|
|
));
|
|
|
|
};
|
|
|
|
|
2021-09-22 08:49:41 +00:00
|
|
|
if let Err(e) = chain.add_to_naive_aggregation_pool(&attestation) {
|
2020-11-18 23:31:39 +00:00
|
|
|
error!(log,
|
|
|
|
"Failure adding verified attestation to the naive aggregation pool";
|
|
|
|
"error" => ?e,
|
|
|
|
"request_index" => index,
|
|
|
|
"committee_index" => committee_index,
|
|
|
|
"slot" => slot,
|
|
|
|
);
|
|
|
|
failures.push(api_types::Failure::new(
|
|
|
|
index,
|
|
|
|
format!("Naive aggregation pool: {:?}", e),
|
|
|
|
));
|
|
|
|
}
|
|
|
|
}
|
2022-08-10 07:52:57 +00:00
|
|
|
|
|
|
|
if num_already_known > 0 {
|
|
|
|
debug!(
|
|
|
|
log,
|
|
|
|
"Some unagg attestations already known";
|
|
|
|
"count" => num_already_known
|
|
|
|
);
|
|
|
|
}
|
|
|
|
|
2020-11-18 23:31:39 +00:00
|
|
|
if failures.is_empty() {
|
|
|
|
Ok(())
|
|
|
|
} else {
|
|
|
|
Err(warp_utils::reject::indexed_bad_request(
|
|
|
|
"error processing attestations".to_string(),
|
|
|
|
failures,
|
|
|
|
))
|
|
|
|
}
|
2020-09-29 03:46:54 +00:00
|
|
|
})
|
|
|
|
},
|
|
|
|
);
|
|
|
|
|
2020-11-18 23:31:39 +00:00
|
|
|
// GET beacon/pool/attestations?committee_index,slot
|
2020-09-29 03:46:54 +00:00
|
|
|
let get_beacon_pool_attestations = beacon_pool_path
|
|
|
|
.clone()
|
|
|
|
.and(warp::path("attestations"))
|
|
|
|
.and(warp::path::end())
|
2020-11-18 23:31:39 +00:00
|
|
|
.and(warp::query::<api_types::AttestationPoolQuery>())
|
|
|
|
.and_then(
|
|
|
|
|chain: Arc<BeaconChain<T>>, query: api_types::AttestationPoolQuery| {
|
|
|
|
blocking_json_task(move || {
|
Refactor op pool for speed and correctness (#3312)
## Proposed Changes
This PR has two aims: to speed up attestation packing in the op pool, and to fix bugs in the verification of attester slashings, proposer slashings and voluntary exits. The changes are bundled into a single database schema upgrade (v12).
Attestation packing is sped up by removing several inefficiencies:
- No more recalculation of `attesting_indices` during packing.
- No (unnecessary) examination of the `ParticipationFlags`: a bitfield suffices. See `RewardCache`.
- No re-checking of attestation validity during packing: the `AttestationMap` provides attestations which are "correct by construction" (I have checked this using Hydra).
- No SSZ re-serialization for the clunky `AttestationId` type (it can be removed in a future release).
So far the speed-up seems to be roughly 2-10x, from 500ms down to 50-100ms.
Verification of attester slashings, proposer slashings and voluntary exits is fixed by:
- Tracking the `ForkVersion`s that were used to verify each message inside the `SigVerifiedOp`. This allows us to quickly re-verify that they match the head state's opinion of what the `ForkVersion` should be at the epoch(s) relevant to the message.
- Storing the `SigVerifiedOp` on disk rather than the raw operation. This allows us to continue tracking the fork versions after a reboot.
This is mostly contained in this commit 52bb1840ae5c4356a8fc3a51e5df23ed65ed2c7f.
## Additional Info
The schema upgrade uses the justified state to re-verify attestations and compute `attesting_indices` for them. It will drop any attestations that fail to verify, by the logic that attestations are most valuable in the few slots after they're observed, and are probably stale and useless by the time a node restarts. Exits and proposer slashings are similarly re-verified to obtain `SigVerifiedOp`s.
This PR contains a runtime killswitch `--paranoid-block-proposal` which opts out of all the optimisations in favour of closely verifying every included message. Although I'm quite sure that the optimisations are correct this flag could be useful in the event of an unforeseen emergency.
Finally, you might notice that the `RewardCache` appears quite useless in its current form because it is only updated on the hot-path immediately before proposal. My hope is that in future we can shift calls to `RewardCache::update` into the background, e.g. while performing the state advance. It is also forward-looking to `tree-states` compatibility, where iterating and indexing `state.{previous,current}_epoch_participation` is expensive and needs to be minimised.
2022-08-29 09:10:26 +00:00
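A small sketch of the `SigVerifiedOp` idea from the description above: remember which fork version(s) a message's signature was verified against so that, at inclusion time, it only needs to be compared with the head state's view instead of being re-verified. The types are illustrative, not the real Lighthouse definitions.
```rust
type ForkVersion = [u8; 4];

struct SigVerifiedOp<T> {
    op: T,
    /// Fork version(s) the signature was verified against when the operation
    /// was first received (persisted to disk alongside the operation).
    verified_against: Vec<ForkVersion>,
}

impl<T> SigVerifiedOp<T> {
    /// Cheap re-check: the stored signature is still valid if the head state
    /// agrees on every fork version used at verification time.
    fn signature_is_still_valid(&self, head_forks: &[ForkVersion]) -> bool {
        self.verified_against.iter().all(|fv| head_forks.contains(fv))
    }

    fn as_inner(&self) -> &T {
        &self.op
    }
}
```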
|
|
|
let query_filter = |data: &AttestationData| {
|
|
|
|
query.slot.map_or(true, |slot| slot == data.slot)
|
2020-11-18 23:31:39 +00:00
|
|
|
&& query
|
|
|
|
.committee_index
|
2022-08-29 09:10:26 +00:00
|
|
|
.map_or(true, |index| index == data.index)
|
2020-11-18 23:31:39 +00:00
|
|
|
};
|
|
|
|
|
|
|
|
let mut attestations = chain.op_pool.get_filtered_attestations(query_filter);
|
|
|
|
attestations.extend(
|
|
|
|
chain
|
|
|
|
.naive_aggregation_pool
|
|
|
|
.read()
|
|
|
|
.iter()
|
|
|
|
.cloned()
|
2022-08-29 09:10:26 +00:00
|
|
|
.filter(|att| query_filter(&att.data)),
|
2020-11-18 23:31:39 +00:00
|
|
|
);
|
|
|
|
Ok(api_types::GenericResponse::from(attestations))
|
|
|
|
})
|
|
|
|
},
|
|
|
|
);
|
2020-09-29 03:46:54 +00:00
|
|
|
|
|
|
|
// POST beacon/pool/attester_slashings
|
|
|
|
let post_beacon_pool_attester_slashings = beacon_pool_path
|
|
|
|
.clone()
|
|
|
|
.and(warp::path("attester_slashings"))
|
|
|
|
.and(warp::path::end())
|
|
|
|
.and(warp::body::json())
|
|
|
|
.and(network_tx_filter.clone())
|
|
|
|
.and_then(
|
|
|
|
|chain: Arc<BeaconChain<T>>,
|
|
|
|
slashing: AttesterSlashing<T::EthSpec>,
|
|
|
|
network_tx: UnboundedSender<NetworkMessage<T::EthSpec>>| {
|
|
|
|
blocking_json_task(move || {
|
|
|
|
let outcome = chain
|
|
|
|
.verify_attester_slashing_for_gossip(slashing.clone())
|
|
|
|
.map_err(|e| {
|
|
|
|
warp_utils::reject::object_invalid(format!(
|
|
|
|
"gossip verification failed: {:?}",
|
|
|
|
e
|
|
|
|
))
|
|
|
|
})?;
|
|
|
|
|
2021-01-20 19:19:38 +00:00
|
|
|
// Notify the validator monitor.
|
|
|
|
chain
|
|
|
|
.validator_monitor
|
|
|
|
.read()
|
|
|
|
.register_api_attester_slashing(&slashing);
|
|
|
|
|
2020-09-29 03:46:54 +00:00
|
|
|
if let ObservationOutcome::New(slashing) = outcome {
|
|
|
|
publish_pubsub_message(
|
|
|
|
&network_tx,
|
|
|
|
PubsubMessage::AttesterSlashing(Box::new(
|
|
|
|
slashing.clone().into_inner(),
|
|
|
|
)),
|
|
|
|
)?;
|
|
|
|
|
Use async code when interacting with EL (#3244)
## Overview
This rather extensive PR achieves two primary goals:
1. Uses the finalized/justified checkpoints of fork choice (FC), rather than that of the head state.
2. Refactors fork choice, block production and block processing to `async` functions.
Additionally, it achieves:
- Concurrent forkchoice updates to the EL and cache pruning after a new head is selected.
- Concurrent "block packing" (attestations, etc) and execution payload retrieval during block production.
- Concurrent per-block-processing and execution payload verification during block processing.
- The `Arc`-ification of `SignedBeaconBlock` during block processing (it's never mutated, so why not?):
- I had to do this to deal with sending blocks into spawned tasks.
- Previously we were cloning the beacon block at least 2 times during each block processing, these clones are either removed or turned into cheaper `Arc` clones.
- We were also `Box`-ing and un-`Box`-ing beacon blocks as they moved throughout the networking crate. This is not a big deal, but it's nice to avoid shifting things between the stack and heap.
- Avoids cloning *all the blocks* in *every chain segment* during sync.
- It also has the potential to clean up our code where we need to pass an *owned* block around so we can send it back in the case of an error (I didn't do much of this, my PR is already big enough :sweat_smile:)
- The `BeaconChain::HeadSafetyStatus` struct was removed. It was an old relic from prior merge specs.
For motivation for this change, see https://github.com/sigp/lighthouse/pull/3244#issuecomment-1160963273
## Changes to `canonical_head` and `fork_choice`
Previously, the `BeaconChain` had two separate fields:
```
canonical_head: RwLock<Snapshot>,
fork_choice: RwLock<BeaconForkChoice>
```
Now, we have grouped these values under a single struct:
```
canonical_head: CanonicalHead {
cached_head: RwLock<Arc<Snapshot>>,
fork_choice: RwLock<BeaconForkChoice>
}
```
Apart from ergonomics, the only *actual* change here is wrapping the canonical head snapshot in an `Arc`. This means that we no longer need to hold the `cached_head` (`canonical_head`, in old terms) lock when we want to pull some values from it. This was done to avoid deadlock risks by preventing functions from acquiring (and holding) the `cached_head` and `fork_choice` locks simultaneously.
## Breaking Changes
### The `state` (root) field in the `finalized_checkpoint` SSE event
Consider the scenario where epoch `n` is just finalized, but `start_slot(n)` is skipped. There are two state roots we might use in the `finalized_checkpoint` SSE event:
1. The state root of the finalized block, which is `get_block(finalized_checkpoint.root).state_root`.
2. The state root at slot `start_slot(n)`, which would be the state from (1), but "skipped forward" through any skip slots.
Previously, Lighthouse would choose (2). However, we can see that when [Teku generates that event](https://github.com/ConsenSys/teku/blob/de2b2801c89ef5abf983d6bf37867c37fc47121f/data/beaconrestapi/src/main/java/tech/pegasys/teku/beaconrestapi/handlers/v1/events/EventSubscriptionManager.java#L171-L182) it uses [`getStateRootFromBlockRoot`](https://github.com/ConsenSys/teku/blob/de2b2801c89ef5abf983d6bf37867c37fc47121f/data/provider/src/main/java/tech/pegasys/teku/api/ChainDataProvider.java#L336-L341) which uses (1).
I have switched Lighthouse from (2) to (1). I think it's a somewhat arbitrary choice between the two, where (1) is easier to compute and is consistent with Teku.
## Notes for Reviewers
I've renamed `BeaconChain::fork_choice` to `BeaconChain::recompute_head`. Doing this helped ensure I broke all previous uses of fork choice and I also find it more descriptive. It describes an action and can't be confused with trying to get a reference to the `ForkChoice` struct.
I've changed the ordering of SSE events when a block is received. It used to be `[block, finalized, head]` and now it's `[block, head, finalized]`. It was easier this way and I don't think we were making any promises about SSE event ordering so it's not "breaking".
I've made it so fork choice will run when it's first constructed. I did this because I wanted to have a cached version of the last call to `get_head`. Ensuring `get_head` has been run *at least once* means that the cached value doesn't need to be wrapped in an `Option`. This was fairly simple; it just involved passing a `slot` to the constructor so it knows *when* it's being run. When loading a fork choice from the store and a slot clock isn't handy I've just used the `slot` that was saved in the `fork_choice_store`. That seems like it would be a faithful representation of the slot when we saved it.
I added the `genesis_time: u64` to the `BeaconChain`. It's small, constant and nice to have around.
Since we're using FC for the fin/just checkpoints, we no longer get the `0x00..00` roots at genesis. You can see I had to remove a work-around in `ef-tests` here: b56be3bc2. I can't find any reason why this would be an issue, if anything I think it'll be better since the genesis-alias has caught us out a few times (0x00..00 isn't actually a real root). Edit: I did find a case where the `network` expected the 0x00..00 alias and patched it here: 3f26ac3e2.
You'll notice a lot of changes in tests. Generally, tests should be functionally equivalent. Here are the things creating the most diff-noise in tests:
- Changing tests to be `tokio::async` tests.
- Adding `.await` to fork choice, block processing and block production functions.
- Refactor of the `canonical_head` "API" provided by the `BeaconChain`. E.g., `chain.canonical_head.cached_head()` instead of `chain.canonical_head.read()`.
- Wrapping `SignedBeaconBlock` in an `Arc`.
- In the `beacon_chain/tests/block_verification`, we can't use the `lazy_static` `CHAIN_SEGMENT` variable anymore since it's generated with an async function. We just generate it in each test, not so efficient but hopefully insignificant.
I had to disable `rayon` concurrent tests in the `fork_choice` tests. This is because the use of `rayon` and `block_on` was causing a panic.
Co-authored-by: Mac L <mjladson@pm.me>
2022-07-03 05:36:50 +00:00
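A minimal sketch of the "concurrent block packing and execution payload retrieval" point above, assuming `tokio`; `pack_operations` and `get_execution_payload` are hypothetical stand-ins for the real block-production steps.
```rust
async fn pack_operations() -> Vec<u8> {
    // Gather attestations, slashings, exits, etc. for the block body.
    Vec::new()
}

async fn get_execution_payload() -> Vec<u8> {
    // Ask the execution layer for a payload.
    Vec::new()
}

async fn produce_block() -> (Vec<u8>, Vec<u8>) {
    // Run both halves of block production concurrently rather than serially.
    tokio::join!(pack_operations(), get_execution_payload())
}
```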
|
|
|
chain.import_attester_slashing(slashing);
|
2020-09-29 03:46:54 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
Ok(())
|
|
|
|
})
|
|
|
|
},
|
|
|
|
);
|
|
|
|
|
|
|
|
// GET beacon/pool/attester_slashings
|
|
|
|
let get_beacon_pool_attester_slashings = beacon_pool_path
|
|
|
|
.clone()
|
|
|
|
.and(warp::path("attester_slashings"))
|
|
|
|
.and(warp::path::end())
|
|
|
|
.and_then(|chain: Arc<BeaconChain<T>>| {
|
|
|
|
blocking_json_task(move || {
|
|
|
|
let attestations = chain.op_pool.get_all_attester_slashings();
|
|
|
|
Ok(api_types::GenericResponse::from(attestations))
|
|
|
|
})
|
|
|
|
});
|
|
|
|
|
|
|
|
// POST beacon/pool/proposer_slashings
|
|
|
|
let post_beacon_pool_proposer_slashings = beacon_pool_path
|
|
|
|
.clone()
|
|
|
|
.and(warp::path("proposer_slashings"))
|
|
|
|
.and(warp::path::end())
|
|
|
|
.and(warp::body::json())
|
|
|
|
.and(network_tx_filter.clone())
|
|
|
|
.and_then(
|
|
|
|
|chain: Arc<BeaconChain<T>>,
|
|
|
|
slashing: ProposerSlashing,
|
|
|
|
network_tx: UnboundedSender<NetworkMessage<T::EthSpec>>| {
|
|
|
|
blocking_json_task(move || {
|
|
|
|
let outcome = chain
|
|
|
|
.verify_proposer_slashing_for_gossip(slashing.clone())
|
|
|
|
.map_err(|e| {
|
|
|
|
warp_utils::reject::object_invalid(format!(
|
|
|
|
"gossip verification failed: {:?}",
|
|
|
|
e
|
|
|
|
))
|
|
|
|
})?;
|
|
|
|
|
2021-01-20 19:19:38 +00:00
|
|
|
// Notify the validator monitor.
|
|
|
|
chain
|
|
|
|
.validator_monitor
|
|
|
|
.read()
|
|
|
|
.register_api_proposer_slashing(&slashing);
|
|
|
|
|
2020-09-29 03:46:54 +00:00
|
|
|
if let ObservationOutcome::New(slashing) = outcome {
|
|
|
|
publish_pubsub_message(
|
|
|
|
&network_tx,
|
|
|
|
PubsubMessage::ProposerSlashing(Box::new(
|
|
|
|
slashing.clone().into_inner(),
|
|
|
|
)),
|
|
|
|
)?;
|
|
|
|
|
|
|
|
chain.import_proposer_slashing(slashing);
|
|
|
|
}
|
|
|
|
|
|
|
|
Ok(())
|
|
|
|
})
|
|
|
|
},
|
|
|
|
);
|
|
|
|
|
|
|
|
// GET beacon/pool/proposer_slashings
|
|
|
|
let get_beacon_pool_proposer_slashings = beacon_pool_path
|
|
|
|
.clone()
|
|
|
|
.and(warp::path("proposer_slashings"))
|
|
|
|
.and(warp::path::end())
|
|
|
|
.and_then(|chain: Arc<BeaconChain<T>>| {
|
|
|
|
blocking_json_task(move || {
|
|
|
|
let attestations = chain.op_pool.get_all_proposer_slashings();
|
|
|
|
Ok(api_types::GenericResponse::from(attestations))
|
|
|
|
})
|
|
|
|
});
|
|
|
|
|
|
|
|
// POST beacon/pool/voluntary_exits
|
|
|
|
let post_beacon_pool_voluntary_exits = beacon_pool_path
|
|
|
|
.clone()
|
|
|
|
.and(warp::path("voluntary_exits"))
|
|
|
|
.and(warp::path::end())
|
|
|
|
.and(warp::body::json())
|
|
|
|
.and(network_tx_filter.clone())
|
|
|
|
.and_then(
|
|
|
|
|chain: Arc<BeaconChain<T>>,
|
|
|
|
exit: SignedVoluntaryExit,
|
|
|
|
network_tx: UnboundedSender<NetworkMessage<T::EthSpec>>| {
|
|
|
|
blocking_json_task(move || {
|
|
|
|
let outcome = chain
|
|
|
|
.verify_voluntary_exit_for_gossip(exit.clone())
|
|
|
|
.map_err(|e| {
|
|
|
|
warp_utils::reject::object_invalid(format!(
|
|
|
|
"gossip verification failed: {:?}",
|
|
|
|
e
|
|
|
|
))
|
|
|
|
})?;
|
|
|
|
|
2021-01-20 19:19:38 +00:00
|
|
|
// Notify the validator monitor.
|
|
|
|
chain
|
|
|
|
.validator_monitor
|
|
|
|
.read()
|
|
|
|
.register_api_voluntary_exit(&exit.message);
|
|
|
|
|
2020-09-29 03:46:54 +00:00
|
|
|
if let ObservationOutcome::New(exit) = outcome {
|
|
|
|
publish_pubsub_message(
|
|
|
|
&network_tx,
|
|
|
|
PubsubMessage::VoluntaryExit(Box::new(exit.clone().into_inner())),
|
|
|
|
)?;
|
|
|
|
|
|
|
|
chain.import_voluntary_exit(exit);
|
|
|
|
}
|
|
|
|
|
|
|
|
Ok(())
|
|
|
|
})
|
|
|
|
},
|
|
|
|
);
|
|
|
|
|
|
|
|
// GET beacon/pool/voluntary_exits
|
|
|
|
let get_beacon_pool_voluntary_exits = beacon_pool_path
|
|
|
|
.clone()
|
|
|
|
.and(warp::path("voluntary_exits"))
|
|
|
|
.and(warp::path::end())
|
|
|
|
.and_then(|chain: Arc<BeaconChain<T>>| {
|
|
|
|
blocking_json_task(move || {
|
|
|
|
let attestations = chain.op_pool.get_all_voluntary_exits();
|
|
|
|
Ok(api_types::GenericResponse::from(attestations))
|
|
|
|
})
|
|
|
|
});
|
|
|
|
|
2021-08-06 00:47:31 +00:00
|
|
|
// POST beacon/pool/sync_committees
|
|
|
|
let post_beacon_pool_sync_committees = beacon_pool_path
|
|
|
|
.clone()
|
|
|
|
.and(warp::path("sync_committees"))
|
|
|
|
.and(warp::path::end())
|
|
|
|
.and(warp::body::json())
|
|
|
|
.and(network_tx_filter.clone())
|
|
|
|
.and(log_filter.clone())
|
|
|
|
.and_then(
|
|
|
|
|chain: Arc<BeaconChain<T>>,
|
|
|
|
signatures: Vec<SyncCommitteeMessage>,
|
|
|
|
network_tx: UnboundedSender<NetworkMessage<T::EthSpec>>,
|
|
|
|
log: Logger| {
|
|
|
|
blocking_json_task(move || {
|
|
|
|
sync_committees::process_sync_committee_signatures(
|
|
|
|
signatures, network_tx, &chain, log,
|
|
|
|
)?;
|
|
|
|
Ok(api_types::GenericResponse::from(()))
|
|
|
|
})
|
|
|
|
},
|
|
|
|
);
|
|
|
|
|
2022-11-24 20:09:26 +00:00
|
|
|
// GET beacon/pool/bls_to_execution_changes
|
|
|
|
let get_beacon_pool_bls_to_execution_changes = beacon_pool_path
|
|
|
|
.clone()
|
|
|
|
.and(warp::path("bls_to_execution_changes"))
|
|
|
|
.and(warp::path::end())
|
|
|
|
.and_then(|chain: Arc<BeaconChain<T>>| {
|
|
|
|
blocking_json_task(move || {
|
|
|
|
let address_changes = chain.op_pool.get_all_bls_to_execution_changes();
|
|
|
|
Ok(api_types::GenericResponse::from(address_changes))
|
|
|
|
})
|
|
|
|
});
|
|
|
|
|
|
|
|
// POST beacon/pool/bls_to_execution_changes
|
|
|
|
let post_beacon_pool_bls_to_execution_changes = beacon_pool_path
|
|
|
|
.clone()
|
|
|
|
.and(warp::path("bls_to_execution_changes"))
|
|
|
|
.and(warp::path::end())
|
|
|
|
.and(warp::body::json())
|
|
|
|
.and(network_tx_filter.clone())
|
2022-12-14 01:01:33 +00:00
|
|
|
.and(log_filter.clone())
|
2022-11-24 20:09:26 +00:00
|
|
|
.and_then(
|
|
|
|
|chain: Arc<BeaconChain<T>>,
|
2022-12-14 01:01:33 +00:00
|
|
|
address_changes: Vec<SignedBlsToExecutionChange>,
|
2023-01-20 23:39:59 +00:00
|
|
|
network_tx: UnboundedSender<NetworkMessage<T::EthSpec>>,
|
2022-12-14 01:01:33 +00:00
|
|
|
log: Logger| {
|
2022-11-24 20:09:26 +00:00
|
|
|
blocking_json_task(move || {
|
2022-12-14 01:01:33 +00:00
|
|
|
let mut failures = vec![];
|
|
|
|
|
|
|
|
for (index, address_change) in address_changes.into_iter().enumerate() {
|
|
|
|
let validator_index = address_change.message.validator_index;
|
|
|
|
|
2023-01-20 23:39:59 +00:00
|
|
|
match chain.verify_bls_to_execution_change_for_http_api(address_change) {
|
2022-12-14 01:01:33 +00:00
|
|
|
Ok(ObservationOutcome::New(verified_address_change)) => {
|
2023-01-20 23:39:59 +00:00
|
|
|
let validator_index =
|
|
|
|
verified_address_change.as_inner().message.validator_index;
|
|
|
|
let address = verified_address_change
|
|
|
|
.as_inner()
|
|
|
|
.message
|
|
|
|
.to_execution_address;
|
|
|
|
|
|
|
|
// New to P2P *and* op pool, gossip immediately if post-Capella.
|
2023-02-07 06:13:49 +00:00
|
|
|
let received_pre_capella = if chain.current_slot_is_post_capella().unwrap_or(false) {
|
|
|
|
ReceivedPreCapella::No
|
|
|
|
} else {
|
|
|
|
ReceivedPreCapella::Yes
|
|
|
|
};
|
|
|
|
if matches!(received_pre_capella, ReceivedPreCapella::No) {
|
2023-01-20 23:39:59 +00:00
|
|
|
publish_pubsub_message(
|
|
|
|
&network_tx,
|
|
|
|
PubsubMessage::BlsToExecutionChange(Box::new(
|
|
|
|
verified_address_change.as_inner().clone(),
|
|
|
|
)),
|
|
|
|
)?;
|
|
|
|
}
|
|
|
|
|
|
|
|
// Import to op pool (may return `false` if there's a race).
|
|
|
|
let imported =
|
2023-02-07 06:13:49 +00:00
|
|
|
chain.import_bls_to_execution_change(verified_address_change, received_pre_capella);
|
2023-01-20 23:39:59 +00:00
|
|
|
|
|
|
|
info!(
|
|
|
|
log,
|
|
|
|
"Processed BLS to execution change";
|
|
|
|
"validator_index" => validator_index,
|
|
|
|
"address" => ?address,
|
2023-02-07 06:13:49 +00:00
|
|
|
"published" => matches!(received_pre_capella, ReceivedPreCapella::No),
|
2023-01-20 23:39:59 +00:00
|
|
|
"imported" => imported,
|
|
|
|
);
|
2022-12-14 01:01:33 +00:00
|
|
|
}
|
|
|
|
Ok(ObservationOutcome::AlreadyKnown) => {
|
|
|
|
debug!(
|
|
|
|
log,
|
|
|
|
"BLS to execution change already known";
|
|
|
|
"validator_index" => validator_index,
|
|
|
|
);
|
|
|
|
}
|
|
|
|
Err(e) => {
|
2023-01-20 23:39:59 +00:00
|
|
|
warn!(
|
2022-12-14 01:01:33 +00:00
|
|
|
log,
|
|
|
|
"Invalid BLS to execution change";
|
|
|
|
"validator_index" => validator_index,
|
2023-01-20 23:39:59 +00:00
|
|
|
"reason" => ?e,
|
|
|
|
"source" => "HTTP",
|
2022-12-14 01:01:33 +00:00
|
|
|
);
|
|
|
|
failures.push(api_types::Failure::new(
|
|
|
|
index,
|
|
|
|
format!("invalid: {e:?}"),
|
|
|
|
));
|
|
|
|
}
|
2022-11-24 20:09:26 +00:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2022-12-14 01:01:33 +00:00
|
|
|
if failures.is_empty() {
|
|
|
|
Ok(())
|
|
|
|
} else {
|
|
|
|
Err(warp_utils::reject::indexed_bad_request(
|
|
|
|
"some BLS to execution changes failed to verify".into(),
|
|
|
|
failures,
|
|
|
|
))
|
|
|
|
}
|
2022-11-24 20:09:26 +00:00
|
|
|
})
|
|
|
|
},
|
|
|
|
);
|
|
|
|
|
2022-10-30 04:04:24 +00:00
|
|
|
// GET beacon/deposit_snapshot
|
|
|
|
let get_beacon_deposit_snapshot = eth_v1
|
|
|
|
.and(warp::path("beacon"))
|
|
|
|
.and(warp::path("deposit_snapshot"))
|
|
|
|
.and(warp::path::end())
|
|
|
|
.and(warp::header::optional::<api_types::Accept>("accept"))
|
|
|
|
.and(eth1_service_filter.clone())
|
|
|
|
.and_then(
|
|
|
|
|accept_header: Option<api_types::Accept>, eth1_service: eth1::Service| {
|
2023-03-13 01:40:03 +00:00
|
|
|
blocking_response_task(move || match accept_header {
|
2022-10-30 04:04:24 +00:00
|
|
|
Some(api_types::Accept::Json) | None => {
|
|
|
|
let snapshot = eth1_service.get_deposit_snapshot();
|
|
|
|
Ok(
|
|
|
|
warp::reply::json(&api_types::GenericResponse::from(snapshot))
|
|
|
|
.into_response(),
|
|
|
|
)
|
|
|
|
}
|
|
|
|
_ => eth1_service
|
|
|
|
.get_deposit_snapshot()
|
|
|
|
.map(|snapshot| {
|
|
|
|
Response::builder()
|
|
|
|
.status(200)
|
|
|
|
.header("Content-Type", "application/octet-stream")
|
|
|
|
.body(snapshot.as_ssz_bytes().into())
|
|
|
|
.map_err(|e| {
|
|
|
|
warp_utils::reject::custom_server_error(format!(
|
|
|
|
"failed to create response: {}",
|
|
|
|
e
|
|
|
|
))
|
|
|
|
})
|
|
|
|
})
|
|
|
|
.unwrap_or_else(|| {
|
|
|
|
Response::builder()
|
|
|
|
.status(503)
|
|
|
|
.header("Content-Type", "application/octet-stream")
|
|
|
|
.body(Vec::new().into())
|
|
|
|
.map_err(|e| {
|
|
|
|
warp_utils::reject::custom_server_error(format!(
|
|
|
|
"failed to create response: {}",
|
|
|
|
e
|
|
|
|
))
|
|
|
|
})
|
|
|
|
}),
|
|
|
|
})
|
|
|
|
},
|
|
|
|
);
|
|
|
|
|
2023-02-07 08:33:23 +00:00
|
|
|
let beacon_rewards_path = eth_v1
|
|
|
|
.and(warp::path("beacon"))
|
|
|
|
.and(warp::path("rewards"))
|
|
|
|
.and(chain_filter.clone());
|
|
|
|
|
|
|
|
// GET beacon/rewards/blocks/{block_id}
|
|
|
|
let get_beacon_rewards_blocks = beacon_rewards_path
|
|
|
|
.clone()
|
|
|
|
.and(warp::path("blocks"))
|
|
|
|
.and(block_id_or_err)
|
|
|
|
.and(warp::path::end())
|
|
|
|
.and_then(|chain: Arc<BeaconChain<T>>, block_id: BlockId| {
|
|
|
|
blocking_json_task(move || {
|
2023-03-30 06:08:37 +00:00
|
|
|
let (rewards, execution_optimistic, finalized) =
|
2023-02-07 08:33:23 +00:00
|
|
|
standard_block_rewards::compute_beacon_block_rewards(chain, block_id)?;
|
|
|
|
Ok(rewards)
|
|
|
|
.map(api_types::GenericResponse::from)
|
2023-03-30 06:08:37 +00:00
|
|
|
.map(|resp| {
|
|
|
|
resp.add_execution_optimistic_finalized(execution_optimistic, finalized)
|
|
|
|
})
|
2023-02-07 08:33:23 +00:00
|
|
|
})
|
|
|
|
});
|
|
|
|
|
2023-01-24 02:06:42 +00:00
|
|
|
/*
|
|
|
|
* beacon/rewards
|
|
|
|
*/
|
|
|
|
|
|
|
|
let beacon_rewards_path = eth_v1
|
|
|
|
.and(warp::path("beacon"))
|
|
|
|
.and(warp::path("rewards"))
|
|
|
|
.and(chain_filter.clone());
|
|
|
|
|
Implement `attestation_rewards` API (per-validator reward) (#3822)
## Issue Addressed
#3661
## Proposed Changes
`/eth/v1/beacon/rewards/attestations/{epoch}`
```json
{
"execution_optimistic": false,
"finalized": false,
"data": [
{
"ideal_rewards": [
{
"effective_balance": "1000000000",
"head": "2500",
"target": "5000",
"source": "5000"
}
],
"total_rewards": [
{
"validator_index": "0",
"head": "2000",
"target": "2000",
"source": "4000",
"inclusion_delay": "2000"
}
]
}
]
}
```
The issue contains the implementation of three per-validator reward APIs:
- [`sync_committee_rewards`](https://github.com/sigp/lighthouse/pull/3790)
- `attestation_rewards`
- `block_rewards`.
This PR *only* implements the `attestation_rewards`.
The endpoints can be viewed in the Ethereum Beacon nodes API browser: https://ethereum.github.io/beacon-APIs/?urls.primaryName=dev#/Rewards
## Additional Info
The implementation of [consensus client reward APIs](https://github.com/eth-protocol-fellows/cohort-three/blob/master/projects/project-ideas.md#consensus-client-reward-apis) is part of the [EPF](https://github.com/eth-protocol-fellows/cohort-three).
---
- [x] `get_state`
- [x] Calculate *ideal rewards* with some logic from `get_flag_index_deltas`
- [x] Calculate *actual rewards* with some logic from `get_flag_index_deltas`
- [x] Code cleanup
- [x] Testing
2023-02-07 00:00:19 +00:00
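For consumers of this endpoint, a minimal sketch of Rust structs matching the response shape in the JSON example above, assuming `serde`; the field set follows this example (a later revision adds fields such as `inactivity`), and numeric values arrive as decimal strings.
```rust
use serde::Deserialize;

#[derive(Deserialize)]
struct IdealReward {
    effective_balance: String,
    head: String,
    target: String,
    source: String,
}

#[derive(Deserialize)]
struct TotalReward {
    validator_index: String,
    head: String,
    target: String,
    source: String,
    // Not present for every validator/fork, so keep it optional.
    #[serde(default)]
    inclusion_delay: Option<String>,
}

#[derive(Deserialize)]
struct AttestationRewards {
    ideal_rewards: Vec<IdealReward>,
    total_rewards: Vec<TotalReward>,
}

#[derive(Deserialize)]
struct AttestationRewardsResponse {
    execution_optimistic: bool,
    finalized: bool,
    data: Vec<AttestationRewards>,
}
```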
|
|
|
// POST beacon/rewards/attestations/{epoch}
|
|
|
|
let post_beacon_rewards_attestations = beacon_rewards_path
|
|
|
|
.clone()
|
|
|
|
.and(warp::path("attestations"))
|
|
|
|
.and(warp::path::param::<Epoch>())
|
|
|
|
.and(warp::path::end())
|
|
|
|
.and(warp::body::json())
|
|
|
|
.and_then(
|
Phase 0 attestation rewards via Beacon API (#4474)
## Issue Addressed
Addresses #4026.
Beacon-API spec [here](https://ethereum.github.io/beacon-APIs/?urls.primaryName=dev#/Beacon/getAttestationsRewards).
Endpoint: `POST /eth/v1/beacon/rewards/attestations/{epoch}`
This endpoint already supports post-Altair epochs. This PR adds support for phase 0 rewards calculation.
## Proposed Changes
- [x] Attestation rewards API to support phase 0 rewards calculation, re-using logic from `state_processing`. Refactored `get_attestation_deltas` slightly to support computing deltas for a subset of validators.
- [x] Add `inclusion_delay` to `ideal_rewards` (`beacon-API` spec update to follow)
- [x] Add `inactivity` penalties to both `ideal_rewards` and `total_rewards` (`beacon-API` spec update to follow)
- [x] Add tests to compute attestation rewards and compare results with beacon states
## Additional Notes
- The extra penalty for missing attestations or being slashed during an inactivity leak is currently not included in the API response (for both phase 0 and Altair) in the spec.
- I went with adding `inactivity` as a separate component rather than combining it with the 4 rewards, because this is how it was grouped in [the phase 0 spec](https://github.com/ethereum/consensus-specs/blob/dev/specs/phase0/beacon-chain.md#get_attestation_deltas). During an inactivity leak, all rewards include the optimal reward, and inactivity penalties are calculated separately (see the code snippet from the spec below), so it would be quite confusing if we merged them. This would also work better with Altair, because there's no "cancelling" of rewards and inactivity penalties are more separate.
- Altair calculation logic (to include inactivity penalties) to be updated in a follow-up PR.
```python
def get_attestation_deltas(state: BeaconState) -> Tuple[Sequence[Gwei], Sequence[Gwei]]:
"""
Return attestation reward/penalty deltas for each validator.
"""
source_rewards, source_penalties = get_source_deltas(state)
target_rewards, target_penalties = get_target_deltas(state)
head_rewards, head_penalties = get_head_deltas(state)
inclusion_delay_rewards, _ = get_inclusion_delay_deltas(state)
_, inactivity_penalties = get_inactivity_penalty_deltas(state)
rewards = [
source_rewards[i] + target_rewards[i] + head_rewards[i] + inclusion_delay_rewards[i]
for i in range(len(state.validators))
]
penalties = [
source_penalties[i] + target_penalties[i] + head_penalties[i] + inactivity_penalties[i]
for i in range(len(state.validators))
]
return rewards, penalties
```
## Example API Response
<details>
<summary>Click me</summary>
```json
{
"ideal_rewards": [
{
"effective_balance": "1000000000",
"head": "6638",
"target": "6638",
"source": "6638",
"inclusion_delay": "9783",
"inactivity": "0"
},
{
"effective_balance": "2000000000",
"head": "13276",
"target": "13276",
"source": "13276",
"inclusion_delay": "19565",
"inactivity": "0"
},
{
"effective_balance": "3000000000",
"head": "19914",
"target": "19914",
"source": "19914",
"inclusion_delay": "29349",
"inactivity": "0"
},
{
"effective_balance": "4000000000",
"head": "26553",
"target": "26553",
"source": "26553",
"inclusion_delay": "39131",
"inactivity": "0"
},
{
"effective_balance": "5000000000",
"head": "33191",
"target": "33191",
"source": "33191",
"inclusion_delay": "48914",
"inactivity": "0"
},
{
"effective_balance": "6000000000",
"head": "39829",
"target": "39829",
"source": "39829",
"inclusion_delay": "58697",
"inactivity": "0"
},
{
"effective_balance": "7000000000",
"head": "46468",
"target": "46468",
"source": "46468",
"inclusion_delay": "68480",
"inactivity": "0"
},
{
"effective_balance": "8000000000",
"head": "53106",
"target": "53106",
"source": "53106",
"inclusion_delay": "78262",
"inactivity": "0"
},
{
"effective_balance": "9000000000",
"head": "59744",
"target": "59744",
"source": "59744",
"inclusion_delay": "88046",
"inactivity": "0"
},
{
"effective_balance": "10000000000",
"head": "66383",
"target": "66383",
"source": "66383",
"inclusion_delay": "97828",
"inactivity": "0"
},
{
"effective_balance": "11000000000",
"head": "73021",
"target": "73021",
"source": "73021",
"inclusion_delay": "107611",
"inactivity": "0"
},
{
"effective_balance": "12000000000",
"head": "79659",
"target": "79659",
"source": "79659",
"inclusion_delay": "117394",
"inactivity": "0"
},
{
"effective_balance": "13000000000",
"head": "86298",
"target": "86298",
"source": "86298",
"inclusion_delay": "127176",
"inactivity": "0"
},
{
"effective_balance": "14000000000",
"head": "92936",
"target": "92936",
"source": "92936",
"inclusion_delay": "136959",
"inactivity": "0"
},
{
"effective_balance": "15000000000",
"head": "99574",
"target": "99574",
"source": "99574",
"inclusion_delay": "146742",
"inactivity": "0"
},
{
"effective_balance": "16000000000",
"head": "106212",
"target": "106212",
"source": "106212",
"inclusion_delay": "156525",
"inactivity": "0"
},
{
"effective_balance": "17000000000",
"head": "112851",
"target": "112851",
"source": "112851",
"inclusion_delay": "166307",
"inactivity": "0"
},
{
"effective_balance": "18000000000",
"head": "119489",
"target": "119489",
"source": "119489",
"inclusion_delay": "176091",
"inactivity": "0"
},
{
"effective_balance": "19000000000",
"head": "126127",
"target": "126127",
"source": "126127",
"inclusion_delay": "185873",
"inactivity": "0"
},
{
"effective_balance": "20000000000",
"head": "132766",
"target": "132766",
"source": "132766",
"inclusion_delay": "195656",
"inactivity": "0"
},
{
"effective_balance": "21000000000",
"head": "139404",
"target": "139404",
"source": "139404",
"inclusion_delay": "205439",
"inactivity": "0"
},
{
"effective_balance": "22000000000",
"head": "146042",
"target": "146042",
"source": "146042",
"inclusion_delay": "215222",
"inactivity": "0"
},
{
"effective_balance": "23000000000",
"head": "152681",
"target": "152681",
"source": "152681",
"inclusion_delay": "225004",
"inactivity": "0"
},
{
"effective_balance": "24000000000",
"head": "159319",
"target": "159319",
"source": "159319",
"inclusion_delay": "234787",
"inactivity": "0"
},
{
"effective_balance": "25000000000",
"head": "165957",
"target": "165957",
"source": "165957",
"inclusion_delay": "244570",
"inactivity": "0"
},
{
"effective_balance": "26000000000",
"head": "172596",
"target": "172596",
"source": "172596",
"inclusion_delay": "254352",
"inactivity": "0"
},
{
"effective_balance": "27000000000",
"head": "179234",
"target": "179234",
"source": "179234",
"inclusion_delay": "264136",
"inactivity": "0"
},
{
"effective_balance": "28000000000",
"head": "185872",
"target": "185872",
"source": "185872",
"inclusion_delay": "273918",
"inactivity": "0"
},
{
"effective_balance": "29000000000",
"head": "192510",
"target": "192510",
"source": "192510",
"inclusion_delay": "283701",
"inactivity": "0"
},
{
"effective_balance": "30000000000",
"head": "199149",
"target": "199149",
"source": "199149",
"inclusion_delay": "293484",
"inactivity": "0"
},
{
"effective_balance": "31000000000",
"head": "205787",
"target": "205787",
"source": "205787",
"inclusion_delay": "303267",
"inactivity": "0"
},
{
"effective_balance": "32000000000",
"head": "212426",
"target": "212426",
"source": "212426",
"inclusion_delay": "313050",
"inactivity": "0"
}
],
"total_rewards": [
{
"validator_index": "0",
"head": "212426",
"target": "212426",
"source": "212426",
"inclusion_delay": "313050",
"inactivity": "0"
},
{
"validator_index": "32",
"head": "212426",
"target": "212426",
"source": "212426",
"inclusion_delay": "313050",
"inactivity": "0"
},
{
"validator_index": "63",
"head": "-357771",
"target": "-357771",
"source": "-357771",
"inclusion_delay": "0",
"inactivity": "0"
}
]
}
```
</details>
2023-07-18 01:48:40 +00:00
|
|
|
|chain: Arc<BeaconChain<T>>, epoch: Epoch, validators: Vec<ValidatorId>| {
|
2023-02-07 00:00:19 +00:00
|
|
|
blocking_json_task(move || {
|
|
|
|
let attestation_rewards = chain
|
2023-07-18 01:48:40 +00:00
|
|
|
.compute_attestation_rewards(epoch, validators)
|
2023-02-07 00:00:19 +00:00
|
|
|
.map_err(|e| match e {
|
|
|
|
BeaconChainError::MissingBeaconState(root) => {
|
|
|
|
warp_utils::reject::custom_not_found(format!(
|
|
|
|
"missing state {root:?}",
|
|
|
|
))
|
|
|
|
}
|
|
|
|
BeaconChainError::NoStateForSlot(slot) => {
|
|
|
|
warp_utils::reject::custom_not_found(format!(
|
|
|
|
"missing state at slot {slot}"
|
|
|
|
))
|
|
|
|
}
|
|
|
|
BeaconChainError::BeaconStateError(
|
|
|
|
BeaconStateError::UnknownValidator(validator_index),
|
|
|
|
) => warp_utils::reject::custom_bad_request(format!(
|
|
|
|
"validator is unknown: {validator_index}"
|
|
|
|
)),
|
|
|
|
BeaconChainError::ValidatorPubkeyUnknown(pubkey) => {
|
|
|
|
warp_utils::reject::custom_bad_request(format!(
|
|
|
|
"validator pubkey is unknown: {pubkey:?}"
|
|
|
|
))
|
|
|
|
}
|
|
|
|
e => warp_utils::reject::custom_server_error(format!(
|
|
|
|
"unexpected error: {:?}",
|
|
|
|
e
|
|
|
|
)),
|
|
|
|
})?;
|
|
|
|
let execution_optimistic =
|
|
|
|
chain.is_optimistic_or_invalid_head().unwrap_or_default();
|
|
|
|
|
|
|
|
Ok(attestation_rewards)
|
|
|
|
.map(api_types::GenericResponse::from)
|
|
|
|
.map(|resp| resp.add_execution_optimistic(execution_optimistic))
|
|
|
|
})
|
|
|
|
},
|
|
|
|
);
|
|
|
|
|
2023-01-24 02:06:42 +00:00
|
|
|
// POST beacon/rewards/sync_committee/{block_id}
|
|
|
|
let post_beacon_rewards_sync_committee = beacon_rewards_path
|
|
|
|
.clone()
|
|
|
|
.and(warp::path("sync_committee"))
|
|
|
|
.and(block_id_or_err)
|
|
|
|
.and(warp::path::end())
|
|
|
|
.and(warp::body::json())
|
|
|
|
.and(log_filter.clone())
|
|
|
|
.and_then(
|
|
|
|
|chain: Arc<BeaconChain<T>>,
|
|
|
|
block_id: BlockId,
|
|
|
|
validators: Vec<ValidatorId>,
|
|
|
|
log: Logger| {
|
|
|
|
blocking_json_task(move || {
|
2023-03-30 06:08:37 +00:00
|
|
|
let (rewards, execution_optimistic, finalized) =
|
2023-01-24 02:06:42 +00:00
|
|
|
sync_committee_rewards::compute_sync_committee_rewards(
|
|
|
|
chain, block_id, validators, log,
|
|
|
|
)?;
|
|
|
|
|
|
|
|
Ok(rewards)
|
|
|
|
.map(api_types::GenericResponse::from)
|
2023-03-30 06:08:37 +00:00
|
|
|
.map(|resp| {
|
|
|
|
resp.add_execution_optimistic_finalized(execution_optimistic, finalized)
|
|
|
|
})
|
2023-01-24 02:06:42 +00:00
|
|
|
})
|
|
|
|
},
|
|
|
|
);
|
|
|
|
|
2020-09-29 03:46:54 +00:00
|
|
|
/*
|
2021-08-24 01:36:27 +00:00
|
|
|
* config
|
2020-09-29 03:46:54 +00:00
|
|
|
*/
|
|
|
|
|
2022-07-25 08:23:00 +00:00
|
|
|
let config_path = eth_v1.and(warp::path("config"));
|
2020-09-29 03:46:54 +00:00
|
|
|
|
|
|
|
// GET config/fork_schedule
|
|
|
|
let get_config_fork_schedule = config_path
|
|
|
|
.and(warp::path("fork_schedule"))
|
|
|
|
.and(warp::path::end())
|
|
|
|
.and(chain_filter.clone())
|
|
|
|
.and_then(|chain: Arc<BeaconChain<T>>| {
|
|
|
|
blocking_json_task(move || {
|
2021-08-24 01:36:27 +00:00
|
|
|
let forks = ForkName::list_all()
|
|
|
|
.into_iter()
|
|
|
|
.filter_map(|fork_name| chain.spec.fork_for_name(fork_name))
|
|
|
|
.collect::<Vec<_>>();
|
|
|
|
Ok(api_types::GenericResponse::from(forks))
|
2020-09-29 03:46:54 +00:00
|
|
|
})
|
|
|
|
});
|
|
|
|
|
|
|
|
// GET config/spec
|
2022-08-10 07:52:59 +00:00
|
|
|
let spec_fork_name = ctx.config.spec_fork_name;
|
2020-09-29 03:46:54 +00:00
|
|
|
let get_config_spec = config_path
|
|
|
|
.and(warp::path("spec"))
|
|
|
|
.and(warp::path::end())
|
|
|
|
.and(chain_filter.clone())
|
2021-07-09 06:15:32 +00:00
|
|
|
.and_then(move |chain: Arc<BeaconChain<T>>| {
|
2020-09-29 03:46:54 +00:00
|
|
|
blocking_json_task(move || {
|
2022-08-10 07:52:59 +00:00
|
|
|
let config_and_preset =
|
|
|
|
ConfigAndPreset::from_chain_spec::<T::EthSpec>(&chain.spec, spec_fork_name);
|
2021-07-09 06:15:32 +00:00
|
|
|
Ok(api_types::GenericResponse::from(config_and_preset))
|
2020-09-29 03:46:54 +00:00
|
|
|
})
|
|
|
|
});
|
|
|
|
|
|
|
|
// GET config/deposit_contract
|
|
|
|
let get_config_deposit_contract = config_path
|
|
|
|
.and(warp::path("deposit_contract"))
|
|
|
|
.and(warp::path::end())
|
|
|
|
.and(chain_filter.clone())
|
|
|
|
.and_then(|chain: Arc<BeaconChain<T>>| {
|
|
|
|
blocking_json_task(move || {
|
|
|
|
Ok(api_types::GenericResponse::from(
|
|
|
|
api_types::DepositContractData {
|
|
|
|
address: chain.spec.deposit_contract_address,
|
2021-10-01 06:32:38 +00:00
|
|
|
chain_id: chain.spec.deposit_chain_id,
|
2020-09-29 03:46:54 +00:00
|
|
|
},
|
|
|
|
))
|
|
|
|
})
|
|
|
|
});
|
|
|
|
|
|
|
|
/*
|
|
|
|
* debug
|
|
|
|
*/
|
|
|
|
|
|
|
|
// GET debug/beacon/states/{state_id}
|
2021-08-06 00:47:31 +00:00
|
|
|
let get_debug_beacon_states = any_version
|
2020-09-29 03:46:54 +00:00
|
|
|
.and(warp::path("debug"))
|
|
|
|
.and(warp::path("beacon"))
|
|
|
|
.and(warp::path("states"))
|
2020-12-03 23:10:08 +00:00
|
|
|
.and(warp::path::param::<StateId>().or_else(|_| async {
|
|
|
|
Err(warp_utils::reject::custom_bad_request(
|
|
|
|
"Invalid state ID".to_string(),
|
|
|
|
))
|
|
|
|
}))
|
2020-09-29 03:46:54 +00:00
|
|
|
.and(warp::path::end())
|
2021-01-06 03:01:46 +00:00
|
|
|
.and(warp::header::optional::<api_types::Accept>("accept"))
|
2020-09-29 03:46:54 +00:00
|
|
|
.and(chain_filter.clone())
|
2021-01-06 03:01:46 +00:00
|
|
|
.and_then(
|
2021-08-06 00:47:31 +00:00
|
|
|
|endpoint_version: EndpointVersion,
|
|
|
|
state_id: StateId,
|
2021-01-06 03:01:46 +00:00
|
|
|
accept_header: Option<api_types::Accept>,
|
|
|
|
chain: Arc<BeaconChain<T>>| {
|
2023-03-13 01:40:03 +00:00
|
|
|
blocking_response_task(move || match accept_header {
|
2021-01-06 03:01:46 +00:00
|
|
|
Some(api_types::Accept::Ssz) => {
|
2022-07-25 08:23:00 +00:00
|
|
|
// We can ignore the optimistic status for the "fork" since it's a
|
|
|
|
// specification constant that doesn't change across competing heads of the
|
|
|
|
// beacon chain.
|
2023-03-30 06:08:37 +00:00
|
|
|
let (state, _execution_optimistic, _finalized) = state_id.state(&chain)?;
|
2021-10-28 01:18:04 +00:00
|
|
|
let fork_name = state
|
|
|
|
.fork_name(&chain.spec)
|
|
|
|
.map_err(inconsistent_fork_rejection)?;
|
2021-01-06 03:01:46 +00:00
|
|
|
Response::builder()
|
|
|
|
.status(200)
|
|
|
|
.header("Content-Type", "application/octet-stream")
|
|
|
|
.body(state.as_ssz_bytes().into())
|
2023-03-13 01:40:03 +00:00
|
|
|
.map(|resp: warp::reply::Response| {
|
|
|
|
add_consensus_version_header(resp, fork_name)
|
|
|
|
})
|
2021-01-06 03:01:46 +00:00
|
|
|
.map_err(|e| {
|
|
|
|
warp_utils::reject::custom_server_error(format!(
|
|
|
|
"failed to create response: {}",
|
|
|
|
e
|
|
|
|
))
|
|
|
|
})
|
|
|
|
}
|
2023-03-30 06:08:37 +00:00
|
|
|
_ => state_id.map_state_and_execution_optimistic_and_finalized(
|
2022-07-25 08:23:00 +00:00
|
|
|
&chain,
|
2023-03-30 06:08:37 +00:00
|
|
|
|state, execution_optimistic, finalized| {
|
2022-07-25 08:23:00 +00:00
|
|
|
let fork_name = state
|
|
|
|
.fork_name(&chain.spec)
|
|
|
|
.map_err(inconsistent_fork_rejection)?;
|
2023-03-30 06:08:37 +00:00
|
|
|
let res = execution_optimistic_finalized_fork_versioned_response(
|
2022-07-25 08:23:00 +00:00
|
|
|
endpoint_version,
|
|
|
|
fork_name,
|
|
|
|
execution_optimistic,
|
2023-03-30 06:08:37 +00:00
|
|
|
finalized,
|
2022-07-25 08:23:00 +00:00
|
|
|
&state,
|
|
|
|
)?;
|
|
|
|
Ok(add_consensus_version_header(
|
|
|
|
warp::reply::json(&res).into_response(),
|
|
|
|
fork_name,
|
|
|
|
))
|
|
|
|
},
|
|
|
|
),
|
2020-09-29 03:46:54 +00:00
|
|
|
})
|
2021-01-06 03:01:46 +00:00
|
|
|
},
|
|
|
|
);
|
2020-09-29 03:46:54 +00:00
|
|
|
|
|
|
|
// GET debug/beacon/heads
|
2022-07-25 08:23:00 +00:00
|
|
|
let get_debug_beacon_heads = any_version
|
2020-09-29 03:46:54 +00:00
|
|
|
.and(warp::path("debug"))
|
|
|
|
.and(warp::path("beacon"))
|
|
|
|
.and(warp::path("heads"))
|
|
|
|
.and(warp::path::end())
|
|
|
|
.and(chain_filter.clone())
|
2022-07-25 08:23:00 +00:00
|
|
|
.and_then(
|
|
|
|
|endpoint_version: EndpointVersion, chain: Arc<BeaconChain<T>>| {
|
|
|
|
blocking_json_task(move || {
|
|
|
|
let heads = chain
|
|
|
|
.heads()
|
|
|
|
.into_iter()
|
|
|
|
.map(|(root, slot)| {
|
|
|
|
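// Only the v2 variant of this endpoint reports `execution_optimistic`; v1 leaves it unset.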
let execution_optimistic = if endpoint_version == V1 {
|
|
|
|
None
|
|
|
|
} else if endpoint_version == V2 {
|
|
|
|
chain
|
|
|
|
.canonical_head
|
|
|
|
.fork_choice_read_lock()
|
2022-07-30 05:08:57 +00:00
|
|
|
.is_optimistic_or_invalid_block(&root)
|
2022-07-25 08:23:00 +00:00
|
|
|
.ok()
|
|
|
|
} else {
|
|
|
|
return Err(unsupported_version_rejection(endpoint_version));
|
|
|
|
};
|
|
|
|
Ok(api_types::ChainHeadData {
|
|
|
|
slot,
|
|
|
|
root,
|
|
|
|
execution_optimistic,
|
|
|
|
})
|
|
|
|
})
|
|
|
|
.collect::<Result<Vec<_>, warp::Rejection>>();
|
|
|
|
Ok(api_types::GenericResponse::from(heads?))
|
|
|
|
})
|
|
|
|
},
|
|
|
|
);
|
2020-09-29 03:46:54 +00:00
|
|
|
|
2023-03-29 02:56:37 +00:00
|
|
|
// GET debug/fork_choice
|
|
|
|
let get_debug_fork_choice = eth_v1
|
|
|
|
.and(warp::path("debug"))
|
|
|
|
.and(warp::path("fork_choice"))
|
|
|
|
.and(warp::path::end())
|
|
|
|
.and(chain_filter.clone())
|
|
|
|
.and_then(|chain: Arc<BeaconChain<T>>| {
|
|
|
|
blocking_json_task(move || {
|
|
|
|
let beacon_fork_choice = chain.canonical_head.fork_choice_read_lock();
|
|
|
|
|
|
|
|
let proto_array = beacon_fork_choice.proto_array().core_proto_array();
|
|
|
|
|
|
|
|
let fork_choice_nodes = proto_array
|
|
|
|
.nodes
|
|
|
|
.iter()
|
|
|
|
.map(|node| {
|
|
|
|
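// Only report a `validity` status for blocks where execution is enabled.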
let execution_status = if node.execution_status.is_execution_enabled() {
|
|
|
|
Some(node.execution_status.to_string())
|
|
|
|
} else {
|
|
|
|
None
|
|
|
|
};
|
|
|
|
|
|
|
|
ForkChoiceNode {
|
|
|
|
slot: node.slot,
|
|
|
|
block_root: node.root,
|
|
|
|
parent_root: node
|
|
|
|
.parent
|
|
|
|
.and_then(|index| proto_array.nodes.get(index))
|
|
|
|
.map(|parent| parent.root),
|
2023-05-15 02:10:42 +00:00
|
|
|
justified_epoch: node.justified_checkpoint.epoch,
|
|
|
|
finalized_epoch: node.finalized_checkpoint.epoch,
|
2023-03-29 02:56:37 +00:00
|
|
|
weight: node.weight,
|
|
|
|
validity: execution_status,
|
|
|
|
execution_block_hash: node
|
|
|
|
.execution_status
|
|
|
|
.block_hash()
|
|
|
|
.map(|block_hash| block_hash.into_root()),
|
|
|
|
}
|
|
|
|
})
|
|
|
|
.collect::<Vec<_>>();
|
|
|
|
Ok(ForkChoice {
|
|
|
|
justified_checkpoint: proto_array.justified_checkpoint,
|
|
|
|
finalized_checkpoint: proto_array.finalized_checkpoint,
|
|
|
|
fork_choice_nodes,
|
|
|
|
})
|
|
|
|
})
|
|
|
|
});
|
|
|
|
|
2020-09-29 03:46:54 +00:00
|
|
|
/*
|
|
|
|
* node
|
|
|
|
*/
|
|
|
|
|
|
|
|
// GET node/identity
|
2022-07-25 08:23:00 +00:00
|
|
|
let get_node_identity = eth_v1
|
2020-09-29 03:46:54 +00:00
|
|
|
.and(warp::path("node"))
|
|
|
|
.and(warp::path("identity"))
|
|
|
|
.and(warp::path::end())
|
|
|
|
.and(network_globals.clone())
|
|
|
|
.and_then(|network_globals: Arc<NetworkGlobals<T::EthSpec>>| {
|
|
|
|
blocking_json_task(move || {
|
2020-10-22 02:59:42 +00:00
|
|
|
let enr = network_globals.local_enr();
|
|
|
|
let p2p_addresses = enr.multiaddr_p2p_tcp();
|
|
|
|
let discovery_addresses = enr.multiaddr_p2p_udp();
|
2021-08-04 01:44:57 +00:00
|
|
|
let meta_data = network_globals.local_metadata.read();
|
2020-09-29 03:46:54 +00:00
|
|
|
Ok(api_types::GenericResponse::from(api_types::IdentityData {
|
|
|
|
peer_id: network_globals.local_peer_id().to_base58(),
|
2020-10-22 02:59:42 +00:00
|
|
|
enr,
|
|
|
|
p2p_addresses,
|
|
|
|
discovery_addresses,
|
|
|
|
metadata: api_types::MetaData {
|
2021-08-04 01:44:57 +00:00
|
|
|
seq_number: *meta_data.seq_number(),
|
2020-10-22 02:59:42 +00:00
|
|
|
attnets: format!(
|
2021-08-04 01:44:57 +00:00
|
|
|
"0x{}",
|
|
|
|
hex::encode(meta_data.attnets().clone().into_bytes()),
|
|
|
|
),
|
|
|
|
syncnets: format!(
|
2020-10-22 02:59:42 +00:00
|
|
|
"0x{}",
|
|
|
|
hex::encode(
|
2021-08-04 01:44:57 +00:00
|
|
|
meta_data
|
|
|
|
.syncnets()
|
|
|
|
.map(|x| x.clone())
|
|
|
|
.unwrap_or_default()
|
2020-10-22 02:59:42 +00:00
|
|
|
.into_bytes()
|
2021-08-04 01:44:57 +00:00
|
|
|
)
|
2020-10-22 02:59:42 +00:00
|
|
|
),
|
|
|
|
},
|
2020-09-29 03:46:54 +00:00
|
|
|
}))
|
|
|
|
})
|
|
|
|
});
|
|
|
|
|
|
|
|
// GET node/version
|
2022-07-25 08:23:00 +00:00
|
|
|
let get_node_version = eth_v1
|
2020-09-29 03:46:54 +00:00
|
|
|
.and(warp::path("node"))
|
|
|
|
.and(warp::path("version"))
|
|
|
|
.and(warp::path::end())
|
|
|
|
.and_then(|| {
|
|
|
|
blocking_json_task(move || {
|
|
|
|
Ok(api_types::GenericResponse::from(api_types::VersionData {
|
|
|
|
version: version_with_platform(),
|
|
|
|
}))
|
|
|
|
})
|
|
|
|
});
|
|
|
|
|
|
|
|
// GET node/syncing
|
2022-07-25 08:23:00 +00:00
|
|
|
let get_node_syncing = eth_v1
|
2020-09-29 03:46:54 +00:00
|
|
|
.and(warp::path("node"))
|
|
|
|
.and(warp::path("syncing"))
|
|
|
|
.and(warp::path::end())
|
|
|
|
.and(network_globals.clone())
|
|
|
|
.and(chain_filter.clone())
|
|
|
|
.and_then(
|
|
|
|
|network_globals: Arc<NetworkGlobals<T::EthSpec>>, chain: Arc<BeaconChain<T>>| {
|
Implement `el_offline` and use it in the VC (#4295)
## Issue Addressed
Closes https://github.com/sigp/lighthouse/issues/4291, part of #3613.
## Proposed Changes
- Implement the `el_offline` field on `/eth/v1/node/syncing`. We set `el_offline=true` if:
- The EL's internal status is `Offline` or `AuthFailed`, _or_
- The most recent call to `newPayload` resulted in an error (more on this in a moment).
- Use the `el_offline` field in the VC to mark nodes with offline ELs as _unsynced_. These nodes will still be used, but only after synced nodes.
- Overhaul the usage of `RequireSynced` so that `::No` is used almost everywhere. The `--allow-unsynced` flag was broken and had the opposite of its intended effect, so it has been deprecated.
- Add tests for the EL being offline on the upcheck call, and being offline due to the newPayload check.
## Why track `newPayload` errors?
Tracking the EL's online/offline status is too coarse-grained to be useful in practice, because:
- If the EL is timing out on some calls, it's unlikely to time out on the `upcheck` call, which is _just_ `eth_syncing`. Every failed call is followed by an upcheck [here](https://github.com/sigp/lighthouse/blob/693886b94176faa4cb450f024696cb69cda2fe58/beacon_node/execution_layer/src/engines.rs#L372-L380), which would have the effect of masking the failure and keeping the status _online_.
- The `newPayload` call is the most likely to time out. It's the call in which ELs tend to do most of their work (often 1-2 seconds), with `forkchoiceUpdated` usually returning much faster (<50ms).
- If `newPayload` is failing consistently (e.g. timing out) then this is a good indication that either the node's EL is in trouble, or the network as a whole is. In the first case validator clients _should_ prefer other BNs if they have one available. In the second case, all of their BNs will likely report `el_offline` and they'll just have to proceed with trying to use them.
## Additional Changes
- Add utility method `ForkName::latest` which is quite convenient for test writing, but probably other things too.
- Delete some stale comments from when we used to support multiple execution nodes.
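As a rough sketch of how a consumer might read the new field (not part of this PR; it assumes a local node at `http://localhost:5052` and the blocking `reqwest` client with the `json` feature, plus `serde_json`):
```rust
use serde_json::Value;

fn main() -> Result<(), Box<dyn std::error::Error>> {
    // Hypothetical node URL; adjust as required.
    let syncing: Value = reqwest::blocking::get("http://localhost:5052/eth/v1/node/syncing")?
        .error_for_status()?
        .json()?;

    // `el_offline` is optional, so treat a missing field as "not known to be offline".
    let el_offline = syncing["data"]["el_offline"].as_bool().unwrap_or(false);
    let is_syncing = syncing["data"]["is_syncing"].as_bool().unwrap_or(true);

    if el_offline {
        println!("execution layer appears offline; prefer another beacon node if available");
    } else if is_syncing {
        println!("beacon node is still syncing");
    } else {
        println!("beacon node looks synced with a responsive EL");
    }
    Ok(())
}
```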
2023-05-17 05:51:56 +00:00
|
|
|
async move {
|
|
|
|
let el_offline = if let Some(el) = &chain.execution_layer {
|
|
|
|
el.is_offline_or_erroring().await
|
|
|
|
} else {
|
|
|
|
true
|
|
|
|
};
|
2020-09-29 03:46:54 +00:00
|
|
|
|
2023-05-17 05:51:56 +00:00
|
|
|
blocking_json_task(move || {
|
|
|
|
let head_slot = chain.canonical_head.cached_head().head_slot();
|
|
|
|
let current_slot = chain.slot_clock.now_or_genesis().ok_or_else(|| {
|
|
|
|
warp_utils::reject::custom_server_error(
|
|
|
|
"Unable to read slot clock".into(),
|
|
|
|
)
|
|
|
|
})?;
|
2020-09-29 03:46:54 +00:00
|
|
|
|
2023-05-17 05:51:56 +00:00
|
|
|
// Taking advantage of saturating subtraction on slot.
|
|
|
|
let sync_distance = current_slot - head_slot;
|
2022-07-26 08:50:16 +00:00
|
|
|
|
2023-05-17 05:51:56 +00:00
|
|
|
let is_optimistic = chain
|
|
|
|
.is_optimistic_or_invalid_head()
|
|
|
|
.map_err(warp_utils::reject::beacon_chain_error)?;
|
2020-09-29 03:46:54 +00:00
|
|
|
|
2023-05-17 05:51:56 +00:00
|
|
|
let syncing_data = api_types::SyncingData {
|
|
|
|
is_syncing: network_globals.sync_state.read().is_syncing(),
|
|
|
|
is_optimistic: Some(is_optimistic),
|
|
|
|
el_offline: Some(el_offline),
|
|
|
|
head_slot,
|
|
|
|
sync_distance,
|
|
|
|
};
|
|
|
|
|
|
|
|
Ok(api_types::GenericResponse::from(syncing_data))
|
|
|
|
})
|
|
|
|
.await
|
|
|
|
}
|
2020-09-29 03:46:54 +00:00
|
|
|
},
|
|
|
|
);
|
|
|
|
|
2020-10-22 02:59:42 +00:00
|
|
|
// GET node/health
|
2022-07-25 08:23:00 +00:00
|
|
|
let get_node_health = eth_v1
|
2020-10-22 02:59:42 +00:00
|
|
|
.and(warp::path("node"))
|
|
|
|
.and(warp::path("health"))
|
|
|
|
.and(warp::path::end())
|
|
|
|
.and(network_globals.clone())
|
2023-06-30 01:13:04 +00:00
|
|
|
.and(chain_filter.clone())
|
|
|
|
.and_then(
|
|
|
|
|network_globals: Arc<NetworkGlobals<T::EthSpec>>, chain: Arc<BeaconChain<T>>| {
|
|
|
|
async move {
|
|
|
|
let el_offline = if let Some(el) = &chain.execution_layer {
|
|
|
|
el.is_offline_or_erroring().await
|
|
|
|
} else {
|
|
|
|
true
|
|
|
|
};
|
|
|
|
|
|
|
|
blocking_response_task(move || {
|
|
|
|
let is_optimistic = chain
|
|
|
|
.is_optimistic_or_invalid_head()
|
|
|
|
.map_err(warp_utils::reject::beacon_chain_error)?;
|
|
|
|
|
|
|
|
let is_syncing = !network_globals.sync_state.read().is_synced();
|
|
|
|
|
|
|
|
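// EL offline => reject as "not synced"; syncing or optimistic head => 206 PARTIAL_CONTENT; otherwise 200 OK.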
if el_offline {
|
|
|
|
Err(warp_utils::reject::not_synced("execution layer is offline".to_string()))
|
|
|
|
} else if is_syncing || is_optimistic {
|
|
|
|
Ok(warp::reply::with_status(
|
|
|
|
warp::reply(),
|
|
|
|
warp::http::StatusCode::PARTIAL_CONTENT,
|
|
|
|
))
|
|
|
|
} else {
|
|
|
|
Ok(warp::reply::with_status(
|
|
|
|
warp::reply(),
|
|
|
|
warp::http::StatusCode::OK,
|
|
|
|
))
|
|
|
|
}
|
|
|
|
})
|
|
|
|
.await
|
|
|
|
}
|
|
|
|
},
|
|
|
|
);
|
2020-10-22 02:59:42 +00:00
|
|
|
|
|
|
|
// GET node/peers/{peer_id}
|
2022-07-25 08:23:00 +00:00
|
|
|
let get_node_peers_by_id = eth_v1
|
2020-10-22 02:59:42 +00:00
|
|
|
.and(warp::path("node"))
|
|
|
|
.and(warp::path("peers"))
|
|
|
|
.and(warp::path::param::<String>())
|
|
|
|
.and(warp::path::end())
|
|
|
|
.and(network_globals.clone())
|
|
|
|
.and_then(
|
|
|
|
|requested_peer_id: String, network_globals: Arc<NetworkGlobals<T::EthSpec>>| {
|
|
|
|
blocking_json_task(move || {
|
|
|
|
let peer_id = PeerId::from_bytes(
|
2020-12-23 07:53:36 +00:00
|
|
|
&bs58::decode(requested_peer_id.as_str())
|
2020-10-22 02:59:42 +00:00
|
|
|
.into_vec()
|
|
|
|
.map_err(|e| {
|
|
|
|
warp_utils::reject::custom_bad_request(format!(
|
|
|
|
"invalid peer id: {}",
|
|
|
|
e
|
|
|
|
))
|
|
|
|
})?,
|
|
|
|
)
|
|
|
|
.map_err(|_| {
|
|
|
|
warp_utils::reject::custom_bad_request("invalid peer id.".to_string())
|
|
|
|
})?;
|
|
|
|
|
2021-11-25 03:45:52 +00:00
|
|
|
if let Some(peer_info) = network_globals.peers.read().peer_info(&peer_id) {
|
2021-10-11 02:45:06 +00:00
|
|
|
let address = if let Some(socket_addr) = peer_info.seen_addresses().next() {
|
Rename eth2_libp2p to lighthouse_network (#2702)
## Description
The `eth2_libp2p` crate was originally named and designed to incorporate a simple libp2p integration into lighthouse. Since its origins the crate's purpose has expanded dramatically. It now houses a lot more sophistication that is specific to lighthouse and is no longer just a libp2p integration.
As of this writing it houses the following high-level lighthouse-specific logic:
- Lighthouse's implementation of the eth2 RPC protocol and specific encodings/decodings
- Integration and handling of ENRs with respect to libp2p and eth2
- Lighthouse's discovery logic, its integration with discv5 and logic about searching and handling peers.
- Lighthouse's peer manager - This is a large module handling various aspects of Lighthouse's network, such as peer scoring, handling pings and metadata, connection maintenance and recording, etc.
- Lighthouse's peer database - This is a collection of information stored for each individual peer which is specific to lighthouse. We store connection state, sync state, last seen IPs and scores etc. The data stored for each peer is designed for various elements of the lighthouse code base such as syncing and the http api.
- Gossipsub scoring - This stores a collection of gossipsub 1.1 scoring mechanisms that are continuously analysed and updated based on the ethereum 2 networks and how Lighthouse performs on these networks.
- Lighthouse specific types for managing gossipsub topics, sync status and ENR fields
- Lighthouse's network HTTP API metrics - A collection of metrics for lighthouse network monitoring
- Lighthouse's custom configuration of all networking protocols, RPC, gossipsub, discovery, identify and libp2p.
Therefore it makes sense to rename the crate to reflect its current purpose: managing the majority of Lighthouse's network stack. This PR renames the crate to `lighthouse_network`.
Co-authored-by: Paul Hauner <paul@paulhauner.com>
2021-10-19 00:30:39 +00:00
|
|
|
let mut addr = lighthouse_network::Multiaddr::from(socket_addr.ip());
|
|
|
|
addr.push(lighthouse_network::multiaddr::Protocol::Tcp(
|
|
|
|
socket_addr.port(),
|
|
|
|
));
|
2020-11-09 04:01:03 +00:00
|
|
|
addr.to_string()
|
2021-10-11 02:45:06 +00:00
|
|
|
} else if let Some(addr) = peer_info.listening_addresses().first() {
|
2020-11-09 04:01:03 +00:00
|
|
|
addr.to_string()
|
|
|
|
} else {
|
|
|
|
String::new()
|
2020-10-22 02:59:42 +00:00
|
|
|
};
|
|
|
|
|
|
|
|
// the eth2 API spec implies only peers we have been connected to at some point should be included.
|
2021-10-11 02:45:06 +00:00
|
|
|
if let Some(dir) = peer_info.connection_direction().as_ref() {
|
2020-10-22 02:59:42 +00:00
|
|
|
return Ok(api_types::GenericResponse::from(api_types::PeerData {
|
|
|
|
peer_id: peer_id.to_string(),
|
2021-10-11 02:45:06 +00:00
|
|
|
enr: peer_info.enr().map(|enr| enr.to_base64()),
|
2020-11-13 02:02:41 +00:00
|
|
|
last_seen_p2p_address: address,
|
2021-07-30 01:11:47 +00:00
|
|
|
direction: api_types::PeerDirection::from_connection_direction(dir),
|
2020-10-22 02:59:42 +00:00
|
|
|
state: api_types::PeerState::from_peer_connection_status(
|
2021-07-30 01:11:47 +00:00
|
|
|
peer_info.connection_status(),
|
2020-10-22 02:59:42 +00:00
|
|
|
),
|
|
|
|
}));
|
|
|
|
}
|
|
|
|
}
|
|
|
|
Err(warp_utils::reject::custom_not_found(
|
|
|
|
"peer not found.".to_string(),
|
|
|
|
))
|
|
|
|
})
|
|
|
|
},
|
|
|
|
);
|
|
|
|
|
|
|
|
// GET node/peers
|
2022-07-25 08:23:00 +00:00
|
|
|
let get_node_peers = eth_v1
|
2020-10-22 02:59:42 +00:00
|
|
|
.and(warp::path("node"))
|
|
|
|
.and(warp::path("peers"))
|
|
|
|
.and(warp::path::end())
|
2022-01-20 09:14:19 +00:00
|
|
|
.and(multi_key_query::<api_types::PeersQuery>())
|
2020-11-13 02:02:41 +00:00
|
|
|
.and(network_globals.clone())
|
|
|
|
.and_then(
|
2022-01-20 09:14:19 +00:00
|
|
|
|query_res: Result<api_types::PeersQuery, warp::Rejection>,
|
|
|
|
network_globals: Arc<NetworkGlobals<T::EthSpec>>| {
|
2020-11-13 02:02:41 +00:00
|
|
|
blocking_json_task(move || {
|
2022-01-20 09:14:19 +00:00
|
|
|
let query = query_res?;
|
2020-11-13 02:02:41 +00:00
|
|
|
let mut peers: Vec<api_types::PeerData> = Vec::new();
|
|
|
|
network_globals
|
2021-11-25 03:45:52 +00:00
|
|
|
.peers
|
|
|
|
.read()
|
2020-11-13 02:02:41 +00:00
|
|
|
.peers()
|
|
|
|
.for_each(|(peer_id, peer_info)| {
|
|
|
|
let address =
|
2021-10-11 02:45:06 +00:00
|
|
|
if let Some(socket_addr) = peer_info.seen_addresses().next() {
|
2021-10-19 00:30:39 +00:00
|
|
|
let mut addr =
|
|
|
|
lighthouse_network::Multiaddr::from(socket_addr.ip());
|
|
|
|
addr.push(lighthouse_network::multiaddr::Protocol::Tcp(
|
2020-11-13 02:02:41 +00:00
|
|
|
socket_addr.port(),
|
|
|
|
));
|
|
|
|
addr.to_string()
|
2021-10-11 02:45:06 +00:00
|
|
|
} else if let Some(addr) = peer_info.listening_addresses().first() {
|
2020-11-13 02:02:41 +00:00
|
|
|
addr.to_string()
|
|
|
|
} else {
|
|
|
|
String::new()
|
|
|
|
};
|
|
|
|
|
|
|
|
// the eth2 API spec implies only peers we have been connected to at some point should be included.
|
2021-10-11 02:45:06 +00:00
|
|
|
if let Some(dir) = peer_info.connection_direction() {
|
2020-11-13 02:02:41 +00:00
|
|
|
let direction =
|
2021-07-30 01:11:47 +00:00
|
|
|
api_types::PeerDirection::from_connection_direction(dir);
|
2020-11-13 02:02:41 +00:00
|
|
|
let state = api_types::PeerState::from_peer_connection_status(
|
2021-07-30 01:11:47 +00:00
|
|
|
peer_info.connection_status(),
|
2020-11-13 02:02:41 +00:00
|
|
|
);
|
|
|
|
|
|
|
|
let state_matches = query.state.as_ref().map_or(true, |states| {
|
2022-01-20 09:14:19 +00:00
|
|
|
states.iter().any(|state_param| *state_param == state)
|
2020-11-13 02:02:41 +00:00
|
|
|
});
|
|
|
|
let direction_matches =
|
|
|
|
query.direction.as_ref().map_or(true, |directions| {
|
2022-01-20 09:14:19 +00:00
|
|
|
directions.iter().any(|dir_param| *dir_param == direction)
|
2020-11-13 02:02:41 +00:00
|
|
|
});
|
|
|
|
|
|
|
|
if state_matches && direction_matches {
|
|
|
|
peers.push(api_types::PeerData {
|
|
|
|
peer_id: peer_id.to_string(),
|
2021-10-11 02:45:06 +00:00
|
|
|
enr: peer_info.enr().map(|enr| enr.to_base64()),
|
2020-11-13 02:02:41 +00:00
|
|
|
last_seen_p2p_address: address,
|
|
|
|
direction,
|
|
|
|
state,
|
|
|
|
});
|
|
|
|
}
|
|
|
|
}
|
|
|
|
});
|
|
|
|
Ok(api_types::PeersData {
|
|
|
|
meta: api_types::PeersMetaData {
|
|
|
|
count: peers.len() as u64,
|
|
|
|
},
|
|
|
|
data: peers,
|
|
|
|
})
|
|
|
|
})
|
|
|
|
},
|
|
|
|
);
|
|
|
|
|
|
|
|
// GET node/peer_count
|
2022-07-25 08:23:00 +00:00
|
|
|
let get_node_peer_count = eth_v1
|
2020-11-13 02:02:41 +00:00
|
|
|
.and(warp::path("node"))
|
|
|
|
.and(warp::path("peer_count"))
|
|
|
|
.and(warp::path::end())
|
2020-10-22 02:59:42 +00:00
|
|
|
.and(network_globals.clone())
|
|
|
|
.and_then(|network_globals: Arc<NetworkGlobals<T::EthSpec>>| {
|
|
|
|
blocking_json_task(move || {
|
2020-11-13 02:02:41 +00:00
|
|
|
let mut connected: u64 = 0;
|
|
|
|
let mut connecting: u64 = 0;
|
|
|
|
let mut disconnected: u64 = 0;
|
|
|
|
let mut disconnecting: u64 = 0;
|
|
|
|
|
2021-11-25 03:45:52 +00:00
|
|
|
network_globals
|
|
|
|
.peers
|
|
|
|
.read()
|
|
|
|
.peers()
|
|
|
|
.for_each(|(_, peer_info)| {
|
|
|
|
let state = api_types::PeerState::from_peer_connection_status(
|
|
|
|
peer_info.connection_status(),
|
|
|
|
);
|
|
|
|
match state {
|
|
|
|
api_types::PeerState::Connected => connected += 1,
|
|
|
|
api_types::PeerState::Connecting => connecting += 1,
|
|
|
|
api_types::PeerState::Disconnected => disconnected += 1,
|
|
|
|
api_types::PeerState::Disconnecting => disconnecting += 1,
|
|
|
|
}
|
|
|
|
});
|
2020-11-13 02:02:41 +00:00
|
|
|
|
|
|
|
Ok(api_types::GenericResponse::from(api_types::PeerCount {
|
|
|
|
connected,
|
2021-05-10 00:53:09 +00:00
|
|
|
connecting,
|
2020-11-13 02:02:41 +00:00
|
|
|
disconnected,
|
2021-05-10 00:53:09 +00:00
|
|
|
disconnecting,
|
2020-11-13 02:02:41 +00:00
|
|
|
}))
|
2020-10-22 02:59:42 +00:00
|
|
|
})
|
|
|
|
});
|
2020-09-29 03:46:54 +00:00
|
|
|
/*
|
|
|
|
* validator
|
|
|
|
*/
|
|
|
|
|
2020-11-09 23:13:56 +00:00
|
|
|
// GET validator/duties/proposer/{epoch}
|
2022-07-25 08:23:00 +00:00
|
|
|
let get_validator_duties_proposer = eth_v1
|
2020-11-09 23:13:56 +00:00
|
|
|
.and(warp::path("validator"))
|
|
|
|
.and(warp::path("duties"))
|
|
|
|
.and(warp::path("proposer"))
|
2020-12-03 23:10:08 +00:00
|
|
|
.and(warp::path::param::<Epoch>().or_else(|_| async {
|
2021-02-10 23:29:49 +00:00
|
|
|
Err(warp_utils::reject::custom_bad_request(
|
|
|
|
"Invalid epoch".to_string(),
|
|
|
|
))
|
2020-12-03 23:10:08 +00:00
|
|
|
}))
|
2020-11-09 23:13:56 +00:00
|
|
|
.and(warp::path::end())
|
|
|
|
.and(not_while_syncing_filter.clone())
|
|
|
|
.and(chain_filter.clone())
|
2021-03-17 05:09:57 +00:00
|
|
|
.and(log_filter.clone())
|
|
|
|
.and_then(|epoch: Epoch, chain: Arc<BeaconChain<T>>, log: Logger| {
|
|
|
|
blocking_json_task(move || proposer_duties::proposer_duties(epoch, &chain, &log))
|
|
|
|
});
|
2020-11-09 23:13:56 +00:00
|
|
|
|
|
|
|
// GET validator/blocks/{slot}
|
2021-08-06 00:47:31 +00:00
|
|
|
let get_validator_blocks = any_version
|
2020-11-09 23:13:56 +00:00
|
|
|
.and(warp::path("validator"))
|
|
|
|
.and(warp::path("blocks"))
|
2020-12-03 23:10:08 +00:00
|
|
|
.and(warp::path::param::<Slot>().or_else(|_| async {
|
|
|
|
Err(warp_utils::reject::custom_bad_request(
|
|
|
|
"Invalid slot".to_string(),
|
|
|
|
))
|
|
|
|
}))
|
2020-11-09 23:13:56 +00:00
|
|
|
.and(warp::path::end())
|
|
|
|
.and(not_while_syncing_filter.clone())
|
|
|
|
.and(warp::query::<api_types::ValidatorBlocksQuery>())
|
|
|
|
.and(chain_filter.clone())
|
2023-02-28 02:20:53 +00:00
|
|
|
.and(log_filter.clone())
|
2020-11-09 23:13:56 +00:00
|
|
|
.and_then(
|
2021-08-06 00:47:31 +00:00
|
|
|
|endpoint_version: EndpointVersion,
|
|
|
|
slot: Slot,
|
|
|
|
query: api_types::ValidatorBlocksQuery,
|
2023-02-28 02:20:53 +00:00
|
|
|
chain: Arc<BeaconChain<T>>,
|
|
|
|
log: Logger| async move {
|
|
|
|
debug!(
|
|
|
|
log,
|
|
|
|
"Block production request from HTTP API";
|
|
|
|
"slot" => slot
|
|
|
|
);
|
|
|
|
|
2022-09-19 07:58:48 +00:00
|
|
|
let randao_reveal = query.randao_reveal.decompress().map_err(|e| {
|
|
|
|
warp_utils::reject::custom_bad_request(format!(
|
|
|
|
"randao reveal is not a valid BLS signature: {:?}",
|
|
|
|
e
|
|
|
|
))
|
|
|
|
})?;
|
|
|
|
|
|
|
|
let randao_verification =
|
|
|
|
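// Skipping randao verification requires the caller to supply the point-at-infinity signature as a sentinel.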
if query.skip_randao_verification == SkipRandaoVerification::Yes {
|
|
|
|
if !randao_reveal.is_infinity() {
|
|
|
|
return Err(warp_utils::reject::custom_bad_request(
|
|
|
|
"randao_reveal must be point-at-infinity if verification is skipped"
|
|
|
|
.into(),
|
|
|
|
));
|
Use async code when interacting with EL (#3244)
## Overview
This rather extensive PR achieves two primary goals:
1. Uses the finalized/justified checkpoints of fork choice (FC), rather than that of the head state.
2. Refactors fork choice, block production and block processing to `async` functions.
Additionally, it achieves:
- Concurrent forkchoice updates to the EL and cache pruning after a new head is selected.
- Concurrent "block packing" (attestations, etc) and execution payload retrieval during block production.
- Concurrent per-block-processing and execution payload verification during block processing.
- The `Arc`-ification of `SignedBeaconBlock` during block processing (it's never mutated, so why not?):
- I had to do this to deal with sending blocks into spawned tasks.
- Previously we were cloning the beacon block at least 2 times during each block processing, these clones are either removed or turned into cheaper `Arc` clones.
- We were also `Box`-ing and un-`Box`-ing beacon blocks as they moved throughout the networking crate. This is not a big deal, but it's nice to avoid shifting things between the stack and heap.
- Avoids cloning *all the blocks* in *every chain segment* during sync.
- It also has the potential to clean up our code where we need to pass an *owned* block around so we can send it back in the case of an error (I didn't do much of this, my PR is already big enough :sweat_smile:)
- The `BeaconChain::HeadSafetyStatus` struct was removed. It was an old relic from prior merge specs.
For motivation for this change, see https://github.com/sigp/lighthouse/pull/3244#issuecomment-1160963273
## Changes to `canonical_head` and `fork_choice`
Previously, the `BeaconChain` had two separate fields:
```
canonical_head: RwLock<Snapshot>,
fork_choice: RwLock<BeaconForkChoice>
```
Now, we have grouped these values under a single struct:
```
canonical_head: CanonicalHead {
cached_head: RwLock<Arc<Snapshot>>,
fork_choice: RwLock<BeaconForkChoice>
}
```
Apart from ergonomics, the only *actual* change here is wrapping the canonical head snapshot in an `Arc`. This means that we no longer need to hold the `cached_head` (`canonical_head`, in old terms) lock when we want to pull some values from it. This was done to avoid deadlock risks by preventing functions from acquiring (and holding) the `cached_head` and `fork_choice` locks simultaneously.
## Breaking Changes
### The `state` (root) field in the `finalized_checkpoint` SSE event
Consider the scenario where epoch `n` is just finalized, but `start_slot(n)` is skipped. There are two state roots we might use in the `finalized_checkpoint` SSE event:
1. The state root of the finalized block, which is `get_block(finalized_checkpoint.root).state_root`.
2. The state root at slot `start_slot(n)`, which would be the state from (1), but "skipped forward" through any skip slots.
Previously, Lighthouse would choose (2). However, we can see that when [Teku generates that event](https://github.com/ConsenSys/teku/blob/de2b2801c89ef5abf983d6bf37867c37fc47121f/data/beaconrestapi/src/main/java/tech/pegasys/teku/beaconrestapi/handlers/v1/events/EventSubscriptionManager.java#L171-L182) it uses [`getStateRootFromBlockRoot`](https://github.com/ConsenSys/teku/blob/de2b2801c89ef5abf983d6bf37867c37fc47121f/data/provider/src/main/java/tech/pegasys/teku/api/ChainDataProvider.java#L336-L341) which uses (1).
I have switched Lighthouse from (2) to (1). I think it's a somewhat arbitrary choice between the two, where (1) is easier to compute and is consistent with Teku.
## Notes for Reviewers
I've renamed `BeaconChain::fork_choice` to `BeaconChain::recompute_head`. Doing this helped ensure I broke all previous uses of fork choice and I also find it more descriptive. It describes an action and can't be confused with trying to get a reference to the `ForkChoice` struct.
I've changed the ordering of SSE events when a block is received. It used to be `[block, finalized, head]` and now it's `[block, head, finalized]`. It was easier this way and I don't think we were making any promises about SSE event ordering so it's not "breaking".
I've made it so fork choice will run when it's first constructed. I did this because I wanted to have a cached version of the last call to `get_head`. Ensuring `get_head` has been run *at least once* means that the cached value doesn't need to be wrapped in an `Option`. This was fairly simple; it just involved passing a `slot` to the constructor so it knows *when* it's being run. When loading a fork choice from the store and a slot clock isn't handy I've just used the `slot` that was saved in the `fork_choice_store`. That seems like it would be a faithful representation of the slot when we saved it.
I added the `genesis_time: u64` to the `BeaconChain`. It's small, constant and nice to have around.
Since we're using FC for the fin/just checkpoints, we no longer get the `0x00..00` roots at genesis. You can see I had to remove a work-around in `ef-tests` here: b56be3bc2. I can't find any reason why this would be an issue, if anything I think it'll be better since the genesis-alias has caught us out a few times (0x00..00 isn't actually a real root). Edit: I did find a case where the `network` expected the 0x00..00 alias and patched it here: 3f26ac3e2.
You'll notice a lot of changes in tests. Generally, tests should be functionally equivalent. Here are the things creating the most diff-noise in tests:
- Changing tests to be `tokio::async` tests.
- Adding `.await` to fork choice, block processing and block production functions.
- Refactor of the `canonical_head` "API" provided by the `BeaconChain`. E.g., `chain.canonical_head.cached_head()` instead of `chain.canonical_head.read()`.
- Wrapping `SignedBeaconBlock` in an `Arc`.
- In the `beacon_chain/tests/block_verification`, we can't use the `lazy_static` `CHAIN_SEGMENT` variable anymore since it's generated with an async function. We just generate it in each test, not so efficient but hopefully insignificant.
I had to disable `rayon` concurrent tests in the `fork_choice` tests. This is because the use of `rayon` and `block_on` was causing a panic.
Co-authored-by: Mac L <mjladson@pm.me>
2022-07-03 05:36:50 +00:00
|
|
|
}
|
2022-09-19 07:58:48 +00:00
|
|
|
ProduceBlockVerification::NoVerification
|
|
|
|
} else {
|
|
|
|
ProduceBlockVerification::VerifyRandao
|
|
|
|
};
|
2020-11-09 23:13:56 +00:00
|
|
|
|
Use async code when interacting with EL (#3244)
## Overview
This rather extensive PR achieves two primary goals:
1. Uses the finalized/justified checkpoints of fork choice (FC), rather than that of the head state.
2. Refactors fork choice, block production and block processing to `async` functions.
Additionally, it achieves:
- Concurrent forkchoice updates to the EL and cache pruning after a new head is selected.
- Concurrent "block packing" (attestations, etc) and execution payload retrieval during block production.
- Concurrent per-block-processing and execution payload verification during block processing.
- The `Arc`-ification of `SignedBeaconBlock` during block processing (it's never mutated, so why not?):
- I had to do this to deal with sending blocks into spawned tasks.
- Previously we were cloning the beacon block at least 2 times during each block processing, these clones are either removed or turned into cheaper `Arc` clones.
- We were also `Box`-ing and un-`Box`-ing beacon blocks as they moved throughout the networking crate. This is not a big deal, but it's nice to avoid shifting things between the stack and heap.
- Avoids cloning *all the blocks* in *every chain segment* during sync.
- It also has the potential to clean up our code where we need to pass an *owned* block around so we can send it back in the case of an error (I didn't do much of this, my PR is already big enough :sweat_smile:)
- The `BeaconChain::HeadSafetyStatus` struct was removed. It was an old relic from prior merge specs.
For motivation for this change, see https://github.com/sigp/lighthouse/pull/3244#issuecomment-1160963273
## Changes to `canonical_head` and `fork_choice`
Previously, the `BeaconChain` had two separate fields:
```
canonical_head: RwLock<Snapshot>,
fork_choice: RwLock<BeaconForkChoice>
```
Now, we have grouped these values under a single struct:
```
canonical_head: CanonicalHead {
cached_head: RwLock<Arc<Snapshot>>,
fork_choice: RwLock<BeaconForkChoice>
}
```
Apart from ergonomics, the only *actual* change here is wrapping the canonical head snapshot in an `Arc`. This means that we no longer need to hold the `cached_head` (`canonical_head`, in old terms) lock when we want to pull some values from it. This was done to avoid deadlock risks by preventing functions from acquiring (and holding) the `cached_head` and `fork_choice` locks simultaneously.
## Breaking Changes
### The `state` (root) field in the `finalized_checkpoint` SSE event
Consider the scenario where epoch `n` is just finalized, but `start_slot(n)` is skipped. There are two state roots we might use in the `finalized_checkpoint` SSE event:
1. The state root of the finalized block, which is `get_block(finalized_checkpoint.root).state_root`.
2. The state root at `start_slot(n)`, which would be the state from (1), but "skipped forward" through any skip slots.
Previously, Lighthouse would choose (2). However, we can see that when [Teku generates that event](https://github.com/ConsenSys/teku/blob/de2b2801c89ef5abf983d6bf37867c37fc47121f/data/beaconrestapi/src/main/java/tech/pegasys/teku/beaconrestapi/handlers/v1/events/EventSubscriptionManager.java#L171-L182) it uses [`getStateRootFromBlockRoot`](https://github.com/ConsenSys/teku/blob/de2b2801c89ef5abf983d6bf37867c37fc47121f/data/provider/src/main/java/tech/pegasys/teku/api/ChainDataProvider.java#L336-L341) which uses (1).
I have switched Lighthouse from (2) to (1). I think it's a somewhat arbitrary choice between the two, where (1) is easier to compute and is consistent with Teku.
## Notes for Reviewers
I've renamed `BeaconChain::fork_choice` to `BeaconChain::recompute_head`. Doing this helped ensure I broke all previous uses of fork choice and I also find it more descriptive. It describes an action and can't be confused with trying to get a reference to the `ForkChoice` struct.
I've changed the ordering of SSE events when a block is received. It used to be `[block, finalized, head]` and now it's `[block, head, finalized]`. It was easier this way and I don't think we were making any promises about SSE event ordering so it's not "breaking".
I've made it so fork choice will run when it's first constructed. I did this because I wanted to have a cached version of the last call to `get_head`. Ensuring `get_head` has been run *at least once* means that the cached values don't need to be wrapped in an `Option`. This was fairly simple, it just involved passing a `slot` to the constructor so it knows *when* it's being run. When loading a fork choice from the store and a slot clock isn't handy I've just used the `slot` that was saved in the `fork_choice_store`. That seems like it would be a faithful representation of the slot when we saved it.
I added the `genesis_time: u64` to the `BeaconChain`. It's small, constant and nice to have around.
Since we're using FC for the fin/just checkpoints, we no longer get the `0x00..00` roots at genesis. You can see I had to remove a work-around in `ef-tests` here: b56be3bc2. I can't find any reason why this would be an issue, if anything I think it'll be better since the genesis-alias has caught us out a few times (0x00..00 isn't actually a real root). Edit: I did find a case where the `network` expected the 0x00..00 alias and patched it here: 3f26ac3e2.
You'll notice a lot of changes in tests. Generally, tests should be functionally equivalent. Here are the things creating the most diff-noise in tests:
- Changing tests to be `tokio::async` tests.
- Adding `.await` to fork choice, block processing and block production functions.
- Refactor of the `canonical_head` "API" provided by the `BeaconChain`. E.g., `chain.canonical_head.cached_head()` instead of `chain.canonical_head.read()`.
- Wrapping `SignedBeaconBlock` in an `Arc`.
- In the `beacon_chain/tests/block_verification`, we can't use the `lazy_static` `CHAIN_SEGMENT` variable anymore since it's generated with an async function. We just generate it in each test, not so efficient but hopefully insignificant.
I had to disable `rayon` concurrent tests in the `fork_choice` tests. This is because the use of `rayon` and `block_on` was causing a panic.
Co-authored-by: Mac L <mjladson@pm.me>
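To make the locking change above concrete, here is a minimal, self-contained sketch of the `Arc`-behind-`RwLock` shape, assuming `std::sync::RwLock` and placeholder `Snapshot`/`BeaconForkChoice` types rather than the real Lighthouse definitions:
```rust
use std::sync::{Arc, RwLock};

// Placeholder types; the real Lighthouse structs carry much more state.
struct Snapshot;
struct BeaconForkChoice;

struct CanonicalHead {
    // The snapshot sits behind an `Arc`, so readers clone the handle and
    // release the lock instead of holding it while inspecting the head.
    cached_head: RwLock<Arc<Snapshot>>,
    #[allow(dead_code)]
    fork_choice: RwLock<BeaconForkChoice>,
}

impl CanonicalHead {
    /// Returns a cheap handle to the current head snapshot. The read lock is
    /// dropped before this function returns, so callers never hold
    /// `cached_head` and `fork_choice` at the same time.
    fn cached_head(&self) -> Arc<Snapshot> {
        self.cached_head.read().expect("lock poisoned").clone()
    }
}

fn main() {
    let head = CanonicalHead {
        cached_head: RwLock::new(Arc::new(Snapshot)),
        fork_choice: RwLock::new(BeaconForkChoice),
    };
    // The returned `Arc` outlives the lock guard and can be read freely.
    let _snapshot = head.cached_head();
}
```
The real `canonical_head` carries more machinery around these two locks; the sketch only shows why cloning the `Arc` removes the need to hold `cached_head` while reading.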
                let (block, _) = chain
                    .produce_block_with_verification::<FullPayload<T::EthSpec>>(
                        randao_reveal,
                        slot,
                        query.graffiti.map(Into::into),
                        randao_verification,
                    )
                    .await
                    .map_err(warp_utils::reject::block_production_error)?;

                let fork_name = block
                    .to_ref()
                    .fork_name(&chain.spec)
                    .map_err(inconsistent_fork_rejection)?;

                fork_versioned_response(endpoint_version, fork_name, block)
                    .map(|response| warp::reply::json(&response).into_response())
                    .map(|res| add_consensus_version_header(res, fork_name))
            },
        );

    // GET validator/blinded_blocks/{slot}
    let get_validator_blinded_blocks = eth_v1
        .and(warp::path("validator"))
        .and(warp::path("blinded_blocks"))
        .and(warp::path::param::<Slot>().or_else(|_| async {
            Err(warp_utils::reject::custom_bad_request(
                "Invalid slot".to_string(),
            ))
        }))
        .and(warp::path::end())
        .and(not_while_syncing_filter.clone())
        .and(warp::query::<api_types::ValidatorBlocksQuery>())
        .and(chain_filter.clone())
        .and_then(
            |slot: Slot,
             query: api_types::ValidatorBlocksQuery,
             chain: Arc<BeaconChain<T>>| async move {
                let randao_reveal = query.randao_reveal.decompress().map_err(|e| {
                    warp_utils::reject::custom_bad_request(format!(
                        "randao reveal is not a valid BLS signature: {:?}",
                        e
                    ))
                })?;

                let randao_verification =
                    if query.skip_randao_verification == SkipRandaoVerification::Yes {
                        if !randao_reveal.is_infinity() {
                            return Err(warp_utils::reject::custom_bad_request(
                                "randao_reveal must be point-at-infinity if verification is skipped"
                                    .into()
                            ));
                        }
                        ProduceBlockVerification::NoVerification
                    } else {
                        ProduceBlockVerification::VerifyRandao
                    };

                let (block, _) = chain
                    .produce_block_with_verification::<BlindedPayload<T::EthSpec>>(
                        randao_reveal,
                        slot,
                        query.graffiti.map(Into::into),
                        randao_verification,
                    )
                    .await
                    .map_err(warp_utils::reject::block_production_error)?;

                let fork_name = block
                    .to_ref()
                    .fork_name(&chain.spec)
                    .map_err(inconsistent_fork_rejection)?;

                // Pose as a V2 endpoint so we return the fork `version`.
                fork_versioned_response(V2, fork_name, block)
                    .map(|response| warp::reply::json(&response).into_response())
                    .map(|res| add_consensus_version_header(res, fork_name))
            },
        );

    // GET validator/attestation_data?slot,committee_index
    let get_validator_attestation_data = eth_v1
        .and(warp::path("validator"))
        .and(warp::path("attestation_data"))
        .and(warp::path::end())
        .and(warp::query::<api_types::ValidatorAttestationDataQuery>())
        .and(not_while_syncing_filter.clone())
        .and(chain_filter.clone())
        .and_then(
            |query: api_types::ValidatorAttestationDataQuery, chain: Arc<BeaconChain<T>>| {
                blocking_json_task(move || {
                    let current_slot = chain
                        .slot()
                        .map_err(warp_utils::reject::beacon_chain_error)?;

                    // allow a tolerance of one slot to account for clock skew
                    if query.slot > current_slot + 1 {
                        return Err(warp_utils::reject::custom_bad_request(format!(
                            "request slot {} is more than one slot past the current slot {}",
                            query.slot, current_slot
                        )));
                    }

                    chain
                        .produce_unaggregated_attestation(query.slot, query.committee_index)
                        .map(|attestation| attestation.data)
                        .map(api_types::GenericResponse::from)
                        .map_err(warp_utils::reject::beacon_chain_error)
                })
            },
        );

    // GET validator/aggregate_attestation?attestation_data_root,slot
    let get_validator_aggregate_attestation = eth_v1
        .and(warp::path("validator"))
        .and(warp::path("aggregate_attestation"))
        .and(warp::path::end())
        .and(warp::query::<api_types::ValidatorAggregateAttestationQuery>())
        .and(not_while_syncing_filter.clone())
        .and(chain_filter.clone())
        .and_then(
            |query: api_types::ValidatorAggregateAttestationQuery, chain: Arc<BeaconChain<T>>| {
                blocking_json_task(move || {
                    chain
                        .get_aggregated_attestation_by_slot_and_root(
                            query.slot,
                            &query.attestation_data_root,
                        )
                        .map_err(|e| {
                            warp_utils::reject::custom_bad_request(format!(
                                "unable to fetch aggregate: {:?}",
                                e
                            ))
                        })?
                        .map(api_types::GenericResponse::from)
                        .ok_or_else(|| {
                            warp_utils::reject::custom_not_found(
                                "no matching aggregate found".to_string(),
                            )
                        })
                })
            },
        );

    // POST validator/duties/attester/{epoch}
    let post_validator_duties_attester = eth_v1
        .and(warp::path("validator"))
        .and(warp::path("duties"))
        .and(warp::path("attester"))
        .and(warp::path::param::<Epoch>().or_else(|_| async {
            Err(warp_utils::reject::custom_bad_request(
                "Invalid epoch".to_string(),
            ))
        }))
        .and(warp::path::end())
        .and(not_while_syncing_filter.clone())
        .and(warp::body::json())
        .and(chain_filter.clone())
        .and_then(
            |epoch: Epoch, indices: api_types::ValidatorIndexData, chain: Arc<BeaconChain<T>>| {
                blocking_json_task(move || {
                    attester_duties::attester_duties(epoch, &indices.0, &chain)
                })
            },
        );

    // POST validator/duties/sync
    let post_validator_duties_sync = eth_v1
        .and(warp::path("validator"))
        .and(warp::path("duties"))
        .and(warp::path("sync"))
        .and(warp::path::param::<Epoch>().or_else(|_| async {
            Err(warp_utils::reject::custom_bad_request(
                "Invalid epoch".to_string(),
            ))
        }))
        .and(warp::path::end())
        .and(not_while_syncing_filter.clone())
        .and(warp::body::json())
        .and(chain_filter.clone())
        .and_then(
            |epoch: Epoch, indices: api_types::ValidatorIndexData, chain: Arc<BeaconChain<T>>| {
                blocking_json_task(move || {
                    sync_committees::sync_committee_duties(epoch, &indices.0, &chain)
                })
            },
        );

    // GET validator/sync_committee_contribution
    let get_validator_sync_committee_contribution = eth_v1
        .and(warp::path("validator"))
        .and(warp::path("sync_committee_contribution"))
        .and(warp::path::end())
        .and(warp::query::<SyncContributionData>())
        .and(not_while_syncing_filter.clone())
        .and(chain_filter.clone())
        .and_then(
            |sync_committee_data: SyncContributionData, chain: Arc<BeaconChain<T>>| {
                blocking_json_task(move || {
                    chain
                        .get_aggregated_sync_committee_contribution(&sync_committee_data)
                        .map_err(|e| {
                            warp_utils::reject::custom_bad_request(format!(
                                "unable to fetch sync contribution: {:?}",
                                e
                            ))
                        })?
                        .map(api_types::GenericResponse::from)
                        .ok_or_else(|| {
                            warp_utils::reject::custom_not_found(
                                "no matching sync contribution found".to_string(),
                            )
                        })
                })
            },
        );

    // POST validator/aggregate_and_proofs
    let post_validator_aggregate_and_proofs = eth_v1
        .and(warp::path("validator"))
        .and(warp::path("aggregate_and_proofs"))
        .and(warp::path::end())
        .and(not_while_syncing_filter.clone())
        .and(chain_filter.clone())
        .and(warp::body::json())
        .and(network_tx_filter.clone())
        .and(log_filter.clone())
        .and_then(
            |chain: Arc<BeaconChain<T>>,
             aggregates: Vec<SignedAggregateAndProof<T::EthSpec>>,
             network_tx: UnboundedSender<NetworkMessage<T::EthSpec>>, log: Logger| {
                blocking_json_task(move || {
                    let seen_timestamp = timestamp_now();
                    let mut verified_aggregates = Vec::with_capacity(aggregates.len());
                    let mut messages = Vec::with_capacity(aggregates.len());
                    let mut failures = Vec::new();

                    // Verify that all messages in the post are valid before processing further
Batch BLS verification for attestations (#2399)
## Issue Addressed
NA
## Proposed Changes
Adds the ability to verify batches of aggregated/unaggregated attestations from the network.
When the `BeaconProcessor` finds there are messages in the aggregated or unaggregated attestation queues, it will first check the length of the queue:
- `== 1` verify the attestation individually.
- `>= 2` take up to 64 of those attestations and verify them in a batch.
Notably, we only perform batch verification if the queue has a backlog. We don't apply any artificial delays to attestations to try and force them into batches.
### Batching Details
To assist with implementing batches we modify `beacon_chain::attestation_verification` to have two distinct categories for attestations:
- *Indexed* attestations: those which have passed initial validation and were valid enough for us to derive an `IndexedAttestation`.
- *Verified* attestations: those attestations which were indexed *and also* passed signature verification. These are well-formed, interesting messages which were signed by validators.
The batching functions accept `n` attestations and then return `n` attestation verification `Result`s, where those `Result`s can be any combination of `Ok` or `Err`. In other words, we attempt to verify as many attestations as possible and return specific per-attestation results so peer scores can be updated, if required.
When we batch verify attestations, we first try to map all those attestations to *indexed* attestations. If any of those attestations were able to be indexed, we then perform batch BLS verification on those indexed attestations. If the batch verification succeeds, we convert them into *verified* attestations, disabling individual signature checking. If the batch fails, we convert to verified attestations with individual signature checking enabled.
Ultimately, we optimistically try to do a batch verification of attestation signatures and fall back to individual verification if it fails. This opens an attack vector for "poisoning" the attestations and causing us to waste a batch verification. I argue that peer scoring should do a good-enough job of defending against this and the typical-case gains massively outweigh the worst-case losses.
## Additional Info
Before this PR, attestation verification took the attestations by value (instead of by reference). It turns out that this was unnecessary and, in my opinion, resulted in some undesirable ergonomics (e.g., we had to pass the attestation back in the `Err` variant to avoid clones). In this PR I've modified attestation verification so that it now takes a reference.
I refactored the `beacon_chain/tests/attestation_verification.rs` tests so they use a builder-esque "tester" struct instead of a weird macro. It made it easier for me to test individual/batch with the same set of tests and I think it was a nice tidy-up. Notably, I did this last to try and make sure my new refactors to *actual* production code would pass under the existing test suite.
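As a rough, hypothetical sketch of the queueing policy described above (verify a lone attestation individually, batch a backlog in groups of up to 64, and fall back to per-item checks when a batch fails), where `drain_queue`, `verify_batch` and `verify_one` are illustrative stand-ins rather than `BeaconProcessor` APIs:
```rust
/// Maximum number of attestations verified in one batched signature check.
const MAX_BATCH: usize = 64;

/// Hypothetical helper, not a Lighthouse API: drains a queue of attestations,
/// batching when there is a backlog and falling back to individual checks if
/// the batched verification fails.
fn drain_queue<A, Fb, Fo>(
    queue: &mut Vec<A>,
    verify_batch: Fb,
    verify_one: Fo,
) -> Vec<Result<A, String>>
where
    Fb: Fn(&[A]) -> bool,
    Fo: Fn(&A) -> Result<A, String>,
{
    let mut results = Vec::new();
    while !queue.is_empty() {
        if queue.len() == 1 {
            // No backlog: verify the single attestation on its own.
            let attestation = queue.remove(0);
            results.push(verify_one(&attestation));
        } else {
            // Backlog: take up to 64 items and attempt one batched BLS check.
            let take = queue.len().min(MAX_BATCH);
            let batch: Vec<A> = queue.drain(..take).collect();
            if verify_batch(&batch) {
                // Batch passed: accept every item without re-checking
                // signatures individually.
                results.extend(batch.into_iter().map(Ok));
            } else {
                // Batch failed (possibly "poisoned"): fall back to individual
                // verification so per-item results can drive peer scoring.
                results.extend(batch.iter().map(|a| verify_one(a)));
            }
        }
    }
    results
}

fn main() {
    // Toy usage: "attestations" are integers and even numbers are valid.
    let mut queue = vec![2u32, 4, 5, 6];
    let results = drain_queue(
        &mut queue,
        |batch| batch.iter().all(|a| a % 2 == 0),
        |a| {
            if a % 2 == 0 {
                Ok(*a)
            } else {
                Err(format!("invalid attestation {a}"))
            }
        },
    );
    assert_eq!(results.iter().filter(|r| r.is_err()).count(), 1);
}
```
The real code works on `IndexedAttestation`s and returns per-attestation `Result`s precisely so peer scores can be updated; the sketch only mirrors that control flow.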
                    for (index, aggregate) in aggregates.iter().enumerate() {
                        match chain.verify_aggregated_attestation_for_gossip(aggregate) {
                            Ok(verified_aggregate) => {
                                messages.push(PubsubMessage::AggregateAndProofAttestation(Box::new(
                                    verified_aggregate.aggregate().clone(),
                                )));

                                // Notify the validator monitor.
                                chain
                                    .validator_monitor
                                    .read()
                                    .register_api_aggregated_attestation(
                                        seen_timestamp,
                                        verified_aggregate.aggregate(),
                                        verified_aggregate.indexed_attestation(),
                                        &chain.slot_clock,
                                    );

                                verified_aggregates.push((index, verified_aggregate));
                            }
                            // If we already know the attestation, don't broadcast it or attempt to
                            // further verify it. Return success.
                            //
                            // It's reasonably likely that two different validators produce
                            // identical aggregates, especially if they're using the same beacon
                            // node.
                            Err(AttnError::AttestationSupersetKnown(_)) => continue,
                            // If we've already seen this aggregator produce an aggregate, just
                            // skip this one.
                            //
                            // We're likely to see this with VCs that use fallback BNs. The first
                            // BN might time-out *after* publishing the aggregate and then the
                            // second BN will indicate it's already seen the aggregate.
                            //
                            // There's no actual error for the user or the network since the
                            // aggregate has been successfully published by some other node.
                            Err(AttnError::AggregatorAlreadyKnown(_)) => continue,
                            Err(e) => {
                                error!(log,
                                    "Failure verifying aggregate and proofs";
                                    "error" => format!("{:?}", e),
                                    "request_index" => index,
                                    "aggregator_index" => aggregate.message.aggregator_index,
                                    "attestation_index" => aggregate.message.aggregate.data.index,
                                    "attestation_slot" => aggregate.message.aggregate.data.slot,
                                );
                                failures.push(api_types::Failure::new(index, format!("Verification: {:?}", e)));
                            }
                        }
                    }

                    // Publish aggregate attestations to the libp2p network
                    if !messages.is_empty() {
                        publish_network_message(&network_tx, NetworkMessage::Publish { messages })?;
                    }

                    // Import aggregate attestations
                    for (index, verified_aggregate) in verified_aggregates {
                        if let Err(e) = chain.apply_attestation_to_fork_choice(&verified_aggregate) {
                            error!(log,
                                "Failure applying verified aggregate attestation to fork choice";
                                "error" => format!("{:?}", e),
                                "request_index" => index,
                                "aggregator_index" => verified_aggregate.aggregate().message.aggregator_index,
                                "attestation_index" => verified_aggregate.attestation().data.index,
                                "attestation_slot" => verified_aggregate.attestation().data.slot,
                            );
                            failures.push(api_types::Failure::new(index, format!("Fork choice: {:?}", e)));
                        }
Refactor op pool for speed and correctness (#3312)
## Proposed Changes
This PR has two aims: to speed up attestation packing in the op pool, and to fix bugs in the verification of attester slashings, proposer slashings and voluntary exits. The changes are bundled into a single database schema upgrade (v12).
Attestation packing is sped up by removing several inefficiencies:
- No more recalculation of `attesting_indices` during packing.
- No (unnecessary) examination of the `ParticipationFlags`: a bitfield suffices. See `RewardCache`.
- No re-checking of attestation validity during packing: the `AttestationMap` provides attestations which are "correct by construction" (I have checked this using Hydra).
- No SSZ re-serialization for the clunky `AttestationId` type (it can be removed in a future release).
So far the speed-up seems to be roughly 2-10x, from 500ms down to 50-100ms.
Verification of attester slashings, proposer slashings and voluntary exits is fixed by:
- Tracking the `ForkVersion`s that were used to verify each message inside the `SigVerifiedOp`. This allows us to quickly re-verify that they match the head state's opinion of what the `ForkVersion` should be at the epoch(s) relevant to the message.
- Storing the `SigVerifiedOp` on disk rather than the raw operation. This allows us to continue to track the fork versions after a reboot.
This is mostly contained in this commit 52bb1840ae5c4356a8fc3a51e5df23ed65ed2c7f.
## Additional Info
The schema upgrade uses the justified state to re-verify attestations and compute `attesting_indices` for them. It will drop any attestations that fail to verify, by the logic that attestations are most valuable in the few slots after they're observed, and are probably stale and useless by the time a node restarts. Exits and proposer slashings are similarly re-verified to obtain `SigVerifiedOp`s.
This PR contains a runtime killswitch `--paranoid-block-proposal` which opts out of all the optimisations in favour of closely verifying every included message. Although I'm quite sure that the optimisations are correct this flag could be useful in the event of an unforeseen emergency.
Finally, you might notice that the `RewardCache` appears quite useless in its current form because it is only updated on the hot-path immediately before proposal. My hope is that in future we can shift calls to `RewardCache::update` into the background, e.g. while performing the state advance. It is also forward-looking to `tree-states` compatibility, where iterating and indexing `state.{previous,current}_epoch_participation` is expensive and needs to be minimised.
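A hypothetical sketch of the fork-version tracking described above, where `SigVerifiedOp`, `ForkVersion` and `if_still_valid` are illustrative names rather than the real Lighthouse types:
```rust
// Remember the fork version(s) a message was verified against and only reuse
// the cached verification if the head state still agrees.
type ForkVersion = [u8; 4];
type Epoch = u64;

struct SigVerifiedOp<T> {
    op: T,
    // Fork version recorded for each epoch that was relevant when the
    // signature was originally verified.
    verified_against: Vec<(Epoch, ForkVersion)>,
}

impl<T> SigVerifiedOp<T> {
    /// Returns `Some(&op)` only if every fork version recorded at verification
    /// time still matches what the head state reports for that epoch, so the
    /// earlier signature check remains valid without re-doing any BLS work.
    fn if_still_valid(&self, fork_version_at: impl Fn(Epoch) -> ForkVersion) -> Option<&T> {
        let still_valid = self
            .verified_against
            .iter()
            .all(|(epoch, version)| fork_version_at(*epoch) == *version);
        if still_valid {
            Some(&self.op)
        } else {
            None
        }
    }
}

fn main() {
    let exit = SigVerifiedOp {
        op: "voluntary_exit",
        verified_against: vec![(10, [0, 0, 0, 1])],
    };
    // The head state still reports fork version 0x00000001 at epoch 10, so the
    // cached verification can be reused when packing a block.
    assert!(exit.if_still_valid(|_| [0, 0, 0, 1]).is_some());
    // A different fork version means the message must be re-verified (or dropped).
    assert!(exit.if_still_valid(|_| [0, 0, 0, 2]).is_none());
}
```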
                        if let Err(e) = chain.add_to_block_inclusion_pool(verified_aggregate) {
                            warn!(
                                log,
                                "Could not add verified aggregate attestation to the inclusion pool";
                                "error" => ?e,
                                "request_index" => index,
                            );
                            failures.push(api_types::Failure::new(index, format!("Op pool: {:?}", e)));
                        }
                    }

                    if !failures.is_empty() {
                        Err(warp_utils::reject::indexed_bad_request(
                            "error processing aggregate and proofs".to_string(),
                            failures,
                        ))
                    } else {
                        Ok(())
                    }
                })
            },
        );

    let post_validator_contribution_and_proofs = eth_v1
        .and(warp::path("validator"))
        .and(warp::path("contribution_and_proofs"))
        .and(warp::path::end())
        .and(not_while_syncing_filter.clone())
        .and(chain_filter.clone())
        .and(warp::body::json())
        .and(network_tx_filter)
        .and(log_filter.clone())
        .and_then(
            |chain: Arc<BeaconChain<T>>,
             contributions: Vec<SignedContributionAndProof<T::EthSpec>>,
             network_tx: UnboundedSender<NetworkMessage<T::EthSpec>>,
             log: Logger| {
                blocking_json_task(move || {
                    sync_committees::process_signed_contribution_and_proofs(
                        contributions,
                        network_tx,
                        &chain,
                        log,
                    )?;
                    Ok(api_types::GenericResponse::from(()))
                })
            },
        );

    // POST validator/beacon_committee_subscriptions
    let post_validator_beacon_committee_subscriptions = eth_v1
        .and(warp::path("validator"))
        .and(warp::path("beacon_committee_subscriptions"))
        .and(warp::path::end())
        .and(warp::body::json())
        .and(validator_subscription_tx_filter.clone())
        .and(chain_filter.clone())
        .and(log_filter.clone())
        .and_then(
            |subscriptions: Vec<api_types::BeaconCommitteeSubscription>,
             validator_subscription_tx: Sender<ValidatorSubscriptionMessage>,
             chain: Arc<BeaconChain<T>>,
             log: Logger| {
                blocking_json_task(move || {
                    for subscription in &subscriptions {
                        chain
                            .validator_monitor
                            .write()
                            .auto_register_local_validator(subscription.validator_index);

                        let validator_subscription = api_types::ValidatorSubscription {
                            validator_index: subscription.validator_index,
                            attestation_committee_index: subscription.committee_index,
                            slot: subscription.slot,
                            committee_count_at_slot: subscription.committees_at_slot,
                            is_aggregator: subscription.is_aggregator,
                        };

                        let message = ValidatorSubscriptionMessage::AttestationSubscribe {
                            subscriptions: vec![validator_subscription],
                        };
                        if let Err(e) = validator_subscription_tx.try_send(message) {
                            warn!(
                                log,
                                "Unable to process committee subscriptions";
                                "info" => "the host may be overloaded or resource-constrained",
                                "error" => ?e,
                            );
                            return Err(warp_utils::reject::custom_server_error(
                                "unable to queue subscription, host may be overloaded or shutting down".to_string(),
                            ));
                        }
                    }

                    Ok(())
                })
            },
        );

    // POST validator/prepare_beacon_proposer
    let post_validator_prepare_beacon_proposer = eth_v1
        .and(warp::path("validator"))
        .and(warp::path("prepare_beacon_proposer"))
        .and(warp::path::end())
        .and(not_while_syncing_filter.clone())
        .and(chain_filter.clone())
        .and(log_filter.clone())
        .and(warp::body::json())
        .and_then(
            |chain: Arc<BeaconChain<T>>,
             log: Logger,
             preparation_data: Vec<ProposerPreparationData>| async move {
                let execution_layer = chain
                    .execution_layer
                    .as_ref()
                    .ok_or(BeaconChainError::ExecutionLayerMissing)
                    .map_err(warp_utils::reject::beacon_chain_error)?;
Use async code when interacting with EL (#3244)
## Overview
This rather extensive PR achieves two primary goals:
1. Uses the finalized/justified checkpoints of fork choice (FC), rather than that of the head state.
2. Refactors fork choice, block production and block processing to `async` functions.
Additionally, it achieves:
- Concurrent forkchoice updates to the EL and cache pruning after a new head is selected.
- Concurrent "block packing" (attestations, etc) and execution payload retrieval during block production.
- Concurrent per-block-processing and execution payload verification during block processing.
- The `Arc`-ification of `SignedBeaconBlock` during block processing (it's never mutated, so why not?):
- I had to do this to deal with sending blocks into spawned tasks.
- Previously we were cloning the beacon block at least 2 times during each block processing, these clones are either removed or turned into cheaper `Arc` clones.
- We were also `Box`-ing and un-`Box`-ing beacon blocks as they moved throughout the networking crate. This is not a big deal, but it's nice to avoid shifting things between the stack and heap.
- Avoids cloning *all the blocks* in *every chain segment* during sync.
- It also has the potential to clean up our code where we need to pass an *owned* block around so we can send it back in the case of an error (I didn't do much of this, my PR is already big enough :sweat_smile:)
- The `BeaconChain::HeadSafetyStatus` struct was removed. It was an old relic from prior merge specs.
For motivation for this change, see https://github.com/sigp/lighthouse/pull/3244#issuecomment-1160963273
## Changes to `canonical_head` and `fork_choice`
Previously, the `BeaconChain` had two separate fields:
```
canonical_head: RwLock<Snapshot>,
fork_choice: RwLock<BeaconForkChoice>
```
Now, we have grouped these values under a single struct:
```
canonical_head: CanonicalHead {
cached_head: RwLock<Arc<Snapshot>>,
fork_choice: RwLock<BeaconForkChoice>
}
```
Apart from ergonomics, the only *actual* change here is wrapping the canonical head snapshot in an `Arc`. This means that we no longer need to hold the `cached_head` (`canonical_head`, in old terms) lock when we want to pull some values from it. This was done to avoid deadlock risks by preventing functions from acquiring (and holding) the `cached_head` and `fork_choice` locks simultaneously.
## Breaking Changes
### The `state` (root) field in the `finalized_checkpoint` SSE event
Consider the scenario where epoch `n` is just finalized, but `start_slot(n)` is skipped. There are two state roots we might in the `finalized_checkpoint` SSE event:
1. The state root of the finalized block, which is `get_block(finalized_checkpoint.root).state_root`.
4. The state root at slot of `start_slot(n)`, which would be the state from (1), but "skipped forward" through any skip slots.
Previously, Lighthouse would choose (2). However, we can see that when [Teku generates that event](https://github.com/ConsenSys/teku/blob/de2b2801c89ef5abf983d6bf37867c37fc47121f/data/beaconrestapi/src/main/java/tech/pegasys/teku/beaconrestapi/handlers/v1/events/EventSubscriptionManager.java#L171-L182) it uses [`getStateRootFromBlockRoot`](https://github.com/ConsenSys/teku/blob/de2b2801c89ef5abf983d6bf37867c37fc47121f/data/provider/src/main/java/tech/pegasys/teku/api/ChainDataProvider.java#L336-L341) which uses (1).
I have switched Lighthouse from (2) to (1). I think it's a somewhat arbitrary choice between the two, where (1) is easier to compute and is consistent with Teku.
## Notes for Reviewers
I've renamed `BeaconChain::fork_choice` to `BeaconChain::recompute_head`. Doing this helped ensure I broke all previous uses of fork choice and I also find it more descriptive. It describes an action and can't be confused with trying to get a reference to the `ForkChoice` struct.
I've changed the ordering of SSE events when a block is received. It used to be `[block, finalized, head]` and now it's `[block, head, finalized]`. It was easier this way and I don't think we were making any promises about SSE event ordering so it's not "breaking".
I've made it so fork choice will run when it's first constructed. I did this because I wanted to have a cached version of the last call to `get_head`. Ensuring `get_head` has been run *at least once* means that the cached values doesn't need to wrapped in an `Option`. This was fairly simple, it just involved passing a `slot` to the constructor so it knows *when* it's being run. When loading a fork choice from the store and a slot clock isn't handy I've just used the `slot` that was saved in the `fork_choice_store`. That seems like it would be a faithful representation of the slot when we saved it.
I added the `genesis_time: u64` to the `BeaconChain`. It's small, constant and nice to have around.
Since we're using FC for the fin/just checkpoints, we no longer get the `0x00..00` roots at genesis. You can see I had to remove a work-around in `ef-tests` here: b56be3bc2. I can't find any reason why this would be an issue, if anything I think it'll be better since the genesis-alias has caught us out a few times (0x00..00 isn't actually a real root). Edit: I did find a case where the `network` expected the 0x00..00 alias and patched it here: 3f26ac3e2.
You'll notice a lot of changes in tests. Generally, tests should be functionally equivalent. Here are the things creating the most diff-noise in tests:
- Changing tests to be `tokio::async` tests.
- Adding `.await` to fork choice, block processing and block production functions.
- Refactor of the `canonical_head` "API" provided by the `BeaconChain`. E.g., `chain.canonical_head.cached_head()` instead of `chain.canonical_head.read()`.
- Wrapping `SignedBeaconBlock` in an `Arc`.
- In the `beacon_chain/tests/block_verification`, we can't use the `lazy_static` `CHAIN_SEGMENT` variable anymore since it's generated with an async function. We just generate it in each test, not so efficient but hopefully insignificant.
I had to disable `rayon` concurrent tests in the `fork_choice` tests. This is because the use of `rayon` and `block_on` was causing a panic.
Co-authored-by: Mac L <mjladson@pm.me>
2022-07-03 05:36:50 +00:00
|
|
|
                let current_slot = chain
                    .slot()
                    .map_err(warp_utils::reject::beacon_chain_error)?;
                let current_epoch = current_slot.epoch(T::EthSpec::slots_per_epoch());

                debug!(
                    log,
                    "Received proposer preparation data";
                    "count" => preparation_data.len(),
                );

                execution_layer
                    .update_proposer_preparation(current_epoch, &preparation_data)
                    .await;

                chain
                    .prepare_beacon_proposer(current_slot)
                    .await
                    .map_err(|e| {
                        warp_utils::reject::custom_bad_request(format!(
                            "error updating proposer preparations: {:?}",
                            e
                        ))
                    })?;

                Ok::<_, warp::reject::Rejection>(warp::reply::json(&()).into_response())
            },
        );
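
    // A hedged usage sketch for the route above (not taken from this file): the
    // `/eth/v1/validator/prepare_beacon_proposer` path, the default `5052` port and the
    // example body are assumptions based on the standard beacon-API conventions.
    //
    //   curl -X POST "http://localhost:5052/eth/v1/validator/prepare_beacon_proposer" \
    //     -H "Content-Type: application/json" \
    //     -d '[{"validator_index": "1", "fee_recipient": "0x0000000000000000000000000000000000000000"}]'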

    // POST validator/register_validator
    let post_validator_register_validator = eth_v1
        .and(warp::path("validator"))
        .and(warp::path("register_validator"))
        .and(warp::path::end())
        .and(chain_filter.clone())
        .and(log_filter.clone())
        .and(warp::body::json())
        .and_then(
            |chain: Arc<BeaconChain<T>>,
             log: Logger,
             register_val_data: Vec<SignedValidatorRegistrationData>| async move {
                let execution_layer = chain
                    .execution_layer
                    .as_ref()
                    .ok_or(BeaconChainError::ExecutionLayerMissing)
                    .map_err(warp_utils::reject::beacon_chain_error)?;
                let current_slot = chain
                    .slot_clock
                    .now_or_genesis()
                    .ok_or(BeaconChainError::UnableToReadSlot)
                    .map_err(warp_utils::reject::beacon_chain_error)?;
                let current_epoch = current_slot.epoch(T::EthSpec::slots_per_epoch());
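
                // Assumption: `now_or_genesis()` above returns the genesis slot while the slot
                // clock has not yet started, so registrations submitted before genesis still
                // resolve to a usable slot and epoch.
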
                debug!(
                    log,
                    "Received register validator request";
                    "count" => register_val_data.len(),
                );

                let head_snapshot = chain.head_snapshot();
                let spec = &chain.spec;

                let (preparation_data, filtered_registration_data): (
                    Vec<ProposerPreparationData>,
                    Vec<SignedValidatorRegistrationData>,
                ) = register_val_data
                    .into_iter()
                    .filter_map(|register_data| {
                        chain
                            .validator_index(&register_data.message.pubkey)
                            .ok()
                            .flatten()
                            .and_then(|validator_index| {
                                let validator = head_snapshot
                                    .beacon_state
                                    .get_validator(validator_index)
                                    .ok()?;
                                let validator_status = ValidatorStatus::from_validator(
                                    validator,
                                    current_epoch,
                                    spec.far_future_epoch,
                                )
                                .superstatus();
                                let is_active_or_pending =
                                    matches!(validator_status, ValidatorStatus::Pending)
                                        || matches!(validator_status, ValidatorStatus::Active);

                                // Filter out validators who are not 'active' or 'pending'.
                                is_active_or_pending.then_some({
                                    (
                                        ProposerPreparationData {
                                            validator_index: validator_index as u64,
                                            fee_recipient: register_data.message.fee_recipient,
                                        },
                                        register_data,
                                    )
                                })
                            })
                    })
                    .unzip();
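
                // The `filter_map(..).unzip()` above yields two parallel collections: the
                // `ProposerPreparationData` entries used to update the execution layer's
                // proposer-preparation cache below, and the signed registrations that the rest
                // of this handler keeps working with (presumably for forwarding to an external
                // builder, if one is configured).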

                // Update the prepare beacon proposer cache based on this request.
                execution_layer
                    .update_proposer_preparation(current_epoch, &preparation_data)
                    .await;
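
                // Assumption about the wider flow: the fee recipients cached here are what the
                // node later attaches as payload attributes when it sends forkchoice updates to
                // the execution layer.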
|
2022-06-30 00:49:21 +00:00
|
|
|
|
Use async code when interacting with EL (#3244)
## Overview
This rather extensive PR achieves two primary goals:
1. Uses the finalized/justified checkpoints of fork choice (FC), rather than that of the head state.
2. Refactors fork choice, block production and block processing to `async` functions.
Additionally, it achieves:
- Concurrent forkchoice updates to the EL and cache pruning after a new head is selected.
- Concurrent "block packing" (attestations, etc) and execution payload retrieval during block production.
- Concurrent per-block-processing and execution payload verification during block processing.
- The `Arc`-ification of `SignedBeaconBlock` during block processing (it's never mutated, so why not?):
- I had to do this to deal with sending blocks into spawned tasks.
- Previously we were cloning the beacon block at least 2 times during each block processing, these clones are either removed or turned into cheaper `Arc` clones.
- We were also `Box`-ing and un-`Box`-ing beacon blocks as they moved throughout the networking crate. This is not a big deal, but it's nice to avoid shifting things between the stack and heap.
- Avoids cloning *all the blocks* in *every chain segment* during sync.
- It also has the potential to clean up our code where we need to pass an *owned* block around so we can send it back in the case of an error (I didn't do much of this, my PR is already big enough :sweat_smile:)
- The `BeaconChain::HeadSafetyStatus` struct was removed. It was an old relic from prior merge specs.
For motivation for this change, see https://github.com/sigp/lighthouse/pull/3244#issuecomment-1160963273
## Changes to `canonical_head` and `fork_choice`
Previously, the `BeaconChain` had two separate fields:
```
canonical_head: RwLock<Snapshot>,
fork_choice: RwLock<BeaconForkChoice>
```
Now, we have grouped these values under a single struct:
```
canonical_head: CanonicalHead {
cached_head: RwLock<Arc<Snapshot>>,
fork_choice: RwLock<BeaconForkChoice>
}
```
Apart from ergonomics, the only *actual* change here is wrapping the canonical head snapshot in an `Arc`. This means that we no longer need to hold the `cached_head` (`canonical_head`, in old terms) lock when we want to pull some values from it. This was done to avoid deadlock risks by preventing functions from acquiring (and holding) the `cached_head` and `fork_choice` locks simultaneously.
## Breaking Changes
### The `state` (root) field in the `finalized_checkpoint` SSE event
Consider the scenario where epoch `n` is just finalized, but `start_slot(n)` is skipped. There are two state roots we might in the `finalized_checkpoint` SSE event:
1. The state root of the finalized block, which is `get_block(finalized_checkpoint.root).state_root`.
4. The state root at slot of `start_slot(n)`, which would be the state from (1), but "skipped forward" through any skip slots.
Previously, Lighthouse would choose (2). However, we can see that when [Teku generates that event](https://github.com/ConsenSys/teku/blob/de2b2801c89ef5abf983d6bf37867c37fc47121f/data/beaconrestapi/src/main/java/tech/pegasys/teku/beaconrestapi/handlers/v1/events/EventSubscriptionManager.java#L171-L182) it uses [`getStateRootFromBlockRoot`](https://github.com/ConsenSys/teku/blob/de2b2801c89ef5abf983d6bf37867c37fc47121f/data/provider/src/main/java/tech/pegasys/teku/api/ChainDataProvider.java#L336-L341) which uses (1).
I have switched Lighthouse from (2) to (1). I think it's a somewhat arbitrary choice between the two, where (1) is easier to compute and is consistent with Teku.
## Notes for Reviewers
I've renamed `BeaconChain::fork_choice` to `BeaconChain::recompute_head`. Doing this helped ensure I broke all previous uses of fork choice and I also find it more descriptive. It describes an action and can't be confused with trying to get a reference to the `ForkChoice` struct.
I've changed the ordering of SSE events when a block is received. It used to be `[block, finalized, head]` and now it's `[block, head, finalized]`. It was easier this way and I don't think we were making any promises about SSE event ordering so it's not "breaking".
I've made it so fork choice will run when it's first constructed. I did this because I wanted to have a cached version of the last call to `get_head`. Ensuring `get_head` has been run *at least once* means that the cached values don't need to be wrapped in an `Option`. This was fairly simple; it just involved passing a `slot` to the constructor so it knows *when* it's being run. When loading a fork choice from the store and a slot clock isn't handy I've just used the `slot` that was saved in the `fork_choice_store`. That seems like it would be a faithful representation of the slot when we saved it.
I added the `genesis_time: u64` to the `BeaconChain`. It's small, constant and nice to have around.
Since we're using FC for the fin/just checkpoints, we no longer get the `0x00..00` roots at genesis. You can see I had to remove a work-around in `ef-tests` here: b56be3bc2. I can't find any reason why this would be an issue, if anything I think it'll be better since the genesis-alias has caught us out a few times (0x00..00 isn't actually a real root). Edit: I did find a case where the `network` expected the 0x00..00 alias and patched it here: 3f26ac3e2.
You'll notice a lot of changes in tests. Generally, tests should be functionally equivalent. Here are the things creating the most diff-noise in tests:
- Changing tests to be `tokio::async` tests.
- Adding `.await` to fork choice, block processing and block production functions.
- Refactor of the `canonical_head` "API" provided by the `BeaconChain`. E.g., `chain.canonical_head.cached_head()` instead of `chain.canonical_head.read()`.
- Wrapping `SignedBeaconBlock` in an `Arc`.
- In the `beacon_chain/tests/block_verification`, we can't use the `lazy_static` `CHAIN_SEGMENT` variable anymore since it's generated with an async function. We just generate it in each test, not so efficient but hopefully insignificant.
I had to disable `rayon` concurrent tests in the `fork_choice` tests. This is because the use of `rayon` and `block_on` was causing a panic.
Co-authored-by: Mac L <mjladson@pm.me>
2022-07-03 05:36:50 +00:00
|
|
|
// Call prepare beacon proposer blocking with the latest update in order to make
|
2022-07-30 00:22:37 +00:00
|
|
|
// sure we have a local payload to fall back to in the event of the blinded block
|
2022-07-03 05:36:50 +00:00
|
|
|
// flow failing.
|
|
|
|
chain
|
|
|
|
.prepare_beacon_proposer(current_slot)
|
|
|
|
.await
|
|
|
|
.map_err(|e| {
|
2022-06-30 00:49:21 +00:00
|
|
|
warp_utils::reject::custom_bad_request(format!(
|
|
|
|
"error updating proposer preparations: {:?}",
|
|
|
|
e
|
|
|
|
))
|
|
|
|
})?;
|
|
|
|
|
2022-07-30 00:22:37 +00:00
|
|
|
let builder = execution_layer
|
|
|
|
.builder()
|
|
|
|
.as_ref()
|
|
|
|
.ok_or(BeaconChainError::BuilderMissing)
|
|
|
|
.map_err(warp_utils::reject::beacon_chain_error)?;
|
|
|
|
|
|
|
|
info!(
|
|
|
|
log,
|
|
|
|
"Forwarding register validator request to connected builder";
|
2022-08-24 23:34:58 +00:00
|
|
|
"count" => filtered_registration_data.len(),
|
2022-07-30 00:22:37 +00:00
|
|
|
);
|
2022-06-30 00:49:21 +00:00
|
|
|
|
2022-07-30 00:22:37 +00:00
|
|
|
builder
|
2022-08-24 23:34:58 +00:00
|
|
|
.post_builder_validators(&filtered_registration_data)
|
2022-07-30 00:22:37 +00:00
|
|
|
.await
|
2023-03-13 01:40:03 +00:00
|
|
|
.map(|resp| warp::reply::json(&resp).into_response())
|
2022-07-30 00:22:37 +00:00
|
|
|
.map_err(|e| {
|
2023-02-12 23:14:08 +00:00
|
|
|
warn!(
|
2022-11-07 06:48:31 +00:00
|
|
|
log,
|
|
|
|
"Relay error when registering validator(s)";
|
|
|
|
"num_registrations" => filtered_registration_data.len(),
|
|
|
|
"error" => ?e
|
|
|
|
);
|
2022-07-30 00:22:37 +00:00
|
|
|
// Forward the HTTP status code if we are able to, otherwise fall back
|
|
|
|
// to a server error.
|
|
|
|
if let eth2::Error::ServerMessage(message) = e {
|
|
|
|
if message.code == StatusCode::BAD_REQUEST.as_u16() {
|
|
|
|
return warp_utils::reject::custom_bad_request(message.message);
|
|
|
|
} else {
|
|
|
|
// According to the spec this response should only be a 400 or 500,
|
|
|
|
// so we fall back to a 500 here.
|
|
|
|
return warp_utils::reject::custom_server_error(message.message);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
warp_utils::reject::custom_server_error(format!("{e:?}"))
|
|
|
|
})
|
2022-06-30 00:49:21 +00:00
|
|
|
},
|
|
|
|
);
|
2021-08-06 00:47:31 +00:00
|
|
|
// POST validator/sync_committee_subscriptions
|
2022-07-25 08:23:00 +00:00
|
|
|
let post_validator_sync_committee_subscriptions = eth_v1
|
2021-08-06 00:47:31 +00:00
|
|
|
.and(warp::path("validator"))
|
|
|
|
.and(warp::path("sync_committee_subscriptions"))
|
|
|
|
.and(warp::path::end())
|
|
|
|
.and(warp::body::json())
|
2022-08-30 05:47:31 +00:00
|
|
|
.and(validator_subscription_tx_filter)
|
2021-08-06 00:47:31 +00:00
|
|
|
.and(chain_filter.clone())
|
2022-08-30 05:47:31 +00:00
|
|
|
.and(log_filter.clone())
|
2021-08-06 00:47:31 +00:00
|
|
|
.and_then(
|
|
|
|
|subscriptions: Vec<types::SyncCommitteeSubscription>,
|
2022-08-30 05:47:31 +00:00
|
|
|
validator_subscription_tx: Sender<ValidatorSubscriptionMessage>,
|
|
|
|
chain: Arc<BeaconChain<T>>,
|
|
|
|
log: Logger
|
|
|
|
| {
|
2021-08-06 00:47:31 +00:00
|
|
|
blocking_json_task(move || {
|
|
|
|
for subscription in subscriptions {
|
|
|
|
chain
|
|
|
|
.validator_monitor
|
|
|
|
.write()
|
|
|
|
.auto_register_local_validator(subscription.validator_index);
|
|
|
|
|
2022-08-30 05:47:31 +00:00
|
|
|
let message = ValidatorSubscriptionMessage::SyncCommitteeSubscribe {
|
2021-08-06 00:47:31 +00:00
|
|
|
subscriptions: vec![subscription],
|
2022-08-30 05:47:31 +00:00
|
|
|
};
|
|
|
|
if let Err(e) = validator_subscription_tx.try_send(message) {
|
|
|
|
warn!(
|
|
|
|
log,
|
|
|
|
"Unable to process sync subscriptions";
|
|
|
|
"info" => "the host may be overloaded or resource-constrained",
|
|
|
|
"error" => ?e
|
|
|
|
);
|
|
|
|
return Err(warp_utils::reject::custom_server_error(
|
|
|
|
"unable to queue subscription, host may be overloaded or shutting down".to_string(),
|
|
|
|
));
|
|
|
|
}
|
2021-08-06 00:47:31 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
Ok(())
|
|
|
|
})
|
|
|
|
},
|
|
|
|
);
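A hedged usage sketch for the route above, assuming a node on the default `localhost:5052`; the body field names follow the standard beacon-API `SyncCommitteeSubscription` schema rather than anything shown in this file:
```
curl -X POST "http://localhost:5052/eth/v1/validator/sync_committee_subscriptions" \
  -H "Content-Type: application/json" \
  -d '[{"validator_index": "1", "sync_committee_indices": ["0"], "until_epoch": "100"}]'
```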
|
|
|
|
|
2023-07-31 01:53:03 +00:00
|
|
|
// POST validator/liveness/{epoch}
|
|
|
|
let post_validator_liveness_epoch = eth_v1
|
|
|
|
.and(warp::path("validator"))
|
|
|
|
.and(warp::path("liveness"))
|
|
|
|
.and(warp::path::param::<Epoch>())
|
|
|
|
.and(warp::path::end())
|
|
|
|
.and(warp::body::json())
|
|
|
|
.and(chain_filter.clone())
|
|
|
|
.and_then(
|
|
|
|
|epoch: Epoch, indices: Vec<u64>, chain: Arc<BeaconChain<T>>| {
|
|
|
|
blocking_json_task(move || {
|
|
|
|
// Ensure the request is for either the current, previous or next epoch.
|
|
|
|
let current_epoch = chain
|
|
|
|
.epoch()
|
|
|
|
.map_err(warp_utils::reject::beacon_chain_error)?;
|
|
|
|
let prev_epoch = current_epoch.saturating_sub(Epoch::new(1));
|
|
|
|
let next_epoch = current_epoch.saturating_add(Epoch::new(1));
|
|
|
|
|
|
|
|
if epoch < prev_epoch || epoch > next_epoch {
|
|
|
|
return Err(warp_utils::reject::custom_bad_request(format!(
|
|
|
|
"request epoch {} is more than one epoch from the current epoch {}",
|
|
|
|
epoch, current_epoch
|
|
|
|
)));
|
|
|
|
}
|
|
|
|
|
|
|
|
let liveness: Vec<api_types::StandardLivenessResponseData> = indices
|
|
|
|
.iter()
|
|
|
|
.cloned()
|
|
|
|
.map(|index| {
|
|
|
|
let is_live = chain.validator_seen_at_epoch(index as usize, epoch);
|
|
|
|
api_types::StandardLivenessResponseData { index, is_live }
|
|
|
|
})
|
|
|
|
.collect();
|
|
|
|
|
|
|
|
Ok(api_types::GenericResponse::from(liveness))
|
|
|
|
})
|
|
|
|
},
|
|
|
|
);
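A hedged usage sketch for the standard liveness route above, assuming `localhost:5052`. The epoch in the path must be within one epoch of the node's current epoch (per the check above), and the body is shown as plain numbers to match the `Vec<u64>` the handler deserializes; adjust to string-encoded indices if the serde definitions require it:
```
curl -X POST "http://localhost:5052/eth/v1/validator/liveness/100" \
  -H "Content-Type: application/json" \
  -d '[0, 1, 2]'
```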
|
|
|
|
|
2021-07-31 03:50:52 +00:00
|
|
|
// POST lighthouse/liveness
|
|
|
|
let post_lighthouse_liveness = warp::path("lighthouse")
|
|
|
|
.and(warp::path("liveness"))
|
|
|
|
.and(warp::path::end())
|
|
|
|
.and(warp::body::json())
|
|
|
|
.and(chain_filter.clone())
|
|
|
|
.and_then(
|
|
|
|
|request_data: api_types::LivenessRequestData, chain: Arc<BeaconChain<T>>| {
|
|
|
|
blocking_json_task(move || {
|
|
|
|
// Ensure the request is for either the current, previous or next epoch.
|
|
|
|
let current_epoch = chain
|
|
|
|
.epoch()
|
|
|
|
.map_err(warp_utils::reject::beacon_chain_error)?;
|
|
|
|
let prev_epoch = current_epoch.saturating_sub(Epoch::new(1));
|
|
|
|
let next_epoch = current_epoch.saturating_add(Epoch::new(1));
|
|
|
|
|
|
|
|
if request_data.epoch < prev_epoch || request_data.epoch > next_epoch {
|
|
|
|
return Err(warp_utils::reject::custom_bad_request(format!(
|
|
|
|
"request epoch {} is more than one epoch from the current epoch {}",
|
|
|
|
request_data.epoch, current_epoch
|
|
|
|
)));
|
|
|
|
}
|
|
|
|
|
|
|
|
let liveness: Vec<api_types::LivenessResponseData> = request_data
|
|
|
|
.indices
|
|
|
|
.iter()
|
|
|
|
.cloned()
|
|
|
|
.map(|index| {
|
|
|
|
let is_live =
|
|
|
|
chain.validator_seen_at_epoch(index as usize, request_data.epoch);
|
|
|
|
api_types::LivenessResponseData {
|
2022-12-16 04:04:00 +00:00
|
|
|
index,
|
2021-07-31 03:50:52 +00:00
|
|
|
epoch: request_data.epoch,
|
|
|
|
is_live,
|
|
|
|
}
|
|
|
|
})
|
|
|
|
.collect();
|
|
|
|
|
|
|
|
Ok(api_types::GenericResponse::from(liveness))
|
|
|
|
})
|
|
|
|
},
|
|
|
|
);
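A hedged sketch for the non-standard `/lighthouse/liveness` variant. The `indices` and `epoch` field names come from the handler above; whether values are string- or number-encoded depends on the `LivenessRequestData` serde attributes, which are not shown here:
```
curl -X POST "http://localhost:5052/lighthouse/liveness" \
  -H "Content-Type: application/json" \
  -d '{"indices": ["0", "1"], "epoch": "100"}'
```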
|
|
|
|
|
2020-09-29 03:46:54 +00:00
|
|
|
// GET lighthouse/health
|
|
|
|
let get_lighthouse_health = warp::path("lighthouse")
|
|
|
|
.and(warp::path("health"))
|
|
|
|
.and(warp::path::end())
|
|
|
|
.and_then(|| {
|
|
|
|
blocking_json_task(move || {
|
|
|
|
eth2::lighthouse::Health::observe()
|
|
|
|
.map(api_types::GenericResponse::from)
|
|
|
|
.map_err(warp_utils::reject::custom_bad_request)
|
|
|
|
})
|
|
|
|
});
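For example (a sketch assuming the default API address):
```
curl -s "http://localhost:5052/lighthouse/health" | jq
```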
|
|
|
|
|
2022-11-15 05:21:26 +00:00
|
|
|
// GET lighthouse/ui/health
|
|
|
|
let get_lighthouse_ui_health = warp::path("lighthouse")
|
|
|
|
.and(warp::path("ui"))
|
|
|
|
.and(warp::path("health"))
|
|
|
|
.and(warp::path::end())
|
|
|
|
.and(system_info_filter)
|
|
|
|
.and(app_start_filter)
|
|
|
|
.and(data_dir_filter)
|
|
|
|
.and(network_globals.clone())
|
|
|
|
.and_then(
|
|
|
|
|sysinfo, app_start: std::time::Instant, data_dir, network_globals| {
|
|
|
|
blocking_json_task(move || {
|
2022-12-16 04:04:00 +00:00
|
|
|
let app_uptime = app_start.elapsed().as_secs();
|
2022-11-15 05:21:26 +00:00
|
|
|
Ok(api_types::GenericResponse::from(observe_system_health_bn(
|
|
|
|
sysinfo,
|
|
|
|
data_dir,
|
|
|
|
app_uptime,
|
|
|
|
network_globals,
|
|
|
|
)))
|
|
|
|
})
|
|
|
|
},
|
|
|
|
);
|
|
|
|
|
2022-12-01 06:03:53 +00:00
|
|
|
// GET lighthouse/ui/validator_count
|
|
|
|
let get_lighthouse_ui_validator_count = warp::path("lighthouse")
|
|
|
|
.and(warp::path("ui"))
|
|
|
|
.and(warp::path("validator_count"))
|
|
|
|
.and(warp::path::end())
|
|
|
|
.and(chain_filter.clone())
|
|
|
|
.and_then(|chain: Arc<BeaconChain<T>>| {
|
|
|
|
blocking_json_task(move || {
|
|
|
|
ui::get_validator_count(chain).map(api_types::GenericResponse::from)
|
|
|
|
})
|
|
|
|
});
|
|
|
|
|
2022-12-09 06:39:19 +00:00
|
|
|
// POST lighthouse/ui/validator_metrics
|
|
|
|
let post_lighthouse_ui_validator_metrics = warp::path("lighthouse")
|
|
|
|
.and(warp::path("ui"))
|
|
|
|
.and(warp::path("validator_metrics"))
|
|
|
|
.and(warp::path::end())
|
|
|
|
.and(warp::body::json())
|
|
|
|
.and(chain_filter.clone())
|
|
|
|
.and_then(
|
|
|
|
|request_data: ui::ValidatorMetricsRequestData, chain: Arc<BeaconChain<T>>| {
|
|
|
|
blocking_json_task(move || {
|
|
|
|
ui::post_validator_monitor_metrics(request_data, chain)
|
|
|
|
.map(api_types::GenericResponse::from)
|
|
|
|
})
|
|
|
|
},
|
|
|
|
);
|
|
|
|
|
Cache validator balances and allow them to be served over the HTTP API (#3863)
## Issue Addressed
#3804
## Proposed Changes
- Add `total_balance` to the validator monitor and adjust the number of historical epochs which are cached.
- Allow certain values in the cache to be served out via the HTTP API without requiring a state read.
## Usage
```
curl -X POST "http://localhost:5052/lighthouse/ui/validator_info" -d '{"indices": [0]}' -H "Content-Type: application/json" | jq
```
```
{
"data": {
"validators": {
"0": {
"info": [
{
"epoch": 172981,
"total_balance": 36566388519
},
...
{
"epoch": 172990,
"total_balance": 36566496513
}
]
},
"1": {
"info": [
{
"epoch": 172981,
"total_balance": 36355797968
},
...
{
"epoch": 172990,
"total_balance": 36355905962
}
]
}
}
}
}
```
## Additional Info
This requires no historical states to operate, which means it will still function on a freshly checkpoint-synced node. However, because of this, the values will only populate as each epoch passes (up to a maximum of 10 entries).
Another benefit of this method is that we can easily cache any other values which would normally require a state read and serve them via the same endpoint. However, we would need to be cautious about not overly increasing block processing time by caching values from complex computations.
This also caches some of the validator metrics directly, rather than pulling them from the Prometheus metrics when the API is called. This means when the validator count exceeds the individual monitor threshold, the cached values will still be available.
Co-authored-by: Paul Hauner <paul@paulhauner.com>
2023-02-21 20:54:55 +00:00
|
|
|
// POST lighthouse/ui/validator_info
|
|
|
|
let post_lighthouse_ui_validator_info = warp::path("lighthouse")
|
|
|
|
.and(warp::path("ui"))
|
|
|
|
.and(warp::path("validator_info"))
|
|
|
|
.and(warp::path::end())
|
|
|
|
.and(warp::body::json())
|
|
|
|
.and(chain_filter.clone())
|
|
|
|
.and_then(
|
|
|
|
|request_data: ui::ValidatorInfoRequestData, chain: Arc<BeaconChain<T>>| {
|
|
|
|
blocking_json_task(move || {
|
|
|
|
ui::get_validator_info(request_data, chain)
|
|
|
|
.map(api_types::GenericResponse::from)
|
|
|
|
})
|
|
|
|
},
|
|
|
|
);
|
|
|
|
|
2020-09-29 03:46:54 +00:00
|
|
|
// GET lighthouse/syncing
|
|
|
|
let get_lighthouse_syncing = warp::path("lighthouse")
|
|
|
|
.and(warp::path("syncing"))
|
|
|
|
.and(warp::path::end())
|
|
|
|
.and(network_globals.clone())
|
|
|
|
.and_then(|network_globals: Arc<NetworkGlobals<T::EthSpec>>| {
|
|
|
|
blocking_json_task(move || {
|
|
|
|
Ok(api_types::GenericResponse::from(
|
|
|
|
network_globals.sync_state(),
|
|
|
|
))
|
|
|
|
})
|
|
|
|
});
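For example, to read the node's sync state:
```
curl -s "http://localhost:5052/lighthouse/syncing" | jq
```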
|
|
|
|
|
2021-12-22 06:17:14 +00:00
|
|
|
// GET lighthouse/nat
|
|
|
|
let get_lighthouse_nat = warp::path("lighthouse")
|
|
|
|
.and(warp::path("nat"))
|
|
|
|
.and(warp::path::end())
|
|
|
|
.and_then(|| {
|
|
|
|
blocking_json_task(move || {
|
|
|
|
Ok(api_types::GenericResponse::from(
|
|
|
|
lighthouse_network::metrics::NAT_OPEN
|
|
|
|
.as_ref()
|
|
|
|
.map(|v| v.get())
|
|
|
|
.unwrap_or(0)
|
|
|
|
!= 0,
|
|
|
|
))
|
|
|
|
})
|
|
|
|
});
|
|
|
|
|
2020-09-29 03:46:54 +00:00
|
|
|
// GET lighthouse/peers
|
|
|
|
let get_lighthouse_peers = warp::path("lighthouse")
|
|
|
|
.and(warp::path("peers"))
|
|
|
|
.and(warp::path::end())
|
|
|
|
.and(network_globals.clone())
|
|
|
|
.and_then(|network_globals: Arc<NetworkGlobals<T::EthSpec>>| {
|
|
|
|
blocking_json_task(move || {
|
|
|
|
Ok(network_globals
|
2021-11-25 03:45:52 +00:00
|
|
|
.peers
|
|
|
|
.read()
|
2020-09-29 03:46:54 +00:00
|
|
|
.peers()
|
|
|
|
.map(|(peer_id, peer_info)| eth2::lighthouse::Peer {
|
|
|
|
peer_id: peer_id.to_string(),
|
|
|
|
peer_info: peer_info.clone(),
|
|
|
|
})
|
|
|
|
.collect::<Vec<_>>())
|
|
|
|
})
|
|
|
|
});
|
|
|
|
|
|
|
|
// GET lighthouse/peers/connected
|
|
|
|
let get_lighthouse_peers_connected = warp::path("lighthouse")
|
|
|
|
.and(warp::path("peers"))
|
|
|
|
.and(warp::path("connected"))
|
|
|
|
.and(warp::path::end())
|
|
|
|
.and(network_globals)
|
|
|
|
.and_then(|network_globals: Arc<NetworkGlobals<T::EthSpec>>| {
|
|
|
|
blocking_json_task(move || {
|
|
|
|
Ok(network_globals
|
2021-11-25 03:45:52 +00:00
|
|
|
.peers
|
|
|
|
.read()
|
2020-09-29 03:46:54 +00:00
|
|
|
.connected_peers()
|
|
|
|
.map(|(peer_id, peer_info)| eth2::lighthouse::Peer {
|
|
|
|
peer_id: peer_id.to_string(),
|
|
|
|
peer_info: peer_info.clone(),
|
|
|
|
})
|
|
|
|
.collect::<Vec<_>>())
|
|
|
|
})
|
|
|
|
});
|
|
|
|
|
|
|
|
// GET lighthouse/proto_array
|
|
|
|
let get_lighthouse_proto_array = warp::path("lighthouse")
|
|
|
|
.and(warp::path("proto_array"))
|
|
|
|
.and(warp::path::end())
|
|
|
|
.and(chain_filter.clone())
|
|
|
|
.and_then(|chain: Arc<BeaconChain<T>>| {
|
2023-03-13 01:40:03 +00:00
|
|
|
blocking_response_task(move || {
|
2020-09-29 03:46:54 +00:00
|
|
|
Ok::<_, warp::Rejection>(warp::reply::json(&api_types::GenericResponseRef::from(
|
2022-07-03 05:36:50 +00:00
|
|
|
chain
|
|
|
|
.canonical_head
|
|
|
|
.fork_choice_read_lock()
|
|
|
|
.proto_array()
|
|
|
|
.core_proto_array(),
|
2020-09-29 03:46:54 +00:00
|
|
|
)))
|
|
|
|
})
|
|
|
|
});
|
|
|
|
|
|
|
|
// GET lighthouse/validator_inclusion/{epoch}/{validator_id}
|
|
|
|
let get_lighthouse_validator_inclusion_global = warp::path("lighthouse")
|
|
|
|
.and(warp::path("validator_inclusion"))
|
|
|
|
.and(warp::path::param::<Epoch>())
|
|
|
|
.and(warp::path::param::<ValidatorId>())
|
|
|
|
.and(warp::path::end())
|
|
|
|
.and(chain_filter.clone())
|
|
|
|
.and_then(
|
|
|
|
|epoch: Epoch, validator_id: ValidatorId, chain: Arc<BeaconChain<T>>| {
|
|
|
|
blocking_json_task(move || {
|
|
|
|
validator_inclusion::validator_inclusion_data(epoch, &validator_id, &chain)
|
|
|
|
.map(api_types::GenericResponse::from)
|
|
|
|
})
|
|
|
|
},
|
|
|
|
);
|
|
|
|
|
|
|
|
// GET lighthouse/validator_inclusion/{epoch}/global
|
|
|
|
let get_lighthouse_validator_inclusion = warp::path("lighthouse")
|
|
|
|
.and(warp::path("validator_inclusion"))
|
|
|
|
.and(warp::path::param::<Epoch>())
|
|
|
|
.and(warp::path("global"))
|
|
|
|
.and(warp::path::end())
|
2020-10-22 06:05:49 +00:00
|
|
|
.and(chain_filter.clone())
|
2020-09-29 03:46:54 +00:00
|
|
|
.and_then(|epoch: Epoch, chain: Arc<BeaconChain<T>>| {
|
|
|
|
blocking_json_task(move || {
|
|
|
|
validator_inclusion::global_validator_inclusion_data(epoch, &chain)
|
|
|
|
.map(api_types::GenericResponse::from)
|
|
|
|
})
|
|
|
|
});
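Hedged usage sketches for the two inclusion routes above, using an arbitrary epoch and validator index:
```
# Global inclusion data for epoch 100.
curl -s "http://localhost:5052/lighthouse/validator_inclusion/100/global" | jq

# Inclusion data for validator 42 at epoch 100 (a pubkey is also accepted as the ValidatorId).
curl -s "http://localhost:5052/lighthouse/validator_inclusion/100/42" | jq
```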
|
|
|
|
|
2020-11-02 00:37:30 +00:00
|
|
|
// GET lighthouse/eth1/syncing
|
|
|
|
let get_lighthouse_eth1_syncing = warp::path("lighthouse")
|
|
|
|
.and(warp::path("eth1"))
|
|
|
|
.and(warp::path("syncing"))
|
|
|
|
.and(warp::path::end())
|
|
|
|
.and(chain_filter.clone())
|
|
|
|
.and_then(|chain: Arc<BeaconChain<T>>| {
|
|
|
|
blocking_json_task(move || {
|
2020-11-30 20:29:17 +00:00
|
|
|
let current_slot_opt = chain.slot().ok();
|
2020-11-02 00:37:30 +00:00
|
|
|
|
|
|
|
chain
|
|
|
|
.eth1_chain
|
|
|
|
.as_ref()
|
|
|
|
.ok_or_else(|| {
|
|
|
|
warp_utils::reject::custom_not_found(
|
|
|
|
"Eth1 sync is disabled. See the --eth1 CLI flag.".to_string(),
|
|
|
|
)
|
|
|
|
})
|
|
|
|
.and_then(|eth1| {
|
2022-07-03 05:36:50 +00:00
|
|
|
eth1.sync_status(chain.genesis_time, current_slot_opt, &chain.spec)
|
2020-11-02 00:37:30 +00:00
|
|
|
.ok_or_else(|| {
|
|
|
|
warp_utils::reject::custom_server_error(
|
|
|
|
"Unable to determine Eth1 sync status".to_string(),
|
|
|
|
)
|
|
|
|
})
|
|
|
|
})
|
|
|
|
.map(api_types::GenericResponse::from)
|
|
|
|
})
|
|
|
|
});
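For example (returns a 404 with the message above if the node was started without an eth1 backend):
```
curl -s "http://localhost:5052/lighthouse/eth1/syncing" | jq
```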
|
|
|
|
|
|
|
|
// GET lighthouse/eth1/block_cache
|
|
|
|
let get_lighthouse_eth1_block_cache = warp::path("lighthouse")
|
|
|
|
.and(warp::path("eth1"))
|
|
|
|
.and(warp::path("block_cache"))
|
|
|
|
.and(warp::path::end())
|
|
|
|
.and(eth1_service_filter.clone())
|
|
|
|
.and_then(|eth1_service: eth1::Service| {
|
|
|
|
blocking_json_task(move || {
|
|
|
|
Ok(api_types::GenericResponse::from(
|
|
|
|
eth1_service
|
|
|
|
.blocks()
|
|
|
|
.read()
|
|
|
|
.iter()
|
|
|
|
.cloned()
|
|
|
|
.collect::<Vec<_>>(),
|
|
|
|
))
|
|
|
|
})
|
|
|
|
});
|
|
|
|
|
|
|
|
// GET lighthouse/eth1/deposit_cache
|
|
|
|
let get_lighthouse_eth1_deposit_cache = warp::path("lighthouse")
|
|
|
|
.and(warp::path("eth1"))
|
|
|
|
.and(warp::path("deposit_cache"))
|
|
|
|
.and(warp::path::end())
|
|
|
|
.and(eth1_service_filter)
|
|
|
|
.and_then(|eth1_service: eth1::Service| {
|
|
|
|
blocking_json_task(move || {
|
|
|
|
Ok(api_types::GenericResponse::from(
|
|
|
|
eth1_service
|
|
|
|
.deposits()
|
|
|
|
.read()
|
|
|
|
.cache
|
|
|
|
.iter()
|
|
|
|
.cloned()
|
|
|
|
.collect::<Vec<_>>(),
|
|
|
|
))
|
|
|
|
})
|
|
|
|
});
|
|
|
|
|
2020-10-22 06:05:49 +00:00
|
|
|
// GET lighthouse/beacon/states/{state_id}/ssz
|
|
|
|
let get_lighthouse_beacon_states_ssz = warp::path("lighthouse")
|
|
|
|
.and(warp::path("beacon"))
|
|
|
|
.and(warp::path("states"))
|
|
|
|
.and(warp::path::param::<StateId>())
|
|
|
|
.and(warp::path("ssz"))
|
|
|
|
.and(warp::path::end())
|
2020-11-23 01:00:22 +00:00
|
|
|
.and(chain_filter.clone())
|
2020-10-22 06:05:49 +00:00
|
|
|
.and_then(|state_id: StateId, chain: Arc<BeaconChain<T>>| {
|
2023-03-13 01:40:03 +00:00
|
|
|
blocking_response_task(move || {
|
2022-07-25 08:23:00 +00:00
|
|
|
// This debug endpoint provides no indication of optimistic status.
|
2023-03-30 06:08:37 +00:00
|
|
|
let (state, _execution_optimistic, _finalized) = state_id.state(&chain)?;
|
2020-10-22 06:05:49 +00:00
|
|
|
Response::builder()
|
|
|
|
.status(200)
|
|
|
|
.header("Content-Type", "application/ssz")
|
|
|
|
.body(state.as_ssz_bytes())
|
|
|
|
.map_err(|e| {
|
|
|
|
warp_utils::reject::custom_server_error(format!(
|
|
|
|
"failed to create response: {}",
|
|
|
|
e
|
|
|
|
))
|
|
|
|
})
|
|
|
|
})
|
|
|
|
});
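A hedged sketch: the response body is raw SSZ (`application/ssz`), so write it to a file rather than piping it through `jq`; `head` is one of the standard `state_id` values:
```
curl -s -o head_state.ssz "http://localhost:5052/lighthouse/beacon/states/head/ssz"
```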
|
|
|
|
|
2020-11-23 01:00:22 +00:00
|
|
|
// GET lighthouse/staking
|
|
|
|
let get_lighthouse_staking = warp::path("lighthouse")
|
|
|
|
.and(warp::path("staking"))
|
|
|
|
.and(warp::path::end())
|
2020-12-04 00:18:58 +00:00
|
|
|
.and(chain_filter.clone())
|
2020-11-23 01:00:22 +00:00
|
|
|
.and_then(|chain: Arc<BeaconChain<T>>| {
|
|
|
|
blocking_json_task(move || {
|
|
|
|
if chain.eth1_chain.is_some() {
|
|
|
|
Ok(())
|
|
|
|
} else {
|
|
|
|
Err(warp_utils::reject::custom_not_found(
|
|
|
|
"staking is not enabled, \
|
|
|
|
see the --staking CLI flag"
|
|
|
|
.to_string(),
|
|
|
|
))
|
|
|
|
}
|
|
|
|
})
|
|
|
|
});
|
|
|
|
|
2021-09-22 00:37:28 +00:00
|
|
|
let database_path = warp::path("lighthouse").and(warp::path("database"));
|
|
|
|
|
|
|
|
// GET lighthouse/database/info
|
|
|
|
let get_lighthouse_database_info = database_path
|
|
|
|
.and(warp::path("info"))
|
|
|
|
.and(warp::path::end())
|
|
|
|
.and(chain_filter.clone())
|
|
|
|
.and_then(|chain: Arc<BeaconChain<T>>| blocking_json_task(move || database::info(chain)));
|
|
|
|
|
|
|
|
// POST lighthouse/database/reconstruct
|
|
|
|
let post_lighthouse_database_reconstruct = database_path
|
|
|
|
.and(warp::path("reconstruct"))
|
|
|
|
.and(warp::path::end())
|
|
|
|
.and(not_while_syncing_filter)
|
|
|
|
.and(chain_filter.clone())
|
|
|
|
.and_then(|chain: Arc<BeaconChain<T>>| {
|
|
|
|
blocking_json_task(move || {
|
|
|
|
chain.store_migrator.process_reconstruction();
|
|
|
|
Ok("success")
|
|
|
|
})
|
|
|
|
});
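Hedged usage sketches for the two database routes defined above:
```
# Inspect the beacon database.
curl -s "http://localhost:5052/lighthouse/database/info" | jq

# Trigger state reconstruction; the handler above simply returns "success".
curl -s -X POST "http://localhost:5052/lighthouse/database/reconstruct"
```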
|
|
|
|
|
|
|
|
// POST lighthouse/database/historical_blocks
|
|
|
|
let post_lighthouse_database_historical_blocks = database_path
|
|
|
|
.and(warp::path("historical_blocks"))
|
|
|
|
.and(warp::path::end())
|
|
|
|
.and(warp::body::json())
|
|
|
|
.and(chain_filter.clone())
|
|
|
|
.and(log_filter.clone())
|
|
|
|
.and_then(
|
2022-07-03 05:36:50 +00:00
|
|
|
|blocks: Vec<Arc<SignedBlindedBeaconBlock<T::EthSpec>>>,
|
2021-09-22 00:37:28 +00:00
|
|
|
chain: Arc<BeaconChain<T>>,
|
|
|
|
log: Logger| {
|
|
|
|
info!(
|
|
|
|
log,
|
|
|
|
"Importing historical blocks";
|
|
|
|
"count" => blocks.len(),
|
|
|
|
"source" => "http_api"
|
|
|
|
);
|
|
|
|
blocking_json_task(move || database::historical_blocks(chain, blocks))
|
|
|
|
},
|
|
|
|
);
|
|
|
|
|
2022-01-27 22:58:31 +00:00
|
|
|
// GET lighthouse/analysis/block_rewards
|
2022-01-27 01:06:02 +00:00
|
|
|
let get_lighthouse_block_rewards = warp::path("lighthouse")
|
2022-01-27 22:58:31 +00:00
|
|
|
.and(warp::path("analysis"))
|
2022-01-27 01:06:02 +00:00
|
|
|
.and(warp::path("block_rewards"))
|
|
|
|
.and(warp::query::<eth2::lighthouse::BlockRewardsQuery>())
|
|
|
|
.and(warp::path::end())
|
|
|
|
.and(chain_filter.clone())
|
|
|
|
.and(log_filter.clone())
|
|
|
|
.and_then(|query, chain, log| {
|
|
|
|
blocking_json_task(move || block_rewards::get_block_rewards(query, chain, log))
|
|
|
|
});
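A hedged sketch; the `start_slot`/`end_slot` query parameter names are an assumption about `BlockRewardsQuery` and are not shown in this file:
```
curl -s "http://localhost:5052/lighthouse/analysis/block_rewards?start_slot=1&end_slot=32" | jq
```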
|
|
|
|
|
2022-06-29 04:50:37 +00:00
|
|
|
// POST lighthouse/analysis/block_rewards
|
|
|
|
let post_lighthouse_block_rewards = warp::path("lighthouse")
|
|
|
|
.and(warp::path("analysis"))
|
|
|
|
.and(warp::path("block_rewards"))
|
|
|
|
.and(warp::body::json())
|
|
|
|
.and(warp::path::end())
|
|
|
|
.and(chain_filter.clone())
|
|
|
|
.and(log_filter.clone())
|
|
|
|
.and_then(|blocks, chain, log| {
|
|
|
|
blocking_json_task(move || block_rewards::compute_block_rewards(blocks, chain, log))
|
|
|
|
});
|
|
|
|
|
2022-01-27 22:58:31 +00:00
|
|
|
// GET lighthouse/analysis/attestation_performance/{index}
|
|
|
|
let get_lighthouse_attestation_performance = warp::path("lighthouse")
|
|
|
|
.and(warp::path("analysis"))
|
|
|
|
.and(warp::path("attestation_performance"))
|
|
|
|
.and(warp::path::param::<String>())
|
|
|
|
.and(warp::query::<eth2::lighthouse::AttestationPerformanceQuery>())
|
|
|
|
.and(warp::path::end())
|
|
|
|
.and(chain_filter.clone())
|
|
|
|
.and_then(|target, query, chain: Arc<BeaconChain<T>>| {
|
|
|
|
blocking_json_task(move || {
|
|
|
|
attestation_performance::get_attestation_performance(target, query, chain)
|
|
|
|
})
|
|
|
|
});
|
|
|
|
|
2022-02-21 23:21:02 +00:00
|
|
|
// GET lighthouse/analysis/block_packing_efficiency
|
|
|
|
let get_lighthouse_block_packing_efficiency = warp::path("lighthouse")
|
|
|
|
.and(warp::path("analysis"))
|
|
|
|
.and(warp::path("block_packing_efficiency"))
|
|
|
|
.and(warp::query::<eth2::lighthouse::BlockPackingEfficiencyQuery>())
|
|
|
|
.and(warp::path::end())
|
|
|
|
.and(chain_filter.clone())
|
|
|
|
.and_then(|query, chain: Arc<BeaconChain<T>>| {
|
|
|
|
blocking_json_task(move || {
|
|
|
|
block_packing_efficiency::get_block_packing_efficiency(query, chain)
|
|
|
|
})
|
|
|
|
});
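Similarly for block packing efficiency; the `start_epoch`/`end_epoch` parameter names are an assumption about `BlockPackingEfficiencyQuery`:
```
curl -s "http://localhost:5052/lighthouse/analysis/block_packing_efficiency?start_epoch=1&end_epoch=2" | jq
```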
|
|
|
|
|
2022-07-21 05:45:39 +00:00
|
|
|
// GET lighthouse/merge_readiness
|
|
|
|
let get_lighthouse_merge_readiness = warp::path("lighthouse")
|
|
|
|
.and(warp::path("merge_readiness"))
|
|
|
|
.and(warp::path::end())
|
|
|
|
.and(chain_filter.clone())
|
|
|
|
.and_then(|chain: Arc<BeaconChain<T>>| async move {
|
|
|
|
let merge_readiness = chain.check_merge_readiness().await;
|
2023-03-13 01:40:03 +00:00
|
|
|
Ok::<_, warp::reject::Rejection>(
|
|
|
|
warp::reply::json(&api_types::GenericResponse::from(merge_readiness))
|
|
|
|
.into_response(),
|
|
|
|
)
|
2022-07-21 05:45:39 +00:00
|
|
|
});
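For example:
```
curl -s "http://localhost:5052/lighthouse/merge_readiness" | jq
```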
|
|
|
|
|
    let get_events = eth_v1
        .and(warp::path("events"))
        .and(warp::path::end())
        .and(multi_key_query::<api_types::EventQuery>())
        .and(chain_filter)
        .and_then(
            |topics_res: Result<api_types::EventQuery, warp::Rejection>,
             chain: Arc<BeaconChain<T>>| {
                blocking_response_task(move || {
                    let topics = topics_res?;
                    // For each subscribed topic, spawn a new subscription.
                    let mut receivers = Vec::with_capacity(topics.topics.len());

                    if let Some(event_handler) = chain.event_handler.as_ref() {
                        for topic in topics.topics {
                            let receiver = match topic {
                                api_types::EventTopic::Head => event_handler.subscribe_head(),
                                api_types::EventTopic::Block => event_handler.subscribe_block(),
                                api_types::EventTopic::Attestation => {
                                    event_handler.subscribe_attestation()
                                }
                                api_types::EventTopic::VoluntaryExit => {
                                    event_handler.subscribe_exit()
                                }
                                api_types::EventTopic::FinalizedCheckpoint => {
                                    event_handler.subscribe_finalized()
                                }
                                api_types::EventTopic::ChainReorg => {
                                    event_handler.subscribe_reorgs()
                                }
                                api_types::EventTopic::ContributionAndProof => {
                                    event_handler.subscribe_contributions()
                                }
                                api_types::EventTopic::PayloadAttributes => {
                                    event_handler.subscribe_payload_attributes()
                                }
                                api_types::EventTopic::LateHead => {
                                    event_handler.subscribe_late_head()
                                }
                                api_types::EventTopic::BlockReward => {
                                    event_handler.subscribe_block_reward()
                                }
                            };

                            receivers.push(BroadcastStream::new(receiver).map(|msg| {
                                match msg {
                                    Ok(data) => Event::default()
                                        .event(data.topic_name())
                                        .json_data(data)
                                        .map_err(|e| {
                                            warp_utils::reject::server_sent_event_error(format!(
                                                "{:?}",
                                                e
                                            ))
                                        }),
                                    Err(e) => Err(warp_utils::reject::server_sent_event_error(
                                        format!("{:?}", e),
                                    )),
                                }
                            }));
                        }
                    } else {
                        return Err(warp_utils::reject::custom_server_error(
                            "event handler was not initialized".to_string(),
                        ));
                    }

                    let s = futures::stream::select_all(receivers);

                    Ok::<_, warp::Rejection>(warp::sse::reply(warp::sse::keep_alive().stream(s)))
                })
            },
        );
    // Subscribe to logs via Server Side Events
    // /lighthouse/logs
    let lighthouse_log_events = warp::path("lighthouse")
        .and(warp::path("logs"))
        .and(warp::path::end())
        .and(sse_component_filter)
        .and_then(|sse_component: Option<SSELoggingComponents>| {
            blocking_response_task(move || {
                if let Some(logging_components) = sse_component {
                    // Build a JSON stream
                    let s =
                        BroadcastStream::new(logging_components.sender.subscribe()).map(|msg| {
                            match msg {
                                Ok(data) => {
                                    // Serialize to json
                                    match data.to_json_string() {
                                        // Send the json as a Server Side Event
                                        Ok(json) => Ok(Event::default().data(json)),
                                        Err(e) => Err(warp_utils::reject::server_sent_event_error(
                                            format!("Unable to serialize to JSON {}", e),
                                        )),
                                    }
                                }
                                Err(e) => Err(warp_utils::reject::server_sent_event_error(
                                    format!("Unable to receive event {}", e),
                                )),
                            }
                        });

                    Ok::<_, warp::Rejection>(warp::sse::reply(warp::sse::keep_alive().stream(s)))
                } else {
                    Err(warp_utils::reject::custom_server_error(
                        "SSE Logging is not enabled".to_string(),
                    ))
                }
            })
        });
    // Define the ultimate set of routes that will be provided to the server.
    // Use `uor` rather than `or` in order to simplify types (see `UnifyingOrFilter`).
    let routes = warp::get()
        .and(
            get_beacon_genesis
                .uor(get_beacon_state_root)
                .uor(get_beacon_state_fork)
                .uor(get_beacon_state_finality_checkpoints)
                .uor(get_beacon_state_validator_balances)
                .uor(get_beacon_state_validators_id)
                .uor(get_beacon_state_validators)
                .uor(get_beacon_state_committees)
                .uor(get_beacon_state_sync_committees)
                .uor(get_beacon_state_randao)
                .uor(get_beacon_headers)
                .uor(get_beacon_headers_block_id)
                .uor(get_beacon_block)
                .uor(get_beacon_block_attestations)
                .uor(get_beacon_blinded_block)
                .uor(get_beacon_block_root)
                .uor(get_beacon_pool_attestations)
                .uor(get_beacon_pool_attester_slashings)
                .uor(get_beacon_pool_proposer_slashings)
                .uor(get_beacon_pool_voluntary_exits)
                .uor(get_beacon_pool_bls_to_execution_changes)
                .uor(get_beacon_deposit_snapshot)
                .uor(get_beacon_rewards_blocks)
                .uor(get_config_fork_schedule)
                .uor(get_config_spec)
                .uor(get_config_deposit_contract)
                .uor(get_debug_beacon_states)
                .uor(get_debug_beacon_heads)
                .uor(get_debug_fork_choice)
                .uor(get_node_identity)
                .uor(get_node_version)
                .uor(get_node_syncing)
                .uor(get_node_health)
                .uor(get_node_peers_by_id)
                .uor(get_node_peers)
                .uor(get_node_peer_count)
                .uor(get_validator_duties_proposer)
                .uor(get_validator_blocks)
                .uor(get_validator_blinded_blocks)
                .uor(get_validator_attestation_data)
                .uor(get_validator_aggregate_attestation)
                .uor(get_validator_sync_committee_contribution)
                .uor(get_lighthouse_health)
                .uor(get_lighthouse_ui_health)
                .uor(get_lighthouse_ui_validator_count)
                .uor(get_lighthouse_syncing)
                .uor(get_lighthouse_nat)
                .uor(get_lighthouse_peers)
                .uor(get_lighthouse_peers_connected)
                .uor(get_lighthouse_proto_array)
                .uor(get_lighthouse_validator_inclusion_global)
                .uor(get_lighthouse_validator_inclusion)
                .uor(get_lighthouse_eth1_syncing)
                .uor(get_lighthouse_eth1_block_cache)
                .uor(get_lighthouse_eth1_deposit_cache)
                .uor(get_lighthouse_beacon_states_ssz)
                .uor(get_lighthouse_staking)
                .uor(get_lighthouse_database_info)
                .uor(get_lighthouse_block_rewards)
                .uor(get_lighthouse_attestation_performance)
                .uor(get_lighthouse_block_packing_efficiency)
                .uor(get_lighthouse_merge_readiness)
                .uor(get_events)
                .uor(lighthouse_log_events.boxed())
                .recover(warp_utils::reject::handle_rejection),
        )
        .boxed()
        .uor(
            warp::post().and(
                warp::header::exact("Content-Type", "application/octet-stream")
                    // Routes which expect `application/octet-stream` go within this `and`.
                    .and(post_beacon_blocks_ssz.uor(post_beacon_blocks_v2_ssz))
                    .uor(post_beacon_blocks)
                    .uor(post_beacon_blinded_blocks)
Add broadcast validation routes to Beacon Node HTTP API (#4316)

## Issue Addressed

- #4293
- #4264

## Proposed Changes

*Changes largely follow those suggested in the main issue*.

- Add new routes to HTTP API
  - `post_beacon_blocks_v2`
  - `post_blinded_beacon_blocks_v2`
- Add new routes to `BeaconNodeHttpClient`
  - `post_beacon_blocks_v2`
  - `post_blinded_beacon_blocks_v2`
- Define new Eth2 common types
  - `BroadcastValidation`, enum representing the level of validation to apply to blocks prior to broadcast
  - `BroadcastValidationQuery`, the corresponding HTTP query string type for the above type
- ~~Define `_checked` variants of both `publish_block` and `publish_blinded_block` that enforce a validation level at a type level~~
- Add interactive tests to the `bn_http_api_tests` test target covering each validation level (in their own test module, `broadcast_validation_tests`)
  - `beacon/blocks`
    - `broadcast_validation=gossip`
      - Invalid (400)
      - Full Pass (200)
      - Partial Pass (202)
    - `broadcast_validation=consensus`
      - Invalid (400)
      - Only gossip (400)
      - Only consensus pass (i.e., equivocates) (200)
      - Full pass (200)
    - `broadcast_validation=consensus_and_equivocation`
      - Invalid (400)
      - Invalid due to early equivocation (400)
      - Only gossip (400)
      - Only consensus (400)
      - Pass (200)
  - `beacon/blinded_blocks`
    - `broadcast_validation=gossip`
      - Invalid (400)
      - Full Pass (200)
      - Partial Pass (202)
    - `broadcast_validation=consensus`
      - Invalid (400)
      - Only gossip (400)
      - ~~Only consensus pass (i.e., equivocates) (200)~~
      - Full pass (200)
    - `broadcast_validation=consensus_and_equivocation`
      - Invalid (400)
      - Invalid due to early equivocation (400)
      - Only gossip (400)
      - Only consensus (400)
      - Pass (200)
- Add a new trait, `IntoGossipVerifiedBlock`, which allows type-level guarantees to be made as to gossip validity
- Modify the structure of the `ObservedBlockProducers` cache from a `(slot, validator_index)` mapping to a `((slot, validator_index), block_root)` mapping
- Modify `ObservedBlockProducers::proposer_has_been_observed` to return a `SeenBlock` rather than a boolean on success
- Punish gossip peer (low) for submitting equivocating blocks
- Rename `BlockError::SlashablePublish` to `BlockError::SlashableProposal`

## Additional Info

This PR contains changes that directly modify how blocks are verified within the client. For more context, consult [comments in-thread](https://github.com/sigp/lighthouse/pull/4316#discussion_r1234724202). A caller-side sketch of the new query parameter follows below.

Co-authored-by: Michael Sproul <michael@sigmaprime.io>
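As a rough illustration of the query parameter from a caller's perspective, the sketch below builds publish URLs for each validation level. The enum and the `/eth/v2/beacon/blocks` path are assumptions drawn from the description above, not definitions taken from this file:

```
#[derive(Clone, Copy, Debug)]
enum BroadcastValidation {
    Gossip,
    Consensus,
    ConsensusAndEquivocation,
}

fn publish_url(base: &str, level: BroadcastValidation) -> String {
    let level = match level {
        BroadcastValidation::Gossip => "gossip",
        BroadcastValidation::Consensus => "consensus",
        BroadcastValidation::ConsensusAndEquivocation => "consensus_and_equivocation",
    };
    format!("{base}/eth/v2/beacon/blocks?broadcast_validation={level}")
}

fn main() {
    // e.g. http://localhost:5052/eth/v2/beacon/blocks?broadcast_validation=consensus
    println!(
        "{}",
        publish_url("http://localhost:5052", BroadcastValidation::Consensus)
    );
}
```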
                    .uor(post_beacon_blocks_v2)
                    .uor(post_beacon_blinded_blocks_v2)
                    .uor(post_beacon_pool_attestations)
                    .uor(post_beacon_pool_attester_slashings)
                    .uor(post_beacon_pool_proposer_slashings)
                    .uor(post_beacon_pool_voluntary_exits)
                    .uor(post_beacon_pool_sync_committees)
                    .uor(post_beacon_pool_bls_to_execution_changes)
                    .uor(post_beacon_rewards_attestations)
                    .uor(post_beacon_rewards_sync_committee)
                    .uor(post_validator_duties_attester)
                    .uor(post_validator_duties_sync)
                    .uor(post_validator_aggregate_and_proofs)
                    .uor(post_validator_contribution_and_proofs)
                    .uor(post_validator_beacon_committee_subscriptions)
                    .uor(post_validator_sync_committee_subscriptions)
                    .uor(post_validator_prepare_beacon_proposer)
                    .uor(post_validator_register_validator)
                    .uor(post_validator_liveness_epoch)
                    .uor(post_lighthouse_liveness)
                    .uor(post_lighthouse_database_reconstruct)
                    .uor(post_lighthouse_database_historical_blocks)
                    .uor(post_lighthouse_block_rewards)
                    .uor(post_lighthouse_ui_validator_metrics)
                    .uor(post_lighthouse_ui_validator_info)
                    .recover(warp_utils::reject::handle_rejection),
            ),
        )
        .recover(warp_utils::reject::handle_rejection)
        .with(slog_logging(log.clone()))
        .with(prometheus_metrics())
        // Add a `Server` header.
        .map(|reply| warp::reply::with_header(reply, "Server", &version_with_platform()))
        .with(cors_builder.build())
        .boxed();
    let http_socket: SocketAddr = SocketAddr::new(config.listen_addr, config.listen_port);
    let http_server: HttpServer = match config.tls_config {
        Some(tls_config) => {
            let (socket, server) = warp::serve(routes)
                .tls()
                .cert_path(tls_config.cert)
                .key_path(tls_config.key)
                .try_bind_with_graceful_shutdown(http_socket, async {
                    shutdown.await;
                })?;

            info!(log, "HTTP API is being served over TLS";);

            (socket, Box::pin(server))
        }
        None => {
            let (socket, server) =
                warp::serve(routes).try_bind_with_graceful_shutdown(http_socket, async {
                    shutdown.await;
                })?;
            (socket, Box::pin(server))
        }
    };

    info!(
        log,
        "HTTP API started";
        "listen_address" => %http_server.0,
    );

    Ok(http_server)
}
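The `shutdown` future awaited inside `try_bind_with_graceful_shutdown` above is supplied by the caller. The sketch below, assuming `tokio` and purely illustrative wiring rather than this crate's actual plumbing, shows how such a future can be built from a oneshot channel so another task can stop the server gracefully:

```
use tokio::sync::oneshot;

#[tokio::main]
async fn main() {
    let (shutdown_tx, shutdown_rx) = oneshot::channel::<()>();

    // The server would await this inside
    // `try_bind_with_graceful_shutdown(addr, async { shutdown.await; })`.
    let shutdown = async move {
        // Resolves on an explicit signal, or if the sender is dropped.
        let _ = shutdown_rx.await;
    };

    // Elsewhere: trigger the shutdown.
    let _ = shutdown_tx.send(());
    shutdown.await;
}
```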
/// Publish a message to the libp2p pubsub network.
fn publish_pubsub_message<T: EthSpec>(
    network_tx: &UnboundedSender<NetworkMessage<T>>,
    message: PubsubMessage<T>,
) -> Result<(), warp::Rejection> {
    publish_network_message(
        network_tx,
        NetworkMessage::Publish {
            messages: vec![message],
        },
    )
}

/// Publish a message to the libp2p network.
fn publish_network_message<T: EthSpec>(
    network_tx: &UnboundedSender<NetworkMessage<T>>,
    message: NetworkMessage<T>,
) -> Result<(), warp::Rejection> {
    network_tx.send(message).map_err(|e| {
        warp_utils::reject::custom_server_error(format!(
            "unable to publish to network channel: {}",
            e
        ))
    })
}
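The helper above maps a closed-channel send error into an HTTP-level rejection. A self-contained sketch of the same pattern, assuming the `tokio` crate and using illustrative names rather than Lighthouse APIs:

```
use tokio::sync::mpsc;

// Send on an unbounded channel and convert a closed-channel error into a
// string, mirroring how the handler converts it into a server error.
fn send_or_error<T>(tx: &mpsc::UnboundedSender<T>, msg: T) -> Result<(), String> {
    tx.send(msg)
        .map_err(|e| format!("unable to publish to network channel: {}", e))
}

fn main() {
    let (tx, rx) = mpsc::unbounded_channel::<u32>();
    assert!(send_or_error(&tx, 42).is_ok());

    // Once the receiving side is gone, subsequent sends surface an error.
    drop(rx);
    assert!(send_or_error(&tx, 43).is_err());
}
```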