2020-09-29 03:46:54 +00:00
|
|
|
//! This crate contains a HTTP server which serves the endpoints listed here:
|
|
|
|
//!
|
|
|
|
//! https://github.com/ethereum/eth2.0-APIs
|
|
|
|
//!
|
|
|
|
//! There are also some additional, non-standard endpoints behind the `/lighthouse/` path which are
|
|
|
|
//! used for development.
|
|
|
|
|
|
|
|
mod beacon_proposer_cache;
|
|
|
|
mod block_id;
|
2021-02-10 23:29:49 +00:00
|
|
|
mod broadcast_stream;
|
2020-09-29 03:46:54 +00:00
|
|
|
mod metrics;
|
|
|
|
mod state_id;
|
|
|
|
mod validator_inclusion;
|
|
|
|
|
|
|
|
use beacon_chain::{
|
2021-01-20 19:19:38 +00:00
|
|
|
attestation_verification::SignatureVerifiedAttestation,
|
|
|
|
observed_operations::ObservationOutcome, validator_monitor::timestamp_now,
|
|
|
|
AttestationError as AttnError, BeaconChain, BeaconChainError, BeaconChainTypes,
|
2020-09-29 03:46:54 +00:00
|
|
|
};
|
|
|
|
use beacon_proposer_cache::BeaconProposerCache;
|
|
|
|
use block_id::BlockId;
|
2021-02-10 23:29:49 +00:00
|
|
|
use eth2::types::{self as api_types, ValidatorId};
|
2020-10-22 02:59:42 +00:00
|
|
|
use eth2_libp2p::{types::SyncState, EnrExt, NetworkGlobals, PeerId, PubsubMessage};
|
2020-09-29 03:46:54 +00:00
|
|
|
use lighthouse_version::version_with_platform;
|
|
|
|
use network::NetworkMessage;
|
|
|
|
use parking_lot::Mutex;
|
|
|
|
use serde::{Deserialize, Serialize};
|
2020-11-22 03:39:13 +00:00
|
|
|
use slog::{crit, debug, error, info, warn, Logger};
|
2020-09-29 03:46:54 +00:00
|
|
|
use slot_clock::SlotClock;
|
2020-10-22 06:05:49 +00:00
|
|
|
use ssz::Encode;
|
2020-09-29 03:46:54 +00:00
|
|
|
use state_id::StateId;
|
|
|
|
use state_processing::per_slot_processing;
|
|
|
|
use std::borrow::Cow;
|
|
|
|
use std::convert::TryInto;
|
|
|
|
use std::future::Future;
|
|
|
|
use std::net::{Ipv4Addr, SocketAddr, SocketAddrV4};
|
|
|
|
use std::sync::Arc;
|
|
|
|
use tokio::sync::mpsc::UnboundedSender;
|
2021-02-10 23:29:49 +00:00
|
|
|
use tokio_stream::StreamExt;
|
2020-09-29 03:46:54 +00:00
|
|
|
use types::{
|
|
|
|
Attestation, AttestationDuty, AttesterSlashing, CloneConfig, CommitteeCache, Epoch, EthSpec,
|
2020-11-09 23:13:56 +00:00
|
|
|
Hash256, ProposerSlashing, PublicKey, PublicKeyBytes, RelativeEpoch, SignedAggregateAndProof,
|
2020-09-29 03:46:54 +00:00
|
|
|
SignedBeaconBlock, SignedVoluntaryExit, Slot, YamlConfig,
|
|
|
|
};
|
2020-11-28 05:30:57 +00:00
|
|
|
use warp::http::StatusCode;
|
2021-02-10 23:29:49 +00:00
|
|
|
use warp::sse::Event;
|
2021-01-06 03:01:46 +00:00
|
|
|
use warp::Reply;
|
2021-02-10 23:29:49 +00:00
|
|
|
use warp::{http::Response, Filter};
|
2020-10-02 09:42:19 +00:00
|
|
|
use warp_utils::task::{blocking_json_task, blocking_task};
|
2020-09-29 03:46:54 +00:00
|
|
|
|
|
|
|
/// The first segment of the standard API path (i.e., the `eth` in `/eth/v1/...`).
const API_PREFIX: &str = "eth";
/// The version segment of the standard API path (i.e., the `v1` in `/eth/v1/...`).
const API_VERSION: &str = "v1";

/// If the node is within this many epochs from the head, we declare it to be synced regardless of
/// the network sync state.
///
/// This helps prevent attacks where nodes can convince us that we're syncing some non-existent
/// finalized head.
const SYNC_TOLERANCE_EPOCHS: u64 = 8;
|
|
|
|
|
|
|
|
/// A wrapper around all the items required to spawn the HTTP server.
///
/// The server will gracefully handle the case where any fields are `None`.
pub struct Context<T: BeaconChainTypes> {
    /// Configuration for the HTTP server (enabled flag, listen address/port, CORS origin).
    pub config: Config,
    /// The beacon chain; requests needing it are rejected with "genesis has not yet been
    /// observed" while this is `None`.
    pub chain: Option<Arc<BeaconChain<T>>>,
    /// Sender for publishing messages to the network; requests needing it are rejected while
    /// this is `None` (networking stack not yet started).
    pub network_tx: Option<UnboundedSender<NetworkMessage<T::EthSpec>>>,
    /// Shared network state (e.g., sync status); requests needing it are rejected while this
    /// is `None`.
    pub network_globals: Option<Arc<NetworkGlobals<T::EthSpec>>>,
    /// The Eth1 service; requests needing it are rejected while this is `None` (e.g., when the
    /// node was not started with `--eth1`).
    pub eth1_service: Option<eth1::Service>,
    /// Logger used for all HTTP server output.
    pub log: Logger,
}
|
|
|
|
|
|
|
|
/// Configuration for the HTTP server.
#[derive(PartialEq, Debug, Clone, Serialize, Deserialize)]
pub struct Config {
    /// Whether the HTTP server should run; `serve` refuses to start when this is `false`.
    pub enabled: bool,
    /// The IPv4 address the server binds to (default: 127.0.0.1).
    pub listen_addr: Ipv4Addr,
    /// The TCP port the server binds to (default: 5052).
    pub listen_port: u16,
    /// The origin allowed for CORS requests; when `None`, presumably an origin derived from
    /// the listen address/port is used (see `warp_utils::cors::set_builder_origins`) — TODO
    /// confirm against that helper.
    pub allow_origin: Option<String>,
}
|
|
|
|
|
|
|
|
impl Default for Config {
|
|
|
|
fn default() -> Self {
|
|
|
|
Self {
|
|
|
|
enabled: false,
|
|
|
|
listen_addr: Ipv4Addr::new(127, 0, 0, 1),
|
|
|
|
listen_port: 5052,
|
|
|
|
allow_origin: None,
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
/// Errors that may occur whilst binding or configuring the HTTP server.
#[derive(Debug)]
pub enum Error {
    /// An error raised by the `warp` web framework (e.g., failure to bind the socket).
    Warp(warp::Error),
    /// Any other error, described by a human-readable message.
    Other(String),
}
|
|
|
|
|
|
|
|
impl From<warp::Error> for Error {
|
|
|
|
fn from(e: warp::Error) -> Self {
|
|
|
|
Error::Warp(e)
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
impl From<String> for Error {
|
|
|
|
fn from(e: String) -> Self {
|
|
|
|
Error::Other(e)
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
/// Creates a `warp` logging wrapper which we use to create `slog` logs.
|
|
|
|
pub fn slog_logging(
|
|
|
|
log: Logger,
|
|
|
|
) -> warp::filters::log::Log<impl Fn(warp::filters::log::Info) + Clone> {
|
|
|
|
warp::log::custom(move |info| {
|
|
|
|
match info.status() {
|
2020-10-22 02:59:42 +00:00
|
|
|
status
|
|
|
|
if status == StatusCode::OK
|
|
|
|
|| status == StatusCode::NOT_FOUND
|
|
|
|
|| status == StatusCode::PARTIAL_CONTENT =>
|
|
|
|
{
|
2020-11-22 03:39:13 +00:00
|
|
|
debug!(
|
2020-09-29 03:46:54 +00:00
|
|
|
log,
|
|
|
|
"Processed HTTP API request";
|
|
|
|
"elapsed" => format!("{:?}", info.elapsed()),
|
|
|
|
"status" => status.to_string(),
|
|
|
|
"path" => info.path(),
|
|
|
|
"method" => info.method().to_string(),
|
|
|
|
);
|
|
|
|
}
|
|
|
|
status => {
|
|
|
|
warn!(
|
|
|
|
log,
|
|
|
|
"Error processing HTTP API request";
|
|
|
|
"elapsed" => format!("{:?}", info.elapsed()),
|
|
|
|
"status" => status.to_string(),
|
|
|
|
"path" => info.path(),
|
|
|
|
"method" => info.method().to_string(),
|
|
|
|
);
|
|
|
|
}
|
|
|
|
};
|
|
|
|
})
|
|
|
|
}
|
|
|
|
|
|
|
|
/// Creates a `warp` logging wrapper which we use for Prometheus metrics (not necessarily logging,
|
|
|
|
/// per say).
|
|
|
|
pub fn prometheus_metrics() -> warp::filters::log::Log<impl Fn(warp::filters::log::Info) + Clone> {
|
|
|
|
warp::log::custom(move |info| {
|
|
|
|
// Here we restrict the `info.path()` value to some predefined values. Without this, we end
|
|
|
|
// up with a new metric type each time someone includes something unique in the path (e.g.,
|
|
|
|
// a block hash).
|
|
|
|
let path = {
|
|
|
|
let equals = |s: &'static str| -> Option<&'static str> {
|
|
|
|
if info.path() == format!("/{}/{}/{}", API_PREFIX, API_VERSION, s) {
|
|
|
|
Some(s)
|
|
|
|
} else {
|
|
|
|
None
|
|
|
|
}
|
|
|
|
};
|
|
|
|
|
|
|
|
let starts_with = |s: &'static str| -> Option<&'static str> {
|
|
|
|
if info
|
|
|
|
.path()
|
|
|
|
.starts_with(&format!("/{}/{}/{}", API_PREFIX, API_VERSION, s))
|
|
|
|
{
|
|
|
|
Some(s)
|
|
|
|
} else {
|
|
|
|
None
|
|
|
|
}
|
|
|
|
};
|
|
|
|
|
|
|
|
equals("beacon/blocks")
|
|
|
|
.or_else(|| starts_with("validator/duties/attester"))
|
|
|
|
.or_else(|| starts_with("validator/duties/proposer"))
|
|
|
|
.or_else(|| starts_with("validator/attestation_data"))
|
|
|
|
.or_else(|| starts_with("validator/blocks"))
|
|
|
|
.or_else(|| starts_with("validator/aggregate_attestation"))
|
|
|
|
.or_else(|| starts_with("validator/aggregate_and_proofs"))
|
|
|
|
.or_else(|| starts_with("validator/beacon_committee_subscriptions"))
|
|
|
|
.or_else(|| starts_with("beacon/"))
|
|
|
|
.or_else(|| starts_with("config/"))
|
|
|
|
.or_else(|| starts_with("debug/"))
|
|
|
|
.or_else(|| starts_with("events/"))
|
|
|
|
.or_else(|| starts_with("node/"))
|
|
|
|
.or_else(|| starts_with("validator/"))
|
|
|
|
.unwrap_or("other")
|
|
|
|
};
|
|
|
|
|
|
|
|
metrics::inc_counter_vec(&metrics::HTTP_API_PATHS_TOTAL, &[path]);
|
|
|
|
metrics::inc_counter_vec(
|
|
|
|
&metrics::HTTP_API_STATUS_CODES_TOTAL,
|
|
|
|
&[&info.status().to_string()],
|
|
|
|
);
|
|
|
|
metrics::observe_timer_vec(&metrics::HTTP_API_PATHS_TIMES, &[path], info.elapsed());
|
|
|
|
})
|
|
|
|
}
|
|
|
|
|
|
|
|
/// Creates a server that will serve requests using information from `ctx`.
|
|
|
|
///
|
|
|
|
/// The server will shut down gracefully when the `shutdown` future resolves.
|
|
|
|
///
|
|
|
|
/// ## Returns
|
|
|
|
///
|
|
|
|
/// This function will bind the server to the provided address and then return a tuple of:
|
|
|
|
///
|
|
|
|
/// - `SocketAddr`: the address that the HTTP server will listen on.
|
|
|
|
/// - `Future`: the actual server future that will need to be awaited.
|
|
|
|
///
|
|
|
|
/// ## Errors
|
|
|
|
///
|
|
|
|
/// Returns an error if the server is unable to bind or there is another error during
|
|
|
|
/// configuration.
|
|
|
|
pub fn serve<T: BeaconChainTypes>(
|
|
|
|
ctx: Arc<Context<T>>,
|
|
|
|
shutdown: impl Future<Output = ()> + Send + Sync + 'static,
|
|
|
|
) -> Result<(SocketAddr, impl Future<Output = ()>), Error> {
|
|
|
|
let config = ctx.config.clone();
|
|
|
|
let log = ctx.log.clone();
|
2020-10-22 04:47:27 +00:00
|
|
|
|
|
|
|
// Configure CORS.
|
|
|
|
let cors_builder = {
|
|
|
|
let builder = warp::cors()
|
|
|
|
.allow_methods(vec!["GET", "POST"])
|
|
|
|
.allow_headers(vec!["Content-Type"]);
|
|
|
|
|
|
|
|
warp_utils::cors::set_builder_origins(
|
|
|
|
builder,
|
|
|
|
config.allow_origin.as_deref(),
|
|
|
|
(config.listen_addr, config.listen_port),
|
|
|
|
)?
|
|
|
|
};
|
2020-09-29 03:46:54 +00:00
|
|
|
|
|
|
|
// Sanity check.
|
|
|
|
if !config.enabled {
|
|
|
|
crit!(log, "Cannot start disabled HTTP server");
|
|
|
|
return Err(Error::Other(
|
|
|
|
"A disabled server should not be started".to_string(),
|
|
|
|
));
|
|
|
|
}
|
|
|
|
|
|
|
|
let eth1_v1 = warp::path(API_PREFIX).and(warp::path(API_VERSION));
|
|
|
|
|
|
|
|
// Instantiate the beacon proposer cache.
|
|
|
|
let beacon_proposer_cache = ctx
|
|
|
|
.chain
|
|
|
|
.as_ref()
|
|
|
|
.map(|chain| BeaconProposerCache::new(&chain))
|
|
|
|
.transpose()
|
|
|
|
.map_err(|e| format!("Unable to initialize beacon proposer cache: {:?}", e))?
|
|
|
|
.map(Mutex::new)
|
|
|
|
.map(Arc::new);
|
|
|
|
|
|
|
|
// Create a `warp` filter that provides access to the proposer cache.
|
|
|
|
let beacon_proposer_cache = || {
|
|
|
|
warp::any()
|
|
|
|
.map(move || beacon_proposer_cache.clone())
|
|
|
|
.and_then(|beacon_proposer_cache| async move {
|
|
|
|
match beacon_proposer_cache {
|
|
|
|
Some(cache) => Ok(cache),
|
|
|
|
None => Err(warp_utils::reject::custom_not_found(
|
|
|
|
"Beacon proposer cache is not initialized.".to_string(),
|
|
|
|
)),
|
|
|
|
}
|
|
|
|
})
|
|
|
|
};
|
|
|
|
|
|
|
|
// Create a `warp` filter that provides access to the network globals.
|
|
|
|
let inner_network_globals = ctx.network_globals.clone();
|
|
|
|
let network_globals = warp::any()
|
|
|
|
.map(move || inner_network_globals.clone())
|
|
|
|
.and_then(|network_globals| async move {
|
|
|
|
match network_globals {
|
|
|
|
Some(globals) => Ok(globals),
|
|
|
|
None => Err(warp_utils::reject::custom_not_found(
|
|
|
|
"network globals are not initialized.".to_string(),
|
|
|
|
)),
|
|
|
|
}
|
|
|
|
});
|
|
|
|
|
|
|
|
// Create a `warp` filter that provides access to the beacon chain.
|
|
|
|
let inner_ctx = ctx.clone();
|
|
|
|
let chain_filter =
|
|
|
|
warp::any()
|
|
|
|
.map(move || inner_ctx.chain.clone())
|
|
|
|
.and_then(|chain| async move {
|
|
|
|
match chain {
|
|
|
|
Some(chain) => Ok(chain),
|
|
|
|
None => Err(warp_utils::reject::custom_not_found(
|
|
|
|
"Beacon chain genesis has not yet been observed.".to_string(),
|
|
|
|
)),
|
|
|
|
}
|
|
|
|
});
|
|
|
|
|
|
|
|
// Create a `warp` filter that provides access to the network sender channel.
|
|
|
|
let inner_ctx = ctx.clone();
|
|
|
|
let network_tx_filter = warp::any()
|
|
|
|
.map(move || inner_ctx.network_tx.clone())
|
|
|
|
.and_then(|network_tx| async move {
|
|
|
|
match network_tx {
|
|
|
|
Some(network_tx) => Ok(network_tx),
|
|
|
|
None => Err(warp_utils::reject::custom_not_found(
|
|
|
|
"The networking stack has not yet started.".to_string(),
|
|
|
|
)),
|
|
|
|
}
|
|
|
|
});
|
|
|
|
|
2020-11-02 00:37:30 +00:00
|
|
|
// Create a `warp` filter that provides access to the Eth1 service.
|
|
|
|
let inner_ctx = ctx.clone();
|
|
|
|
let eth1_service_filter = warp::any()
|
|
|
|
.map(move || inner_ctx.eth1_service.clone())
|
|
|
|
.and_then(|eth1_service| async move {
|
|
|
|
match eth1_service {
|
|
|
|
Some(eth1_service) => Ok(eth1_service),
|
|
|
|
None => Err(warp_utils::reject::custom_not_found(
|
|
|
|
"The Eth1 service is not started. Use --eth1 on the CLI.".to_string(),
|
|
|
|
)),
|
|
|
|
}
|
|
|
|
});
|
|
|
|
|
2020-09-29 03:46:54 +00:00
|
|
|
// Create a `warp` filter that rejects request whilst the node is syncing.
|
|
|
|
let not_while_syncing_filter = warp::any()
|
|
|
|
.and(network_globals.clone())
|
|
|
|
.and(chain_filter.clone())
|
|
|
|
.and_then(
|
|
|
|
|network_globals: Arc<NetworkGlobals<T::EthSpec>>, chain: Arc<BeaconChain<T>>| async move {
|
|
|
|
match *network_globals.sync_state.read() {
|
2020-10-21 22:02:25 +00:00
|
|
|
SyncState::SyncingFinalized { .. } => {
|
|
|
|
let head_slot = chain.best_slot().map_err(warp_utils::reject::beacon_chain_error)?;
|
|
|
|
|
2020-09-29 03:46:54 +00:00
|
|
|
let current_slot = chain
|
|
|
|
.slot_clock
|
|
|
|
.now_or_genesis()
|
|
|
|
.ok_or_else(|| {
|
|
|
|
warp_utils::reject::custom_server_error(
|
|
|
|
"unable to read slot clock".to_string(),
|
|
|
|
)
|
|
|
|
})?;
|
|
|
|
|
|
|
|
let tolerance = SYNC_TOLERANCE_EPOCHS * T::EthSpec::slots_per_epoch();
|
|
|
|
|
2020-10-21 22:02:25 +00:00
|
|
|
if head_slot + tolerance >= current_slot {
|
2020-09-29 03:46:54 +00:00
|
|
|
Ok(())
|
|
|
|
} else {
|
|
|
|
Err(warp_utils::reject::not_synced(format!(
|
|
|
|
"head slot is {}, current slot is {}",
|
2020-10-21 22:02:25 +00:00
|
|
|
head_slot, current_slot
|
2020-09-29 03:46:54 +00:00
|
|
|
)))
|
|
|
|
}
|
|
|
|
}
|
2020-11-01 23:37:39 +00:00
|
|
|
SyncState::SyncingHead { .. } | SyncState::SyncTransition => Ok(()),
|
2020-09-29 03:46:54 +00:00
|
|
|
SyncState::Synced => Ok(()),
|
|
|
|
SyncState::Stalled => Err(warp_utils::reject::not_synced(
|
|
|
|
"sync is stalled".to_string(),
|
|
|
|
)),
|
|
|
|
}
|
|
|
|
},
|
|
|
|
)
|
|
|
|
.untuple_one();
|
|
|
|
|
|
|
|
// Create a `warp` filter that provides access to the logger.
|
|
|
|
let log_filter = warp::any().map(move || ctx.log.clone());
|
|
|
|
|
|
|
|
/*
|
|
|
|
*
|
|
|
|
* Start of HTTP method definitions.
|
|
|
|
*
|
|
|
|
*/
|
|
|
|
|
|
|
|
// GET beacon/genesis
|
|
|
|
let get_beacon_genesis = eth1_v1
|
|
|
|
.and(warp::path("beacon"))
|
|
|
|
.and(warp::path("genesis"))
|
|
|
|
.and(warp::path::end())
|
|
|
|
.and(chain_filter.clone())
|
|
|
|
.and_then(|chain: Arc<BeaconChain<T>>| {
|
|
|
|
blocking_json_task(move || {
|
|
|
|
chain
|
|
|
|
.head_info()
|
|
|
|
.map_err(warp_utils::reject::beacon_chain_error)
|
|
|
|
.map(|head| api_types::GenesisData {
|
|
|
|
genesis_time: head.genesis_time,
|
|
|
|
genesis_validators_root: head.genesis_validators_root,
|
|
|
|
genesis_fork_version: chain.spec.genesis_fork_version,
|
|
|
|
})
|
|
|
|
.map(api_types::GenericResponse::from)
|
|
|
|
})
|
|
|
|
});
|
|
|
|
|
|
|
|
/*
|
|
|
|
* beacon/states/{state_id}
|
|
|
|
*/
|
|
|
|
|
|
|
|
let beacon_states_path = eth1_v1
|
|
|
|
.and(warp::path("beacon"))
|
|
|
|
.and(warp::path("states"))
|
2020-12-03 23:10:08 +00:00
|
|
|
.and(warp::path::param::<StateId>().or_else(|_| async {
|
|
|
|
Err(warp_utils::reject::custom_bad_request(
|
|
|
|
"Invalid state ID".to_string(),
|
|
|
|
))
|
2020-11-09 23:13:56 +00:00
|
|
|
}))
|
2020-09-29 03:46:54 +00:00
|
|
|
.and(chain_filter.clone());
|
|
|
|
|
|
|
|
// GET beacon/states/{state_id}/root
|
|
|
|
let get_beacon_state_root = beacon_states_path
|
|
|
|
.clone()
|
|
|
|
.and(warp::path("root"))
|
|
|
|
.and(warp::path::end())
|
|
|
|
.and_then(|state_id: StateId, chain: Arc<BeaconChain<T>>| {
|
|
|
|
blocking_json_task(move || {
|
|
|
|
state_id
|
|
|
|
.root(&chain)
|
|
|
|
.map(api_types::RootData::from)
|
|
|
|
.map(api_types::GenericResponse::from)
|
|
|
|
})
|
|
|
|
});
|
|
|
|
|
|
|
|
// GET beacon/states/{state_id}/fork
|
|
|
|
let get_beacon_state_fork = beacon_states_path
|
|
|
|
.clone()
|
|
|
|
.and(warp::path("fork"))
|
|
|
|
.and(warp::path::end())
|
|
|
|
.and_then(|state_id: StateId, chain: Arc<BeaconChain<T>>| {
|
|
|
|
blocking_json_task(move || state_id.fork(&chain).map(api_types::GenericResponse::from))
|
|
|
|
});
|
|
|
|
|
|
|
|
// GET beacon/states/{state_id}/finality_checkpoints
|
|
|
|
let get_beacon_state_finality_checkpoints = beacon_states_path
|
|
|
|
.clone()
|
|
|
|
.and(warp::path("finality_checkpoints"))
|
|
|
|
.and(warp::path::end())
|
|
|
|
.and_then(|state_id: StateId, chain: Arc<BeaconChain<T>>| {
|
|
|
|
blocking_json_task(move || {
|
|
|
|
state_id
|
|
|
|
.map_state(&chain, |state| {
|
|
|
|
Ok(api_types::FinalityCheckpointsData {
|
|
|
|
previous_justified: state.previous_justified_checkpoint,
|
|
|
|
current_justified: state.current_justified_checkpoint,
|
|
|
|
finalized: state.finalized_checkpoint,
|
|
|
|
})
|
|
|
|
})
|
|
|
|
.map(api_types::GenericResponse::from)
|
|
|
|
})
|
|
|
|
});
|
|
|
|
|
2020-11-09 23:13:56 +00:00
|
|
|
// GET beacon/states/{state_id}/validator_balances?id
|
|
|
|
let get_beacon_state_validator_balances = beacon_states_path
|
|
|
|
.clone()
|
|
|
|
.and(warp::path("validator_balances"))
|
|
|
|
.and(warp::path::end())
|
|
|
|
.and(warp::query::<api_types::ValidatorBalancesQuery>())
|
|
|
|
.and_then(
|
|
|
|
|state_id: StateId,
|
|
|
|
chain: Arc<BeaconChain<T>>,
|
|
|
|
query: api_types::ValidatorBalancesQuery| {
|
|
|
|
blocking_json_task(move || {
|
|
|
|
state_id
|
|
|
|
.map_state(&chain, |state| {
|
|
|
|
Ok(state
|
|
|
|
.validators
|
|
|
|
.iter()
|
|
|
|
.zip(state.balances.iter())
|
|
|
|
.enumerate()
|
|
|
|
// filter by validator id(s) if provided
|
|
|
|
.filter(|(index, (validator, _))| {
|
|
|
|
query.id.as_ref().map_or(true, |ids| {
|
|
|
|
ids.0.iter().any(|id| match id {
|
|
|
|
ValidatorId::PublicKey(pubkey) => {
|
|
|
|
&validator.pubkey == pubkey
|
|
|
|
}
|
|
|
|
ValidatorId::Index(param_index) => {
|
|
|
|
*param_index == *index as u64
|
|
|
|
}
|
|
|
|
})
|
|
|
|
})
|
|
|
|
})
|
|
|
|
.map(|(index, (_, balance))| {
|
|
|
|
Some(api_types::ValidatorBalanceData {
|
|
|
|
index: index as u64,
|
|
|
|
balance: *balance,
|
|
|
|
})
|
|
|
|
})
|
|
|
|
.collect::<Vec<_>>())
|
|
|
|
})
|
|
|
|
.map(api_types::GenericResponse::from)
|
|
|
|
})
|
|
|
|
},
|
|
|
|
);
|
|
|
|
|
2020-10-29 05:13:04 +00:00
|
|
|
// GET beacon/states/{state_id}/validators?id,status
|
2020-09-29 03:46:54 +00:00
|
|
|
let get_beacon_state_validators = beacon_states_path
|
|
|
|
.clone()
|
|
|
|
.and(warp::path("validators"))
|
2020-10-29 05:13:04 +00:00
|
|
|
.and(warp::query::<api_types::ValidatorsQuery>())
|
2020-09-29 03:46:54 +00:00
|
|
|
.and(warp::path::end())
|
2020-10-29 05:13:04 +00:00
|
|
|
.and_then(
|
|
|
|
|state_id: StateId, chain: Arc<BeaconChain<T>>, query: api_types::ValidatorsQuery| {
|
|
|
|
blocking_json_task(move || {
|
|
|
|
state_id
|
|
|
|
.map_state(&chain, |state| {
|
|
|
|
let epoch = state.current_epoch();
|
|
|
|
let far_future_epoch = chain.spec.far_future_epoch;
|
|
|
|
|
|
|
|
Ok(state
|
|
|
|
.validators
|
|
|
|
.iter()
|
|
|
|
.zip(state.balances.iter())
|
|
|
|
.enumerate()
|
|
|
|
// filter by validator id(s) if provided
|
|
|
|
.filter(|(index, (validator, _))| {
|
|
|
|
query.id.as_ref().map_or(true, |ids| {
|
|
|
|
ids.0.iter().any(|id| match id {
|
|
|
|
ValidatorId::PublicKey(pubkey) => {
|
|
|
|
&validator.pubkey == pubkey
|
|
|
|
}
|
|
|
|
ValidatorId::Index(param_index) => {
|
|
|
|
*param_index == *index as u64
|
|
|
|
}
|
|
|
|
})
|
|
|
|
})
|
|
|
|
})
|
|
|
|
// filter by status(es) if provided and map the result
|
|
|
|
.filter_map(|(index, (validator, balance))| {
|
|
|
|
let status = api_types::ValidatorStatus::from_validator(
|
2021-02-24 04:15:13 +00:00
|
|
|
validator,
|
2020-10-29 05:13:04 +00:00
|
|
|
epoch,
|
|
|
|
far_future_epoch,
|
|
|
|
);
|
|
|
|
|
2021-02-24 04:15:13 +00:00
|
|
|
let status_matches =
|
|
|
|
query.status.as_ref().map_or(true, |statuses| {
|
|
|
|
statuses.0.contains(&status)
|
|
|
|
|| statuses.0.contains(&status.superstatus())
|
|
|
|
});
|
|
|
|
|
|
|
|
if status_matches {
|
2020-10-29 05:13:04 +00:00
|
|
|
Some(api_types::ValidatorData {
|
|
|
|
index: index as u64,
|
|
|
|
balance: *balance,
|
|
|
|
status,
|
|
|
|
validator: validator.clone(),
|
|
|
|
})
|
|
|
|
} else {
|
|
|
|
None
|
|
|
|
}
|
|
|
|
})
|
|
|
|
.collect::<Vec<_>>())
|
|
|
|
})
|
|
|
|
.map(api_types::GenericResponse::from)
|
|
|
|
})
|
|
|
|
},
|
|
|
|
);
|
2020-09-29 03:46:54 +00:00
|
|
|
|
|
|
|
// GET beacon/states/{state_id}/validators/{validator_id}
|
|
|
|
let get_beacon_state_validators_id = beacon_states_path
|
|
|
|
.clone()
|
|
|
|
.and(warp::path("validators"))
|
2020-12-03 23:10:08 +00:00
|
|
|
.and(warp::path::param::<ValidatorId>().or_else(|_| async {
|
|
|
|
Err(warp_utils::reject::custom_bad_request(
|
|
|
|
"Invalid validator ID".to_string(),
|
|
|
|
))
|
|
|
|
}))
|
2020-09-29 03:46:54 +00:00
|
|
|
.and(warp::path::end())
|
|
|
|
.and_then(
|
|
|
|
|state_id: StateId, chain: Arc<BeaconChain<T>>, validator_id: ValidatorId| {
|
|
|
|
blocking_json_task(move || {
|
|
|
|
state_id
|
|
|
|
.map_state(&chain, |state| {
|
|
|
|
let index_opt = match &validator_id {
|
|
|
|
ValidatorId::PublicKey(pubkey) => {
|
|
|
|
state.validators.iter().position(|v| v.pubkey == *pubkey)
|
|
|
|
}
|
|
|
|
ValidatorId::Index(index) => Some(*index as usize),
|
|
|
|
};
|
|
|
|
|
|
|
|
index_opt
|
|
|
|
.and_then(|index| {
|
|
|
|
let validator = state.validators.get(index)?;
|
|
|
|
let balance = *state.balances.get(index)?;
|
|
|
|
let epoch = state.current_epoch();
|
|
|
|
let far_future_epoch = chain.spec.far_future_epoch;
|
|
|
|
|
|
|
|
Some(api_types::ValidatorData {
|
|
|
|
index: index as u64,
|
|
|
|
balance,
|
|
|
|
status: api_types::ValidatorStatus::from_validator(
|
2021-02-24 04:15:13 +00:00
|
|
|
validator,
|
2020-09-29 03:46:54 +00:00
|
|
|
epoch,
|
|
|
|
far_future_epoch,
|
|
|
|
),
|
|
|
|
validator: validator.clone(),
|
|
|
|
})
|
|
|
|
})
|
|
|
|
.ok_or_else(|| {
|
|
|
|
warp_utils::reject::custom_not_found(format!(
|
|
|
|
"unknown validator: {}",
|
|
|
|
validator_id
|
|
|
|
))
|
|
|
|
})
|
|
|
|
})
|
|
|
|
.map(api_types::GenericResponse::from)
|
|
|
|
})
|
|
|
|
},
|
|
|
|
);
|
|
|
|
|
2020-11-18 23:31:39 +00:00
|
|
|
// GET beacon/states/{state_id}/committees?slot,index,epoch
|
2020-09-29 03:46:54 +00:00
|
|
|
let get_beacon_state_committees = beacon_states_path
|
|
|
|
.clone()
|
|
|
|
.and(warp::path("committees"))
|
|
|
|
.and(warp::query::<api_types::CommitteesQuery>())
|
|
|
|
.and(warp::path::end())
|
|
|
|
.and_then(
|
2020-11-18 23:31:39 +00:00
|
|
|
|state_id: StateId, chain: Arc<BeaconChain<T>>, query: api_types::CommitteesQuery| {
|
|
|
|
// the api spec says if the epoch is not present then the epoch of the state should be used
|
|
|
|
let query_state_id = query.epoch.map_or(state_id, |epoch| {
|
|
|
|
StateId::slot(epoch.start_slot(T::EthSpec::slots_per_epoch()))
|
|
|
|
});
|
|
|
|
|
2020-09-29 03:46:54 +00:00
|
|
|
blocking_json_task(move || {
|
2020-11-18 23:31:39 +00:00
|
|
|
query_state_id.map_state(&chain, |state| {
|
|
|
|
let epoch = state.slot.epoch(T::EthSpec::slots_per_epoch());
|
2020-09-29 03:46:54 +00:00
|
|
|
|
|
|
|
let committee_cache = if state
|
2020-11-18 23:31:39 +00:00
|
|
|
.committee_cache_is_initialized(RelativeEpoch::Current)
|
2020-09-29 03:46:54 +00:00
|
|
|
{
|
2020-11-18 23:31:39 +00:00
|
|
|
state
|
|
|
|
.committee_cache(RelativeEpoch::Current)
|
|
|
|
.map(Cow::Borrowed)
|
2020-09-29 03:46:54 +00:00
|
|
|
} else {
|
|
|
|
CommitteeCache::initialized(state, epoch, &chain.spec).map(Cow::Owned)
|
|
|
|
}
|
2020-11-18 23:31:39 +00:00
|
|
|
.map_err(BeaconChainError::BeaconStateError)
|
|
|
|
.map_err(warp_utils::reject::beacon_chain_error)?;
|
2020-09-29 03:46:54 +00:00
|
|
|
|
|
|
|
// Use either the supplied slot or all slots in the epoch.
|
|
|
|
let slots = query.slot.map(|slot| vec![slot]).unwrap_or_else(|| {
|
|
|
|
epoch.slot_iter(T::EthSpec::slots_per_epoch()).collect()
|
|
|
|
});
|
|
|
|
|
|
|
|
// Use either the supplied committee index or all available indices.
|
|
|
|
let indices = query.index.map(|index| vec![index]).unwrap_or_else(|| {
|
|
|
|
(0..committee_cache.committees_per_slot()).collect()
|
|
|
|
});
|
|
|
|
|
|
|
|
let mut response = Vec::with_capacity(slots.len() * indices.len());
|
|
|
|
|
|
|
|
for slot in slots {
|
|
|
|
// It is not acceptable to query with a slot that is not within the
|
|
|
|
// specified epoch.
|
|
|
|
if slot.epoch(T::EthSpec::slots_per_epoch()) != epoch {
|
|
|
|
return Err(warp_utils::reject::custom_bad_request(format!(
|
|
|
|
"{} is not in epoch {}",
|
|
|
|
slot, epoch
|
|
|
|
)));
|
|
|
|
}
|
|
|
|
|
|
|
|
for &index in &indices {
|
|
|
|
let committee = committee_cache
|
|
|
|
.get_beacon_committee(slot, index)
|
|
|
|
.ok_or_else(|| {
|
2020-11-18 23:31:39 +00:00
|
|
|
warp_utils::reject::custom_bad_request(format!(
|
|
|
|
"committee index {} does not exist in epoch {}",
|
|
|
|
index, epoch
|
|
|
|
))
|
|
|
|
})?;
|
2020-09-29 03:46:54 +00:00
|
|
|
|
|
|
|
response.push(api_types::CommitteeData {
|
|
|
|
index,
|
|
|
|
slot,
|
|
|
|
validators: committee
|
|
|
|
.committee
|
|
|
|
.iter()
|
|
|
|
.map(|i| *i as u64)
|
|
|
|
.collect(),
|
|
|
|
});
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
Ok(api_types::GenericResponse::from(response))
|
|
|
|
})
|
|
|
|
})
|
|
|
|
},
|
|
|
|
);
|
|
|
|
|
|
|
|
// GET beacon/headers
|
|
|
|
//
|
|
|
|
// Note: this endpoint only returns information about blocks in the canonical chain. Given that
|
|
|
|
// there's a `canonical` flag on the response, I assume it should also return non-canonical
|
|
|
|
// things. Returning non-canonical things is hard for us since we don't already have a
|
|
|
|
// mechanism for arbitrary forwards block iteration, we only support iterating forwards along
|
|
|
|
// the canonical chain.
|
|
|
|
let get_beacon_headers = eth1_v1
|
|
|
|
.and(warp::path("beacon"))
|
|
|
|
.and(warp::path("headers"))
|
|
|
|
.and(warp::query::<api_types::HeadersQuery>())
|
|
|
|
.and(warp::path::end())
|
|
|
|
.and(chain_filter.clone())
|
|
|
|
.and_then(
|
|
|
|
|query: api_types::HeadersQuery, chain: Arc<BeaconChain<T>>| {
|
|
|
|
blocking_json_task(move || {
|
|
|
|
let (root, block) = match (query.slot, query.parent_root) {
|
|
|
|
// No query parameters, return the canonical head block.
|
|
|
|
(None, None) => chain
|
|
|
|
.head_beacon_block()
|
|
|
|
.map_err(warp_utils::reject::beacon_chain_error)
|
|
|
|
.map(|block| (block.canonical_root(), block))?,
|
|
|
|
// Only the parent root parameter, do a forwards-iterator lookup.
|
|
|
|
(None, Some(parent_root)) => {
|
|
|
|
let parent = BlockId::from_root(parent_root).block(&chain)?;
|
|
|
|
let (root, _slot) = chain
|
|
|
|
.forwards_iter_block_roots(parent.slot())
|
|
|
|
.map_err(warp_utils::reject::beacon_chain_error)?
|
|
|
|
// Ignore any skip-slots immediately following the parent.
|
|
|
|
.find(|res| {
|
|
|
|
res.as_ref().map_or(false, |(root, _)| *root != parent_root)
|
|
|
|
})
|
|
|
|
.transpose()
|
|
|
|
.map_err(warp_utils::reject::beacon_chain_error)?
|
|
|
|
.ok_or_else(|| {
|
|
|
|
warp_utils::reject::custom_not_found(format!(
|
|
|
|
"child of block with root {}",
|
|
|
|
parent_root
|
|
|
|
))
|
|
|
|
})?;
|
|
|
|
|
|
|
|
BlockId::from_root(root)
|
|
|
|
.block(&chain)
|
|
|
|
.map(|block| (root, block))?
|
|
|
|
}
|
|
|
|
// Slot is supplied, search by slot and optionally filter by
|
|
|
|
// parent root.
|
|
|
|
(Some(slot), parent_root_opt) => {
|
|
|
|
let root = BlockId::from_slot(slot).root(&chain)?;
|
|
|
|
let block = BlockId::from_root(root).block(&chain)?;
|
|
|
|
|
|
|
|
// If the parent root was supplied, check that it matches the block
|
|
|
|
// obtained via a slot lookup.
|
|
|
|
if let Some(parent_root) = parent_root_opt {
|
|
|
|
if block.parent_root() != parent_root {
|
|
|
|
return Err(warp_utils::reject::custom_not_found(format!(
|
|
|
|
"no canonical block at slot {} with parent root {}",
|
|
|
|
slot, parent_root
|
|
|
|
)));
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
(root, block)
|
|
|
|
}
|
|
|
|
};
|
|
|
|
|
|
|
|
let data = api_types::BlockHeaderData {
|
|
|
|
root,
|
|
|
|
canonical: true,
|
|
|
|
header: api_types::BlockHeaderAndSignature {
|
|
|
|
message: block.message.block_header(),
|
|
|
|
signature: block.signature.into(),
|
|
|
|
},
|
|
|
|
};
|
|
|
|
|
|
|
|
Ok(api_types::GenericResponse::from(vec![data]))
|
|
|
|
})
|
|
|
|
},
|
|
|
|
);
|
|
|
|
|
|
|
|
// GET beacon/headers/{block_id}
|
|
|
|
let get_beacon_headers_block_id = eth1_v1
|
|
|
|
.and(warp::path("beacon"))
|
|
|
|
.and(warp::path("headers"))
|
2020-12-03 23:10:08 +00:00
|
|
|
.and(warp::path::param::<BlockId>().or_else(|_| async {
|
|
|
|
Err(warp_utils::reject::custom_bad_request(
|
|
|
|
"Invalid block ID".to_string(),
|
|
|
|
))
|
|
|
|
}))
|
2020-09-29 03:46:54 +00:00
|
|
|
.and(warp::path::end())
|
|
|
|
.and(chain_filter.clone())
|
|
|
|
.and_then(|block_id: BlockId, chain: Arc<BeaconChain<T>>| {
|
|
|
|
blocking_json_task(move || {
|
|
|
|
let root = block_id.root(&chain)?;
|
|
|
|
let block = BlockId::from_root(root).block(&chain)?;
|
|
|
|
|
|
|
|
let canonical = chain
|
|
|
|
.block_root_at_slot(block.slot())
|
|
|
|
.map_err(warp_utils::reject::beacon_chain_error)?
|
|
|
|
.map_or(false, |canonical| root == canonical);
|
|
|
|
|
|
|
|
let data = api_types::BlockHeaderData {
|
|
|
|
root,
|
|
|
|
canonical,
|
|
|
|
header: api_types::BlockHeaderAndSignature {
|
|
|
|
message: block.message.block_header(),
|
|
|
|
signature: block.signature.into(),
|
|
|
|
},
|
|
|
|
};
|
|
|
|
|
|
|
|
Ok(api_types::GenericResponse::from(data))
|
|
|
|
})
|
|
|
|
});
|
|
|
|
|
|
|
|
/*
|
|
|
|
* beacon/blocks
|
|
|
|
*/
|
|
|
|
|
2020-11-09 23:13:56 +00:00
|
|
|
// POST beacon/blocks
|
2020-09-29 03:46:54 +00:00
|
|
|
let post_beacon_blocks = eth1_v1
|
|
|
|
.and(warp::path("beacon"))
|
|
|
|
.and(warp::path("blocks"))
|
|
|
|
.and(warp::path::end())
|
|
|
|
.and(warp::body::json())
|
|
|
|
.and(chain_filter.clone())
|
|
|
|
.and(network_tx_filter.clone())
|
|
|
|
.and(log_filter.clone())
|
|
|
|
.and_then(
|
|
|
|
|block: SignedBeaconBlock<T::EthSpec>,
|
|
|
|
chain: Arc<BeaconChain<T>>,
|
|
|
|
network_tx: UnboundedSender<NetworkMessage<T::EthSpec>>,
|
|
|
|
log: Logger| {
|
|
|
|
blocking_json_task(move || {
|
2021-01-20 19:19:38 +00:00
|
|
|
let seen_timestamp = timestamp_now();
|
|
|
|
|
2020-09-29 03:46:54 +00:00
|
|
|
// Send the block, regardless of whether or not it is valid. The API
|
|
|
|
// specification is very clear that this is the desired behaviour.
|
|
|
|
publish_pubsub_message(
|
|
|
|
&network_tx,
|
|
|
|
PubsubMessage::BeaconBlock(Box::new(block.clone())),
|
|
|
|
)?;
|
|
|
|
|
|
|
|
match chain.process_block(block.clone()) {
|
|
|
|
Ok(root) => {
|
|
|
|
info!(
|
|
|
|
log,
|
|
|
|
"Valid block from HTTP API";
|
|
|
|
"root" => format!("{}", root)
|
|
|
|
);
|
|
|
|
|
2021-01-20 19:19:38 +00:00
|
|
|
// Notify the validator monitor.
|
|
|
|
chain.validator_monitor.read().register_api_block(
|
|
|
|
seen_timestamp,
|
|
|
|
&block.message,
|
|
|
|
root,
|
|
|
|
&chain.slot_clock,
|
|
|
|
);
|
|
|
|
|
2020-09-29 03:46:54 +00:00
|
|
|
// Update the head since it's likely this block will become the new
|
|
|
|
// head.
|
|
|
|
chain
|
|
|
|
.fork_choice()
|
|
|
|
.map_err(warp_utils::reject::beacon_chain_error)?;
|
|
|
|
|
|
|
|
Ok(())
|
|
|
|
}
|
|
|
|
Err(e) => {
|
|
|
|
let msg = format!("{:?}", e);
|
|
|
|
error!(
|
|
|
|
log,
|
|
|
|
"Invalid block provided to HTTP API";
|
|
|
|
"reason" => &msg
|
|
|
|
);
|
|
|
|
Err(warp_utils::reject::broadcast_without_import(msg))
|
|
|
|
}
|
|
|
|
}
|
|
|
|
})
|
|
|
|
},
|
|
|
|
);
|
|
|
|
|
|
|
|
    // Common base filter for `beacon/blocks/{block_id}` routes: parses the block ID
    // path segment and supplies the beacon chain handle.
    let beacon_blocks_path = eth1_v1
        .and(warp::path("beacon"))
        .and(warp::path("blocks"))
        .and(warp::path::param::<BlockId>().or_else(|_| async {
            // Unparseable path segment becomes a 400 rather than a route mismatch.
            Err(warp_utils::reject::custom_bad_request(
                "Invalid block ID".to_string(),
            ))
        }))
        .and(chain_filter.clone());
|
|
|
|
|
|
|
|
    // GET beacon/blocks/{block_id}
    //
    // Returns the block as SSZ bytes when the client sends
    // `Accept: application/octet-stream`, otherwise as JSON.
    let get_beacon_block = beacon_blocks_path
        .clone()
        .and(warp::path::end())
        .and(warp::header::optional::<api_types::Accept>("accept"))
        .and_then(
            |block_id: BlockId,
             chain: Arc<BeaconChain<T>>,
             accept_header: Option<api_types::Accept>| {
                blocking_task(move || {
                    let block = block_id.block(&chain)?;
                    match accept_header {
                        // Raw SSZ response.
                        Some(api_types::Accept::Ssz) => Response::builder()
                            .status(200)
                            .header("Content-Type", "application/octet-stream")
                            .body(block.as_ssz_bytes().into())
                            .map_err(|e| {
                                warp_utils::reject::custom_server_error(format!(
                                    "failed to create response: {}",
                                    e
                                ))
                            }),
                        // Default (including missing/other Accept headers): JSON.
                        _ => Ok(
                            warp::reply::json(&api_types::GenericResponseRef::from(&block))
                                .into_response(),
                        ),
                    }
                })
            },
        );
|
2020-09-29 03:46:54 +00:00
|
|
|
|
|
|
|
// GET beacon/blocks/{block_id}/root
|
|
|
|
let get_beacon_block_root = beacon_blocks_path
|
|
|
|
.clone()
|
|
|
|
.and(warp::path("root"))
|
|
|
|
.and(warp::path::end())
|
|
|
|
.and_then(|block_id: BlockId, chain: Arc<BeaconChain<T>>| {
|
|
|
|
blocking_json_task(move || {
|
|
|
|
block_id
|
|
|
|
.root(&chain)
|
|
|
|
.map(api_types::RootData::from)
|
|
|
|
.map(api_types::GenericResponse::from)
|
|
|
|
})
|
|
|
|
});
|
|
|
|
|
|
|
|
// GET beacon/blocks/{block_id}/attestations
|
|
|
|
let get_beacon_block_attestations = beacon_blocks_path
|
|
|
|
.clone()
|
|
|
|
.and(warp::path("attestations"))
|
|
|
|
.and(warp::path::end())
|
|
|
|
.and_then(|block_id: BlockId, chain: Arc<BeaconChain<T>>| {
|
|
|
|
blocking_json_task(move || {
|
|
|
|
block_id
|
|
|
|
.block(&chain)
|
|
|
|
.map(|block| block.message.body.attestations)
|
|
|
|
.map(api_types::GenericResponse::from)
|
|
|
|
})
|
|
|
|
});
|
|
|
|
|
|
|
|
    /*
     * beacon/pool
     */

    // Common base filter for all `beacon/pool/*` routes; supplies the chain handle.
    let beacon_pool_path = eth1_v1
        .and(warp::path("beacon"))
        .and(warp::path("pool"))
        .and(chain_filter.clone());
|
|
|
|
|
|
|
|
    // POST beacon/pool/attestations
    //
    // Accepts a batch of unaggregated attestations. Each attestation is gossip-verified,
    // published, applied to fork choice and added to the naive aggregation pool.
    // Per-item failures are collected and returned together as an indexed 400 response;
    // one bad attestation does not prevent the others from being processed.
    let post_beacon_pool_attestations = beacon_pool_path
        .clone()
        .and(warp::path("attestations"))
        .and(warp::path::end())
        .and(warp::body::json())
        .and(network_tx_filter.clone())
        .and(log_filter.clone())
        .and_then(
            |chain: Arc<BeaconChain<T>>,
             attestations: Vec<Attestation<T::EthSpec>>,
             network_tx: UnboundedSender<NetworkMessage<T::EthSpec>>,
             log: Logger| {
                blocking_json_task(move || {
                    // Receipt time for the validator monitor, taken before verification.
                    let seen_timestamp = timestamp_now();
                    let mut failures = Vec::new();

                    for (index, attestation) in attestations.as_slice().iter().enumerate() {
                        let attestation = match chain
                            .verify_unaggregated_attestation_for_gossip(attestation.clone(), None)
                        {
                            Ok(attestation) => attestation,
                            Err(e) => {
                                error!(log,
                                    "Failure verifying attestation for gossip";
                                    "error" => ?e,
                                    "request_index" => index,
                                    "committee_index" => attestation.data.index,
                                    "attestation_slot" => attestation.data.slot,
                                );
                                failures.push(api_types::Failure::new(
                                    index,
                                    format!("Verification: {:?}", e),
                                ));
                                // skip to the next attestation so we do not publish this one to gossip
                                continue;
                            }
                        };

                        // Notify the validator monitor.
                        chain
                            .validator_monitor
                            .read()
                            .register_api_unaggregated_attestation(
                                seen_timestamp,
                                attestation.indexed_attestation(),
                                &chain.slot_clock,
                            );

                        // Only verified attestations reach this point, so it is safe to publish.
                        publish_pubsub_message(
                            &network_tx,
                            PubsubMessage::Attestation(Box::new((
                                attestation.subnet_id(),
                                attestation.attestation().clone(),
                            ))),
                        )?;

                        let committee_index = attestation.attestation().data.index;
                        let slot = attestation.attestation().data.slot;

                        // Fork-choice failure is recorded but does not stop processing:
                        // the attestation was already published above.
                        if let Err(e) = chain.apply_attestation_to_fork_choice(&attestation) {
                            error!(log,
                                "Failure applying verified attestation to fork choice";
                                "error" => ?e,
                                "request_index" => index,
                                "committee_index" => committee_index,
                                "slot" => slot,
                            );
                            failures.push(api_types::Failure::new(
                                index,
                                format!("Fork choice: {:?}", e),
                            ));
                        };

                        if let Err(e) = chain.add_to_naive_aggregation_pool(attestation) {
                            error!(log,
                                "Failure adding verified attestation to the naive aggregation pool";
                                "error" => ?e,
                                "request_index" => index,
                                "committee_index" => committee_index,
                                "slot" => slot,
                            );
                            failures.push(api_types::Failure::new(
                                index,
                                format!("Naive aggregation pool: {:?}", e),
                            ));
                        }
                    }
                    if failures.is_empty() {
                        Ok(())
                    } else {
                        Err(warp_utils::reject::indexed_bad_request(
                            "error processing attestations".to_string(),
                            failures,
                        ))
                    }
                })
            },
        );
|
|
|
|
|
2020-11-18 23:31:39 +00:00
|
|
|
    // GET beacon/pool/attestations?committee_index,slot
    //
    // Returns attestations known to this node, optionally filtered by slot and/or
    // committee index. Results combine the op pool and the naive aggregation pool.
    let get_beacon_pool_attestations = beacon_pool_path
        .clone()
        .and(warp::path("attestations"))
        .and(warp::path::end())
        .and(warp::query::<api_types::AttestationPoolQuery>())
        .and_then(
            |chain: Arc<BeaconChain<T>>, query: api_types::AttestationPoolQuery| {
                blocking_json_task(move || {
                    // Each query param is optional; an absent param matches everything.
                    let query_filter = |attestation: &Attestation<T::EthSpec>| {
                        query
                            .slot
                            .map_or(true, |slot| slot == attestation.data.slot)
                            && query
                                .committee_index
                                .map_or(true, |index| index == attestation.data.index)
                    };

                    let mut attestations = chain.op_pool.get_filtered_attestations(query_filter);
                    // Also include attestations still awaiting aggregation.
                    attestations.extend(
                        chain
                            .naive_aggregation_pool
                            .read()
                            .iter()
                            .cloned()
                            .filter(query_filter),
                    );
                    Ok(api_types::GenericResponse::from(attestations))
                })
            },
        );
|
2020-09-29 03:46:54 +00:00
|
|
|
|
|
|
|
    // POST beacon/pool/attester_slashings
    //
    // Gossip-verifies an attester slashing; if it is new (not previously observed),
    // publishes it to the network and imports it into the op pool.
    let post_beacon_pool_attester_slashings = beacon_pool_path
        .clone()
        .and(warp::path("attester_slashings"))
        .and(warp::path::end())
        .and(warp::body::json())
        .and(network_tx_filter.clone())
        .and_then(
            |chain: Arc<BeaconChain<T>>,
             slashing: AttesterSlashing<T::EthSpec>,
             network_tx: UnboundedSender<NetworkMessage<T::EthSpec>>| {
                blocking_json_task(move || {
                    let outcome = chain
                        .verify_attester_slashing_for_gossip(slashing.clone())
                        .map_err(|e| {
                            warp_utils::reject::object_invalid(format!(
                                "gossip verification failed: {:?}",
                                e
                            ))
                        })?;

                    // Notify the validator monitor.
                    chain
                        .validator_monitor
                        .read()
                        .register_api_attester_slashing(&slashing);

                    // Already-observed slashings are accepted silently (Ok below)
                    // without re-publishing or re-importing.
                    if let ObservationOutcome::New(slashing) = outcome {
                        publish_pubsub_message(
                            &network_tx,
                            PubsubMessage::AttesterSlashing(Box::new(
                                slashing.clone().into_inner(),
                            )),
                        )?;

                        chain
                            .import_attester_slashing(slashing)
                            .map_err(warp_utils::reject::beacon_chain_error)?;
                    }

                    Ok(())
                })
            },
        );
|
|
|
|
|
|
|
|
// GET beacon/pool/attester_slashings
|
|
|
|
let get_beacon_pool_attester_slashings = beacon_pool_path
|
|
|
|
.clone()
|
|
|
|
.and(warp::path("attester_slashings"))
|
|
|
|
.and(warp::path::end())
|
|
|
|
.and_then(|chain: Arc<BeaconChain<T>>| {
|
|
|
|
blocking_json_task(move || {
|
|
|
|
let attestations = chain.op_pool.get_all_attester_slashings();
|
|
|
|
Ok(api_types::GenericResponse::from(attestations))
|
|
|
|
})
|
|
|
|
});
|
|
|
|
|
|
|
|
    // POST beacon/pool/proposer_slashings
    //
    // Gossip-verifies a proposer slashing; if it is new (not previously observed),
    // publishes it to the network and imports it into the op pool.
    let post_beacon_pool_proposer_slashings = beacon_pool_path
        .clone()
        .and(warp::path("proposer_slashings"))
        .and(warp::path::end())
        .and(warp::body::json())
        .and(network_tx_filter.clone())
        .and_then(
            |chain: Arc<BeaconChain<T>>,
             slashing: ProposerSlashing,
             network_tx: UnboundedSender<NetworkMessage<T::EthSpec>>| {
                blocking_json_task(move || {
                    let outcome = chain
                        .verify_proposer_slashing_for_gossip(slashing.clone())
                        .map_err(|e| {
                            warp_utils::reject::object_invalid(format!(
                                "gossip verification failed: {:?}",
                                e
                            ))
                        })?;

                    // Notify the validator monitor.
                    chain
                        .validator_monitor
                        .read()
                        .register_api_proposer_slashing(&slashing);

                    // Already-observed slashings are accepted silently without
                    // re-publishing or re-importing.
                    if let ObservationOutcome::New(slashing) = outcome {
                        publish_pubsub_message(
                            &network_tx,
                            PubsubMessage::ProposerSlashing(Box::new(
                                slashing.clone().into_inner(),
                            )),
                        )?;

                        chain.import_proposer_slashing(slashing);
                    }

                    Ok(())
                })
            },
        );
|
|
|
|
|
|
|
|
// GET beacon/pool/proposer_slashings
|
|
|
|
let get_beacon_pool_proposer_slashings = beacon_pool_path
|
|
|
|
.clone()
|
|
|
|
.and(warp::path("proposer_slashings"))
|
|
|
|
.and(warp::path::end())
|
|
|
|
.and_then(|chain: Arc<BeaconChain<T>>| {
|
|
|
|
blocking_json_task(move || {
|
|
|
|
let attestations = chain.op_pool.get_all_proposer_slashings();
|
|
|
|
Ok(api_types::GenericResponse::from(attestations))
|
|
|
|
})
|
|
|
|
});
|
|
|
|
|
|
|
|
    // POST beacon/pool/voluntary_exits
    //
    // Gossip-verifies a signed voluntary exit; if it is new (not previously observed),
    // publishes it to the network and imports it into the op pool.
    let post_beacon_pool_voluntary_exits = beacon_pool_path
        .clone()
        .and(warp::path("voluntary_exits"))
        .and(warp::path::end())
        .and(warp::body::json())
        .and(network_tx_filter.clone())
        .and_then(
            |chain: Arc<BeaconChain<T>>,
             exit: SignedVoluntaryExit,
             network_tx: UnboundedSender<NetworkMessage<T::EthSpec>>| {
                blocking_json_task(move || {
                    let outcome = chain
                        .verify_voluntary_exit_for_gossip(exit.clone())
                        .map_err(|e| {
                            warp_utils::reject::object_invalid(format!(
                                "gossip verification failed: {:?}",
                                e
                            ))
                        })?;

                    // Notify the validator monitor.
                    chain
                        .validator_monitor
                        .read()
                        .register_api_voluntary_exit(&exit.message);

                    // Already-observed exits are accepted silently without
                    // re-publishing or re-importing.
                    if let ObservationOutcome::New(exit) = outcome {
                        publish_pubsub_message(
                            &network_tx,
                            PubsubMessage::VoluntaryExit(Box::new(exit.clone().into_inner())),
                        )?;

                        chain.import_voluntary_exit(exit);
                    }

                    Ok(())
                })
            },
        );
|
|
|
|
|
|
|
|
// GET beacon/pool/voluntary_exits
|
|
|
|
let get_beacon_pool_voluntary_exits = beacon_pool_path
|
|
|
|
.clone()
|
|
|
|
.and(warp::path("voluntary_exits"))
|
|
|
|
.and(warp::path::end())
|
|
|
|
.and_then(|chain: Arc<BeaconChain<T>>| {
|
|
|
|
blocking_json_task(move || {
|
|
|
|
let attestations = chain.op_pool.get_all_voluntary_exits();
|
|
|
|
Ok(api_types::GenericResponse::from(attestations))
|
|
|
|
})
|
|
|
|
});
|
|
|
|
|
|
|
|
    /*
     * config/fork_schedule
     */

    // Common base filter for `config/*` routes.
    let config_path = eth1_v1.and(warp::path("config"));

    // GET config/fork_schedule
    //
    // Returns the fork of the head state as a single-element list.
    let get_config_fork_schedule = config_path
        .clone()
        .and(warp::path("fork_schedule"))
        .and(warp::path::end())
        .and(chain_filter.clone())
        .and_then(|chain: Arc<BeaconChain<T>>| {
            blocking_json_task(move || {
                StateId::head()
                    .fork(&chain)
                    .map(|fork| api_types::GenericResponse::from(vec![fork]))
            })
        });
|
|
|
|
|
|
|
|
    // GET config/spec
    //
    // Returns the chain spec rendered via `YamlConfig`.
    let get_config_spec = config_path
        .clone()
        .and(warp::path("spec"))
        .and(warp::path::end())
        .and(chain_filter.clone())
        .and_then(|chain: Arc<BeaconChain<T>>| {
            blocking_json_task(move || {
                Ok(api_types::GenericResponse::from(YamlConfig::from_spec::<
                    T::EthSpec,
                >(
                    &chain.spec
                )))
            })
        });
|
|
|
|
|
|
|
|
    // GET config/deposit_contract
    //
    // Returns the eth1 deposit contract address and chain id.
    let get_config_deposit_contract = config_path
        .clone()
        .and(warp::path("deposit_contract"))
        .and(warp::path::end())
        .and(chain_filter.clone())
        .and_then(|chain: Arc<BeaconChain<T>>| {
            blocking_json_task(move || {
                Ok(api_types::GenericResponse::from(
                    api_types::DepositContractData {
                        address: chain.spec.deposit_contract_address,
                        // NOTE(review): the chain id is the eth1 crate's compile-time
                        // default, not read from `chain.spec` — confirm this is correct
                        // for non-default networks.
                        chain_id: eth1::DEFAULT_NETWORK_ID.into(),
                    },
                ))
            })
        });
|
|
|
|
|
|
|
|
    /*
     * debug
     */

    // GET debug/beacon/states/{state_id}
    //
    // Returns the full beacon state: SSZ bytes when the client sends
    // `Accept: application/octet-stream`, otherwise JSON.
    let get_debug_beacon_states = eth1_v1
        .and(warp::path("debug"))
        .and(warp::path("beacon"))
        .and(warp::path("states"))
        .and(warp::path::param::<StateId>().or_else(|_| async {
            // Unparseable path segment becomes a 400 rather than a route mismatch.
            Err(warp_utils::reject::custom_bad_request(
                "Invalid state ID".to_string(),
            ))
        }))
        .and(warp::path::end())
        .and(warp::header::optional::<api_types::Accept>("accept"))
        .and(chain_filter.clone())
        .and_then(
            |state_id: StateId,
             accept_header: Option<api_types::Accept>,
             chain: Arc<BeaconChain<T>>| {
                blocking_task(move || match accept_header {
                    Some(api_types::Accept::Ssz) => {
                        let state = state_id.state(&chain)?;
                        Response::builder()
                            .status(200)
                            .header("Content-Type", "application/octet-stream")
                            .body(state.as_ssz_bytes().into())
                            .map_err(|e| {
                                warp_utils::reject::custom_server_error(format!(
                                    "failed to create response: {}",
                                    e
                                ))
                            })
                    }
                    // JSON path serializes via `map_state`, which borrows the state.
                    _ => state_id.map_state(&chain, |state| {
                        Ok(
                            warp::reply::json(&api_types::GenericResponseRef::from(&state))
                                .into_response(),
                        )
                    }),
                })
            },
        );
|
2020-09-29 03:46:54 +00:00
|
|
|
|
|
|
|
// GET debug/beacon/heads
|
|
|
|
let get_debug_beacon_heads = eth1_v1
|
|
|
|
.and(warp::path("debug"))
|
|
|
|
.and(warp::path("beacon"))
|
|
|
|
.and(warp::path("heads"))
|
|
|
|
.and(warp::path::end())
|
|
|
|
.and(chain_filter.clone())
|
|
|
|
.and_then(|chain: Arc<BeaconChain<T>>| {
|
|
|
|
blocking_json_task(move || {
|
|
|
|
let heads = chain
|
|
|
|
.heads()
|
|
|
|
.into_iter()
|
|
|
|
.map(|(root, slot)| api_types::ChainHeadData { root, slot })
|
|
|
|
.collect::<Vec<_>>();
|
|
|
|
Ok(api_types::GenericResponse::from(heads))
|
|
|
|
})
|
|
|
|
});
|
|
|
|
|
|
|
|
    /*
     * node
     */

    // GET node/identity
    //
    // Returns this node's peer id, ENR, advertised addresses and gossip metadata.
    let get_node_identity = eth1_v1
        .and(warp::path("node"))
        .and(warp::path("identity"))
        .and(warp::path::end())
        .and(network_globals.clone())
        .and_then(|network_globals: Arc<NetworkGlobals<T::EthSpec>>| {
            blocking_json_task(move || {
                // Addresses are derived from the local ENR.
                let enr = network_globals.local_enr();
                let p2p_addresses = enr.multiaddr_p2p_tcp();
                let discovery_addresses = enr.multiaddr_p2p_udp();
                Ok(api_types::GenericResponse::from(api_types::IdentityData {
                    peer_id: network_globals.local_peer_id().to_base58(),
                    enr,
                    p2p_addresses,
                    discovery_addresses,
                    metadata: api_types::MetaData {
                        seq_number: network_globals.local_metadata.read().seq_number,
                        // Attestation subnet bitfield, hex-encoded with 0x prefix.
                        attnets: format!(
                            "0x{}",
                            hex::encode(
                                network_globals
                                    .local_metadata
                                    .read()
                                    .attnets
                                    .clone()
                                    .into_bytes()
                            ),
                        ),
                    },
                }))
            })
        });
|
|
|
|
|
|
|
|
// GET node/version
|
|
|
|
let get_node_version = eth1_v1
|
|
|
|
.and(warp::path("node"))
|
|
|
|
.and(warp::path("version"))
|
|
|
|
.and(warp::path::end())
|
|
|
|
.and_then(|| {
|
|
|
|
blocking_json_task(move || {
|
|
|
|
Ok(api_types::GenericResponse::from(api_types::VersionData {
|
|
|
|
version: version_with_platform(),
|
|
|
|
}))
|
|
|
|
})
|
|
|
|
});
|
|
|
|
|
|
|
|
    // GET node/syncing
    //
    // Reports sync status: head slot, distance from the wall-clock slot, and
    // whether the sync service considers itself syncing.
    let get_node_syncing = eth1_v1
        .and(warp::path("node"))
        .and(warp::path("syncing"))
        .and(warp::path::end())
        .and(network_globals.clone())
        .and(chain_filter.clone())
        .and_then(
            |network_globals: Arc<NetworkGlobals<T::EthSpec>>, chain: Arc<BeaconChain<T>>| {
                blocking_json_task(move || {
                    let head_slot = chain
                        .head_info()
                        .map(|info| info.slot)
                        .map_err(warp_utils::reject::beacon_chain_error)?;
                    let current_slot = chain
                        .slot()
                        .map_err(warp_utils::reject::beacon_chain_error)?;

                    // Taking advantage of saturating subtraction on slot.
                    let sync_distance = current_slot - head_slot;

                    let syncing_data = api_types::SyncingData {
                        is_syncing: network_globals.sync_state.read().is_syncing(),
                        head_slot,
                        sync_distance,
                    };

                    Ok(api_types::GenericResponse::from(syncing_data))
                })
            },
        );
|
|
|
|
|
2020-10-22 02:59:42 +00:00
|
|
|
    // GET node/health
    //
    // Maps sync state to an HTTP status code: 200 when synced, 206 while
    // syncing, and an error rejection when sync has stalled.
    let get_node_health = eth1_v1
        .and(warp::path("node"))
        .and(warp::path("health"))
        .and(warp::path::end())
        .and(network_globals.clone())
        .and_then(|network_globals: Arc<NetworkGlobals<T::EthSpec>>| {
            blocking_task(move || match *network_globals.sync_state.read() {
                SyncState::SyncingFinalized { .. }
                | SyncState::SyncingHead { .. }
                | SyncState::SyncTransition => Ok(warp::reply::with_status(
                    warp::reply(),
                    warp::http::StatusCode::PARTIAL_CONTENT,
                )),
                SyncState::Synced => Ok(warp::reply::with_status(
                    warp::reply(),
                    warp::http::StatusCode::OK,
                )),
                SyncState::Stalled => Err(warp_utils::reject::not_synced(
                    "sync stalled, beacon chain may not yet be initialized.".to_string(),
                )),
            })
        });
|
|
|
|
|
|
|
|
    // GET node/peers/{peer_id}
    //
    // Looks up a single peer by its base58-encoded peer id. Returns 404 for
    // unknown peers and for peers we have never been connected to.
    let get_node_peers_by_id = eth1_v1
        .and(warp::path("node"))
        .and(warp::path("peers"))
        .and(warp::path::param::<String>())
        .and(warp::path::end())
        .and(network_globals.clone())
        .and_then(
            |requested_peer_id: String, network_globals: Arc<NetworkGlobals<T::EthSpec>>| {
                blocking_json_task(move || {
                    // Decode the base58 path segment, then parse the resulting bytes;
                    // either failure is reported as a 400.
                    let peer_id = PeerId::from_bytes(
                        &bs58::decode(requested_peer_id.as_str())
                            .into_vec()
                            .map_err(|e| {
                                warp_utils::reject::custom_bad_request(format!(
                                    "invalid peer id: {}",
                                    e
                                ))
                            })?,
                    )
                    .map_err(|_| {
                        warp_utils::reject::custom_bad_request("invalid peer id.".to_string())
                    })?;

                    if let Some(peer_info) = network_globals.peers.read().peer_info(&peer_id) {
                        // Prefer an observed (seen) address; fall back to a listening
                        // address; empty string when neither is known.
                        let address = if let Some(socket_addr) =
                            peer_info.seen_addresses.iter().next()
                        {
                            let mut addr = eth2_libp2p::Multiaddr::from(socket_addr.ip());
                            addr.push(eth2_libp2p::multiaddr::Protocol::Tcp(socket_addr.port()));
                            addr.to_string()
                        } else if let Some(addr) = peer_info.listening_addresses.first() {
                            addr.to_string()
                        } else {
                            String::new()
                        };

                        // the eth2 API spec implies only peers we have been connected to at some point should be included.
                        if let Some(dir) = peer_info.connection_direction.as_ref() {
                            return Ok(api_types::GenericResponse::from(api_types::PeerData {
                                peer_id: peer_id.to_string(),
                                enr: peer_info.enr.as_ref().map(|enr| enr.to_base64()),
                                last_seen_p2p_address: address,
                                direction: api_types::PeerDirection::from_connection_direction(
                                    &dir,
                                ),
                                state: api_types::PeerState::from_peer_connection_status(
                                    &peer_info.connection_status(),
                                ),
                            }));
                        }
                    }
                    Err(warp_utils::reject::custom_not_found(
                        "peer not found.".to_string(),
                    ))
                })
            },
        );
|
|
|
|
|
|
|
|
    // GET node/peers
    //
    // Lists known peers, optionally filtered by connection state and/or
    // direction query parameters. Never-connected peers are excluded.
    let get_node_peers = eth1_v1
        .and(warp::path("node"))
        .and(warp::path("peers"))
        .and(warp::path::end())
        .and(warp::query::<api_types::PeersQuery>())
        .and(network_globals.clone())
        .and_then(
            |query: api_types::PeersQuery, network_globals: Arc<NetworkGlobals<T::EthSpec>>| {
                blocking_json_task(move || {
                    let mut peers: Vec<api_types::PeerData> = Vec::new();
                    network_globals
                        .peers
                        .read()
                        .peers()
                        .for_each(|(peer_id, peer_info)| {
                            // Prefer an observed (seen) address; fall back to a
                            // listening address; empty string when neither is known.
                            let address =
                                if let Some(socket_addr) = peer_info.seen_addresses.iter().next() {
                                    let mut addr = eth2_libp2p::Multiaddr::from(socket_addr.ip());
                                    addr.push(eth2_libp2p::multiaddr::Protocol::Tcp(
                                        socket_addr.port(),
                                    ));
                                    addr.to_string()
                                } else if let Some(addr) = peer_info.listening_addresses.first() {
                                    addr.to_string()
                                } else {
                                    String::new()
                                };

                            // the eth2 API spec implies only peers we have been connected to at some point should be included.
                            if let Some(dir) = peer_info.connection_direction.as_ref() {
                                let direction =
                                    api_types::PeerDirection::from_connection_direction(&dir);
                                let state = api_types::PeerState::from_peer_connection_status(
                                    &peer_info.connection_status(),
                                );

                                // An absent query param matches all values.
                                let state_matches = query.state.as_ref().map_or(true, |states| {
                                    states.0.iter().any(|state_param| *state_param == state)
                                });
                                let direction_matches =
                                    query.direction.as_ref().map_or(true, |directions| {
                                        directions.0.iter().any(|dir_param| *dir_param == direction)
                                    });

                                if state_matches && direction_matches {
                                    peers.push(api_types::PeerData {
                                        peer_id: peer_id.to_string(),
                                        enr: peer_info.enr.as_ref().map(|enr| enr.to_base64()),
                                        last_seen_p2p_address: address,
                                        direction,
                                        state,
                                    });
                                }
                            }
                        });
                    Ok(api_types::PeersData {
                        meta: api_types::PeersMetaData {
                            count: peers.len() as u64,
                        },
                        data: peers,
                    })
                })
            },
        );
|
|
|
|
|
|
|
|
    // GET node/peer_count
    //
    // Tallies known peers by connection state.
    let get_node_peer_count = eth1_v1
        .and(warp::path("node"))
        .and(warp::path("peer_count"))
        .and(warp::path::end())
        .and(network_globals.clone())
        .and_then(|network_globals: Arc<NetworkGlobals<T::EthSpec>>| {
            blocking_json_task(move || {
                let mut connected: u64 = 0;
                let mut connecting: u64 = 0;
                let mut disconnected: u64 = 0;
                let mut disconnecting: u64 = 0;

                network_globals
                    .peers
                    .read()
                    .peers()
                    .for_each(|(_, peer_info)| {
                        let state = api_types::PeerState::from_peer_connection_status(
                            &peer_info.connection_status(),
                        );
                        match state {
                            api_types::PeerState::Connected => connected += 1,
                            api_types::PeerState::Connecting => connecting += 1,
                            api_types::PeerState::Disconnected => disconnected += 1,
                            api_types::PeerState::Disconnecting => disconnecting += 1,
                        }
                    });

                Ok(api_types::GenericResponse::from(api_types::PeerCount {
                    disconnecting,
                    connecting,
                    connected,
                    disconnected,
                }))
            })
        });
|
2020-09-29 03:46:54 +00:00
|
|
|
    /*
     * validator
     */

    // GET validator/duties/proposer/{epoch}
    //
    // Returns proposer duties for the requested epoch. Requests for the current
    // epoch are answered from the proposer cache; requests for past epochs
    // compute duties from the state at the start of that epoch. Future epochs
    // are rejected with a 400.
    let get_validator_duties_proposer = eth1_v1
        .and(warp::path("validator"))
        .and(warp::path("duties"))
        .and(warp::path("proposer"))
        .and(warp::path::param::<Epoch>().or_else(|_| async {
            Err(warp_utils::reject::custom_bad_request(
                "Invalid epoch".to_string(),
            ))
        }))
        .and(warp::path::end())
        .and(not_while_syncing_filter.clone())
        .and(chain_filter.clone())
        .and(beacon_proposer_cache())
        .and_then(
            |epoch: Epoch,
             chain: Arc<BeaconChain<T>>,
             beacon_proposer_cache: Arc<Mutex<BeaconProposerCache>>| {
                blocking_json_task(move || {
                    let current_epoch = chain
                        .epoch()
                        .map_err(warp_utils::reject::beacon_chain_error)?;

                    if epoch > current_epoch {
                        return Err(warp_utils::reject::custom_bad_request(format!(
                            "request epoch {} is ahead of the current epoch {}",
                            epoch, current_epoch
                        )));
                    }

                    if epoch == current_epoch {
                        // The dependent root is the block root at the last slot of the
                        // previous epoch; if the chain has not reached that slot yet,
                        // fall back to the head block root.
                        // NOTE(review): this computation is duplicated in the `else`
                        // branch below — candidate for a shared helper.
                        let dependent_root_slot = current_epoch
                            .start_slot(T::EthSpec::slots_per_epoch()) - 1;
                        let dependent_root = if dependent_root_slot > chain.best_slot().map_err(warp_utils::reject::beacon_chain_error)? {
                            chain.head_beacon_block_root().map_err(warp_utils::reject::beacon_chain_error)?
                        } else {
                            chain
                                .root_at_slot(dependent_root_slot)
                                .map_err(warp_utils::reject::beacon_chain_error)?
                                .unwrap_or(chain.genesis_block_root)
                        };

                        beacon_proposer_cache
                            .lock()
                            .get_proposers(&chain, epoch)
                            .map(|duties| api_types::DutiesResponse { data: duties, dependent_root })
                    } else {
                        // Past epoch: load the state at the start of the requested
                        // epoch and compute the proposer for each of its slots.
                        let state =
                            StateId::slot(epoch.start_slot(T::EthSpec::slots_per_epoch()))
                                .state(&chain)?;

                        let dependent_root_slot = state.current_epoch()
                            .start_slot(T::EthSpec::slots_per_epoch()) - 1;
                        let dependent_root = if dependent_root_slot > chain.best_slot().map_err(warp_utils::reject::beacon_chain_error)? {
                            chain.head_beacon_block_root().map_err(warp_utils::reject::beacon_chain_error)?
                        } else {
                            chain
                                .root_at_slot(dependent_root_slot)
                                .map_err(warp_utils::reject::beacon_chain_error)?
                                .unwrap_or(chain.genesis_block_root)
                        };

                        epoch
                            .slot_iter(T::EthSpec::slots_per_epoch())
                            .map(|slot| {
                                state
                                    .get_beacon_proposer_index(slot, &chain.spec)
                                    .map_err(warp_utils::reject::beacon_state_error)
                                    .and_then(|i| {
                                        // A missing pubkey for a known index means the
                                        // pubkey cache is behind the state.
                                        let pubkey =
                                            chain.validator_pubkey(i)
                                                .map_err(warp_utils::reject::beacon_chain_error)?
                                                .ok_or_else(||
                                                    warp_utils::reject::beacon_chain_error(
                                                        BeaconChainError::ValidatorPubkeyCacheIncomplete(i)
                                                    )
                                                )?;

                                        Ok(api_types::ProposerData {
                                            pubkey: PublicKeyBytes::from(pubkey),
                                            validator_index: i as u64,
                                            slot,
                                        })
                                    })
                            })
                            .collect::<Result<Vec<api_types::ProposerData>, _>>()
                            .map(|duties| {
                                api_types::DutiesResponse {
                                    dependent_root,
                                    data: duties,
                                }
                            })
                    }
                })
            },
        );
|
|
|
|
|
|
|
|
    // GET validator/blocks/{slot}
    //
    // Produces an unsigned block for the given slot using the supplied
    // `randao_reveal` (and optional graffiti) query parameters.
    let get_validator_blocks = eth1_v1
        .and(warp::path("validator"))
        .and(warp::path("blocks"))
        .and(warp::path::param::<Slot>().or_else(|_| async {
            Err(warp_utils::reject::custom_bad_request(
                "Invalid slot".to_string(),
            ))
        }))
        .and(warp::path::end())
        .and(not_while_syncing_filter.clone())
        .and(warp::query::<api_types::ValidatorBlocksQuery>())
        .and(chain_filter.clone())
        .and_then(
            |slot: Slot, query: api_types::ValidatorBlocksQuery, chain: Arc<BeaconChain<T>>| {
                blocking_json_task(move || {
                    let randao_reveal = (&query.randao_reveal).try_into().map_err(|e| {
                        warp_utils::reject::custom_bad_request(format!(
                            "randao reveal is not valid BLS signature: {:?}",
                            e
                        ))
                    })?;

                    // `produce_block` also returns the post-state; only the block
                    // is returned to the caller.
                    chain
                        .produce_block(randao_reveal, slot, query.graffiti.map(Into::into))
                        .map(|block_and_state| block_and_state.0)
                        .map(api_types::GenericResponse::from)
                        .map_err(warp_utils::reject::block_production_error)
                })
            },
        );
|
|
|
|
|
|
|
|
    // GET validator/attestation_data?slot,committee_index
    //
    // Produces `AttestationData` for the given slot and committee index.
    // Requests more than one slot ahead of the wall clock are rejected.
    let get_validator_attestation_data = eth1_v1
        .and(warp::path("validator"))
        .and(warp::path("attestation_data"))
        .and(warp::path::end())
        .and(warp::query::<api_types::ValidatorAttestationDataQuery>())
        .and(not_while_syncing_filter.clone())
        .and(chain_filter.clone())
        .and_then(
            |query: api_types::ValidatorAttestationDataQuery, chain: Arc<BeaconChain<T>>| {
                blocking_json_task(move || {
                    let current_slot = chain
                        .slot()
                        .map_err(warp_utils::reject::beacon_chain_error)?;

                    // allow a tolerance of one slot to account for clock skew
                    if query.slot > current_slot + 1 {
                        return Err(warp_utils::reject::custom_bad_request(format!(
                            "request slot {} is more than one slot past the current slot {}",
                            query.slot, current_slot
                        )));
                    }

                    chain
                        .produce_unaggregated_attestation(query.slot, query.committee_index)
                        .map(|attestation| attestation.data)
                        .map(api_types::GenericResponse::from)
                        .map_err(warp_utils::reject::beacon_chain_error)
                })
            },
        );
|
|
|
|
|
|
|
|
// GET validator/aggregate_attestation?attestation_data_root,slot
|
|
|
|
let get_validator_aggregate_attestation = eth1_v1
|
|
|
|
.and(warp::path("validator"))
|
|
|
|
.and(warp::path("aggregate_attestation"))
|
|
|
|
.and(warp::path::end())
|
|
|
|
.and(warp::query::<api_types::ValidatorAggregateAttestationQuery>())
|
|
|
|
.and(not_while_syncing_filter.clone())
|
|
|
|
.and(chain_filter.clone())
|
|
|
|
.and_then(
|
|
|
|
|query: api_types::ValidatorAggregateAttestationQuery, chain: Arc<BeaconChain<T>>| {
|
|
|
|
blocking_json_task(move || {
|
|
|
|
chain
|
|
|
|
.get_aggregated_attestation_by_slot_and_root(
|
|
|
|
query.slot,
|
|
|
|
&query.attestation_data_root,
|
|
|
|
)
|
|
|
|
.map(api_types::GenericResponse::from)
|
|
|
|
.ok_or_else(|| {
|
|
|
|
warp_utils::reject::custom_not_found(
|
|
|
|
"no matching aggregate found".to_string(),
|
|
|
|
)
|
|
|
|
})
|
|
|
|
})
|
|
|
|
},
|
|
|
|
);
|
|
|
|
|
|
|
|
// POST validator/duties/attester/{epoch}
|
|
|
|
let post_validator_duties_attester = eth1_v1
|
2020-09-29 03:46:54 +00:00
|
|
|
.and(warp::path("validator"))
|
|
|
|
.and(warp::path("duties"))
|
|
|
|
.and(warp::path("attester"))
|
2020-12-03 23:10:08 +00:00
|
|
|
.and(warp::path::param::<Epoch>().or_else(|_| async {
|
|
|
|
Err(warp_utils::reject::custom_bad_request(
|
|
|
|
"Invalid epoch".to_string(),
|
|
|
|
))
|
|
|
|
}))
|
2020-09-29 03:46:54 +00:00
|
|
|
.and(warp::path::end())
|
|
|
|
.and(not_while_syncing_filter.clone())
|
2020-11-09 23:13:56 +00:00
|
|
|
.and(warp::body::json())
|
2020-09-29 03:46:54 +00:00
|
|
|
.and(chain_filter.clone())
|
|
|
|
.and_then(
|
2020-11-09 23:13:56 +00:00
|
|
|
|epoch: Epoch, indices: api_types::ValidatorIndexData, chain: Arc<BeaconChain<T>>| {
|
2020-09-29 03:46:54 +00:00
|
|
|
blocking_json_task(move || {
|
|
|
|
let current_epoch = chain
|
|
|
|
.epoch()
|
|
|
|
.map_err(warp_utils::reject::beacon_chain_error)?;
|
|
|
|
|
|
|
|
if epoch > current_epoch + 1 {
|
|
|
|
return Err(warp_utils::reject::custom_bad_request(format!(
|
|
|
|
"request epoch {} is more than one epoch past the current epoch {}",
|
|
|
|
epoch, current_epoch
|
|
|
|
)));
|
|
|
|
}
|
|
|
|
|
|
|
|
let validator_count = StateId::head()
|
|
|
|
.map_state(&chain, |state| Ok(state.validators.len() as u64))?;
|
|
|
|
|
|
|
|
let pubkeys = indices
|
2020-11-09 23:13:56 +00:00
|
|
|
.0
|
|
|
|
.iter()
|
|
|
|
.filter(|i| **i < validator_count as u64)
|
2020-09-29 03:46:54 +00:00
|
|
|
.map(|i| {
|
|
|
|
let pubkey = chain
|
2020-11-09 23:13:56 +00:00
|
|
|
.validator_pubkey(*i as usize)
|
2020-09-29 03:46:54 +00:00
|
|
|
.map_err(warp_utils::reject::beacon_chain_error)?
|
|
|
|
.ok_or_else(|| {
|
|
|
|
warp_utils::reject::custom_bad_request(format!(
|
|
|
|
"unknown validator index {}",
|
2020-11-09 23:13:56 +00:00
|
|
|
*i
|
2020-09-29 03:46:54 +00:00
|
|
|
))
|
|
|
|
})?;
|
|
|
|
|
2020-11-09 23:13:56 +00:00
|
|
|
Ok((*i, pubkey))
|
2020-09-29 03:46:54 +00:00
|
|
|
})
|
|
|
|
.collect::<Result<Vec<_>, warp::Rejection>>()?;
|
|
|
|
|
|
|
|
// Converts the internal Lighthouse `AttestationDuty` struct into an
|
|
|
|
// API-conforming `AttesterData` struct.
|
|
|
|
let convert = |validator_index: u64,
|
|
|
|
pubkey: PublicKey,
|
|
|
|
duty: AttestationDuty|
|
|
|
|
-> api_types::AttesterData {
|
|
|
|
api_types::AttesterData {
|
|
|
|
pubkey: pubkey.into(),
|
|
|
|
validator_index,
|
|
|
|
committees_at_slot: duty.committees_at_slot,
|
|
|
|
committee_index: duty.index,
|
|
|
|
committee_length: duty.committee_len as u64,
|
|
|
|
validator_committee_index: duty.committee_position as u64,
|
|
|
|
slot: duty.slot,
|
|
|
|
}
|
|
|
|
};
|
|
|
|
|
|
|
|
// Here we have two paths:
|
|
|
|
//
|
|
|
|
// ## Fast
|
|
|
|
//
|
|
|
|
// If the request epoch is the current epoch, use the cached beacon chain
|
|
|
|
// method.
|
|
|
|
//
|
|
|
|
// ## Slow
|
|
|
|
//
|
|
|
|
// If the request epoch is prior to the current epoch, load a beacon state from
|
|
|
|
// disk
|
|
|
|
//
|
|
|
|
// The idea is to stop historical requests from washing out the cache on the
|
|
|
|
// beacon chain, whilst allowing a VC to request duties quickly.
|
2020-12-04 00:18:58 +00:00
|
|
|
let (duties, dependent_root) = if epoch == current_epoch {
|
2020-09-29 03:46:54 +00:00
|
|
|
// Fast path.
|
2020-12-04 00:18:58 +00:00
|
|
|
let duties = pubkeys
|
2020-09-29 03:46:54 +00:00
|
|
|
.into_iter()
|
|
|
|
// Exclude indices which do not represent a known public key and a
|
|
|
|
// validator duty.
|
|
|
|
.filter_map(|(i, pubkey)| {
|
|
|
|
Some(
|
|
|
|
chain
|
|
|
|
.validator_attestation_duty(i as usize, epoch)
|
|
|
|
.transpose()?
|
|
|
|
.map_err(warp_utils::reject::beacon_chain_error)
|
|
|
|
.map(|duty| convert(i, pubkey, duty)),
|
|
|
|
)
|
|
|
|
})
|
2020-12-04 00:18:58 +00:00
|
|
|
.collect::<Result<Vec<_>, warp::Rejection>>()?;
|
|
|
|
|
|
|
|
let dependent_root_slot =
|
|
|
|
(epoch - 1).start_slot(T::EthSpec::slots_per_epoch()) - 1;
|
|
|
|
let dependent_root = if dependent_root_slot
|
|
|
|
> chain
|
|
|
|
.best_slot()
|
|
|
|
.map_err(warp_utils::reject::beacon_chain_error)?
|
|
|
|
{
|
|
|
|
chain
|
|
|
|
.head_beacon_block_root()
|
|
|
|
.map_err(warp_utils::reject::beacon_chain_error)?
|
|
|
|
} else {
|
|
|
|
chain
|
|
|
|
.root_at_slot(dependent_root_slot)
|
|
|
|
.map_err(warp_utils::reject::beacon_chain_error)?
|
|
|
|
.unwrap_or(chain.genesis_block_root)
|
|
|
|
};
|
|
|
|
|
|
|
|
(duties, dependent_root)
|
2020-09-29 03:46:54 +00:00
|
|
|
} else {
|
|
|
|
// If the head state is equal to or earlier than the request epoch, use it.
|
|
|
|
let mut state = chain
|
|
|
|
.with_head(|head| {
|
|
|
|
if head.beacon_state.current_epoch() <= epoch {
|
|
|
|
Ok(Some(
|
|
|
|
head.beacon_state
|
|
|
|
.clone_with(CloneConfig::committee_caches_only()),
|
|
|
|
))
|
|
|
|
} else {
|
|
|
|
Ok(None)
|
|
|
|
}
|
|
|
|
})
|
|
|
|
.map_err(warp_utils::reject::beacon_chain_error)?
|
|
|
|
.map(Result::Ok)
|
|
|
|
.unwrap_or_else(|| {
|
|
|
|
StateId::slot(epoch.start_slot(T::EthSpec::slots_per_epoch()))
|
|
|
|
.state(&chain)
|
|
|
|
})?;
|
|
|
|
|
|
|
|
// Only skip forward to the epoch prior to the request, since we have a
|
|
|
|
// one-epoch look-ahead on shuffling.
|
|
|
|
while state
|
|
|
|
.next_epoch()
|
|
|
|
.map_err(warp_utils::reject::beacon_state_error)?
|
|
|
|
< epoch
|
|
|
|
{
|
|
|
|
// Don't calculate state roots since they aren't required for calculating
|
|
|
|
// shuffling (achieved by providing Hash256::zero()).
|
|
|
|
per_slot_processing(&mut state, Some(Hash256::zero()), &chain.spec)
|
|
|
|
.map_err(warp_utils::reject::slot_processing_error)?;
|
|
|
|
}
|
|
|
|
|
|
|
|
let relative_epoch =
|
|
|
|
RelativeEpoch::from_epoch(state.current_epoch(), epoch).map_err(
|
|
|
|
|e| {
|
|
|
|
warp_utils::reject::custom_server_error(format!(
|
|
|
|
"unable to obtain suitable state: {:?}",
|
|
|
|
e
|
|
|
|
))
|
|
|
|
},
|
|
|
|
)?;
|
|
|
|
|
|
|
|
state
|
|
|
|
.build_committee_cache(relative_epoch, &chain.spec)
|
|
|
|
.map_err(warp_utils::reject::beacon_state_error)?;
|
2020-12-04 00:18:58 +00:00
|
|
|
let duties = pubkeys
|
2020-09-29 03:46:54 +00:00
|
|
|
.into_iter()
|
|
|
|
.filter_map(|(i, pubkey)| {
|
|
|
|
Some(
|
|
|
|
state
|
|
|
|
.get_attestation_duties(i as usize, relative_epoch)
|
|
|
|
.transpose()?
|
|
|
|
.map_err(warp_utils::reject::beacon_state_error)
|
|
|
|
.map(|duty| convert(i, pubkey, duty)),
|
|
|
|
)
|
|
|
|
})
|
2020-12-04 00:18:58 +00:00
|
|
|
.collect::<Result<Vec<_>, warp::Rejection>>()?;
|
|
|
|
|
|
|
|
let dependent_root_slot =
|
|
|
|
(epoch - 1).start_slot(T::EthSpec::slots_per_epoch()) - 1;
|
|
|
|
let dependent_root = if dependent_root_slot
|
|
|
|
> chain
|
|
|
|
.best_slot()
|
|
|
|
.map_err(warp_utils::reject::beacon_chain_error)?
|
|
|
|
{
|
|
|
|
chain
|
|
|
|
.head_beacon_block_root()
|
|
|
|
.map_err(warp_utils::reject::beacon_chain_error)?
|
|
|
|
} else {
|
|
|
|
chain
|
|
|
|
.root_at_slot(dependent_root_slot)
|
|
|
|
.map_err(warp_utils::reject::beacon_chain_error)?
|
|
|
|
.unwrap_or(chain.genesis_block_root)
|
|
|
|
};
|
|
|
|
|
|
|
|
(duties, dependent_root)
|
2020-09-29 03:46:54 +00:00
|
|
|
};
|
|
|
|
|
2020-12-04 00:18:58 +00:00
|
|
|
Ok(api_types::DutiesResponse {
|
|
|
|
dependent_root,
|
|
|
|
data: duties,
|
|
|
|
})
|
2020-09-29 03:46:54 +00:00
|
|
|
})
|
|
|
|
},
|
|
|
|
);
|
|
|
|
|
|
|
|
// POST validator/aggregate_and_proofs
|
|
|
|
let post_validator_aggregate_and_proofs = eth1_v1
|
|
|
|
.and(warp::path("validator"))
|
|
|
|
.and(warp::path("aggregate_and_proofs"))
|
|
|
|
.and(warp::path::end())
|
|
|
|
.and(not_while_syncing_filter)
|
|
|
|
.and(chain_filter.clone())
|
|
|
|
.and(warp::body::json())
|
|
|
|
.and(network_tx_filter.clone())
|
2020-11-09 23:13:56 +00:00
|
|
|
.and(log_filter.clone())
|
2020-09-29 03:46:54 +00:00
|
|
|
.and_then(
|
|
|
|
|chain: Arc<BeaconChain<T>>,
|
2020-11-09 23:13:56 +00:00
|
|
|
aggregates: Vec<SignedAggregateAndProof<T::EthSpec>>,
|
|
|
|
network_tx: UnboundedSender<NetworkMessage<T::EthSpec>>, log: Logger| {
|
2020-09-29 03:46:54 +00:00
|
|
|
blocking_json_task(move || {
|
2021-01-20 19:19:38 +00:00
|
|
|
let seen_timestamp = timestamp_now();
|
2020-11-09 23:13:56 +00:00
|
|
|
let mut verified_aggregates = Vec::with_capacity(aggregates.len());
|
|
|
|
let mut messages = Vec::with_capacity(aggregates.len());
|
|
|
|
let mut failures = Vec::new();
|
|
|
|
|
|
|
|
// Verify that all messages in the post are valid before processing further
|
|
|
|
for (index, aggregate) in aggregates.as_slice().iter().enumerate() {
|
2020-09-29 03:46:54 +00:00
|
|
|
match chain.verify_aggregated_attestation_for_gossip(aggregate.clone()) {
|
2020-11-09 23:13:56 +00:00
|
|
|
Ok(verified_aggregate) => {
|
|
|
|
messages.push(PubsubMessage::AggregateAndProofAttestation(Box::new(
|
|
|
|
verified_aggregate.aggregate().clone(),
|
|
|
|
)));
|
2021-01-20 19:19:38 +00:00
|
|
|
|
|
|
|
// Notify the validator monitor.
|
|
|
|
chain
|
|
|
|
.validator_monitor
|
|
|
|
.read()
|
|
|
|
.register_api_aggregated_attestation(
|
|
|
|
seen_timestamp,
|
|
|
|
verified_aggregate.aggregate(),
|
|
|
|
verified_aggregate.indexed_attestation(),
|
|
|
|
&chain.slot_clock,
|
|
|
|
);
|
|
|
|
|
2020-11-09 23:13:56 +00:00
|
|
|
verified_aggregates.push((index, verified_aggregate));
|
|
|
|
}
|
2020-09-29 03:46:54 +00:00
|
|
|
// If we already know the attestation, don't broadcast it or attempt to
|
|
|
|
// further verify it. Return success.
|
|
|
|
//
|
|
|
|
// It's reasonably likely that two different validators produce
|
|
|
|
// identical aggregates, especially if they're using the same beacon
|
|
|
|
// node.
|
2020-11-09 23:13:56 +00:00
|
|
|
Err(AttnError::AttestationAlreadyKnown(_)) => continue,
|
2020-09-29 03:46:54 +00:00
|
|
|
Err(e) => {
|
2020-11-09 23:13:56 +00:00
|
|
|
error!(log,
|
|
|
|
"Failure verifying aggregate and proofs";
|
|
|
|
"error" => format!("{:?}", e),
|
|
|
|
"request_index" => index,
|
|
|
|
"aggregator_index" => aggregate.message.aggregator_index,
|
|
|
|
"attestation_index" => aggregate.message.aggregate.data.index,
|
|
|
|
"attestation_slot" => aggregate.message.aggregate.data.slot,
|
|
|
|
);
|
|
|
|
failures.push(api_types::Failure::new(index, format!("Verification: {:?}", e)));
|
2021-02-10 23:29:49 +00:00
|
|
|
}
|
2020-11-09 23:13:56 +00:00
|
|
|
}
|
|
|
|
}
|
2020-09-29 03:46:54 +00:00
|
|
|
|
2020-11-09 23:13:56 +00:00
|
|
|
// Publish aggregate attestations to the libp2p network
|
|
|
|
if !messages.is_empty() {
|
|
|
|
publish_network_message(&network_tx, NetworkMessage::Publish { messages })?;
|
|
|
|
}
|
2020-09-29 03:46:54 +00:00
|
|
|
|
2020-11-09 23:13:56 +00:00
|
|
|
// Import aggregate attestations
|
|
|
|
for (index, verified_aggregate) in verified_aggregates {
|
|
|
|
if let Err(e) = chain.apply_attestation_to_fork_choice(&verified_aggregate) {
|
|
|
|
error!(log,
|
|
|
|
"Failure applying verified aggregate attestation to fork choice";
|
|
|
|
"error" => format!("{:?}", e),
|
|
|
|
"request_index" => index,
|
|
|
|
"aggregator_index" => verified_aggregate.aggregate().message.aggregator_index,
|
|
|
|
"attestation_index" => verified_aggregate.attestation().data.index,
|
|
|
|
"attestation_slot" => verified_aggregate.attestation().data.slot,
|
|
|
|
);
|
|
|
|
failures.push(api_types::Failure::new(index, format!("Fork choice: {:?}", e)));
|
|
|
|
}
|
|
|
|
if let Err(e) = chain.add_to_block_inclusion_pool(verified_aggregate) {
|
|
|
|
warn!(log,
|
|
|
|
"Could not add verified aggregate attestation to the inclusion pool";
|
|
|
|
"error" => format!("{:?}", e),
|
|
|
|
"request_index" => index,
|
|
|
|
);
|
|
|
|
failures.push(api_types::Failure::new(index, format!("Op pool: {:?}", e)));
|
|
|
|
}
|
|
|
|
}
|
2020-09-29 03:46:54 +00:00
|
|
|
|
2020-11-09 23:13:56 +00:00
|
|
|
if !failures.is_empty() {
|
|
|
|
Err(warp_utils::reject::indexed_bad_request("error processing aggregate and proofs".to_string(),
|
2021-02-10 23:29:49 +00:00
|
|
|
failures,
|
2020-09-29 03:46:54 +00:00
|
|
|
))
|
2020-11-09 23:13:56 +00:00
|
|
|
} else {
|
|
|
|
Ok(())
|
|
|
|
}
|
2020-09-29 03:46:54 +00:00
|
|
|
})
|
|
|
|
},
|
|
|
|
);
|
|
|
|
|
|
|
|
// POST validator/beacon_committee_subscriptions
|
|
|
|
let post_validator_beacon_committee_subscriptions = eth1_v1
|
|
|
|
.and(warp::path("validator"))
|
|
|
|
.and(warp::path("beacon_committee_subscriptions"))
|
|
|
|
.and(warp::path::end())
|
|
|
|
.and(warp::body::json())
|
|
|
|
.and(network_tx_filter)
|
2021-01-20 19:19:38 +00:00
|
|
|
.and(chain_filter.clone())
|
2020-09-29 03:46:54 +00:00
|
|
|
.and_then(
|
|
|
|
|subscriptions: Vec<api_types::BeaconCommitteeSubscription>,
|
2021-01-20 19:19:38 +00:00
|
|
|
network_tx: UnboundedSender<NetworkMessage<T::EthSpec>>,
|
|
|
|
chain: Arc<BeaconChain<T>>| {
|
2020-09-29 03:46:54 +00:00
|
|
|
blocking_json_task(move || {
|
|
|
|
for subscription in &subscriptions {
|
2021-01-20 19:19:38 +00:00
|
|
|
chain
|
|
|
|
.validator_monitor
|
|
|
|
.write()
|
|
|
|
.auto_register_local_validator(subscription.validator_index);
|
|
|
|
|
2020-09-29 03:46:54 +00:00
|
|
|
let subscription = api_types::ValidatorSubscription {
|
|
|
|
validator_index: subscription.validator_index,
|
|
|
|
attestation_committee_index: subscription.committee_index,
|
|
|
|
slot: subscription.slot,
|
|
|
|
committee_count_at_slot: subscription.committees_at_slot,
|
|
|
|
is_aggregator: subscription.is_aggregator,
|
|
|
|
};
|
|
|
|
|
|
|
|
publish_network_message(
|
|
|
|
&network_tx,
|
|
|
|
NetworkMessage::Subscribe {
|
|
|
|
subscriptions: vec![subscription],
|
|
|
|
},
|
|
|
|
)?;
|
|
|
|
}
|
|
|
|
|
|
|
|
Ok(())
|
|
|
|
})
|
|
|
|
},
|
|
|
|
);
|
|
|
|
|
|
|
|
// GET lighthouse/health
|
|
|
|
let get_lighthouse_health = warp::path("lighthouse")
|
|
|
|
.and(warp::path("health"))
|
|
|
|
.and(warp::path::end())
|
|
|
|
.and_then(|| {
|
|
|
|
blocking_json_task(move || {
|
|
|
|
eth2::lighthouse::Health::observe()
|
|
|
|
.map(api_types::GenericResponse::from)
|
|
|
|
.map_err(warp_utils::reject::custom_bad_request)
|
|
|
|
})
|
|
|
|
});
|
|
|
|
|
|
|
|
// GET lighthouse/syncing
|
|
|
|
let get_lighthouse_syncing = warp::path("lighthouse")
|
|
|
|
.and(warp::path("syncing"))
|
|
|
|
.and(warp::path::end())
|
|
|
|
.and(network_globals.clone())
|
|
|
|
.and_then(|network_globals: Arc<NetworkGlobals<T::EthSpec>>| {
|
|
|
|
blocking_json_task(move || {
|
|
|
|
Ok(api_types::GenericResponse::from(
|
|
|
|
network_globals.sync_state(),
|
|
|
|
))
|
|
|
|
})
|
|
|
|
});
|
|
|
|
|
|
|
|
// GET lighthouse/peers
|
|
|
|
let get_lighthouse_peers = warp::path("lighthouse")
|
|
|
|
.and(warp::path("peers"))
|
|
|
|
.and(warp::path::end())
|
|
|
|
.and(network_globals.clone())
|
|
|
|
.and_then(|network_globals: Arc<NetworkGlobals<T::EthSpec>>| {
|
|
|
|
blocking_json_task(move || {
|
|
|
|
Ok(network_globals
|
|
|
|
.peers
|
|
|
|
.read()
|
|
|
|
.peers()
|
|
|
|
.map(|(peer_id, peer_info)| eth2::lighthouse::Peer {
|
|
|
|
peer_id: peer_id.to_string(),
|
|
|
|
peer_info: peer_info.clone(),
|
|
|
|
})
|
|
|
|
.collect::<Vec<_>>())
|
|
|
|
})
|
|
|
|
});
|
|
|
|
|
|
|
|
// GET lighthouse/peers/connected
|
|
|
|
let get_lighthouse_peers_connected = warp::path("lighthouse")
|
|
|
|
.and(warp::path("peers"))
|
|
|
|
.and(warp::path("connected"))
|
|
|
|
.and(warp::path::end())
|
|
|
|
.and(network_globals)
|
|
|
|
.and_then(|network_globals: Arc<NetworkGlobals<T::EthSpec>>| {
|
|
|
|
blocking_json_task(move || {
|
|
|
|
Ok(network_globals
|
|
|
|
.peers
|
|
|
|
.read()
|
|
|
|
.connected_peers()
|
|
|
|
.map(|(peer_id, peer_info)| eth2::lighthouse::Peer {
|
|
|
|
peer_id: peer_id.to_string(),
|
|
|
|
peer_info: peer_info.clone(),
|
|
|
|
})
|
|
|
|
.collect::<Vec<_>>())
|
|
|
|
})
|
|
|
|
});
|
|
|
|
|
|
|
|
// GET lighthouse/proto_array
|
|
|
|
let get_lighthouse_proto_array = warp::path("lighthouse")
|
|
|
|
.and(warp::path("proto_array"))
|
|
|
|
.and(warp::path::end())
|
|
|
|
.and(chain_filter.clone())
|
|
|
|
.and_then(|chain: Arc<BeaconChain<T>>| {
|
|
|
|
blocking_task(move || {
|
|
|
|
Ok::<_, warp::Rejection>(warp::reply::json(&api_types::GenericResponseRef::from(
|
|
|
|
chain.fork_choice.read().proto_array().core_proto_array(),
|
|
|
|
)))
|
|
|
|
})
|
|
|
|
});
|
|
|
|
|
|
|
|
// GET lighthouse/validator_inclusion/{epoch}/{validator_id}
|
|
|
|
let get_lighthouse_validator_inclusion_global = warp::path("lighthouse")
|
|
|
|
.and(warp::path("validator_inclusion"))
|
|
|
|
.and(warp::path::param::<Epoch>())
|
|
|
|
.and(warp::path::param::<ValidatorId>())
|
|
|
|
.and(warp::path::end())
|
|
|
|
.and(chain_filter.clone())
|
|
|
|
.and_then(
|
|
|
|
|epoch: Epoch, validator_id: ValidatorId, chain: Arc<BeaconChain<T>>| {
|
|
|
|
blocking_json_task(move || {
|
|
|
|
validator_inclusion::validator_inclusion_data(epoch, &validator_id, &chain)
|
|
|
|
.map(api_types::GenericResponse::from)
|
|
|
|
})
|
|
|
|
},
|
|
|
|
);
|
|
|
|
|
|
|
|
// GET lighthouse/validator_inclusion/{epoch}/global
|
|
|
|
let get_lighthouse_validator_inclusion = warp::path("lighthouse")
|
|
|
|
.and(warp::path("validator_inclusion"))
|
|
|
|
.and(warp::path::param::<Epoch>())
|
|
|
|
.and(warp::path("global"))
|
|
|
|
.and(warp::path::end())
|
2020-10-22 06:05:49 +00:00
|
|
|
.and(chain_filter.clone())
|
2020-09-29 03:46:54 +00:00
|
|
|
.and_then(|epoch: Epoch, chain: Arc<BeaconChain<T>>| {
|
|
|
|
blocking_json_task(move || {
|
|
|
|
validator_inclusion::global_validator_inclusion_data(epoch, &chain)
|
|
|
|
.map(api_types::GenericResponse::from)
|
|
|
|
})
|
|
|
|
});
|
|
|
|
|
2020-11-02 00:37:30 +00:00
|
|
|
// GET lighthouse/eth1/syncing
|
|
|
|
let get_lighthouse_eth1_syncing = warp::path("lighthouse")
|
|
|
|
.and(warp::path("eth1"))
|
|
|
|
.and(warp::path("syncing"))
|
|
|
|
.and(warp::path::end())
|
|
|
|
.and(chain_filter.clone())
|
|
|
|
.and_then(|chain: Arc<BeaconChain<T>>| {
|
|
|
|
blocking_json_task(move || {
|
|
|
|
let head_info = chain
|
|
|
|
.head_info()
|
|
|
|
.map_err(warp_utils::reject::beacon_chain_error)?;
|
2020-11-30 20:29:17 +00:00
|
|
|
let current_slot_opt = chain.slot().ok();
|
2020-11-02 00:37:30 +00:00
|
|
|
|
|
|
|
chain
|
|
|
|
.eth1_chain
|
|
|
|
.as_ref()
|
|
|
|
.ok_or_else(|| {
|
|
|
|
warp_utils::reject::custom_not_found(
|
|
|
|
"Eth1 sync is disabled. See the --eth1 CLI flag.".to_string(),
|
|
|
|
)
|
|
|
|
})
|
|
|
|
.and_then(|eth1| {
|
2020-11-30 20:29:17 +00:00
|
|
|
eth1.sync_status(head_info.genesis_time, current_slot_opt, &chain.spec)
|
2020-11-02 00:37:30 +00:00
|
|
|
.ok_or_else(|| {
|
|
|
|
warp_utils::reject::custom_server_error(
|
|
|
|
"Unable to determine Eth1 sync status".to_string(),
|
|
|
|
)
|
|
|
|
})
|
|
|
|
})
|
|
|
|
.map(api_types::GenericResponse::from)
|
|
|
|
})
|
|
|
|
});
|
|
|
|
|
|
|
|
// GET lighthouse/eth1/block_cache
|
|
|
|
let get_lighthouse_eth1_block_cache = warp::path("lighthouse")
|
|
|
|
.and(warp::path("eth1"))
|
|
|
|
.and(warp::path("block_cache"))
|
|
|
|
.and(warp::path::end())
|
|
|
|
.and(eth1_service_filter.clone())
|
|
|
|
.and_then(|eth1_service: eth1::Service| {
|
|
|
|
blocking_json_task(move || {
|
|
|
|
Ok(api_types::GenericResponse::from(
|
|
|
|
eth1_service
|
|
|
|
.blocks()
|
|
|
|
.read()
|
|
|
|
.iter()
|
|
|
|
.cloned()
|
|
|
|
.collect::<Vec<_>>(),
|
|
|
|
))
|
|
|
|
})
|
|
|
|
});
|
|
|
|
|
|
|
|
// GET lighthouse/eth1/deposit_cache
|
|
|
|
let get_lighthouse_eth1_deposit_cache = warp::path("lighthouse")
|
|
|
|
.and(warp::path("eth1"))
|
|
|
|
.and(warp::path("deposit_cache"))
|
|
|
|
.and(warp::path::end())
|
|
|
|
.and(eth1_service_filter)
|
|
|
|
.and_then(|eth1_service: eth1::Service| {
|
|
|
|
blocking_json_task(move || {
|
|
|
|
Ok(api_types::GenericResponse::from(
|
|
|
|
eth1_service
|
|
|
|
.deposits()
|
|
|
|
.read()
|
|
|
|
.cache
|
|
|
|
.iter()
|
|
|
|
.cloned()
|
|
|
|
.collect::<Vec<_>>(),
|
|
|
|
))
|
|
|
|
})
|
|
|
|
});
|
|
|
|
|
2020-10-22 06:05:49 +00:00
|
|
|
// GET lighthouse/beacon/states/{state_id}/ssz
|
|
|
|
let get_lighthouse_beacon_states_ssz = warp::path("lighthouse")
|
|
|
|
.and(warp::path("beacon"))
|
|
|
|
.and(warp::path("states"))
|
|
|
|
.and(warp::path::param::<StateId>())
|
|
|
|
.and(warp::path("ssz"))
|
|
|
|
.and(warp::path::end())
|
2020-11-23 01:00:22 +00:00
|
|
|
.and(chain_filter.clone())
|
2020-10-22 06:05:49 +00:00
|
|
|
.and_then(|state_id: StateId, chain: Arc<BeaconChain<T>>| {
|
|
|
|
blocking_task(move || {
|
|
|
|
let state = state_id.state(&chain)?;
|
|
|
|
Response::builder()
|
|
|
|
.status(200)
|
|
|
|
.header("Content-Type", "application/ssz")
|
|
|
|
.body(state.as_ssz_bytes())
|
|
|
|
.map_err(|e| {
|
|
|
|
warp_utils::reject::custom_server_error(format!(
|
|
|
|
"failed to create response: {}",
|
|
|
|
e
|
|
|
|
))
|
|
|
|
})
|
|
|
|
})
|
|
|
|
});
|
|
|
|
|
2020-11-23 01:00:22 +00:00
|
|
|
// GET lighthouse/staking
|
|
|
|
let get_lighthouse_staking = warp::path("lighthouse")
|
|
|
|
.and(warp::path("staking"))
|
|
|
|
.and(warp::path::end())
|
2020-12-04 00:18:58 +00:00
|
|
|
.and(chain_filter.clone())
|
2020-11-23 01:00:22 +00:00
|
|
|
.and_then(|chain: Arc<BeaconChain<T>>| {
|
|
|
|
blocking_json_task(move || {
|
|
|
|
if chain.eth1_chain.is_some() {
|
|
|
|
Ok(())
|
|
|
|
} else {
|
|
|
|
Err(warp_utils::reject::custom_not_found(
|
|
|
|
"staking is not enabled, \
|
|
|
|
see the --staking CLI flag"
|
|
|
|
.to_string(),
|
|
|
|
))
|
|
|
|
}
|
|
|
|
})
|
|
|
|
});
|
|
|
|
|
2020-12-04 00:18:58 +00:00
|
|
|
let get_events = eth1_v1
|
|
|
|
.and(warp::path("events"))
|
|
|
|
.and(warp::path::end())
|
|
|
|
.and(warp::query::<api_types::EventQuery>())
|
|
|
|
.and(chain_filter)
|
|
|
|
.and_then(
|
|
|
|
|topics: api_types::EventQuery, chain: Arc<BeaconChain<T>>| {
|
|
|
|
blocking_task(move || {
|
|
|
|
// for each topic subscribed spawn a new subscription
|
2021-02-10 23:29:49 +00:00
|
|
|
let mut receivers = Vec::with_capacity(topics.topics.0.len());
|
2020-12-04 00:18:58 +00:00
|
|
|
|
|
|
|
if let Some(event_handler) = chain.event_handler.as_ref() {
|
|
|
|
for topic in topics.topics.0.clone() {
|
|
|
|
let receiver = match topic {
|
|
|
|
api_types::EventTopic::Head => event_handler.subscribe_head(),
|
|
|
|
api_types::EventTopic::Block => event_handler.subscribe_block(),
|
|
|
|
api_types::EventTopic::Attestation => {
|
|
|
|
event_handler.subscribe_attestation()
|
|
|
|
}
|
|
|
|
api_types::EventTopic::VoluntaryExit => {
|
|
|
|
event_handler.subscribe_exit()
|
|
|
|
}
|
|
|
|
api_types::EventTopic::FinalizedCheckpoint => {
|
|
|
|
event_handler.subscribe_finalized()
|
|
|
|
}
|
|
|
|
};
|
2021-02-10 23:29:49 +00:00
|
|
|
|
|
|
|
receivers.push(broadcast_stream::BroadcastStream::new(receiver).map(
|
|
|
|
|msg| {
|
|
|
|
match msg {
|
|
|
|
Ok(data) => Event::default()
|
|
|
|
.event(data.topic_name())
|
|
|
|
.json_data(data)
|
|
|
|
.map_err(|e| {
|
|
|
|
warp_utils::reject::server_sent_event_error(
|
|
|
|
format!("{:?}", e),
|
|
|
|
)
|
|
|
|
}),
|
|
|
|
Err(e) => Err(warp_utils::reject::server_sent_event_error(
|
|
|
|
format!("{:?}", e),
|
|
|
|
)),
|
|
|
|
}
|
|
|
|
},
|
|
|
|
));
|
2020-12-04 00:18:58 +00:00
|
|
|
}
|
|
|
|
} else {
|
|
|
|
return Err(warp_utils::reject::custom_server_error(
|
|
|
|
"event handler was not initialized".to_string(),
|
|
|
|
));
|
|
|
|
}
|
|
|
|
|
2021-02-10 23:29:49 +00:00
|
|
|
let s = futures::stream::select_all(receivers);
|
2020-12-04 00:18:58 +00:00
|
|
|
|
2021-02-10 23:29:49 +00:00
|
|
|
Ok::<_, warp::Rejection>(warp::sse::reply(warp::sse::keep_alive().stream(s)))
|
2020-12-04 00:18:58 +00:00
|
|
|
})
|
|
|
|
},
|
|
|
|
);
|
|
|
|
|
2020-09-29 03:46:54 +00:00
|
|
|
// Define the ultimate set of routes that will be provided to the server.
|
|
|
|
let routes = warp::get()
|
|
|
|
.and(
|
|
|
|
get_beacon_genesis
|
2020-11-09 23:13:56 +00:00
|
|
|
.boxed()
|
2020-09-29 03:46:54 +00:00
|
|
|
.or(get_beacon_state_root.boxed())
|
|
|
|
.or(get_beacon_state_fork.boxed())
|
|
|
|
.or(get_beacon_state_finality_checkpoints.boxed())
|
2020-11-09 23:13:56 +00:00
|
|
|
.or(get_beacon_state_validator_balances.boxed())
|
2020-09-29 03:46:54 +00:00
|
|
|
.or(get_beacon_state_validators.boxed())
|
|
|
|
.or(get_beacon_state_validators_id.boxed())
|
|
|
|
.or(get_beacon_state_committees.boxed())
|
|
|
|
.or(get_beacon_headers.boxed())
|
|
|
|
.or(get_beacon_headers_block_id.boxed())
|
|
|
|
.or(get_beacon_block.boxed())
|
|
|
|
.or(get_beacon_block_attestations.boxed())
|
|
|
|
.or(get_beacon_block_root.boxed())
|
|
|
|
.or(get_beacon_pool_attestations.boxed())
|
|
|
|
.or(get_beacon_pool_attester_slashings.boxed())
|
|
|
|
.or(get_beacon_pool_proposer_slashings.boxed())
|
|
|
|
.or(get_beacon_pool_voluntary_exits.boxed())
|
|
|
|
.or(get_config_fork_schedule.boxed())
|
|
|
|
.or(get_config_spec.boxed())
|
|
|
|
.or(get_config_deposit_contract.boxed())
|
|
|
|
.or(get_debug_beacon_states.boxed())
|
|
|
|
.or(get_debug_beacon_heads.boxed())
|
|
|
|
.or(get_node_identity.boxed())
|
|
|
|
.or(get_node_version.boxed())
|
|
|
|
.or(get_node_syncing.boxed())
|
2020-10-22 02:59:42 +00:00
|
|
|
.or(get_node_health.boxed())
|
|
|
|
.or(get_node_peers_by_id.boxed())
|
|
|
|
.or(get_node_peers.boxed())
|
2020-11-13 02:02:41 +00:00
|
|
|
.or(get_node_peer_count.boxed())
|
2020-09-29 03:46:54 +00:00
|
|
|
.or(get_validator_duties_proposer.boxed())
|
|
|
|
.or(get_validator_blocks.boxed())
|
|
|
|
.or(get_validator_attestation_data.boxed())
|
|
|
|
.or(get_validator_aggregate_attestation.boxed())
|
|
|
|
.or(get_lighthouse_health.boxed())
|
|
|
|
.or(get_lighthouse_syncing.boxed())
|
|
|
|
.or(get_lighthouse_peers.boxed())
|
|
|
|
.or(get_lighthouse_peers_connected.boxed())
|
|
|
|
.or(get_lighthouse_proto_array.boxed())
|
|
|
|
.or(get_lighthouse_validator_inclusion_global.boxed())
|
|
|
|
.or(get_lighthouse_validator_inclusion.boxed())
|
2020-11-02 00:37:30 +00:00
|
|
|
.or(get_lighthouse_eth1_syncing.boxed())
|
|
|
|
.or(get_lighthouse_eth1_block_cache.boxed())
|
|
|
|
.or(get_lighthouse_eth1_deposit_cache.boxed())
|
2020-11-23 01:00:22 +00:00
|
|
|
.or(get_lighthouse_beacon_states_ssz.boxed())
|
2020-12-04 00:18:58 +00:00
|
|
|
.or(get_lighthouse_staking.boxed())
|
|
|
|
.or(get_events.boxed()),
|
2020-09-29 03:46:54 +00:00
|
|
|
)
|
2020-11-09 23:13:56 +00:00
|
|
|
.or(warp::post().and(
|
|
|
|
post_beacon_blocks
|
|
|
|
.boxed()
|
|
|
|
.or(post_beacon_pool_attestations.boxed())
|
|
|
|
.or(post_beacon_pool_attester_slashings.boxed())
|
|
|
|
.or(post_beacon_pool_proposer_slashings.boxed())
|
|
|
|
.or(post_beacon_pool_voluntary_exits.boxed())
|
|
|
|
.or(post_validator_duties_attester.boxed())
|
|
|
|
.or(post_validator_aggregate_and_proofs.boxed())
|
|
|
|
.or(post_validator_beacon_committee_subscriptions.boxed()),
|
|
|
|
))
|
2020-09-29 03:46:54 +00:00
|
|
|
.recover(warp_utils::reject::handle_rejection)
|
|
|
|
.with(slog_logging(log.clone()))
|
|
|
|
.with(prometheus_metrics())
|
|
|
|
// Add a `Server` header.
|
|
|
|
.map(|reply| warp::reply::with_header(reply, "Server", &version_with_platform()))
|
2020-10-22 04:47:27 +00:00
|
|
|
.with(cors_builder.build());
|
2020-09-29 03:46:54 +00:00
|
|
|
|
2020-11-28 05:30:57 +00:00
|
|
|
let (listening_socket, server) = {
|
|
|
|
warp::serve(routes).try_bind_with_graceful_shutdown(
|
|
|
|
SocketAddrV4::new(config.listen_addr, config.listen_port),
|
|
|
|
async {
|
|
|
|
shutdown.await;
|
|
|
|
},
|
|
|
|
)?
|
|
|
|
};
|
2020-09-29 03:46:54 +00:00
|
|
|
|
|
|
|
info!(
|
|
|
|
log,
|
|
|
|
"HTTP API started";
|
|
|
|
"listen_address" => listening_socket.to_string(),
|
|
|
|
);
|
|
|
|
|
|
|
|
Ok((listening_socket, server))
|
|
|
|
}
|
|
|
|
|
|
|
|
/// Publish a message to the libp2p pubsub network.
|
|
|
|
fn publish_pubsub_message<T: EthSpec>(
|
|
|
|
network_tx: &UnboundedSender<NetworkMessage<T>>,
|
|
|
|
message: PubsubMessage<T>,
|
|
|
|
) -> Result<(), warp::Rejection> {
|
|
|
|
publish_network_message(
|
|
|
|
network_tx,
|
|
|
|
NetworkMessage::Publish {
|
|
|
|
messages: vec![message],
|
|
|
|
},
|
|
|
|
)
|
|
|
|
}
|
|
|
|
|
|
|
|
/// Publish a message to the libp2p network.
|
|
|
|
fn publish_network_message<T: EthSpec>(
|
|
|
|
network_tx: &UnboundedSender<NetworkMessage<T>>,
|
|
|
|
message: NetworkMessage<T>,
|
|
|
|
) -> Result<(), warp::Rejection> {
|
|
|
|
network_tx.send(message).map_err(|e| {
|
|
|
|
warp_utils::reject::custom_server_error(format!(
|
|
|
|
"unable to publish to network channel: {}",
|
|
|
|
e
|
|
|
|
))
|
|
|
|
})
|
|
|
|
}
|