commit b185d7bbd8

.gitignore (vendored, 2 changes)
@@ -6,4 +6,4 @@ target/
 flamegraph.svg
 perf.data*
 *.tar.gz
-bin/
+/bin
Cargo.lock (generated, 1421 changes)
Diff suppressed because it is too large.
Cargo.toml (10 changes)
@@ -7,8 +7,9 @@ members = [
     "beacon_node/client",
     "beacon_node/eth1",
     "beacon_node/eth2_libp2p",
+    "beacon_node/http_api",
+    "beacon_node/http_metrics",
     "beacon_node/network",
-    "beacon_node/rest_api",
     "beacon_node/store",
     "beacon_node/timer",
     "beacon_node/websocket_server",
@@ -20,6 +21,8 @@ members = [
     "common/compare_fields",
     "common/compare_fields_derive",
     "common/deposit_contract",
+    "common/directory",
+    "common/eth2",
     "common/eth2_config",
     "common/eth2_interop_keypairs",
     "common/eth2_testnet_config",
@@ -29,10 +32,10 @@ members = [
     "common/lighthouse_version",
     "common/logging",
     "common/lru_cache",
-    "common/remote_beacon_node",
-    "common/rest_types",
     "common/slot_clock",
     "common/test_random_derive",
+    "common/warp_utils",
+    "common/task_executor",
     "common/validator_dir",

     "consensus/cached_tree_hash",
@@ -43,7 +46,6 @@ members = [
     "consensus/ssz",
     "consensus/ssz_derive",
     "consensus/ssz_types",
-    "consensus/serde_hex",
     "consensus/serde_utils",
     "consensus/state_processing",
     "consensus/swap_or_not_shuffle",
account_manager/Cargo.toml
@@ -1,33 +1,35 @@
 [package]
 name = "account_manager"
-version = "0.2.13"
+version = "0.3.0"
 authors = ["Paul Hauner <paul@paulhauner.com>", "Luke Anderson <luke@sigmaprime.io>"]
 edition = "2018"

 [dependencies]
 bls = { path = "../crypto/bls" }
-clap = "2.33.0"
+clap = "2.33.3"
 slog = "2.5.2"
-slog-term = "2.5.0"
+slog-term = "2.6.0"
 slog-async = "2.5.0"
 types = { path = "../consensus/types" }
 state_processing = { path = "../consensus/state_processing" }
-dirs = "2.0.2"
+dirs = "3.0.1"
 environment = { path = "../lighthouse/environment" }
 deposit_contract = { path = "../common/deposit_contract" }
-libc = "0.2.65"
+libc = "0.2.79"
 eth2_ssz = "0.1.2"
 eth2_ssz_derive = "0.1.0"
 hex = "0.4.2"
-rayon = "1.3.0"
+rayon = "1.4.1"
 eth2_testnet_config = { path = "../common/eth2_testnet_config" }
 web3 = "0.11.0"
 futures = { version = "0.3.5", features = ["compat"] }
 clap_utils = { path = "../common/clap_utils" }
+directory = { path = "../common/directory" }
 eth2_wallet = { path = "../crypto/eth2_wallet" }
 eth2_wallet_manager = { path = "../common/eth2_wallet_manager" }
-rand = "0.7.2"
+rand = "0.7.3"
 validator_dir = { path = "../common/validator_dir" }
 tokio = { version = "0.2.22", features = ["full"] }
 eth2_keystore = { path = "../crypto/eth2_keystore" }
 account_utils = { path = "../common/account_utils" }
+slashing_protection = { path = "../validator_client/slashing_protection" }
account_manager/src/common.rs
@@ -1,10 +1,8 @@
 use account_utils::PlainText;
 use account_utils::{read_input_from_user, strip_off_newlines};
-use clap::ArgMatches;
 use eth2_wallet::bip39::{Language, Mnemonic};
 use std::fs;
-use std::fs::create_dir_all;
-use std::path::{Path, PathBuf};
+use std::path::PathBuf;
 use std::str::from_utf8;
 use std::thread::sleep;
 use std::time::Duration;
@@ -12,26 +10,6 @@ use std::time::Duration;
 pub const MNEMONIC_PROMPT: &str = "Enter the mnemonic phrase:";
 pub const WALLET_NAME_PROMPT: &str = "Enter wallet name:";

-pub fn ensure_dir_exists<P: AsRef<Path>>(path: P) -> Result<(), String> {
-    let path = path.as_ref();
-
-    if !path.exists() {
-        create_dir_all(path).map_err(|e| format!("Unable to create {:?}: {:?}", path, e))?;
-    }
-
-    Ok(())
-}
-
-pub fn base_wallet_dir(matches: &ArgMatches, arg: &'static str) -> Result<PathBuf, String> {
-    clap_utils::parse_path_with_default_in_home_dir(
-        matches,
-        arg,
-        PathBuf::new().join(".lighthouse").join("wallets"),
-    )
-}
-
-/// Reads in a mnemonic from the user. If the file path is provided, read from it. Otherwise, read
-/// from an interactive prompt using tty, unless the `--stdin-inputs` flag is provided.
 pub fn read_mnemonic_from_cli(
     mnemonic_path: Option<PathBuf>,
     stdin_inputs: bool,
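The helpers removed here do not disappear: later hunks in this commit import `ensure_dir_exists` and new path defaults from the new `common/directory` crate. A minimal sketch of what that crate plausibly exposes, pieced together from the imports elsewhere in this diff — the constant values are assumptions, not shown in the commit:

```rust
// Hypothetical reconstruction of common/directory; only the names are
// confirmed by imports elsewhere in this diff.
use std::fs::create_dir_all;
use std::path::Path;

pub const DEFAULT_VALIDATOR_DIR: &str = "validators"; // assumed value
pub const DEFAULT_SECRET_DIR: &str = "secrets"; // assumed value
pub const DEFAULT_WALLET_DIR: &str = "wallets"; // assumed value

/// Moved here from account_manager/src/common.rs (body copied from the
/// removed code above).
pub fn ensure_dir_exists<P: AsRef<Path>>(path: P) -> Result<(), String> {
    let path = path.as_ref();
    if !path.exists() {
        create_dir_all(path).map_err(|e| format!("Unable to create {:?}: {:?}", path, e))?;
    }
    Ok(())
}
```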
account_manager/src/lib.rs
@@ -10,7 +10,7 @@ use types::EthSpec;
 pub const CMD: &str = "account_manager";
 pub const SECRETS_DIR_FLAG: &str = "secrets-dir";
 pub const VALIDATOR_DIR_FLAG: &str = "validator-dir";
-pub const BASE_DIR_FLAG: &str = "base-dir";
+pub const WALLETS_DIR_FLAG: &str = "wallets-dir";

 pub fn cli_app<'a, 'b>() -> App<'a, 'b> {
     App::new(CMD)
account_manager/src/validator/create.rs
@@ -1,12 +1,16 @@
 use crate::common::read_wallet_name_from_cli;
 use crate::wallet::create::STDIN_INPUTS_FLAG;
-use crate::{common::ensure_dir_exists, SECRETS_DIR_FLAG, VALIDATOR_DIR_FLAG};
+use crate::{SECRETS_DIR_FLAG, WALLETS_DIR_FLAG};
 use account_utils::{
     random_password, read_password_from_user, strip_off_newlines, validator_definitions, PlainText,
 };
 use clap::{App, Arg, ArgMatches};
+use directory::{
+    ensure_dir_exists, parse_path_or_default_with_flag, DEFAULT_SECRET_DIR, DEFAULT_WALLET_DIR,
+};
 use environment::Environment;
 use eth2_wallet_manager::WalletManager;
+use slashing_protection::{SlashingDatabase, SLASHING_PROTECTION_FILENAME};
 use std::ffi::OsStr;
 use std::fs;
 use std::path::{Path, PathBuf};
@@ -14,7 +18,6 @@ use types::EthSpec;
 use validator_dir::Builder as ValidatorDirBuilder;

 pub const CMD: &str = "create";
-pub const BASE_DIR_FLAG: &str = "base-dir";
 pub const WALLET_NAME_FLAG: &str = "wallet-name";
 pub const WALLET_PASSWORD_FLAG: &str = "wallet-password";
 pub const DEPOSIT_GWEI_FLAG: &str = "deposit-gwei";
@@ -44,14 +47,12 @@ pub fn cli_app<'a, 'b>() -> App<'a, 'b> {
                 .takes_value(true),
         )
         .arg(
-            Arg::with_name(VALIDATOR_DIR_FLAG)
-                .long(VALIDATOR_DIR_FLAG)
-                .value_name("VALIDATOR_DIRECTORY")
-                .help(
-                    "The path where the validator directories will be created. \
-                    Defaults to ~/.lighthouse/validators",
-                )
-                .takes_value(true),
+            Arg::with_name(WALLETS_DIR_FLAG)
+                .long(WALLETS_DIR_FLAG)
+                .value_name(WALLETS_DIR_FLAG)
+                .help("A path containing Eth2 EIP-2386 wallets. Defaults to ~/.lighthouse/{testnet}/wallets")
+                .takes_value(true)
+                .conflicts_with("datadir"),
         )
         .arg(
             Arg::with_name(SECRETS_DIR_FLAG)
@@ -59,8 +60,9 @@ pub fn cli_app<'a, 'b>() -> App<'a, 'b> {
                 .value_name("SECRETS_DIR")
                 .help(
                     "The path where the validator keystore passwords will be stored. \
-                    Defaults to ~/.lighthouse/secrets",
+                    Defaults to ~/.lighthouse/{testnet}/secrets",
                 )
+                .conflicts_with("datadir")
                 .takes_value(true),
         )
         .arg(
@@ -111,23 +113,25 @@ pub fn cli_app<'a, 'b>() -> App<'a, 'b> {
 pub fn cli_run<T: EthSpec>(
     matches: &ArgMatches,
     mut env: Environment<T>,
-    wallet_base_dir: PathBuf,
+    validator_dir: PathBuf,
 ) -> Result<(), String> {
     let spec = env.core_context().eth2_config.spec;

     let name: Option<String> = clap_utils::parse_optional(matches, WALLET_NAME_FLAG)?;
     let stdin_inputs = matches.is_present(STDIN_INPUTS_FLAG);
-    let validator_dir = clap_utils::parse_path_with_default_in_home_dir(
-        matches,
-        VALIDATOR_DIR_FLAG,
-        PathBuf::new().join(".lighthouse").join("validators"),
-    )?;
-    let secrets_dir = clap_utils::parse_path_with_default_in_home_dir(
-        matches,
-        SECRETS_DIR_FLAG,
-        PathBuf::new().join(".lighthouse").join("secrets"),
-    )?;
+    let wallet_base_dir = if matches.value_of("datadir").is_some() {
+        let path: PathBuf = clap_utils::parse_required(matches, "datadir")?;
+        path.join(DEFAULT_WALLET_DIR)
+    } else {
+        parse_path_or_default_with_flag(matches, WALLETS_DIR_FLAG, DEFAULT_WALLET_DIR)?
+    };
+    let secrets_dir = if matches.value_of("datadir").is_some() {
+        let path: PathBuf = clap_utils::parse_required(matches, "datadir")?;
+        path.join(DEFAULT_SECRET_DIR)
+    } else {
+        parse_path_or_default_with_flag(matches, SECRETS_DIR_FLAG, DEFAULT_SECRET_DIR)?
+    };
     let deposit_gwei = clap_utils::parse_optional(matches, DEPOSIT_GWEI_FLAG)?
         .unwrap_or_else(|| spec.max_effective_balance);
     let count: Option<usize> = clap_utils::parse_optional(matches, COUNT_FLAG)?;
@@ -136,6 +140,9 @@ pub fn cli_run<T: EthSpec>(
     ensure_dir_exists(&validator_dir)?;
     ensure_dir_exists(&secrets_dir)?;

+    eprintln!("secrets-dir path {:?}", secrets_dir);
+    eprintln!("wallets-dir path {:?}", wallet_base_dir);
+
     let starting_validator_count = existing_validator_count(&validator_dir)?;

     let n = match (count, at_most) {
@@ -166,12 +173,22 @@ pub fn cli_run<T: EthSpec>(
     let wallet_password = read_wallet_password_from_cli(wallet_password_path, stdin_inputs)?;

     let mgr = WalletManager::open(&wallet_base_dir)
-        .map_err(|e| format!("Unable to open --{}: {:?}", BASE_DIR_FLAG, e))?;
+        .map_err(|e| format!("Unable to open --{}: {:?}", WALLETS_DIR_FLAG, e))?;

     let mut wallet = mgr
         .wallet_by_name(&wallet_name)
         .map_err(|e| format!("Unable to open wallet: {:?}", e))?;

+    let slashing_protection_path = validator_dir.join(SLASHING_PROTECTION_FILENAME);
+    let slashing_protection =
+        SlashingDatabase::open_or_create(&slashing_protection_path).map_err(|e| {
+            format!(
+                "Unable to open or create slashing protection database at {}: {:?}",
+                slashing_protection_path.display(),
+                e
+            )
+        })?;
+
     for i in 0..n {
         let voting_password = random_password();
         let withdrawal_password = random_password();
@@ -184,9 +201,25 @@ pub fn cli_run<T: EthSpec>(
             )
             .map_err(|e| format!("Unable to create validator keys: {:?}", e))?;

-            let voting_pubkey = keystores.voting.pubkey().to_string();
+            let voting_pubkey = keystores.voting.public_key().ok_or_else(|| {
+                format!(
+                    "Keystore public key is invalid: {}",
+                    keystores.voting.pubkey()
+                )
+            })?;

-            ValidatorDirBuilder::new(validator_dir.clone(), secrets_dir.clone())
+            slashing_protection
+                .register_validator(&voting_pubkey)
+                .map_err(|e| {
+                    format!(
+                        "Error registering validator {}: {:?}",
+                        voting_pubkey.to_hex_string(),
+                        e
+                    )
+                })?;
+
+            ValidatorDirBuilder::new(validator_dir.clone())
+                .password_dir(secrets_dir.clone())
                 .voting_keystore(keystores.voting, voting_password.as_bytes())
                 .withdrawal_keystore(keystores.withdrawal, withdrawal_password.as_bytes())
                 .create_eth1_tx_data(deposit_gwei, &spec)
@@ -194,7 +227,7 @@ pub fn cli_run<T: EthSpec>(
             .build()
             .map_err(|e| format!("Unable to build validator directory: {:?}", e))?;

-        println!("{}/{}\t0x{}", i + 1, n, voting_pubkey);
+        println!("{}/{}\t{}", i + 1, n, voting_pubkey.to_hex_string());
     }

     Ok(())
@@ -202,14 +235,18 @@ pub fn cli_run<T: EthSpec>(

 /// Returns the number of validators that exist in the given `validator_dir`.
 ///
-/// This function just assumes all files and directories, excluding the validator definitions YAML,
-/// are validator directories, making it likely to return a higher number than accurate
-/// but never a lower one.
+/// This function just assumes all files and directories, excluding the validator definitions YAML
+/// and slashing protection database are validator directories, making it likely to return a higher
+/// number than accurate but never a lower one.
 fn existing_validator_count<P: AsRef<Path>>(validator_dir: P) -> Result<usize, String> {
     fs::read_dir(validator_dir.as_ref())
         .map(|iter| {
             iter.filter_map(|e| e.ok())
-                .filter(|e| e.file_name() != OsStr::new(validator_definitions::CONFIG_FILENAME))
+                .filter(|e| {
+                    e.file_name() != OsStr::new(validator_definitions::CONFIG_FILENAME)
+                        && e.file_name()
+                            != OsStr::new(slashing_protection::SLASHING_PROTECTION_FILENAME)
+                })
                 .count()
         })
         .map_err(|e| format!("Unable to read {:?}: {}", validator_dir.as_ref(), e))
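The same `--datadir`-versus-flag resolution seen above is repeated inline in several files below (validator/mod.rs, wallet/mod.rs, recover.rs). A sketch of the rule as a single helper — hypothetical, since the commit repeats the `if`/`else` at each site — assuming `clap_utils::parse_required` and `directory::parse_path_or_default_with_flag` behave as they are used in this diff:

```rust
use clap::ArgMatches;
use std::path::PathBuf;

/// Hypothetical helper: an explicit --datadir wins and the well-known
/// subdirectory is appended to it; otherwise fall back to the dedicated
/// flag or its testnet-scoped default (e.g. ~/.lighthouse/{testnet}/wallets).
fn resolve_dir(matches: &ArgMatches, flag: &str, default: &str) -> Result<PathBuf, String> {
    if matches.value_of("datadir").is_some() {
        let path: PathBuf = clap_utils::parse_required(matches, "datadir")?;
        Ok(path.join(default))
    } else {
        directory::parse_path_or_default_with_flag(matches, flag, default)
    }
}
```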
account_manager/src/validator/deposit.rs
@@ -46,16 +46,6 @@ pub fn cli_app<'a, 'b>() -> App<'a, 'b> {
                 The deposit contract address will be determined by the --testnet-dir flag on the \
                 primary Lighthouse binary.",
         )
-        .arg(
-            Arg::with_name(VALIDATOR_DIR_FLAG)
-                .long(VALIDATOR_DIR_FLAG)
-                .value_name("VALIDATOR_DIRECTORY")
-                .help(
-                    "The path to the validator client data directory. \
-                    Defaults to ~/.lighthouse/validators",
-                )
-                .takes_value(true),
-        )
         .arg(
             Arg::with_name(VALIDATOR_FLAG)
                 .long(VALIDATOR_FLAG)
@@ -209,14 +199,10 @@ where
 pub fn cli_run<T: EthSpec>(
     matches: &ArgMatches<'_>,
     mut env: Environment<T>,
+    validator_dir: PathBuf,
 ) -> Result<(), String> {
     let log = env.core_context().log().clone();

-    let data_dir = clap_utils::parse_path_with_default_in_home_dir(
-        matches,
-        VALIDATOR_DIR_FLAG,
-        PathBuf::new().join(".lighthouse").join("validators"),
-    )?;
     let validator: String = clap_utils::parse_required(matches, VALIDATOR_FLAG)?;
     let eth1_ipc_path: Option<PathBuf> = clap_utils::parse_optional(matches, ETH1_IPC_FLAG)?;
     let eth1_http_url: Option<String> = clap_utils::parse_optional(matches, ETH1_HTTP_FLAG)?;
@@ -225,7 +211,7 @@ pub fn cli_run<T: EthSpec>(
     let confirmation_batch_size: usize =
         clap_utils::parse_required(matches, CONFIRMATION_BATCH_SIZE_FLAG)?;

-    let manager = ValidatorManager::open(&data_dir)
+    let manager = ValidatorManager::open(&validator_dir)
        .map_err(|e| format!("Unable to read --{}: {:?}", VALIDATOR_DIR_FLAG, e))?;

     let validators = match validator.as_ref() {
account_manager/src/validator/import.rs
@@ -1,5 +1,4 @@
 use crate::wallet::create::STDIN_INPUTS_FLAG;
-use crate::{common::ensure_dir_exists, VALIDATOR_DIR_FLAG};
 use account_utils::{
     eth2_keystore::Keystore,
     read_password_from_user,
@@ -10,6 +9,7 @@ use account_utils::{
     ZeroizeString,
 };
 use clap::{App, Arg, ArgMatches};
+use slashing_protection::{SlashingDatabase, SLASHING_PROTECTION_FILENAME};
 use std::fs;
 use std::path::PathBuf;
 use std::thread::sleep;
@@ -55,16 +55,6 @@ pub fn cli_app<'a, 'b>() -> App<'a, 'b> {
                 .required_unless(KEYSTORE_FLAG)
                 .takes_value(true),
         )
-        .arg(
-            Arg::with_name(VALIDATOR_DIR_FLAG)
-                .long(VALIDATOR_DIR_FLAG)
-                .value_name("VALIDATOR_DIRECTORY")
-                .help(
-                    "The path where the validator directories will be created. \
-                    Defaults to ~/.lighthouse/validators",
-                )
-                .takes_value(true),
-        )
         .arg(
             Arg::with_name(STDIN_INPUTS_FLAG)
                 .long(STDIN_INPUTS_FLAG)
@@ -77,22 +67,25 @@ pub fn cli_app<'a, 'b>() -> App<'a, 'b> {
         )
 }

-pub fn cli_run(matches: &ArgMatches) -> Result<(), String> {
+pub fn cli_run(matches: &ArgMatches, validator_dir: PathBuf) -> Result<(), String> {
     let keystore: Option<PathBuf> = clap_utils::parse_optional(matches, KEYSTORE_FLAG)?;
     let keystores_dir: Option<PathBuf> = clap_utils::parse_optional(matches, DIR_FLAG)?;
-    let validator_dir = clap_utils::parse_path_with_default_in_home_dir(
-        matches,
-        VALIDATOR_DIR_FLAG,
-        PathBuf::new().join(".lighthouse").join("validators"),
-    )?;
     let stdin_inputs = matches.is_present(STDIN_INPUTS_FLAG);
     let reuse_password = matches.is_present(REUSE_PASSWORD_FLAG);

-    ensure_dir_exists(&validator_dir)?;
-
     let mut defs = ValidatorDefinitions::open_or_create(&validator_dir)
         .map_err(|e| format!("Unable to open {}: {:?}", CONFIG_FILENAME, e))?;

+    let slashing_protection_path = validator_dir.join(SLASHING_PROTECTION_FILENAME);
+    let slashing_protection =
+        SlashingDatabase::open_or_create(&slashing_protection_path).map_err(|e| {
+            format!(
+                "Unable to open or create slashing protection database at {}: {:?}",
+                slashing_protection_path.display(),
+                e
+            )
+        })?;
+
     // Collect the paths for the keystores that should be imported.
     let keystore_paths = match (keystore, keystores_dir) {
         (Some(keystore), None) => vec![keystore],
@@ -123,6 +116,7 @@ pub fn cli_run(matches: &ArgMatches) -> Result<(), String> {
     //
     // - Obtain the keystore password, if the user desires.
     // - Copy the keystore into the `validator_dir`.
+    // - Register the voting key with the slashing protection database.
     // - Add the keystore to the validator definitions file.
     //
     // Skip keystores that already exist, but exit early if any operation fails.
@@ -203,6 +197,20 @@ pub fn cli_run(matches: &ArgMatches) -> Result<(), String> {
         fs::copy(&src_keystore, &dest_keystore)
             .map_err(|e| format!("Unable to copy keystore: {:?}", e))?;

+        // Register with slashing protection.
+        let voting_pubkey = keystore
+            .public_key()
+            .ok_or_else(|| format!("Keystore public key is invalid: {}", keystore.pubkey()))?;
+        slashing_protection
+            .register_validator(&voting_pubkey)
+            .map_err(|e| {
+                format!(
+                    "Error registering validator {}: {:?}",
+                    voting_pubkey.to_hex_string(),
+                    e
+                )
+            })?;
+
         eprintln!("Successfully imported keystore.");
         num_imported_keystores += 1;

account_manager/src/validator/list.rs
@@ -1,38 +1,21 @@
 use crate::VALIDATOR_DIR_FLAG;
-use clap::{App, Arg, ArgMatches};
+use clap::App;
 use std::path::PathBuf;
 use validator_dir::Manager as ValidatorManager;

 pub const CMD: &str = "list";

 pub fn cli_app<'a, 'b>() -> App<'a, 'b> {
-    App::new(CMD)
-        .arg(
-            Arg::with_name(VALIDATOR_DIR_FLAG)
-                .long(VALIDATOR_DIR_FLAG)
-                .value_name("VALIDATOR_DIRECTORY")
-                .help(
-                    "The path to search for validator directories. \
-                    Defaults to ~/.lighthouse/validators",
-                )
-                .takes_value(true),
-        )
-        .about("Lists the names of all validators.")
+    App::new(CMD).about("Lists the names of all validators.")
 }

-pub fn cli_run(matches: &ArgMatches<'_>) -> Result<(), String> {
-    let data_dir = clap_utils::parse_path_with_default_in_home_dir(
-        matches,
-        VALIDATOR_DIR_FLAG,
-        PathBuf::new().join(".lighthouse").join("validators"),
-    )?;
-
-    let mgr = ValidatorManager::open(&data_dir)
+pub fn cli_run(validator_dir: PathBuf) -> Result<(), String> {
+    let mgr = ValidatorManager::open(&validator_dir)
         .map_err(|e| format!("Unable to read --{}: {:?}", VALIDATOR_DIR_FLAG, e))?;

     for (name, _path) in mgr
         .directory_names()
-        .map_err(|e| format!("Unable to list wallets: {:?}", e))?
+        .map_err(|e| format!("Unable to list validators: {:?}", e))?
     {
         println!("{}", name)
     }
account_manager/src/validator/mod.rs
@@ -3,10 +3,13 @@ pub mod deposit;
 pub mod import;
 pub mod list;
 pub mod recover;
+pub mod slashing_protection;

-use crate::common::base_wallet_dir;
+use crate::VALIDATOR_DIR_FLAG;
 use clap::{App, Arg, ArgMatches};
+use directory::{parse_path_or_default_with_flag, DEFAULT_VALIDATOR_DIR};
 use environment::Environment;
+use std::path::PathBuf;
 use types::EthSpec;

 pub const CMD: &str = "validator";
@@ -15,28 +18,42 @@ pub fn cli_app<'a, 'b>() -> App<'a, 'b> {
     App::new(CMD)
         .about("Provides commands for managing Eth2 validators.")
         .arg(
-            Arg::with_name("base-dir")
-                .long("base-dir")
-                .value_name("BASE_DIRECTORY")
-                .help("A path containing Eth2 EIP-2386 wallets. Defaults to ~/.lighthouse/wallets")
-                .takes_value(true),
+            Arg::with_name(VALIDATOR_DIR_FLAG)
+                .long(VALIDATOR_DIR_FLAG)
+                .value_name("VALIDATOR_DIRECTORY")
+                .help(
+                    "The path to search for validator directories. \
+                    Defaults to ~/.lighthouse/{testnet}/validators",
+                )
+                .takes_value(true)
+                .conflicts_with("datadir"),
         )
         .subcommand(create::cli_app())
         .subcommand(deposit::cli_app())
         .subcommand(import::cli_app())
         .subcommand(list::cli_app())
         .subcommand(recover::cli_app())
+        .subcommand(slashing_protection::cli_app())
 }

 pub fn cli_run<T: EthSpec>(matches: &ArgMatches, env: Environment<T>) -> Result<(), String> {
-    let base_wallet_dir = base_wallet_dir(matches, "base-dir")?;
+    let validator_base_dir = if matches.value_of("datadir").is_some() {
+        let path: PathBuf = clap_utils::parse_required(matches, "datadir")?;
+        path.join(DEFAULT_VALIDATOR_DIR)
+    } else {
+        parse_path_or_default_with_flag(matches, VALIDATOR_DIR_FLAG, DEFAULT_VALIDATOR_DIR)?
+    };
+    eprintln!("validator-dir path: {:?}", validator_base_dir);

     match matches.subcommand() {
-        (create::CMD, Some(matches)) => create::cli_run::<T>(matches, env, base_wallet_dir),
-        (deposit::CMD, Some(matches)) => deposit::cli_run::<T>(matches, env),
-        (import::CMD, Some(matches)) => import::cli_run(matches),
-        (list::CMD, Some(matches)) => list::cli_run(matches),
-        (recover::CMD, Some(matches)) => recover::cli_run(matches),
+        (create::CMD, Some(matches)) => create::cli_run::<T>(matches, env, validator_base_dir),
+        (deposit::CMD, Some(matches)) => deposit::cli_run::<T>(matches, env, validator_base_dir),
+        (import::CMD, Some(matches)) => import::cli_run(matches, validator_base_dir),
+        (list::CMD, Some(_)) => list::cli_run(validator_base_dir),
+        (recover::CMD, Some(matches)) => recover::cli_run(matches, validator_base_dir),
+        (slashing_protection::CMD, Some(matches)) => {
+            slashing_protection::cli_run(matches, env, validator_base_dir)
+        }
         (unknown, _) => Err(format!(
             "{} does not have a {} command. See --help",
             CMD, unknown
account_manager/src/validator/recover.rs
@@ -1,11 +1,13 @@
 use super::create::STORE_WITHDRAW_FLAG;
-use crate::common::{ensure_dir_exists, read_mnemonic_from_cli};
+use crate::common::read_mnemonic_from_cli;
 use crate::validator::create::COUNT_FLAG;
 use crate::wallet::create::STDIN_INPUTS_FLAG;
-use crate::{SECRETS_DIR_FLAG, VALIDATOR_DIR_FLAG};
+use crate::SECRETS_DIR_FLAG;
 use account_utils::eth2_keystore::{keypair_from_secret, Keystore, KeystoreBuilder};
 use account_utils::random_password;
 use clap::{App, Arg, ArgMatches};
+use directory::ensure_dir_exists;
+use directory::{parse_path_or_default_with_flag, DEFAULT_SECRET_DIR};
 use eth2_wallet::bip39::Seed;
 use eth2_wallet::{recover_validator_secret_from_mnemonic, KeyType, ValidatorKeystores};
 use std::path::PathBuf;
@@ -48,23 +50,13 @@ pub fn cli_app<'a, 'b>() -> App<'a, 'b> {
                 )
                 .takes_value(true)
         )
-        .arg(
-            Arg::with_name(VALIDATOR_DIR_FLAG)
-                .long(VALIDATOR_DIR_FLAG)
-                .value_name("VALIDATOR_DIRECTORY")
-                .help(
-                    "The path where the validator directories will be created. \
-                    Defaults to ~/.lighthouse/validators",
-                )
-                .takes_value(true),
-        )
         .arg(
             Arg::with_name(SECRETS_DIR_FLAG)
                 .long(SECRETS_DIR_FLAG)
                 .value_name("SECRETS_DIR")
                 .help(
                     "The path where the validator keystore passwords will be stored. \
-                    Defaults to ~/.lighthouse/secrets",
+                    Defaults to ~/.lighthouse/{testnet}/secrets",
                 )
                 .takes_value(true),
         )
@@ -84,17 +76,13 @@ pub fn cli_app<'a, 'b>() -> App<'a, 'b> {
         )
 }

-pub fn cli_run(matches: &ArgMatches) -> Result<(), String> {
-    let validator_dir = clap_utils::parse_path_with_default_in_home_dir(
-        matches,
-        VALIDATOR_DIR_FLAG,
-        PathBuf::new().join(".lighthouse").join("validators"),
-    )?;
-    let secrets_dir = clap_utils::parse_path_with_default_in_home_dir(
-        matches,
-        SECRETS_DIR_FLAG,
-        PathBuf::new().join(".lighthouse").join("secrets"),
-    )?;
+pub fn cli_run(matches: &ArgMatches, validator_dir: PathBuf) -> Result<(), String> {
+    let secrets_dir = if matches.value_of("datadir").is_some() {
+        let path: PathBuf = clap_utils::parse_required(matches, "datadir")?;
+        path.join(DEFAULT_SECRET_DIR)
+    } else {
+        parse_path_or_default_with_flag(matches, SECRETS_DIR_FLAG, DEFAULT_SECRET_DIR)?
+    };
     let first_index: u32 = clap_utils::parse_required(matches, FIRST_INDEX_FLAG)?;
     let count: u32 = clap_utils::parse_required(matches, COUNT_FLAG)?;
     let mnemonic_path: Option<PathBuf> = clap_utils::parse_optional(matches, MNEMONIC_FLAG)?;
@@ -136,7 +124,8 @@ pub fn cli_run(matches: &ArgMatches) -> Result<(), String> {

         let voting_pubkey = keystores.voting.pubkey().to_string();

-        ValidatorDirBuilder::new(validator_dir.clone(), secrets_dir.clone())
+        ValidatorDirBuilder::new(validator_dir.clone())
+            .password_dir(secrets_dir.clone())
             .voting_keystore(keystores.voting, voting_password.as_bytes())
             .withdrawal_keystore(keystores.withdrawal, withdrawal_password.as_bytes())
             .store_withdrawal_keystore(matches.is_present(STORE_WITHDRAW_FLAG))
account_manager/src/validator/slashing_protection.rs (new file, 137 lines)
@@ -0,0 +1,137 @@
+use clap::{App, Arg, ArgMatches};
+use environment::Environment;
+use slashing_protection::{
+    interchange::Interchange, SlashingDatabase, SLASHING_PROTECTION_FILENAME,
+};
+use std::fs::File;
+use std::path::PathBuf;
+use types::EthSpec;
+
+pub const CMD: &str = "slashing-protection";
+pub const IMPORT_CMD: &str = "import";
+pub const EXPORT_CMD: &str = "export";
+
+pub const IMPORT_FILE_ARG: &str = "IMPORT-FILE";
+pub const EXPORT_FILE_ARG: &str = "EXPORT-FILE";
+
+pub fn cli_app<'a, 'b>() -> App<'a, 'b> {
+    App::new(CMD)
+        .about("Import or export slashing protection data to or from another client")
+        .subcommand(
+            App::new(IMPORT_CMD)
+                .about("Import an interchange file")
+                .arg(
+                    Arg::with_name(IMPORT_FILE_ARG)
+                        .takes_value(true)
+                        .value_name("FILE")
+                        .help("The slashing protection interchange file to import (.json)"),
+                ),
+        )
+        .subcommand(
+            App::new(EXPORT_CMD)
+                .about("Export an interchange file")
+                .arg(
+                    Arg::with_name(EXPORT_FILE_ARG)
+                        .takes_value(true)
+                        .value_name("FILE")
+                        .help("The filename to export the interchange file to"),
+                ),
+        )
+}
+
+pub fn cli_run<T: EthSpec>(
+    matches: &ArgMatches<'_>,
+    env: Environment<T>,
+    validator_base_dir: PathBuf,
+) -> Result<(), String> {
+    let slashing_protection_db_path = validator_base_dir.join(SLASHING_PROTECTION_FILENAME);
+
+    let genesis_validators_root = env
+        .testnet
+        .and_then(|testnet_config| {
+            Some(
+                testnet_config
+                    .genesis_state
+                    .as_ref()?
+                    .genesis_validators_root,
+            )
+        })
+        .ok_or_else(|| {
+            "Unable to get genesis validators root from testnet config, has genesis occurred?"
+        })?;
+
+    match matches.subcommand() {
+        (IMPORT_CMD, Some(matches)) => {
+            let import_filename: PathBuf = clap_utils::parse_required(&matches, IMPORT_FILE_ARG)?;
+            let import_file = File::open(&import_filename).map_err(|e| {
+                format!(
+                    "Unable to open import file at {}: {:?}",
+                    import_filename.display(),
+                    e
+                )
+            })?;
+
+            let interchange = Interchange::from_json_reader(&import_file)
+                .map_err(|e| format!("Error parsing file for import: {:?}", e))?;
+
+            let slashing_protection_database =
+                SlashingDatabase::open_or_create(&slashing_protection_db_path).map_err(|e| {
+                    format!(
+                        "Unable to open database at {}: {:?}",
+                        slashing_protection_db_path.display(),
+                        e
+                    )
+                })?;
+
+            slashing_protection_database
+                .import_interchange_info(&interchange, genesis_validators_root)
+                .map_err(|e| {
+                    format!(
+                        "Error during import, no data imported: {:?}\n\
+                         IT IS NOT SAFE TO START VALIDATING",
+                        e
+                    )
+                })?;
+
+            eprintln!("Import completed successfully");
+
+            Ok(())
+        }
+        (EXPORT_CMD, Some(matches)) => {
+            let export_filename: PathBuf = clap_utils::parse_required(&matches, EXPORT_FILE_ARG)?;
+
+            if !slashing_protection_db_path.exists() {
+                return Err(format!(
+                    "No slashing protection database exists at: {}",
+                    slashing_protection_db_path.display()
+                ));
+            }
+
+            let slashing_protection_database = SlashingDatabase::open(&slashing_protection_db_path)
+                .map_err(|e| {
+                    format!(
+                        "Unable to open database at {}: {:?}",
+                        slashing_protection_db_path.display(),
+                        e
+                    )
+                })?;
+
+            let interchange = slashing_protection_database
+                .export_interchange_info(genesis_validators_root)
+                .map_err(|e| format!("Error during export: {:?}", e))?;
+
+            let output_file = File::create(export_filename)
+                .map_err(|e| format!("Error creating output file: {:?}", e))?;
+
+            interchange
+                .write_to(&output_file)
+                .map_err(|e| format!("Error writing output file: {:?}", e))?;
+
+            eprintln!("Export completed successfully");
+
+            Ok(())
+        }
+        ("", _) => Err("No subcommand provided, see --help for options".to_string()),
+        (command, _) => Err(format!("No such subcommand `{}`", command)),
+    }
+}
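Together with the dispatch added in validator/mod.rs, this new file wires an `import`/`export` subcommand pair around the slashing protection interchange format. A condensed sketch of the import path the subcommand performs, assuming the `SlashingDatabase` and `Interchange` APIs exactly as used above (the paths are placeholders):

```rust
use slashing_protection::{interchange::Interchange, SlashingDatabase};
use std::fs::File;
use std::path::Path;
use types::Hash256;

/// Sketch of the import path, mirroring the subcommand above.
fn import_interchange(db_path: &Path, json_path: &Path, gvr: Hash256) -> Result<(), String> {
    // Parse the interchange JSON first, so a malformed file never touches the database.
    let file = File::open(json_path).map_err(|e| format!("open: {:?}", e))?;
    let interchange =
        Interchange::from_json_reader(&file).map_err(|e| format!("parse: {:?}", e))?;
    // Create the database if needed, then merge the imported history into it,
    // keyed to the genesis validators root of the target network.
    let db = SlashingDatabase::open_or_create(db_path).map_err(|e| format!("db: {:?}", e))?;
    db.import_interchange_info(&interchange, gvr)
        .map_err(|e| format!("import: {:?} - IT IS NOT SAFE TO START VALIDATING", e))?;
    Ok(())
}
```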
account_manager/src/wallet/create.rs
@@ -1,5 +1,5 @@
 use crate::common::read_wallet_name_from_cli;
-use crate::BASE_DIR_FLAG;
+use crate::WALLETS_DIR_FLAG;
 use account_utils::{
     is_password_sufficiently_complex, random_password, read_password_from_user, strip_off_newlines,
 };
@@ -102,7 +102,7 @@ pub fn cli_app<'a, 'b>() -> App<'a, 'b> {
         )
 }

-pub fn cli_run(matches: &ArgMatches, base_dir: PathBuf) -> Result<(), String> {
+pub fn cli_run(matches: &ArgMatches, wallet_base_dir: PathBuf) -> Result<(), String> {
     let mnemonic_output_path: Option<PathBuf> = clap_utils::parse_optional(matches, MNEMONIC_FLAG)?;

     // Create a new random mnemonic.
@@ -114,7 +114,7 @@ pub fn cli_run(matches: &ArgMatches, base_dir: PathBuf) -> Result<(), String> {
         Language::English,
     );

-    let wallet = create_wallet_from_mnemonic(matches, &base_dir.as_path(), &mnemonic)?;
+    let wallet = create_wallet_from_mnemonic(matches, &wallet_base_dir.as_path(), &mnemonic)?;

     if let Some(path) = mnemonic_output_path {
         create_with_600_perms(&path, mnemonic.phrase().as_bytes())
@@ -147,7 +147,7 @@ pub fn cli_run(matches: &ArgMatches, base_dir: PathBuf) -> Result<(), String> {

 pub fn create_wallet_from_mnemonic(
     matches: &ArgMatches,
-    base_dir: &Path,
+    wallet_base_dir: &Path,
     mnemonic: &Mnemonic,
 ) -> Result<LockedWallet, String> {
     let name: Option<String> = clap_utils::parse_optional(matches, NAME_FLAG)?;
@@ -160,8 +160,8 @@ pub fn create_wallet_from_mnemonic(
         unknown => return Err(format!("--{} {} is not supported", TYPE_FLAG, unknown)),
     };

-    let mgr = WalletManager::open(&base_dir)
-        .map_err(|e| format!("Unable to open --{}: {:?}", BASE_DIR_FLAG, e))?;
+    let mgr = WalletManager::open(&wallet_base_dir)
+        .map_err(|e| format!("Unable to open --{}: {:?}", WALLETS_DIR_FLAG, e))?;

     let wallet_password: PlainText = match wallet_password_path {
         Some(path) => {
account_manager/src/wallet/list.rs
@@ -1,4 +1,4 @@
-use crate::BASE_DIR_FLAG;
+use crate::WALLETS_DIR_FLAG;
 use clap::App;
 use eth2_wallet_manager::WalletManager;
 use std::path::PathBuf;
@@ -9,9 +9,9 @@ pub fn cli_app<'a, 'b>() -> App<'a, 'b> {
     App::new(CMD).about("Lists the names of all wallets.")
 }

-pub fn cli_run(base_dir: PathBuf) -> Result<(), String> {
-    let mgr = WalletManager::open(&base_dir)
-        .map_err(|e| format!("Unable to open --{}: {:?}", BASE_DIR_FLAG, e))?;
+pub fn cli_run(wallet_base_dir: PathBuf) -> Result<(), String> {
+    let mgr = WalletManager::open(&wallet_base_dir)
+        .map_err(|e| format!("Unable to open --{}: {:?}", WALLETS_DIR_FLAG, e))?;

     for (name, _uuid) in mgr
         .wallets()
@ -2,11 +2,10 @@ pub mod create;
|
|||||||
pub mod list;
|
pub mod list;
|
||||||
pub mod recover;
|
pub mod recover;
|
||||||
|
|
||||||
use crate::{
|
use crate::WALLETS_DIR_FLAG;
|
||||||
common::{base_wallet_dir, ensure_dir_exists},
|
|
||||||
BASE_DIR_FLAG,
|
|
||||||
};
|
|
||||||
use clap::{App, Arg, ArgMatches};
|
use clap::{App, Arg, ArgMatches};
|
||||||
|
use directory::{ensure_dir_exists, parse_path_or_default_with_flag, DEFAULT_WALLET_DIR};
|
||||||
|
use std::path::PathBuf;
|
||||||
|
|
||||||
pub const CMD: &str = "wallet";
|
pub const CMD: &str = "wallet";
|
||||||
|
|
||||||
@@ -14,11 +13,12 @@ pub fn cli_app<'a, 'b>() -> App<'a, 'b> {
     App::new(CMD)
         .about("Manage wallets, from which validator keys can be derived.")
         .arg(
-            Arg::with_name(BASE_DIR_FLAG)
-                .long(BASE_DIR_FLAG)
-                .value_name("BASE_DIRECTORY")
-                .help("A path containing Eth2 EIP-2386 wallets. Defaults to ~/.lighthouse/wallets")
-                .takes_value(true),
+            Arg::with_name(WALLETS_DIR_FLAG)
+                .long(WALLETS_DIR_FLAG)
+                .value_name("WALLETS_DIRECTORY")
+                .help("A path containing Eth2 EIP-2386 wallets. Defaults to ~/.lighthouse/{testnet}/wallets")
+                .takes_value(true)
+                .conflicts_with("datadir"),
         )
         .subcommand(create::cli_app())
         .subcommand(list::cli_app())
@@ -26,13 +26,20 @@ pub fn cli_app<'a, 'b>() -> App<'a, 'b> {
 }

 pub fn cli_run(matches: &ArgMatches) -> Result<(), String> {
-    let base_dir = base_wallet_dir(matches, BASE_DIR_FLAG)?;
-    ensure_dir_exists(&base_dir)?;
+    let wallet_base_dir = if matches.value_of("datadir").is_some() {
+        let path: PathBuf = clap_utils::parse_required(matches, "datadir")?;
+        path.join(DEFAULT_WALLET_DIR)
+    } else {
+        parse_path_or_default_with_flag(matches, WALLETS_DIR_FLAG, DEFAULT_WALLET_DIR)?
+    };
+    ensure_dir_exists(&wallet_base_dir)?;
+
+    eprintln!("wallet-dir path: {:?}", wallet_base_dir);

     match matches.subcommand() {
-        (create::CMD, Some(matches)) => create::cli_run(matches, base_dir),
-        (list::CMD, Some(_)) => list::cli_run(base_dir),
-        (recover::CMD, Some(matches)) => recover::cli_run(matches, base_dir),
+        (create::CMD, Some(matches)) => create::cli_run(matches, wallet_base_dir),
+        (list::CMD, Some(_)) => list::cli_run(wallet_base_dir),
+        (recover::CMD, Some(matches)) => recover::cli_run(matches, wallet_base_dir),
         (unknown, _) => Err(format!(
             "{} does not have a {} command. See --help",
             CMD, unknown
beacon_node/Cargo.toml
@@ -1,6 +1,6 @@
 [package]
 name = "beacon_node"
-version = "0.2.13"
+version = "0.3.0"
 authors = ["Paul Hauner <paul@paulhauner.com>", "Age Manning <Age@AgeManning.com"]
 edition = "2018"

@@ -20,24 +20,26 @@ beacon_chain = { path = "beacon_chain" }
 types = { path = "../consensus/types" }
 store = { path = "./store" }
 client = { path = "client" }
-clap = "2.33.0"
+clap = "2.33.3"
 rand = "0.7.3"
 slog = { version = "2.5.2", features = ["max_level_trace", "release_max_level_trace"] }
-slog-term = "2.5.0"
+slog-term = "2.6.0"
 slog-async = "2.5.0"
-ctrlc = { version = "3.1.4", features = ["termination"] }
+ctrlc = { version = "3.1.6", features = ["termination"] }
 tokio = { version = "0.2.22", features = ["time"] }
 exit-future = "0.2.0"
-dirs = "2.0.2"
+dirs = "3.0.1"
 logging = { path = "../common/logging" }
+directory = {path = "../common/directory"}
 futures = "0.3.5"
 environment = { path = "../lighthouse/environment" }
+task_executor = { path = "../common/task_executor" }
 genesis = { path = "genesis" }
 eth2_testnet_config = { path = "../common/eth2_testnet_config" }
 eth2_libp2p = { path = "./eth2_libp2p" }
 eth2_ssz = "0.1.2"
-serde = "1.0.110"
+serde = "1.0.116"
 clap_utils = { path = "../common/clap_utils" }
-hyper = "0.13.5"
+hyper = "0.13.8"
 lighthouse_version = { path = "../common/lighthouse_version" }
 hex = "0.4.2"
beacon_node/beacon_chain/Cargo.toml
@@ -12,6 +12,7 @@ participation_metrics = [] # Exposes validator participation metrics to Prometh
 [dev-dependencies]
 int_to_bytes = { path = "../../consensus/int_to_bytes" }
 maplit = "1.0.2"
+environment = { path = "../../lighthouse/environment" }

 [dependencies]
 eth2_config = { path = "../../common/eth2_config" }
@@ -19,42 +20,42 @@ merkle_proof = { path = "../../consensus/merkle_proof" }
 store = { path = "../store" }
 parking_lot = "0.11.0"
 lazy_static = "1.4.0"
-smallvec = "1.4.1"
+smallvec = "1.4.2"
 lighthouse_metrics = { path = "../../common/lighthouse_metrics" }
-log = "0.4.8"
+log = "0.4.11"
 operation_pool = { path = "../operation_pool" }
-rayon = "1.3.0"
-serde = "1.0.110"
-serde_derive = "1.0.110"
-serde_yaml = "0.8.11"
-serde_json = "1.0.52"
+rayon = "1.4.1"
+serde = "1.0.116"
+serde_derive = "1.0.116"
+serde_yaml = "0.8.13"
+serde_json = "1.0.58"
 slog = { version = "2.5.2", features = ["max_level_trace"] }
 slog-term = "2.6.0"
-sloggers = "1.0.0"
+sloggers = "1.0.1"
 slot_clock = { path = "../../common/slot_clock" }
 eth2_hashing = "0.1.0"
 eth2_ssz = "0.1.2"
 eth2_ssz_types = { path = "../../consensus/ssz_types" }
 eth2_ssz_derive = "0.1.0"
 state_processing = { path = "../../consensus/state_processing" }
-tree_hash = "0.1.0"
+tree_hash = "0.1.1"
 types = { path = "../../consensus/types" }
 tokio = "0.2.22"
 eth1 = { path = "../eth1" }
 websocket_server = { path = "../websocket_server" }
 futures = "0.3.5"
 genesis = { path = "../genesis" }
-integer-sqrt = "0.1.3"
+integer-sqrt = "0.1.5"
 rand = "0.7.3"
 rand_core = "0.5.1"
 proto_array = { path = "../../consensus/proto_array" }
-lru = "0.5.1"
+lru = "0.6.0"
 tempfile = "3.1.0"
-bitvec = "0.17.4"
+bitvec = "0.19.3"
 bls = { path = "../../crypto/bls" }
 safe_arith = { path = "../../consensus/safe_arith" }
 fork_choice = { path = "../../consensus/fork_choice" }
-environment = { path = "../../lighthouse/environment" }
+task_executor = { path = "../../common/task_executor" }
 bus = "2.2.3"
 derivative = "2.1.1"
 itertools = "0.9.0"
@ -28,8 +28,7 @@
|
|||||||
|
|
||||||
use crate::{
|
use crate::{
|
||||||
beacon_chain::{
|
beacon_chain::{
|
||||||
ATTESTATION_CACHE_LOCK_TIMEOUT, HEAD_LOCK_TIMEOUT, MAXIMUM_GOSSIP_CLOCK_DISPARITY,
|
HEAD_LOCK_TIMEOUT, MAXIMUM_GOSSIP_CLOCK_DISPARITY, VALIDATOR_PUBKEY_CACHE_LOCK_TIMEOUT,
|
||||||
VALIDATOR_PUBKEY_CACHE_LOCK_TIMEOUT,
|
|
||||||
},
|
},
|
||||||
metrics,
|
metrics,
|
||||||
observed_attestations::ObserveOutcome,
|
observed_attestations::ObserveOutcome,
|
||||||
@@ -38,12 +37,10 @@ use crate::{
 };
 use bls::verify_signature_sets;
 use proto_array::Block as ProtoBlock;
-use slog::debug;
 use slot_clock::SlotClock;
 use state_processing::{
     common::get_indexed_attestation,
     per_block_processing::errors::AttestationValidationError,
-    per_slot_processing,
     signature_sets::{
         indexed_attestation_signature_set_from_pubkeys,
         signed_aggregate_selection_proof_signature_set, signed_aggregate_signature_set,
@ -53,7 +50,7 @@ use std::borrow::Cow;
|
|||||||
use tree_hash::TreeHash;
|
use tree_hash::TreeHash;
|
||||||
use types::{
|
use types::{
|
||||||
Attestation, BeaconCommittee, CommitteeIndex, Epoch, EthSpec, Hash256, IndexedAttestation,
|
Attestation, BeaconCommittee, CommitteeIndex, Epoch, EthSpec, Hash256, IndexedAttestation,
|
||||||
RelativeEpoch, SelectionProof, SignedAggregateAndProof, Slot, SubnetId,
|
SelectionProof, SignedAggregateAndProof, Slot, SubnetId,
|
||||||
};
|
};
|
||||||
|
|
||||||
/// Returned when an attestation was not successfully verified. It might not have been verified for
|
/// Returned when an attestation was not successfully verified. It might not have been verified for
|
||||||
@@ -267,6 +264,7 @@ pub struct VerifiedAggregatedAttestation<T: BeaconChainTypes> {
 pub struct VerifiedUnaggregatedAttestation<T: BeaconChainTypes> {
     attestation: Attestation<T::EthSpec>,
     indexed_attestation: IndexedAttestation<T::EthSpec>,
+    subnet_id: SubnetId,
 }

 /// Custom `Clone` implementation is to avoid the restrictive trait bounds applied by the usual derive
@@ -276,6 +274,7 @@ impl<T: BeaconChainTypes> Clone for VerifiedUnaggregatedAttestation<T> {
         Self {
             attestation: self.attestation.clone(),
             indexed_attestation: self.indexed_attestation.clone(),
+            subnet_id: self.subnet_id,
         }
     }
 }
@@ -437,6 +436,11 @@ impl<T: BeaconChainTypes> VerifiedAggregatedAttestation<T> {
     pub fn attestation(&self) -> &Attestation<T::EthSpec> {
         &self.signed_aggregate.message.aggregate
     }
+
+    /// Returns the underlying `signed_aggregate`.
+    pub fn aggregate(&self) -> &SignedAggregateAndProof<T::EthSpec> {
+        &self.signed_aggregate
+    }
 }

 impl<T: BeaconChainTypes> VerifiedUnaggregatedAttestation<T> {
@@ -447,7 +451,7 @@ impl<T: BeaconChainTypes> VerifiedUnaggregatedAttestation<T> {
     /// verify that it was received on the correct subnet.
     pub fn verify(
         attestation: Attestation<T::EthSpec>,
-        subnet_id: SubnetId,
+        subnet_id: Option<SubnetId>,
         chain: &BeaconChain<T>,
     ) -> Result<Self, Error> {
         let attestation_epoch = attestation.data.slot.epoch(T::EthSpec::slots_per_epoch());
@@ -493,13 +497,15 @@ impl<T: BeaconChainTypes> VerifiedUnaggregatedAttestation<T> {
         )
         .map_err(BeaconChainError::from)?;

-        // Ensure the attestation is from the correct subnet.
-        if subnet_id != expected_subnet_id {
-            return Err(Error::InvalidSubnetId {
-                received: subnet_id,
-                expected: expected_subnet_id,
-            });
-        }
+        // If a subnet was specified, ensure that subnet is correct.
+        if let Some(subnet_id) = subnet_id {
+            if subnet_id != expected_subnet_id {
+                return Err(Error::InvalidSubnetId {
+                    received: subnet_id,
+                    expected: expected_subnet_id,
+                });
+            }
+        };

         let validator_index = *indexed_attestation
             .attesting_indices
@@ -544,6 +550,7 @@ impl<T: BeaconChainTypes> VerifiedUnaggregatedAttestation<T> {
         Ok(Self {
             attestation,
             indexed_attestation,
+            subnet_id: expected_subnet_id,
         })
     }
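The `Option<SubnetId>` parameter is the behavioural core of this hunk: gossip callers still enforce the subnet, while callers that receive attestations elsewhere (e.g. over the HTTP API) can pass `None` and skip the check. A minimal standalone sketch of that control flow, using stand-in types rather than the real crate items:

// Standalone sketch of the `Option<SubnetId>` check above; the types here
// are simplified stand-ins, not the real `beacon_chain` items.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
struct SubnetId(u64);

#[derive(Debug, PartialEq)]
enum Error {
    InvalidSubnetId { received: SubnetId, expected: SubnetId },
}

/// If the caller knows which subnet the attestation arrived on, enforce it;
/// otherwise (e.g. an API submission) accept any subnet.
fn check_subnet(received: Option<SubnetId>, expected: SubnetId) -> Result<(), Error> {
    if let Some(received) = received {
        if received != expected {
            return Err(Error::InvalidSubnetId { received, expected });
        }
    }
    Ok(())
}

fn main() {
    // Gossip path: the subnet is known and must match.
    assert!(check_subnet(Some(SubnetId(3)), SubnetId(3)).is_ok());
    assert!(check_subnet(Some(SubnetId(4)), SubnetId(3)).is_err());
    // API path: no subnet supplied, so the check is skipped.
    assert!(check_subnet(None, SubnetId(3)).is_ok());
}

Note that the verified attestation always records the expected subnet it computed, so downstream consumers can still learn the correct subnet via the new getter below.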
@@ -552,6 +559,11 @@ impl<T: BeaconChainTypes> VerifiedUnaggregatedAttestation<T> {
         chain.add_to_naive_aggregation_pool(self)
     }

+    /// Returns the correct subnet for the attestation.
+    pub fn subnet_id(&self) -> SubnetId {
+        self.subnet_id
+    }
+
     /// Returns the wrapped `attestation`.
     pub fn attestation(&self) -> &Attestation<T::EthSpec> {
         &self.attestation
@@ -567,6 +579,7 @@ impl<T: BeaconChainTypes> VerifiedUnaggregatedAttestation<T> {
     }

 /// Returns `Ok(())` if the `attestation.data.beacon_block_root` is known to this chain.
+/// You can use this `shuffling_id` to read from the shuffling cache.
 ///
 /// The block root may not be known for two reasons:
 ///
@@ -595,6 +608,7 @@ fn verify_head_block_is_known<T: BeaconChainTypes>(
                });
            }
        }

        Ok(block)
    } else {
        Err(Error::UnknownHeadBlock {
@@ -801,7 +815,7 @@ type CommitteesPerSlot = u64;

 /// Returns the `indexed_attestation` and committee count per slot for the `attestation` using the
 /// public keys cached in the `chain`.
-pub fn obtain_indexed_attestation_and_committees_per_slot<T: BeaconChainTypes>(
+fn obtain_indexed_attestation_and_committees_per_slot<T: BeaconChainTypes>(
     chain: &BeaconChain<T>,
     attestation: &Attestation<T::EthSpec>,
 ) -> Result<(IndexedAttestation<T::EthSpec>, CommitteesPerSlot), Error> {
@@ -821,8 +835,8 @@ pub fn obtain_indexed_attestation_and_committees_per_slot<T: BeaconChainTypes>(
 ///
 /// If the committee for `attestation` isn't found in the `shuffling_cache`, we will read a state
 /// from disk and then update the `shuffling_cache`.
-pub fn map_attestation_committee<'a, T, F, R>(
-    chain: &'a BeaconChain<T>,
+fn map_attestation_committee<T, F, R>(
+    chain: &BeaconChain<T>,
     attestation: &Attestation<T::EthSpec>,
     map_fn: F,
 ) -> Result<R, Error>
@@ -840,104 +854,23 @@ where
     // processing an attestation that does not include our latest finalized block in its chain.
     //
     // We do not delay consideration for later, we simply drop the attestation.
-    let target_block = chain
-        .fork_choice
-        .read()
-        .get_block(&target.root)
-        .ok_or_else(|| Error::UnknownTargetRoot(target.root))?;
-
-    // Obtain the shuffling cache, timing how long we wait.
-    let cache_wait_timer =
-        metrics::start_timer(&metrics::ATTESTATION_PROCESSING_SHUFFLING_CACHE_WAIT_TIMES);
-
-    let mut shuffling_cache = chain
-        .shuffling_cache
-        .try_write_for(ATTESTATION_CACHE_LOCK_TIMEOUT)
-        .ok_or_else(|| BeaconChainError::AttestationCacheLockTimeout)?;
-
-    metrics::stop_timer(cache_wait_timer);
-
-    if let Some(committee_cache) = shuffling_cache.get(attestation_epoch, target.root) {
-        let committees_per_slot = committee_cache.committees_per_slot();
-        committee_cache
-            .get_beacon_committee(attestation.data.slot, attestation.data.index)
-            .map(|committee| map_fn((committee, committees_per_slot)))
-            .unwrap_or_else(|| {
-                Err(Error::NoCommitteeForSlotAndIndex {
-                    slot: attestation.data.slot,
-                    index: attestation.data.index,
-                })
-            })
-    } else {
-        // Drop the shuffling cache to avoid holding the lock for any longer than
-        // required.
-        drop(shuffling_cache);
-
-        debug!(
-            chain.log,
-            "Attestation processing cache miss";
-            "attn_epoch" => attestation_epoch.as_u64(),
-            "target_block_epoch" => target_block.slot.epoch(T::EthSpec::slots_per_epoch()).as_u64(),
-        );
-
-        let state_read_timer =
-            metrics::start_timer(&metrics::ATTESTATION_PROCESSING_STATE_READ_TIMES);
-
-        let mut state = chain
-            .store
-            .get_inconsistent_state_for_attestation_verification_only(
-                &target_block.state_root,
-                Some(target_block.slot),
-            )
-            .map_err(BeaconChainError::from)?
-            .ok_or_else(|| BeaconChainError::MissingBeaconState(target_block.state_root))?;
-
-        metrics::stop_timer(state_read_timer);
-        let state_skip_timer =
-            metrics::start_timer(&metrics::ATTESTATION_PROCESSING_STATE_SKIP_TIMES);
-
-        while state.current_epoch() + 1 < attestation_epoch {
-            // Here we tell `per_slot_processing` to skip hashing the state and just
-            // use the zero hash instead.
-            //
-            // The state roots are not useful for the shuffling, so there's no need to
-            // compute them.
-            per_slot_processing(&mut state, Some(Hash256::zero()), &chain.spec)
-                .map_err(BeaconChainError::from)?;
-        }
-
-        metrics::stop_timer(state_skip_timer);
-        let committee_building_timer =
-            metrics::start_timer(&metrics::ATTESTATION_PROCESSING_COMMITTEE_BUILDING_TIMES);
-
-        let relative_epoch = RelativeEpoch::from_epoch(state.current_epoch(), attestation_epoch)
-            .map_err(BeaconChainError::IncorrectStateForAttestation)?;
-
-        state
-            .build_committee_cache(relative_epoch, &chain.spec)
-            .map_err(BeaconChainError::from)?;
-
-        let committee_cache = state
-            .committee_cache(relative_epoch)
-            .map_err(BeaconChainError::from)?;
-
-        chain
-            .shuffling_cache
-            .try_write_for(ATTESTATION_CACHE_LOCK_TIMEOUT)
-            .ok_or_else(|| BeaconChainError::AttestationCacheLockTimeout)?
-            .insert(attestation_epoch, target.root, committee_cache);
-
-        metrics::stop_timer(committee_building_timer);
-
-        let committees_per_slot = committee_cache.committees_per_slot();
-        committee_cache
-            .get_beacon_committee(attestation.data.slot, attestation.data.index)
-            .map(|committee| map_fn((committee, committees_per_slot)))
-            .unwrap_or_else(|| {
-                Err(Error::NoCommitteeForSlotAndIndex {
-                    slot: attestation.data.slot,
-                    index: attestation.data.index,
-                })
-            })
-    }
+    if !chain.fork_choice.read().contains_block(&target.root) {
+        return Err(Error::UnknownTargetRoot(target.root));
+    }
+
+    chain
+        .with_committee_cache(target.root, attestation_epoch, |committee_cache| {
+            let committees_per_slot = committee_cache.committees_per_slot();
+
+            Ok(committee_cache
+                .get_beacon_committee(attestation.data.slot, attestation.data.index)
+                .map(|committee| map_fn((committee, committees_per_slot)))
+                .unwrap_or_else(|| {
+                    Err(Error::NoCommitteeForSlotAndIndex {
+                        slot: attestation.data.slot,
+                        index: attestation.data.index,
+                    })
+                }))
+        })
+        .map_err(BeaconChainError::from)?
 }

@@ -20,7 +20,7 @@ use crate::observed_block_producers::ObservedBlockProducers;
 use crate::observed_operations::{ObservationOutcome, ObservedOperations};
 use crate::persisted_beacon_chain::PersistedBeaconChain;
 use crate::persisted_fork_choice::PersistedForkChoice;
-use crate::shuffling_cache::ShufflingCache;
+use crate::shuffling_cache::{BlockShufflingIds, ShufflingCache};
 use crate::snapshot_cache::SnapshotCache;
 use crate::timeout_rw_lock::TimeoutRwLock;
 use crate::validator_pubkey_cache::ValidatorPubkeyCache;
@@ -32,7 +32,6 @@ use futures::channel::mpsc::Sender;
 use itertools::process_results;
 use operation_pool::{OperationPool, PersistedOperationPool};
 use parking_lot::RwLock;
-use regex::bytes::Regex;
 use slog::{crit, debug, error, info, trace, warn, Logger};
 use slot_clock::SlotClock;
 use state_processing::{
@@ -68,10 +67,11 @@ pub const ATTESTATION_CACHE_LOCK_TIMEOUT: Duration = Duration::from_secs(1);
 /// validator pubkey cache.
 pub const VALIDATOR_PUBKEY_CACHE_LOCK_TIMEOUT: Duration = Duration::from_secs(1);

-pub const BEACON_CHAIN_DB_KEY: [u8; 32] = [0; 32];
-pub const OP_POOL_DB_KEY: [u8; 32] = [0; 32];
-pub const ETH1_CACHE_DB_KEY: [u8; 32] = [0; 32];
-pub const FORK_CHOICE_DB_KEY: [u8; 32] = [0; 32];
+// These keys are all zero because they get stored in different columns, see `DBColumn` type.
+pub const BEACON_CHAIN_DB_KEY: Hash256 = Hash256::zero();
+pub const OP_POOL_DB_KEY: Hash256 = Hash256::zero();
+pub const ETH1_CACHE_DB_KEY: Hash256 = Hash256::zero();
+pub const FORK_CHOICE_DB_KEY: Hash256 = Hash256::zero();

 /// The result of a chain segment processing.
 pub enum ChainSegmentResult<T: EthSpec> {
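The new comment is worth unpacking: items are stored under the pair (column, key), so four all-zero keys can coexist, one singleton item per column. A standalone sketch of that layout, assuming invented column names in place of the real `DBColumn` variants:

use std::collections::HashMap;

// Stand-ins for the real `DBColumn` and `Hash256` types.
#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash)]
enum DbColumn {
    BeaconChain,
    OpPool,
    Eth1Cache,
    ForkChoice,
}

type Hash256 = [u8; 32];
const ZERO_KEY: Hash256 = [0; 32];

fn main() {
    // The store is keyed by (column, key), so the same zero key can be
    // reused for one singleton item per column without collisions.
    let mut store: HashMap<(DbColumn, Hash256), &str> = HashMap::new();
    store.insert((DbColumn::BeaconChain, ZERO_KEY), "persisted beacon chain");
    store.insert((DbColumn::OpPool, ZERO_KEY), "persisted op pool");
    store.insert((DbColumn::Eth1Cache, ZERO_KEY), "eth1 cache");
    store.insert((DbColumn::ForkChoice, ZERO_KEY), "persisted fork choice");

    assert_eq!(store.len(), 4); // No overwrites despite identical keys.
    assert_eq!(store[&(DbColumn::OpPool, ZERO_KEY)], "persisted op pool");
}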
@@ -202,6 +202,8 @@ pub struct BeaconChain<T: BeaconChainTypes> {
     pub(crate) canonical_head: TimeoutRwLock<BeaconSnapshot<T::EthSpec>>,
     /// The root of the genesis block.
     pub genesis_block_root: Hash256,
+    /// The root of the genesis state.
+    pub genesis_state_root: Hash256,
     /// The root of the list of genesis validators, used during syncing.
     pub genesis_validators_root: Hash256,

@@ -263,7 +265,7 @@ impl<T: BeaconChainTypes> BeaconChain<T> {
         let fork_choice = self.fork_choice.read();

         self.store.put_item(
-            &Hash256::from_slice(&FORK_CHOICE_DB_KEY),
+            &FORK_CHOICE_DB_KEY,
             &PersistedForkChoice {
                 fork_choice: fork_choice.to_persisted(),
                 fork_choice_store: fork_choice.fc_store().to_persisted(),
@@ -275,8 +277,7 @@ impl<T: BeaconChainTypes> BeaconChain<T> {
         metrics::stop_timer(fork_choice_timer);
         let head_timer = metrics::start_timer(&metrics::PERSIST_HEAD);

-        self.store
-            .put_item(&Hash256::from_slice(&BEACON_CHAIN_DB_KEY), &persisted_head)?;
+        self.store.put_item(&BEACON_CHAIN_DB_KEY, &persisted_head)?;

         metrics::stop_timer(head_timer);

@@ -293,7 +294,7 @@ impl<T: BeaconChainTypes> BeaconChain<T> {
         let _timer = metrics::start_timer(&metrics::PERSIST_OP_POOL);

         self.store.put_item(
-            &Hash256::from_slice(&OP_POOL_DB_KEY),
+            &OP_POOL_DB_KEY,
             &PersistedOperationPool::from_operation_pool(&self.op_pool),
         )?;

@@ -305,10 +306,8 @@ impl<T: BeaconChainTypes> BeaconChain<T> {
         let _timer = metrics::start_timer(&metrics::PERSIST_OP_POOL);

         if let Some(eth1_chain) = self.eth1_chain.as_ref() {
-            self.store.put_item(
-                &Hash256::from_slice(&ETH1_CACHE_DB_KEY),
-                &eth1_chain.as_ssz_container(),
-            )?;
+            self.store
+                .put_item(&ETH1_CACHE_DB_KEY, &eth1_chain.as_ssz_container())?;
         }

         Ok(())
@@ -463,6 +462,30 @@ impl<T: BeaconChainTypes> BeaconChain<T> {
         }
     }

+    /// Returns the state root at the given slot, if any. Only returns state roots in the
+    /// canonical chain.
+    ///
+    /// ## Errors
+    ///
+    /// May return a database error.
+    pub fn state_root_at_slot(&self, slot: Slot) -> Result<Option<Hash256>, Error> {
+        process_results(self.rev_iter_state_roots()?, |mut iter| {
+            iter.find(|(_, this_slot)| *this_slot == slot)
+                .map(|(root, _)| root)
+        })
+    }
+
+    /// Returns the block root at the given slot, if any. Only returns roots in the canonical chain.
+    ///
+    /// ## Errors
+    ///
+    /// May return a database error.
+    pub fn block_root_at_slot(&self, slot: Slot) -> Result<Option<Hash256>, Error> {
+        process_results(self.rev_iter_block_roots()?, |mut iter| {
+            iter.find(|(_, this_slot)| *this_slot == slot)
+                .map(|(root, _)| root)
+        })
+    }
+
     /// Returns the block at the given root, if any.
     ///
     /// ## Errors
@@ -510,6 +533,30 @@ impl<T: BeaconChainTypes> BeaconChain<T> {
         f(&head_lock)
     }

+    /// Returns the beacon block root at the head of the canonical chain.
+    ///
+    /// See `Self::head` for more information.
+    pub fn head_beacon_block_root(&self) -> Result<Hash256, Error> {
+        self.with_head(|s| Ok(s.beacon_block_root))
+    }
+
+    /// Returns the beacon block at the head of the canonical chain.
+    ///
+    /// See `Self::head` for more information.
+    pub fn head_beacon_block(&self) -> Result<SignedBeaconBlock<T::EthSpec>, Error> {
+        self.with_head(|s| Ok(s.beacon_block.clone()))
+    }
+
+    /// Returns the beacon state at the head of the canonical chain.
+    ///
+    /// See `Self::head` for more information.
+    pub fn head_beacon_state(&self) -> Result<BeaconState<T::EthSpec>, Error> {
+        self.with_head(|s| {
+            Ok(s.beacon_state
+                .clone_with(CloneConfig::committee_caches_only()))
+        })
+    }
+
     /// Returns info representing the head block and state.
     ///
     /// A summarized version of `Self::head` that involves less cloning.
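These accessors all funnel through `with_head`, so each caller clones only what it asks for instead of the whole head snapshot. A standalone model of the pattern, with simplified stand-in types:

// Standalone model of the `with_head` accessor pattern: callers borrow the
// head snapshot through a closure and copy out only what they need.
struct Snapshot {
    block_root: [u8; 32],
    state: Vec<u8>, // stands in for the (expensive to clone) BeaconState
}

struct Chain {
    head: Snapshot,
}

impl Chain {
    /// Run `f` against the current head without handing out ownership.
    fn with_head<R>(&self, f: impl FnOnce(&Snapshot) -> R) -> R {
        f(&self.head)
    }

    /// Cheap: copies 32 bytes instead of cloning the whole snapshot.
    fn head_block_root(&self) -> [u8; 32] {
        self.with_head(|s| s.block_root)
    }
}

fn main() {
    let chain = Chain {
        head: Snapshot { block_root: [7; 32], state: vec![0; 1024] },
    };
    assert_eq!(chain.head_block_root(), [7; 32]);
    // The full state is only cloned by callers that explicitly ask for it.
    let state_len = chain.with_head(|s| s.state.len());
    assert_eq!(state_len, 1024);
}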
@@ -743,46 +790,20 @@ impl<T: BeaconChainTypes> BeaconChain<T> {
             .map_err(Into::into)
     }

-    /// Returns the attestation slot and committee index for a given validator index.
+    /// Returns the attestation duties for a given validator index.
     ///
     /// Information is read from the current state, so only information from the present and prior
     /// epoch is available.
-    pub fn validator_attestation_slot_and_index(
+    pub fn validator_attestation_duty(
         &self,
         validator_index: usize,
         epoch: Epoch,
-    ) -> Result<Option<(Slot, u64)>, Error> {
-        let as_epoch = |slot: Slot| slot.epoch(T::EthSpec::slots_per_epoch());
-        let head_state = &self.head()?.beacon_state;
-
-        let mut state = if epoch == as_epoch(head_state.slot) {
-            self.head()?.beacon_state
-        } else {
-            // The block proposer shuffling is not affected by the state roots, so we don't need to
-            // calculate them.
-            self.state_at_slot(
-                epoch.start_slot(T::EthSpec::slots_per_epoch()),
-                StateSkipConfig::WithoutStateRoots,
-            )?
-        };
-
-        state.build_committee_cache(RelativeEpoch::Current, &self.spec)?;
-
-        if as_epoch(state.slot) != epoch {
-            return Err(Error::InvariantViolated(format!(
-                "Epochs in consistent in attestation duties lookup: state: {}, requested: {}",
-                as_epoch(state.slot),
-                epoch
-            )));
-        }
-
-        if let Some(attestation_duty) =
-            state.get_attestation_duties(validator_index, RelativeEpoch::Current)?
-        {
-            Ok(Some((attestation_duty.slot, attestation_duty.index)))
-        } else {
-            Ok(None)
-        }
+    ) -> Result<Option<AttestationDuty>, Error> {
+        let head_block_root = self.head_beacon_block_root()?;
+
+        self.with_committee_cache(head_block_root, epoch, |committee_cache| {
+            Ok(committee_cache.get_attestation_duties(validator_index))
+        })
     }

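Duty lookup now rides on the shared committee cache instead of rebuilding a state per call. A standalone model of the resulting lookup shape (types simplified; the real `CommitteeCache` derives duties from the epoch shuffling):

use std::collections::HashMap;

// Standalone model of the duty lookup above: the shuffling for an epoch is
// computed once, cached, and then queried per validator.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
struct AttestationDuty {
    slot: u64,
    committee_index: u64,
}

struct CommitteeCache {
    duties: HashMap<usize, AttestationDuty>, // validator index -> duty
}

impl CommitteeCache {
    fn get_attestation_duties(&self, validator_index: usize) -> Option<AttestationDuty> {
        self.duties.get(&validator_index).copied()
    }
}

fn main() {
    let mut duties = HashMap::new();
    duties.insert(42, AttestationDuty { slot: 8, committee_index: 1 });
    let cache = CommitteeCache { duties };

    // One cache lookup replaces the old per-call state reconstruction.
    assert_eq!(
        cache.get_attestation_duties(42),
        Some(AttestationDuty { slot: 8, committee_index: 1 })
    );
    assert_eq!(cache.get_attestation_duties(7), None);
}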
     /// Returns an aggregated `Attestation`, if any, that has a matching `attestation.data`.
@@ -791,11 +812,22 @@ impl<T: BeaconChainTypes> BeaconChain<T> {
     pub fn get_aggregated_attestation(
         &self,
         data: &AttestationData,
-    ) -> Result<Option<Attestation<T::EthSpec>>, Error> {
-        self.naive_aggregation_pool
-            .read()
-            .get(data)
-            .map_err(Into::into)
+    ) -> Option<Attestation<T::EthSpec>> {
+        self.naive_aggregation_pool.read().get(data)
+    }
+
+    /// Returns an aggregated `Attestation`, if any, that has a matching
+    /// `attestation.data.tree_hash_root()`.
+    ///
+    /// The attestation will be obtained from `self.naive_aggregation_pool`.
+    pub fn get_aggregated_attestation_by_slot_and_root(
+        &self,
+        slot: Slot,
+        attestation_data_root: &Hash256,
+    ) -> Option<Attestation<T::EthSpec>> {
+        self.naive_aggregation_pool
+            .read()
+            .get_by_slot_and_root(slot, attestation_data_root)
     }

     /// Produce an unaggregated `Attestation` that is valid for the given `slot` and `index`.
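The new `get_by_slot_and_root` lookup serves callers (such as the standard API's aggregate flow) that know only the slot and the `AttestationData` root. A standalone sketch of the two-level map it implies, with the 32-byte root faked by a plain integer:

use std::collections::HashMap;

// Standalone model of the lookup above: the pool is indexed first by slot,
// then by the root of the attestation data. Hashing is faked here.
type Slot = u64;
type Hash256 = u64; // stand-in for a real 32-byte root

#[derive(Clone, Debug, PartialEq)]
struct Attestation {
    slot: Slot,
    data_root: Hash256,
}

struct Pool {
    maps: HashMap<Slot, HashMap<Hash256, Attestation>>,
}

impl Pool {
    fn get_by_slot_and_root(&self, slot: Slot, root: &Hash256) -> Option<Attestation> {
        self.maps.get(&slot).and_then(|m| m.get(root).cloned())
    }
}

fn main() {
    let att = Attestation { slot: 12, data_root: 0xABCD };
    let mut inner = HashMap::new();
    inner.insert(att.data_root, att.clone());
    let mut maps = HashMap::new();
    maps.insert(att.slot, inner);
    let pool = Pool { maps };

    // A caller that only knows (slot, data_root) can still fetch the
    // aggregate without reconstructing the full AttestationData.
    assert_eq!(pool.get_by_slot_and_root(12, &0xABCD), Some(att));
    assert_eq!(pool.get_by_slot_and_root(12, &0x9999), None);
}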
@@ -922,7 +954,7 @@ impl<T: BeaconChainTypes> BeaconChain<T> {
     pub fn verify_unaggregated_attestation_for_gossip(
         &self,
         attestation: Attestation<T::EthSpec>,
-        subnet_id: SubnetId,
+        subnet_id: Option<SubnetId>,
     ) -> Result<VerifiedUnaggregatedAttestation<T>, AttestationError> {
         metrics::inc_counter(&metrics::UNAGGREGATED_ATTESTATION_PROCESSING_REQUESTS);
         let _timer =
@@ -1344,11 +1376,7 @@ impl<T: BeaconChainTypes> BeaconChain<T> {
         block: SignedBeaconBlock<T::EthSpec>,
     ) -> Result<GossipVerifiedBlock<T>, BlockError<T::EthSpec>> {
         let slot = block.message.slot;
-        #[allow(clippy::invalid_regex)]
-        let re = Regex::new("\\p{C}").expect("regex is valid");
-        let graffiti_string =
-            String::from_utf8_lossy(&re.replace_all(&block.message.body.graffiti[..], &b""[..]))
-                .to_string();
+        let graffiti_string = block.message.body.graffiti.as_utf8_lossy();

         match GossipVerifiedBlock::new(block, self) {
             Ok(verified) => {
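The per-block regex is replaced by a single helper on the new `Graffiti` type. As an illustration only (not the crate's implementation), such a helper can be written without a regex engine by decoding leniently and filtering control characters:

// Illustrative sketch of a graffiti "lossy UTF-8" helper: decode the fixed
// 32 bytes leniently and drop control characters. An assumption for
// demonstration, not the actual `Graffiti::as_utf8_lossy` body.
fn graffiti_as_utf8_lossy(bytes: &[u8; 32]) -> String {
    String::from_utf8_lossy(bytes)
        .chars()
        .filter(|c| !c.is_control()) // also strips the trailing NUL padding
        .collect()
}

fn main() {
    let mut graffiti = [0u8; 32];
    graffiti[..10].copy_from_slice(b"Lighthouse");
    assert_eq!(graffiti_as_utf8_lossy(&graffiti), "Lighthouse");
}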
@@ -1473,8 +1501,7 @@ impl<T: BeaconChainTypes> BeaconChain<T> {
     ) -> Result<Hash256, BlockError<T::EthSpec>> {
         let signed_block = fully_verified_block.block;
         let block_root = fully_verified_block.block_root;
-        let state = fully_verified_block.state;
-        let parent_block = fully_verified_block.parent_block;
+        let mut state = fully_verified_block.state;
         let current_slot = self.slot()?;
         let mut ops = fully_verified_block.intermediate_states;
@@ -1506,29 +1533,25 @@ impl<T: BeaconChainTypes> BeaconChain<T> {
             .ok_or_else(|| Error::ValidatorPubkeyCacheLockTimeout)?
             .import_new_pubkeys(&state)?;

-        // If the imported block is in the previous or current epochs (according to the
-        // wall-clock), check to see if this is the first block of the epoch. If so, add the
-        // committee to the shuffling cache.
-        if state.current_epoch() + 1 >= self.epoch()?
-            && parent_block.slot().epoch(T::EthSpec::slots_per_epoch()) != state.current_epoch()
-        {
-            let mut shuffling_cache = self
-                .shuffling_cache
-                .try_write_for(ATTESTATION_CACHE_LOCK_TIMEOUT)
-                .ok_or_else(|| Error::AttestationCacheLockTimeout)?;
-
-            let committee_cache = state.committee_cache(RelativeEpoch::Current)?;
-
-            let epoch_start_slot = state
-                .current_epoch()
-                .start_slot(T::EthSpec::slots_per_epoch());
-            let target_root = if state.slot == epoch_start_slot {
-                block_root
-            } else {
-                *state.get_block_root(epoch_start_slot)?
-            };
-
-            shuffling_cache.insert(state.current_epoch(), target_root, committee_cache);
-        }
+        // For the current and next epoch of this state, ensure we have the shuffling from this
+        // block in our cache.
+        for relative_epoch in &[RelativeEpoch::Current, RelativeEpoch::Next] {
+            let shuffling_id = ShufflingId::new(block_root, &state, *relative_epoch)?;
+
+            let shuffling_is_cached = self
+                .shuffling_cache
+                .try_read_for(ATTESTATION_CACHE_LOCK_TIMEOUT)
+                .ok_or_else(|| Error::AttestationCacheLockTimeout)?
+                .contains(&shuffling_id);
+
+            if !shuffling_is_cached {
+                state.build_committee_cache(*relative_epoch, &self.spec)?;
+                let committee_cache = state.committee_cache(*relative_epoch)?;
+                self.shuffling_cache
+                    .try_write_for(ATTESTATION_CACHE_LOCK_TIMEOUT)
+                    .ok_or_else(|| Error::AttestationCacheLockTimeout)?
+                    .insert(shuffling_id, committee_cache);
+            }
+        }

         let mut fork_choice = self.fork_choice.write();
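The warming loop above takes a read lock to test membership and only escalates to a write lock on a miss, keeping the common (already cached) path cheap. A standalone model of that check-then-build ordering, using `std::sync::RwLock` in place of the timeout-guarded lock:

use std::collections::HashSet;
use std::sync::RwLock;

// Standalone model of the import-time cache warming above.
type ShufflingId = (u64, [u8; 32]); // (epoch, block root) stand-in

struct Cache {
    ids: RwLock<HashSet<ShufflingId>>,
}

impl Cache {
    fn warm(&self, id: ShufflingId) {
        // Read lock first: the common case is that the shuffling is cached.
        let cached = self.ids.read().unwrap().contains(&id);
        if !cached {
            // ... build the committee cache here (elided) ...
            self.ids.write().unwrap().insert(id);
        }
    }
}

fn main() {
    let cache = Cache { ids: RwLock::new(HashSet::new()) };
    for epoch in [5u64, 6] {
        // Current and next epoch, mirroring the loop in the diff.
        cache.warm((epoch, [1; 32]));
    }
    assert_eq!(cache.ids.read().unwrap().len(), 2);
}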
@@ -2102,6 +2125,129 @@ impl<T: BeaconChainTypes> BeaconChain<T> {
         Ok(())
     }

+    /// Runs the `map_fn` with the committee cache for `shuffling_epoch` from the chain with head
+    /// `head_block_root`.
+    ///
+    /// It's not necessary that `head_block_root` matches our current view of the chain, it can be
+    /// any block that is:
+    ///
+    /// - Known to us.
+    /// - The finalized block or a descendant of the finalized block.
+    ///
+    /// It would be quite common for attestation verification operations to use a `head_block_root`
+    /// that differs from our view of the head.
+    ///
+    /// ## Important
+    ///
+    /// This function is **not** suitable for determining proposer duties.
+    ///
+    /// ## Notes
+    ///
+    /// This function exists in this odd "map" pattern because efficiently obtaining a committee
+    /// can be complex. It might involve reading straight from the `beacon_chain.shuffling_cache`
+    /// or it might involve reading it from a state from the DB. Due to the complexities of
+    /// `RwLock`s on the shuffling cache, a simple `Cow` isn't suitable here.
+    ///
+    /// If the committee for `(head_block_root, shuffling_epoch)` isn't found in the
+    /// `shuffling_cache`, we will read a state from disk and then update the `shuffling_cache`.
+    pub(crate) fn with_committee_cache<F, R>(
+        &self,
+        head_block_root: Hash256,
+        shuffling_epoch: Epoch,
+        map_fn: F,
+    ) -> Result<R, Error>
+    where
+        F: Fn(&CommitteeCache) -> Result<R, Error>,
+    {
+        let head_block = self
+            .fork_choice
+            .read()
+            .get_block(&head_block_root)
+            .ok_or_else(|| Error::MissingBeaconBlock(head_block_root))?;
+
+        let shuffling_id = BlockShufflingIds {
+            current: head_block.current_epoch_shuffling_id.clone(),
+            next: head_block.next_epoch_shuffling_id.clone(),
+            block_root: head_block.root,
+        }
+        .id_for_epoch(shuffling_epoch)
+        .ok_or_else(|| Error::InvalidShufflingId {
+            shuffling_epoch,
+            head_block_epoch: head_block.slot.epoch(T::EthSpec::slots_per_epoch()),
+        })?;
+
+        // Obtain the shuffling cache, timing how long we wait.
+        let cache_wait_timer =
+            metrics::start_timer(&metrics::ATTESTATION_PROCESSING_SHUFFLING_CACHE_WAIT_TIMES);
+
+        let mut shuffling_cache = self
+            .shuffling_cache
+            .try_write_for(ATTESTATION_CACHE_LOCK_TIMEOUT)
+            .ok_or_else(|| Error::AttestationCacheLockTimeout)?;
+
+        metrics::stop_timer(cache_wait_timer);
+
+        if let Some(committee_cache) = shuffling_cache.get(&shuffling_id) {
+            map_fn(committee_cache)
+        } else {
+            // Drop the shuffling cache to avoid holding the lock for any longer than
+            // required.
+            drop(shuffling_cache);
+
+            debug!(
+                self.log,
+                "Committee cache miss";
+                "shuffling_epoch" => shuffling_epoch.as_u64(),
+                "head_block_root" => head_block_root.to_string(),
+            );
+
+            let state_read_timer =
+                metrics::start_timer(&metrics::ATTESTATION_PROCESSING_STATE_READ_TIMES);
+
+            let mut state = self
+                .store
+                .get_inconsistent_state_for_attestation_verification_only(
+                    &head_block.state_root,
+                    Some(head_block.slot),
+                )?
+                .ok_or_else(|| Error::MissingBeaconState(head_block.state_root))?;
+
+            metrics::stop_timer(state_read_timer);
+            let state_skip_timer =
+                metrics::start_timer(&metrics::ATTESTATION_PROCESSING_STATE_SKIP_TIMES);
+
+            while state.current_epoch() + 1 < shuffling_epoch {
+                // Here we tell `per_slot_processing` to skip hashing the state and just
+                // use the zero hash instead.
+                //
+                // The state roots are not useful for the shuffling, so there's no need to
+                // compute them.
+                per_slot_processing(&mut state, Some(Hash256::zero()), &self.spec)
+                    .map_err(Error::from)?;
+            }

+            metrics::stop_timer(state_skip_timer);
+            let committee_building_timer =
+                metrics::start_timer(&metrics::ATTESTATION_PROCESSING_COMMITTEE_BUILDING_TIMES);
+
+            let relative_epoch = RelativeEpoch::from_epoch(state.current_epoch(), shuffling_epoch)
+                .map_err(Error::IncorrectStateForAttestation)?;
+
+            state.build_committee_cache(relative_epoch, &self.spec)?;
+
+            let committee_cache = state.committee_cache(relative_epoch)?;
+
+            self.shuffling_cache
+                .try_write_for(ATTESTATION_CACHE_LOCK_TIMEOUT)
+                .ok_or_else(|| Error::AttestationCacheLockTimeout)?
+                .insert(shuffling_id, committee_cache);
+
+            metrics::stop_timer(committee_building_timer);
+
+            map_fn(&committee_cache)
+        }
+    }
+
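The closure-based shape is deliberate: the committee cache can come from an in-memory LRU or from a state rebuilt off disk, so the function lends a reference to `map_fn` rather than returning one. A standalone model of the pattern, with the disk path reduced to a comment:

use std::collections::HashMap;

// Standalone model of the `with_committee_cache` "map" pattern: the callee
// owns the caching/building complexity and only lends the cache to a
// closure, so no reference to cache internals can escape.
type Epoch = u64;

struct CommitteeCache {
    committees_per_slot: u64,
}

struct Chain {
    shuffling_cache: HashMap<Epoch, CommitteeCache>,
}

impl Chain {
    fn with_committee_cache<R>(
        &mut self,
        epoch: Epoch,
        map_fn: impl Fn(&CommitteeCache) -> R,
    ) -> R {
        let cache = self
            .shuffling_cache
            .entry(epoch)
            // Cache miss: in the real code this is where a state is read
            // from disk and advanced via `per_slot_processing`.
            .or_insert(CommitteeCache { committees_per_slot: 64 });
        map_fn(cache)
    }
}

fn main() {
    let mut chain = Chain { shuffling_cache: HashMap::new() };
    let n = chain.with_committee_cache(11, |cache| cache.committees_per_slot);
    assert_eq!(n, 64);
}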
     /// Returns `true` if the given block root has not been processed.
     pub fn is_new_block_root(&self, beacon_block_root: &Hash256) -> Result<bool, Error> {
         Ok(!self

@@ -232,7 +232,7 @@ where
             .ok_or_else(|| "get_persisted_eth1_backend requires a store.".to_string())?;

         store
-            .get_item::<SszEth1>(&Hash256::from_slice(&ETH1_CACHE_DB_KEY))
+            .get_item::<SszEth1>(&ETH1_CACHE_DB_KEY)
             .map_err(|e| format!("DB error whilst reading eth1 cache: {:?}", e))
     }
@@ -244,7 +244,7 @@ where
             .ok_or_else(|| "store_contains_beacon_chain requires a store.".to_string())?;

         Ok(store
-            .get_item::<PersistedBeaconChain>(&Hash256::from_slice(&BEACON_CHAIN_DB_KEY))
+            .get_item::<PersistedBeaconChain>(&BEACON_CHAIN_DB_KEY)
             .map_err(|e| format!("DB error when reading persisted beacon chain: {:?}", e))?
             .is_some())
     }
@@ -275,7 +275,7 @@ where
             .ok_or_else(|| "resume_from_db requires a store.".to_string())?;

         let chain = store
-            .get_item::<PersistedBeaconChain>(&Hash256::from_slice(&BEACON_CHAIN_DB_KEY))
+            .get_item::<PersistedBeaconChain>(&BEACON_CHAIN_DB_KEY)
             .map_err(|e| format!("DB error when reading persisted beacon chain: {:?}", e))?
             .ok_or_else(|| {
                 "No persisted beacon chain found in store. Try purging the beacon chain database."
@@ -283,7 +283,7 @@ where
             })?;

         let persisted_fork_choice = store
-            .get_item::<PersistedForkChoice>(&Hash256::from_slice(&FORK_CHOICE_DB_KEY))
+            .get_item::<PersistedForkChoice>(&FORK_CHOICE_DB_KEY)
             .map_err(|e| format!("DB error when reading persisted fork choice: {:?}", e))?
             .ok_or_else(|| "No persisted fork choice present in database.".to_string())?;
@@ -310,7 +310,7 @@ where

         self.op_pool = Some(
             store
-                .get_item::<PersistedOperationPool<TEthSpec>>(&Hash256::from_slice(&OP_POOL_DB_KEY))
+                .get_item::<PersistedOperationPool<TEthSpec>>(&OP_POOL_DB_KEY)
                 .map_err(|e| format!("DB error whilst reading persisted op pool: {:?}", e))?
                 .map(PersistedOperationPool::into_operation_pool)
                 .unwrap_or_else(OperationPool::new),
@@ -377,8 +377,13 @@ where

         let fc_store = BeaconForkChoiceStore::get_forkchoice_store(store, &genesis);

-        let fork_choice = ForkChoice::from_genesis(fc_store, &genesis.beacon_block.message)
-            .map_err(|e| format!("Unable to build initialize ForkChoice: {:?}", e))?;
+        let fork_choice = ForkChoice::from_genesis(
+            fc_store,
+            genesis.beacon_block_root,
+            &genesis.beacon_block.message,
+            &genesis.beacon_state,
+        )
+        .map_err(|e| format!("Unable to build initialize ForkChoice: {:?}", e))?;

         self.fork_choice = Some(fork_choice);
         self.genesis_time = Some(genesis.beacon_state.genesis_time);
@@ -570,6 +575,7 @@ where
             observed_attester_slashings: <_>::default(),
             eth1_chain: self.eth1_chain,
             genesis_validators_root: canonical_head.beacon_state.genesis_validators_root,
+            genesis_state_root: canonical_head.beacon_state_root,
             canonical_head: TimeoutRwLock::new(canonical_head.clone()),
             genesis_block_root,
             fork_choice: RwLock::new(fork_choice),

@@ -84,6 +84,10 @@ pub enum BeaconChainError {
     ObservedBlockProducersError(ObservedBlockProducersError),
     PruningError(PruningError),
     ArithError(ArithError),
+    InvalidShufflingId {
+        shuffling_epoch: Epoch,
+        head_block_epoch: Epoch,
+    },
     WeakSubjectivtyVerificationFailure,
     WeakSubjectivtyShutdownError(TrySendError<&'static str>),
 }

@@ -1,5 +1,4 @@
 use crate::metrics;
-use environment::TaskExecutor;
 use eth1::{Config as Eth1Config, Eth1Block, Service as HttpService};
 use eth2_hashing::hash;
 use slog::{debug, error, trace, Logger};
@@ -11,6 +10,7 @@ use std::collections::HashMap;
 use std::iter::DoubleEndedIterator;
 use std::marker::PhantomData;
 use store::{DBColumn, Error as StoreError, StoreItem};
+use task_executor::TaskExecutor;
 use types::{
     BeaconState, BeaconStateError, ChainSpec, Deposit, Eth1Data, EthSpec, Hash256, Slot, Unsigned,
     DEPOSIT_TREE_DEPTH,

@@ -1,7 +1,9 @@
 use crate::metrics;
 use std::collections::HashMap;
-use types::{Attestation, AttestationData, EthSpec, Slot};
+use tree_hash::TreeHash;
+use types::{Attestation, AttestationData, EthSpec, Hash256, Slot};

+type AttestationDataRoot = Hash256;
 /// The number of slots that will be stored in the pool.
 ///
 /// For example, if `SLOTS_RETAINED == 3` and the pool is pruned at slot `6`, then all attestations
@@ -53,7 +55,7 @@ pub enum Error {
 /// A collection of `Attestation` objects, keyed by their `attestation.data`. Enforces that all
 /// `attestation` are from the same slot.
 struct AggregatedAttestationMap<E: EthSpec> {
-    map: HashMap<AttestationData, Attestation<E>>,
+    map: HashMap<AttestationDataRoot, Attestation<E>>,
 }

 impl<E: EthSpec> AggregatedAttestationMap<E> {
@@ -87,7 +89,9 @@ impl<E: EthSpec> AggregatedAttestationMap<E> {
             return Err(Error::MoreThanOneAggregationBitSet(set_bits.len()));
         }

-        if let Some(existing_attestation) = self.map.get_mut(&a.data) {
+        let attestation_data_root = a.data.tree_hash_root();
+
+        if let Some(existing_attestation) = self.map.get_mut(&attestation_data_root) {
             if existing_attestation
                 .aggregation_bits
                 .get(committee_index)
@@ -107,7 +111,7 @@ impl<E: EthSpec> AggregatedAttestationMap<E> {
                 ));
             }

-            self.map.insert(a.data.clone(), a.clone());
+            self.map.insert(attestation_data_root, a.clone());
             Ok(InsertOutcome::NewAttestationData { committee_index })
         }
     }
@@ -115,8 +119,13 @@ impl<E: EthSpec> AggregatedAttestationMap<E> {
     /// Returns an aggregated `Attestation` with the given `data`, if any.
     ///
     /// The given `a.data.slot` must match the slot that `self` was initialized with.
-    pub fn get(&self, data: &AttestationData) -> Result<Option<Attestation<E>>, Error> {
-        Ok(self.map.get(data).cloned())
+    pub fn get(&self, data: &AttestationData) -> Option<Attestation<E>> {
+        self.map.get(&data.tree_hash_root()).cloned()
+    }
+
+    /// Returns an aggregated `Attestation` with the given `root`, if any.
+    pub fn get_by_root(&self, root: &AttestationDataRoot) -> Option<&Attestation<E>> {
+        self.map.get(root)
     }

     /// Iterate all attestations in `self`.
@@ -220,12 +229,19 @@ impl<E: EthSpec> NaiveAggregationPool<E> {
     }

     /// Returns an aggregated `Attestation` with the given `data`, if any.
-    pub fn get(&self, data: &AttestationData) -> Result<Option<Attestation<E>>, Error> {
-        self.maps
-            .iter()
-            .find(|(slot, _map)| **slot == data.slot)
-            .map(|(_slot, map)| map.get(data))
-            .unwrap_or_else(|| Ok(None))
+    pub fn get(&self, data: &AttestationData) -> Option<Attestation<E>> {
+        self.maps.get(&data.slot).and_then(|map| map.get(data))
+    }
+
+    /// Returns an aggregated `Attestation` with the given `data`, if any.
+    pub fn get_by_slot_and_root(
+        &self,
+        slot: Slot,
+        root: &AttestationDataRoot,
+    ) -> Option<Attestation<E>> {
+        self.maps
+            .get(&slot)
+            .and_then(|map| map.get_by_root(root).cloned())
     }

     /// Iterate all attestations in all slots of `self`.
@@ -338,8 +354,7 @@ mod tests {

         let retrieved = pool
             .get(&a.data)
-            .expect("should not error while getting attestation")
-            .expect("should get an attestation");
+            .expect("should not error while getting attestation");
         assert_eq!(
             retrieved, a,
             "retrieved attestation should equal the one inserted"
@@ -378,8 +393,7 @@ mod tests {

         let retrieved = pool
             .get(&a_0.data)
-            .expect("should not error while getting attestation")
-            .expect("should get an attestation");
+            .expect("should not error while getting attestation");

         let mut a_01 = a_0.clone();
         a_01.aggregate(&a_1);
@@ -408,8 +422,7 @@ mod tests {

         assert_eq!(
             pool.get(&a_0.data)
-                .expect("should not error while getting attestation")
-                .expect("should get an attestation"),
+                .expect("should not error while getting attestation"),
             retrieved,
             "should not have aggregated different attestation data"
         );

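Keying the map by a digest of the data rather than by `AttestationData` itself gives a fixed-size key and removes the `data.clone()` on insert. A standalone sketch of why equal data still aggregates under such a scheme, with `DefaultHasher` standing in for the SSZ `tree_hash_root`:

use std::collections::hash_map::DefaultHasher;
use std::collections::HashMap;
use std::hash::{Hash, Hasher};

// Standalone model of content-addressed aggregation. `DefaultHasher` is a
// stand-in for the real SSZ `tree_hash_root`.
#[derive(Hash)]
struct AttestationData {
    slot: u64,
    index: u64,
}

fn data_root(data: &AttestationData) -> u64 {
    let mut h = DefaultHasher::new();
    data.hash(&mut h);
    h.finish()
}

fn main() {
    // root -> aggregation bits (toy representation)
    let mut map: HashMap<u64, u32> = HashMap::new();

    let a = AttestationData { slot: 1, index: 0 };
    let b = AttestationData { slot: 1, index: 0 }; // same data, different allocation

    *map.entry(data_root(&a)).or_insert(0) |= 0b01;
    *map.entry(data_root(&b)).or_insert(0) |= 0b10;

    // Identical data hashes to the same root, so the two votes aggregated.
    assert_eq!(map[&data_root(&a)], 0b11);
    assert_eq!(map.len(), 1);
}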
@@ -1,6 +1,6 @@
 use crate::metrics;
 use lru::LruCache;
-use types::{beacon_state::CommitteeCache, Epoch, Hash256};
+use types::{beacon_state::CommitteeCache, Epoch, Hash256, ShufflingId};

 /// The size of the LRU cache that stores committee caches for quicker verification.
 ///
@@ -14,7 +14,7 @@ const CACHE_SIZE: usize = 16;
 /// It has been named `ShufflingCache` because `CommitteeCacheCache` is a bit weird and looks like
 /// a find/replace error.
 pub struct ShufflingCache {
-    cache: LruCache<(Epoch, Hash256), CommitteeCache>,
+    cache: LruCache<ShufflingId, CommitteeCache>,
 }

 impl ShufflingCache {
@@ -24,8 +24,8 @@ impl ShufflingCache {
         }
     }

-    pub fn get(&mut self, epoch: Epoch, root: Hash256) -> Option<&CommitteeCache> {
-        let opt = self.cache.get(&(epoch, root));
+    pub fn get(&mut self, key: &ShufflingId) -> Option<&CommitteeCache> {
+        let opt = self.cache.get(key);

         if opt.is_some() {
             metrics::inc_counter(&metrics::SHUFFLING_CACHE_HITS);
@@ -36,11 +36,37 @@ impl ShufflingCache {
         opt
     }

-    pub fn insert(&mut self, epoch: Epoch, root: Hash256, committee_cache: &CommitteeCache) {
-        let key = (epoch, root);
+    pub fn contains(&self, key: &ShufflingId) -> bool {
+        self.cache.contains(key)
+    }
+
+    pub fn insert(&mut self, key: ShufflingId, committee_cache: &CommitteeCache) {
         if !self.cache.contains(&key) {
             self.cache.put(key, committee_cache.clone());
         }
     }
 }
+
+/// Contains the shuffling IDs for a beacon block.
+pub struct BlockShufflingIds {
+    pub current: ShufflingId,
+    pub next: ShufflingId,
+    pub block_root: Hash256,
+}
+
+impl BlockShufflingIds {
+    /// Returns the shuffling ID for the given epoch.
+    ///
+    /// Returns `None` if `epoch` is prior to `self.current.shuffling_epoch`.
+    pub fn id_for_epoch(&self, epoch: Epoch) -> Option<ShufflingId> {
+        if epoch == self.current.shuffling_epoch {
+            Some(self.current.clone())
+        } else if epoch == self.next.shuffling_epoch {
+            Some(self.next.clone())
+        } else if epoch > self.next.shuffling_epoch {
+            Some(ShufflingId::from_components(epoch, self.block_root))
+        } else {
+            None
+        }
+    }
+}

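`id_for_epoch` has three servable cases: the block's current epoch, its next epoch, and any later epoch (where the shuffling is pinned to the block root itself); earlier epochs are refused. A standalone copy of that decision logic, with `ShufflingId` reduced to a small struct so the cases can be exercised directly:

// Standalone copy of the `id_for_epoch` decision logic above.
type Epoch = u64;
type Root = u8;

#[derive(Debug, Clone, Copy, PartialEq)]
struct ShufflingId {
    shuffling_epoch: Epoch,
    root: Root,
}

struct BlockShufflingIds {
    current: ShufflingId,
    next: ShufflingId,
    block_root: Root,
}

impl BlockShufflingIds {
    fn id_for_epoch(&self, epoch: Epoch) -> Option<ShufflingId> {
        if epoch == self.current.shuffling_epoch {
            Some(self.current)
        } else if epoch == self.next.shuffling_epoch {
            Some(self.next)
        } else if epoch > self.next.shuffling_epoch {
            // Far-future epochs: the shuffling is fully determined by this
            // block, so key it on the block root itself.
            Some(ShufflingId { shuffling_epoch: epoch, root: self.block_root })
        } else {
            None // Epochs before `current` are not servable from this block.
        }
    }
}

fn main() {
    let ids = BlockShufflingIds {
        current: ShufflingId { shuffling_epoch: 10, root: 1 },
        next: ShufflingId { shuffling_epoch: 11, root: 2 },
        block_root: 9,
    };
    assert_eq!(ids.id_for_epoch(10).unwrap().root, 1);
    assert_eq!(ids.id_for_epoch(11).unwrap().root, 2);
    assert_eq!(ids.id_for_epoch(15).unwrap().root, 9);
    assert_eq!(ids.id_for_epoch(9), None);
}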
@ -27,9 +27,11 @@ use store::{config::StoreConfig, BlockReplay, HotColdDB, ItemStore, LevelDB, Mem
|
|||||||
use tempfile::{tempdir, TempDir};
|
use tempfile::{tempdir, TempDir};
|
||||||
use tree_hash::TreeHash;
|
use tree_hash::TreeHash;
|
||||||
use types::{
|
use types::{
|
||||||
AggregateSignature, Attestation, BeaconState, BeaconStateHash, ChainSpec, Domain, Epoch,
|
AggregateSignature, Attestation, AttestationData, AttesterSlashing, BeaconState,
|
||||||
EthSpec, Hash256, Keypair, SelectionProof, SignedAggregateAndProof, SignedBeaconBlock,
|
BeaconStateHash, ChainSpec, Checkpoint, Domain, Epoch, EthSpec, Hash256, IndexedAttestation,
|
||||||
SignedBeaconBlockHash, SignedRoot, Slot, SubnetId,
|
Keypair, ProposerSlashing, SelectionProof, SignedAggregateAndProof, SignedBeaconBlock,
|
||||||
|
SignedBeaconBlockHash, SignedRoot, SignedVoluntaryExit, Slot, SubnetId, VariableList,
|
||||||
|
VoluntaryExit,
|
||||||
};
|
};
|
||||||
|
|
||||||
pub use types::test_utils::generate_deterministic_keypairs;
|
pub use types::test_utils::generate_deterministic_keypairs;
|
||||||
@ -131,7 +133,7 @@ impl<E: EthSpec> BeaconChainHarness<BlockingMigratorEphemeralHarnessType<E>> {
|
|||||||
|
|
||||||
let decorator = slog_term::PlainDecorator::new(slog_term::TestStdoutWriter);
|
let decorator = slog_term::PlainDecorator::new(slog_term::TestStdoutWriter);
|
||||||
let drain = slog_term::FullFormat::new(decorator).build();
|
let drain = slog_term::FullFormat::new(decorator).build();
|
||||||
let debug_level = slog::LevelFilter::new(drain, slog::Level::Debug);
|
let debug_level = slog::LevelFilter::new(drain, slog::Level::Critical);
|
||||||
let log = slog::Logger::root(std::sync::Mutex::new(debug_level).fuse(), o!());
|
let log = slog::Logger::root(std::sync::Mutex::new(debug_level).fuse(), o!());
|
||||||
|
|
||||||
let config = StoreConfig::default();
|
let config = StoreConfig::default();
|
||||||
@ -216,7 +218,7 @@ impl<E: EthSpec> BeaconChainHarness<NullMigratorEphemeralHarnessType<E>> {
|
|||||||
|
|
||||||
let decorator = slog_term::PlainDecorator::new(slog_term::TestStdoutWriter);
|
let decorator = slog_term::PlainDecorator::new(slog_term::TestStdoutWriter);
|
||||||
let drain = slog_term::FullFormat::new(decorator).build();
|
let drain = slog_term::FullFormat::new(decorator).build();
|
||||||
let debug_level = slog::LevelFilter::new(drain, slog::Level::Debug);
|
let debug_level = slog::LevelFilter::new(drain, slog::Level::Critical);
|
||||||
let log = slog::Logger::root(std::sync::Mutex::new(debug_level).fuse(), o!());
|
let log = slog::Logger::root(std::sync::Mutex::new(debug_level).fuse(), o!());
|
||||||
let (shutdown_tx, shutdown_receiver) = futures::channel::mpsc::channel(1);
|
let (shutdown_tx, shutdown_receiver) = futures::channel::mpsc::channel(1);
|
||||||
|
|
||||||
@ -265,7 +267,7 @@ impl<E: EthSpec> BeaconChainHarness<BlockingMigratorDiskHarnessType<E>> {
|
|||||||
|
|
||||||
let decorator = slog_term::PlainDecorator::new(slog_term::TestStdoutWriter);
|
let decorator = slog_term::PlainDecorator::new(slog_term::TestStdoutWriter);
|
||||||
let drain = slog_term::FullFormat::new(decorator).build();
|
let drain = slog_term::FullFormat::new(decorator).build();
|
||||||
let debug_level = slog::LevelFilter::new(drain, slog::Level::Debug);
|
let debug_level = slog::LevelFilter::new(drain, slog::Level::Critical);
|
||||||
let log = slog::Logger::root(std::sync::Mutex::new(debug_level).fuse(), o!());
|
let log = slog::Logger::root(std::sync::Mutex::new(debug_level).fuse(), o!());
|
||||||
let (shutdown_tx, shutdown_receiver) = futures::channel::mpsc::channel(1);
|
let (shutdown_tx, shutdown_receiver) = futures::channel::mpsc::channel(1);
|
||||||
|
|
||||||
@ -430,7 +432,7 @@ where
|
|||||||
// If we produce two blocks for the same slot, they hash up to the same value and
|
// If we produce two blocks for the same slot, they hash up to the same value and
|
||||||
// BeaconChain errors out with `BlockIsAlreadyKnown`. Vary the graffiti so that we produce
|
// BeaconChain errors out with `BlockIsAlreadyKnown`. Vary the graffiti so that we produce
|
||||||
// different blocks each time.
|
// different blocks each time.
|
||||||
self.chain.set_graffiti(self.rng.gen::<[u8; 32]>());
|
self.chain.set_graffiti(self.rng.gen::<[u8; 32]>().into());
|
||||||
|
|
||||||
let randao_reveal = {
|
let randao_reveal = {
|
||||||
let epoch = slot.epoch(E::slots_per_epoch());
|
let epoch = slot.epoch(E::slots_per_epoch());
|
||||||
@ -475,8 +477,8 @@ where
|
|||||||
let committee_count = state.get_committee_count_at_slot(state.slot).unwrap();
|
let committee_count = state.get_committee_count_at_slot(state.slot).unwrap();
|
||||||
|
|
||||||
state
|
state
|
||||||
.get_beacon_committees_at_slot(state.slot)
|
.get_beacon_committees_at_slot(attestation_slot)
|
||||||
.unwrap()
|
.expect("should get committees")
|
||||||
.iter()
|
.iter()
|
||||||
.map(|bc| {
|
.map(|bc| {
|
||||||
bc.committee
|
bc.committee
|
||||||
@@ -603,7 +605,6 @@ where
                 let aggregate = self
                     .chain
                     .get_aggregated_attestation(&attestation.data)
-                    .unwrap()
                     .unwrap_or_else(|| {
                         committee_attestations.iter().skip(1).fold(attestation.clone(), |mut agg, (att, _)| {
                             agg.aggregate(att);
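Note: dropping the extra `.unwrap()` suggests `get_aggregated_attestation` now returns an `Option<_>` directly rather than a `Result` wrapping an `Option`; that is an inference from this call site, not something shown elsewhere in the diff. The call then reads roughly like the sketch below (`manually_aggregate` is a hypothetical stand-in for the fold in the closure):

    // Sketch only: one less layer to peel before falling back to manual aggregation.
    let aggregate = chain
        .get_aggregated_attestation(&attestation.data)
        .unwrap_or_else(|| manually_aggregate(&committee_attestations, &attestation));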
@@ -634,6 +635,94 @@ where
             .collect()
     }

+    pub fn make_attester_slashing(&self, validator_indices: Vec<u64>) -> AttesterSlashing<E> {
+        let mut attestation_1 = IndexedAttestation {
+            attesting_indices: VariableList::new(validator_indices).unwrap(),
+            data: AttestationData {
+                slot: Slot::new(0),
+                index: 0,
+                beacon_block_root: Hash256::zero(),
+                target: Checkpoint {
+                    root: Hash256::zero(),
+                    epoch: Epoch::new(0),
+                },
+                source: Checkpoint {
+                    root: Hash256::zero(),
+                    epoch: Epoch::new(0),
+                },
+            },
+            signature: AggregateSignature::infinity(),
+        };
+
+        let mut attestation_2 = attestation_1.clone();
+        attestation_2.data.index += 1;
+
+        for attestation in &mut [&mut attestation_1, &mut attestation_2] {
+            for &i in &attestation.attesting_indices {
+                let sk = &self.validators_keypairs[i as usize].sk;
+
+                let fork = self.chain.head_info().unwrap().fork;
+                let genesis_validators_root = self.chain.genesis_validators_root;
+
+                let domain = self.chain.spec.get_domain(
+                    attestation.data.target.epoch,
+                    Domain::BeaconAttester,
+                    &fork,
+                    genesis_validators_root,
+                );
+                let message = attestation.data.signing_root(domain);
+
+                attestation.signature.add_assign(&sk.sign(message));
+            }
+        }
+
+        AttesterSlashing {
+            attestation_1,
+            attestation_2,
+        }
+    }
+
+    pub fn make_proposer_slashing(&self, validator_index: u64) -> ProposerSlashing {
+        let mut block_header_1 = self
+            .chain
+            .head_beacon_block()
+            .unwrap()
+            .message
+            .block_header();
+        block_header_1.proposer_index = validator_index;
+
+        let mut block_header_2 = block_header_1.clone();
+        block_header_2.state_root = Hash256::zero();
+
+        let sk = &self.validators_keypairs[validator_index as usize].sk;
+        let fork = self.chain.head_info().unwrap().fork;
+        let genesis_validators_root = self.chain.genesis_validators_root;
+
+        let mut signed_block_headers = vec![block_header_1, block_header_2]
+            .into_iter()
+            .map(|block_header| {
+                block_header.sign::<E>(&sk, &fork, genesis_validators_root, &self.chain.spec)
+            })
+            .collect::<Vec<_>>();
+
+        ProposerSlashing {
+            signed_header_2: signed_block_headers.remove(1),
+            signed_header_1: signed_block_headers.remove(0),
+        }
+    }
+
+    pub fn make_voluntary_exit(&self, validator_index: u64, epoch: Epoch) -> SignedVoluntaryExit {
+        let sk = &self.validators_keypairs[validator_index as usize].sk;
+        let fork = self.chain.head_info().unwrap().fork;
+        let genesis_validators_root = self.chain.genesis_validators_root;
+
+        VoluntaryExit {
+            epoch,
+            validator_index,
+        }
+        .sign(sk, &fork, genesis_validators_root, &self.chain.spec)
+    }
+
     pub fn process_block(&self, slot: Slot, block: SignedBeaconBlock<E>) -> SignedBeaconBlockHash {
         assert_eq!(self.chain.slot().unwrap(), slot);
         let block_hash: SignedBeaconBlockHash = self.chain.process_block(block).unwrap().into();
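Note: these three helpers give tests a one-line way to fabricate slashable messages and exits that are correctly signed for the current fork. A minimal usage sketch; the `harness` setup and whatever the test asserts afterwards are assumed, not shown in this diff:

    // Sketch only: `harness` is a BeaconChainHarness as constructed elsewhere in these tests.
    let attester_slashing = harness.make_attester_slashing(vec![1, 2]);
    let proposer_slashing = harness.make_proposer_slashing(0);
    let exit = harness.make_voluntary_exit(3, Epoch::new(0));
    // Each message is signed with the matching validator keypair, so it should pass
    // the usual gossip/op-pool verification paths exercised by the tests.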
@@ -656,7 +745,10 @@ where
         for (unaggregated_attestations, maybe_signed_aggregate) in attestations.into_iter() {
             for (attestation, subnet_id) in unaggregated_attestations {
                 self.chain
-                    .verify_unaggregated_attestation_for_gossip(attestation.clone(), subnet_id)
+                    .verify_unaggregated_attestation_for_gossip(
+                        attestation.clone(),
+                        Some(subnet_id),
+                    )
                     .unwrap()
                     .add_to_pool(&self.chain)
                     .unwrap();
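Note: `verify_unaggregated_attestation_for_gossip` now takes an `Option<SubnetId>`. Callers that learned the attestation from gossip pass the subnet it arrived on; callers with no subnet information (for instance, the standard API accepting an attestation over HTTP) can pass `None`. A hedged sketch of the two call styles; the `None` behaviour is inferred from the signature change, not shown in this diff:

    // From gossip: the carrying subnet is known, so have it checked.
    chain.verify_unaggregated_attestation_for_gossip(attestation.clone(), Some(subnet_id))?;

    // From the HTTP API: no subnet is known; `None` presumably lets the chain
    // compute the expected subnet itself rather than comparing against one.
    chain.verify_unaggregated_attestation_for_gossip(attestation, None)?;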
@@ -585,7 +585,7 @@ fn unaggregated_gossip_verification() {
             matches!(
                 harness
                     .chain
-                    .verify_unaggregated_attestation_for_gossip($attn_getter, $subnet_getter)
+                    .verify_unaggregated_attestation_for_gossip($attn_getter, Some($subnet_getter))
                     .err()
                     .expect(&format!(
                         "{} should error during verify_unaggregated_attestation_for_gossip",
@@ -852,7 +852,7 @@ fn unaggregated_gossip_verification() {

     harness
         .chain
-        .verify_unaggregated_attestation_for_gossip(valid_attestation.clone(), subnet_id)
+        .verify_unaggregated_attestation_for_gossip(valid_attestation.clone(), Some(subnet_id))
         .expect("valid attestation should be verified");

     /*
@@ -941,6 +941,6 @@ fn attestation_that_skips_epochs() {

     harness
         .chain
-        .verify_unaggregated_attestation_for_gossip(attestation, subnet_id)
+        .verify_unaggregated_attestation_for_gossip(attestation, Some(subnet_id))
         .expect("should gossip verify attestation that skips slots");
 }
@@ -326,7 +326,7 @@ fn epoch_boundary_state_attestation_processing() {

         let res = harness
             .chain
-            .verify_unaggregated_attestation_for_gossip(attestation.clone(), subnet_id);
+            .verify_unaggregated_attestation_for_gossip(attestation.clone(), Some(subnet_id));

         let current_slot = harness.chain.slot().expect("should get slot");
         let expected_attestation_slot = attestation.data.slot;
@@ -357,11 +357,10 @@ fn roundtrip_operation_pool() {
         .persist_op_pool()
         .expect("should persist op pool");

-    let key = Hash256::from_slice(&OP_POOL_DB_KEY);
     let restored_op_pool = harness
         .chain
         .store
-        .get_item::<PersistedOperationPool<MinimalEthSpec>>(&key)
+        .get_item::<PersistedOperationPool<MinimalEthSpec>>(&OP_POOL_DB_KEY)
         .expect("should read db")
         .expect("should find op pool")
         .into_operation_pool();
@@ -463,7 +462,7 @@ fn attestations_with_increasing_slots() {
     for (attestation, subnet_id) in attestations.into_iter().flatten() {
         let res = harness
             .chain
-            .verify_unaggregated_attestation_for_gossip(attestation.clone(), subnet_id);
+            .verify_unaggregated_attestation_for_gossip(attestation.clone(), Some(subnet_id));

         let current_slot = harness.chain.slot().expect("should get slot");
         let expected_attestation_slot = attestation.data.slot;
@@ -5,7 +5,7 @@ authors = ["Sigma Prime <contact@sigmaprime.io>"]
 edition = "2018"

 [dev-dependencies]
-sloggers = "1.0.0"
+sloggers = "1.0.1"
 toml = "0.5.6"

 [dependencies]
@@ -14,30 +14,33 @@ store = { path = "../store" }
 network = { path = "../network" }
 timer = { path = "../timer" }
 eth2_libp2p = { path = "../eth2_libp2p" }
-rest_api = { path = "../rest_api" }
 parking_lot = "0.11.0"
 websocket_server = { path = "../websocket_server" }
-prometheus = "0.9.0"
+prometheus = "0.10.0"
 types = { path = "../../consensus/types" }
-tree_hash = "0.1.0"
+tree_hash = "0.1.1"
 eth2_config = { path = "../../common/eth2_config" }
 slot_clock = { path = "../../common/slot_clock" }
-serde = "1.0.110"
-serde_derive = "1.0.110"
-error-chain = "0.12.2"
-serde_yaml = "0.8.11"
+serde = "1.0.116"
+serde_derive = "1.0.116"
+error-chain = "0.12.4"
+serde_yaml = "0.8.13"
 slog = { version = "2.5.2", features = ["max_level_trace"] }
 slog-async = "2.5.0"
 tokio = "0.2.22"
-dirs = "2.0.2"
+dirs = "3.0.1"
 futures = "0.3.5"
-reqwest = { version = "0.10.4", features = ["native-tls-vendored"] }
+reqwest = { version = "0.10.8", features = ["native-tls-vendored"] }
 url = "2.1.1"
 eth1 = { path = "../eth1" }
 genesis = { path = "../genesis" }
+task_executor = { path = "../../common/task_executor" }
 environment = { path = "../../lighthouse/environment" }
 eth2_ssz = "0.1.2"
 lazy_static = "1.4.0"
 lighthouse_metrics = { path = "../../common/lighthouse_metrics" }
-time = "0.2.16"
+time = "0.2.22"
 bus = "2.2.3"
+directory = {path = "../../common/directory"}
+http_api = { path = "../http_api" }
+http_metrics = { path = "../http_metrics" }
@@ -13,15 +13,14 @@ use beacon_chain::{
 use bus::Bus;
 use environment::RuntimeContext;
 use eth1::{Config as Eth1Config, Service as Eth1Service};
-use eth2_config::Eth2Config;
 use eth2_libp2p::NetworkGlobals;
 use genesis::{interop_genesis_state, Eth1GenesisService};
 use network::{NetworkConfig, NetworkMessage, NetworkService};
 use parking_lot::Mutex;
-use slog::info;
+use slog::{debug, info};
 use ssz::Decode;
 use std::net::SocketAddr;
-use std::path::Path;
+use std::path::{Path, PathBuf};
 use std::sync::Arc;
 use std::time::Duration;
 use timer::spawn_timer;
@@ -61,7 +60,10 @@ pub struct ClientBuilder<T: BeaconChainTypes> {
     event_handler: Option<T::EventHandler>,
     network_globals: Option<Arc<NetworkGlobals<T::EthSpec>>>,
     network_send: Option<UnboundedSender<NetworkMessage<T::EthSpec>>>,
-    http_listen_addr: Option<SocketAddr>,
+    db_path: Option<PathBuf>,
+    freezer_db_path: Option<PathBuf>,
+    http_api_config: http_api::Config,
+    http_metrics_config: http_metrics::Config,
     websocket_listen_addr: Option<SocketAddr>,
     eth_spec_instance: T::EthSpec,
 }
@@ -103,7 +105,10 @@ where
             event_handler: None,
             network_globals: None,
             network_send: None,
-            http_listen_addr: None,
+            db_path: None,
+            freezer_db_path: None,
+            http_api_config: <_>::default(),
+            http_metrics_config: <_>::default(),
             websocket_listen_addr: None,
             eth_spec_instance,
         }
@@ -280,55 +285,16 @@ where
         Ok(self)
     }

-    /// Immediately starts the beacon node REST API http server.
-    pub fn http_server(
-        mut self,
-        client_config: &ClientConfig,
-        eth2_config: &Eth2Config,
-        events: Arc<Mutex<Bus<SignedBeaconBlockHash>>>,
-    ) -> Result<Self, String> {
-        let beacon_chain = self
-            .beacon_chain
-            .clone()
-            .ok_or_else(|| "http_server requires a beacon chain")?;
-        let context = self
-            .runtime_context
-            .as_ref()
-            .ok_or_else(|| "http_server requires a runtime_context")?
-            .service_context("http".into());
-        let network_globals = self
-            .network_globals
-            .clone()
-            .ok_or_else(|| "http_server requires a libp2p network")?;
-        let network_send = self
-            .network_send
-            .clone()
-            .ok_or_else(|| "http_server requires a libp2p network sender")?;
-
-        let network_info = rest_api::NetworkInfo {
-            network_globals,
-            network_chan: network_send,
-        };
-
-        let listening_addr = rest_api::start_server(
-            context.executor,
-            &client_config.rest_api,
-            beacon_chain,
-            network_info,
-            client_config
-                .create_db_path()
-                .map_err(|_| "unable to read data dir")?,
-            client_config
-                .create_freezer_db_path()
-                .map_err(|_| "unable to read freezer DB dir")?,
-            eth2_config.clone(),
-            events,
-        )
-        .map_err(|e| format!("Failed to start HTTP API: {:?}", e))?;
-
-        self.http_listen_addr = Some(listening_addr);
-
-        Ok(self)
+    /// Provides configuration for the HTTP API.
+    pub fn http_api_config(mut self, config: http_api::Config) -> Self {
+        self.http_api_config = config;
+        self
+    }
+
+    /// Provides configuration for the HTTP server that serves Prometheus metrics.
+    pub fn http_metrics_config(mut self, config: http_metrics::Config) -> Self {
+        self.http_metrics_config = config;
+        self
     }

     /// Immediately starts the service that periodically logs information each slot.
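Note: with the REST server gone from the builder, HTTP behaviour becomes plain data: the builder only stores the two config structs, and the servers are started later inside `build()`. A hedged usage sketch of the resulting call chain (the surrounding `client_config` fields come from this diff's config changes; everything else is assumed):

    // Sketch only: configure, don't start. Servers come up in `build()`.
    let client = builder
        .http_api_config(client_config.http_api.clone())
        .http_metrics_config(client_config.http_metrics.clone())
        .build()?;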
@@ -367,25 +333,85 @@ where
     /// specified.
     ///
     /// If type inference errors are being raised, see the comment on the definition of `Self`.
+    #[allow(clippy::type_complexity)]
     pub fn build(
         self,
-    ) -> Client<
-        Witness<
-            TStoreMigrator,
-            TSlotClock,
-            TEth1Backend,
-            TEthSpec,
-            TEventHandler,
-            THotStore,
-            TColdStore,
+    ) -> Result<
+        Client<
+            Witness<
+                TStoreMigrator,
+                TSlotClock,
+                TEth1Backend,
+                TEthSpec,
+                TEventHandler,
+                THotStore,
+                TColdStore,
+            >,
         >,
+        String,
     > {
-        Client {
+        let runtime_context = self
+            .runtime_context
+            .as_ref()
+            .ok_or_else(|| "build requires a runtime context".to_string())?;
+        let log = runtime_context.log().clone();
+
+        let http_api_listen_addr = if self.http_api_config.enabled {
+            let ctx = Arc::new(http_api::Context {
+                config: self.http_api_config.clone(),
+                chain: self.beacon_chain.clone(),
+                network_tx: self.network_send.clone(),
+                network_globals: self.network_globals.clone(),
+                log: log.clone(),
+            });
+
+            let exit = runtime_context.executor.exit();
+
+            let (listen_addr, server) = http_api::serve(ctx, exit)
+                .map_err(|e| format!("Unable to start HTTP API server: {:?}", e))?;
+
+            runtime_context
+                .clone()
+                .executor
+                .spawn_without_exit(async move { server.await }, "http-api");
+
+            Some(listen_addr)
+        } else {
+            info!(log, "HTTP server is disabled");
+            None
+        };
+
+        let http_metrics_listen_addr = if self.http_metrics_config.enabled {
+            let ctx = Arc::new(http_metrics::Context {
+                config: self.http_metrics_config.clone(),
+                chain: self.beacon_chain.clone(),
+                db_path: self.db_path.clone(),
+                freezer_db_path: self.freezer_db_path.clone(),
+                log: log.clone(),
+            });
+
+            let exit = runtime_context.executor.exit();
+
+            let (listen_addr, server) = http_metrics::serve(ctx, exit)
+                .map_err(|e| format!("Unable to start HTTP API server: {:?}", e))?;
+
+            runtime_context
+                .executor
+                .spawn_without_exit(async move { server.await }, "http-api");
+
+            Some(listen_addr)
+        } else {
+            debug!(log, "Metrics server is disabled");
+            None
+        };
+
+        Ok(Client {
             beacon_chain: self.beacon_chain,
             network_globals: self.network_globals,
-            http_listen_addr: self.http_listen_addr,
+            http_api_listen_addr,
+            http_metrics_listen_addr,
             websocket_listen_addr: self.websocket_listen_addr,
-        }
+        })
     }
 }
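Note: `build()` now owns server startup. Each server is spawned only when its config has `enabled` set, and `spawn_without_exit` keeps it off the main exit signal so shutdown of other services is not blocked on it. A hedged sketch of opting in (the `enabled` flag is visible above; `listen_port` is an assumed field name):

    // Sketch under assumptions: enable the standard API on a chosen port before `build()`.
    let mut api_config = http_api::Config::default();
    api_config.enabled = true;
    api_config.listen_port = 5052; // assumed field name, for illustration only

    let client = builder.http_api_config(api_config).build()?;
    assert!(client.http_api_listen_addr().is_some());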
@@ -527,6 +553,9 @@ where
             .clone()
             .ok_or_else(|| "disk_store requires a chain spec".to_string())?;

+        self.db_path = Some(hot_path.into());
+        self.freezer_db_path = Some(cold_path.into());
+
         let store = HotColdDB::open(hot_path, cold_path, config, spec, context.log().clone())
             .map_err(|e| format!("Unable to open database: {:?}", e))?;
         self.store = Some(Arc::new(store));
@@ -1,11 +1,10 @@
+use directory::DEFAULT_ROOT_DIR;
 use network::NetworkConfig;
 use serde_derive::{Deserialize, Serialize};
 use std::fs;
 use std::path::PathBuf;
 use types::Graffiti;

-pub const DEFAULT_DATADIR: &str = ".lighthouse";
-
 /// The number initial validators when starting the `Minimal`.
 const TESTNET_SPEC_CONSTANTS: &str = "minimal";
@@ -63,16 +62,17 @@ pub struct Config {
     pub genesis: ClientGenesis,
     pub store: store::StoreConfig,
     pub network: network::NetworkConfig,
-    pub rest_api: rest_api::Config,
     pub chain: beacon_chain::ChainConfig,
     pub websocket_server: websocket_server::Config,
     pub eth1: eth1::Config,
+    pub http_api: http_api::Config,
+    pub http_metrics: http_metrics::Config,
 }

 impl Default for Config {
     fn default() -> Self {
         Self {
-            data_dir: PathBuf::from(DEFAULT_DATADIR),
+            data_dir: PathBuf::from(DEFAULT_ROOT_DIR),
             db_name: "chain_db".to_string(),
             freezer_db_path: None,
             log_file: PathBuf::from(""),
@@ -80,7 +80,6 @@ impl Default for Config {
             store: <_>::default(),
             network: NetworkConfig::default(),
             chain: <_>::default(),
-            rest_api: <_>::default(),
             websocket_server: <_>::default(),
             spec_constants: TESTNET_SPEC_CONSTANTS.into(),
             dummy_eth1_backend: false,
|
|||||||
eth1: <_>::default(),
|
eth1: <_>::default(),
|
||||||
disabled_forks: Vec::new(),
|
disabled_forks: Vec::new(),
|
||||||
graffiti: Graffiti::default(),
|
graffiti: Graffiti::default(),
|
||||||
|
http_api: <_>::default(),
|
||||||
|
http_metrics: <_>::default(),
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
@@ -23,7 +23,10 @@ pub use eth2_config::Eth2Config;
 pub struct Client<T: BeaconChainTypes> {
     beacon_chain: Option<Arc<BeaconChain<T>>>,
     network_globals: Option<Arc<NetworkGlobals<T::EthSpec>>>,
-    http_listen_addr: Option<SocketAddr>,
+    /// Listen address for the standard eth2.0 API, if the service was started.
+    http_api_listen_addr: Option<SocketAddr>,
+    /// Listen address for the HTTP server which serves Prometheus metrics.
+    http_metrics_listen_addr: Option<SocketAddr>,
     websocket_listen_addr: Option<SocketAddr>,
 }
@@ -33,9 +36,14 @@ impl<T: BeaconChainTypes> Client<T> {
         self.beacon_chain.clone()
     }

-    /// Returns the address of the client's HTTP API server, if it was started.
-    pub fn http_listen_addr(&self) -> Option<SocketAddr> {
-        self.http_listen_addr
+    /// Returns the address of the client's standard eth2.0 API server, if it was started.
+    pub fn http_api_listen_addr(&self) -> Option<SocketAddr> {
+        self.http_api_listen_addr
+    }
+
+    /// Returns the address of the client's HTTP Prometheus metrics server, if it was started.
+    pub fn http_metrics_listen_addr(&self) -> Option<SocketAddr> {
+        self.http_metrics_listen_addr
     }

     /// Returns the address of the client's WebSocket API server, if it was started.
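Note: callers that previously read `http_listen_addr()` must now pick the getter for the server they care about; both return `None` when the corresponding server was disabled. A minimal caller-side sketch (names outside this diff are assumptions):

    // Sketch only: report whichever servers actually came up.
    if let Some(addr) = client.http_api_listen_addr() {
        println!("standard API listening on {}", addr);
    }
    if let Some(addr) = client.http_metrics_listen_addr() {
        println!("metrics listening on {}", addr);
    }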
@@ -22,7 +22,7 @@ const SPEEDO_OBSERVATIONS: usize = 4;

 /// Spawns a notifier service which periodically logs information about the node.
 pub fn spawn_notifier<T: BeaconChainTypes>(
-    executor: environment::TaskExecutor,
+    executor: task_executor::TaskExecutor,
     beacon_chain: Arc<BeaconChain<T>>,
     network: Arc<NetworkGlobals<T::EthSpec>>,
     milliseconds_per_slot: u64,
@@ -8,25 +8,26 @@ edition = "2018"
 eth1_test_rig = { path = "../../testing/eth1_test_rig" }
 toml = "0.5.6"
 web3 = "0.11.0"
-sloggers = "1.0.0"
+sloggers = "1.0.1"
+environment = { path = "../../lighthouse/environment" }

 [dependencies]
-reqwest = { version = "0.10.4", features = ["native-tls-vendored"] }
+reqwest = { version = "0.10.8", features = ["native-tls-vendored"] }
 futures = { version = "0.3.5", features = ["compat"] }
-serde_json = "1.0.52"
-serde = { version = "1.0.110", features = ["derive"] }
+serde_json = "1.0.58"
+serde = { version = "1.0.116", features = ["derive"] }
 hex = "0.4.2"
 types = { path = "../../consensus/types"}
 merkle_proof = { path = "../../consensus/merkle_proof"}
 eth2_ssz = "0.1.2"
 eth2_ssz_derive = "0.1.0"
-tree_hash = "0.1.0"
+tree_hash = "0.1.1"
 eth2_hashing = "0.1.0"
 parking_lot = "0.11.0"
 slog = "2.5.2"
 tokio = { version = "0.2.22", features = ["full"] }
 state_processing = { path = "../../consensus/state_processing" }
-libflate = "1.0.0"
+libflate = "1.0.2"
 lighthouse_metrics = { path = "../../common/lighthouse_metrics"}
 lazy_static = "1.4.0"
-environment = { path = "../../lighthouse/environment" }
+task_executor = { path = "../../common/task_executor" }
@@ -39,19 +39,34 @@ pub enum Eth1NetworkId {
     Custom(u64),
 }

+impl Into<u64> for Eth1NetworkId {
+    fn into(self) -> u64 {
+        match self {
+            Eth1NetworkId::Mainnet => 1,
+            Eth1NetworkId::Goerli => 5,
+            Eth1NetworkId::Custom(id) => id,
+        }
+    }
+}
+
+impl From<u64> for Eth1NetworkId {
+    fn from(id: u64) -> Self {
+        let into = |x: Eth1NetworkId| -> u64 { x.into() };
+        match id {
+            id if id == into(Eth1NetworkId::Mainnet) => Eth1NetworkId::Mainnet,
+            id if id == into(Eth1NetworkId::Goerli) => Eth1NetworkId::Goerli,
+            id => Eth1NetworkId::Custom(id),
+        }
+    }
+}
+
 impl FromStr for Eth1NetworkId {
     type Err = String;

     fn from_str(s: &str) -> Result<Self, Self::Err> {
-        match s {
-            "1" => Ok(Eth1NetworkId::Mainnet),
-            "5" => Ok(Eth1NetworkId::Goerli),
-            custom => {
-                let network_id = u64::from_str_radix(custom, 10)
-                    .map_err(|e| format!("Failed to parse eth1 network id {}", e))?;
-                Ok(Eth1NetworkId::Custom(network_id))
-            }
-        }
+        u64::from_str_radix(s, 10)
+            .map(Into::into)
+            .map_err(|e| format!("Failed to parse eth1 network id {}", e))
     }
 }
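Note: the string parser now funnels through the numeric conversions, so `"1"`, `"5"` and arbitrary ids all take the same path. A self-contained sketch of the same pattern, trimmed and restated for illustration (not the crate's exact code; it swaps the diff's `Into<u64>` impl for the equivalent, clippy-preferred `From` direction):

    use std::str::FromStr;

    #[derive(Clone, Debug, PartialEq)]
    pub enum Eth1NetworkId {
        Mainnet,
        Goerli,
        Custom(u64),
    }

    impl From<Eth1NetworkId> for u64 {
        fn from(id: Eth1NetworkId) -> u64 {
            match id {
                Eth1NetworkId::Mainnet => 1,
                Eth1NetworkId::Goerli => 5,
                Eth1NetworkId::Custom(id) => id,
            }
        }
    }

    impl From<u64> for Eth1NetworkId {
        fn from(id: u64) -> Self {
            match id {
                1 => Eth1NetworkId::Mainnet,
                5 => Eth1NetworkId::Goerli,
                id => Eth1NetworkId::Custom(id),
            }
        }
    }

    impl FromStr for Eth1NetworkId {
        type Err = String;
        fn from_str(s: &str) -> Result<Self, Self::Err> {
            s.parse::<u64>()
                .map(Into::into)
                .map_err(|e| format!("Failed to parse eth1 network id {}", e))
        }
    }

    fn main() {
        assert_eq!("1".parse::<Eth1NetworkId>(), Ok(Eth1NetworkId::Mainnet));
        assert_eq!(u64::from(Eth1NetworkId::Goerli), 5);
        assert_eq!(Eth1NetworkId::from(1337), Eth1NetworkId::Custom(1337));
    }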
@@ -13,4 +13,6 @@ pub use block_cache::{BlockCache, Eth1Block};
 pub use deposit_cache::DepositCache;
 pub use deposit_log::DepositLog;
 pub use inner::SszEth1Cache;
-pub use service::{BlockCacheUpdateOutcome, Config, DepositCacheUpdateOutcome, Error, Service};
+pub use service::{
+    BlockCacheUpdateOutcome, Config, DepositCacheUpdateOutcome, Error, Service, DEFAULT_NETWORK_ID,
+};
@@ -345,7 +345,7 @@ impl Service {
     /// - Err(_) if there is an error.
     ///
     /// Emits logs for debugging and errors.
-    pub fn auto_update(self, handle: environment::TaskExecutor) {
+    pub fn auto_update(self, handle: task_executor::TaskExecutor) {
         let update_interval = Duration::from_millis(self.config().auto_update_interval_millis);

         let mut interval = interval_at(Instant::now(), update_interval);
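Note: the executor type moved out of the `environment` crate into the new standalone `common/task_executor` crate, so call sites only change the path. A hedged sketch of the call site (whether the handle is cloned off a runtime context like this is an assumption):

    // Sketch only: spawn the eth1 caching service's periodic update loop.
    let executor: task_executor::TaskExecutor = context.executor.clone(); // assumed handle
    eth1_service.auto_update(executor);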
@@ -5,50 +5,50 @@ authors = ["Sigma Prime <contact@sigmaprime.io>"]
 edition = "2018"

 [dependencies]
-hex = "0.4.2"
+discv5 = { version = "0.1.0-alpha.13", features = ["libp2p"] }
 types = { path = "../../consensus/types" }
 hashset_delay = { path = "../../common/hashset_delay" }
 eth2_ssz_types = { path = "../../consensus/ssz_types" }
-serde = { version = "1.0.110", features = ["derive"] }
-serde_derive = "1.0.110"
+serde = { version = "1.0.116", features = ["derive"] }
+serde_derive = "1.0.116"
 eth2_ssz = "0.1.2"
 eth2_ssz_derive = "0.1.0"
 slog = { version = "2.5.2", features = ["max_level_trace"] }
 lighthouse_version = { path = "../../common/lighthouse_version" }
 tokio = { version = "0.2.22", features = ["time", "macros"] }
 futures = "0.3.5"
-error-chain = "0.12.2"
-dirs = "2.0.2"
+error-chain = "0.12.4"
+dirs = "3.0.1"
 fnv = "1.0.7"
 unsigned-varint = { git = "https://github.com/sigp/unsigned-varint", branch = "latest-codecs", features = ["codec"] }
 lazy_static = "1.4.0"
 lighthouse_metrics = { path = "../../common/lighthouse_metrics" }
-smallvec = "1.4.1"
-lru = "0.5.1"
+smallvec = "1.4.2"
+lru = "0.6.0"
 parking_lot = "0.11.0"
 sha2 = "0.9.1"
-base64 = "0.12.1"
-snap = "1.0.0"
+base64 = "0.13.0"
+snap = "1.0.1"
 void = "1.0.2"
+hex = "0.4.2"
 tokio-io-timeout = "0.4.0"
 tokio-util = { version = "0.3.1", features = ["codec", "compat"] }
-discv5 = { version = "0.1.0-alpha.12", features = ["libp2p"] }
 tiny-keccak = "2.0.2"
-environment = { path = "../../lighthouse/environment" }
+task_executor = { path = "../../common/task_executor" }
 rand = "0.7.3"
+directory = { path = "../../common/directory" }
 regex = "1.3.9"

 [dependencies.libp2p]
 #version = "0.23.0"
 git = "https://github.com/sigp/rust-libp2p"
-rev = "03f998022ce2f566a6c6e6c4206bc0ce4d45109f"
+rev = "5a9f0819af3990cfefad528e957297af596399b4"
 default-features = false
 features = ["websocket", "identify", "mplex", "noise", "gossipsub", "dns", "tcp-tokio"]

 [dev-dependencies]
 tokio = { version = "0.2.22", features = ["full"] }
-slog-stdlog = "4.0.0"
-slog-term = "2.5.0"
+slog-term = "2.6.0"
 slog-async = "2.5.0"
 tempdir = "0.3.7"
 exit-future = "0.2.0"
@@ -54,8 +54,6 @@ impl<TSpec: EthSpec> DelegatingHandler<TSpec> {
     }
 }

-// TODO: this can all be created with macros
-
 /// Wrapper around the `ProtocolsHandler::InEvent` types of the handlers.
 /// Simply delegated to the corresponding behaviour's handler.
 #[derive(Debug, Clone)]
@@ -115,7 +113,6 @@ pub type DelegateOutProto<TSpec> = EitherUpgrade<
     >,
 >;

-// TODO: prob make this an enum
 pub type DelegateOutInfo<TSpec> = EitherOutput<
     <GossipHandler as ProtocolsHandler>::OutboundOpenInfo,
     EitherOutput<
@@ -216,7 +213,6 @@ impl<TSpec: EthSpec> ProtocolsHandler for DelegatingHandler<TSpec> {
             <Self::OutboundProtocol as OutboundUpgrade<NegotiatedSubstream>>::Error,
         >,
     ) {
-        // TODO: find how to clean up
         match info {
             // Gossipsub
             EitherOutput::First(info) => match error {
@@ -41,15 +41,9 @@ pub enum BehaviourHandlerIn<TSpec: EthSpec> {
     Shutdown(Option<(RequestId, RPCRequest<TSpec>)>),
 }

-pub enum BehaviourHandlerOut<TSpec: EthSpec> {
-    Delegate(Box<DelegateOut<TSpec>>),
-    // TODO: replace custom with events to send
-    Custom,
-}
-
 impl<TSpec: EthSpec> ProtocolsHandler for BehaviourHandler<TSpec> {
     type InEvent = BehaviourHandlerIn<TSpec>;
-    type OutEvent = BehaviourHandlerOut<TSpec>;
+    type OutEvent = DelegateOut<TSpec>;
     type Error = DelegateError<TSpec>;
     type InboundProtocol = DelegateInProto<TSpec>;
     type OutboundProtocol = DelegateOutProto<TSpec>;
@@ -122,9 +116,7 @@ impl<TSpec: EthSpec> ProtocolsHandler for BehaviourHandler<TSpec> {

         match self.delegate.poll(cx) {
             Poll::Ready(ProtocolsHandlerEvent::Custom(event)) => {
-                return Poll::Ready(ProtocolsHandlerEvent::Custom(
-                    BehaviourHandlerOut::Delegate(Box::new(event)),
-                ))
+                return Poll::Ready(ProtocolsHandlerEvent::Custom(event))
             }
             Poll::Ready(ProtocolsHandlerEvent::Close(err)) => {
                 return Poll::Ready(ProtocolsHandlerEvent::Close(err))
@@ -5,7 +5,7 @@ use crate::types::{GossipEncoding, GossipKind, GossipTopic, SubnetDiscovery};
 use crate::Eth2Enr;
 use crate::{error, metrics, Enr, NetworkConfig, NetworkGlobals, PubsubMessage, TopicHash};
 use futures::prelude::*;
-use handler::{BehaviourHandler, BehaviourHandlerIn, BehaviourHandlerOut, DelegateIn, DelegateOut};
+use handler::{BehaviourHandler, BehaviourHandlerIn, DelegateIn, DelegateOut};
 use libp2p::{
     core::{
         connection::{ConnectedPoint, ConnectionId, ListenerId},
@@ -102,7 +102,7 @@ pub struct Behaviour<TSpec: EthSpec> {
     /// The Eth2 RPC specified in the wire-0 protocol.
     eth2_rpc: RPC<TSpec>,
     /// Keep regular connection to peers and disconnect if absent.
-    // TODO: Using id for initial interop. This will be removed by mainnet.
+    // NOTE: The id protocol is used for initial interop. This will be removed by mainnet.
     /// Provides IP addresses and peer information.
     identify: Identify,
     /// The peer manager that keeps track of peer's reputation and status.
@@ -203,9 +203,6 @@ impl<TSpec: EthSpec> Behaviour<TSpec> {
             self.enr_fork_id.fork_digest,
         );

-        // TODO: Implement scoring
-        // let topic: Topic = gossip_topic.into();
-        // self.gossipsub.set_topic_params(t.hash(), TopicScoreParams::default());
         self.subscribe(gossip_topic)
     }
@@ -227,12 +224,6 @@ impl<TSpec: EthSpec> Behaviour<TSpec> {
             GossipEncoding::default(),
             self.enr_fork_id.fork_digest,
         );
-        // TODO: Implement scoring
-        /*
-        let t: Topic = topic.clone().into();
-        self.gossipsub
-            .set_topic_params(t.hash(), TopicScoreParams::default());
-        */
         self.subscribe(topic)
     }
@@ -591,7 +582,6 @@ impl<TSpec: EthSpec> Behaviour<TSpec> {
             } => {
                 if matches!(error, RPCError::HandlerRejected) {
                     // this peer's request got canceled
-                    // TODO: cancel processing for this request
                 }
                 // Inform the peer manager of the error.
                 // An inbound error here means we sent an error to the peer, or the stream
@@ -621,11 +611,8 @@ impl<TSpec: EthSpec> Behaviour<TSpec> {
                     RPCRequest::MetaData(_) => {
                         // send the requested meta-data
                         self.send_meta_data_response((handler_id, id), peer_id);
-                        // TODO: inform the peer manager?
                     }
                     RPCRequest::Goodbye(reason) => {
-                        // let the peer manager know this peer is in the process of disconnecting
-                        self.peer_manager._disconnecting_peer(&peer_id);
                         // queue for disconnection without a goodbye message
                         debug!(
                             self.log, "Peer sent Goodbye";
@@ -975,17 +962,11 @@ impl<TSpec: EthSpec> NetworkBehaviour for Behaviour<TSpec> {
             return;
         }

+        // Events comming from the handler, redirected to each behaviour
         match event {
-            // Events comming from the handler, redirected to each behaviour
-            BehaviourHandlerOut::Delegate(delegate) => match *delegate {
-                DelegateOut::Gossipsub(ev) => self.gossipsub.inject_event(peer_id, conn_id, ev),
-                DelegateOut::RPC(ev) => self.eth2_rpc.inject_event(peer_id, conn_id, ev),
-                DelegateOut::Identify(ev) => self.identify.inject_event(peer_id, conn_id, *ev),
-            },
-            /* Custom events sent BY the handler */
-            BehaviourHandlerOut::Custom => {
-                // TODO: implement
-            }
+            DelegateOut::Gossipsub(ev) => self.gossipsub.inject_event(peer_id, conn_id, ev),
+            DelegateOut::RPC(ev) => self.eth2_rpc.inject_event(peer_id, conn_id, ev),
+            DelegateOut::Identify(ev) => self.identify.inject_event(peer_id, conn_id, *ev),
         }
     }
@@ -1003,7 +984,6 @@ impl<TSpec: EthSpec> NetworkBehaviour for Behaviour<TSpec> {
             self.waker = Some(cx.waker().clone());
         }

-        // TODO: move where it's less distracting
         macro_rules! poll_behaviour {
             /* $behaviour: The sub-behaviour being polled.
              * $on_event_fn: Function to call if we get an event from the sub-behaviour.
@@ -1,5 +1,8 @@
 use crate::types::GossipKind;
 use crate::{Enr, PeerIdSerialized};
+use directory::{
+    DEFAULT_BEACON_NODE_DIR, DEFAULT_HARDCODED_TESTNET, DEFAULT_NETWORK_DIR, DEFAULT_ROOT_DIR,
+};
 use discv5::{Discv5Config, Discv5ConfigBuilder};
 use libp2p::gossipsub::{
     GossipsubConfig, GossipsubConfigBuilder, GossipsubMessage, MessageId, ValidationMode,
@@ -77,9 +80,14 @@ pub struct Config {
 impl Default for Config {
     /// Generate a default network configuration.
     fn default() -> Self {
-        let mut network_dir = dirs::home_dir().unwrap_or_else(|| PathBuf::from("."));
-        network_dir.push(".lighthouse");
-        network_dir.push("network");
+        // WARNING: this directory default should be always overrided with parameters
+        // from cli for specific networks.
+        let network_dir = dirs::home_dir()
+            .unwrap_or_else(|| PathBuf::from("."))
+            .join(DEFAULT_ROOT_DIR)
+            .join(DEFAULT_HARDCODED_TESTNET)
+            .join(DEFAULT_BEACON_NODE_DIR)
+            .join(DEFAULT_NETWORK_DIR);

         // The function used to generate a gossipsub message id
         // We use the first 8 bytes of SHA256(data) for content addressing
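Note: the default network directory now nests under the shared root and a hard-coded testnet name rather than the flat `~/.lighthouse/network`. A self-contained sketch of the resulting layout; the concrete constant values are assumptions (the real constants live in `common/directory`, and only `DEFAULT_ROOT_DIR = ".lighthouse"` is visible elsewhere in this diff), and the `dirs` crate is an external dependency:

    use std::path::PathBuf;

    // Assumed values, for illustration only.
    const DEFAULT_ROOT_DIR: &str = ".lighthouse";
    const DEFAULT_HARDCODED_TESTNET: &str = "medalla"; // hypothetical testnet name
    const DEFAULT_BEACON_NODE_DIR: &str = "beacon";
    const DEFAULT_NETWORK_DIR: &str = "network";

    fn main() {
        let network_dir = dirs::home_dir()
            .unwrap_or_else(|| PathBuf::from("."))
            .join(DEFAULT_ROOT_DIR)
            .join(DEFAULT_HARDCODED_TESTNET)
            .join(DEFAULT_BEACON_NODE_DIR)
            .join(DEFAULT_NETWORK_DIR);
        // e.g. ~/.lighthouse/medalla/beacon/network
        println!("{}", network_dir.display());
    }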
@@ -115,6 +123,7 @@ impl Default for Config {
             .request_retries(1)
             .enr_peer_update_min(10)
             .query_parallelism(5)
+            .disable_report_discovered_peers()
             .query_timeout(Duration::from_secs(30))
             .query_peer_timeout(Duration::from_secs(2))
             .ip_limit() // limits /24 IP's in buckets.
@@ -129,7 +129,6 @@ pub fn create_enr_builder_from_config<T: EnrKey>(config: &NetworkConfig) -> EnrB
         builder.udp(udp_port);
     }
     // we always give it our listening tcp port
-    // TODO: Add uPnP support to map udp and tcp ports
     let tcp_port = config.enr_tcp_port.unwrap_or_else(|| config.libp2p_port);
     builder.tcp(tcp_port).tcp(config.libp2p_port);
     builder
@@ -144,12 +143,12 @@ pub fn build_enr<T: EthSpec>(
     let mut builder = create_enr_builder_from_config(config);

     // set the `eth2` field on our ENR
-    builder.add_value(ETH2_ENR_KEY.into(), enr_fork_id.as_ssz_bytes());
+    builder.add_value(ETH2_ENR_KEY, &enr_fork_id.as_ssz_bytes());

     // set the "attnets" field on our ENR
     let bitfield = BitVector::<T::SubnetBitfieldLength>::new();

-    builder.add_value(BITFIELD_ENR_KEY.into(), bitfield.as_ssz_bytes());
+    builder.add_value(BITFIELD_ENR_KEY, &bitfield.as_ssz_bytes());

     builder
         .build(enr_key)
@@ -365,7 +365,7 @@ impl<TSpec: EthSpec> Discovery<TSpec> {
     /// If the external address needs to be modified, use `update_enr_udp_socket.
     pub fn update_enr_tcp_port(&mut self, port: u16) -> Result<(), String> {
         self.discv5
-            .enr_insert("tcp", port.to_be_bytes().into())
+            .enr_insert("tcp", &port.to_be_bytes())
             .map_err(|e| format!("{:?}", e))?;

         // replace the global version
@@ -383,18 +383,18 @@ impl<TSpec: EthSpec> Discovery<TSpec> {
         match socket_addr {
             SocketAddr::V4(socket) => {
                 self.discv5
-                    .enr_insert("ip", socket.ip().octets().into())
+                    .enr_insert("ip", &socket.ip().octets())
                     .map_err(|e| format!("{:?}", e))?;
                 self.discv5
-                    .enr_insert("udp", socket.port().to_be_bytes().into())
+                    .enr_insert("udp", &socket.port().to_be_bytes())
                     .map_err(|e| format!("{:?}", e))?;
             }
             SocketAddr::V6(socket) => {
                 self.discv5
-                    .enr_insert("ip6", socket.ip().octets().into())
+                    .enr_insert("ip6", &socket.ip().octets())
                     .map_err(|e| format!("{:?}", e))?;
                 self.discv5
-                    .enr_insert("udp6", socket.port().to_be_bytes().into())
+                    .enr_insert("udp6", &socket.port().to_be_bytes())
                     .map_err(|e| format!("{:?}", e))?;
             }
         }
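Note: every `enr_insert` call site drops the `.into()` conversion and passes a plain byte slice, which lines up with the discv5 bump to 0.1.0-alpha.13 earlier in this diff; the exact new signature is an assumption inferred from these call sites. A minimal sketch of the pattern:

    // Sketch under assumptions: discv5's enr_insert now borrows the value bytes,
    // roughly `fn enr_insert(&self, key: &str, value: &[u8]) -> Result<...>`.
    let port: u16 = 9000;
    discv5
        .enr_insert("tcp", &port.to_be_bytes())
        .map_err(|e| format!("{:?}", e))?;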
@@ -439,7 +439,7 @@ impl<TSpec: EthSpec> Discovery<TSpec> {

         // insert the bitfield into the ENR record
         self.discv5
-            .enr_insert(BITFIELD_ENR_KEY, current_bitfield.as_ssz_bytes())
+            .enr_insert(BITFIELD_ENR_KEY, &current_bitfield.as_ssz_bytes())
             .map_err(|e| format!("{:?}", e))?;

         // replace the global version
@@ -468,7 +468,7 @@ impl<TSpec: EthSpec> Discovery<TSpec> {

         let _ = self
             .discv5
-            .enr_insert(ETH2_ENR_KEY, enr_fork_id.as_ssz_bytes())
+            .enr_insert(ETH2_ENR_KEY, &enr_fork_id.as_ssz_bytes())
             .map_err(|e| {
                 warn!(
                     self.log,
@@ -858,7 +858,10 @@ impl<TSpec: EthSpec> Discovery<TSpec> {
                 // Still awaiting the event stream, poll it
                 if let Poll::Ready(event_stream) = fut.poll_unpin(cx) {
                     match event_stream {
-                        Ok(stream) => self.event_stream = EventStream::Present(stream),
+                        Ok(stream) => {
+                            debug!(self.log, "Discv5 event stream ready");
+                            self.event_stream = EventStream::Present(stream);
+                        }
                         Err(e) => {
                             slog::crit!(self.log, "Discv5 event stream failed"; "error" => e.to_string());
                             self.event_stream = EventStream::InActive;
@@ -147,8 +147,7 @@ impl<TSpec: EthSpec> PeerManager<TSpec> {
     ///
     /// If the peer doesn't exist, log a warning and insert defaults.
     pub fn report_peer(&mut self, peer_id: &PeerId, action: PeerAction) {
-        // TODO: Remove duplicate code - This is duplicated in the update_peer_scores()
-        // function.
+        // NOTE: This is duplicated in the update_peer_scores() and could be improved.

         // Variables to update the PeerDb if required.
         let mut ban_peer = None;
@@ -179,7 +178,6 @@ impl<TSpec: EthSpec> PeerManager<TSpec> {
                             GoodbyeReason::BadScore,
                         ));
                     }
-                    // TODO: Update the peer manager to inform that the peer is disconnecting.
                 }
                 ScoreState::Healthy => {
                     debug!(self.log, "Peer transitioned to healthy state"; "peer_id" => peer_id.to_string(), "score" => info.score().to_string(), "past_state" => previous_state.to_string());
@@ -322,15 +320,6 @@ impl<TSpec: EthSpec> PeerManager<TSpec> {
         self.connect_peer(peer_id, ConnectingType::OutgoingConnected { multiaddr })
     }

-    /// Updates the database informing that a peer is being disconnected.
-    pub fn _disconnecting_peer(&mut self, _peer_id: &PeerId) -> bool {
-        // TODO: implement
-        // This informs the database that we are in the process of disconnecting the
-        // peer. Currently this state only exists for a short period of time before we force the
-        // disconnection.
-        true
-    }
-
     /// Reports if a peer is banned or not.
     ///
     /// This is used to determine if we should accept incoming connections.
@@ -408,10 +397,7 @@ impl<TSpec: EthSpec> PeerManager<TSpec> {
                 // Not supporting a protocol shouldn't be considered a malicious action, but
                 // it is an action that in some cases will make the peer unfit to continue
                 // communicating.
-                // TODO: To avoid punishing a peer repeatedly for not supporting a protocol, this
-                // information could be stored and used to prevent sending requests for the given
-                // protocol to this peer. Similarly, to avoid blacklisting a peer for a protocol
-                // forever, if stored this information should expire.
                 match protocol {
                     Protocol::Ping => PeerAction::Fatal,
                     Protocol::BlocksByRange => return,
@@ -445,7 +431,6 @@ impl<TSpec: EthSpec> PeerManager<TSpec> {

     /// A ping request has been received.
     // NOTE: The behaviour responds with a PONG automatically
-    // TODO: Update last seen
     pub fn ping_request(&mut self, peer_id: &PeerId, seq: u64) {
         if let Some(peer_info) = self.network_globals.peers.read().peer_info(peer_id) {
             // received a ping
@@ -475,7 +460,6 @@ impl<TSpec: EthSpec> PeerManager<TSpec> {
     }

     /// A PONG has been returned from a peer.
-    // TODO: Update last seen
     pub fn pong_response(&mut self, peer_id: &PeerId, seq: u64) {
         if let Some(peer_info) = self.network_globals.peers.read().peer_info(peer_id) {
             // received a pong
@@ -501,7 +485,6 @@ impl<TSpec: EthSpec> PeerManager<TSpec> {
     }

     /// Received a metadata response from a peer.
-    // TODO: Update last seen
     pub fn meta_data_response(&mut self, peer_id: &PeerId, meta_data: MetaData<TSpec>) {
         if let Some(peer_info) = self.network_globals.peers.write().peer_info_mut(peer_id) {
             if let Some(known_meta_data) = &peer_info.meta_data {
@@ -597,7 +580,7 @@ impl<TSpec: EthSpec> PeerManager<TSpec> {
         let connected_or_dialing = self.network_globals.connected_or_dialing_peers();
         for (peer_id, min_ttl) in results {
             // we attempt a connection if this peer is a subnet peer or if the max peer count
-            // is not yet filled (including dialling peers)
+            // is not yet filled (including dialing peers)
             if (min_ttl.is_some() || connected_or_dialing + to_dial_peers.len() < self.max_peers)
                 && !self
                     .network_globals
@@ -610,7 +593,6 @@ impl<TSpec: EthSpec> PeerManager<TSpec> {
                     .read()
                     .is_banned_or_disconnected(&peer_id)
             {
-                // TODO: Update output
                 // This should be updated with the peer dialing. In fact created once the peer is
                 // dialed
                 if let Some(min_ttl) = min_ttl {
@ -699,58 +681,6 @@ impl<TSpec: EthSpec> PeerManager<TSpec> {
|
|||||||
// Update scores
|
// Update scores
|
||||||
info.score_update();
|
info.score_update();
|
||||||
|
|
||||||
/* TODO: Implement logic about connection lifetimes
|
|
||||||
match info.connection_status {
|
|
||||||
Connected { .. } => {
|
|
||||||
// Connected peers gain reputation by sending useful messages
|
|
||||||
}
|
|
||||||
Disconnected { since } | Banned { since } => {
|
|
||||||
// For disconnected peers, lower their reputation by 1 for every hour they
|
|
||||||
// stay disconnected. This helps us slowly forget disconnected peers.
|
|
||||||
// In the same way, slowly allow banned peers back again.
|
|
||||||
let dc_hours = now
|
|
||||||
.checked_duration_since(since)
|
|
||||||
.unwrap_or_else(|| Duration::from_secs(0))
|
|
||||||
.as_secs()
|
|
||||||
/ 3600;
|
|
||||||
let last_dc_hours = self
|
|
||||||
._last_updated
|
|
||||||
.checked_duration_since(since)
|
|
||||||
.unwrap_or_else(|| Duration::from_secs(0))
|
|
||||||
.as_secs()
|
|
||||||
/ 3600;
|
|
||||||
if dc_hours > last_dc_hours {
|
|
||||||
// this should be 1 most of the time
|
|
||||||
let rep_dif = (dc_hours - last_dc_hours)
|
|
||||||
.try_into()
|
|
||||||
.unwrap_or(Rep::max_value());
|
|
||||||
|
|
||||||
info.reputation = if info.connection_status.is_banned() {
|
|
||||||
info.reputation.saturating_add(rep_dif)
|
|
||||||
} else {
|
|
||||||
info.reputation.saturating_sub(rep_dif)
|
|
||||||
};
|
|
||||||
}
|
|
||||||
}
|
|
||||||
Dialing { since } => {
|
|
||||||
// A peer shouldn't be dialing for more than 2 minutes
|
|
||||||
if since.elapsed().as_secs() > 120 {
|
|
||||||
warn!(self.log,"Peer has been dialing for too long"; "peer_id" => id.to_string());
|
|
||||||
// TODO: decide how to handle this
|
|
||||||
}
|
|
||||||
}
|
|
||||||
Unknown => {} //TODO: Handle this case
|
|
||||||
}
|
|
||||||
// Check if the peer gets banned or unbanned and if it should be disconnected
|
|
||||||
if info.reputation < _MIN_REP_BEFORE_BAN && !info.connection_status.is_banned() {
|
|
||||||
// This peer gets banned. Check if we should request disconnection
|
|
||||||
ban_queue.push(id.clone());
|
|
||||||
} else if info.reputation >= _MIN_REP_BEFORE_BAN && info.connection_status.is_banned() {
|
|
||||||
// This peer gets unbanned
|
|
||||||
unban_queue.push(id.clone());
|
|
||||||
}
|
|
||||||
*/
|
|
||||||
|
|
||||||
// handle score transitions
|
// handle score transitions
|
||||||
if previous_state != info.score_state() {
|
if previous_state != info.score_state() {
|
||||||
match info.score_state() {
|
match info.score_state() {
|
||||||
@ -774,7 +704,6 @@ impl<TSpec: EthSpec> PeerManager<TSpec> {
|
|||||||
GoodbyeReason::BadScore,
|
GoodbyeReason::BadScore,
|
||||||
));
|
));
|
||||||
}
|
}
|
||||||
// TODO: Update peer manager to report that it's disconnecting.
|
|
||||||
}
|
}
|
||||||
ScoreState::Healthy => {
|
ScoreState::Healthy => {
|
||||||
debug!(self.log, "Peer transitioned to healthy state"; "peer_id" => peer_id.to_string(), "score" => info.score().to_string(), "past_state" => previous_state.to_string());
|
debug!(self.log, "Peer transitioned to healthy state"; "peer_id" => peer_id.to_string(), "score" => info.score().to_string(), "past_state" => previous_state.to_string());
|
||||||
@ -838,9 +767,6 @@ impl<TSpec: EthSpec> PeerManager<TSpec> {
|
|||||||
///
|
///
|
||||||
/// NOTE: Discovery will only add a new query if one isn't already queued.
|
/// NOTE: Discovery will only add a new query if one isn't already queued.
|
||||||
fn heartbeat(&mut self) {
|
fn heartbeat(&mut self) {
|
||||||
// TODO: Provide a back-off time for discovery queries. I.e Queue many initially, then only
|
|
||||||
// perform discoveries over a larger fixed interval. Perhaps one every 6 heartbeats. This
|
|
||||||
// is achievable with a leaky bucket
|
|
||||||
let peer_count = self.network_globals.connected_or_dialing_peers();
|
let peer_count = self.network_globals.connected_or_dialing_peers();
|
||||||
if peer_count < self.target_peers {
|
if peer_count < self.target_peers {
|
||||||
// If we need more peers, queue a discovery lookup.
|
// If we need more peers, queue a discovery lookup.
|
||||||
|
@ -130,7 +130,6 @@ impl<TSpec: EthSpec> PeerDB<TSpec> {
|
|||||||
}
|
}
|
||||||
|
|
||||||
/// Returns a mutable reference to a peer's info if known.
|
/// Returns a mutable reference to a peer's info if known.
|
||||||
/// TODO: make pub(super) to ensure that peer management is unified
|
|
||||||
pub fn peer_info_mut(&mut self, peer_id: &PeerId) -> Option<&mut PeerInfo<TSpec>> {
|
pub fn peer_info_mut(&mut self, peer_id: &PeerId) -> Option<&mut PeerInfo<TSpec>> {
|
||||||
self.peers.get_mut(peer_id)
|
self.peers.get_mut(peer_id)
|
||||||
}
|
}
|
||||||
|
@ -25,8 +25,6 @@ use std::{
|
|||||||
use tokio::time::{delay_queue, delay_until, Delay, DelayQueue, Instant as TInstant};
|
use tokio::time::{delay_queue, delay_until, Delay, DelayQueue, Instant as TInstant};
|
||||||
use types::EthSpec;
|
use types::EthSpec;
|
||||||
|
|
||||||
//TODO: Implement check_timeout() on the substream types
|
|
||||||
|
|
||||||
/// The time (in seconds) before a substream that is awaiting a response from the user times out.
|
/// The time (in seconds) before a substream that is awaiting a response from the user times out.
|
||||||
pub const RESPONSE_TIMEOUT: u64 = 10;
|
pub const RESPONSE_TIMEOUT: u64 = 10;
|
||||||
|
|
||||||
@ -163,8 +161,6 @@ struct OutboundInfo<TSpec: EthSpec> {
|
|||||||
/// Info over the protocol this substream is handling.
|
/// Info over the protocol this substream is handling.
|
||||||
proto: Protocol,
|
proto: Protocol,
|
||||||
/// Number of chunks to be seen from the peer's response.
|
/// Number of chunks to be seen from the peer's response.
|
||||||
// TODO: removing the option could allow clossing the streams after the number of
|
|
||||||
// expected responses is met for all protocols.
|
|
||||||
remaining_chunks: Option<u64>,
|
remaining_chunks: Option<u64>,
|
||||||
/// `RequestId` as given by the application that sent the request.
|
/// `RequestId` as given by the application that sent the request.
|
||||||
req_id: RequestId,
|
req_id: RequestId,
|
||||||
|
@ -59,7 +59,7 @@ pub struct Service<TSpec: EthSpec> {
|
|||||||
|
|
||||||
impl<TSpec: EthSpec> Service<TSpec> {
|
impl<TSpec: EthSpec> Service<TSpec> {
|
||||||
pub async fn new(
|
pub async fn new(
|
||||||
executor: environment::TaskExecutor,
|
executor: task_executor::TaskExecutor,
|
||||||
config: &NetworkConfig,
|
config: &NetworkConfig,
|
||||||
enr_fork_id: EnrForkId,
|
enr_fork_id: EnrForkId,
|
||||||
log: &slog::Logger,
|
log: &slog::Logger,
|
||||||
@ -109,7 +109,7 @@ impl<TSpec: EthSpec> Service<TSpec> {
|
|||||||
Behaviour::new(&local_keypair, config, network_globals.clone(), &log).await?;
|
Behaviour::new(&local_keypair, config, network_globals.clone(), &log).await?;
|
||||||
|
|
||||||
// use the executor for libp2p
|
// use the executor for libp2p
|
||||||
struct Executor(environment::TaskExecutor);
|
struct Executor(task_executor::TaskExecutor);
|
||||||
impl libp2p::core::Executor for Executor {
|
impl libp2p::core::Executor for Executor {
|
||||||
fn exec(&self, f: Pin<Box<dyn Future<Output = ()> + Send>>) {
|
fn exec(&self, f: Pin<Box<dyn Future<Output = ()> + Send>>) {
|
||||||
self.0.spawn(f, "libp2p");
|
self.0.spawn(f, "libp2p");
|
||||||
|
@ -99,7 +99,7 @@ pub async fn build_libp2p_instance(boot_nodes: Vec<Enr>, log: slog::Logger) -> L
|
|||||||
|
|
||||||
let (signal, exit) = exit_future::signal();
|
let (signal, exit) = exit_future::signal();
|
||||||
let (shutdown_tx, _) = futures::channel::mpsc::channel(1);
|
let (shutdown_tx, _) = futures::channel::mpsc::channel(1);
|
||||||
let executor = environment::TaskExecutor::new(
|
let executor = task_executor::TaskExecutor::new(
|
||||||
tokio::runtime::Handle::current(),
|
tokio::runtime::Handle::current(),
|
||||||
exit,
|
exit,
|
||||||
log.clone(),
|
log.clone(),
|
||||||
|
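The executor change above only relocates the type from the `environment` crate to its own `task_executor` crate; construction stays shaped like the test helper shown. A minimal sketch of standing one up, where the trailing `shutdown_tx` argument is an assumption inferred from the channel created immediately before the call (the diff truncates the argument list):

use slog::Logger;

// Sketch only: the fourth argument is assumed from the
// `futures::channel::mpsc::channel(1)` built just before the call above.
fn build_executor(log: Logger) -> (exit_future::Signal, task_executor::TaskExecutor) {
    // Keep `signal` alive; dropping it resolves `exit` and cancels spawned tasks.
    let (signal, exit) = exit_future::signal();
    let (shutdown_tx, _shutdown_rx) = futures::channel::mpsc::channel(1);
    let executor = task_executor::TaskExecutor::new(
        tokio::runtime::Handle::current(),
        exit,
        log,
        shutdown_tx,
    );
    (signal, executor)
}

// Usage mirrors the libp2p `Executor` wrapper above:
// executor.spawn(async { /* long-lived service */ }, "example");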
@@ -12,16 +12,16 @@ futures = "0.3.5"
types = { path = "../../consensus/types"}
environment = { path = "../../lighthouse/environment"}
eth1 = { path = "../eth1"}
-rayon = "1.3.0"
+rayon = "1.4.1"
state_processing = { path = "../../consensus/state_processing" }
merkle_proof = { path = "../../consensus/merkle_proof" }
eth2_ssz = "0.1.2"
eth2_hashing = "0.1.0"
-tree_hash = "0.1.0"
+tree_hash = "0.1.1"
tokio = { version = "0.2.22", features = ["full"] }
parking_lot = "0.11.0"
slog = "2.5.2"
exit-future = "0.2.0"
-serde = "1.0.110"
+serde = "1.0.116"
-serde_derive = "1.0.110"
+serde_derive = "1.0.116"
int_to_bytes = { path = "../../consensus/int_to_bytes" }
32 beacon_node/http_api/Cargo.toml Normal file
@@ -0,0 +1,32 @@
[package]
name = "http_api"
version = "0.1.0"
authors = ["Paul Hauner <paul@paulhauner.com>"]
edition = "2018"

[dependencies]
warp = "0.2.5"
serde = { version = "1.0.116", features = ["derive"] }
tokio = { version = "0.2.22", features = ["macros"] }
parking_lot = "0.11.0"
types = { path = "../../consensus/types" }
hex = "0.4.2"
beacon_chain = { path = "../beacon_chain" }
eth2 = { path = "../../common/eth2", features = ["lighthouse"] }
slog = "2.5.2"
network = { path = "../network" }
eth2_libp2p = { path = "../eth2_libp2p" }
eth1 = { path = "../eth1" }
fork_choice = { path = "../../consensus/fork_choice" }
state_processing = { path = "../../consensus/state_processing" }
lighthouse_version = { path = "../../common/lighthouse_version" }
lighthouse_metrics = { path = "../../common/lighthouse_metrics" }
lazy_static = "1.4.0"
warp_utils = { path = "../../common/warp_utils" }
slot_clock = { path = "../../common/slot_clock" }

[dev-dependencies]
store = { path = "../store" }
environment = { path = "../../lighthouse/environment" }
tree_hash = "0.1.1"
discv5 = { version = "0.1.0-alpha.13", features = ["libp2p"] }
185 beacon_node/http_api/src/beacon_proposer_cache.rs Normal file
@@ -0,0 +1,185 @@
use crate::metrics;
use beacon_chain::{BeaconChain, BeaconChainError, BeaconChainTypes};
use eth2::types::ProposerData;
use fork_choice::ProtoBlock;
use slot_clock::SlotClock;
use state_processing::per_slot_processing;
use types::{BeaconState, Epoch, EthSpec, Hash256, PublicKeyBytes};

/// This sets a maximum bound on the number of epochs to skip whilst instantiating the cache for
/// the first time.
const EPOCHS_TO_SKIP: u64 = 2;

/// Caches the beacon block proposers for a given `epoch` and `epoch_boundary_root`.
///
/// This cache is only able to contain a single set of proposers and is only
/// intended to cache the proposers for the current epoch according to the head
/// of the chain. A change in epoch or re-org to a different chain may cause a
/// cache miss and rebuild.
pub struct BeaconProposerCache {
    epoch: Epoch,
    decision_block_root: Hash256,
    proposers: Vec<ProposerData>,
}

impl BeaconProposerCache {
    /// Create a new cache for the current epoch of the `chain`.
    pub fn new<T: BeaconChainTypes>(chain: &BeaconChain<T>) -> Result<Self, BeaconChainError> {
        let head_root = chain.head_beacon_block_root()?;
        let head_block = chain
            .fork_choice
            .read()
            .get_block(&head_root)
            .ok_or_else(|| BeaconChainError::MissingBeaconBlock(head_root))?;

        // If the head epoch is more than `EPOCHS_TO_SKIP` in the future, just build the cache at
        // the epoch of the head. This prevents doing a massive amount of skip slots when starting
        // a new database from genesis.
        let epoch = {
            let epoch_now = chain
                .epoch()
                .unwrap_or_else(|_| chain.spec.genesis_slot.epoch(T::EthSpec::slots_per_epoch()));
            let head_epoch = head_block.slot.epoch(T::EthSpec::slots_per_epoch());
            if epoch_now > head_epoch + EPOCHS_TO_SKIP {
                head_epoch
            } else {
                epoch_now
            }
        };

        Self::for_head_block(chain, epoch, head_root, head_block)
    }

    /// Create a new cache that contains the shuffling for `current_epoch`,
    /// assuming that `head_root` and `head_block` represents the most recent
    /// canonical block.
    fn for_head_block<T: BeaconChainTypes>(
        chain: &BeaconChain<T>,
        current_epoch: Epoch,
        head_root: Hash256,
        head_block: ProtoBlock,
    ) -> Result<Self, BeaconChainError> {
        let _timer = metrics::start_timer(&metrics::HTTP_API_BEACON_PROPOSER_CACHE_TIMES);

        let mut head_state = chain
            .get_state(&head_block.state_root, Some(head_block.slot))?
            .ok_or_else(|| BeaconChainError::MissingBeaconState(head_block.state_root))?;

        let decision_block_root = Self::decision_block_root(current_epoch, head_root, &head_state)?;

        // We *must* skip forward to the current epoch to obtain valid proposer
        // duties. We cannot skip to the previous epoch, like we do with
        // attester duties.
        while head_state.current_epoch() < current_epoch {
            // Skip slots until the current epoch, providing `Hash256::zero()` as the state root
            // since we don't require it to be valid to identify producers.
            per_slot_processing(&mut head_state, Some(Hash256::zero()), &chain.spec)?;
        }

        let proposers = current_epoch
            .slot_iter(T::EthSpec::slots_per_epoch())
            .map(|slot| {
                head_state
                    .get_beacon_proposer_index(slot, &chain.spec)
                    .map_err(BeaconChainError::from)
                    .and_then(|i| {
                        let pubkey = chain
                            .validator_pubkey(i)?
                            .ok_or_else(|| BeaconChainError::ValidatorPubkeyCacheIncomplete(i))?;

                        Ok(ProposerData {
                            pubkey: PublicKeyBytes::from(pubkey),
                            slot,
                        })
                    })
            })
            .collect::<Result<_, _>>()?;

        Ok(Self {
            epoch: current_epoch,
            decision_block_root,
            proposers,
        })
    }

    /// Returns a block root which can be used to key the shuffling obtained from the following
    /// parameters:
    ///
    /// - `shuffling_epoch`: the epoch for which the shuffling pertains.
    /// - `head_block_root`: the block root at the head of the chain.
    /// - `head_block_state`: the state of `head_block_root`.
    pub fn decision_block_root<E: EthSpec>(
        shuffling_epoch: Epoch,
        head_block_root: Hash256,
        head_block_state: &BeaconState<E>,
    ) -> Result<Hash256, BeaconChainError> {
        let decision_slot = shuffling_epoch
            .start_slot(E::slots_per_epoch())
            .saturating_sub(1_u64);

        // If decision slot is equal to or ahead of the head, the block root is the head block root
        if decision_slot >= head_block_state.slot {
            Ok(head_block_root)
        } else {
            head_block_state
                .get_block_root(decision_slot)
                .map(|root| *root)
                .map_err(Into::into)
        }
    }

    /// Return the proposers for the given `Epoch`.
    ///
    /// The cache may be rebuilt if:
    ///
    /// - The epoch has changed since the last cache build.
    /// - There has been a re-org that crosses an epoch boundary.
    pub fn get_proposers<T: BeaconChainTypes>(
        &mut self,
        chain: &BeaconChain<T>,
        epoch: Epoch,
    ) -> Result<Vec<ProposerData>, warp::Rejection> {
        let current_epoch = chain
            .slot_clock
            .now_or_genesis()
            .ok_or_else(|| {
                warp_utils::reject::custom_server_error("unable to read slot clock".to_string())
            })?
            .epoch(T::EthSpec::slots_per_epoch());

        // Disallow requests that are outside the current epoch. This ensures the cache doesn't get
        // washed-out with old values.
        if current_epoch != epoch {
            return Err(warp_utils::reject::custom_bad_request(format!(
                "requested epoch is {} but only current epoch {} is allowed",
                epoch, current_epoch
            )));
        }

        let (head_block_root, head_decision_block_root) = chain
            .with_head(|head| {
                Self::decision_block_root(current_epoch, head.beacon_block_root, &head.beacon_state)
                    .map(|decision_root| (head.beacon_block_root, decision_root))
            })
            .map_err(warp_utils::reject::beacon_chain_error)?;

        let head_block = chain
            .fork_choice
            .read()
            .get_block(&head_block_root)
            .ok_or_else(|| BeaconChainError::MissingBeaconBlock(head_block_root))
            .map_err(warp_utils::reject::beacon_chain_error)?;

        // Rebuild the cache if this call causes a cache-miss.
        if self.epoch != current_epoch || self.decision_block_root != head_decision_block_root {
            metrics::inc_counter(&metrics::HTTP_API_BEACON_PROPOSER_CACHE_MISSES_TOTAL);

            *self = Self::for_head_block(chain, current_epoch, head_block_root, head_block)
                .map_err(warp_utils::reject::beacon_chain_error)?;
        } else {
            metrics::inc_counter(&metrics::HTTP_API_BEACON_PROPOSER_CACHE_HITS_TOTAL);
        }

        Ok(self.proposers.clone())
    }
}
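The `decision_block_root` rule above is easiest to see with concrete numbers: the proposer shuffling for an epoch is pinned by whichever block occupied the last slot of the previous epoch. A self-contained restatement, illustrative only, with plain integers standing in for `Epoch`/`Slot`:

fn decision_slot(shuffling_epoch: u64, slots_per_epoch: u64) -> u64 {
    // start_slot(shuffling_epoch) - 1, clamped at genesis, as in the method above.
    (shuffling_epoch * slots_per_epoch).saturating_sub(1)
}

fn main() {
    assert_eq!(decision_slot(0, 32), 0); // genesis epoch clamps to slot 0
    assert_eq!(decision_slot(1, 32), 31); // last slot of epoch 0
    assert_eq!(decision_slot(2, 32), 63); // last slot of epoch 1
}

If that slot is at or beyond the head, the head block root itself becomes the key, which is what makes a re-org across an epoch boundary surface as a cache miss in `get_proposers`.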
87 beacon_node/http_api/src/block_id.rs Normal file
@@ -0,0 +1,87 @@
use beacon_chain::{BeaconChain, BeaconChainTypes};
use eth2::types::BlockId as CoreBlockId;
use std::str::FromStr;
use types::{Hash256, SignedBeaconBlock, Slot};

/// Wraps `eth2::types::BlockId` and provides a simple way to obtain a block or root for a given
/// `BlockId`.
#[derive(Debug)]
pub struct BlockId(pub CoreBlockId);

impl BlockId {
    pub fn from_slot(slot: Slot) -> Self {
        Self(CoreBlockId::Slot(slot))
    }

    pub fn from_root(root: Hash256) -> Self {
        Self(CoreBlockId::Root(root))
    }

    /// Return the block root identified by `self`.
    pub fn root<T: BeaconChainTypes>(
        &self,
        chain: &BeaconChain<T>,
    ) -> Result<Hash256, warp::Rejection> {
        match &self.0 {
            CoreBlockId::Head => chain
                .head_info()
                .map(|head| head.block_root)
                .map_err(warp_utils::reject::beacon_chain_error),
            CoreBlockId::Genesis => Ok(chain.genesis_block_root),
            CoreBlockId::Finalized => chain
                .head_info()
                .map(|head| head.finalized_checkpoint.root)
                .map_err(warp_utils::reject::beacon_chain_error),
            CoreBlockId::Justified => chain
                .head_info()
                .map(|head| head.current_justified_checkpoint.root)
                .map_err(warp_utils::reject::beacon_chain_error),
            CoreBlockId::Slot(slot) => chain
                .block_root_at_slot(*slot)
                .map_err(warp_utils::reject::beacon_chain_error)
                .and_then(|root_opt| {
                    root_opt.ok_or_else(|| {
                        warp_utils::reject::custom_not_found(format!(
                            "beacon block at slot {}",
                            slot
                        ))
                    })
                }),
            CoreBlockId::Root(root) => Ok(*root),
        }
    }

    /// Return the `SignedBeaconBlock` identified by `self`.
    pub fn block<T: BeaconChainTypes>(
        &self,
        chain: &BeaconChain<T>,
    ) -> Result<SignedBeaconBlock<T::EthSpec>, warp::Rejection> {
        match &self.0 {
            CoreBlockId::Head => chain
                .head_beacon_block()
                .map_err(warp_utils::reject::beacon_chain_error),
            _ => {
                let root = self.root(chain)?;
                chain
                    .get_block(&root)
                    .map_err(warp_utils::reject::beacon_chain_error)
                    .and_then(|root_opt| {
                        root_opt.ok_or_else(|| {
                            warp_utils::reject::custom_not_found(format!(
                                "beacon block with root {}",
                                root
                            ))
                        })
                    })
            }
        }
    }
}

impl FromStr for BlockId {
    type Err = String;

    fn from_str(s: &str) -> Result<Self, Self::Err> {
        CoreBlockId::from_str(s).map(Self)
    }
}
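A hypothetical caller, to show the intended flow. The `chain` handle and the error plumbing here are illustrative assumptions, although `custom_bad_request` is used elsewhere in this commit with a `String` argument, which is what `from_str` yields on failure:

use std::str::FromStr;

// Sketch: resolve a user-supplied identifier ("head", "genesis", "finalized",
// a slot number, or a 0x-prefixed root) into a block root.
fn block_root_for<T: BeaconChainTypes>(
    s: &str,
    chain: &BeaconChain<T>,
) -> Result<Hash256, warp::Rejection> {
    let block_id = BlockId::from_str(s).map_err(warp_utils::reject::custom_bad_request)?;
    block_id.root(chain)
}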
1730 beacon_node/http_api/src/lib.rs Normal file
File diff suppressed because it is too large
32 beacon_node/http_api/src/metrics.rs Normal file
@@ -0,0 +1,32 @@
pub use lighthouse_metrics::*;

lazy_static::lazy_static! {
    pub static ref HTTP_API_PATHS_TOTAL: Result<IntCounterVec> = try_create_int_counter_vec(
        "http_api_paths_total",
        "Count of HTTP requests received",
        &["path"]
    );
    pub static ref HTTP_API_STATUS_CODES_TOTAL: Result<IntCounterVec> = try_create_int_counter_vec(
        "http_api_status_codes_total",
        "Count of HTTP status codes returned",
        &["status"]
    );
    pub static ref HTTP_API_PATHS_TIMES: Result<HistogramVec> = try_create_histogram_vec(
        "http_api_paths_times",
        "Duration to process HTTP requests per path",
        &["path"]
    );

    pub static ref HTTP_API_BEACON_PROPOSER_CACHE_TIMES: Result<Histogram> = try_create_histogram(
        "http_api_beacon_proposer_cache_build_times",
        "Duration to rebuild the beacon proposer cache",
    );
    pub static ref HTTP_API_BEACON_PROPOSER_CACHE_HITS_TOTAL: Result<IntCounter> = try_create_int_counter(
        "http_api_beacon_proposer_cache_hits_total",
        "Count of times the proposer cache has been hit",
    );
    pub static ref HTTP_API_BEACON_PROPOSER_CACHE_MISSES_TOTAL: Result<IntCounter> = try_create_int_counter(
        "http_api_beacon_proposer_cache_misses_total",
        "Count of times the proposer cache has been missed",
    );
}
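How these vectors are meant to be driven from the request path, as a sketch: `inc_counter_vec` is re-exported from `lighthouse_metrics` via the `pub use` above, while `start_timer_vec` with a guard-style return is an assumption about that crate's API rather than something this diff shows.

fn observe(path: &str) {
    // Count the request against its path label.
    metrics::inc_counter_vec(&metrics::HTTP_API_PATHS_TOTAL, &[path]);
    // Assumed helper: records wall time until the guard drops at request end.
    let _timer = metrics::start_timer_vec(&metrics::HTTP_API_PATHS_TIMES, &[path]);
    // ... handle the request while the timer guard is alive ...
}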
118 beacon_node/http_api/src/state_id.rs Normal file
@@ -0,0 +1,118 @@
use beacon_chain::{BeaconChain, BeaconChainTypes};
use eth2::types::StateId as CoreStateId;
use std::str::FromStr;
use types::{BeaconState, EthSpec, Fork, Hash256, Slot};

/// Wraps `eth2::types::StateId` and provides common state-access functionality. E.g., reading
/// states or parts of states from the database.
pub struct StateId(CoreStateId);

impl StateId {
    pub fn head() -> Self {
        Self(CoreStateId::Head)
    }

    pub fn slot(slot: Slot) -> Self {
        Self(CoreStateId::Slot(slot))
    }

    /// Return the state root identified by `self`.
    pub fn root<T: BeaconChainTypes>(
        &self,
        chain: &BeaconChain<T>,
    ) -> Result<Hash256, warp::Rejection> {
        let slot = match &self.0 {
            CoreStateId::Head => {
                return chain
                    .head_info()
                    .map(|head| head.state_root)
                    .map_err(warp_utils::reject::beacon_chain_error)
            }
            CoreStateId::Genesis => return Ok(chain.genesis_state_root),
            CoreStateId::Finalized => chain.head_info().map(|head| {
                head.finalized_checkpoint
                    .epoch
                    .start_slot(T::EthSpec::slots_per_epoch())
            }),
            CoreStateId::Justified => chain.head_info().map(|head| {
                head.current_justified_checkpoint
                    .epoch
                    .start_slot(T::EthSpec::slots_per_epoch())
            }),
            CoreStateId::Slot(slot) => Ok(*slot),
            CoreStateId::Root(root) => return Ok(*root),
        }
        .map_err(warp_utils::reject::beacon_chain_error)?;

        chain
            .state_root_at_slot(slot)
            .map_err(warp_utils::reject::beacon_chain_error)?
            .ok_or_else(|| {
                warp_utils::reject::custom_not_found(format!("beacon state at slot {}", slot))
            })
    }

    /// Return the `fork` field of the state identified by `self`.
    pub fn fork<T: BeaconChainTypes>(
        &self,
        chain: &BeaconChain<T>,
    ) -> Result<Fork, warp::Rejection> {
        self.map_state(chain, |state| Ok(state.fork))
    }

    /// Return the `BeaconState` identified by `self`.
    pub fn state<T: BeaconChainTypes>(
        &self,
        chain: &BeaconChain<T>,
    ) -> Result<BeaconState<T::EthSpec>, warp::Rejection> {
        let (state_root, slot_opt) = match &self.0 {
            CoreStateId::Head => {
                return chain
                    .head_beacon_state()
                    .map_err(warp_utils::reject::beacon_chain_error)
            }
            CoreStateId::Slot(slot) => (self.root(chain)?, Some(*slot)),
            _ => (self.root(chain)?, None),
        };

        chain
            .get_state(&state_root, slot_opt)
            .map_err(warp_utils::reject::beacon_chain_error)
            .and_then(|opt| {
                opt.ok_or_else(|| {
                    warp_utils::reject::custom_not_found(format!(
                        "beacon state at root {}",
                        state_root
                    ))
                })
            })
    }

    /// Map a function across the `BeaconState` identified by `self`.
    ///
    /// This function will avoid instantiating/copying a new state when `self` points to the head
    /// of the chain.
    pub fn map_state<T: BeaconChainTypes, F, U>(
        &self,
        chain: &BeaconChain<T>,
        func: F,
    ) -> Result<U, warp::Rejection>
    where
        F: Fn(&BeaconState<T::EthSpec>) -> Result<U, warp::Rejection>,
    {
        match &self.0 {
            CoreStateId::Head => chain
                .with_head(|snapshot| Ok(func(&snapshot.beacon_state)))
                .map_err(warp_utils::reject::beacon_chain_error)?,
            _ => func(&self.state(chain)?),
        }
    }
}

impl FromStr for StateId {
    type Err = String;

    fn from_str(s: &str) -> Result<Self, Self::Err> {
        CoreStateId::from_str(s).map(Self)
    }
}
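The payoff of `map_state` is its `Head` arm: reading through `with_head` borrows the cached head snapshot instead of loading and cloning a full `BeaconState`. An illustrative comparison, assuming a `chain` handle in scope:

let id = StateId::head();

// Cheap: borrows the head snapshot, no state copy (this is how `fork()` above works).
let fork = id.map_state(&chain, |state| Ok(state.fork))?;

// Works for any id, but for `head` it materialises an owned state first.
let fork_via_copy = id.state(&chain)?.fork;
assert_eq!(fork, fork_via_copy);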
88 beacon_node/http_api/src/validator_inclusion.rs Normal file
@@ -0,0 +1,88 @@
use crate::state_id::StateId;
use beacon_chain::{BeaconChain, BeaconChainTypes};
use eth2::{
    lighthouse::{GlobalValidatorInclusionData, ValidatorInclusionData},
    types::ValidatorId,
};
use state_processing::per_epoch_processing::ValidatorStatuses;
use types::{Epoch, EthSpec};

/// Returns information about *all validators* (i.e., global) and how they performed during a given
/// epoch.
pub fn global_validator_inclusion_data<T: BeaconChainTypes>(
    epoch: Epoch,
    chain: &BeaconChain<T>,
) -> Result<GlobalValidatorInclusionData, warp::Rejection> {
    let target_slot = epoch.end_slot(T::EthSpec::slots_per_epoch());

    let state = StateId::slot(target_slot).state(chain)?;

    let mut validator_statuses = ValidatorStatuses::new(&state, &chain.spec)
        .map_err(warp_utils::reject::beacon_state_error)?;
    validator_statuses
        .process_attestations(&state, &chain.spec)
        .map_err(warp_utils::reject::beacon_state_error)?;

    let totals = validator_statuses.total_balances;

    Ok(GlobalValidatorInclusionData {
        current_epoch_active_gwei: totals.current_epoch(),
        previous_epoch_active_gwei: totals.previous_epoch(),
        current_epoch_attesting_gwei: totals.current_epoch_attesters(),
        current_epoch_target_attesting_gwei: totals.current_epoch_target_attesters(),
        previous_epoch_attesting_gwei: totals.previous_epoch_attesters(),
        previous_epoch_target_attesting_gwei: totals.previous_epoch_target_attesters(),
        previous_epoch_head_attesting_gwei: totals.previous_epoch_head_attesters(),
    })
}

/// Returns information about a single validator and how it performed during a given epoch.
pub fn validator_inclusion_data<T: BeaconChainTypes>(
    epoch: Epoch,
    validator_id: &ValidatorId,
    chain: &BeaconChain<T>,
) -> Result<Option<ValidatorInclusionData>, warp::Rejection> {
    let target_slot = epoch.end_slot(T::EthSpec::slots_per_epoch());

    let mut state = StateId::slot(target_slot).state(chain)?;

    let mut validator_statuses = ValidatorStatuses::new(&state, &chain.spec)
        .map_err(warp_utils::reject::beacon_state_error)?;
    validator_statuses
        .process_attestations(&state, &chain.spec)
        .map_err(warp_utils::reject::beacon_state_error)?;

    state
        .update_pubkey_cache()
        .map_err(warp_utils::reject::beacon_state_error)?;

    let validator_index = match validator_id {
        ValidatorId::Index(index) => *index as usize,
        ValidatorId::PublicKey(pubkey) => {
            if let Some(index) = state
                .get_validator_index(pubkey)
                .map_err(warp_utils::reject::beacon_state_error)?
            {
                index
            } else {
                return Ok(None);
            }
        }
    };

    Ok(validator_statuses
        .statuses
        .get(validator_index)
        .map(|vote| ValidatorInclusionData {
            is_slashed: vote.is_slashed,
            is_withdrawable_in_current_epoch: vote.is_withdrawable_in_current_epoch,
            is_active_in_current_epoch: vote.is_active_in_current_epoch,
            is_active_in_previous_epoch: vote.is_active_in_previous_epoch,
            current_epoch_effective_balance_gwei: vote.current_epoch_effective_balance,
            is_current_epoch_attester: vote.is_current_epoch_attester,
            is_current_epoch_target_attester: vote.is_current_epoch_target_attester,
            is_previous_epoch_attester: vote.is_previous_epoch_attester,
            is_previous_epoch_target_attester: vote.is_previous_epoch_target_attester,
            is_previous_epoch_head_attester: vote.is_previous_epoch_head_attester,
        }))
}
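A sketch of a caller, with an invariant that follows from how `ValidatorStatuses` tallies balances (attesting balance is a subset of active balance); `epoch` is assumed to come from the request path:

let global = global_validator_inclusion_data(epoch, &chain)?;
// Target attesters are active validators, so their balance cannot exceed the active total.
assert!(global.previous_epoch_target_attesting_gwei <= global.previous_epoch_active_gwei);

// Per-validator lookup; `Ok(None)` means the pubkey or index is unknown to the state.
if let Some(data) = validator_inclusion_data(epoch, &ValidatorId::Index(0), &chain)? {
    let _ = data.is_previous_epoch_target_attester;
}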
1786 beacon_node/http_api/tests/tests.rs Normal file
File diff suppressed because it is too large
28 beacon_node/http_metrics/Cargo.toml Normal file
@@ -0,0 +1,28 @@
[package]
name = "http_metrics"
version = "0.1.0"
authors = ["Paul Hauner <paul@paulhauner.com>"]
edition = "2018"

# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html

[dependencies]
prometheus = "0.10.0"
warp = "0.2.5"
serde = { version = "1.0.116", features = ["derive"] }
slog = "2.5.2"
beacon_chain = { path = "../beacon_chain" }
store = { path = "../store" }
eth2_libp2p = { path = "../eth2_libp2p" }
slot_clock = { path = "../../common/slot_clock" }
lighthouse_metrics = { path = "../../common/lighthouse_metrics" }
lazy_static = "1.4.0"
eth2 = { path = "../../common/eth2" }
lighthouse_version = { path = "../../common/lighthouse_version" }
warp_utils = { path = "../../common/warp_utils" }

[dev-dependencies]
tokio = { version = "0.2.22", features = ["sync"] }
reqwest = { version = "0.10.8", features = ["json"] }
environment = { path = "../../lighthouse/environment" }
types = { path = "../../consensus/types" }
135 beacon_node/http_metrics/src/lib.rs Normal file
@@ -0,0 +1,135 @@
//! This crate provides a HTTP server that is solely dedicated to serving the `/metrics` endpoint.
//!
//! For other endpoints, see the `http_api` crate.

#[macro_use]
extern crate lazy_static;

mod metrics;

use beacon_chain::{BeaconChain, BeaconChainTypes};
use lighthouse_version::version_with_platform;
use serde::{Deserialize, Serialize};
use slog::{crit, info, Logger};
use std::future::Future;
use std::net::{Ipv4Addr, SocketAddr, SocketAddrV4};
use std::path::PathBuf;
use std::sync::Arc;
use warp::{http::Response, Filter};

#[derive(Debug)]
pub enum Error {
    Warp(warp::Error),
    Other(String),
}

impl From<warp::Error> for Error {
    fn from(e: warp::Error) -> Self {
        Error::Warp(e)
    }
}

impl From<String> for Error {
    fn from(e: String) -> Self {
        Error::Other(e)
    }
}

/// A wrapper around all the items required to spawn the HTTP server.
///
/// The server will gracefully handle the case where any fields are `None`.
pub struct Context<T: BeaconChainTypes> {
    pub config: Config,
    pub chain: Option<Arc<BeaconChain<T>>>,
    pub db_path: Option<PathBuf>,
    pub freezer_db_path: Option<PathBuf>,
    pub log: Logger,
}

/// Configuration for the HTTP server.
#[derive(PartialEq, Debug, Clone, Serialize, Deserialize)]
pub struct Config {
    pub enabled: bool,
    pub listen_addr: Ipv4Addr,
    pub listen_port: u16,
    pub allow_origin: Option<String>,
}

impl Default for Config {
    fn default() -> Self {
        Self {
            enabled: false,
            listen_addr: Ipv4Addr::new(127, 0, 0, 1),
            listen_port: 5054,
            allow_origin: None,
        }
    }
}

/// Creates a server that will serve requests using information from `ctx`.
///
/// The server will shut down gracefully when the `shutdown` future resolves.
///
/// ## Returns
///
/// This function will bind the server to the provided address and then return a tuple of:
///
/// - `SocketAddr`: the address that the HTTP server will listen on.
/// - `Future`: the actual server future that will need to be awaited.
///
/// ## Errors
///
/// Returns an error if the server is unable to bind or there is another error during
/// configuration.
pub fn serve<T: BeaconChainTypes>(
    ctx: Arc<Context<T>>,
    shutdown: impl Future<Output = ()> + Send + Sync + 'static,
) -> Result<(SocketAddr, impl Future<Output = ()>), Error> {
    let config = &ctx.config;
    let log = ctx.log.clone();
    let allow_origin = config.allow_origin.clone();

    // Sanity check.
    if !config.enabled {
        crit!(log, "Cannot start disabled metrics HTTP server");
        return Err(Error::Other(
            "A disabled metrics server should not be started".to_string(),
        ));
    }

    let inner_ctx = ctx.clone();
    let routes = warp::get()
        .and(warp::path("metrics"))
        .map(move || inner_ctx.clone())
        .and_then(|ctx: Arc<Context<T>>| async move {
            Ok::<_, warp::Rejection>(
                metrics::gather_prometheus_metrics(&ctx)
                    .map(|body| Response::builder().status(200).body(body).unwrap())
                    .unwrap_or_else(|e| {
                        Response::builder()
                            .status(500)
                            .body(format!("Unable to gather metrics: {:?}", e))
                            .unwrap()
                    }),
            )
        })
        // Add a `Server` header.
        .map(|reply| warp::reply::with_header(reply, "Server", &version_with_platform()))
        // Maybe add some CORS headers.
        .map(move |reply| warp_utils::reply::maybe_cors(reply, allow_origin.as_ref()));

    let (listening_socket, server) = warp::serve(routes).try_bind_with_graceful_shutdown(
        SocketAddrV4::new(config.listen_addr, config.listen_port),
        async {
            shutdown.await;
        },
    )?;

    info!(
        log,
        "Metrics HTTP server started";
        "listen_address" => listening_socket.to_string(),
    );

    Ok((listening_socket, server))
}
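Binding with `listen_port: 0` lets the OS pick a free port, which `serve` reports back through the returned `SocketAddr` before the server future is polled; this is exactly how the integration test further below discovers its URL. A compressed consumer sketch, assuming a `ctx` and a oneshot `shutdown_rx` like those in the test:

let (listening_addr, server) = http_metrics::serve(ctx, async {
    let _ = shutdown_rx.await; // resolve on any shutdown signal
})?;
tokio::spawn(server);
// The endpoint is now reachable at http://{listening_addr}/metrics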
@@ -1,38 +1,11 @@
-use crate::{ApiError, Context};
+use crate::Context;
use beacon_chain::BeaconChainTypes;
+use eth2::lighthouse::Health;
use lighthouse_metrics::{Encoder, TextEncoder};
-use rest_types::Health;
-use std::sync::Arc;

pub use lighthouse_metrics::*;

lazy_static! {
-   pub static ref BEACON_HTTP_API_REQUESTS_TOTAL: Result<IntCounterVec> =
-       try_create_int_counter_vec(
-           "beacon_http_api_requests_total",
-           "Count of HTTP requests received",
-           &["endpoint"]
-       );
-   pub static ref BEACON_HTTP_API_SUCCESS_TOTAL: Result<IntCounterVec> =
-       try_create_int_counter_vec(
-           "beacon_http_api_success_total",
-           "Count of HTTP requests that returned 200 OK",
-           &["endpoint"]
-       );
-   pub static ref BEACON_HTTP_API_ERROR_TOTAL: Result<IntCounterVec> = try_create_int_counter_vec(
-       "beacon_http_api_error_total",
-       "Count of HTTP requests that did not return 200 OK",
-       &["endpoint"]
-   );
-   pub static ref BEACON_HTTP_API_TIMES_TOTAL: Result<HistogramVec> = try_create_histogram_vec(
-       "beacon_http_api_times_total",
-       "Duration to process HTTP requests",
-       &["endpoint"]
-   );
-   pub static ref REQUEST_RESPONSE_TIME: Result<Histogram> = try_create_histogram(
-       "http_server_request_duration_seconds",
-       "Time taken to build a response to a HTTP request"
-   );
    pub static ref PROCESS_NUM_THREADS: Result<IntGauge> = try_create_int_gauge(
        "process_num_threads",
        "Number of threads used by the current process"
@@ -67,14 +40,9 @@ lazy_static! {
        try_create_float_gauge("system_loadavg_15", "Loadavg over 15 minutes");
}

-/// Returns the full set of Prometheus metrics for the Beacon Node application.
-///
-/// # Note
-///
-/// This is a HTTP handler method.
-pub fn get_prometheus<T: BeaconChainTypes>(
-    ctx: Arc<Context<T>>,
-) -> std::result::Result<String, ApiError> {
+pub fn gather_prometheus_metrics<T: BeaconChainTypes>(
+    ctx: &Context<T>,
+) -> std::result::Result<String, String> {
    let mut buffer = vec![];
    let encoder = TextEncoder::new();

@@ -94,9 +62,17 @@ pub fn get_prometheus<T: BeaconChainTypes>(
    // using `lighthouse_metrics::gather(..)` to collect the global `DEFAULT_REGISTRY` metrics into
    // a string that can be returned via HTTP.

-   slot_clock::scrape_for_metrics::<T::EthSpec, T::SlotClock>(&ctx.beacon_chain.slot_clock);
-   store::scrape_for_metrics(&ctx.db_path, &ctx.freezer_db_path);
-   beacon_chain::scrape_for_metrics(&ctx.beacon_chain);
+   if let Some(beacon_chain) = ctx.chain.as_ref() {
+       slot_clock::scrape_for_metrics::<T::EthSpec, T::SlotClock>(&beacon_chain.slot_clock);
+       beacon_chain::scrape_for_metrics(beacon_chain);
+   }
+
+   if let (Some(db_path), Some(freezer_db_path)) =
+       (ctx.db_path.as_ref(), ctx.freezer_db_path.as_ref())
+   {
+       store::scrape_for_metrics(db_path, freezer_db_path);
+   }

    eth2_libp2p::scrape_discovery_metrics();

    // This will silently fail if we are unable to observe the health. This is desired behaviour
@@ -125,6 +101,5 @@ pub fn get_prometheus<T: BeaconChainTypes>(
        .encode(&lighthouse_metrics::gather(), &mut buffer)
        .unwrap();

-   String::from_utf8(buffer)
-       .map_err(|e| ApiError::ServerError(format!("Failed to encode prometheus info: {:?}", e)))
+   String::from_utf8(buffer).map_err(|e| format!("Failed to encode prometheus info: {:?}", e))
}
46 beacon_node/http_metrics/tests/tests.rs Normal file
@@ -0,0 +1,46 @@
use beacon_chain::test_utils::BlockingMigratorEphemeralHarnessType;
use environment::null_logger;
use http_metrics::Config;
use reqwest::StatusCode;
use std::net::Ipv4Addr;
use std::sync::Arc;
use tokio::sync::oneshot;
use types::MainnetEthSpec;

type Context = http_metrics::Context<BlockingMigratorEphemeralHarnessType<MainnetEthSpec>>;

#[tokio::test(core_threads = 2)]
async fn returns_200_ok() {
    let log = null_logger().unwrap();

    let context = Arc::new(Context {
        config: Config {
            enabled: true,
            listen_addr: Ipv4Addr::new(127, 0, 0, 1),
            listen_port: 0,
            allow_origin: None,
        },
        chain: None,
        db_path: None,
        freezer_db_path: None,
        log,
    });

    let ctx = context.clone();
    let (_shutdown_tx, shutdown_rx) = oneshot::channel::<()>();
    let server_shutdown = async {
        // It's not really interesting why this triggered, just that it happened.
        let _ = shutdown_rx.await;
    };
    let (listening_socket, server) = http_metrics::serve(ctx, server_shutdown).unwrap();

    tokio::spawn(async { server.await });

    let url = format!(
        "http://{}:{}/metrics",
        listening_socket.ip(),
        listening_socket.port()
    );

    assert_eq!(reqwest::get(&url).await.unwrap().status(), StatusCode::OK);
}
@@ -5,7 +5,7 @@ authors = ["Sigma Prime <contact@sigmaprime.io>"]
edition = "2018"

[dev-dependencies]
-sloggers = "1.0.0"
+sloggers = "1.0.1"
genesis = { path = "../genesis" }
lazy_static = "1.4.0"
matches = "0.1.8"
@@ -17,7 +17,6 @@ beacon_chain = { path = "../beacon_chain" }
store = { path = "../store" }
eth2_libp2p = { path = "../eth2_libp2p" }
hashset_delay = { path = "../../common/hashset_delay" }
-rest_types = { path = "../../common/rest_types" }
types = { path = "../../consensus/types" }
state_processing = { path = "../../consensus/state_processing" }
slot_clock = { path = "../../common/slot_clock" }
@@ -25,18 +24,18 @@ slog = { version = "2.5.2", features = ["max_level_trace"] }
hex = "0.4.2"
eth2_ssz = "0.1.2"
eth2_ssz_types = { path = "../../consensus/ssz_types" }
-tree_hash = "0.1.0"
+tree_hash = "0.1.1"
futures = "0.3.5"
-error-chain = "0.12.2"
+error-chain = "0.12.4"
tokio = { version = "0.2.22", features = ["full"] }
parking_lot = "0.11.0"
-smallvec = "1.4.1"
+smallvec = "1.4.2"
rand = "0.7.3"
-fnv = "1.0.6"
+fnv = "1.0.7"
-rlp = "0.4.5"
+rlp = "0.4.6"
lazy_static = "1.4.0"
lighthouse_metrics = { path = "../../common/lighthouse_metrics" }
-environment = { path = "../../lighthouse/environment" }
+task_executor = { path = "../../common/task_executor" }
igd = "0.11.1"
itertools = "0.9.0"
num_cpus = "1.13.0"

@@ -15,9 +15,8 @@ use slog::{debug, error, o, trace, warn};
use beacon_chain::{BeaconChain, BeaconChainTypes};
use eth2_libp2p::SubnetDiscovery;
use hashset_delay::HashSetDelay;
-use rest_types::ValidatorSubscription;
use slot_clock::SlotClock;
-use types::{Attestation, EthSpec, Slot, SubnetId};
+use types::{Attestation, EthSpec, Slot, SubnetId, ValidatorSubscription};

use crate::metrics;

@@ -196,14 +195,9 @@ impl<T: BeaconChainTypes> AttestationService<T> {
            slot: subscription.slot,
        };

-       // determine if the validator is an aggregator. If so, we subscribe to the subnet and
+       // Determine if the validator is an aggregator. If so, we subscribe to the subnet and
        // if successful add the validator to a mapping of known aggregators for that exact
        // subnet.
-       // NOTE: There is a chance that a fork occurs between now and when the validator needs
-       // to aggregate attestations. If this happens, the signature will no longer be valid
-       // and it could be likely the validator no longer needs to aggregate. More
-       // sophisticated logic should be added using known future forks.
-       // TODO: Implement

        if subscription.is_aggregator {
            metrics::inc_counter(&metrics::SUBNET_SUBSCRIPTION_AGGREGATOR_REQUESTS);
@@ -287,8 +281,6 @@ impl<T: BeaconChainTypes> AttestationService<T> {
                min_ttl,
            })
        } else {
-           // TODO: Send the time frame needed to have a peer connected, so that we can
-           // maintain peers for at least this duration.
            // We may want to check the global PeerInfo to see estimated timeouts for each
            // peer before they can be removed.
            warn!(self.log,

@@ -37,12 +37,12 @@
use crate::{metrics, service::NetworkMessage, sync::SyncMessage};
use beacon_chain::{BeaconChain, BeaconChainTypes, BlockError};
-use environment::TaskExecutor;
use eth2_libp2p::{MessageId, NetworkGlobals, PeerId};
use slog::{crit, debug, error, trace, warn, Logger};
use std::collections::VecDeque;
use std::sync::{Arc, Weak};
use std::time::{Duration, Instant};
+use task_executor::TaskExecutor;
use tokio::sync::{mpsc, oneshot};
use types::{
    Attestation, AttesterSlashing, EthSpec, Hash256, ProposerSlashing, SignedAggregateAndProof,

@@ -45,7 +45,7 @@ impl<T: BeaconChainTypes> Worker<T> {
        let attestation = match self
            .chain
-           .verify_unaggregated_attestation_for_gossip(attestation, subnet_id)
+           .verify_unaggregated_attestation_for_gossip(attestation, Some(subnet_id))
        {
            Ok(attestation) => attestation,
            Err(e) => {

@@ -3,15 +3,14 @@ use std::sync::Arc;
use store::{DBColumn, Error as StoreError, HotColdDB, ItemStore, StoreItem};
use types::{EthSpec, Hash256};

-/// 32-byte key for accessing the `DhtEnrs`.
-pub const DHT_DB_KEY: &str = "PERSISTEDDHTPERSISTEDDHTPERSISTE";
+/// 32-byte key for accessing the `DhtEnrs`. All zero because `DhtEnrs` has its own column.
+pub const DHT_DB_KEY: Hash256 = Hash256::zero();

pub fn load_dht<E: EthSpec, Hot: ItemStore<E>, Cold: ItemStore<E>>(
    store: Arc<HotColdDB<E, Hot, Cold>>,
) -> Vec<Enr> {
    // Load DHT from store
-   let key = Hash256::from_slice(&DHT_DB_KEY.as_bytes());
-   match store.get_item(&key) {
+   match store.get_item(&DHT_DB_KEY) {
        Ok(Some(p)) => {
            let p: PersistedDht = p;
            p.enrs
@@ -25,9 +24,7 @@ pub fn persist_dht<E: EthSpec, Hot: ItemStore<E>, Cold: ItemStore<E>>(
    store: Arc<HotColdDB<E, Hot, Cold>>,
    enrs: Vec<Enr>,
) -> Result<(), store::Error> {
-   let key = Hash256::from_slice(&DHT_DB_KEY.as_bytes());
-   store.put_item(&key, &PersistedDht { enrs })?;
-   Ok(())
+   store.put_item(&DHT_DB_KEY, &PersistedDht { enrs })
}

/// Wrapper around DHT for persistence to disk.
@@ -61,7 +58,7 @@ mod tests {
    use std::str::FromStr;
    use store::config::StoreConfig;
    use store::{HotColdDB, MemoryStore};
-   use types::{ChainSpec, Hash256, MinimalEthSpec};
+   use types::{ChainSpec, MinimalEthSpec};
    #[test]
    fn test_persisted_dht() {
        let log = NullLoggerBuilder.build().unwrap();
@@ -71,11 +68,10 @@ mod tests {
            MemoryStore<MinimalEthSpec>,
        > = HotColdDB::open_ephemeral(StoreConfig::default(), ChainSpec::minimal(), log).unwrap();
        let enrs = vec![Enr::from_str("enr:-IS4QHCYrYZbAKWCBRlAy5zzaDZXJBGkcnh4MHcBFZntXNFrdvJjX04jRzjzCBOonrkTfj499SZuOh8R33Ls8RRcy5wBgmlkgnY0gmlwhH8AAAGJc2VjcDI1NmsxoQPKY0yuDUmstAHYpMa2_oxVtw0RW_QAdpzBQA8yWM0xOIN1ZHCCdl8").unwrap()];
-       let key = Hash256::from_slice(&DHT_DB_KEY.as_bytes());
        store
-           .put_item(&key, &PersistedDht { enrs: enrs.clone() })
+           .put_item(&DHT_DB_KEY, &PersistedDht { enrs: enrs.clone() })
            .unwrap();
-       let dht: PersistedDht = store.get_item(&key).unwrap().unwrap();
+       let dht: PersistedDht = store.get_item(&DHT_DB_KEY).unwrap().unwrap();
        assert_eq!(dht.enrs, enrs);
    }
}
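Why the key change above is safe: `get_item`/`put_item` are namespaced by `DBColumn`, so a singleton per column only needs one well-known key. The old code manufactured that key from a 32-byte ASCII tag; the new constant is simply all zeroes. A small illustration of the two keys:

let old_key = Hash256::from_slice("PERSISTEDDHTPERSISTEDDHTPERSISTE".as_bytes());
let new_key = Hash256::zero();
// Different bytes, but either works as a column-local singleton key,
// since `DhtEnrs` never shares its column with other item kinds.
assert_ne!(old_key, new_key);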
@ -74,7 +74,7 @@ impl<T: BeaconChainTypes> Router<T> {
         beacon_chain: Arc<BeaconChain<T>>,
         network_globals: Arc<NetworkGlobals<T::EthSpec>>,
         network_send: mpsc::UnboundedSender<NetworkMessage<T::EthSpec>>,
-        executor: environment::TaskExecutor,
+        executor: task_executor::TaskExecutor,
         log: slog::Logger,
     ) -> error::Result<mpsc::UnboundedSender<RouterMessage<T::EthSpec>>> {
         let message_handler_log = log.new(o!("service"=> "router"));
@ -41,7 +41,7 @@ pub struct Processor<T: BeaconChainTypes> {
 impl<T: BeaconChainTypes> Processor<T> {
     /// Instantiate a `Processor` instance
     pub fn new(
-        executor: environment::TaskExecutor,
+        executor: task_executor::TaskExecutor,
         beacon_chain: Arc<BeaconChain<T>>,
         network_globals: Arc<NetworkGlobals<T::EthSpec>>,
         network_send: mpsc::UnboundedSender<NetworkMessage<T::EthSpec>>,
@ -15,13 +15,12 @@ use eth2_libp2p::{
 };
 use eth2_libp2p::{MessageAcceptance, Service as LibP2PService};
 use futures::prelude::*;
-use rest_types::ValidatorSubscription;
 use slog::{debug, error, info, o, trace, warn};
 use std::{collections::HashMap, net::SocketAddr, sync::Arc, time::Duration};
 use store::HotColdDB;
 use tokio::sync::mpsc;
 use tokio::time::Delay;
-use types::EthSpec;
+use types::{EthSpec, ValidatorSubscription};
 
 mod tests;
 
@ -52,7 +51,7 @@ pub enum NetworkMessage<T: EthSpec> {
     },
     /// Respond to a peer's request with an error.
     SendError {
-        // TODO: note that this is never used, we just say goodbye without nicely closing the
+        // NOTE: Currently this is never used, we just say goodbye without nicely closing the
         // stream assigned to the request
         peer_id: PeerId,
         error: RPCResponseErrorCode,
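The `rest_types` import disappears because `ValidatorSubscription` now lives in (or is re-exported from) the `types` crate; any other consumer of the type changes the same way. A one-line sketch of the new import path:

    // After this commit the subscription type is imported from `types`:
    use types::ValidatorSubscription;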
@ -122,7 +121,7 @@ impl<T: BeaconChainTypes> NetworkService<T> {
     pub async fn start(
         beacon_chain: Arc<BeaconChain<T>>,
         config: &NetworkConfig,
-        executor: environment::TaskExecutor,
+        executor: task_executor::TaskExecutor,
     ) -> error::Result<(
         Arc<NetworkGlobals<T::EthSpec>>,
         mpsc::UnboundedSender<NetworkMessage<T::EthSpec>>,
@ -164,7 +163,7 @@ impl<T: BeaconChainTypes> NetworkService<T> {
             "Loading peers into the routing table"; "peers" => enrs_to_load.len()
         );
         for enr in enrs_to_load {
-            libp2p.swarm.add_enr(enr.clone()); //TODO change?
+            libp2p.swarm.add_enr(enr.clone());
         }
 
         // launch derived network services
@ -208,7 +207,7 @@ impl<T: BeaconChainTypes> NetworkService<T> {
 }
 
 fn spawn_service<T: BeaconChainTypes>(
-    executor: environment::TaskExecutor,
+    executor: task_executor::TaskExecutor,
     mut service: NetworkService<T>,
 ) -> error::Result<()> {
     let mut exit_rx = executor.exit();
@ -350,7 +349,6 @@ fn spawn_service<T: BeaconChainTypes>(
             // process any attestation service events
             Some(attestation_service_message) = service.attestation_service.next() => {
                 match attestation_service_message {
-                    // TODO: Implement
                     AttServiceMessage::Subscribe(subnet_id) => {
                         service.libp2p.swarm.subscribe_to_subnet(subnet_id);
                     }
@ -41,7 +41,7 @@ mod tests {
 
         let (signal, exit) = exit_future::signal();
         let (shutdown_tx, _) = futures::channel::mpsc::channel(1);
-        let executor = environment::TaskExecutor::new(
+        let executor = task_executor::TaskExecutor::new(
             runtime.handle().clone(),
             exit,
             log.clone(),
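Every `environment::TaskExecutor` parameter in this commit becomes `task_executor::TaskExecutor`, and the test hunk above shows the new construction path. A sketch of the pattern; the constructor's trailing arguments fall outside the hunk, so the fourth argument here (`shutdown_tx`) is an assumption inferred from the channel created just before the call:

    // Sketch, following the test above: the executor now comes from the
    // standalone `task_executor` crate instead of `environment`.
    let (signal, exit) = exit_future::signal();
    let (shutdown_tx, _) = futures::channel::mpsc::channel(1);
    let executor = task_executor::TaskExecutor::new(
        runtime.handle().clone(), // tokio runtime handle
        exit,                     // resolves when `signal` fires
        log.clone(),              // slog logger
        shutdown_tx,              // assumed: shutdown-request channel
    );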
@ -119,7 +119,6 @@ pub enum SyncMessage<T: EthSpec> {
 }
 
 /// The result of processing a multiple blocks (a chain segment).
-// TODO: When correct batch error handling occurs, we will include an error type.
 #[derive(Debug)]
 pub enum BatchProcessResult {
     /// The batch was completed successfully. It carries whether the sent batch contained blocks.
@ -205,7 +204,7 @@ impl SingleBlockRequest {
 /// chain. This allows the chain to be
 /// dropped during the syncing process which will gracefully end the `SyncManager`.
 pub fn spawn<T: BeaconChainTypes>(
-    executor: environment::TaskExecutor,
+    executor: task_executor::TaskExecutor,
     beacon_chain: Arc<BeaconChain<T>>,
     network_globals: Arc<NetworkGlobals<T::EthSpec>>,
     network_send: mpsc::UnboundedSender<NetworkMessage<T::EthSpec>>,
@ -629,7 +628,7 @@ impl<T: BeaconChainTypes> SyncManager<T> {
         self.update_sync_state();
     }
 
-    // TODO: Group these functions into one.
+    // TODO: Group these functions into one for cleaner code.
    /// Updates the syncing state of a peer to be synced.
     fn synced_peer(&mut self, peer_id: &PeerId, sync_info: PeerSyncInfo) {
         if let Some(peer_info) = self.network_globals.peers.write().peer_info_mut(peer_id) {
@ -792,7 +791,6 @@ impl<T: BeaconChainTypes> SyncManager<T> {
 
         // This currently can be a host of errors. We permit this due to the partial
         // ambiguity.
-        // TODO: Refine the error types and score the peer appropriately.
         self.network.report_peer(
             parent_request.last_submitted_peer,
             PeerAction::MidToleranceError,
@ -613,9 +613,7 @@ impl<T: BeaconChainTypes> SyncingChain<T> {
             BatchState::Failed | BatchState::Poisoned | BatchState::AwaitingDownload => {
                 unreachable!("batch indicates inconsistent chain state while advancing chain")
             }
-            BatchState::AwaitingProcessing(..) => {
-                // TODO: can we be sure the old attempts are wrong?
-            }
+            BatchState::AwaitingProcessing(..) => {}
             BatchState::Processing(_) => {
                 assert_eq!(
                     id,
@ -651,9 +649,6 @@ impl<T: BeaconChainTypes> SyncingChain<T> {
     /// These events occur when a peer has successfully responded with blocks, but the blocks we
     /// have received are incorrect or invalid. This indicates the peer has not performed as
     /// intended and can result in downvoting a peer.
-    // TODO: Batches could have been partially downloaded due to RPC size-limit restrictions. We
-    // need to add logic for partial batch downloads. Potentially, if another peer returns the same
-    // batch, we try a partial download.
     fn handle_invalid_batch(
         &mut self,
         network: &mut SyncNetworkContext<T::EthSpec>,
@ -220,7 +220,10 @@ impl<T: BeaconChainTypes> RangeSync<T> {
                 if let Some(removed_chain) = removed_chain {
                     debug!(self.log, "Chain removed after block response"; "sync_type" => ?sync_type, "chain_id" => chain_id);
                     removed_chain.status_peers(network);
-                    // TODO: update & update_sync_state?
+                    // update the state of the collection
+                    self.chains.update(network);
+                    // update the global state and inform the user
+                    self.chains.update_sync_state(network);
                 }
             }
             Err(_) => {
@ -319,7 +322,10 @@ impl<T: BeaconChainTypes> RangeSync<T> {
             .call_all(|chain| chain.remove_peer(peer_id, network))
         {
             debug!(self.log, "Chain removed after removing peer"; "sync_type" => ?sync_type, "chain" => removed_chain.get_id());
-            // TODO: anything else to do?
+            // update the state of the collection
+            self.chains.update(network);
+            // update the global state and inform the user
+            self.chains.update_sync_state(network);
         }
     }
 
@ -343,7 +349,10 @@ impl<T: BeaconChainTypes> RangeSync<T> {
                 if let Some(removed_chain) = removed_chain {
                     debug!(self.log, "Chain removed on rpc error"; "sync_type" => ?sync_type, "chain" => removed_chain.get_id());
                     removed_chain.status_peers(network);
-                    // TODO: update & update_sync_state?
+                    // update the state of the collection
+                    self.chains.update(network);
+                    // update the global state and inform the user
+                    self.chains.update_sync_state(network);
                 }
             }
             Err(_) => {
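All three hunks above replace an open TODO with the same pair of calls wherever a syncing chain is removed. If the sites keep multiplying, the pair could be folded into a small private helper; the sketch below is hypothetical and not part of this diff, which deliberately inlines the calls instead:

    // Hypothetical helper (not in this commit): centralises the bookkeeping
    // performed whenever a chain is removed from the collection.
    fn on_chain_removed(&mut self, network: &mut SyncNetworkContext<T::EthSpec>) {
        // update the state of the collection
        self.chains.update(network);
        // update the global state and inform the user
        self.chains.update_sync_state(network);
    }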
@ -11,8 +11,8 @@ types = { path = "../../consensus/types" }
 state_processing = { path = "../../consensus/state_processing" }
 eth2_ssz = "0.1.2"
 eth2_ssz_derive = "0.1.0"
-serde = "1.0.110"
-serde_derive = "1.0.110"
+serde = "1.0.116"
+serde_derive = "1.0.116"
 store = { path = "../store" }
 
 [dev-dependencies]
@ -332,6 +332,51 @@ impl<T: EthSpec> OperationPool<T> {
     pub fn num_voluntary_exits(&self) -> usize {
         self.voluntary_exits.read().len()
     }
+
+    /// Returns all known `Attestation` objects.
+    ///
+    /// This method may return objects that are invalid for block inclusion.
+    pub fn get_all_attestations(&self) -> Vec<Attestation<T>> {
+        self.attestations
+            .read()
+            .iter()
+            .map(|(_, attns)| attns.iter().cloned())
+            .flatten()
+            .collect()
+    }
+
+    /// Returns all known `AttesterSlashing` objects.
+    ///
+    /// This method may return objects that are invalid for block inclusion.
+    pub fn get_all_attester_slashings(&self) -> Vec<AttesterSlashing<T>> {
+        self.attester_slashings
+            .read()
+            .iter()
+            .map(|(slashing, _)| slashing.clone())
+            .collect()
+    }
+
+    /// Returns all known `ProposerSlashing` objects.
+    ///
+    /// This method may return objects that are invalid for block inclusion.
+    pub fn get_all_proposer_slashings(&self) -> Vec<ProposerSlashing> {
+        self.proposer_slashings
+            .read()
+            .iter()
+            .map(|(_, slashing)| slashing.clone())
+            .collect()
+    }
+
+    /// Returns all known `SignedVoluntaryExit` objects.
+    ///
+    /// This method may return objects that are invalid for block inclusion.
+    pub fn get_all_voluntary_exits(&self) -> Vec<SignedVoluntaryExit> {
+        self.voluntary_exits
+            .read()
+            .iter()
+            .map(|(_, exit)| exit.clone())
+            .collect()
+    }
 }
 
 /// Filter up to a maximum number of operations out of an iterator.
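The four getters added above expose the raw contents of the pool with no inclusion-validity filtering, as their doc comments warn. A caller, say a status or debug endpoint, might use them like this sketch, where `pool: &OperationPool<T>` is assumed to be in scope:

    // Sketch: dump everything the pool currently holds. None of these calls
    // filter for objects that are still valid for block inclusion.
    let attestations = pool.get_all_attestations();
    let attester_slashings = pool.get_all_attester_slashings();
    let proposer_slashings = pool.get_all_proposer_slashings();
    let voluntary_exits = pool.get_all_voluntary_exits();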
@ -1,50 +0,0 @@
-[package]
-name = "rest_api"
-version = "0.2.0"
-authors = ["Paul Hauner <paul@paulhauner.com>", "Age Manning <Age@AgeManning.com>", "Luke Anderson <luke@sigmaprime.io>"]
-edition = "2018"
-
-# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html
-[dependencies]
-bls = { path = "../../crypto/bls" }
-rest_types = { path = "../../common/rest_types" }
-beacon_chain = { path = "../beacon_chain" }
-network = { path = "../network" }
-eth2_libp2p = { path = "../eth2_libp2p" }
-store = { path = "../store" }
-serde = { version = "1.0.110", features = ["derive"] }
-serde_json = "1.0.52"
-serde_yaml = "0.8.11"
-slog = "2.5.2"
-slog-term = "2.5.0"
-slog-async = "2.5.0"
-eth2_ssz = "0.1.2"
-eth2_ssz_derive = "0.1.0"
-state_processing = { path = "../../consensus/state_processing" }
-types = { path = "../../consensus/types" }
-http = "0.2.1"
-hyper = "0.13.5"
-tokio = { version = "0.2.22", features = ["sync"] }
-url = "2.1.1"
-lazy_static = "1.4.0"
-eth2_config = { path = "../../common/eth2_config" }
-lighthouse_metrics = { path = "../../common/lighthouse_metrics" }
-slot_clock = { path = "../../common/slot_clock" }
-hex = "0.4.2"
-parking_lot = "0.11.0"
-futures = "0.3.5"
-operation_pool = { path = "../operation_pool" }
-environment = { path = "../../lighthouse/environment" }
-uhttp_sse = "0.5.1"
-bus = "2.2.3"
-itertools = "0.9.0"
-lighthouse_version = { path = "../../common/lighthouse_version" }
-
-[dev-dependencies]
-assert_matches = "1.3.0"
-remote_beacon_node = { path = "../../common/remote_beacon_node" }
-node_test_rig = { path = "../../testing/node_test_rig" }
-tree_hash = "0.1.0"
-
-[features]
-fake_crypto = []
@ -1,499 +0,0 @@
-use crate::helpers::*;
-use crate::validator::get_state_for_epoch;
-use crate::Context;
-use crate::{ApiError, UrlQuery};
-use beacon_chain::{
-    observed_operations::ObservationOutcome, BeaconChain, BeaconChainTypes, StateSkipConfig,
-};
-use futures::executor::block_on;
-use hyper::body::Bytes;
-use hyper::{Body, Request};
-use rest_types::{
-    BlockResponse, CanonicalHeadResponse, Committee, HeadBeaconBlock, StateResponse,
-    ValidatorRequest, ValidatorResponse,
-};
-use std::io::Write;
-use std::sync::Arc;
-
-use slog::error;
-use types::{
-    AttesterSlashing, BeaconState, EthSpec, Hash256, ProposerSlashing, PublicKeyBytes,
-    RelativeEpoch, SignedBeaconBlockHash, Slot,
-};
-
-/// Returns a summary of the head of the beacon chain.
-pub fn get_head<T: BeaconChainTypes>(
-    ctx: Arc<Context<T>>,
-) -> Result<CanonicalHeadResponse, ApiError> {
-    let beacon_chain = &ctx.beacon_chain;
-    let chain_head = beacon_chain.head()?;
-
-    Ok(CanonicalHeadResponse {
-        slot: chain_head.beacon_state.slot,
-        block_root: chain_head.beacon_block_root,
-        state_root: chain_head.beacon_state_root,
-        finalized_slot: chain_head
-            .beacon_state
-            .finalized_checkpoint
-            .epoch
-            .start_slot(T::EthSpec::slots_per_epoch()),
-        finalized_block_root: chain_head.beacon_state.finalized_checkpoint.root,
-        justified_slot: chain_head
-            .beacon_state
-            .current_justified_checkpoint
-            .epoch
-            .start_slot(T::EthSpec::slots_per_epoch()),
-        justified_block_root: chain_head.beacon_state.current_justified_checkpoint.root,
-        previous_justified_slot: chain_head
-            .beacon_state
-            .previous_justified_checkpoint
-            .epoch
-            .start_slot(T::EthSpec::slots_per_epoch()),
-        previous_justified_block_root: chain_head.beacon_state.previous_justified_checkpoint.root,
-    })
-}
-
-/// Return the list of heads of the beacon chain.
-pub fn get_heads<T: BeaconChainTypes>(ctx: Arc<Context<T>>) -> Vec<HeadBeaconBlock> {
-    ctx.beacon_chain
-        .heads()
-        .into_iter()
-        .map(|(beacon_block_root, beacon_block_slot)| HeadBeaconBlock {
-            beacon_block_root,
-            beacon_block_slot,
-        })
-        .collect()
-}
-
-/// HTTP handler to return a `BeaconBlock` at a given `root` or `slot`.
-pub fn get_block<T: BeaconChainTypes>(
-    req: Request<Vec<u8>>,
-    ctx: Arc<Context<T>>,
-) -> Result<BlockResponse<T::EthSpec>, ApiError> {
-    let beacon_chain = &ctx.beacon_chain;
-    let query_params = ["root", "slot"];
-    let (key, value) = UrlQuery::from_request(&req)?.first_of(&query_params)?;
-
-    let block_root = match (key.as_ref(), value) {
-        ("slot", value) => {
-            let target = parse_slot(&value)?;
-
-            block_root_at_slot(beacon_chain, target)?.ok_or_else(|| {
-                ApiError::NotFound(format!(
-                    "Unable to find SignedBeaconBlock for slot {:?}",
-                    target
-                ))
-            })?
-        }
-        ("root", value) => parse_root(&value)?,
-        _ => return Err(ApiError::ServerError("Unexpected query parameter".into())),
-    };
-
-    let block = beacon_chain.store.get_block(&block_root)?.ok_or_else(|| {
-        ApiError::NotFound(format!(
-            "Unable to find SignedBeaconBlock for root {:?}",
-            block_root
-        ))
-    })?;
-
-    Ok(BlockResponse {
-        root: block_root,
-        beacon_block: block,
-    })
-}
-
-/// HTTP handler to return a `SignedBeaconBlock` root at a given `slot`.
-pub fn get_block_root<T: BeaconChainTypes>(
-    req: Request<Vec<u8>>,
-    ctx: Arc<Context<T>>,
-) -> Result<Hash256, ApiError> {
-    let slot_string = UrlQuery::from_request(&req)?.only_one("slot")?;
-    let target = parse_slot(&slot_string)?;
-
-    block_root_at_slot(&ctx.beacon_chain, target)?.ok_or_else(|| {
-        ApiError::NotFound(format!(
-            "Unable to find SignedBeaconBlock for slot {:?}",
-            target
-        ))
-    })
-}
-
-fn make_sse_response_chunk(new_head_hash: SignedBeaconBlockHash) -> std::io::Result<Bytes> {
-    let mut buffer = Vec::new();
-    {
-        let mut sse_message = uhttp_sse::SseMessage::new(&mut buffer);
-        let untyped_hash: Hash256 = new_head_hash.into();
-        write!(sse_message.data()?, "{:?}", untyped_hash)?;
-    }
-    let bytes: Bytes = buffer.into();
-    Ok(bytes)
-}
-
-pub fn stream_forks<T: BeaconChainTypes>(ctx: Arc<Context<T>>) -> Result<Body, ApiError> {
-    let mut events = ctx.events.lock().add_rx();
-    let (mut sender, body) = Body::channel();
-    std::thread::spawn(move || {
-        while let Ok(new_head_hash) = events.recv() {
-            let chunk = match make_sse_response_chunk(new_head_hash) {
-                Ok(chunk) => chunk,
-                Err(e) => {
-                    error!(ctx.log, "Failed to make SSE chunk"; "error" => e.to_string());
-                    sender.abort();
-                    break;
-                }
-            };
-            match block_on(sender.send_data(chunk)) {
-                Err(e) if e.is_closed() => break,
-                Err(e) => error!(ctx.log, "Couldn't stream piece {:?}", e),
-                Ok(_) => (),
-            }
-        }
-    });
-    Ok(body)
-}
-
-/// HTTP handler to which accepts a query string of a list of validator pubkeys and maps it to a
-/// `ValidatorResponse`.
-///
-/// This method is limited to as many `pubkeys` that can fit in a URL. See `post_validators` for
-/// doing bulk requests.
-pub fn get_validators<T: BeaconChainTypes>(
-    req: Request<Vec<u8>>,
-    ctx: Arc<Context<T>>,
-) -> Result<Vec<ValidatorResponse>, ApiError> {
-    let query = UrlQuery::from_request(&req)?;
-
-    let validator_pubkeys = query
-        .all_of("validator_pubkeys")?
-        .iter()
-        .map(|validator_pubkey_str| parse_pubkey_bytes(validator_pubkey_str))
-        .collect::<Result<Vec<_>, _>>()?;
-
-    let state_root_opt = if let Some((_key, value)) = query.first_of_opt(&["state_root"]) {
-        Some(parse_root(&value)?)
-    } else {
-        None
-    };
-
-    validator_responses_by_pubkey(&ctx.beacon_chain, state_root_opt, validator_pubkeys)
-}
-
-/// HTTP handler to return all validators, each as a `ValidatorResponse`.
-pub fn get_all_validators<T: BeaconChainTypes>(
-    req: Request<Vec<u8>>,
-    ctx: Arc<Context<T>>,
-) -> Result<Vec<ValidatorResponse>, ApiError> {
-    let query = UrlQuery::from_request(&req)?;
-
-    let state_root_opt = if let Some((_key, value)) = query.first_of_opt(&["state_root"]) {
-        Some(parse_root(&value)?)
-    } else {
-        None
-    };
-
-    let mut state = get_state_from_root_opt(&ctx.beacon_chain, state_root_opt)?;
-
-    let validators = state.validators.clone();
-    validators
-        .iter()
-        .map(|validator| validator_response_by_pubkey(&mut state, validator.pubkey.clone()))
-        .collect::<Result<Vec<_>, _>>()
-}
-
-/// HTTP handler to return all active validators, each as a `ValidatorResponse`.
-pub fn get_active_validators<T: BeaconChainTypes>(
-    req: Request<Vec<u8>>,
-    ctx: Arc<Context<T>>,
-) -> Result<Vec<ValidatorResponse>, ApiError> {
-    let query = UrlQuery::from_request(&req)?;
-
-    let state_root_opt = if let Some((_key, value)) = query.first_of_opt(&["state_root"]) {
-        Some(parse_root(&value)?)
-    } else {
-        None
-    };
-
-    let mut state = get_state_from_root_opt(&ctx.beacon_chain, state_root_opt)?;
-
-    let validators = state.validators.clone();
-    let current_epoch = state.current_epoch();
-
-    validators
-        .iter()
-        .filter(|validator| validator.is_active_at(current_epoch))
-        .map(|validator| validator_response_by_pubkey(&mut state, validator.pubkey.clone()))
-        .collect::<Result<Vec<_>, _>>()
-}
-
-/// HTTP handler to which accepts a `ValidatorRequest` and returns a `ValidatorResponse` for
-/// each of the given `pubkeys`. When `state_root` is `None`, the canonical head is used.
-///
-/// This method allows for a basically unbounded list of `pubkeys`, where as the `get_validators`
-/// request is limited by the max number of pubkeys you can fit in a URL.
-pub fn post_validators<T: BeaconChainTypes>(
-    req: Request<Vec<u8>>,
-    ctx: Arc<Context<T>>,
-) -> Result<Vec<ValidatorResponse>, ApiError> {
-    serde_json::from_slice::<ValidatorRequest>(&req.into_body())
-        .map_err(|e| {
-            ApiError::BadRequest(format!(
-                "Unable to parse JSON into ValidatorRequest: {:?}",
-                e
-            ))
-        })
-        .and_then(|bulk_request| {
-            validator_responses_by_pubkey(
-                &ctx.beacon_chain,
-                bulk_request.state_root,
-                bulk_request.pubkeys,
-            )
-        })
-}
-
-/// Returns either the state given by `state_root_opt`, or the canonical head state if it is
-/// `None`.
-fn get_state_from_root_opt<T: BeaconChainTypes>(
-    beacon_chain: &BeaconChain<T>,
-    state_root_opt: Option<Hash256>,
-) -> Result<BeaconState<T::EthSpec>, ApiError> {
-    if let Some(state_root) = state_root_opt {
-        beacon_chain
-            .get_state(&state_root, None)
-            .map_err(|e| {
-                ApiError::ServerError(format!(
-                    "Database error when reading state root {}: {:?}",
-                    state_root, e
-                ))
-            })?
-            .ok_or_else(|| ApiError::NotFound(format!("No state exists with root: {}", state_root)))
-    } else {
-        Ok(beacon_chain.head()?.beacon_state)
-    }
-}
-
-/// Maps a vec of `validator_pubkey` to a vec of `ValidatorResponse`, using the state at the given
-/// `state_root`. If `state_root.is_none()`, uses the canonial head state.
-fn validator_responses_by_pubkey<T: BeaconChainTypes>(
-    beacon_chain: &BeaconChain<T>,
-    state_root_opt: Option<Hash256>,
-    validator_pubkeys: Vec<PublicKeyBytes>,
-) -> Result<Vec<ValidatorResponse>, ApiError> {
-    let mut state = get_state_from_root_opt(beacon_chain, state_root_opt)?;
-
-    validator_pubkeys
-        .into_iter()
-        .map(|validator_pubkey| validator_response_by_pubkey(&mut state, validator_pubkey))
-        .collect::<Result<Vec<_>, ApiError>>()
-}
-
-/// Maps a `validator_pubkey` to a `ValidatorResponse`, using the given state.
-///
-/// The provided `state` must have a fully up-to-date pubkey cache.
-fn validator_response_by_pubkey<E: EthSpec>(
-    state: &mut BeaconState<E>,
-    validator_pubkey: PublicKeyBytes,
-) -> Result<ValidatorResponse, ApiError> {
-    let validator_index_opt = state
-        .get_validator_index(&validator_pubkey)
-        .map_err(|e| ApiError::ServerError(format!("Unable to read pubkey cache: {:?}", e)))?;
-
-    if let Some(validator_index) = validator_index_opt {
-        let balance = state.balances.get(validator_index).ok_or_else(|| {
-            ApiError::ServerError(format!("Invalid balances index: {:?}", validator_index))
-        })?;
-
-        let validator = state
-            .validators
-            .get(validator_index)
-            .ok_or_else(|| {
-                ApiError::ServerError(format!("Invalid validator index: {:?}", validator_index))
-            })?
-            .clone();
-
-        Ok(ValidatorResponse {
-            pubkey: validator_pubkey,
-            validator_index: Some(validator_index),
-            balance: Some(*balance),
-            validator: Some(validator),
-        })
-    } else {
-        Ok(ValidatorResponse {
-            pubkey: validator_pubkey,
-            validator_index: None,
-            balance: None,
-            validator: None,
-        })
-    }
-}
-
-/// HTTP handler
-pub fn get_committees<T: BeaconChainTypes>(
-    req: Request<Vec<u8>>,
-    ctx: Arc<Context<T>>,
-) -> Result<Vec<Committee>, ApiError> {
-    let query = UrlQuery::from_request(&req)?;
-
-    let epoch = query.epoch()?;
-
-    let mut state =
-        get_state_for_epoch(&ctx.beacon_chain, epoch, StateSkipConfig::WithoutStateRoots)?;
-
-    let relative_epoch = RelativeEpoch::from_epoch(state.current_epoch(), epoch).map_err(|e| {
-        ApiError::ServerError(format!("Failed to get state suitable for epoch: {:?}", e))
-    })?;
-
-    state
-        .build_committee_cache(relative_epoch, &ctx.beacon_chain.spec)
-        .map_err(|e| ApiError::ServerError(format!("Unable to build committee cache: {:?}", e)))?;
-
-    Ok(state
-        .get_beacon_committees_at_epoch(relative_epoch)
-        .map_err(|e| ApiError::ServerError(format!("Unable to get all committees: {:?}", e)))?
-        .into_iter()
-        .map(|c| Committee {
-            slot: c.slot,
-            index: c.index,
-            committee: c.committee.to_vec(),
-        })
-        .collect::<Vec<_>>())
-}
-
-/// HTTP handler to return a `BeaconState` at a given `root` or `slot`.
-///
-/// Will not return a state if the request slot is in the future. Will return states higher than
-/// the current head by skipping slots.
-pub fn get_state<T: BeaconChainTypes>(
-    req: Request<Vec<u8>>,
-    ctx: Arc<Context<T>>,
-) -> Result<StateResponse<T::EthSpec>, ApiError> {
-    let head_state = ctx.beacon_chain.head()?.beacon_state;
-
-    let (key, value) = match UrlQuery::from_request(&req) {
-        Ok(query) => {
-            // We have *some* parameters, just check them.
-            let query_params = ["root", "slot"];
-            query.first_of(&query_params)?
-        }
-        Err(ApiError::BadRequest(_)) => {
-            // No parameters provided at all, use current slot.
-            (String::from("slot"), head_state.slot.to_string())
-        }
-        Err(e) => {
-            return Err(e);
-        }
-    };
-
-    let (root, state): (Hash256, BeaconState<T::EthSpec>) = match (key.as_ref(), value) {
-        ("slot", value) => state_at_slot(&ctx.beacon_chain, parse_slot(&value)?)?,
-        ("root", value) => {
-            let root = &parse_root(&value)?;
-
-            let state = ctx
-                .beacon_chain
-                .store
-                .get_state(root, None)?
-                .ok_or_else(|| ApiError::NotFound(format!("No state for root: {:?}", root)))?;
-
-            (*root, state)
-        }
-        _ => return Err(ApiError::ServerError("Unexpected query parameter".into())),
-    };
-
-    Ok(StateResponse {
-        root,
-        beacon_state: state,
-    })
-}
-
-/// HTTP handler to return a `BeaconState` root at a given `slot`.
-///
-/// Will not return a state if the request slot is in the future. Will return states higher than
-/// the current head by skipping slots.
-pub fn get_state_root<T: BeaconChainTypes>(
-    req: Request<Vec<u8>>,
-    ctx: Arc<Context<T>>,
-) -> Result<Hash256, ApiError> {
-    let slot_string = UrlQuery::from_request(&req)?.only_one("slot")?;
-    let slot = parse_slot(&slot_string)?;
-
-    state_root_at_slot(&ctx.beacon_chain, slot, StateSkipConfig::WithStateRoots)
-}
-
-/// HTTP handler to return a `BeaconState` at the genesis block.
-///
-/// This is an undocumented convenience method used during testing. For production, simply do a
-/// state request at slot 0.
-pub fn get_genesis_state<T: BeaconChainTypes>(
-    ctx: Arc<Context<T>>,
-) -> Result<BeaconState<T::EthSpec>, ApiError> {
-    state_at_slot(&ctx.beacon_chain, Slot::new(0)).map(|(_root, state)| state)
-}
-
-pub fn proposer_slashing<T: BeaconChainTypes>(
-    req: Request<Vec<u8>>,
-    ctx: Arc<Context<T>>,
-) -> Result<bool, ApiError> {
-    let body = req.into_body();
-
-    serde_json::from_slice::<ProposerSlashing>(&body)
-        .map_err(|e| format!("Unable to parse JSON into ProposerSlashing: {:?}", e))
-        .and_then(move |proposer_slashing| {
-            if ctx.beacon_chain.eth1_chain.is_some() {
-                let obs_outcome = ctx
-                    .beacon_chain
-                    .verify_proposer_slashing_for_gossip(proposer_slashing)
-                    .map_err(|e| format!("Error while verifying proposer slashing: {:?}", e))?;
-                if let ObservationOutcome::New(verified_proposer_slashing) = obs_outcome {
-                    ctx.beacon_chain
-                        .import_proposer_slashing(verified_proposer_slashing);
-                    Ok(())
-                } else {
-                    Err("Proposer slashing for that validator index already known".into())
-                }
-            } else {
-                Err("Cannot insert proposer slashing on node without Eth1 connection.".to_string())
-            }
-        })
-        .map_err(ApiError::BadRequest)?;
-
-    Ok(true)
-}
-
-pub fn attester_slashing<T: BeaconChainTypes>(
-    req: Request<Vec<u8>>,
-    ctx: Arc<Context<T>>,
-) -> Result<bool, ApiError> {
-    let body = req.into_body();
-    serde_json::from_slice::<AttesterSlashing<T::EthSpec>>(&body)
-        .map_err(|e| {
-            ApiError::BadRequest(format!(
-                "Unable to parse JSON into AttesterSlashing: {:?}",
-                e
-            ))
-        })
-        .and_then(move |attester_slashing| {
-            if ctx.beacon_chain.eth1_chain.is_some() {
-                ctx.beacon_chain
-                    .verify_attester_slashing_for_gossip(attester_slashing)
-                    .map_err(|e| format!("Error while verifying attester slashing: {:?}", e))
-                    .and_then(|outcome| {
-                        if let ObservationOutcome::New(verified_attester_slashing) = outcome {
-                            ctx.beacon_chain
-                                .import_attester_slashing(verified_attester_slashing)
-                                .map_err(|e| {
-                                    format!("Error while importing attester slashing: {:?}", e)
-                                })
-                        } else {
-                            Err("Attester slashing only covers already slashed indices".to_string())
-                        }
-                    })
-                    .map_err(ApiError::BadRequest)
-            } else {
-                Err(ApiError::BadRequest(
-                    "Cannot insert attester slashing on node without Eth1 connection.".to_string(),
-                ))
-            }
-        })?;
-
-    Ok(true)
-}
@ -1,55 +0,0 @@
-use serde::{Deserialize, Serialize};
-use std::net::Ipv4Addr;
-
-/// Defines the encoding for the API.
-#[derive(Clone, Serialize, Deserialize, Copy)]
-pub enum ApiEncodingFormat {
-    JSON,
-    YAML,
-    SSZ,
-}
-
-impl ApiEncodingFormat {
-    pub fn get_content_type(&self) -> &str {
-        match self {
-            ApiEncodingFormat::JSON => "application/json",
-            ApiEncodingFormat::YAML => "application/yaml",
-            ApiEncodingFormat::SSZ => "application/ssz",
-        }
-    }
-}
-
-impl From<&str> for ApiEncodingFormat {
-    fn from(f: &str) -> ApiEncodingFormat {
-        match f {
-            "application/yaml" => ApiEncodingFormat::YAML,
-            "application/ssz" => ApiEncodingFormat::SSZ,
-            _ => ApiEncodingFormat::JSON,
-        }
-    }
-}
-
-/// HTTP REST API Configuration
-#[derive(Debug, Clone, Serialize, Deserialize)]
-pub struct Config {
-    /// Enable the REST API server.
-    pub enabled: bool,
-    /// The IPv4 address the REST API HTTP server will listen on.
-    pub listen_address: Ipv4Addr,
-    /// The port the REST API HTTP server will listen on.
-    pub port: u16,
-    /// If something else than "", a 'Access-Control-Allow-Origin' header will be present in
-    /// responses. Put *, to allow any origin.
-    pub allow_origin: String,
-}
-
-impl Default for Config {
-    fn default() -> Self {
-        Config {
-            enabled: false,
-            listen_address: Ipv4Addr::new(127, 0, 0, 1),
-            port: 5052,
-            allow_origin: "".to_string(),
-        }
-    }
-}
@ -1,126 +0,0 @@
-use crate::helpers::*;
-use crate::{ApiError, Context, UrlQuery};
-use beacon_chain::BeaconChainTypes;
-use hyper::Request;
-use rest_types::{IndividualVotesRequest, IndividualVotesResponse};
-use serde::{Deserialize, Serialize};
-use ssz_derive::{Decode, Encode};
-use state_processing::per_epoch_processing::{TotalBalances, ValidatorStatuses};
-use std::sync::Arc;
-use types::EthSpec;
-
-/// The results of validators voting during an epoch.
-///
-/// Provides information about the current and previous epochs.
-#[derive(Serialize, Deserialize, Encode, Decode)]
-pub struct VoteCount {
-    /// The total effective balance of all active validators during the _current_ epoch.
-    pub current_epoch_active_gwei: u64,
-    /// The total effective balance of all active validators during the _previous_ epoch.
-    pub previous_epoch_active_gwei: u64,
-    /// The total effective balance of all validators who attested during the _current_ epoch.
-    pub current_epoch_attesting_gwei: u64,
-    /// The total effective balance of all validators who attested during the _current_ epoch and
-    /// agreed with the state about the beacon block at the first slot of the _current_ epoch.
-    pub current_epoch_target_attesting_gwei: u64,
-    /// The total effective balance of all validators who attested during the _previous_ epoch.
-    pub previous_epoch_attesting_gwei: u64,
-    /// The total effective balance of all validators who attested during the _previous_ epoch and
-    /// agreed with the state about the beacon block at the first slot of the _previous_ epoch.
-    pub previous_epoch_target_attesting_gwei: u64,
-    /// The total effective balance of all validators who attested during the _previous_ epoch and
-    /// agreed with the state about the beacon block at the time of attestation.
-    pub previous_epoch_head_attesting_gwei: u64,
-}
-
-impl Into<VoteCount> for TotalBalances {
-    fn into(self) -> VoteCount {
-        VoteCount {
-            current_epoch_active_gwei: self.current_epoch(),
-            previous_epoch_active_gwei: self.previous_epoch(),
-            current_epoch_attesting_gwei: self.current_epoch_attesters(),
-            current_epoch_target_attesting_gwei: self.current_epoch_target_attesters(),
-            previous_epoch_attesting_gwei: self.previous_epoch_attesters(),
-            previous_epoch_target_attesting_gwei: self.previous_epoch_target_attesters(),
-            previous_epoch_head_attesting_gwei: self.previous_epoch_head_attesters(),
-        }
-    }
-}
-
-/// HTTP handler return a `VoteCount` for some given `Epoch`.
-pub fn get_vote_count<T: BeaconChainTypes>(
-    req: Request<Vec<u8>>,
-    ctx: Arc<Context<T>>,
-) -> Result<VoteCount, ApiError> {
-    let query = UrlQuery::from_request(&req)?;
-
-    let epoch = query.epoch()?;
-    // This is the last slot of the given epoch (one prior to the first slot of the next epoch).
-    let target_slot = (epoch + 1).start_slot(T::EthSpec::slots_per_epoch()) - 1;
-
-    let (_root, state) = state_at_slot(&ctx.beacon_chain, target_slot)?;
-    let spec = &ctx.beacon_chain.spec;
-
-    let mut validator_statuses = ValidatorStatuses::new(&state, spec)?;
-    validator_statuses.process_attestations(&state, spec)?;
-
-    Ok(validator_statuses.total_balances.into())
-}
-
-pub fn post_individual_votes<T: BeaconChainTypes>(
-    req: Request<Vec<u8>>,
-    ctx: Arc<Context<T>>,
-) -> Result<Vec<IndividualVotesResponse>, ApiError> {
-    let body = req.into_body();
-
-    serde_json::from_slice::<IndividualVotesRequest>(&body)
-        .map_err(|e| {
-            ApiError::BadRequest(format!(
-                "Unable to parse JSON into ValidatorDutiesRequest: {:?}",
-                e
-            ))
-        })
-        .and_then(move |body| {
-            let epoch = body.epoch;
-
-            // This is the last slot of the given epoch (one prior to the first slot of the next epoch).
-            let target_slot = (epoch + 1).start_slot(T::EthSpec::slots_per_epoch()) - 1;
-
-            let (_root, mut state) = state_at_slot(&ctx.beacon_chain, target_slot)?;
-            let spec = &ctx.beacon_chain.spec;
-
-            let mut validator_statuses = ValidatorStatuses::new(&state, spec)?;
-            validator_statuses.process_attestations(&state, spec)?;
-
-            body.pubkeys
-                .into_iter()
-                .map(|pubkey| {
-                    let validator_index_opt = state.get_validator_index(&pubkey).map_err(|e| {
-                        ApiError::ServerError(format!("Unable to read pubkey cache: {:?}", e))
-                    })?;
-
-                    if let Some(validator_index) = validator_index_opt {
-                        let vote = validator_statuses
-                            .statuses
-                            .get(validator_index)
-                            .cloned()
-                            .map(Into::into);
-
-                        Ok(IndividualVotesResponse {
-                            epoch,
-                            pubkey,
-                            validator_index: Some(validator_index),
-                            vote,
-                        })
-                    } else {
-                        Ok(IndividualVotesResponse {
-                            epoch,
-                            pubkey,
-                            validator_index: None,
-                            vote: None,
-                        })
-                    }
-                })
-                .collect::<Result<Vec<_>, _>>()
-        })
-}
@ -1,260 +0,0 @@
-use crate::{ApiError, NetworkChannel};
-use beacon_chain::{BeaconChain, BeaconChainTypes, StateSkipConfig};
-use bls::PublicKeyBytes;
-use eth2_libp2p::PubsubMessage;
-use itertools::process_results;
-use network::NetworkMessage;
-use ssz::Decode;
-use store::iter::AncestorIter;
-use types::{
-    BeaconState, CommitteeIndex, Epoch, EthSpec, Hash256, RelativeEpoch, SignedBeaconBlock, Slot,
-};
-
-/// Parse a slot.
-///
-/// E.g., `"1234"`
-pub fn parse_slot(string: &str) -> Result<Slot, ApiError> {
-    string
-        .parse::<u64>()
-        .map(Slot::from)
-        .map_err(|e| ApiError::BadRequest(format!("Unable to parse slot: {:?}", e)))
-}
-
-/// Parse an epoch.
-///
-/// E.g., `"13"`
-pub fn parse_epoch(string: &str) -> Result<Epoch, ApiError> {
-    string
-        .parse::<u64>()
-        .map(Epoch::from)
-        .map_err(|e| ApiError::BadRequest(format!("Unable to parse epoch: {:?}", e)))
-}
-
-/// Parse a CommitteeIndex.
-///
-/// E.g., `"18"`
-pub fn parse_committee_index(string: &str) -> Result<CommitteeIndex, ApiError> {
-    string
-        .parse::<CommitteeIndex>()
-        .map_err(|e| ApiError::BadRequest(format!("Unable to parse committee index: {:?}", e)))
-}
-
-/// Parse an SSZ object from some hex-encoded bytes.
-///
-/// E.g., A signature is `"0x0000000000000000000000000000000000000000000000000000000000000000"`
-pub fn parse_hex_ssz_bytes<T: Decode>(string: &str) -> Result<T, ApiError> {
-    const PREFIX: &str = "0x";
-
-    if string.starts_with(PREFIX) {
-        let trimmed = string.trim_start_matches(PREFIX);
-        let bytes = hex::decode(trimmed)
-            .map_err(|e| ApiError::BadRequest(format!("Unable to parse SSZ hex: {:?}", e)))?;
-        T::from_ssz_bytes(&bytes)
-            .map_err(|e| ApiError::BadRequest(format!("Unable to parse SSZ bytes: {:?}", e)))
-    } else {
-        Err(ApiError::BadRequest(
-            "Hex bytes must have a 0x prefix".to_string(),
-        ))
-    }
-}
-
-/// Parse a root from a `0x` prefixed string.
-///
-/// E.g., `"0x0000000000000000000000000000000000000000000000000000000000000000"`
-pub fn parse_root(string: &str) -> Result<Hash256, ApiError> {
-    const PREFIX: &str = "0x";
-
-    if string.starts_with(PREFIX) {
-        let trimmed = string.trim_start_matches(PREFIX);
-        trimmed
-            .parse()
-            .map_err(|e| ApiError::BadRequest(format!("Unable to parse root: {:?}", e)))
-    } else {
-        Err(ApiError::BadRequest(
-            "Root must have a 0x prefix".to_string(),
-        ))
-    }
-}
-
-/// Parse a PublicKey from a `0x` prefixed hex string
-pub fn parse_pubkey_bytes(string: &str) -> Result<PublicKeyBytes, ApiError> {
-    const PREFIX: &str = "0x";
-    if string.starts_with(PREFIX) {
-        let pubkey_bytes = hex::decode(string.trim_start_matches(PREFIX))
-            .map_err(|e| ApiError::BadRequest(format!("Invalid hex string: {:?}", e)))?;
-        let pubkey = PublicKeyBytes::deserialize(pubkey_bytes.as_slice()).map_err(|e| {
-            ApiError::BadRequest(format!("Unable to deserialize public key: {:?}.", e))
-        })?;
-        Ok(pubkey)
-    } else {
-        Err(ApiError::BadRequest(
-            "Public key must have a 0x prefix".to_string(),
-        ))
-    }
-}
-
-/// Returns the root of the `SignedBeaconBlock` in the canonical chain of `beacon_chain` at the given
-/// `slot`, if possible.
-///
-/// May return a root for a previous slot, in the case of skip slots.
-pub fn block_root_at_slot<T: BeaconChainTypes>(
-    beacon_chain: &BeaconChain<T>,
-    target: Slot,
-) -> Result<Option<Hash256>, ApiError> {
-    Ok(process_results(
-        beacon_chain.rev_iter_block_roots()?,
-        |iter| {
-            iter.take_while(|(_, slot)| *slot >= target)
-                .find(|(_, slot)| *slot == target)
-                .map(|(root, _)| root)
-        },
-    )?)
-}
-
-/// Returns a `BeaconState` and it's root in the canonical chain of `beacon_chain` at the given
-/// `slot`, if possible.
-///
-/// Will not return a state if the request slot is in the future. Will return states higher than
-/// the current head by skipping slots.
-pub fn state_at_slot<T: BeaconChainTypes>(
-    beacon_chain: &BeaconChain<T>,
-    slot: Slot,
-) -> Result<(Hash256, BeaconState<T::EthSpec>), ApiError> {
-    let head = beacon_chain.head()?;
-
-    if head.beacon_state.slot == slot {
-        Ok((head.beacon_state_root, head.beacon_state))
-    } else {
-        let root = state_root_at_slot(beacon_chain, slot, StateSkipConfig::WithStateRoots)?;
-
-        let state: BeaconState<T::EthSpec> = beacon_chain
-            .store
-            .get_state(&root, Some(slot))?
-            .ok_or_else(|| ApiError::NotFound(format!("Unable to find state at root {}", root)))?;
-
-        Ok((root, state))
-    }
-}
-
-/// Returns the root of the `BeaconState` in the canonical chain of `beacon_chain` at the given
-/// `slot`, if possible.
-///
-/// Will not return a state root if the request slot is in the future. Will return state roots
-/// higher than the current head by skipping slots.
-pub fn state_root_at_slot<T: BeaconChainTypes>(
-    beacon_chain: &BeaconChain<T>,
-    slot: Slot,
-    config: StateSkipConfig,
-) -> Result<Hash256, ApiError> {
-    let head_state = &beacon_chain.head()?.beacon_state;
-    let current_slot = beacon_chain
-        .slot()
-        .map_err(|_| ApiError::ServerError("Unable to read slot clock".to_string()))?;
-
-    // There are four scenarios when obtaining a state for a given slot:
-    //
-    // 1. The request slot is in the future.
-    // 2. The request slot is the same as the best block (head) slot.
-    // 3. The request slot is prior to the head slot.
-    // 4. The request slot is later than the head slot.
-    if current_slot < slot {
-        // 1. The request slot is in the future. Reject the request.
-        //
-        // We could actually speculate about future state roots by skipping slots, however that's
-        // likely to cause confusion for API users.
-        Err(ApiError::BadRequest(format!(
-            "Requested slot {} is past the current slot {}",
-            slot, current_slot
-        )))
-    } else if head_state.slot == slot {
-        // 2. The request slot is the same as the best block (head) slot.
-        //
-        // The head state root is stored in memory, return a reference.
-        Ok(beacon_chain.head()?.beacon_state_root)
-    } else if head_state.slot > slot {
-        // 3. The request slot is prior to the head slot.
-        //
-        // Iterate through the state roots on the head state to find the root for that
-        // slot. Once the root is found, load it from the database.
-        process_results(
-            head_state
-                .try_iter_ancestor_roots(beacon_chain.store.clone())
-                .ok_or_else(|| {
-                    ApiError::ServerError("Failed to create roots iterator".to_string())
-                })?,
-            |mut iter| iter.find(|(_, s)| *s == slot).map(|(root, _)| root),
-        )?
-        .ok_or_else(|| ApiError::NotFound(format!("Unable to find state at slot {}", slot)))
-    } else {
-        // 4. The request slot is later than the head slot.
-        //
-        // Use `per_slot_processing` to advance the head state to the present slot,
-        // assuming that all slots do not contain a block (i.e., they are skipped slots).
-        let mut state = beacon_chain.head()?.beacon_state;
-        let spec = &T::EthSpec::default_spec();
-
-        let skip_state_root = match config {
-            StateSkipConfig::WithStateRoots => None,
-            StateSkipConfig::WithoutStateRoots => Some(Hash256::zero()),
-        };
-
-        for _ in state.slot.as_u64()..slot.as_u64() {
-            // Ensure the next epoch state caches are built in case of an epoch transition.
-            state.build_committee_cache(RelativeEpoch::Next, spec)?;
-
-            state_processing::per_slot_processing(&mut state, skip_state_root, spec)?;
-        }
-
-        // Note: this is an expensive operation. Once the tree hash cache is implement it may be
-        // used here.
-        Ok(state.canonical_root())
-    }
-}
-
-pub fn publish_beacon_block_to_network<T: BeaconChainTypes + 'static>(
-    chan: &NetworkChannel<T::EthSpec>,
-    block: SignedBeaconBlock<T::EthSpec>,
-) -> Result<(), ApiError> {
-    // send the block via SSZ encoding
-    let messages = vec![PubsubMessage::BeaconBlock(Box::new(block))];
-
-    // Publish the block to the p2p network via gossipsub.
-    if let Err(e) = chan.send(NetworkMessage::Publish { messages }) {
-        return Err(ApiError::ServerError(format!(
-            "Unable to send new block to network: {:?}",
-            e
-        )));
-    }
-
-    Ok(())
-}
-
-#[cfg(test)]
-mod test {
-    use super::*;
-
-    #[test]
-    fn parse_root_works() {
-        assert_eq!(
-            parse_root("0x0000000000000000000000000000000000000000000000000000000000000000"),
-            Ok(Hash256::zero())
-        );
-        assert_eq!(
-            parse_root("0x000000000000000000000000000000000000000000000000000000000000002a"),
-            Ok(Hash256::from_low_u64_be(42))
-        );
-        assert!(
-            parse_root("0000000000000000000000000000000000000000000000000000000000000042").is_err()
-        );
-        assert!(parse_root("0x").is_err());
-        assert!(parse_root("0x00").is_err());
-    }
-
-    #[test]
-    fn parse_slot_works() {
-        assert_eq!(parse_slot("0"), Ok(Slot::new(0)));
-        assert_eq!(parse_slot("42"), Ok(Slot::new(42)));
-        assert_eq!(parse_slot("10000000"), Ok(Slot::new(10_000_000)));
-        assert!(parse_slot("cats").is_err());
-    }
-}
@ -1,127 +0,0 @@
-#[macro_use]
-extern crate lazy_static;
-mod router;
-extern crate network as client_network;
-
-mod beacon;
-pub mod config;
-mod consensus;
-mod helpers;
-mod lighthouse;
-mod metrics;
-mod node;
-mod url_query;
-mod validator;
-
-use beacon_chain::{BeaconChain, BeaconChainTypes};
-use bus::Bus;
-use client_network::NetworkMessage;
-pub use config::ApiEncodingFormat;
-use eth2_config::Eth2Config;
-use eth2_libp2p::NetworkGlobals;
-use futures::future::TryFutureExt;
-use hyper::server::conn::AddrStream;
-use hyper::service::{make_service_fn, service_fn};
-use hyper::{Body, Request, Server};
-use parking_lot::Mutex;
-use rest_types::ApiError;
-use slog::{info, warn};
-use std::net::SocketAddr;
-use std::path::PathBuf;
-use std::sync::Arc;
-use tokio::sync::mpsc;
-use types::SignedBeaconBlockHash;
-use url_query::UrlQuery;
-
-pub use crate::helpers::parse_pubkey_bytes;
-pub use config::Config;
-pub use router::Context;
-
-pub type NetworkChannel<T> = mpsc::UnboundedSender<NetworkMessage<T>>;
-
-pub struct NetworkInfo<T: BeaconChainTypes> {
-    pub network_globals: Arc<NetworkGlobals<T::EthSpec>>,
-    pub network_chan: NetworkChannel<T::EthSpec>,
-}
-
-// Allowing more than 7 arguments.
-#[allow(clippy::too_many_arguments)]
-pub fn start_server<T: BeaconChainTypes>(
-    executor: environment::TaskExecutor,
-    config: &Config,
-    beacon_chain: Arc<BeaconChain<T>>,
-    network_info: NetworkInfo<T>,
-    db_path: PathBuf,
-    freezer_db_path: PathBuf,
-    eth2_config: Eth2Config,
-    events: Arc<Mutex<Bus<SignedBeaconBlockHash>>>,
-) -> Result<SocketAddr, hyper::Error> {
-    let log = executor.log();
-    let eth2_config = Arc::new(eth2_config);
-
-    let context = Arc::new(Context {
-        executor: executor.clone(),
-        config: config.clone(),
-        beacon_chain,
-        network_globals: network_info.network_globals.clone(),
-        network_chan: network_info.network_chan,
-        eth2_config,
-        log: log.clone(),
-        db_path,
-        freezer_db_path,
-        events,
-    });
-
-    // Define the function that will build the request handler.
-    let make_service = make_service_fn(move |_socket: &AddrStream| {
-        let ctx = context.clone();
-
-        async move {
-            Ok::<_, hyper::Error>(service_fn(move |req: Request<Body>| {
-                router::on_http_request(req, ctx.clone())
-            }))
-        }
-    });
-
-    let bind_addr = (config.listen_address, config.port).into();
-    let server = Server::bind(&bind_addr).serve(make_service);
-
-    // Determine the address the server is actually listening on.
-    //
-    // This may be different to `bind_addr` if bind port was 0 (this allows the OS to choose a free
-    // port).
-    let actual_listen_addr = server.local_addr();
-
-    // Build a channel to kill the HTTP server.
-    let exit = executor.exit();
-    let inner_log = log.clone();
-    let server_exit = async move {
-        let _ = exit.await;
-        info!(inner_log, "HTTP service shutdown");
-    };
-
-    // Configure the `hyper` server to gracefully shutdown when the shutdown channel is triggered.
-    let inner_log = log.clone();
-    let server_future = server
-        .with_graceful_shutdown(async {
-            server_exit.await;
-        })
-        .map_err(move |e| {
-            warn!(
-                inner_log,
-                "HTTP server failed to start, Unable to bind"; "address" => format!("{:?}", e)
-            )
-        })
-        .unwrap_or_else(|_| ());
-
-    info!(
-        log,
-        "HTTP API started";
-        "address" => format!("{}", actual_listen_addr.ip()),
-        "port" => actual_listen_addr.port(),
-    );
-
-    executor.spawn_without_exit(server_future, "http");
-
-    Ok(actual_listen_addr)
-}
@@ -1,48 +0,0 @@
//! This contains a collection of lighthouse specific HTTP endpoints.

use crate::{ApiError, Context};
use beacon_chain::BeaconChainTypes;
use eth2_libp2p::PeerInfo;
use serde::Serialize;
use std::sync::Arc;
use types::EthSpec;

/// Returns all known peers and corresponding information.
pub fn peers<T: BeaconChainTypes>(ctx: Arc<Context<T>>) -> Result<Vec<Peer<T::EthSpec>>, ApiError> {
    Ok(ctx
        .network_globals
        .peers
        .read()
        .peers()
        .map(|(peer_id, peer_info)| Peer {
            peer_id: peer_id.to_string(),
            peer_info: peer_info.clone(),
        })
        .collect())
}

/// Returns all known connected peers and their corresponding information.
pub fn connected_peers<T: BeaconChainTypes>(
    ctx: Arc<Context<T>>,
) -> Result<Vec<Peer<T::EthSpec>>, ApiError> {
    Ok(ctx
        .network_globals
        .peers
        .read()
        .connected_peers()
        .map(|(peer_id, peer_info)| Peer {
            peer_id: peer_id.to_string(),
            peer_info: peer_info.clone(),
        })
        .collect())
}

/// Information returned by `peers` and `connected_peers`.
#[derive(Clone, Debug, Serialize)]
#[serde(bound = "T: EthSpec")]
pub struct Peer<T: EthSpec> {
    /// The peer's ID.
    peer_id: String,
    /// The `PeerInfo` associated with the peer.
    peer_info: PeerInfo<T>,
}
@@ -1,39 +0,0 @@
use crate::{ApiError, Context};
use beacon_chain::BeaconChainTypes;
use eth2_libp2p::types::SyncState;
use rest_types::{SyncingResponse, SyncingStatus};
use std::sync::Arc;
use types::Slot;

/// Returns a syncing status.
pub fn syncing<T: BeaconChainTypes>(ctx: Arc<Context<T>>) -> Result<SyncingResponse, ApiError> {
    let current_slot = ctx
        .beacon_chain
        .head_info()
        .map_err(|e| ApiError::ServerError(format!("Unable to read head slot: {:?}", e)))?
        .slot;

    let (starting_slot, highest_slot) = match ctx.network_globals.sync_state() {
        SyncState::SyncingFinalized {
            start_slot,
            head_slot,
            ..
        }
        | SyncState::SyncingHead {
            start_slot,
            head_slot,
        } => (start_slot, head_slot),
        SyncState::Synced | SyncState::Stalled => (Slot::from(0u64), current_slot),
    };

    let sync_status = SyncingStatus {
        starting_slot,
        current_slot,
        highest_slot,
    };

    Ok(SyncingResponse {
        is_syncing: ctx.network_globals.is_syncing(),
        sync_status,
    })
}
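For reference, a minimal sketch of the JSON shape this handler produces for a fully synced node. The field names mirror the `SyncingResponse` and `SyncingStatus` struct literals above; the exact wire encoding (the types live in `rest_types`, and how `Slot` serializes is an assumption) may differ.

use serde_json::json;

fn main() {
    // Hypothetical /node/syncing response body, assuming default serde field
    // names and numeric slot encoding.
    let example = json!({
        "is_syncing": false,
        "sync_status": {
            "starting_slot": 0,
            "current_slot": 12345,
            "highest_slot": 12345
        }
    });
    println!("{}", example);
}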
@@ -1,322 +0,0 @@
use crate::{
    beacon, config::Config, consensus, lighthouse, metrics, node, validator, NetworkChannel,
};
use beacon_chain::{BeaconChain, BeaconChainTypes};
use bus::Bus;
use environment::TaskExecutor;
use eth2_config::Eth2Config;
use eth2_libp2p::{NetworkGlobals, PeerId};
use hyper::header::HeaderValue;
use hyper::{Body, Method, Request, Response};
use lighthouse_version::version_with_platform;
use operation_pool::PersistedOperationPool;
use parking_lot::Mutex;
use rest_types::{ApiError, Handler, Health};
use slog::debug;
use std::path::PathBuf;
use std::sync::Arc;
use std::time::Instant;
use types::{EthSpec, SignedBeaconBlockHash};

pub struct Context<T: BeaconChainTypes> {
    pub executor: TaskExecutor,
    pub config: Config,
    pub beacon_chain: Arc<BeaconChain<T>>,
    pub network_globals: Arc<NetworkGlobals<T::EthSpec>>,
    pub network_chan: NetworkChannel<T::EthSpec>,
    pub eth2_config: Arc<Eth2Config>,
    pub log: slog::Logger,
    pub db_path: PathBuf,
    pub freezer_db_path: PathBuf,
    pub events: Arc<Mutex<Bus<SignedBeaconBlockHash>>>,
}

pub async fn on_http_request<T: BeaconChainTypes>(
    req: Request<Body>,
    ctx: Arc<Context<T>>,
) -> Result<Response<Body>, ApiError> {
    let path = req.uri().path().to_string();

    let _timer = metrics::start_timer_vec(&metrics::BEACON_HTTP_API_TIMES_TOTAL, &[&path]);
    metrics::inc_counter_vec(&metrics::BEACON_HTTP_API_REQUESTS_TOTAL, &[&path]);

    let received_instant = Instant::now();
    let log = ctx.log.clone();
    let allow_origin = ctx.config.allow_origin.clone();

    match route(req, ctx).await {
        Ok(mut response) => {
            metrics::inc_counter_vec(&metrics::BEACON_HTTP_API_SUCCESS_TOTAL, &[&path]);

            if allow_origin != "" {
                let headers = response.headers_mut();
                headers.insert(
                    hyper::header::ACCESS_CONTROL_ALLOW_ORIGIN,
                    HeaderValue::from_str(&allow_origin)?,
                );
                headers.insert(hyper::header::VARY, HeaderValue::from_static("Origin"));
            }

            debug!(
                log,
                "HTTP API request successful";
                "path" => path,
                "duration_ms" => Instant::now().duration_since(received_instant).as_millis()
            );
            Ok(response)
        }

        Err(error) => {
            metrics::inc_counter_vec(&metrics::BEACON_HTTP_API_ERROR_TOTAL, &[&path]);

            debug!(
                log,
                "HTTP API request failure";
                "path" => path,
                "duration_ms" => Instant::now().duration_since(received_instant).as_millis()
            );
            Ok(error.into())
        }
    }
}

async fn route<T: BeaconChainTypes>(
    req: Request<Body>,
    ctx: Arc<Context<T>>,
) -> Result<Response<Body>, ApiError> {
    let path = req.uri().path().to_string();
    let ctx = ctx.clone();
    let method = req.method().clone();
    let executor = ctx.executor.clone();
    let handler = Handler::new(req, ctx, executor)?;

    match (method, path.as_ref()) {
        (Method::GET, "/node/version") => handler
            .static_value(version_with_platform())
            .await?
            .serde_encodings(),
        (Method::GET, "/node/health") => handler
            .static_value(Health::observe().map_err(ApiError::ServerError)?)
            .await?
            .serde_encodings(),
        (Method::GET, "/node/syncing") => handler
            .allow_body()
            .in_blocking_task(|_, ctx| node::syncing(ctx))
            .await?
            .serde_encodings(),
        (Method::GET, "/network/enr") => handler
            .in_core_task(|_, ctx| Ok(ctx.network_globals.local_enr().to_base64()))
            .await?
            .serde_encodings(),
        (Method::GET, "/network/peer_count") => handler
            .in_core_task(|_, ctx| Ok(ctx.network_globals.connected_peers()))
            .await?
            .serde_encodings(),
        (Method::GET, "/network/peer_id") => handler
            .in_core_task(|_, ctx| Ok(ctx.network_globals.local_peer_id().to_base58()))
            .await?
            .serde_encodings(),
        (Method::GET, "/network/peers") => handler
            .in_blocking_task(|_, ctx| {
                Ok(ctx
                    .network_globals
                    .peers
                    .read()
                    .connected_peer_ids()
                    .map(PeerId::to_string)
                    .collect::<Vec<_>>())
            })
            .await?
            .serde_encodings(),
        (Method::GET, "/network/listen_port") => handler
            .in_core_task(|_, ctx| Ok(ctx.network_globals.listen_port_tcp()))
            .await?
            .serde_encodings(),
        (Method::GET, "/network/listen_addresses") => handler
            .in_blocking_task(|_, ctx| Ok(ctx.network_globals.listen_multiaddrs()))
            .await?
            .serde_encodings(),
        (Method::GET, "/beacon/head") => handler
            .in_blocking_task(|_, ctx| beacon::get_head(ctx))
            .await?
            .all_encodings(),
        (Method::GET, "/beacon/heads") => handler
            .in_blocking_task(|_, ctx| Ok(beacon::get_heads(ctx)))
            .await?
            .all_encodings(),
        (Method::GET, "/beacon/block") => handler
            .in_blocking_task(beacon::get_block)
            .await?
            .all_encodings(),
        (Method::GET, "/beacon/block_root") => handler
            .in_blocking_task(beacon::get_block_root)
            .await?
            .all_encodings(),
        (Method::GET, "/beacon/fork") => handler
            .in_blocking_task(|_, ctx| Ok(ctx.beacon_chain.head_info()?.fork))
            .await?
            .all_encodings(),
        (Method::GET, "/beacon/fork/stream") => {
            handler.sse_stream(|_, ctx| beacon::stream_forks(ctx)).await
        }
        (Method::GET, "/beacon/genesis_time") => handler
            .in_blocking_task(|_, ctx| Ok(ctx.beacon_chain.head_info()?.genesis_time))
            .await?
            .all_encodings(),
        (Method::GET, "/beacon/genesis_validators_root") => handler
            .in_blocking_task(|_, ctx| Ok(ctx.beacon_chain.head_info()?.genesis_validators_root))
            .await?
            .all_encodings(),
        (Method::GET, "/beacon/validators") => handler
            .in_blocking_task(beacon::get_validators)
            .await?
            .all_encodings(),
        (Method::POST, "/beacon/validators") => handler
            .allow_body()
            .in_blocking_task(beacon::post_validators)
            .await?
            .all_encodings(),
        (Method::GET, "/beacon/validators/all") => handler
            .in_blocking_task(beacon::get_all_validators)
            .await?
            .all_encodings(),
        (Method::GET, "/beacon/validators/active") => handler
            .in_blocking_task(beacon::get_active_validators)
            .await?
            .all_encodings(),
        (Method::GET, "/beacon/state") => handler
            .in_blocking_task(beacon::get_state)
            .await?
            .all_encodings(),
        (Method::GET, "/beacon/state_root") => handler
            .in_blocking_task(beacon::get_state_root)
            .await?
            .all_encodings(),
        (Method::GET, "/beacon/state/genesis") => handler
            .in_blocking_task(|_, ctx| beacon::get_genesis_state(ctx))
            .await?
            .all_encodings(),
        (Method::GET, "/beacon/committees") => handler
            .in_blocking_task(beacon::get_committees)
            .await?
            .all_encodings(),
        (Method::POST, "/beacon/proposer_slashing") => handler
            .allow_body()
            .in_blocking_task(beacon::proposer_slashing)
            .await?
            .serde_encodings(),
        (Method::POST, "/beacon/attester_slashing") => handler
            .allow_body()
            .in_blocking_task(beacon::attester_slashing)
            .await?
            .serde_encodings(),
        (Method::POST, "/validator/duties") => handler
            .allow_body()
            .in_blocking_task(validator::post_validator_duties)
            .await?
            .serde_encodings(),
        (Method::POST, "/validator/subscribe") => handler
            .allow_body()
            .in_blocking_task(validator::post_validator_subscriptions)
            .await?
            .serde_encodings(),
        (Method::GET, "/validator/duties/all") => handler
            .in_blocking_task(validator::get_all_validator_duties)
            .await?
            .serde_encodings(),
        (Method::GET, "/validator/duties/active") => handler
            .in_blocking_task(validator::get_active_validator_duties)
            .await?
            .serde_encodings(),
        (Method::GET, "/validator/block") => handler
            .in_blocking_task(validator::get_new_beacon_block)
            .await?
            .serde_encodings(),
        (Method::POST, "/validator/block") => handler
            .allow_body()
            .in_blocking_task(validator::publish_beacon_block)
            .await?
            .serde_encodings(),
        (Method::GET, "/validator/attestation") => handler
            .in_blocking_task(validator::get_new_attestation)
            .await?
            .serde_encodings(),
        (Method::GET, "/validator/aggregate_attestation") => handler
            .in_blocking_task(validator::get_aggregate_attestation)
            .await?
            .serde_encodings(),
        (Method::POST, "/validator/attestations") => handler
            .allow_body()
            .in_blocking_task(validator::publish_attestations)
            .await?
            .serde_encodings(),
        (Method::POST, "/validator/aggregate_and_proofs") => handler
            .allow_body()
            .in_blocking_task(validator::publish_aggregate_and_proofs)
            .await?
            .serde_encodings(),
        (Method::GET, "/consensus/global_votes") => handler
            .allow_body()
            .in_blocking_task(consensus::get_vote_count)
            .await?
            .serde_encodings(),
        (Method::POST, "/consensus/individual_votes") => handler
            .allow_body()
            .in_blocking_task(consensus::post_individual_votes)
            .await?
            .serde_encodings(),
        (Method::GET, "/spec") => handler
            // TODO: this clone is not ideal.
            .in_blocking_task(|_, ctx| Ok(ctx.beacon_chain.spec.clone()))
            .await?
            .serde_encodings(),
        (Method::GET, "/spec/slots_per_epoch") => handler
            .static_value(T::EthSpec::slots_per_epoch())
            .await?
            .serde_encodings(),
        (Method::GET, "/spec/eth2_config") => handler
            // TODO: this clone is not ideal.
            .in_blocking_task(|_, ctx| Ok(ctx.eth2_config.as_ref().clone()))
            .await?
            .serde_encodings(),
        (Method::GET, "/advanced/fork_choice") => handler
            .in_blocking_task(|_, ctx| {
                Ok(ctx
                    .beacon_chain
                    .fork_choice
                    .read()
                    .proto_array()
                    .core_proto_array()
                    .clone())
            })
            .await?
            .serde_encodings(),
        (Method::GET, "/advanced/operation_pool") => handler
            .in_blocking_task(|_, ctx| {
                Ok(PersistedOperationPool::from_operation_pool(
                    &ctx.beacon_chain.op_pool,
                ))
            })
            .await?
            .serde_encodings(),
        (Method::GET, "/metrics") => handler
            .in_blocking_task(|_, ctx| metrics::get_prometheus(ctx))
            .await?
            .text_encoding(),
        (Method::GET, "/lighthouse/syncing") => handler
            .in_blocking_task(|_, ctx| Ok(ctx.network_globals.sync_state()))
            .await?
            .serde_encodings(),
        (Method::GET, "/lighthouse/peers") => handler
            .in_blocking_task(|_, ctx| lighthouse::peers(ctx))
            .await?
            .serde_encodings(),
        (Method::GET, "/lighthouse/connected_peers") => handler
            .in_blocking_task(|_, ctx| lighthouse::connected_peers(ctx))
            .await?
            .serde_encodings(),
        _ => Err(ApiError::NotFound(
            "Request path and/or method not found.".to_owned(),
        )),
    }
}
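As a usage illustration (not part of the original sources), a minimal client could exercise one of the simpler routes above. This sketch assumes a hyper 0.13-style client API and that the node listens on 127.0.0.1:5052; adjust to your --http-address/--http-port settings.

use hyper::{body, Client, Uri};

#[tokio::main]
async fn main() -> Result<(), Box<dyn std::error::Error>> {
    // Assumed listen address; not taken from the sources above.
    let uri: Uri = "http://127.0.0.1:5052/node/version".parse()?;
    let resp = Client::new().get(uri).await?;
    println!("status: {}", resp.status());
    let bytes = body::to_bytes(resp.into_body()).await?;
    println!("version: {}", String::from_utf8_lossy(&bytes));
    Ok(())
}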
@@ -1,166 +0,0 @@
use crate::helpers::{parse_committee_index, parse_epoch, parse_hex_ssz_bytes, parse_slot};
use crate::ApiError;
use hyper::Request;
use types::{AttestationData, CommitteeIndex, Epoch, Signature, Slot};

/// Provides handy functions for parsing the query parameters of a URL.
#[derive(Clone, Copy)]
pub struct UrlQuery<'a>(url::form_urlencoded::Parse<'a>);

impl<'a> UrlQuery<'a> {
    /// Instantiate from an existing `Request`.
    ///
    /// An absent query string is treated as empty; this never returns `Err`.
    pub fn from_request<T>(req: &'a Request<T>) -> Result<Self, ApiError> {
        let query_str = req.uri().query().unwrap_or_else(|| "");

        Ok(UrlQuery(url::form_urlencoded::parse(query_str.as_bytes())))
    }

    /// Returns the first `(key, value)` pair found where the `key` is in `keys`.
    ///
    /// If no match is found, an `InvalidQueryParams` error is returned.
    pub fn first_of(mut self, keys: &[&str]) -> Result<(String, String), ApiError> {
        self.0
            .find(|(key, _value)| keys.contains(&&**key))
            .map(|(key, value)| (key.into_owned(), value.into_owned()))
            .ok_or_else(|| {
                ApiError::BadRequest(format!(
                    "URL query must be valid and contain at least one of the following keys: {:?}",
                    keys
                ))
            })
    }

    /// Returns the first `(key, value)` pair found where the `key` is in `keys`, if any.
    ///
    /// Returns `None` if no match is found.
    pub fn first_of_opt(mut self, keys: &[&str]) -> Option<(String, String)> {
        self.0
            .find(|(key, _value)| keys.contains(&&**key))
            .map(|(key, value)| (key.into_owned(), value.into_owned()))
    }

    /// Returns the value for `key`, if and only if `key` is the only key present in the query
    /// parameters.
    pub fn only_one(self, key: &str) -> Result<String, ApiError> {
        let queries: Vec<_> = self
            .0
            .map(|(k, v)| (k.into_owned(), v.into_owned()))
            .collect();

        if queries.len() == 1 {
            let (first_key, first_value) = &queries[0]; // Must have 0 index if len is 1.
            if first_key == key {
                Ok(first_value.to_string())
            } else {
                Err(ApiError::BadRequest(format!(
                    "Only the {} query parameter is supported",
                    key
                )))
            }
        } else {
            Err(ApiError::BadRequest(format!(
                "Only one query parameter is allowed, {} supplied",
                queries.len()
            )))
        }
    }

    /// Returns a vector of all values associated with `key`.
    ///
    /// Returns an empty vector if no matches are found.
    pub fn all_of(self, key: &str) -> Result<Vec<String>, ApiError> {
        let queries: Vec<_> = self
            .0
            .filter_map(|(k, v)| {
                if k.eq(key) {
                    Some(v.into_owned())
                } else {
                    None
                }
            })
            .collect();
        Ok(queries)
    }

    /// Returns the value of the first occurrence of the `epoch` key.
    pub fn epoch(self) -> Result<Epoch, ApiError> {
        self.first_of(&["epoch"])
            .and_then(|(_key, value)| parse_epoch(&value))
    }

    /// Returns the value of the first occurrence of the `slot` key.
    pub fn slot(self) -> Result<Slot, ApiError> {
        self.first_of(&["slot"])
            .and_then(|(_key, value)| parse_slot(&value))
    }

    /// Returns the value of the first occurrence of the `committee_index` key.
    pub fn committee_index(self) -> Result<CommitteeIndex, ApiError> {
        self.first_of(&["committee_index"])
            .and_then(|(_key, value)| parse_committee_index(&value))
    }

    /// Returns the value of the first occurrence of the `randao_reveal` key.
    pub fn randao_reveal(self) -> Result<Signature, ApiError> {
        self.first_of(&["randao_reveal"])
            .and_then(|(_key, value)| parse_hex_ssz_bytes(&value))
    }

    /// Returns the value of the first occurrence of the `attestation_data` key.
    pub fn attestation_data(self) -> Result<AttestationData, ApiError> {
        self.first_of(&["attestation_data"])
            .and_then(|(_key, value)| parse_hex_ssz_bytes(&value))
    }
}

#[cfg(test)]
mod test {
    use super::*;

    #[test]
    fn only_one() {
        let get_result = |addr: &str, key: &str| -> Result<String, ApiError> {
            UrlQuery(url::Url::parse(addr).unwrap().query_pairs()).only_one(key)
        };

        assert_eq!(get_result("http://cat.io/?a=42", "a"), Ok("42".to_string()));
        assert!(get_result("http://cat.io/?a=42", "b").is_err());
        assert!(get_result("http://cat.io/?a=42&b=12", "a").is_err());
        assert!(get_result("http://cat.io/", "").is_err());
    }

    #[test]
    fn first_of() {
        let url = url::Url::parse("http://lighthouse.io/cats?a=42&b=12&c=100").unwrap();
        let get_query = || UrlQuery(url.query_pairs());

        assert_eq!(
            get_query().first_of(&["a"]),
            Ok(("a".to_string(), "42".to_string()))
        );
        assert_eq!(
            get_query().first_of(&["a", "b", "c"]),
            Ok(("a".to_string(), "42".to_string()))
        );
        assert_eq!(
            get_query().first_of(&["a", "a", "a"]),
            Ok(("a".to_string(), "42".to_string()))
        );
        assert_eq!(
            get_query().first_of(&["a", "b", "c"]),
            Ok(("a".to_string(), "42".to_string()))
        );
        assert_eq!(
            get_query().first_of(&["b", "c"]),
            Ok(("b".to_string(), "12".to_string()))
        );
        assert_eq!(
            get_query().first_of(&["c"]),
            Ok(("c".to_string(), "100".to_string()))
        );
        assert!(get_query().first_of(&["nothing"]).is_err());
    }
}
@@ -1,747 +0,0 @@
use crate::helpers::{parse_hex_ssz_bytes, publish_beacon_block_to_network};
use crate::{ApiError, Context, NetworkChannel, UrlQuery};
use beacon_chain::{
    attestation_verification::Error as AttnError, BeaconChain, BeaconChainError, BeaconChainTypes,
    BlockError, ForkChoiceError, StateSkipConfig,
};
use bls::PublicKeyBytes;
use eth2_libp2p::PubsubMessage;
use hyper::Request;
use network::NetworkMessage;
use rest_types::{ValidatorDutiesRequest, ValidatorDutyBytes, ValidatorSubscription};
use slog::{error, info, trace, warn, Logger};
use std::sync::Arc;
use types::beacon_state::EthSpec;
use types::{
    Attestation, AttestationData, BeaconBlock, BeaconState, Epoch, RelativeEpoch, SelectionProof,
    SignedAggregateAndProof, SignedBeaconBlock, SubnetId,
};

/// HTTP Handler to retrieve the duties for a set of validators during a particular epoch. This
/// method allows for collecting bulk sets of validator duties without risking exceeding the max
/// URL length with query pairs.
pub fn post_validator_duties<T: BeaconChainTypes>(
    req: Request<Vec<u8>>,
    ctx: Arc<Context<T>>,
) -> Result<Vec<ValidatorDutyBytes>, ApiError> {
    let body = req.into_body();

    serde_json::from_slice::<ValidatorDutiesRequest>(&body)
        .map_err(|e| {
            ApiError::BadRequest(format!(
                "Unable to parse JSON into ValidatorDutiesRequest: {:?}",
                e
            ))
        })
        .and_then(|bulk_request| {
            return_validator_duties(
                &ctx.beacon_chain.clone(),
                bulk_request.epoch,
                bulk_request.pubkeys.into_iter().map(Into::into).collect(),
            )
        })
}
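For illustration (not part of the original sources), the request body consumed above might be constructed as in the following sketch. The `epoch` and `pubkeys` field names mirror the `bulk_request` accesses in the handler; the hex encoding of the pubkeys is an assumption.

use serde_json::json;

fn main() {
    // Dummy 48-byte BLS pubkey, hex-encoded; a real request would carry actual
    // validator pubkeys.
    let pubkey = format!("0x{}", "ab".repeat(48));
    let body = json!({
        "epoch": 100,
        "pubkeys": [pubkey]
    });
    println!("{}", body);
}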

/// HTTP Handler to receive subscriptions for a set of validators. This allows the node to
/// organise peer discovery and topic subscription for known validators.
pub fn post_validator_subscriptions<T: BeaconChainTypes>(
    req: Request<Vec<u8>>,
    ctx: Arc<Context<T>>,
) -> Result<(), ApiError> {
    let body = req.into_body();

    serde_json::from_slice(&body)
        .map_err(|e| {
            ApiError::BadRequest(format!(
                "Unable to parse JSON into ValidatorSubscriptions: {:?}",
                e
            ))
        })
        .and_then(move |subscriptions: Vec<ValidatorSubscription>| {
            ctx.network_chan
                .send(NetworkMessage::Subscribe { subscriptions })
                .map_err(|e| {
                    ApiError::ServerError(format!(
                        "Unable to send subscriptions to the network: {:?}",
                        e
                    ))
                })?;
            Ok(())
        })
}
/// HTTP Handler to retrieve all validator duties for the given epoch.
pub fn get_all_validator_duties<T: BeaconChainTypes>(
    req: Request<Vec<u8>>,
    ctx: Arc<Context<T>>,
) -> Result<Vec<ValidatorDutyBytes>, ApiError> {
    let query = UrlQuery::from_request(&req)?;

    let epoch = query.epoch()?;

    let state = get_state_for_epoch(&ctx.beacon_chain, epoch, StateSkipConfig::WithoutStateRoots)?;

    let validator_pubkeys = state
        .validators
        .iter()
        .map(|validator| validator.pubkey.clone())
        .collect();

    return_validator_duties(&ctx.beacon_chain, epoch, validator_pubkeys)
}

/// HTTP Handler to retrieve all active validator duties for the given epoch.
pub fn get_active_validator_duties<T: BeaconChainTypes>(
    req: Request<Vec<u8>>,
    ctx: Arc<Context<T>>,
) -> Result<Vec<ValidatorDutyBytes>, ApiError> {
    let query = UrlQuery::from_request(&req)?;

    let epoch = query.epoch()?;

    let state = get_state_for_epoch(&ctx.beacon_chain, epoch, StateSkipConfig::WithoutStateRoots)?;

    let validator_pubkeys = state
        .validators
        .iter()
        .filter(|validator| validator.is_active_at(state.current_epoch()))
        .map(|validator| validator.pubkey.clone())
        .collect();

    return_validator_duties(&ctx.beacon_chain, epoch, validator_pubkeys)
}
/// Helper function to return the state that can be used to determine the duties for some `epoch`.
pub fn get_state_for_epoch<T: BeaconChainTypes>(
    beacon_chain: &BeaconChain<T>,
    epoch: Epoch,
    config: StateSkipConfig,
) -> Result<BeaconState<T::EthSpec>, ApiError> {
    let slots_per_epoch = T::EthSpec::slots_per_epoch();
    let head = beacon_chain.head()?;
    let current_epoch = beacon_chain.epoch()?;
    let head_epoch = head.beacon_state.current_epoch();

    if head_epoch == current_epoch && RelativeEpoch::from_epoch(current_epoch, epoch).is_ok() {
        Ok(head.beacon_state)
    } else {
        // If the epoch is ahead of the current epoch, then it should be a "next epoch" request
        // for attestation duties. So, go to the start slot of the epoch prior to that,
        // which should be just the next wall-clock epoch.
        let slot = if epoch > current_epoch {
            (epoch - 1).start_slot(slots_per_epoch)
        }
        // Otherwise, go to the start of the requested epoch.
        else {
            epoch.start_slot(slots_per_epoch)
        };

        beacon_chain.state_at_slot(slot, config).map_err(|e| {
            ApiError::ServerError(format!("Unable to load state for epoch {}: {:?}", epoch, e))
        })
    }
}
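To make the slot selection concrete, here is a standalone sketch (illustrative only, assuming the usual `Epoch`/`Slot` arithmetic from the `types` crate and 32 slots per epoch): a request for the next wall-clock epoch resolves to the start slot of the current one.

use types::{Epoch, Slot};

fn main() {
    let slots_per_epoch = 32;
    let current_epoch = Epoch::new(10);

    // "Next epoch" request: epoch 11 while the wall clock is in epoch 10.
    let requested = Epoch::new(11);
    let slot = if requested > current_epoch {
        (requested - 1).start_slot(slots_per_epoch)
    } else {
        requested.start_slot(slots_per_epoch)
    };

    // Epoch 10 starts at slot 10 * 32 = 320.
    assert_eq!(slot, Slot::new(320));
}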

/// Helper function to get the duties for some `validator_pubkeys` in some `epoch`.
fn return_validator_duties<T: BeaconChainTypes>(
    beacon_chain: &BeaconChain<T>,
    epoch: Epoch,
    validator_pubkeys: Vec<PublicKeyBytes>,
) -> Result<Vec<ValidatorDutyBytes>, ApiError> {
    let mut state = get_state_for_epoch(&beacon_chain, epoch, StateSkipConfig::WithoutStateRoots)?;

    let relative_epoch = RelativeEpoch::from_epoch(state.current_epoch(), epoch)
        .map_err(|_| ApiError::ServerError(String::from("Loaded state is in the wrong epoch")))?;

    state
        .build_committee_cache(relative_epoch, &beacon_chain.spec)
        .map_err(|e| ApiError::ServerError(format!("Unable to build committee cache: {:?}", e)))?;

    // Get a list of all validators for this epoch.
    //
    // Used for quickly determining the slot for a proposer.
    let validator_proposers = if epoch == state.current_epoch() {
        Some(
            epoch
                .slot_iter(T::EthSpec::slots_per_epoch())
                .map(|slot| {
                    state
                        .get_beacon_proposer_index(slot, &beacon_chain.spec)
                        .map(|i| (i, slot))
                        .map_err(|e| {
                            ApiError::ServerError(format!(
                                "Unable to get proposer index for validator: {:?}",
                                e
                            ))
                        })
                })
                .collect::<Result<Vec<_>, _>>()?,
        )
    } else {
        None
    };

    validator_pubkeys
        .into_iter()
        .map(|validator_pubkey| {
            // The `beacon_chain` can return a validator index that does not exist in all states.
            // Therefore, we must check to ensure that the validator index is valid for our
            // `state`.
            let validator_index = beacon_chain
                .validator_index(&validator_pubkey)
                .map_err(|e| {
                    ApiError::ServerError(format!("Unable to get validator index: {:?}", e))
                })?
                .filter(|i| *i < state.validators.len());

            if let Some(validator_index) = validator_index {
                let duties = state
                    .get_attestation_duties(validator_index, relative_epoch)
                    .map_err(|e| {
                        ApiError::ServerError(format!(
                            "Unable to obtain attestation duties: {:?}",
                            e
                        ))
                    })?;

                let committee_count_at_slot = duties
                    .map(|d| state.get_committee_count_at_slot(d.slot))
                    .transpose()
                    .map_err(|e| {
                        ApiError::ServerError(format!(
                            "Unable to find committee count at slot: {:?}",
                            e
                        ))
                    })?;

                let aggregator_modulo = duties
                    .map(|duties| SelectionProof::modulo(duties.committee_len, &beacon_chain.spec))
                    .transpose()
                    .map_err(|e| {
                        ApiError::ServerError(format!("Unable to find modulo: {:?}", e))
                    })?;

                let block_proposal_slots = validator_proposers.as_ref().map(|proposers| {
                    proposers
                        .iter()
                        .filter(|(i, _slot)| validator_index == *i)
                        .map(|(_i, slot)| *slot)
                        .collect()
                });

                Ok(ValidatorDutyBytes {
                    validator_pubkey,
                    validator_index: Some(validator_index as u64),
                    attestation_slot: duties.map(|d| d.slot),
                    attestation_committee_index: duties.map(|d| d.index),
                    committee_count_at_slot,
                    attestation_committee_position: duties.map(|d| d.committee_position),
                    block_proposal_slots,
                    aggregator_modulo,
                })
            } else {
                Ok(ValidatorDutyBytes {
                    validator_pubkey,
                    validator_index: None,
                    attestation_slot: None,
                    attestation_committee_index: None,
                    attestation_committee_position: None,
                    block_proposal_slots: None,
                    committee_count_at_slot: None,
                    aggregator_modulo: None,
                })
            }
        })
        .collect::<Result<Vec<_>, ApiError>>()
}
/// HTTP Handler to produce a new BeaconBlock from the current state, ready to be signed by a validator.
pub fn get_new_beacon_block<T: BeaconChainTypes>(
    req: Request<Vec<u8>>,
    ctx: Arc<Context<T>>,
) -> Result<BeaconBlock<T::EthSpec>, ApiError> {
    let query = UrlQuery::from_request(&req)?;

    let slot = query.slot()?;
    let randao_reveal = query.randao_reveal()?;

    let validator_graffiti = if let Some((_key, value)) = query.first_of_opt(&["graffiti"]) {
        Some(parse_hex_ssz_bytes(&value)?)
    } else {
        None
    };

    let (new_block, _state) = ctx
        .beacon_chain
        .produce_block(randao_reveal, slot, validator_graffiti)
        .map_err(|e| {
            error!(
                ctx.log,
                "Error whilst producing block";
                "error" => format!("{:?}", e)
            );

            ApiError::ServerError(format!(
                "Beacon node is not able to produce a block: {:?}",
                e
            ))
        })?;

    Ok(new_block)
}
/// HTTP Handler to publish a SignedBeaconBlock, which has been signed by a validator.
pub fn publish_beacon_block<T: BeaconChainTypes>(
    req: Request<Vec<u8>>,
    ctx: Arc<Context<T>>,
) -> Result<(), ApiError> {
    let body = req.into_body();

    serde_json::from_slice(&body).map_err(|e| {
        ApiError::BadRequest(format!("Unable to parse JSON into SignedBeaconBlock: {:?}", e))
    })
    .and_then(move |block: SignedBeaconBlock<T::EthSpec>| {
        let slot = block.slot();
        match ctx.beacon_chain.process_block(block.clone()) {
            Ok(block_root) => {
                // The block was processed; publish it via gossipsub.
                info!(
                    ctx.log,
                    "Block from local validator";
                    "block_root" => format!("{}", block_root),
                    "block_slot" => slot,
                );

                publish_beacon_block_to_network::<T>(&ctx.network_chan, block)?;

                // Run the fork choice algorithm and enshrine a new canonical head, if
                // found.
                //
                // The new head may or may not be the block we just received.
                if let Err(e) = ctx.beacon_chain.fork_choice() {
                    error!(
                        ctx.log,
                        "Failed to find beacon chain head";
                        "error" => format!("{:?}", e)
                    );
                } else {
                    // In the best case, validators should produce blocks that become the
                    // head.
                    //
                    // Potential reasons this may not be the case:
                    //
                    // - A quick re-org between block production and publishing.
                    // - Excessive time between block production and publishing.
                    // - A validator using another beacon node to produce blocks and
                    //   submitting them here.
                    if ctx.beacon_chain.head()?.beacon_block_root != block_root {
                        warn!(
                            ctx.log,
                            "Block from validator is not head";
                            "desc" => "potential re-org",
                        );
                    }
                }

                Ok(())
            }
            Err(BlockError::BeaconChainError(e)) => {
                error!(
                    ctx.log,
                    "Error whilst processing block";
                    "error" => format!("{:?}", e)
                );

                Err(ApiError::ServerError(format!(
                    "Error while processing block: {:?}",
                    e
                )))
            }
            Err(other) => {
                warn!(
                    ctx.log,
                    "Invalid block from local validator";
                    "outcome" => format!("{:?}", other)
                );

                Err(ApiError::ProcessingError(format!(
                    "The SignedBeaconBlock could not be processed and has not been published: {:?}",
                    other
                )))
            }
        }
    })
}
/// HTTP Handler to produce a new Attestation from the current state, ready to be signed by a validator.
pub fn get_new_attestation<T: BeaconChainTypes>(
    req: Request<Vec<u8>>,
    ctx: Arc<Context<T>>,
) -> Result<Attestation<T::EthSpec>, ApiError> {
    let query = UrlQuery::from_request(&req)?;

    let slot = query.slot()?;
    let index = query.committee_index()?;

    ctx.beacon_chain
        .produce_unaggregated_attestation(slot, index)
        .map_err(|e| ApiError::BadRequest(format!("Unable to produce attestation: {:?}", e)))
}
/// HTTP Handler to retrieve the aggregate attestation for a slot.
pub fn get_aggregate_attestation<T: BeaconChainTypes>(
    req: Request<Vec<u8>>,
    ctx: Arc<Context<T>>,
) -> Result<Attestation<T::EthSpec>, ApiError> {
    let query = UrlQuery::from_request(&req)?;

    let attestation_data = query.attestation_data()?;

    match ctx
        .beacon_chain
        .get_aggregated_attestation(&attestation_data)
    {
        Ok(Some(attestation)) => Ok(attestation),
        Ok(None) => Err(ApiError::NotFound(format!(
            "No matching aggregate attestation for slot {:?} is known in slot {:?}",
            attestation_data.slot,
            ctx.beacon_chain.slot()
        ))),
        Err(e) => Err(ApiError::ServerError(format!(
            "Unable to obtain attestation: {:?}",
            e
        ))),
    }
}
/// HTTP Handler to publish a list of Attestations, which have been signed by a number of validators.
pub fn publish_attestations<T: BeaconChainTypes>(
    req: Request<Vec<u8>>,
    ctx: Arc<Context<T>>,
) -> Result<(), ApiError> {
    let bytes = req.into_body();

    serde_json::from_slice(&bytes)
        .map_err(|e| {
            ApiError::BadRequest(format!(
                "Unable to deserialize JSON into a list of attestations: {:?}",
                e
            ))
        })
        // Process all of the attestations _without_ exiting early if one fails.
        .map(
            move |attestations: Vec<(Attestation<T::EthSpec>, SubnetId)>| {
                attestations
                    .into_iter()
                    .enumerate()
                    .map(|(i, (attestation, subnet_id))| {
                        process_unaggregated_attestation(
                            &ctx.beacon_chain,
                            ctx.network_chan.clone(),
                            attestation,
                            subnet_id,
                            i,
                            &ctx.log,
                        )
                    })
                    .collect::<Vec<Result<_, _>>>()
            },
        )
        // Iterate through all the results and return on the first `Err`.
        //
        // Note: this will only provide info about the _first_ failure, not all failures.
        .and_then(|processing_results| processing_results.into_iter().try_for_each(|result| result))
        .map(|_| ())
}
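The collect-then-`try_for_each` shape above is deliberate: every attestation is processed (and possibly published) before any error is surfaced, rather than aborting on the first failure. A self-contained sketch of the pattern (illustrative only, not part of the original sources):

fn process_all<T, E>(items: Vec<T>, mut process: impl FnMut(T) -> Result<(), E>) -> Result<(), E> {
    // Run `process` on every item first, collecting all outcomes...
    let results: Vec<Result<(), E>> = items.into_iter().map(|item| process(item)).collect();
    // ...then surface only the first error, mirroring the handler above.
    results.into_iter().try_for_each(|result| result)
}

fn main() {
    // Item 3 is still processed even though item 2 fails.
    let outcome = process_all(vec![1, 2, 3], |n| if n == 2 { Err("boom") } else { Ok(()) });
    assert_eq!(outcome, Err("boom"));
}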

/// Processes an unaggregated attestation that was included in a list of attestations with the
/// index `i`.
#[allow(clippy::redundant_clone)] // false positives in this function.
fn process_unaggregated_attestation<T: BeaconChainTypes>(
    beacon_chain: &BeaconChain<T>,
    network_chan: NetworkChannel<T::EthSpec>,
    attestation: Attestation<T::EthSpec>,
    subnet_id: SubnetId,
    i: usize,
    log: &Logger,
) -> Result<(), ApiError> {
    let data = &attestation.data.clone();

    // Verify that the attestation is valid to be included on the gossip network.
    let verified_attestation = beacon_chain
        .verify_unaggregated_attestation_for_gossip(attestation.clone(), subnet_id)
        .map_err(|e| {
            handle_attestation_error(
                e,
                &format!("unaggregated attestation {} failed gossip verification", i),
                data,
                log,
            )
        })?;

    // Publish the attestation to the network.
    if let Err(e) = network_chan.send(NetworkMessage::Publish {
        messages: vec![PubsubMessage::Attestation(Box::new((
            subnet_id,
            attestation,
        )))],
    }) {
        return Err(ApiError::ServerError(format!(
            "Unable to send unaggregated attestation {} to network: {:?}",
            i, e
        )));
    }

    beacon_chain
        .apply_attestation_to_fork_choice(&verified_attestation)
        .map_err(|e| {
            handle_fork_choice_error(
                e,
                &format!(
                    "unaggregated attestation {} was unable to be added to fork choice",
                    i
                ),
                data,
                log,
            )
        })?;

    beacon_chain
        .add_to_naive_aggregation_pool(verified_attestation)
        .map_err(|e| {
            handle_attestation_error(
                e,
                &format!(
                    "unaggregated attestation {} was unable to be added to aggregation pool",
                    i
                ),
                data,
                log,
            )
        })?;

    Ok(())
}
/// HTTP Handler to publish a list of SignedAggregateAndProof, which have been signed by a number
/// of validators.
pub fn publish_aggregate_and_proofs<T: BeaconChainTypes>(
    req: Request<Vec<u8>>,
    ctx: Arc<Context<T>>,
) -> Result<(), ApiError> {
    let body = req.into_body();

    serde_json::from_slice(&body)
        .map_err(|e| {
            ApiError::BadRequest(format!(
                "Unable to deserialize JSON into a list of SignedAggregateAndProof: {:?}",
                e
            ))
        })
        // Process all of the aggregates _without_ exiting early if one fails.
        .map(
            move |signed_aggregates: Vec<SignedAggregateAndProof<T::EthSpec>>| {
                signed_aggregates
                    .into_iter()
                    .enumerate()
                    .map(|(i, signed_aggregate)| {
                        process_aggregated_attestation(
                            &ctx.beacon_chain,
                            ctx.network_chan.clone(),
                            signed_aggregate,
                            i,
                            &ctx.log,
                        )
                    })
                    .collect::<Vec<Result<_, _>>>()
            },
        )
        // Iterate through all the results and return on the first `Err`.
        //
        // Note: this will only provide info about the _first_ failure, not all failures.
        .and_then(|processing_results| processing_results.into_iter().try_for_each(|result| result))
}
/// Processes an aggregated attestation that was included in a list of attestations with the index
/// `i`.
#[allow(clippy::redundant_clone)] // false positives in this function.
fn process_aggregated_attestation<T: BeaconChainTypes>(
    beacon_chain: &BeaconChain<T>,
    network_chan: NetworkChannel<T::EthSpec>,
    signed_aggregate: SignedAggregateAndProof<T::EthSpec>,
    i: usize,
    log: &Logger,
) -> Result<(), ApiError> {
    let data = &signed_aggregate.message.aggregate.data.clone();

    // Verify that the attestation is valid to be included on the gossip network.
    //
    // Using this gossip check for local validators is not necessarily ideal; there will be some
    // attestations that we reject that could possibly be included in a block (e.g., attestations
    // that are late by more than 1 epoch but less than 2). We can pick this back up if we notice
    // that it's materially affecting validator profits. Until then, I'm hesitant to introduce yet
    // _another_ attestation verification path.
    let verified_attestation =
        match beacon_chain.verify_aggregated_attestation_for_gossip(signed_aggregate.clone()) {
            Ok(verified_attestation) => verified_attestation,
            Err(AttnError::AttestationAlreadyKnown(attestation_root)) => {
                trace!(
                    log,
                    "Ignored known attn from local validator";
                    "attn_root" => format!("{}", attestation_root)
                );

                // Exit early with success for a known attestation; there's no need to re-process
                // an aggregate we already know.
                return Ok(());
            }
            /*
             * It's worth noting that we don't check for `Error::AggregatorAlreadyKnown` since (at
             * the time of writing) we check for `AttestationAlreadyKnown` first.
             *
             * Given this, it's impossible to hit `Error::AggregatorAlreadyKnown` without that
             * aggregator having already produced a conflicting aggregation. This is not slashable
             * but I think it's still the sort of condition we should error on, at least for now.
             */
            Err(e) => {
                return Err(handle_attestation_error(
                    e,
                    &format!("aggregated attestation {} failed gossip verification", i),
                    data,
                    log,
                ))
            }
        };

    // Publish the attestation to the network.
    if let Err(e) = network_chan.send(NetworkMessage::Publish {
        messages: vec![PubsubMessage::AggregateAndProofAttestation(Box::new(
            signed_aggregate,
        ))],
    }) {
        return Err(ApiError::ServerError(format!(
            "Unable to send aggregated attestation {} to network: {:?}",
            i, e
        )));
    }

    beacon_chain
        .apply_attestation_to_fork_choice(&verified_attestation)
        .map_err(|e| {
            handle_fork_choice_error(
                e,
                &format!(
                    "aggregated attestation {} was unable to be added to fork choice",
                    i
                ),
                data,
                log,
            )
        })?;

    beacon_chain
        .add_to_block_inclusion_pool(verified_attestation)
        .map_err(|e| {
            handle_attestation_error(
                e,
                &format!(
                    "aggregated attestation {} was unable to be added to op pool",
                    i
                ),
                data,
                log,
            )
        })?;

    Ok(())
}
/// Common handler for `AttnError` during attestation verification.
fn handle_attestation_error(
    e: AttnError,
    detail: &str,
    data: &AttestationData,
    log: &Logger,
) -> ApiError {
    match e {
        AttnError::BeaconChainError(e) => {
            error!(
                log,
                "Internal error verifying local attestation";
                "detail" => detail,
                "error" => format!("{:?}", e),
                "target" => data.target.epoch,
                "source" => data.source.epoch,
                "index" => data.index,
                "slot" => data.slot,
            );

            ApiError::ServerError(format!(
                "Internal error verifying local attestation. Error: {:?}. Detail: {}",
                e, detail
            ))
        }
        e => {
            error!(
                log,
                "Invalid local attestation";
                "detail" => detail,
                "reason" => format!("{:?}", e),
                "target" => data.target.epoch,
                "source" => data.source.epoch,
                "index" => data.index,
                "slot" => data.slot,
            );

            ApiError::ProcessingError(format!(
                "Invalid local attestation. Error: {:?} Detail: {}",
                e, detail
            ))
        }
    }
}
/// Common handler for `ForkChoiceError` during attestation verification.
fn handle_fork_choice_error(
    e: BeaconChainError,
    detail: &str,
    data: &AttestationData,
    log: &Logger,
) -> ApiError {
    match e {
        BeaconChainError::ForkChoiceError(ForkChoiceError::InvalidAttestation(e)) => {
            error!(
                log,
                "Local attestation invalid for fork choice";
                "detail" => detail,
                "reason" => format!("{:?}", e),
                "target" => data.target.epoch,
                "source" => data.source.epoch,
                "index" => data.index,
                "slot" => data.slot,
            );

            ApiError::ProcessingError(format!(
                "Invalid local attestation. Error: {:?} Detail: {}",
                e, detail
            ))
        }
        e => {
            error!(
                log,
                "Internal error applying attn to fork choice";
                "detail" => detail,
                "error" => format!("{:?}", e),
                "target" => data.target.epoch,
                "source" => data.source.epoch,
                "index" => data.index,
                "slot" => data.slot,
            );

            ApiError::ServerError(format!(
                "Internal error verifying local attestation. Error: {:?}. Detail: {}",
                e, detail
            ))
        }
    }
}
File diff suppressed because it is too large
@@ -148,7 +148,7 @@ pub fn cli_app<'a, 'b>() -> App<'a, 'b> {
         .arg(
             Arg::with_name("http")
                 .long("http")
-                .help("Enable RESTful HTTP API server. Disabled by default.")
+                .help("Enable the RESTful HTTP API server. Disabled by default.")
                 .takes_value(false),
         )
         .arg(
@@ -175,6 +175,38 @@
                 .default_value("")
                 .takes_value(true),
         )
+        /* Prometheus metrics HTTP server related arguments */
+        .arg(
+            Arg::with_name("metrics")
+                .long("metrics")
+                .help("Enable the Prometheus metrics HTTP server. Disabled by default.")
+                .takes_value(false),
+        )
+        .arg(
+            Arg::with_name("metrics-address")
+                .long("metrics-address")
+                .value_name("ADDRESS")
+                .help("Set the listen address for the Prometheus metrics HTTP server.")
+                .default_value("127.0.0.1")
+                .takes_value(true),
+        )
+        .arg(
+            Arg::with_name("metrics-port")
+                .long("metrics-port")
+                .value_name("PORT")
+                .help("Set the listen TCP port for the Prometheus metrics HTTP server.")
+                .default_value("5054")
+                .takes_value(true),
+        )
+        .arg(
+            Arg::with_name("metrics-allow-origin")
+                .long("metrics-allow-origin")
+                .value_name("ORIGIN")
+                .help("Set the value of the Access-Control-Allow-Origin response HTTP header for the Prometheus metrics HTTP server. \
+                    Use * to allow any origin (not recommended in production)")
+                .default_value("")
+                .takes_value(true),
+        )
         /* Websocket related arguments */
         .arg(
             Arg::with_name("ws")
@@ -241,7 +273,8 @@
                 .long("slots-per-restore-point")
                 .value_name("SLOT_COUNT")
                 .help("Specifies how often a freezer DB restore point should be stored. \
-                    DO NOT DECREASE AFTER INITIALIZATION. [default: 2048 (mainnet) or 64 (minimal)]")
+                    Cannot be changed after initialization. \
+                    [default: 2048 (mainnet) or 64 (minimal)]")
                 .takes_value(true)
         )
         .arg(
@@ -1,7 +1,8 @@
 use beacon_chain::builder::PUBKEY_CACHE_FILENAME;
 use clap::ArgMatches;
 use clap_utils::BAD_TESTNET_DIR_MESSAGE;
-use client::{config::DEFAULT_DATADIR, ClientConfig, ClientGenesis};
+use client::{ClientConfig, ClientGenesis};
+use directory::{DEFAULT_BEACON_NODE_DIR, DEFAULT_NETWORK_DIR, DEFAULT_ROOT_DIR};
 use eth2_libp2p::{multiaddr::Protocol, Enr, Multiaddr, NetworkConfig, PeerIdSerialized};
 use eth2_testnet_config::Eth2TestnetConfig;
 use slog::{crit, info, warn, Logger};
@@ -13,9 +14,6 @@ use std::net::{TcpListener, UdpSocket};
 use std::path::PathBuf;
 use types::{ChainSpec, Checkpoint, Epoch, EthSpec, Hash256, GRAFFITI_BYTES_LEN};
 
-pub const BEACON_NODE_DIR: &str = "beacon";
-pub const NETWORK_DIR: &str = "network";
-
 /// Gets the fully-initialized global client.
 ///
 /// The top-level `clap` arguments should be provided as `cli_args`.
@@ -89,26 +87,26 @@ pub fn get_config<E: EthSpec>(
     */
 
     if cli_args.is_present("staking") {
-        client_config.rest_api.enabled = true;
+        client_config.http_api.enabled = true;
         client_config.sync_eth1_chain = true;
     }
 
     /*
-     * Http server
+     * Http API server
     */
 
     if cli_args.is_present("http") {
-        client_config.rest_api.enabled = true;
+        client_config.http_api.enabled = true;
     }
 
     if let Some(address) = cli_args.value_of("http-address") {
-        client_config.rest_api.listen_address = address
+        client_config.http_api.listen_addr = address
             .parse::<Ipv4Addr>()
             .map_err(|_| "http-address is not a valid IPv4 address.")?;
     }
 
     if let Some(port) = cli_args.value_of("http-port") {
-        client_config.rest_api.port = port
+        client_config.http_api.listen_port = port
             .parse::<u16>()
             .map_err(|_| "http-port is not a valid u16.")?;
     }
@@ -119,7 +117,36 @@ pub fn get_config<E: EthSpec>(
         hyper::header::HeaderValue::from_str(allow_origin)
             .map_err(|_| "Invalid allow-origin value")?;
 
-        client_config.rest_api.allow_origin = allow_origin.to_string();
+        client_config.http_api.allow_origin = Some(allow_origin.to_string());
+    }
+
+    /*
+     * Prometheus metrics HTTP server
+     */
+
+    if cli_args.is_present("metrics") {
+        client_config.http_metrics.enabled = true;
+    }
+
+    if let Some(address) = cli_args.value_of("metrics-address") {
+        client_config.http_metrics.listen_addr = address
+            .parse::<Ipv4Addr>()
+            .map_err(|_| "metrics-address is not a valid IPv4 address.")?;
+    }
+
+    if let Some(port) = cli_args.value_of("metrics-port") {
+        client_config.http_metrics.listen_port = port
+            .parse::<u16>()
+            .map_err(|_| "metrics-port is not a valid u16.")?;
+    }
+
+    if let Some(allow_origin) = cli_args.value_of("metrics-allow-origin") {
+        // Pre-validate the config value to give feedback to the user on node startup, instead of
+        // as late as when the first API response is produced.
+        hyper::header::HeaderValue::from_str(allow_origin)
+            .map_err(|_| "Invalid allow-origin value")?;
+
+        client_config.http_metrics.allow_origin = Some(allow_origin.to_string());
     }
 
     // Log a warning indicating an open HTTP server if it wasn't specified explicitly
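The `metrics-allow-origin` handling repeats the pattern already used for the API server: validate the CORS value once at startup so a typo fails the node immediately rather than surfacing on the first scrape. A minimal standalone sketch of that idea (the `validate_allow_origin` helper is illustrative, not from this commit; the `HeaderValue::from_str` check is the same one the hunk uses):

// Hedged sketch: pre-validate an Access-Control-Allow-Origin value at startup.
fn validate_allow_origin(origin: &str) -> Result<Option<String>, String> {
    // `HeaderValue::from_str` rejects values that are not legal header contents,
    // e.g. embedded newlines or other control characters.
    hyper::header::HeaderValue::from_str(origin)
        .map_err(|_| format!("Invalid allow-origin value: {:?}", origin))?;
    Ok(Some(origin.to_string()))
}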
@@ -127,7 +154,7 @@ pub fn get_config<E: EthSpec>(
     if cli_args.is_present("staking") {
         warn!(
             log,
-            "Running HTTP server on port {}", client_config.rest_api.port
+            "Running HTTP server on port {}", client_config.http_api.listen_port
         );
     }
 
@@ -221,7 +248,8 @@ pub fn get_config<E: EthSpec>(
             unused_port("tcp").map_err(|e| format!("Failed to get port for libp2p: {}", e))?;
         client_config.network.discovery_port =
             unused_port("udp").map_err(|e| format!("Failed to get port for discovery: {}", e))?;
-        client_config.rest_api.port = 0;
+        client_config.http_api.listen_port = 0;
+        client_config.http_metrics.listen_port = 0;
         client_config.websocket_server.port = 0;
     }
 
@@ -232,6 +260,11 @@ pub fn get_config<E: EthSpec>(
 
         client_config.eth1.deposit_contract_address =
             format!("{:?}", eth2_testnet_config.deposit_contract_address()?);
+        let spec_contract_address = format!("{:?}", spec.deposit_contract_address);
+        if client_config.eth1.deposit_contract_address != spec_contract_address {
+            return Err("Testnet contract address does not match spec".into());
+        }
+
         client_config.eth1.deposit_contract_deploy_block =
             eth2_testnet_config.deposit_contract_deploy_block;
         client_config.eth1.lowest_cached_block_number =
@@ -267,7 +300,7 @@ pub fn get_config<E: EthSpec>(
     };
 
     let trimmed_graffiti_len = cmp::min(raw_graffiti.len(), GRAFFITI_BYTES_LEN);
-    client_config.graffiti[..trimmed_graffiti_len]
+    client_config.graffiti.0[..trimmed_graffiti_len]
         .copy_from_slice(&raw_graffiti[..trimmed_graffiti_len]);
 
     if let Some(wss_checkpoint) = cli_args.value_of("wss-checkpoint") {
@@ -330,7 +363,7 @@ pub fn set_network_config(
     if let Some(dir) = cli_args.value_of("network-dir") {
        config.network_dir = PathBuf::from(dir);
    } else {
-        config.network_dir = data_dir.join(NETWORK_DIR);
+        config.network_dir = data_dir.join(DEFAULT_NETWORK_DIR);
    };
 
     if let Some(listen_address_str) = cli_args.value_of("listen-address") {
@@ -495,11 +528,18 @@ pub fn get_data_dir(cli_args: &ArgMatches) -> PathBuf {
     // Read the `--datadir` flag.
     //
     // If it's not present, try and find the home directory (`~`) and push the default data
-    // directory onto it.
+    // directory and the testnet name onto it.
     cli_args
         .value_of("datadir")
-        .map(|path| PathBuf::from(path).join(BEACON_NODE_DIR))
-        .or_else(|| dirs::home_dir().map(|home| home.join(DEFAULT_DATADIR).join(BEACON_NODE_DIR)))
+        .map(|path| PathBuf::from(path).join(DEFAULT_BEACON_NODE_DIR))
+        .or_else(|| {
+            dirs::home_dir().map(|home| {
+                home.join(DEFAULT_ROOT_DIR)
+                    .join(directory::get_testnet_name(cli_args))
+                    .join(DEFAULT_BEACON_NODE_DIR)
+            })
+        })
         .unwrap_or_else(|| PathBuf::from("."))
 }
 
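The effect of this hunk is that an explicit `--datadir` still wins outright, while the fallback location now includes the testnet name. A hedged sketch of the equivalent resolution logic with the `directory` constants inlined (`resolve_data_dir` is a hypothetical helper, and the literal `.lighthouse` is an assumption about the value of `DEFAULT_ROOT_DIR`):

use std::path::PathBuf;

// Sketch only: `--datadir /data` yields `/data/beacon`; with no flag the node
// falls back to `~/.lighthouse/<testnet-name>/beacon`.
fn resolve_data_dir(cli_datadir: Option<&str>, testnet_name: &str) -> PathBuf {
    cli_datadir
        .map(|path| PathBuf::from(path).join("beacon"))
        .or_else(|| {
            dirs::home_dir()
                .map(|home| home.join(".lighthouse").join(testnet_name).join("beacon"))
        })
        .unwrap_or_else(|| PathBuf::from("."))
}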
@@ -7,7 +7,7 @@ mod config;
 pub use beacon_chain;
 pub use cli::cli_app;
 pub use client::{Client, ClientBuilder, ClientConfig, ClientGenesis};
-pub use config::{get_data_dir, get_eth2_testnet_config, set_network_config};
+pub use config::{get_config, get_data_dir, get_eth2_testnet_config, set_network_config};
 pub use eth2_config::Eth2Config;
 
 use beacon_chain::events::TeeEventHandler;
@@ -17,7 +17,6 @@ use beacon_chain::{
     builder::Witness, eth1_chain::CachingEth1Backend, slot_clock::SystemTimeSlotClock,
 };
 use clap::ArgMatches;
-use config::get_config;
 use environment::RuntimeContext;
 use slog::{info, warn};
 use std::ops::{Deref, DerefMut};
@@ -54,7 +53,7 @@ impl<E: EthSpec> ProductionBeaconNode<E> {
     /// configurations hosted remotely.
     pub async fn new_from_cli(
         context: RuntimeContext<E>,
-        matches: &ArgMatches<'_>,
+        matches: ArgMatches<'static>,
     ) -> Result<Self, String> {
         let client_config = get_config::<E>(
             &matches,
@@ -72,7 +71,6 @@ impl<E: EthSpec> ProductionBeaconNode<E> {
         context: RuntimeContext<E>,
         mut client_config: ClientConfig,
     ) -> Result<Self, String> {
-        let http_eth2_config = context.eth2_config().clone();
         let spec = context.eth2_config().spec.clone();
         let client_config_1 = client_config.clone();
         let client_genesis = client_config.genesis.clone();
@@ -119,26 +117,23 @@ impl<E: EthSpec> ProductionBeaconNode<E> {
             builder.no_eth1_backend()?
         };
 
-        let (builder, events) = builder
+        let (builder, _events) = builder
             .system_time_slot_clock()?
             .tee_event_handler(client_config.websocket_server.clone())?;
 
         // Inject the executor into the discv5 network config.
-        client_config.network.discv5_config.executor = Some(Box::new(executor));
+        let discv5_executor = Discv5Executor(executor);
+        client_config.network.discv5_config.executor = Some(Box::new(discv5_executor));
 
-        let builder = builder
+        builder
             .build_beacon_chain()?
             .network(&client_config.network)
             .await?
-            .notifier()?;
-
-        let builder = if client_config.rest_api.enabled {
-            builder.http_server(&client_config, &http_eth2_config, events)?
-        } else {
-            builder
-        };
-
-        Ok(Self(builder.build()))
+            .notifier()?
+            .http_api_config(client_config.http_api.clone())
+            .http_metrics_config(client_config.http_metrics.clone())
+            .build()
+            .map(Self)
     }
 
     pub fn into_inner(self) -> ProductionClient<E> {
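Note the structural change in this hunk: the HTTP API and metrics servers are no longer attached conditionally at the call site. The builder is always handed both configs via `http_api_config` and `http_metrics_config`, with each server presumably deciding whether to start from its own `enabled` flag, so the old `if client_config.rest_api.enabled` branch disappears and `build()` can return a `Result` that maps straight into `Self`.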
@@ -159,3 +154,13 @@ impl<E: EthSpec> DerefMut for ProductionBeaconNode<E> {
         &mut self.0
     }
 }
+
+// Implements the Discv5 Executor trait over our global executor
+#[derive(Clone)]
+struct Discv5Executor(task_executor::TaskExecutor);
+
+impl eth2_libp2p::discv5::Executor for Discv5Executor {
+    fn spawn(&self, future: std::pin::Pin<Box<dyn std::future::Future<Output = ()> + Send>>) {
+        self.0.spawn(future, "discv5")
+    }
+}
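This wrapper is the usual answer to Rust's orphan rule: `discv5::Executor` and `TaskExecutor` are both foreign to this crate, so a local newtype is needed before the trait can be implemented for the executor. It also means every future spawned by discv5 runs on the global task executor under the "discv5" name, which presumably feeds the executor's per-task accounting.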
@@ -10,8 +10,8 @@ harness = false
 
 [dev-dependencies]
 tempfile = "3.1.0"
-criterion = "0.3.2"
-rayon = "1.3.0"
+criterion = "0.3.3"
+rayon = "1.4.1"
 
 [dependencies]
 db-key = "0.0.5"
@@ -20,13 +20,13 @@ parking_lot = "0.11.0"
 itertools = "0.9.0"
 eth2_ssz = "0.1.2"
 eth2_ssz_derive = "0.1.0"
-tree_hash = "0.1.0"
+tree_hash = "0.1.1"
 types = { path = "../../consensus/types" }
 state_processing = { path = "../../consensus/state_processing" }
 slog = "2.5.2"
-serde = "1.0.110"
-serde_derive = "1.0.110"
+serde = "1.0.116"
+serde_derive = "1.0.116"
 lazy_static = "1.4.0"
 lighthouse_metrics = { path = "../../common/lighthouse_metrics" }
-lru = "0.5.1"
-sloggers = "1.0.0"
+lru = "0.6.0"
+sloggers = "1.0.1"
@@ -1,11 +1,14 @@
+use crate::{DBColumn, Error, StoreItem};
 use serde_derive::{Deserialize, Serialize};
+use ssz::{Decode, Encode};
+use ssz_derive::{Decode, Encode};
 use types::{EthSpec, MinimalEthSpec};
 
 pub const DEFAULT_SLOTS_PER_RESTORE_POINT: u64 = 2048;
 pub const DEFAULT_BLOCK_CACHE_SIZE: usize = 5;
 
 /// Database configuration parameters.
-#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)]
+#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize, Encode, Decode)]
 pub struct StoreConfig {
     /// Number of slots to wait between storing restore points in the freezer database.
     pub slots_per_restore_point: u64,
@@ -13,6 +16,11 @@ pub struct StoreConfig {
     pub block_cache_size: usize,
 }
 
+#[derive(Debug, Clone)]
+pub enum StoreConfigError {
+    MismatchedSlotsPerRestorePoint { config: u64, on_disk: u64 },
+}
+
 impl Default for StoreConfig {
     fn default() -> Self {
         Self {
@@ -22,3 +30,29 @@ impl Default for StoreConfig {
         }
     }
 }
+
+impl StoreConfig {
+    pub fn check_compatibility(&self, on_disk_config: &Self) -> Result<(), StoreConfigError> {
+        if self.slots_per_restore_point != on_disk_config.slots_per_restore_point {
+            return Err(StoreConfigError::MismatchedSlotsPerRestorePoint {
+                config: self.slots_per_restore_point,
+                on_disk: on_disk_config.slots_per_restore_point,
+            });
+        }
+        Ok(())
+    }
+}
+
+impl StoreItem for StoreConfig {
+    fn db_column() -> DBColumn {
+        DBColumn::BeaconMeta
+    }
+
+    fn as_store_bytes(&self) -> Vec<u8> {
+        self.as_ssz_bytes()
+    }
+
+    fn from_store_bytes(bytes: &[u8]) -> Result<Self, Error> {
+        Ok(Self::from_ssz_bytes(bytes)?)
+    }
+}
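Making `StoreConfig` a `StoreItem` is what allows the database to persist its own configuration and refuse to reopen under incompatible settings. A hedged sketch of how `check_compatibility` behaves, written as a test that could sit alongside this module (illustrative, not part of the commit):

#[cfg(test)]
mod config_compat_tests {
    use super::*;

    #[test]
    fn mismatched_slots_per_restore_point_is_rejected() {
        let on_disk = StoreConfig { slots_per_restore_point: 2048, ..StoreConfig::default() };
        let supplied = StoreConfig { slots_per_restore_point: 64, ..StoreConfig::default() };
        // Changing the restore-point cadence would break freezer indexing, so it errors.
        assert!(supplied.check_compatibility(&on_disk).is_err());
        // Identical configs are always compatible.
        assert!(on_disk.check_compatibility(&on_disk).is_ok());
    }
}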
@@ -1,4 +1,5 @@
 use crate::chunked_vector::ChunkError;
+use crate::config::StoreConfigError;
 use crate::hot_cold_store::HotColdDBError;
 use ssz::DecodeError;
 use types::{BeaconStateError, Hash256, Slot};
@@ -17,6 +18,7 @@ pub enum Error {
     BlockNotFound(Hash256),
     NoContinuationData,
     SplitPointModified(Slot, Slot),
+    ConfigError(StoreConfigError),
 }
 
 impl From<DecodeError> for Error {
@@ -49,6 +51,12 @@ impl From<DBError> for Error {
     }
 }
 
+impl From<StoreConfigError> for Error {
+    fn from(e: StoreConfigError) -> Error {
+        Error::ConfigError(e)
+    }
+}
+
 #[derive(Debug)]
 pub struct DBError {
     pub message: String,
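With this `From` impl in place, the open path can use `?` on `check_compatibility` and have the config error wrapped automatically. A minimal sketch (`open_checked` is a hypothetical function, not from the commit; it assumes `StoreConfig` and `Error` are in scope as above):

// Sketch: `?` converts a StoreConfigError into Error::ConfigError via `From`.
fn open_checked(supplied: &StoreConfig, on_disk: &StoreConfig) -> Result<(), Error> {
    supplied.check_compatibility(on_disk)?;
    Ok(())
}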
@@ -7,6 +7,9 @@ use crate::impls::beacon_state::{get_full_state, store_full_state};
 use crate::iter::{ParentRootBlockIterator, StateRootsIterator};
 use crate::leveldb_store::LevelDB;
 use crate::memory_store::MemoryStore;
+use crate::metadata::{
+    SchemaVersion, CONFIG_KEY, CURRENT_SCHEMA_VERSION, SCHEMA_VERSION_KEY, SPLIT_KEY,
+};
 use crate::metrics;
 use crate::{
     get_key_for_col, DBColumn, Error, ItemStore, KeyValueStoreOp, PartialBeaconState, StoreItem,
@@ -27,9 +30,6 @@ use std::path::Path;
 use std::sync::Arc;
 use types::*;
 
-/// 32-byte key for accessing the `split` of the freezer DB.
-pub const SPLIT_DB_KEY: &str = "FREEZERDBSPLITFREEZERDBSPLITFREE";
-
 /// Defines how blocks should be replayed on states.
 #[derive(PartialEq)]
 pub enum BlockReplay {
@@ -46,6 +46,8 @@ pub enum BlockReplay {
 /// intermittent "restore point" states pre-finalization.
 #[derive(Debug)]
 pub struct HotColdDB<E: EthSpec, Hot: ItemStore<E>, Cold: ItemStore<E>> {
+    /// The schema version. Loaded from disk on initialization.
+    schema_version: SchemaVersion,
     /// The slot and state root at the point where the database is split between hot and cold.
     ///
     /// States with slots less than `split.slot` are in the cold DB, while states with slots
@@ -70,6 +72,10 @@ pub struct HotColdDB<E: EthSpec, Hot: ItemStore<E>, Cold: ItemStore<E>> {
 
 #[derive(Debug, PartialEq)]
 pub enum HotColdDBError {
+    UnsupportedSchemaVersion {
+        software_version: SchemaVersion,
+        disk_version: SchemaVersion,
+    },
     /// Recoverable error indicating that the database freeze point couldn't be updated
     /// due to the finalized block not lying on an epoch boundary (should be infrequent).
     FreezeSlotUnaligned(Slot),
@@ -106,6 +112,7 @@ impl<E: EthSpec> HotColdDB<E, MemoryStore<E>, MemoryStore<E>> {
         Self::verify_slots_per_restore_point(config.slots_per_restore_point)?;
 
         let db = HotColdDB {
+            schema_version: CURRENT_SCHEMA_VERSION,
             split: RwLock::new(Split::default()),
             cold_db: MemoryStore::open(),
             hot_db: MemoryStore::open(),
@@ -134,6 +141,7 @@ impl<E: EthSpec> HotColdDB<E, LevelDB<E>, LevelDB<E>> {
         Self::verify_slots_per_restore_point(config.slots_per_restore_point)?;
 
         let db = HotColdDB {
+            schema_version: CURRENT_SCHEMA_VERSION,
             split: RwLock::new(Split::default()),
             cold_db: LevelDB::open(cold_path)?,
             hot_db: LevelDB::open(hot_path)?,
@@ -144,12 +152,33 @@ impl<E: EthSpec> HotColdDB<E, LevelDB<E>, LevelDB<E>> {
             _phantom: PhantomData,
         };
 
+        // Ensure that the schema version of the on-disk database matches the software.
+        // In the future, this would be the spot to hook in auto-migration, etc.
+        if let Some(schema_version) = db.load_schema_version()? {
+            if schema_version != CURRENT_SCHEMA_VERSION {
+                return Err(HotColdDBError::UnsupportedSchemaVersion {
+                    software_version: CURRENT_SCHEMA_VERSION,
+                    disk_version: schema_version,
+                }
+                .into());
+            }
+        } else {
+            db.store_schema_version(CURRENT_SCHEMA_VERSION)?;
+        }
+
+        // Ensure that any on-disk config is compatible with the supplied config.
+        if let Some(disk_config) = db.load_config()? {
+            db.config.check_compatibility(&disk_config)?;
+        }
+        db.store_config()?;
+
         // Load the previous split slot from the database (if any). This ensures we can
         // stop and restart correctly.
         if let Some(split) = db.load_split()? {
             info!(
                 db.log,
                 "Hot-Cold DB initialized";
+                "version" => db.schema_version.0,
                 "split_slot" => split.slot,
                 "split_state" => format!("{:?}", split.state_root)
             );
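The practical effect of these checks: a datadir created with `--slots-per-restore-point 2048` and reopened with `64` now fails at open time with `ConfigError(MismatchedSlotsPerRestorePoint { config: 64, on_disk: 2048 })`, and a datadir written under a different schema fails with `UnsupportedSchemaVersion`, instead of either case silently mis-indexing freezer restore points. A fresh datadir simply has the current schema version and config written on first open.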
@@ -744,11 +773,29 @@ impl<E: EthSpec, Hot: ItemStore<E>, Cold: ItemStore<E>> HotColdDB<E, Hot, Cold>
             * self.config.slots_per_restore_point
     }
 
+    /// Load the database schema version from disk.
+    fn load_schema_version(&self) -> Result<Option<SchemaVersion>, Error> {
+        self.hot_db.get(&SCHEMA_VERSION_KEY)
+    }
+
+    /// Store the database schema version.
+    fn store_schema_version(&self, schema_version: SchemaVersion) -> Result<(), Error> {
+        self.hot_db.put(&SCHEMA_VERSION_KEY, &schema_version)
+    }
+
+    /// Load previously-stored config from disk.
+    fn load_config(&self) -> Result<Option<StoreConfig>, Error> {
+        self.hot_db.get(&CONFIG_KEY)
+    }
+
+    /// Write the config to disk.
+    fn store_config(&self) -> Result<(), Error> {
+        self.hot_db.put(&CONFIG_KEY, &self.config)
+    }
+
     /// Load the split point from disk.
     fn load_split(&self) -> Result<Option<Split>, Error> {
-        let key = Hash256::from_slice(SPLIT_DB_KEY.as_bytes());
-        let split: Option<Split> = self.hot_db.get(&key)?;
-        Ok(split)
+        self.hot_db.get(&SPLIT_KEY)
     }
 
     /// Load the state root of a restore point.
@@ -927,9 +974,7 @@ pub fn migrate_database<E: EthSpec, Hot: ItemStore<E>, Cold: ItemStore<E>>(
         slot: frozen_head.slot,
         state_root: frozen_head_root,
     };
-    store
-        .hot_db
-        .put_sync(&Hash256::from_slice(SPLIT_DB_KEY.as_bytes()), &split)?;
+    store.hot_db.put_sync(&SPLIT_KEY, &split)?;
 
     // Split point is now persisted in the hot database on disk. The in-memory split point
     // hasn't been modified elsewhere since we keep a write lock on it. It's safe to update
@@ -19,6 +19,7 @@ pub mod hot_cold_store;
 mod impls;
 mod leveldb_store;
 mod memory_store;
+mod metadata;
 mod metrics;
 mod partial_beacon_state;
 
@@ -153,7 +154,7 @@ pub enum DBColumn {
 }
 
 impl Into<&'static str> for DBColumn {
-    /// Returns a `&str` that can be used for keying a key-value data base.
+    /// Returns a `&str` prefix to be added to keys before they hit the key-value database.
     fn into(self) -> &'static str {
         match self {
             DBColumn::BeaconMeta => "bma",
beacon_node/store/src/metadata.rs (new file, 29 lines)
@@ -0,0 +1,29 @@
+use crate::{DBColumn, Error, StoreItem};
+use ssz::{Decode, Encode};
+use types::Hash256;
+
+pub const CURRENT_SCHEMA_VERSION: SchemaVersion = SchemaVersion(1);
+
+// All the keys that get stored under the `BeaconMeta` column.
+//
+// We use `repeat_byte` because it's a const fn.
+pub const SCHEMA_VERSION_KEY: Hash256 = Hash256::repeat_byte(0);
+pub const CONFIG_KEY: Hash256 = Hash256::repeat_byte(1);
+pub const SPLIT_KEY: Hash256 = Hash256::repeat_byte(2);
+
+#[derive(Debug, Clone, Copy, PartialEq, Eq, PartialOrd, Ord)]
+pub struct SchemaVersion(pub u64);
+
+impl StoreItem for SchemaVersion {
+    fn db_column() -> DBColumn {
+        DBColumn::BeaconMeta
+    }
+
+    fn as_store_bytes(&self) -> Vec<u8> {
+        self.0.as_ssz_bytes()
+    }
+
+    fn from_store_bytes(bytes: &[u8]) -> Result<Self, Error> {
+        Ok(SchemaVersion(u64::from_ssz_bytes(bytes)?))
+    }
+}
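The fixed `Hash256::repeat_byte` keys give each metadata item a stable, collision-free address under the `BeaconMeta` column. A hedged sketch of a round-trip through the typed `StoreItem` interface, written as an illustrative test (not part of the commit; it assumes the `get`/`put` signatures visible in the hunks above):

#[cfg(test)]
mod metadata_roundtrip_tests {
    use super::*;
    use crate::{memory_store::MemoryStore, ItemStore};
    use types::MinimalEthSpec;

    #[test]
    fn schema_version_roundtrips() {
        let store: MemoryStore<MinimalEthSpec> = MemoryStore::open();
        // Write the current schema version under its fixed key...
        store.put(&SCHEMA_VERSION_KEY, &CURRENT_SCHEMA_VERSION).unwrap();
        // ...then read it back as the same strongly-typed item.
        let loaded: Option<SchemaVersion> = store.get(&SCHEMA_VERSION_KEY).unwrap();
        assert_eq!(loaded, Some(CURRENT_SCHEMA_VERSION));
    }
}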
@@ -3,6 +3,7 @@
 use beacon_chain::StateSkipConfig;
 use node_test_rig::{
     environment::{Environment, EnvironmentBuilder},
+    eth2::types::StateId,
     testing_client_config, LocalBeaconNode,
 };
 use types::{EthSpec, MinimalEthSpec, Slot};
@@ -34,10 +35,12 @@ fn http_server_genesis_state() {
     let node = build_node(&mut env);
     let remote_node = node.remote_node().expect("should produce remote node");
 
-    let (api_state, _root) = env
+    let api_state = env
         .runtime()
-        .block_on(remote_node.http.beacon().get_state_by_slot(Slot::new(0)))
-        .expect("should fetch state from http api");
+        .block_on(remote_node.get_debug_beacon_states(StateId::Slot(Slot::new(0))))
+        .expect("should fetch state from http api")
+        .unwrap()
+        .data;
 
     let mut db_state = node
         .client
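The test now goes through the new typed `eth2` API client instead of `remote_beacon_node`: the state is addressed by a `StateId` (here `StateId::Slot`), `get_debug_beacon_states` corresponds to the standard `/eth/v1/debug/beacon/states/{state_id}` endpoint, and the payload arrives in a response wrapper's `.data` field, with the outer `Option` (hence the `unwrap`) covering the state-not-found case.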
|
@ -12,4 +12,4 @@ tokio = { version = "0.2.22", features = ["full"] }
|
|||||||
slog = "2.5.2"
|
slog = "2.5.2"
|
||||||
parking_lot = "0.11.0"
|
parking_lot = "0.11.0"
|
||||||
futures = "0.3.5"
|
futures = "0.3.5"
|
||||||
environment = { path = "../../lighthouse/environment" }
|
task_executor = { path = "../../common/task_executor" }
|
||||||
|
Some files were not shown because too many files have changed in this diff.