Upgrade to tokio 0.3 (#1839)
## Description

This PR updates Lighthouse to tokio 0.3. It includes a number of dependency updates and some structural changes to how we create and spawn tasks. It also brings a number of other improvements:

- Discv5 update
- Libp2p update
- Fix for recompilation issues
- Improved UPnP port mapping handling
- Futures dependency update
- Log downgrade to trace for rejecting peers when we've reached our max

Co-authored-by: blacktemplar <blacktemplar@a1.net>
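Most of the mechanical churn in the diff below comes from a handful of tokio API renames plus a compatibility shim for libraries that are still on tokio 0.2. The following is a minimal sketch, not code from this PR; it assumes `tokio = { version = "0.3", features = ["full"] }` and `tokio-compat-02 = "0.1"` in `Cargo.toml`, and `legacy_request` is a hypothetical stand-in for any future built against a tokio 0.2 reactor:

```rust
use std::time::Duration;
use tokio_compat_02::FutureExt; // wraps a future so tokio 0.2 based libraries see a 0.2 reactor

// Hypothetical placeholder for a future built against tokio 0.2 (e.g. an old reqwest or web3 call).
async fn legacy_request() {}

#[tokio::main] // tokio 0.3 runtime
async fn main() {
    // 0.2: tokio::time::delay_for(d).await    ->  0.3: tokio::time::sleep(d).await
    tokio::time::sleep(Duration::from_secs(1)).await;

    // 0.2: tokio::time::delay_until(instant)  ->  0.3: tokio::time::sleep_until(instant)
    tokio::time::sleep_until(tokio::time::Instant::now() + Duration::from_secs(1)).await;

    // 0.2: tokio::time::Elapsed               ->  0.3: tokio::time::error::Elapsed
    let _maybe_timed_out: Result<(), tokio::time::error::Elapsed> =
        tokio::time::timeout(Duration::from_millis(10), legacy_request().compat()).await;

    // DelayQueue / delay_queue now live in tokio-util 0.4 (tokio_util::time::DelayQueue),
    // which is why the RPC handler in this diff swaps its imports over.
}
```

The same shim appears in the eth1 tests below as `async { .. }.compat().await`, and several `#[tokio::test]` functions become plain `#[test]` functions that create an explicit `Runtime` and `block_on` the async body, so a `Weak` handle to that runtime can be handed to the task executor.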
This commit is contained in:
parent 5a3b94cbb4
commit a567f788bd
Cargo.lock (generated, 811 changes)
File diff suppressed because it is too large.
Makefile (2 changes)

@@ -140,7 +140,7 @@ audit:
 #
 # Tracking issue:
 # https://github.com/sigp/lighthouse/issues/1669
-cargo audit --ignore RUSTSEC-2020-0043
+cargo audit --ignore RUSTSEC-2020-0043 --ignore RUSTSEC-2016-0002 --ignore RUSTSEC-2020-0008 --ignore RUSTSEC-2017-0002

 # Runs `cargo udeps` to check for unused dependencies
 udeps:
@@ -20,20 +20,21 @@ eth2_ssz_derive = "0.1.0"
 hex = "0.4.2"
 rayon = "1.4.1"
 eth2_testnet_config = { path = "../common/eth2_testnet_config" }
-futures = { version = "0.3.5", features = ["compat"] }
+futures = { version = "0.3.7", features = ["compat"] }
 clap_utils = { path = "../common/clap_utils" }
 directory = { path = "../common/directory" }
 eth2_wallet = { path = "../crypto/eth2_wallet" }
 eth2_wallet_manager = { path = "../common/eth2_wallet_manager" }
 rand = "0.7.3"
 validator_dir = { path = "../common/validator_dir" }
-tokio = { version = "0.2.22", features = ["full"] }
+tokio = { version = "0.3.2", features = ["full"] }
 eth2_keystore = { path = "../crypto/eth2_keystore" }
 account_utils = { path = "../common/account_utils" }
 slashing_protection = { path = "../validator_client/slashing_protection" }
 eth2 = {path = "../common/eth2"}
 safe_arith = {path = "../consensus/safe_arith"}
 slot_clock = { path = "../common/slot_clock" }
+tokio-compat-02 = "0.1"

 [dev-dependencies]
 tempfile = "3.1.0"

@@ -12,6 +12,7 @@ use safe_arith::SafeArith;
 use slot_clock::{SlotClock, SystemTimeSlotClock};
 use std::path::PathBuf;
 use std::time::Duration;
+use tokio_compat_02::FutureExt;
 use types::{ChainSpec, Epoch, EthSpec, Fork, VoluntaryExit};

 pub const CMD: &str = "exit";

@@ -58,7 +59,7 @@ pub fn cli_app<'a, 'b>() -> App<'a, 'b> {
 )
 }

-pub fn cli_run<E: EthSpec>(matches: &ArgMatches, mut env: Environment<E>) -> Result<(), String> {
+pub fn cli_run<E: EthSpec>(matches: &ArgMatches, env: Environment<E>) -> Result<(), String> {
 let keystore_path: PathBuf = clap_utils::parse_required(matches, KEYSTORE_FLAG)?;
 let password_file_path: Option<PathBuf> =
 clap_utils::parse_optional(matches, PASSWORD_FILE_FLAG)?;

@@ -76,14 +77,17 @@ pub fn cli_run<E: EthSpec>(matches: &ArgMatches, mut env: Environment<E>) -> Res
 .clone()
 .expect("network should have a valid config");

-env.runtime().block_on(publish_voluntary_exit::<E>(
+env.runtime().block_on(
+publish_voluntary_exit::<E>(
 &keystore_path,
 password_file_path.as_ref(),
 &client,
 &spec,
 stdin_inputs,
 &testnet_config,
-))?;
+)
+.compat(),
+)?;

 Ok(())
 }

@@ -155,7 +159,7 @@ async fn publish_voluntary_exit<E: EthSpec>(
 .post_beacon_pool_voluntary_exits(&signed_voluntary_exit)
 .await
 .map_err(|e| format!("Failed to publish voluntary exit: {}", e))?;
-tokio::time::delay_for(std::time::Duration::from_secs(1)).await; // Provides nicer UX.
+tokio::time::sleep(std::time::Duration::from_secs(1)).await; // Provides nicer UX.
 eprintln!(
 "Successfully validated and published voluntary exit for validator {}",
 keypair.pk
@@ -10,6 +10,7 @@ path = "src/lib.rs"

 [dev-dependencies]
 node_test_rig = { path = "../testing/node_test_rig" }
+tokio-compat-02 = "0.1"

 [features]
 write_ssz_files = ["beacon_chain/write_ssz_files"] # Writes debugging .ssz files to /tmp during block processing.

@@ -26,12 +27,12 @@ slog = { version = "2.5.2", features = ["max_level_trace", "release_max_level_tr
 slog-term = "2.6.0"
 slog-async = "2.5.0"
 ctrlc = { version = "3.1.6", features = ["termination"] }
-tokio = { version = "0.2.22", features = ["time"] }
+tokio = { version = "0.3.2", features = ["time"] }
 exit-future = "0.2.0"
 dirs = "3.0.1"
 logging = { path = "../common/logging" }
 directory = {path = "../common/directory"}
-futures = "0.3.5"
+futures = "0.3.7"
 environment = { path = "../lighthouse/environment" }
 task_executor = { path = "../common/task_executor" }
 genesis = { path = "genesis" }

@@ -40,10 +40,10 @@ eth2_ssz_derive = "0.1.0"
 state_processing = { path = "../../consensus/state_processing" }
 tree_hash = "0.1.1"
 types = { path = "../../consensus/types" }
-tokio = "0.2.22"
+tokio = "0.3.2"
 eth1 = { path = "../eth1" }
 websocket_server = { path = "../websocket_server" }
-futures = "0.3.5"
+futures = "0.3.7"
 genesis = { path = "../genesis" }
 integer-sqrt = "0.1.5"
 rand = "0.7.3"
@@ -413,7 +413,7 @@ impl<T: BeaconChainTypes> VerifiedAggregatedAttestation<T> {

 // Ensure there has been no other observed aggregate for the given `aggregator_index`.
 //
-// Note: do not observe yet, only observe once the attestation has been verfied.
+// Note: do not observe yet, only observe once the attestation has been verified.
 match chain
 .observed_aggregators
 .read()

@@ -27,9 +27,9 @@ error-chain = "0.12.4"
 serde_yaml = "0.8.13"
 slog = { version = "2.5.2", features = ["max_level_trace"] }
 slog-async = "2.5.0"
-tokio = "0.2.22"
+tokio = "0.3.2"
 dirs = "3.0.1"
-futures = "0.3.5"
+futures = "0.3.7"
 reqwest = { version = "0.10.8", features = ["native-tls-vendored"] }
 url = "2.1.1"
 eth1 = { path = "../eth1" }
@@ -254,10 +254,16 @@ where
 let (listen_addr, server) = http_api::serve(ctx, exit_future)
 .map_err(|e| format!("Unable to start HTTP API server: {:?}", e))?;

+let log_clone = context.log().clone();
+let http_api_task = async move {
+server.await;
+debug!(log_clone, "HTTP API server task ended");
+};
+
 context
 .clone()
 .executor
-.spawn_without_exit(async move { server.await }, "http-api");
+.spawn_without_exit(http_api_task, "http-api");

 Some(listen_addr)
 } else {

@@ -283,7 +289,7 @@ where
 "Waiting for HTTP server port to open";
 "port" => http_listen
 );
-tokio::time::delay_for(Duration::from_secs(1)).await;
+tokio::time::sleep(Duration::from_secs(1)).await;
 }
 }

@@ -442,10 +448,16 @@ where
 let (listen_addr, server) = http_api::serve(ctx, exit)
 .map_err(|e| format!("Unable to start HTTP API server: {:?}", e))?;

+let http_log = runtime_context.log().clone();
+let http_api_task = async move {
+server.await;
+debug!(http_log, "HTTP API server task ended");
+};
+
 runtime_context
 .clone()
 .executor
-.spawn_without_exit(async move { server.await }, "http-api");
+.spawn_without_exit(http_api_task, "http-api");

 Some(listen_addr)
 } else {
@@ -7,7 +7,7 @@ use slog::{debug, error, info, warn};
 use slot_clock::SlotClock;
 use std::sync::Arc;
 use std::time::{Duration, Instant};
-use tokio::time::delay_for;
+use tokio::time::sleep;
 use types::{EthSpec, Slot};

 /// Create a warning log whenever the peer count is at or below this value.

@@ -56,7 +56,7 @@ pub fn spawn_notifier<T: BeaconChainTypes>(
 "peers" => peer_count_pretty(network.connected_peers()),
 "wait_time" => estimated_time_pretty(Some(next_slot.as_secs() as f64)),
 );
-delay_for(slot_duration).await;
+sleep(slot_duration).await;
 }
 _ => break,
 }
@@ -10,10 +10,11 @@ toml = "0.5.6"
 web3 = "0.11.0"
 sloggers = "1.0.1"
 environment = { path = "../../lighthouse/environment" }
+tokio-compat-02 = "0.1"

 [dependencies]
 reqwest = { version = "0.10.8", features = ["native-tls-vendored"] }
-futures = { version = "0.3.5", features = ["compat"] }
+futures = { version = "0.3.7", features = ["compat"] }
 serde_json = "1.0.58"
 serde = { version = "1.0.116", features = ["derive"] }
 hex = "0.4.2"

@@ -25,7 +26,7 @@ tree_hash = "0.1.1"
 eth2_hashing = "0.1.0"
 parking_lot = "0.11.0"
 slog = "2.5.2"
-tokio = { version = "0.2.22", features = ["full"] }
+tokio = { version = "0.3.2", features = ["full"] }
 state_processing = { path = "../../consensus/state_processing" }
 libflate = "1.0.2"
 lighthouse_metrics = { path = "../../common/lighthouse_metrics"}
@@ -10,6 +10,7 @@ use slog::Logger;
 use sloggers::{null::NullLoggerBuilder, Build};
 use std::ops::Range;
 use std::time::Duration;
+use tokio_compat_02::FutureExt;
 use tree_hash::TreeHash;
 use types::{DepositData, EthSpec, Hash256, Keypair, MainnetEthSpec, MinimalEthSpec, Signature};
 use web3::{transports::Http, Web3};

@@ -23,9 +24,7 @@ pub fn null_logger() -> Logger {

 pub fn new_env() -> Environment<MinimalEthSpec> {
 EnvironmentBuilder::minimal()
-// Use a single thread, so that when all tests are run in parallel they don't have so many
-// threads.
-.single_thread_tokio_runtime()
+.multi_threaded_tokio_runtime()
 .expect("should start tokio runtime")
 .null_logger()
 .expect("should start null logger")
@@ -107,6 +106,7 @@ mod eth1_cache {

 #[tokio::test]
 async fn simple_scenario() {
+async {
 let log = null_logger();

 for follow_distance in 0..2 {

@@ -180,11 +180,15 @@ mod eth1_cache {
 }
 }
 }
+.compat()
+.await;
+}

 /// Tests the case where we attempt to download more blocks than will fit in the cache.

 #[tokio::test]
 async fn big_skip() {
+async {
 let log = null_logger();

 let eth1 = new_ganache_instance()

@@ -231,11 +235,15 @@ mod eth1_cache {
 "should not grow cache beyond target"
 );
 }
+.compat()
+.await;
+}

 /// Tests to ensure that the cache gets pruned when doing multiple downloads smaller than the
 /// cache size.
 #[tokio::test]
 async fn pruning() {
+async {
 let log = null_logger();

 let eth1 = new_ganache_instance()

@@ -280,9 +288,13 @@ mod eth1_cache {
 "should not grow cache beyond target"
 );
 }
+.compat()
+.await;
+}

 #[tokio::test]
 async fn double_update() {
+async {
 let log = null_logger();

 let n = 16;

@@ -323,6 +335,9 @@ mod eth1_cache {

 assert!(service.block_cache_len() >= n, "should grow the cache");
 }
+.compat()
+.await;
+}
 }

 mod deposit_tree {
@@ -330,6 +345,7 @@ mod deposit_tree {

 #[tokio::test]
 async fn updating() {
+async {
 let log = null_logger();

 let n = 4;

@@ -404,9 +420,13 @@ mod deposit_tree {
 );
 }
 }
+.compat()
+.await;
+}

 #[tokio::test]
 async fn double_update() {
+async {
 let log = null_logger();

 let n = 8;

@@ -450,9 +470,13 @@ mod deposit_tree {

 assert_eq!(service.deposit_cache_len(), n);
 }
+.compat()
+.await;
+}

 #[tokio::test]
 async fn cache_consistency() {
+async {
 let n = 8;

 let spec = &MainnetEthSpec::default_spec();

@@ -547,6 +571,9 @@ mod deposit_tree {
 }
 }
 }
+.compat()
+.await;
+}
 }

 /// Tests for the base HTTP requests and response handlers.
@@ -566,6 +593,7 @@ mod http {

 #[tokio::test]
 async fn incrementing_deposits() {
+async {
 let eth1 = new_ganache_instance()
 .await
 .expect("should start eth1 environment");

@@ -647,6 +675,9 @@ mod http {
 );
 }
 }
+.compat()
+.await;
+}
 }

 mod fast {

@@ -656,6 +687,7 @@ mod fast {
 // with the deposit count and root computed from the deposit cache.
 #[tokio::test]
 async fn deposit_cache_query() {
+async {
 let log = null_logger();

 let eth1 = new_ganache_instance()

@@ -724,12 +756,16 @@ mod fast {
 );
 }
 }
+.compat()
+.await;
+}
 }

 mod persist {
 use super::*;
 #[tokio::test]
 async fn test_persist_caches() {
+async {
 let log = null_logger();

 let eth1 = new_ganache_instance()
@@ -789,7 +825,8 @@ mod persist {
 drop(service);

 let recovered_service =
-Service::from_bytes(&eth1_bytes, config, log, MainnetEthSpec::default_spec()).unwrap();
+Service::from_bytes(&eth1_bytes, config, log, MainnetEthSpec::default_spec())
+.unwrap();
 assert_eq!(
 recovered_service.block_cache_len(),
 block_count,

@@ -801,15 +838,19 @@ mod persist {
 "Should have equal cached deposits as before recovery"
 );
 }
+.compat()
+.await;
+}
 }

 /// Tests for eth1 fallback
 mod fallbacks {
 use super::*;
-use tokio::time::delay_for;
+use tokio::time::sleep;

 #[tokio::test]
 async fn test_fallback_when_offline() {
+async {
 let log = null_logger();
 let endpoint2 = new_ganache_instance()
 .await
@@ -874,9 +915,13 @@ mod fallbacks {
 endpoint2_block_number
 );
 }
+.compat()
+.await;
+}

 #[tokio::test]
 async fn test_fallback_when_wrong_network_id() {
+async {
 let log = null_logger();
 let correct_network_id: u64 = DEFAULT_NETWORK_ID.into();
 let wrong_network_id = correct_network_id + 1;

@@ -937,9 +982,13 @@ mod fallbacks {
 endpoint2_block_number
 );
 }
+.compat()
+.await;
+}

 #[tokio::test]
 async fn test_fallback_when_wrong_chain_id() {
+async {
 let log = null_logger();
 let correct_chain_id: u64 = DEFAULT_CHAIN_ID.into();
 let wrong_chain_id = correct_chain_id + 1;

@@ -1000,9 +1049,13 @@ mod fallbacks {
 endpoint2_block_number
 );
 }
+.compat()
+.await;
+}

 #[tokio::test]
 async fn test_fallback_when_node_far_behind() {
+async {
 let log = null_logger();
 let endpoint2 = new_ganache_instance()
 .await

@@ -1048,7 +1101,7 @@ mod fallbacks {
 endpoint1_block_number
 );

-delay_for(Duration::from_secs(7)).await;
+sleep(Duration::from_secs(7)).await;

 //both endpoints don't have recent blocks => should return error
 assert!(service.update().await.is_err());

@@ -1071,4 +1124,7 @@ mod fallbacks {
 endpoint2_block_number
 );
 }
+.compat()
+.await;
+}
 }
@@ -5,7 +5,8 @@ authors = ["Sigma Prime <contact@sigmaprime.io>"]
 edition = "2018"

 [dependencies]
-discv5 = { git = "https://github.com/sigp/discv5", rev = "fba7ceb5cfebd219ebbad6ffdb5d8c31dc8e4bc0", features = ["libp2p"] }
+discv5 = { version = "0.1.0-beta.2", features = ["libp2p"] }
+unsigned-varint = { git = "https://github.com/sigp/unsigned-varint", branch = "dep-update", features = ["codec"] }
 types = { path = "../../consensus/types" }
 hashset_delay = { path = "../../common/hashset_delay" }
 eth2_ssz_types = { path = "../../consensus/ssz_types" }

@@ -15,15 +16,15 @@ eth2_ssz = "0.1.2"
 eth2_ssz_derive = "0.1.0"
 slog = { version = "2.5.2", features = ["max_level_trace"] }
 lighthouse_version = { path = "../../common/lighthouse_version" }
-tokio = { version = "0.2.22", features = ["time", "macros"] }
+tokio = { version = "0.3.2", features = ["time", "macros"] }
-futures = "0.3.5"
+futures = "0.3.7"
 error-chain = "0.12.4"
 dirs = "3.0.1"
 fnv = "1.0.7"
-unsigned-varint = { git = "https://github.com/sigp/unsigned-varint", branch = "latest-codecs", features = ["codec"] }
 lazy_static = "1.4.0"
 lighthouse_metrics = { path = "../../common/lighthouse_metrics" }
 smallvec = "1.4.2"
+tokio-io-timeout = "0.5.0"
 lru = "0.6.0"
 parking_lot = "0.11.0"
 sha2 = "0.9.1"

@@ -31,8 +32,7 @@ base64 = "0.13.0"
 snap = "1.0.1"
 void = "1.0.2"
 hex = "0.4.2"
-tokio-io-timeout = "0.4.0"
-tokio-util = { version = "0.3.1", features = ["codec", "compat"] }
+tokio-util = { version = "0.4.0", features = ["codec", "compat"] }
 tiny-keccak = "2.0.2"
 task_executor = { path = "../../common/task_executor" }
 rand = "0.7.3"

@@ -42,12 +42,12 @@ regex = "1.3.9"
 [dependencies.libp2p]
 #version = "0.23.0"
 git = "https://github.com/sigp/rust-libp2p"
-rev = "f53d02bc873fef2bf52cd31e3d5ce366a41d8a8c"
+rev = "e3caf9e0e5e78c9d51c6dccf0d6277cef553bb25"
 default-features = false
 features = ["websocket", "identify", "mplex", "yamux", "noise", "gossipsub", "dns", "tcp-tokio"]

 [dev-dependencies]
-tokio = { version = "0.2.22", features = ["full"] }
+tokio = { version = "0.3.2", features = ["full"] }
 slog-term = "2.6.0"
 slog-async = "2.5.0"
 tempdir = "0.3.7"
@@ -983,7 +983,14 @@ impl<TSpec: EthSpec> NetworkBehaviour for Behaviour<TSpec> {
 };

 if let Some(goodbye_reason) = goodbye_reason {
-debug!(self.log, "Disconnecting newly connected peer"; "peer_id" => peer_id.to_string(), "reason" => goodbye_reason.to_string());
+match goodbye_reason {
+GoodbyeReason::Banned => {
+debug!(self.log, "Disconnecting newly connected peer"; "peer_id" => peer_id.to_string(), "reason" => goodbye_reason.to_string())
+}
+_ => {
+trace!(self.log, "Disconnecting newly connected peer"; "peer_id" => peer_id.to_string(), "reason" => goodbye_reason.to_string())
+}
+}
 self.peers_to_dc
 .push_back((peer_id.clone(), Some(goodbye_reason)));
 // NOTE: We don't inform the peer manager that this peer is disconnecting. It is simply

@@ -1079,6 +1086,8 @@ impl<TSpec: EthSpec> NetworkBehaviour for Behaviour<TSpec> {
 // Inform the behaviour.
 delegate_to_behaviours!(self, inject_disconnected, peer_id);

+debug!(self.log, "Peer disconnected"; "peer_id" => %peer_id);
+
 // Decrement the PEERS_PER_CLIENT metric
 if let Some(kind) = self
 .network_globals
@@ -212,7 +212,10 @@ impl<TSpec: EthSpec> Discovery<TSpec> {

 // Start the discv5 service and obtain an event stream
 let event_stream = if !config.disable_discovery {
-discv5.start(listen_socket).map_err(|e| e.to_string())?;
+discv5
+.start(listen_socket)
+.map_err(|e| e.to_string())
+.await?;
 debug!(log, "Discovery service started");
 EventStream::Awaiting(Box::pin(discv5.event_stream()))
 } else {

@@ -712,8 +715,10 @@ impl<TSpec: EthSpec> Discovery<TSpec> {
 return;
 }
 };
-// predicate for finding nodes with a matching fork
+// predicate for finding nodes with a matching fork and valid tcp port
-let eth2_fork_predicate = move |enr: &Enr| enr.eth2() == Ok(enr_fork_id.clone());
+let eth2_fork_predicate = move |enr: &Enr| {
+enr.eth2() == Ok(enr_fork_id.clone()) && (enr.tcp().is_some() || enr.tcp6().is_some())
+};

 // General predicate
 let predicate: Box<dyn Fn(&Enr) -> bool + Send> =

@@ -743,7 +748,7 @@ impl<TSpec: EthSpec> Discovery<TSpec> {
 }
 Ok(r) => {
 debug!(self.log, "Discovery query completed"; "peers_found" => r.len());
-let mut results: HashMap<PeerId, Option<Instant>> = HashMap::new();
+let mut results: HashMap<_, Option<Instant>> = HashMap::new();
 r.iter().for_each(|enr| {
 // cache the found ENR's
 self.cached_enrs.put(enr.peer_id(), enr.clone());

@@ -766,7 +771,7 @@ impl<TSpec: EthSpec> Discovery<TSpec> {
 Ok(r) => {
 debug!(self.log, "Peer grouped subnet discovery request completed"; "peers_found" => r.len(), "subnets_searched_for" => format!("{:?}",subnets_searched_for));

-let mut mapped_results: HashMap<PeerId, Option<Instant>> = HashMap::new();
+let mut mapped_results = HashMap::new();

 // cache the found ENR's
 for enr in r.iter().cloned() {
@@ -7,6 +7,8 @@ extern crate lazy_static;

 pub mod behaviour;
 mod config;
+
+#[allow(clippy::mutable_key_type)] // PeerId in hashmaps are no longer permitted by clippy
 pub mod discovery;
 mod metrics;
 mod peer_manager;

@@ -64,6 +66,7 @@ pub use config::Config as NetworkConfig;
 pub use config::{GossipsubConfig, GossipsubConfigBuilder, GossipsubMessage};
 pub use discovery::{CombinedKeyExt, EnrExt, Eth2Enr};
 pub use discv5;
+pub use libp2p::bandwidth::BandwidthSinks;
 pub use libp2p::gossipsub::{MessageAcceptance, MessageId, Topic, TopicHash};
 pub use libp2p::{core::ConnectedPoint, PeerId, Swarm};
 pub use libp2p::{multiaddr, Multiaddr};

@@ -27,6 +27,7 @@ pub use libp2p::core::{identity::Keypair, Multiaddr};
 pub mod client;
 mod peer_info;
 mod peer_sync_status;
+#[allow(clippy::mutable_key_type)] // PeerId in hashmaps are no longer permitted by clippy
 mod peerdb;
 pub(crate) mod score;

@@ -639,6 +640,7 @@ impl<TSpec: EthSpec> PeerManager<TSpec> {
 /// with a new `PeerId` which involves a discovery routing table lookup. We could dial the
 /// multiaddr here, however this could relate to duplicate PeerId's etc. If the lookup
 /// proves resource constraining, we should switch to multiaddr dialling here.
+#[allow(clippy::mutable_key_type)]
 fn peers_discovered(&mut self, results: HashMap<PeerId, Option<Instant>>) {
 let mut to_dial_peers = Vec::new();
@@ -22,7 +22,8 @@ use std::{
 task::{Context, Poll},
 time::Duration,
 };
-use tokio::time::{delay_queue, delay_until, Delay, DelayQueue, Instant as TInstant};
+use tokio::time::{sleep_until, Instant as TInstant, Sleep};
+use tokio_util::time::{delay_queue, DelayQueue};
 use types::EthSpec;

 /// The time (in seconds) before a substream that is awaiting a response from the user times out.

@@ -132,7 +133,7 @@ enum HandlerState {
 ///
 /// While in this state the handler rejects new requests but tries to finish existing ones.
 /// Once the timer expires, all messages are killed.
-ShuttingDown(Delay),
+ShuttingDown(Sleep),
 /// The handler is deactivated. A goodbye has been sent and no more messages are sent or
 /// received.
 Deactivated,

@@ -255,7 +256,7 @@ where
 self.dial_queue.push((id, req));
 }

-self.state = HandlerState::ShuttingDown(delay_until(
+self.state = HandlerState::ShuttingDown(sleep_until(
 TInstant::now() + Duration::from_secs(SHUTDOWN_TIMEOUT_SECS as u64),
 ));
 }

@@ -540,7 +541,7 @@ where

 // purge expired inbound substreams and send an error
 loop {
-match self.inbound_substreams_delay.poll_next_unpin(cx) {
+match self.inbound_substreams_delay.poll_expired(cx) {
 Poll::Ready(Some(Ok(inbound_id))) => {
 // handle a stream timeout for various states
 if let Some(info) = self.inbound_substreams.get_mut(inbound_id.get_ref()) {

@@ -574,7 +575,7 @@ where

 // purge expired outbound substreams
 loop {
-match self.outbound_substreams_delay.poll_next_unpin(cx) {
+match self.outbound_substreams_delay.poll_expired(cx) {
 Poll::Ready(Some(Ok(outbound_id))) => {
 if let Some(OutboundInfo { proto, req_id, .. }) =
 self.outbound_substreams.remove(outbound_id.get_ref())

@@ -672,6 +673,7 @@ where
 if let Some(ref delay_key) = info.delay_key {
 self.inbound_substreams_delay.remove(delay_key);
 }
+break;
 } else {
 // If we are not removing this substream, we reset the timer.
 // Each chunk is allowed RESPONSE_TIMEOUT to be sent.
@@ -503,8 +503,8 @@ impl From<ssz::DecodeError> for RPCError {
 RPCError::SSZDecodeError(err)
 }
 }
-impl From<tokio::time::Elapsed> for RPCError {
+impl From<tokio::time::error::Elapsed> for RPCError {
-fn from(_: tokio::time::Elapsed) -> Self {
+fn from(_: tokio::time::error::Elapsed) -> Self {
 RPCError::StreamTimeout
 }
 }
@@ -12,6 +12,7 @@ use libp2p::core::{
 identity::Keypair, multiaddr::Multiaddr, muxing::StreamMuxerBox, transport::Boxed,
 };
 use libp2p::{
+bandwidth::{BandwidthLogging, BandwidthSinks},
 core, noise,
 swarm::{SwarmBuilder, SwarmEvent},
 PeerId, Swarm, Transport,

@@ -48,10 +49,10 @@ pub enum Libp2pEvent<TSpec: EthSpec> {
 pub struct Service<TSpec: EthSpec> {
 /// The libp2p Swarm handler.
 pub swarm: Swarm<Behaviour<TSpec>>,
+/// The bandwidth logger for the underlying libp2p transport.
+pub bandwidth: Arc<BandwidthSinks>,
 /// This node's PeerId.
 pub local_peer_id: PeerId,

 /// The libp2p logger handle.
 pub log: Logger,
 }

@@ -100,10 +101,11 @@ impl<TSpec: EthSpec> Service<TSpec> {
 };
 debug!(log, "Attempting to open listening ports"; "address" => format!("{}", config.listen_address), "tcp_port" => config.libp2p_port, "udp_port" => discovery_string);

-let mut swarm = {
+let (mut swarm, bandwidth) = {
 // Set up the transport - tcp/ws with noise and mplex
-let transport = build_transport(local_keypair.clone())
+let (transport, bandwidth) = build_transport(local_keypair.clone())
 .map_err(|e| format!("Failed to build transport: {:?}", e))?;

 // Lighthouse network behaviour
 let behaviour = Behaviour::new(
 &local_keypair,

@@ -121,6 +123,7 @@ impl<TSpec: EthSpec> Service<TSpec> {
 self.0.spawn(f, "libp2p");
 }
 }
+(
 SwarmBuilder::new(transport, behaviour, local_peer_id.clone())
 .notify_handler_buffer_size(std::num::NonZeroUsize::new(32).expect("Not zero"))
 .connection_event_buffer_size(64)

@@ -128,7 +131,9 @@ impl<TSpec: EthSpec> Service<TSpec> {
 .outgoing_connection_limit(config.target_peers * 2)
 .peer_connection_limit(MAX_CONNECTIONS_PER_PEER)
 .executor(Box::new(Executor(executor)))
-.build()
+.build(),
+bandwidth,
+)
 };

 // listen on the specified address

@@ -221,6 +226,7 @@ impl<TSpec: EthSpec> Service<TSpec> {

 let service = Service {
 local_peer_id,
+bandwidth,
 swarm,
 log,
 };

@@ -273,7 +279,7 @@ impl<TSpec: EthSpec> Service<TSpec> {
 endpoint: _,
 num_established,
 } => {
-debug!(self.log, "Connection closed"; "peer_id"=> peer_id.to_string(), "cause" => format!("{:?}", cause), "connections" => num_established);
+trace!(self.log, "Connection closed"; "peer_id"=> peer_id.to_string(), "cause" => format!("{:?}", cause), "connections" => num_established);
 }
 SwarmEvent::NewListenAddr(multiaddr) => {
 return Libp2pEvent::NewListenAddr(multiaddr)

@@ -282,7 +288,7 @@ impl<TSpec: EthSpec> Service<TSpec> {
 local_addr,
 send_back_addr,
 } => {
-debug!(self.log, "Incoming connection"; "our_addr" => local_addr.to_string(), "from" => send_back_addr.to_string())
+trace!(self.log, "Incoming connection"; "our_addr" => local_addr.to_string(), "from" => send_back_addr.to_string())
 }
 SwarmEvent::IncomingConnectionError {
 local_addr,
@@ -329,9 +335,13 @@ impl<TSpec: EthSpec> Service<TSpec> {
 }
 }

+type BoxedTransport = Boxed<(PeerId, StreamMuxerBox)>;
+
 /// The implementation supports TCP/IP, WebSockets over TCP/IP, noise as the encryption layer, and
 /// mplex as the multiplexing layer.
-fn build_transport(local_private_key: Keypair) -> std::io::Result<Boxed<(PeerId, StreamMuxerBox)>> {
+fn build_transport(
+local_private_key: Keypair,
+) -> std::io::Result<(BoxedTransport, Arc<BandwidthSinks>)> {
 let transport = libp2p::tcp::TokioTcpConfig::new().nodelay(true);
 let transport = libp2p::dns::DnsConfig::new(transport)?;
 #[cfg(feature = "libp2p-websocket")]

@@ -340,21 +350,26 @@ fn build_transport(local_private_key: Keypair) -> std::io::Result<Boxed<(PeerId,
 transport.or_transport(libp2p::websocket::WsConfig::new(trans_clone))
 };

+let (transport, bandwidth) = BandwidthLogging::new(transport);
+
 // mplex config
 let mut mplex_config = libp2p::mplex::MplexConfig::new();
-mplex_config.max_buffer_len(256);
+mplex_config.set_max_buffer_size(256);
-mplex_config.max_buffer_len_behaviour(libp2p::mplex::MaxBufferBehaviour::Block);
+mplex_config.set_max_buffer_behaviour(libp2p::mplex::MaxBufferBehaviour::Block);

 // Authentication
-Ok(transport
+Ok((
+transport
 .upgrade(core::upgrade::Version::V1)
 .authenticate(generate_noise_config(&local_private_key))
 .multiplex(core::upgrade::SelectUpgrade::new(
-libp2p::yamux::Config::default(),
+libp2p::yamux::YamuxConfig::default(),
 mplex_config,
 ))
 .timeout(Duration::from_secs(10))
-.boxed())
+.boxed(),
+bandwidth,
+))
 }

 // Useful helper functions for debugging. Currently not used in the client.
@@ -6,7 +6,9 @@ use eth2_libp2p::Service as LibP2PService;
 use eth2_libp2p::{GossipsubConfigBuilder, Libp2pEvent, NetworkConfig};
 use slog::{debug, error, o, Drain};
 use std::net::{TcpListener, UdpSocket};
+use std::sync::Weak;
 use std::time::Duration;
+use tokio::runtime::Runtime;
 use types::{ChainSpec, EnrForkId, MinimalEthSpec};

 type E = MinimalEthSpec;

@@ -91,19 +93,18 @@ pub fn build_config(port: u16, mut boot_nodes: Vec<Enr>) -> NetworkConfig {
 config
 }

-pub async fn build_libp2p_instance(boot_nodes: Vec<Enr>, log: slog::Logger) -> Libp2pInstance {
+pub async fn build_libp2p_instance(
+rt: Weak<Runtime>,
+boot_nodes: Vec<Enr>,
+log: slog::Logger,
+) -> Libp2pInstance {
 let port = unused_port("tcp").unwrap();
 let config = build_config(port, boot_nodes);
 // launch libp2p service

 let (signal, exit) = exit_future::signal();
 let (shutdown_tx, _) = futures::channel::mpsc::channel(1);
-let executor = task_executor::TaskExecutor::new(
-tokio::runtime::Handle::current(),
-exit,
-log.clone(),
-shutdown_tx,
-);
+let executor = task_executor::TaskExecutor::new(rt, exit, log.clone(), shutdown_tx);
 Libp2pInstance(
 LibP2PService::new(
 executor,

@@ -127,10 +128,14 @@ pub fn get_enr(node: &LibP2PService<E>) -> Enr {

 // Returns `n` libp2p peers in fully connected topology.
 #[allow(dead_code)]
-pub async fn build_full_mesh(log: slog::Logger, n: usize) -> Vec<Libp2pInstance> {
+pub async fn build_full_mesh(
+rt: Weak<Runtime>,
+log: slog::Logger,
+n: usize,
+) -> Vec<Libp2pInstance> {
 let mut nodes = Vec::with_capacity(n);
 for _ in 0..n {
-nodes.push(build_libp2p_instance(vec![], log.clone()).await);
+nodes.push(build_libp2p_instance(rt.clone(), vec![], log.clone()).await);
 }
 let multiaddrs: Vec<Multiaddr> = nodes
 .iter()

@@ -153,12 +158,15 @@ pub async fn build_full_mesh(log: slog::Logger, n: usize) -> Vec<Libp2pInstance>
 // Constructs a pair of nodes with separate loggers. The sender dials the receiver.
 // This returns a (sender, receiver) pair.
 #[allow(dead_code)]
-pub async fn build_node_pair(log: &slog::Logger) -> (Libp2pInstance, Libp2pInstance) {
+pub async fn build_node_pair(
+rt: Weak<Runtime>,
+log: &slog::Logger,
+) -> (Libp2pInstance, Libp2pInstance) {
 let sender_log = log.new(o!("who" => "sender"));
 let receiver_log = log.new(o!("who" => "receiver"));

-let mut sender = build_libp2p_instance(vec![], sender_log).await;
+let mut sender = build_libp2p_instance(rt.clone(), vec![], sender_log).await;
-let mut receiver = build_libp2p_instance(vec![], receiver_log).await;
+let mut receiver = build_libp2p_instance(rt, vec![], receiver_log).await;

 let receiver_multiaddr = receiver.swarm.local_enr().multiaddr()[1].clone();

@@ -182,7 +190,7 @@ pub async fn build_node_pair(log: &slog::Logger) -> (Libp2pInstance, Libp2pInsta

 // wait for either both nodes to listen or a timeout
 tokio::select! {
-_ = tokio::time::delay_for(Duration::from_millis(500)) => {}
+_ = tokio::time::sleep(Duration::from_millis(500)) => {}
 _ = joined => {}
 }

@@ -197,10 +205,10 @@ pub async fn build_node_pair(log: &slog::Logger) -> (Libp2pInstance, Libp2pInsta

 // Returns `n` peers in a linear topology
 #[allow(dead_code)]
-pub async fn build_linear(log: slog::Logger, n: usize) -> Vec<Libp2pInstance> {
+pub async fn build_linear(rt: Weak<Runtime>, log: slog::Logger, n: usize) -> Vec<Libp2pInstance> {
 let mut nodes = Vec::with_capacity(n);
 for _ in 0..n {
-nodes.push(build_libp2p_instance(vec![], log.clone()).await);
+nodes.push(build_libp2p_instance(rt.clone(), vec![], log.clone()).await);
 }

 let multiaddrs: Vec<Multiaddr> = nodes
@@ -3,8 +3,10 @@ use eth2_libp2p::rpc::methods::*;
 use eth2_libp2p::{BehaviourEvent, Libp2pEvent, Request, Response};
 use slog::{debug, warn, Level};
 use ssz_types::VariableList;
+use std::sync::Arc;
 use std::time::Duration;
-use tokio::time::delay_for;
+use tokio::runtime::Runtime;
+use tokio::time::sleep;
 use types::{
 BeaconBlock, Epoch, EthSpec, Hash256, MinimalEthSpec, Signature, SignedBeaconBlock, Slot,
 };

@@ -13,17 +15,20 @@ mod common;

 type E = MinimalEthSpec;

-#[tokio::test]
 // Tests the STATUS RPC message
-async fn test_status_rpc() {
+#[test]
+fn test_status_rpc() {
 // set up the logging. The level and enabled logging or not
 let log_level = Level::Debug;
 let enable_logging = false;

+let rt = Arc::new(Runtime::new().unwrap());
+
 let log = common::build_log(log_level, enable_logging);

+rt.block_on(async {
 // get sender/receiver
-let (mut sender, mut receiver) = common::build_node_pair(&log).await;
+let (mut sender, mut receiver) = common::build_node_pair(Arc::downgrade(&rt), &log).await;

 // Dummy STATUS RPC message
 let rpc_request = Request::Status(StatusMessage {
@@ -50,9 +55,11 @@ async fn test_status_rpc() {
 Libp2pEvent::Behaviour(BehaviourEvent::PeerDialed(peer_id)) => {
 // Send a STATUS message
 debug!(log, "Sending RPC");
-sender
-.swarm
-.send_request(peer_id, RequestId::Sync(10), rpc_request.clone());
+sender.swarm.send_request(
+peer_id,
+RequestId::Sync(10),
+rpc_request.clone(),
+);
 }
 Libp2pEvent::Behaviour(BehaviourEvent::ResponseReceived {
 peer_id: _,

@@ -82,9 +89,11 @@ async fn test_status_rpc() {
 if request == rpc_request {
 // send the response
 debug!(log, "Receiver Received");
-receiver
-.swarm
-.send_successful_response(peer_id, id, rpc_response.clone());
+receiver.swarm.send_successful_response(
+peer_id,
+id,
+rpc_response.clone(),
+);
 }
 }
 _ => {} // Ignore other events
@@ -95,15 +104,16 @@ async fn test_status_rpc() {
 tokio::select! {
 _ = sender_future => {}
 _ = receiver_future => {}
-_ = delay_for(Duration::from_secs(30)) => {
+_ = sleep(Duration::from_secs(30)) => {
 panic!("Future timed out");
 }
 }
+})
 }

-#[tokio::test]
 // Tests a streamed BlocksByRange RPC Message
-async fn test_blocks_by_range_chunked_rpc() {
+#[test]
+fn test_blocks_by_range_chunked_rpc() {
 // set up the logging. The level and enabled logging or not
 let log_level = Level::Trace;
 let enable_logging = false;

@@ -112,8 +122,11 @@ async fn test_blocks_by_range_chunked_rpc() {

 let log = common::build_log(log_level, enable_logging);

+let rt = Arc::new(Runtime::new().unwrap());
+
+rt.block_on(async {
 // get sender/receiver
-let (mut sender, mut receiver) = common::build_node_pair(&log).await;
+let (mut sender, mut receiver) = common::build_node_pair(Arc::downgrade(&rt), &log).await;

 // BlocksByRange Request
 let rpc_request = Request::BlocksByRange(BlocksByRangeRequest {
@ -140,9 +153,11 @@ async fn test_blocks_by_range_chunked_rpc() {
|
|||||||
Libp2pEvent::Behaviour(BehaviourEvent::PeerDialed(peer_id)) => {
|
Libp2pEvent::Behaviour(BehaviourEvent::PeerDialed(peer_id)) => {
|
||||||
// Send a STATUS message
|
// Send a STATUS message
|
||||||
debug!(log, "Sending RPC");
|
debug!(log, "Sending RPC");
|
||||||
sender
|
sender.swarm.send_request(
|
||||||
.swarm
|
peer_id,
|
||||||
.send_request(peer_id, RequestId::Sync(10), rpc_request.clone());
|
RequestId::Sync(10),
|
||||||
|
rpc_request.clone(),
|
||||||
|
);
|
||||||
}
|
}
|
||||||
Libp2pEvent::Behaviour(BehaviourEvent::ResponseReceived {
|
Libp2pEvent::Behaviour(BehaviourEvent::ResponseReceived {
|
||||||
peer_id: _,
|
peer_id: _,
|
||||||
@ -205,15 +220,16 @@ async fn test_blocks_by_range_chunked_rpc() {
|
|||||||
tokio::select! {
|
tokio::select! {
|
||||||
_ = sender_future => {}
|
_ = sender_future => {}
|
||||||
_ = receiver_future => {}
|
_ = receiver_future => {}
|
||||||
_ = delay_for(Duration::from_secs(30)) => {
|
_ = sleep(Duration::from_secs(10)) => {
|
||||||
panic!("Future timed out");
|
panic!("Future timed out");
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
})
|
||||||
}
|
}
|
||||||
|
|
||||||
#[tokio::test]
|
|
||||||
// Tests that a streamed BlocksByRange RPC Message terminates when all expected chunks were received
|
// Tests that a streamed BlocksByRange RPC Message terminates when all expected chunks were received
|
||||||
async fn test_blocks_by_range_chunked_rpc_terminates_correctly() {
|
#[test]
|
||||||
|
fn test_blocks_by_range_chunked_rpc_terminates_correctly() {
|
||||||
// set up the logging. The level and enabled logging or not
|
// set up the logging. The level and enabled logging or not
|
||||||
let log_level = Level::Debug;
|
let log_level = Level::Debug;
|
||||||
let enable_logging = false;
|
let enable_logging = false;
|
||||||
@ -223,8 +239,11 @@ async fn test_blocks_by_range_chunked_rpc_terminates_correctly() {
|
|||||||
|
|
||||||
let log = common::build_log(log_level, enable_logging);
|
let log = common::build_log(log_level, enable_logging);
|
||||||
|
|
||||||
|
let rt = Arc::new(Runtime::new().unwrap());
|
||||||
|
|
||||||
|
rt.block_on(async {
|
||||||
// get sender/receiver
|
// get sender/receiver
|
||||||
let (mut sender, mut receiver) = common::build_node_pair(&log).await;
|
let (mut sender, mut receiver) = common::build_node_pair(Arc::downgrade(&rt), &log).await;
|
||||||
|
|
||||||
// BlocksByRange Request
|
// BlocksByRange Request
|
||||||
let rpc_request = Request::BlocksByRange(BlocksByRangeRequest {
|
let rpc_request = Request::BlocksByRange(BlocksByRangeRequest {
|
||||||
@ -251,9 +270,11 @@ async fn test_blocks_by_range_chunked_rpc_terminates_correctly() {
|
|||||||
Libp2pEvent::Behaviour(BehaviourEvent::PeerDialed(peer_id)) => {
|
Libp2pEvent::Behaviour(BehaviourEvent::PeerDialed(peer_id)) => {
|
||||||
// Send a STATUS message
|
// Send a STATUS message
|
||||||
debug!(log, "Sending RPC");
|
debug!(log, "Sending RPC");
|
||||||
sender
|
sender.swarm.send_request(
|
||||||
.swarm
|
peer_id,
|
||||||
.send_request(peer_id, RequestId::Sync(10), rpc_request.clone());
|
RequestId::Sync(10),
|
||||||
|
rpc_request.clone(),
|
||||||
|
);
|
||||||
}
|
}
|
||||||
Libp2pEvent::Behaviour(BehaviourEvent::ResponseReceived {
|
Libp2pEvent::Behaviour(BehaviourEvent::ResponseReceived {
|
||||||
peer_id: _,
|
peer_id: _,
|
||||||
@ -292,7 +313,7 @@ async fn test_blocks_by_range_chunked_rpc_terminates_correctly() {
|
|||||||
// sent in the timeout
|
// sent in the timeout
|
||||||
match futures::future::select(
|
match futures::future::select(
|
||||||
Box::pin(receiver.next_event()),
|
Box::pin(receiver.next_event()),
|
||||||
tokio::time::delay_for(Duration::from_secs(1)),
|
tokio::time::sleep(Duration::from_secs(1)),
|
||||||
)
|
)
|
||||||
.await
|
.await
|
||||||
{
|
{
|
||||||
@ -335,23 +356,26 @@ async fn test_blocks_by_range_chunked_rpc_terminates_correctly() {
|
|||||||
tokio::select! {
|
tokio::select! {
|
||||||
_ = sender_future => {}
|
_ = sender_future => {}
|
||||||
_ = receiver_future => {}
|
_ = receiver_future => {}
|
||||||
_ = delay_for(Duration::from_secs(30)) => {
|
_ = sleep(Duration::from_secs(30)) => {
|
||||||
panic!("Future timed out");
|
panic!("Future timed out");
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
})
|
||||||
}
|
}
|
||||||
|
|
||||||
#[tokio::test]
|
|
||||||
// Tests an empty response to a BlocksByRange RPC Message
|
// Tests an empty response to a BlocksByRange RPC Message
|
||||||
async fn test_blocks_by_range_single_empty_rpc() {
|
#[test]
|
||||||
|
fn test_blocks_by_range_single_empty_rpc() {
|
||||||
// set up the logging. The level and enabled logging or not
|
// set up the logging. The level and enabled logging or not
|
||||||
let log_level = Level::Trace;
|
let log_level = Level::Trace;
|
||||||
let enable_logging = false;
|
let enable_logging = false;
|
||||||
|
|
||||||
let log = common::build_log(log_level, enable_logging);
|
let log = common::build_log(log_level, enable_logging);
|
||||||
|
let rt = Arc::new(Runtime::new().unwrap());
|
||||||
|
|
||||||
|
rt.block_on(async {
|
||||||
// get sender/receiver
|
// get sender/receiver
|
||||||
let (mut sender, mut receiver) = common::build_node_pair(&log).await;
|
let (mut sender, mut receiver) = common::build_node_pair(Arc::downgrade(&rt), &log).await;
|
||||||
|
|
||||||
// BlocksByRange Request
|
// BlocksByRange Request
|
||||||
let rpc_request = Request::BlocksByRange(BlocksByRangeRequest {
|
let rpc_request = Request::BlocksByRange(BlocksByRangeRequest {
|
||||||
@ -380,9 +404,11 @@ async fn test_blocks_by_range_single_empty_rpc() {
|
|||||||
Libp2pEvent::Behaviour(BehaviourEvent::PeerDialed(peer_id)) => {
|
Libp2pEvent::Behaviour(BehaviourEvent::PeerDialed(peer_id)) => {
|
||||||
// Send a STATUS message
|
// Send a STATUS message
|
||||||
debug!(log, "Sending RPC");
|
debug!(log, "Sending RPC");
|
||||||
sender
|
sender.swarm.send_request(
|
||||||
.swarm
|
peer_id,
|
||||||
.send_request(peer_id, RequestId::Sync(10), rpc_request.clone());
|
RequestId::Sync(10),
|
||||||
|
rpc_request.clone(),
|
||||||
|
);
|
||||||
}
|
}
|
||||||
Libp2pEvent::Behaviour(BehaviourEvent::ResponseReceived {
|
Libp2pEvent::Behaviour(BehaviourEvent::ResponseReceived {
|
||||||
peer_id: _,
|
peer_id: _,
|
||||||
@ -442,18 +468,19 @@ async fn test_blocks_by_range_single_empty_rpc() {
|
|||||||
tokio::select! {
|
tokio::select! {
|
||||||
_ = sender_future => {}
|
_ = sender_future => {}
|
||||||
_ = receiver_future => {}
|
_ = receiver_future => {}
|
||||||
_ = delay_for(Duration::from_secs(20)) => {
|
_ = sleep(Duration::from_secs(20)) => {
|
||||||
panic!("Future timed out");
|
panic!("Future timed out");
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
})
|
||||||
}
|
}
|
||||||
|
|
||||||
#[tokio::test]
|
|
||||||
// Tests a streamed, chunked BlocksByRoot RPC Message
|
// Tests a streamed, chunked BlocksByRoot RPC Message
|
||||||
// The size of the reponse is a full `BeaconBlock`
|
// The size of the reponse is a full `BeaconBlock`
|
||||||
// which is greater than the Snappy frame size. Hence, this test
|
// which is greater than the Snappy frame size. Hence, this test
|
||||||
// serves to test the snappy framing format as well.
|
// serves to test the snappy framing format as well.
|
||||||
async fn test_blocks_by_root_chunked_rpc() {
|
#[test]
|
||||||
|
fn test_blocks_by_root_chunked_rpc() {
|
||||||
// set up the logging. The level and enabled logging or not
|
// set up the logging. The level and enabled logging or not
|
||||||
let log_level = Level::Debug;
|
let log_level = Level::Debug;
|
||||||
let enable_logging = false;
|
let enable_logging = false;
|
||||||
@ -463,8 +490,10 @@ async fn test_blocks_by_root_chunked_rpc() {
|
|||||||
let log = common::build_log(log_level, enable_logging);
|
let log = common::build_log(log_level, enable_logging);
|
||||||
let spec = E::default_spec();
|
let spec = E::default_spec();
|
||||||
|
|
||||||
|
let rt = Arc::new(Runtime::new().unwrap());
|
||||||
// get sender/receiver
|
// get sender/receiver
|
||||||
let (mut sender, mut receiver) = common::build_node_pair(&log).await;
|
rt.block_on(async {
|
||||||
|
let (mut sender, mut receiver) = common::build_node_pair(Arc::downgrade(&rt), &log).await;
|
||||||
|
|
||||||
// BlocksByRoot Request
|
// BlocksByRoot Request
|
||||||
let rpc_request = Request::BlocksByRoot(BlocksByRootRequest {
|
let rpc_request = Request::BlocksByRoot(BlocksByRootRequest {
|
||||||
@ -492,9 +521,11 @@ async fn test_blocks_by_root_chunked_rpc() {
|
|||||||
Libp2pEvent::Behaviour(BehaviourEvent::PeerDialed(peer_id)) => {
|
Libp2pEvent::Behaviour(BehaviourEvent::PeerDialed(peer_id)) => {
|
||||||
// Send a STATUS message
|
// Send a STATUS message
|
||||||
debug!(log, "Sending RPC");
|
debug!(log, "Sending RPC");
|
||||||
sender
|
sender.swarm.send_request(
|
||||||
.swarm
|
peer_id,
|
||||||
.send_request(peer_id, RequestId::Sync(10), rpc_request.clone());
|
RequestId::Sync(10),
|
||||||
|
rpc_request.clone(),
|
||||||
|
);
|
||||||
}
|
}
|
||||||
Libp2pEvent::Behaviour(BehaviourEvent::ResponseReceived {
|
Libp2pEvent::Behaviour(BehaviourEvent::ResponseReceived {
|
||||||
peer_id: _,
|
peer_id: _,
|
||||||
@ -556,15 +587,16 @@ async fn test_blocks_by_root_chunked_rpc() {
|
|||||||
tokio::select! {
|
tokio::select! {
|
||||||
_ = sender_future => {}
|
_ = sender_future => {}
|
||||||
_ = receiver_future => {}
|
_ = receiver_future => {}
|
||||||
_ = delay_for(Duration::from_secs(30)) => {
|
_ = sleep(Duration::from_secs(30)) => {
|
||||||
panic!("Future timed out");
|
panic!("Future timed out");
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
})
|
||||||
}
|
}
|
||||||
|
|
||||||
#[tokio::test]
|
|
||||||
// Tests a streamed, chunked BlocksByRoot RPC Message terminates when all expected reponses have been received
|
// Tests a streamed, chunked BlocksByRoot RPC Message terminates when all expected reponses have been received
|
||||||
async fn test_blocks_by_root_chunked_rpc_terminates_correctly() {
|
#[test]
|
||||||
|
fn test_blocks_by_root_chunked_rpc_terminates_correctly() {
|
||||||
// set up the logging. The level and enabled logging or not
|
// set up the logging. The level and enabled logging or not
|
||||||
let log_level = Level::Debug;
|
let log_level = Level::Debug;
|
||||||
let enable_logging = false;
|
let enable_logging = false;
|
||||||
@ -575,8 +607,10 @@ async fn test_blocks_by_root_chunked_rpc_terminates_correctly() {
|
|||||||
let log = common::build_log(log_level, enable_logging);
|
let log = common::build_log(log_level, enable_logging);
|
||||||
let spec = E::default_spec();
|
let spec = E::default_spec();
|
||||||
|
|
||||||
|
let rt = Arc::new(Runtime::new().unwrap());
|
||||||
// get sender/receiver
|
// get sender/receiver
|
||||||
let (mut sender, mut receiver) = common::build_node_pair(&log).await;
|
rt.block_on(async {
|
||||||
|
let (mut sender, mut receiver) = common::build_node_pair(Arc::downgrade(&rt), &log).await;
|
||||||
|
|
||||||
// BlocksByRoot Request
|
// BlocksByRoot Request
|
||||||
let rpc_request = Request::BlocksByRoot(BlocksByRootRequest {
|
let rpc_request = Request::BlocksByRoot(BlocksByRootRequest {
|
||||||
@ -611,9 +645,11 @@ async fn test_blocks_by_root_chunked_rpc_terminates_correctly() {
|
|||||||
Libp2pEvent::Behaviour(BehaviourEvent::PeerDialed(peer_id)) => {
|
Libp2pEvent::Behaviour(BehaviourEvent::PeerDialed(peer_id)) => {
|
||||||
// Send a STATUS message
|
// Send a STATUS message
|
||||||
debug!(log, "Sending RPC");
|
debug!(log, "Sending RPC");
|
||||||
sender
|
sender.swarm.send_request(
|
||||||
.swarm
|
peer_id,
|
||||||
.send_request(peer_id, RequestId::Sync(10), rpc_request.clone());
|
RequestId::Sync(10),
|
||||||
|
rpc_request.clone(),
|
||||||
|
);
|
||||||
}
|
}
|
||||||
Libp2pEvent::Behaviour(BehaviourEvent::ResponseReceived {
|
Libp2pEvent::Behaviour(BehaviourEvent::ResponseReceived {
|
||||||
peer_id: _,
|
peer_id: _,
|
||||||
@ -652,7 +688,7 @@ async fn test_blocks_by_root_chunked_rpc_terminates_correctly() {
|
|||||||
// sent in the timeout
|
// sent in the timeout
|
||||||
match futures::future::select(
|
match futures::future::select(
|
||||||
Box::pin(receiver.next_event()),
|
Box::pin(receiver.next_event()),
|
||||||
tokio::time::delay_for(Duration::from_millis(1000)),
|
tokio::time::sleep(Duration::from_millis(1000)),
|
||||||
)
|
)
|
||||||
.await
|
.await
|
||||||
{
|
{
|
||||||
@ -695,23 +731,26 @@ async fn test_blocks_by_root_chunked_rpc_terminates_correctly() {
|
|||||||
tokio::select! {
|
tokio::select! {
|
||||||
_ = sender_future => {}
|
_ = sender_future => {}
|
||||||
_ = receiver_future => {}
|
_ = receiver_future => {}
|
||||||
_ = delay_for(Duration::from_secs(30)) => {
|
_ = sleep(Duration::from_secs(30)) => {
|
||||||
panic!("Future timed out");
|
panic!("Future timed out");
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
})
|
||||||
}
|
}
|
||||||
|
|
||||||
#[tokio::test]
|
|
||||||
// Tests a Goodbye RPC message
|
// Tests a Goodbye RPC message
|
||||||
async fn test_goodbye_rpc() {
|
#[test]
|
||||||
|
fn test_goodbye_rpc() {
|
||||||
// set up the logging. The level and enabled logging or not
|
// set up the logging. The level and enabled logging or not
|
||||||
let log_level = Level::Trace;
|
let log_level = Level::Trace;
|
||||||
let enable_logging = false;
|
let enable_logging = false;
|
||||||
|
|
||||||
let log = common::build_log(log_level, enable_logging);
|
let log = common::build_log(log_level, enable_logging);
|
||||||
|
|
||||||
|
let rt = Arc::new(Runtime::new().unwrap());
|
||||||
// get sender/receiver
|
// get sender/receiver
|
||||||
let (mut sender, mut receiver) = common::build_node_pair(&log).await;
|
rt.block_on(async {
|
||||||
|
let (mut sender, mut receiver) = common::build_node_pair(Arc::downgrade(&rt), &log).await;
|
||||||
|
|
||||||
// build the sender future
|
// build the sender future
|
||||||
let sender_future = async {
|
let sender_future = async {
|
||||||
@ -749,8 +788,9 @@ async fn test_goodbye_rpc() {
|
|||||||
|
|
||||||
tokio::select! {
|
tokio::select! {
|
||||||
_ = total_future => {}
|
_ = total_future => {}
|
||||||
_ = delay_for(Duration::from_secs(30)) => {
|
_ = sleep(Duration::from_secs(30)) => {
|
||||||
panic!("Future timed out");
|
panic!("Future timed out");
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
})
|
||||||
}
|
}
|
||||||
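Every test in this file follows the same migration pattern: the `#[tokio::test]` attribute is dropped, the test becomes a plain `#[test]` that builds its own runtime, and the body runs under `rt.block_on`, with a `Weak` handle (`Arc::downgrade(&rt)`) handed to `build_node_pair`. A minimal sketch of that pattern, assuming only the tokio 0.3 items already imported above (the test name and body are illustrative, not part of this PR):

use std::sync::Arc;
use std::time::Duration;
use tokio::runtime::Runtime;
use tokio::time::sleep;

#[test]
fn owns_its_runtime() {
    // Build the runtime explicitly so a Weak handle can be passed to spawned services.
    let rt = Arc::new(Runtime::new().unwrap());
    let _weak = Arc::downgrade(&rt);
    rt.block_on(async {
        // tokio 0.3 renamed `delay_for` to `sleep`.
        sleep(Duration::from_millis(10)).await;
    });
}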
@@ -6,9 +6,10 @@ edition = "2018"

[dev-dependencies]
eth1_test_rig = { path = "../../testing/eth1_test_rig" }
+tokio-compat-02 = "0.1"

[dependencies]
-futures = "0.3.5"
+futures = "0.3.7"
types = { path = "../../consensus/types"}
environment = { path = "../../lighthouse/environment"}
eth1 = { path = "../eth1"}
@@ -18,7 +19,7 @@ merkle_proof = { path = "../../consensus/merkle_proof" }
eth2_ssz = "0.1.2"
eth2_hashing = "0.1.0"
tree_hash = "0.1.1"
-tokio = { version = "0.2.22", features = ["full"] }
+tokio = { version = "0.3.2", features = ["full"] }
parking_lot = "0.11.0"
slog = "2.5.2"
exit-future = "0.2.0"
@@ -12,7 +12,7 @@ use std::sync::{
Arc,
};
use std::time::Duration;
-use tokio::time::delay_for;
+use tokio::time::sleep;
use types::{BeaconState, ChainSpec, Deposit, Eth1Data, EthSpec, Hash256};

/// The number of blocks that are pulled per request whilst waiting for genesis.
@@ -151,7 +151,7 @@ impl Eth1GenesisService {
"valid_deposits" => eth1_service.get_raw_valid_signature_count(),
);

-delay_for(update_interval).await;
+sleep(update_interval).await;

continue;
}
@@ -231,9 +231,9 @@ impl Eth1GenesisService {
// We assume that if we imported a large chunk of blocks then we're some distance from
// the head and we should sync faster.
if blocks_imported >= BLOCKS_PER_GENESIS_POLL {
-delay_for(Duration::from_millis(50)).await;
+sleep(Duration::from_millis(50)).await;
} else {
-delay_for(update_interval).await;
+sleep(update_interval).await;
}
}
}
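The change in this service is mechanical: tokio 0.3 renamed `tokio::time::delay_for` to `tokio::time::sleep`, with the same signature. As a one-line sketch (the duration here is illustrative):

// tokio 0.2: delay_for(Duration::from_millis(50)).await;
// tokio 0.3:
sleep(Duration::from_millis(50)).await;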
@@ -10,11 +10,12 @@ use futures::compat::Future01CompatExt;
use genesis::{Eth1Config, Eth1GenesisService};
use state_processing::is_valid_genesis_state;
use std::time::Duration;
+use tokio_compat_02::FutureExt;
use types::{test_utils::generate_deterministic_keypair, Hash256, MinimalEthSpec};

pub fn new_env() -> Environment<MinimalEthSpec> {
EnvironmentBuilder::minimal()
-.single_thread_tokio_runtime()
+.multi_threaded_tokio_runtime()
.expect("should start tokio runtime")
.null_logger()
.expect("should start null logger")
@@ -28,7 +29,8 @@ fn basic() {
let log = env.core_context().log().clone();
let mut spec = env.eth2_config().spec.clone();

-env.runtime().block_on(async {
+env.runtime().block_on(
+async {
let eth1 = GanacheEth1Instance::new(DEFAULT_NETWORK_ID.into(), DEFAULT_CHAIN_ID.into())
.await
.expect("should start eth1 environment");
@@ -106,5 +108,7 @@ fn basic() {
is_valid_genesis_state(&state, &spec),
"should be valid genesis state"
);
-});
+}
+.compat(),
+);
}
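This test now wraps its async block in `.compat()` from the `tokio_compat_02::FutureExt` trait, so that dependencies still built against tokio 0.2 (the eth1 test rig here) keep working inside the new tokio 0.3 runtime. A self-contained sketch of that wrapper, assuming only the tokio-compat-02 API added in this PR (the function names and body are placeholders):

use tokio::runtime::Runtime;
use tokio_compat_02::FutureExt;

async fn calls_into_a_tokio_02_crate() {
    // Placeholder for work done by a crate that still expects a tokio 0.2 reactor.
}

fn main() {
    let rt = Runtime::new().unwrap();
    // `.compat()` runs the future under a tokio 0.2 compatibility context.
    rt.block_on(calls_into_a_tokio_02_crate().compat());
}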
@@ -5,9 +5,9 @@ authors = ["Paul Hauner <paul@paulhauner.com>"]
edition = "2018"

[dependencies]
-warp = { git = "https://github.com/paulhauner/warp", branch = "cors-wildcard" }
+warp = { git = "https://github.com/sigp/warp ", branch = "lighthouse" }
serde = { version = "1.0.116", features = ["derive"] }
-tokio = { version = "0.2.22", features = ["macros"] }
+tokio = { version = "0.3.2", features = ["macros"] }
parking_lot = "0.11.0"
types = { path = "../../consensus/types" }
hex = "0.4.2"
@@ -31,4 +31,5 @@ bs58 = "0.3.1"
store = { path = "../store" }
environment = { path = "../../lighthouse/environment" }
tree_hash = "0.1.1"
-discv5 = { git = "https://github.com/sigp/discv5", rev = "fba7ceb5cfebd219ebbad6ffdb5d8c31dc8e4bc0", features = ["libp2p"] }
+discv5 = { git = "https://github.com/sigp/discv5", rev = "f117b3ca56fa3dca2317270434634ff7106d391a", features = ["libp2p"] }
+tokio-compat-02 = "0.1"
@@ -17,10 +17,7 @@ use beacon_chain::{
};
use beacon_proposer_cache::BeaconProposerCache;
use block_id::BlockId;
-use eth2::{
-types::{self as api_types, ValidatorId},
-StatusCode,
-};
+use eth2::types::{self as api_types, ValidatorId};
use eth2_libp2p::{types::SyncState, EnrExt, NetworkGlobals, PeerId, PubsubMessage};
use lighthouse_version::version_with_platform;
use network::NetworkMessage;
@@ -42,6 +39,7 @@ use types::{
Hash256, ProposerSlashing, PublicKey, PublicKeyBytes, RelativeEpoch, SignedAggregateAndProof,
SignedBeaconBlock, SignedVoluntaryExit, Slot, YamlConfig,
};
+use warp::http::StatusCode;
use warp::{http::Response, Filter};
use warp_utils::task::{blocking_json_task, blocking_task};

@@ -2251,12 +2249,14 @@ pub fn serve<T: BeaconChainTypes>(
.map(|reply| warp::reply::with_header(reply, "Server", &version_with_platform()))
.with(cors_builder.build());

-let (listening_socket, server) = warp::serve(routes).try_bind_with_graceful_shutdown(
+let (listening_socket, server) = {
+warp::serve(routes).try_bind_with_graceful_shutdown(
SocketAddrV4::new(config.listen_addr, config.listen_port),
async {
shutdown.await;
},
-)?;
+)?
+};

info!(
log,
@@ -7,6 +7,7 @@ use beacon_chain::{
use discv5::enr::{CombinedKey, EnrBuilder};
use environment::null_logger;
use eth2::Error;
+use eth2::StatusCode;
use eth2::{types::*, BeaconNodeHttpClient, Url};
use eth2_libp2p::{
rpc::methods::MetaData,
@@ -21,12 +22,12 @@ use std::net::Ipv4Addr;
use std::sync::Arc;
use tokio::sync::mpsc;
use tokio::sync::oneshot;
+use tokio_compat_02::FutureExt;
use tree_hash::TreeHash;
use types::{
test_utils::generate_deterministic_keypairs, AggregateSignature, BeaconState, BitList, Domain,
EthSpec, Hash256, Keypair, MainnetEthSpec, RelativeEpoch, SelectionProof, SignedRoot, Slot,
};
-use warp::http::StatusCode;

type E = MainnetEthSpec;

@@ -1825,8 +1826,9 @@ impl ApiTester {
}
}

-#[tokio::test(core_threads = 2)]
+#[tokio::test(flavor = "multi_thread", worker_threads = 2)]
async fn beacon_get() {
+async {
ApiTester::new()
.test_beacon_genesis()
.await
@@ -1865,237 +1867,296 @@ async fn beacon_get() {
.test_get_beacon_pool_voluntary_exits()
.await;
}
+.compat()
+.await;
+}

-#[tokio::test(core_threads = 2)]
+#[tokio::test(flavor = "multi_thread", worker_threads = 2)]
async fn post_beacon_blocks_valid() {
-ApiTester::new().test_post_beacon_blocks_valid().await;
+ApiTester::new()
+.test_post_beacon_blocks_valid()
+.compat()
+.await;
}

-#[tokio::test(core_threads = 2)]
+#[tokio::test(flavor = "multi_thread", worker_threads = 2)]
async fn post_beacon_blocks_invalid() {
-ApiTester::new().test_post_beacon_blocks_invalid().await;
+ApiTester::new()
+.test_post_beacon_blocks_invalid()
+.compat()
+.await;
}

-#[tokio::test(core_threads = 2)]
+#[tokio::test(flavor = "multi_thread", worker_threads = 2)]
async fn beacon_pools_post_attestations_valid() {
ApiTester::new()
.test_post_beacon_pool_attestations_valid()
+.compat()
.await;
}

-#[tokio::test(core_threads = 2)]
+#[tokio::test(flavor = "multi_thread", worker_threads = 2)]
async fn beacon_pools_post_attestations_invalid() {
ApiTester::new()
.test_post_beacon_pool_attestations_invalid()
+.compat()
.await;
}

-#[tokio::test(core_threads = 2)]
+#[tokio::test(flavor = "multi_thread", worker_threads = 2)]
async fn beacon_pools_post_attester_slashings_valid() {
ApiTester::new()
.test_post_beacon_pool_attester_slashings_valid()
+.compat()
.await;
}

-#[tokio::test(core_threads = 2)]
+#[tokio::test(flavor = "multi_thread", worker_threads = 2)]
async fn beacon_pools_post_attester_slashings_invalid() {
ApiTester::new()
.test_post_beacon_pool_attester_slashings_invalid()
+.compat()
.await;
}

-#[tokio::test(core_threads = 2)]
+#[tokio::test(flavor = "multi_thread", worker_threads = 2)]
async fn beacon_pools_post_proposer_slashings_valid() {
ApiTester::new()
.test_post_beacon_pool_proposer_slashings_valid()
+.compat()
.await;
}

-#[tokio::test(core_threads = 2)]
+#[tokio::test(flavor = "multi_thread", worker_threads = 2)]
async fn beacon_pools_post_proposer_slashings_invalid() {
ApiTester::new()
.test_post_beacon_pool_proposer_slashings_invalid()
+.compat()
.await;
}

-#[tokio::test(core_threads = 2)]
+#[tokio::test(flavor = "multi_thread", worker_threads = 2)]
async fn beacon_pools_post_voluntary_exits_valid() {
ApiTester::new()
.test_post_beacon_pool_voluntary_exits_valid()
+.compat()
.await;
}

-#[tokio::test(core_threads = 2)]
+#[tokio::test(flavor = "multi_thread", worker_threads = 2)]
async fn beacon_pools_post_voluntary_exits_invalid() {
ApiTester::new()
.test_post_beacon_pool_voluntary_exits_invalid()
+.compat()
.await;
}

-#[tokio::test(core_threads = 2)]
+#[tokio::test(flavor = "multi_thread", worker_threads = 2)]
async fn config_get() {
ApiTester::new()
.test_get_config_fork_schedule()
+.compat()
.await
.test_get_config_spec()
+.compat()
.await
.test_get_config_deposit_contract()
+.compat()
.await;
}

-#[tokio::test(core_threads = 2)]
+#[tokio::test(flavor = "multi_thread", worker_threads = 2)]
async fn debug_get() {
ApiTester::new()
.test_get_debug_beacon_states()
+.compat()
.await
.test_get_debug_beacon_heads()
+.compat()
.await;
}

-#[tokio::test(core_threads = 2)]
+#[tokio::test(flavor = "multi_thread", worker_threads = 2)]
async fn node_get() {
ApiTester::new()
.test_get_node_version()
+.compat()
.await
.test_get_node_syncing()
+.compat()
.await
.test_get_node_identity()
+.compat()
.await
.test_get_node_health()
+.compat()
.await
.test_get_node_peers_by_id()
+.compat()
.await
.test_get_node_peers()
+.compat()
.await
.test_get_node_peer_count()
+.compat()
.await;
}

-#[tokio::test(core_threads = 2)]
+#[tokio::test(flavor = "multi_thread", worker_threads = 2)]
async fn get_validator_duties_attester() {
-ApiTester::new().test_get_validator_duties_attester().await;
+ApiTester::new()
+.test_get_validator_duties_attester()
+.compat()
+.await;
}

-#[tokio::test(core_threads = 2)]
+#[tokio::test(flavor = "multi_thread", worker_threads = 2)]
async fn get_validator_duties_attester_with_skip_slots() {
ApiTester::new()
.skip_slots(E::slots_per_epoch() * 2)
.test_get_validator_duties_attester()
+.compat()
.await;
}

-#[tokio::test(core_threads = 2)]
+#[tokio::test(flavor = "multi_thread", worker_threads = 2)]
async fn get_validator_duties_proposer() {
-ApiTester::new().test_get_validator_duties_proposer().await;
+ApiTester::new()
+.test_get_validator_duties_proposer()
+.compat()
+.await;
}

-#[tokio::test(core_threads = 2)]
+#[tokio::test(flavor = "multi_thread", worker_threads = 2)]
async fn get_validator_duties_proposer_with_skip_slots() {
ApiTester::new()
.skip_slots(E::slots_per_epoch() * 2)
.test_get_validator_duties_proposer()
+.compat()
.await;
}

-#[tokio::test(core_threads = 2)]
+#[tokio::test(flavor = "multi_thread", worker_threads = 2)]
async fn block_production() {
-ApiTester::new().test_block_production().await;
+ApiTester::new().test_block_production().compat().await;
}

-#[tokio::test(core_threads = 2)]
+#[tokio::test(flavor = "multi_thread", worker_threads = 2)]
async fn block_production_with_skip_slots() {
ApiTester::new()
.skip_slots(E::slots_per_epoch() * 2)
.test_block_production()
+.compat()
.await;
}

-#[tokio::test(core_threads = 2)]
+#[tokio::test(flavor = "multi_thread", worker_threads = 2)]
async fn get_validator_attestation_data() {
-ApiTester::new().test_get_validator_attestation_data().await;
+ApiTester::new()
+.test_get_validator_attestation_data()
+.compat()
+.await;
}

-#[tokio::test(core_threads = 2)]
+#[tokio::test(flavor = "multi_thread", worker_threads = 2)]
async fn get_validator_attestation_data_with_skip_slots() {
ApiTester::new()
.skip_slots(E::slots_per_epoch() * 2)
.test_get_validator_attestation_data()
+.compat()
.await;
}

-#[tokio::test(core_threads = 2)]
+#[tokio::test(flavor = "multi_thread", worker_threads = 2)]
async fn get_validator_aggregate_attestation() {
ApiTester::new()
.test_get_validator_aggregate_attestation()
+.compat()
.await;
}

-#[tokio::test(core_threads = 2)]
+#[tokio::test(flavor = "multi_thread", worker_threads = 2)]
async fn get_validator_aggregate_attestation_with_skip_slots() {
ApiTester::new()
.skip_slots(E::slots_per_epoch() * 2)
.test_get_validator_aggregate_attestation()
+.compat()
.await;
}

-#[tokio::test(core_threads = 2)]
+#[tokio::test(flavor = "multi_thread", worker_threads = 2)]
async fn get_validator_aggregate_and_proofs_valid() {
ApiTester::new()
.test_get_validator_aggregate_and_proofs_valid()
+.compat()
.await;
}

-#[tokio::test(core_threads = 2)]
+#[tokio::test(flavor = "multi_thread", worker_threads = 2)]
async fn get_validator_aggregate_and_proofs_valid_with_skip_slots() {
ApiTester::new()
.skip_slots(E::slots_per_epoch() * 2)
.test_get_validator_aggregate_and_proofs_valid()
+.compat()
.await;
}

-#[tokio::test(core_threads = 2)]
+#[tokio::test(flavor = "multi_thread", worker_threads = 2)]
async fn get_validator_aggregate_and_proofs_invalid() {
ApiTester::new()
.test_get_validator_aggregate_and_proofs_invalid()
+.compat()
.await;
}

-#[tokio::test(core_threads = 2)]
+#[tokio::test(flavor = "multi_thread", worker_threads = 2)]
async fn get_validator_aggregate_and_proofs_invalid_with_skip_slots() {
ApiTester::new()
.skip_slots(E::slots_per_epoch() * 2)
.test_get_validator_aggregate_and_proofs_invalid()
+.compat()
.await;
}

-#[tokio::test(core_threads = 2)]
+#[tokio::test(flavor = "multi_thread", worker_threads = 2)]
async fn get_validator_beacon_committee_subscriptions() {
ApiTester::new()
.test_get_validator_beacon_committee_subscriptions()
+.compat()
.await;
}

-#[tokio::test(core_threads = 2)]
+#[tokio::test(flavor = "multi_thread", worker_threads = 2)]
async fn lighthouse_endpoints() {
ApiTester::new()
.test_get_lighthouse_health()
+.compat()
.await
.test_get_lighthouse_syncing()
+.compat()
.await
.test_get_lighthouse_proto_array()
+.compat()
.await
.test_get_lighthouse_validator_inclusion()
+.compat()
.await
.test_get_lighthouse_validator_inclusion_global()
+.compat()
.await
.test_get_lighthouse_eth1_syncing()
+.compat()
.await
.test_get_lighthouse_eth1_block_cache()
+.compat()
.await
.test_get_lighthouse_eth1_deposit_cache()
+.compat()
.await
.test_get_lighthouse_beacon_states_ssz()
+.compat()
.await
.test_get_lighthouse_staking()
+.compat()
.await;
}
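The attribute rewritten throughout the tests above reflects the new `#[tokio::test]` arguments in tokio 0.3: `core_threads` is replaced by `flavor = "multi_thread"` plus `worker_threads`. A sketch of the new spelling, assuming only the attribute used in this file (the test name and assertion are illustrative):

// tokio 0.2 spelling: #[tokio::test(core_threads = 2)]
#[tokio::test(flavor = "multi_thread", worker_threads = 2)]
async fn runs_on_a_two_worker_runtime() {
    // Each test above also chains `.compat()` before `.await`, so the
    // tokio-0.2-based warp/reqwest stack keeps working via tokio-compat-02.
    assert_eq!(2 + 2, 4);
}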
@@ -8,7 +8,7 @@ edition = "2018"

[dependencies]
prometheus = "0.10.0"
-warp = { git = "https://github.com/paulhauner/warp", branch = "cors-wildcard" }
+warp = { git = "https://github.com/sigp/warp ", branch = "lighthouse" }
serde = { version = "1.0.116", features = ["derive"] }
slog = "2.5.2"
beacon_chain = { path = "../beacon_chain" }
@@ -22,7 +22,8 @@ lighthouse_version = { path = "../../common/lighthouse_version" }
warp_utils = { path = "../../common/warp_utils" }

[dev-dependencies]
-tokio = { version = "0.2.22", features = ["sync"] }
+tokio = { version = "0.3.2", features = ["sync"] }
reqwest = { version = "0.10.8", features = ["json"] }
environment = { path = "../../lighthouse/environment" }
types = { path = "../../consensus/types" }
+tokio-compat-02 = "0.1"
@@ -5,12 +5,14 @@ use reqwest::StatusCode;
use std::net::Ipv4Addr;
use std::sync::Arc;
use tokio::sync::oneshot;
+use tokio_compat_02::FutureExt;
use types::MainnetEthSpec;

type Context = http_metrics::Context<EphemeralHarnessType<MainnetEthSpec>>;

-#[tokio::test(core_threads = 2)]
+#[tokio::test(flavor = "multi_thread", worker_threads = 2)]
async fn returns_200_ok() {
+async {
let log = null_logger().unwrap();

let context = Arc::new(Context {
@@ -44,3 +46,6 @@ async fn returns_200_ok() {

assert_eq!(reqwest::get(&url).await.unwrap().status(), StatusCode::OK);
}
+.compat()
+.await
+}
@@ -11,6 +11,8 @@ lazy_static = "1.4.0"
matches = "0.1.8"
tempfile = "3.1.0"
exit-future = "0.2.0"
+slog-term = "2.6.0"
+slog-async = "2.5.0"

[dependencies]
beacon_chain = { path = "../beacon_chain" }
@@ -25,9 +27,9 @@ hex = "0.4.2"
eth2_ssz = "0.1.2"
eth2_ssz_types = { path = "../../consensus/ssz_types" }
tree_hash = "0.1.1"
-futures = "0.3.5"
+futures = "0.3.7"
error-chain = "0.12.4"
-tokio = { version = "0.2.22", features = ["full"] }
+tokio = { version = "0.3.2", features = ["full"] }
parking_lot = "0.11.0"
smallvec = "1.4.2"
rand = "0.7.3"
@@ -156,7 +156,7 @@ mod tests {

tokio::select! {
_ = collect_stream_fut => {return events}
-_ = tokio::time::delay_for(
+_ = tokio::time::sleep(
Duration::from_millis(SLOT_DURATION_MILLIS) * num_slots_before_timeout,
) => { return events; }
}
@@ -233,6 +233,8 @@ impl<T: BeaconChainTypes> Worker<T> {
| Err(e @ BlockError::BeaconChainError(_)) => {
debug!(self.log, "Could not verify block for gossip, ignoring the block";
"error" => e.to_string());
+// Prevent recurring behaviour by penalizing the peer slightly.
+self.penalize_peer(peer_id.clone(), PeerAction::HighToleranceError);
self.propagate_validation_result(message_id, peer_id, MessageAcceptance::Ignore);
return;
}
@@ -511,6 +513,12 @@ impl<T: BeaconChainTypes> Worker<T> {
"block" => %beacon_block_root,
"type" => ?attestation_type,
);

+// Peers that are slow or not to spec can spam us with these messages draining our
+// bandwidth. We therefore penalize these peers when they do this.
+self.penalize_peer(peer_id.clone(), PeerAction::LowToleranceError);
+
+// Do not propagate these messages.
self.propagate_validation_result(
message_id,
peer_id.clone(),
@@ -618,7 +626,12 @@ impl<T: BeaconChainTypes> Worker<T> {
"block" => %beacon_block_root,
"type" => ?attestation_type,
);
+// We still penalize the peer slightly. We don't want this to be a recurring
+// behaviour.
+self.penalize_peer(peer_id.clone(), PeerAction::HighToleranceError);
+
self.propagate_validation_result(message_id, peer_id, MessageAcceptance::Ignore);

return;
}
AttnError::PriorAttestationKnown { .. } => {
@@ -634,7 +647,12 @@ impl<T: BeaconChainTypes> Worker<T> {
"block" => %beacon_block_root,
"type" => ?attestation_type,
);
+// We still penalize the peer slightly. We don't want this to be a recurring
+// behaviour.
+self.penalize_peer(peer_id.clone(), PeerAction::HighToleranceError);
+
self.propagate_validation_result(message_id, peer_id, MessageAcceptance::Ignore);

return;
}
AttnError::ValidatorIndexTooHigh(_) => {
@@ -677,6 +695,10 @@ impl<T: BeaconChainTypes> Worker<T> {
"msg" => "UnknownBlockHash"
)
});
+// We still penalize the peer slightly. We don't want this to be a recurring
+// behaviour.
+self.penalize_peer(peer_id.clone(), PeerAction::HighToleranceError);
+
self.propagate_validation_result(message_id, peer_id, MessageAcceptance::Ignore);
return;
}
@@ -3,15 +3,18 @@ extern crate lazy_static;

/// This crate provides the network server for Lighthouse.
pub mod error;
+#[allow(clippy::mutable_key_type)] // PeerId in hashmaps are no longer permitted by clippy
pub mod service;

mod attestation_service;
mod beacon_processor;
+#[allow(clippy::mutable_key_type)] // PeerId in hashmaps are no longer permitted by clippy
mod metrics;
mod nat;
mod persisted_dht;
mod router;
mod status;
+#[allow(clippy::mutable_key_type)] // PeerId in hashmaps are no longer permitted by clippy
mod sync;

pub use eth2_libp2p::NetworkConfig;
@@ -1,5 +1,12 @@
use beacon_chain::attestation_verification::Error as AttnError;
+use eth2_libp2p::PubsubMessage;
+use eth2_libp2p::{
+types::GossipKind, BandwidthSinks, GossipTopic, Gossipsub, NetworkGlobals, TopicHash,
+};
+use fnv::FnvHashMap;
pub use lighthouse_metrics::*;
+use std::{collections::HashMap, sync::Arc};
+use types::{subnet_id::subnet_id_to_string, EthSpec};

lazy_static! {

@@ -404,6 +411,27 @@ lazy_static! {
"gossipsub_attestation_error_beacon_chain_error",
"Count of a specific error type (see metric name)"
);

+pub static ref INBOUND_LIBP2P_BYTES: Result<IntGauge> =
+try_create_int_gauge("libp2p_inbound_bytes", "The inbound bandwidth over libp2p");
+
+pub static ref OUTBOUND_LIBP2P_BYTES: Result<IntGauge> = try_create_int_gauge(
+"libp2p_outbound_bytes",
+"The outbound bandwidth over libp2p"
+);
+pub static ref TOTAL_LIBP2P_BANDWIDTH: Result<IntGauge> = try_create_int_gauge(
+"libp2p_total_bandwidth",
+"The total inbound/outbound bandwidth over libp2p"
+);
+}
+
+pub fn update_bandwidth_metrics(bandwidth: Arc<BandwidthSinks>) {
+set_gauge(&INBOUND_LIBP2P_BYTES, bandwidth.total_inbound() as i64);
+set_gauge(&OUTBOUND_LIBP2P_BYTES, bandwidth.total_outbound() as i64);
+set_gauge(
+&TOTAL_LIBP2P_BANDWIDTH,
+(bandwidth.total_inbound() + bandwidth.total_outbound()) as i64,
+);
}

lazy_static! {
@@ -486,3 +514,359 @@ pub fn register_attestation_error(error: &AttnError) {
AttnError::BeaconChainError(_) => inc_counter(&GOSSIP_ATTESTATION_ERROR_BEACON_CHAIN_ERROR),
}
}
+
+/// Inspects the `messages` that were being sent to the network and updates Prometheus metrics.
+pub fn expose_publish_metrics<T: EthSpec>(messages: &[PubsubMessage<T>]) {
+for message in messages {
+match message {
+PubsubMessage::BeaconBlock(_) => inc_counter(&GOSSIP_BLOCKS_TX),
+PubsubMessage::Attestation(subnet_id) => {
+inc_counter_vec(
+&ATTESTATIONS_PUBLISHED_PER_SUBNET_PER_SLOT,
+&[&subnet_id.0.as_ref()],
+);
+inc_counter(&GOSSIP_UNAGGREGATED_ATTESTATIONS_TX)
+}
+PubsubMessage::AggregateAndProofAttestation(_) => {
+inc_counter(&GOSSIP_AGGREGATED_ATTESTATIONS_TX)
+}
+_ => {}
+}
+}
+}
+
+/// Inspects a `message` received from the network and updates Prometheus metrics.
+pub fn expose_receive_metrics<T: EthSpec>(message: &PubsubMessage<T>) {
+match message {
+PubsubMessage::BeaconBlock(_) => inc_counter(&GOSSIP_BLOCKS_RX),
+PubsubMessage::Attestation(_) => inc_counter(&GOSSIP_UNAGGREGATED_ATTESTATIONS_RX),
+PubsubMessage::AggregateAndProofAttestation(_) => {
+inc_counter(&GOSSIP_AGGREGATED_ATTESTATIONS_RX)
+}
+_ => {}
+}
+}
+
+pub fn update_gossip_metrics<T: EthSpec>(
+gossipsub: &Gossipsub,
+network_globals: &Arc<NetworkGlobals<T>>,
+) {
+// Clear the metrics
+let _ = PEERS_PER_PROTOCOL.as_ref().map(|gauge| gauge.reset());
+let _ = PEERS_PER_PROTOCOL.as_ref().map(|gauge| gauge.reset());
+let _ = MESH_PEERS_PER_MAIN_TOPIC
+.as_ref()
+.map(|gauge| gauge.reset());
+let _ = AVG_GOSSIPSUB_PEER_SCORE_PER_MAIN_TOPIC
+.as_ref()
+.map(|gauge| gauge.reset());
+let _ = AVG_GOSSIPSUB_PEER_SCORE_PER_SUBNET_TOPIC
+.as_ref()
+.map(|gauge| gauge.reset());
+
+let _ = SCORES_BELOW_ZERO_PER_CLIENT
+.as_ref()
+.map(|gauge| gauge.reset());
+let _ = SCORES_BELOW_GOSSIP_THRESHOLD_PER_CLIENT
+.as_ref()
+.map(|gauge| gauge.reset());
+let _ = SCORES_BELOW_PUBLISH_THRESHOLD_PER_CLIENT
+.as_ref()
+.map(|gauge| gauge.reset());
+let _ = SCORES_BELOW_GREYLIST_THRESHOLD_PER_CLIENT
+.as_ref()
+.map(|gauge| gauge.reset());
+let _ = MIN_SCORES_PER_CLIENT.as_ref().map(|gauge| gauge.reset());
+let _ = MEDIAN_SCORES_PER_CLIENT.as_ref().map(|gauge| gauge.reset());
+let _ = MEAN_SCORES_PER_CLIENT.as_ref().map(|gauge| gauge.reset());
+let _ = MAX_SCORES_PER_CLIENT.as_ref().map(|gauge| gauge.reset());
+
+let _ = BEACON_BLOCK_MESH_PEERS_PER_CLIENT
+.as_ref()
+.map(|gauge| gauge.reset());
+let _ = BEACON_AGGREGATE_AND_PROOF_MESH_PEERS_PER_CLIENT
+.as_ref()
+.map(|gauge| gauge.reset());
+
+// reset the mesh peers, showing all subnets
+for subnet_id in 0..T::default_spec().attestation_subnet_count {
+let _ = get_int_gauge(
+&MESH_PEERS_PER_SUBNET_TOPIC,
+&[subnet_id_to_string(subnet_id)],
+)
+.map(|v| v.set(0));
+
+let _ = get_int_gauge(
+&GOSSIPSUB_SUBSCRIBED_SUBNET_TOPIC,
+&[subnet_id_to_string(subnet_id)],
+)
+.map(|v| v.set(0));
+
+let _ = get_int_gauge(
+&GOSSIPSUB_SUBSCRIBED_PEERS_SUBNET_TOPIC,
+&[subnet_id_to_string(subnet_id)],
+)
+.map(|v| v.set(0));
+}
+
+// Subnet topics subscribed to
+for topic_hash in gossipsub.topics() {
+if let Ok(topic) = GossipTopic::decode(topic_hash.as_str()) {
+if let GossipKind::Attestation(subnet_id) = topic.kind() {
+let _ = get_int_gauge(
+&GOSSIPSUB_SUBSCRIBED_SUBNET_TOPIC,
+&[subnet_id_to_string(subnet_id.into())],
+)
+.map(|v| v.set(1));
+}
+}
+}
+
+// Peers per subscribed subnet
+let mut peers_per_topic: HashMap<TopicHash, usize> = HashMap::new();
+for (peer_id, topics) in gossipsub.all_peers() {
+for topic_hash in topics {
+*peers_per_topic.entry(topic_hash.clone()).or_default() += 1;
+
+if let Ok(topic) = GossipTopic::decode(topic_hash.as_str()) {
+match topic.kind() {
+GossipKind::Attestation(subnet_id) => {
+if let Some(v) = get_int_gauge(
+&GOSSIPSUB_SUBSCRIBED_PEERS_SUBNET_TOPIC,
+&[subnet_id_to_string(subnet_id.into())],
+) {
+v.inc()
+};
+
+// average peer scores
+if let Some(score) = gossipsub.peer_score(peer_id) {
+if let Some(v) = get_gauge(
+&AVG_GOSSIPSUB_PEER_SCORE_PER_SUBNET_TOPIC,
+&[subnet_id_to_string(subnet_id.into())],
+) {
+v.add(score)
+};
+}
+}
+kind => {
+// main topics
+if let Some(score) = gossipsub.peer_score(peer_id) {
+if let Some(v) = get_gauge(
+&AVG_GOSSIPSUB_PEER_SCORE_PER_MAIN_TOPIC,
+&[kind.as_ref()],
+) {
+v.add(score)
+};
+}
+}
+}
+}
+}
+}
+// adjust to average scores by dividing by number of peers
+for (topic_hash, peers) in peers_per_topic.iter() {
+if let Ok(topic) = GossipTopic::decode(topic_hash.as_str()) {
+match topic.kind() {
+GossipKind::Attestation(subnet_id) => {
+// average peer scores
+if let Some(v) = get_gauge(
+&AVG_GOSSIPSUB_PEER_SCORE_PER_SUBNET_TOPIC,
+&[subnet_id_to_string(subnet_id.into())],
+) {
+v.set(v.get() / (*peers as f64))
+};
+}
+kind => {
+// main topics
+if let Some(v) =
+get_gauge(&AVG_GOSSIPSUB_PEER_SCORE_PER_MAIN_TOPIC, &[kind.as_ref()])
+{
+v.set(v.get() / (*peers as f64))
+};
+}
+}
+}
+}
+
+// mesh peers
+for topic_hash in gossipsub.topics() {
+let peers = gossipsub.mesh_peers(&topic_hash).count();
+if let Ok(topic) = GossipTopic::decode(topic_hash.as_str()) {
+match topic.kind() {
+GossipKind::Attestation(subnet_id) => {
+if let Some(v) = get_int_gauge(
+&MESH_PEERS_PER_SUBNET_TOPIC,
+&[subnet_id_to_string(subnet_id.into())],
+) {
+v.set(peers as i64)
+};
+}
+kind => {
+// main topics
+if let Some(v) = get_int_gauge(&MESH_PEERS_PER_MAIN_TOPIC, &[kind.as_ref()]) {
+v.set(peers as i64)
+};
+}
+}
+}
+}
+
+// protocol peers
+let mut peers_per_protocol: HashMap<&'static str, i64> = HashMap::new();
+for (_peer, protocol) in gossipsub.peer_protocol() {
+*peers_per_protocol
+.entry(protocol.as_static_ref())
+.or_default() += 1;
+}
+
+for (protocol, peers) in peers_per_protocol.iter() {
+if let Some(v) = get_int_gauge(&PEERS_PER_PROTOCOL, &[protocol]) {
+v.set(*peers)
+};
+}
+
+let mut peer_to_client = HashMap::new();
+let mut scores_per_client: HashMap<&'static str, Vec<f64>> = HashMap::new();
+{
+let peers = network_globals.peers.read();
+for (peer_id, _) in gossipsub.all_peers() {
+let client = peers
+.peer_info(peer_id)
+.map(|peer_info| peer_info.client.kind.as_static_ref())
+.unwrap_or_else(|| "Unknown");
+
+peer_to_client.insert(peer_id, client);
+let score = gossipsub.peer_score(peer_id).unwrap_or(0.0);
+scores_per_client.entry(client).or_default().push(score);
+}
||||||
|
}
|
||||||
|
|
||||||
|
// mesh peers per client
|
||||||
|
for topic_hash in gossipsub.topics() {
|
||||||
|
if let Ok(topic) = GossipTopic::decode(topic_hash.as_str()) {
|
||||||
|
match topic.kind() {
|
||||||
|
GossipKind::BeaconBlock => {
|
||||||
|
for peer in gossipsub.mesh_peers(&topic_hash) {
|
||||||
|
if let Some(client) = peer_to_client.get(peer) {
|
||||||
|
if let Some(v) =
|
||||||
|
get_int_gauge(&BEACON_BLOCK_MESH_PEERS_PER_CLIENT, &[client])
|
||||||
|
{
|
||||||
|
v.inc()
|
||||||
|
};
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
GossipKind::BeaconAggregateAndProof => {
|
||||||
|
for peer in gossipsub.mesh_peers(&topic_hash) {
|
||||||
|
if let Some(client) = peer_to_client.get(peer) {
|
||||||
|
if let Some(v) = get_int_gauge(
|
||||||
|
&BEACON_AGGREGATE_AND_PROOF_MESH_PEERS_PER_CLIENT,
|
||||||
|
&[client],
|
||||||
|
) {
|
||||||
|
v.inc()
|
||||||
|
};
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
_ => (),
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
for (client, scores) in scores_per_client.into_iter() {
|
||||||
|
let c = &[client];
|
||||||
|
let len = scores.len();
|
||||||
|
if len > 0 {
|
||||||
|
let mut below0 = 0;
|
||||||
|
let mut below_gossip_threshold = 0;
|
||||||
|
let mut below_publish_threshold = 0;
|
||||||
|
let mut below_greylist_threshold = 0;
|
||||||
|
let mut min = f64::INFINITY;
|
||||||
|
let mut sum = 0.0;
|
||||||
|
let mut max = f64::NEG_INFINITY;
|
||||||
|
|
||||||
|
let count = scores.len() as f64;
|
||||||
|
|
||||||
|
for &score in &scores {
|
||||||
|
if score < 0.0 {
|
||||||
|
below0 += 1;
|
||||||
|
}
|
||||||
|
if score < -4000.0 {
|
||||||
|
//TODO not hardcode
|
||||||
|
below_gossip_threshold += 1;
|
||||||
|
}
|
||||||
|
if score < -8000.0 {
|
||||||
|
//TODO not hardcode
|
||||||
|
below_publish_threshold += 1;
|
||||||
|
}
|
||||||
|
if score < -16000.0 {
|
||||||
|
//TODO not hardcode
|
||||||
|
below_greylist_threshold += 1;
|
||||||
|
}
|
||||||
|
if score < min {
|
||||||
|
min = score;
|
||||||
|
}
|
||||||
|
if score > max {
|
||||||
|
max = score;
|
||||||
|
}
|
||||||
|
sum += score;
|
||||||
|
}
|
||||||
|
|
||||||
|
let median = if len == 0 {
|
||||||
|
0.0
|
||||||
|
} else if len % 2 == 0 {
|
||||||
|
(scores[len / 2 - 1] + scores[len / 2]) / 2.0
|
||||||
|
} else {
|
||||||
|
scores[len / 2]
|
||||||
|
};
|
||||||
|
|
||||||
|
set_gauge_entry(&SCORES_BELOW_ZERO_PER_CLIENT, c, below0 as f64 / count);
|
||||||
|
set_gauge_entry(
|
||||||
|
&SCORES_BELOW_GOSSIP_THRESHOLD_PER_CLIENT,
|
||||||
|
c,
|
||||||
|
below_gossip_threshold as f64 / count,
|
||||||
|
);
|
||||||
|
set_gauge_entry(
|
||||||
|
&SCORES_BELOW_PUBLISH_THRESHOLD_PER_CLIENT,
|
||||||
|
c,
|
||||||
|
below_publish_threshold as f64 / count,
|
||||||
|
);
|
||||||
|
set_gauge_entry(
|
||||||
|
&SCORES_BELOW_GREYLIST_THRESHOLD_PER_CLIENT,
|
||||||
|
c,
|
||||||
|
below_greylist_threshold as f64 / count,
|
||||||
|
);
|
||||||
|
|
||||||
|
set_gauge_entry(&MIN_SCORES_PER_CLIENT, c, min);
|
||||||
|
set_gauge_entry(&MEDIAN_SCORES_PER_CLIENT, c, median);
|
||||||
|
set_gauge_entry(&MEAN_SCORES_PER_CLIENT, c, sum / count);
|
||||||
|
set_gauge_entry(&MAX_SCORES_PER_CLIENT, c, max);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
pub fn update_sync_metrics<T: EthSpec>(network_globals: &Arc<NetworkGlobals<T>>) {
|
||||||
|
// reset the counts
|
||||||
|
if PEERS_PER_SYNC_TYPE
|
||||||
|
.as_ref()
|
||||||
|
.map(|metric| metric.reset())
|
||||||
|
.is_err()
|
||||||
|
{
|
||||||
|
return;
|
||||||
|
};
|
||||||
|
|
||||||
|
// count per sync status, the number of connected peers
|
||||||
|
let mut peers_per_sync_type = FnvHashMap::default();
|
||||||
|
for sync_type in network_globals
|
||||||
|
.peers
|
||||||
|
.read()
|
||||||
|
.connected_peers()
|
||||||
|
.map(|(_peer_id, info)| info.sync_status.as_str())
|
||||||
|
{
|
||||||
|
*peers_per_sync_type.entry(sync_type).or_default() += 1;
|
||||||
|
}
|
||||||
|
|
||||||
|
for (sync_type, peer_count) in peers_per_sync_type {
|
||||||
|
set_gauge_entry(&PEERS_PER_SYNC_TYPE, &[sync_type], peer_count);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
@ -5,7 +5,7 @@
|
|||||||
|
|
||||||
use crate::{NetworkConfig, NetworkMessage};
|
use crate::{NetworkConfig, NetworkMessage};
|
||||||
use if_addrs::get_if_addrs;
|
use if_addrs::get_if_addrs;
|
||||||
use slog::{debug, info, warn};
|
use slog::{debug, info};
|
||||||
use std::net::{IpAddr, SocketAddr, SocketAddrV4};
|
use std::net::{IpAddr, SocketAddr, SocketAddrV4};
|
||||||
use tokio::sync::mpsc;
|
use tokio::sync::mpsc;
|
||||||
use types::EthSpec;
|
use types::EthSpec;
|
||||||
@ -70,6 +70,8 @@ pub fn construct_upnp_mappings<T: EthSpec>(
|
|||||||
Some(v) => v,
|
Some(v) => v,
|
||||||
};
|
};
|
||||||
|
|
||||||
|
debug!(log, "UPnP Local IP Discovered"; "ip" => ?local_ip);
|
||||||
|
|
||||||
match local_ip {
|
match local_ip {
|
||||||
IpAddr::V4(address) => {
|
IpAddr::V4(address) => {
|
||||||
let libp2p_socket = SocketAddrV4::new(address, config.tcp_port);
|
let libp2p_socket = SocketAddrV4::new(address, config.tcp_port);
|
||||||
@ -78,53 +80,39 @@ pub fn construct_upnp_mappings<T: EthSpec>(
|
|||||||
// one.
|
// one.
|
||||||
// I've found this to be more reliable. If multiple users are behind a single
|
// I've found this to be more reliable. If multiple users are behind a single
|
||||||
// router, they should ideally try to set different port numbers.
|
// router, they should ideally try to set different port numbers.
|
||||||
let tcp_socket = match gateway.add_port(
|
let tcp_socket = add_port_mapping(
|
||||||
|
&gateway,
|
||||||
igd::PortMappingProtocol::TCP,
|
igd::PortMappingProtocol::TCP,
|
||||||
libp2p_socket.port(),
|
|
||||||
libp2p_socket,
|
libp2p_socket,
|
||||||
0,
|
"tcp",
|
||||||
"lighthouse-tcp",
|
&log,
|
||||||
) {
|
).and_then(|_| {
|
||||||
Err(e) => {
|
let external_socket = external_ip.as_ref().map(|ip| SocketAddr::new(ip.clone().into(), config.tcp_port)).map_err(|_| ());
|
||||||
info!(log, "UPnP TCP route not set"; "error" => %e);
|
info!(log, "UPnP TCP route established"; "external_socket" => format!("{}:{}", external_socket.as_ref().map(|ip| ip.to_string()).unwrap_or_else(|_| "".into()), config.tcp_port));
|
||||||
None
|
external_socket
|
||||||
}
|
}).ok();
|
||||||
Ok(_) => {
|
|
||||||
info!(log, "UPnP TCP route established"; "external_socket" => format!("{}:{}", external_ip.as_ref().map(|ip| ip.to_string()).unwrap_or_else(|_| "".into()), config.tcp_port));
|
|
||||||
external_ip
|
|
||||||
.as_ref()
|
|
||||||
.map(|ip| SocketAddr::new(ip.clone().into(), config.tcp_port))
|
|
||||||
.ok()
|
|
||||||
}
|
|
||||||
};
|
|
||||||
|
|
||||||
let udp_socket = if !config.disable_discovery {
|
let udp_socket = if !config.disable_discovery {
|
||||||
let discovery_socket = SocketAddrV4::new(address, config.udp_port);
|
let discovery_socket = SocketAddrV4::new(address, config.udp_port);
|
||||||
match gateway.add_port(
|
add_port_mapping(
|
||||||
|
&gateway,
|
||||||
igd::PortMappingProtocol::UDP,
|
igd::PortMappingProtocol::UDP,
|
||||||
discovery_socket.port(),
|
|
||||||
discovery_socket,
|
discovery_socket,
|
||||||
0,
|
"udp",
|
||||||
"lighthouse-udp",
|
&log,
|
||||||
) {
|
).and_then(|_| {
|
||||||
Err(e) => {
|
let external_socket = external_ip
|
||||||
info!(log, "UPnP UDP route not set"; "error" => %e);
|
.map(|ip| SocketAddr::new(ip.into(), config.udp_port)).map_err(|_| ());
|
||||||
None
|
info!(log, "UPnP UDP route established"; "external_socket" => format!("{}:{}", external_socket.as_ref().map(|ip| ip.to_string()).unwrap_or_else(|_| "".into()), config.udp_port));
|
||||||
}
|
external_socket
|
||||||
Ok(_) => {
|
}).ok()
|
||||||
info!(log, "UPnP UDP route established"; "external_socket" => format!("{}:{}", external_ip.as_ref().map(|ip| ip.to_string()).unwrap_or_else(|_| "".into()), config.tcp_port));
|
|
||||||
external_ip
|
|
||||||
.map(|ip| SocketAddr::new(ip.into(), config.tcp_port))
|
|
||||||
.ok()
|
|
||||||
}
|
|
||||||
}
|
|
||||||
} else {
|
} else {
|
||||||
None
|
None
|
||||||
};
|
};
|
||||||
|
|
||||||
// report any updates to the network service.
|
// report any updates to the network service.
|
||||||
network_send.send(NetworkMessage::UPnPMappingEstablished{ tcp_socket, udp_socket })
|
network_send.send(NetworkMessage::UPnPMappingEstablished{ tcp_socket, udp_socket })
|
||||||
.unwrap_or_else(|e| warn!(log, "Could not send message to the network service"; "error" => %e));
|
.unwrap_or_else(|e| debug!(log, "Could not send message to the network service"; "error" => %e));
|
||||||
}
|
}
|
||||||
_ => debug!(log, "UPnP no routes constructed. IPv6 not supported"),
|
_ => debug!(log, "UPnP no routes constructed. IPv6 not supported"),
|
||||||
}
|
}
|
||||||
@ -132,6 +120,50 @@ pub fn construct_upnp_mappings<T: EthSpec>(
|
|||||||
};
|
};
|
||||||
}
|
}
|
||||||
|
|
||||||
|
/// Sets up a port mapping for a protocol returning the mapped port if successful.
|
||||||
|
fn add_port_mapping(
|
||||||
|
gateway: &igd::Gateway,
|
||||||
|
protocol: igd::PortMappingProtocol,
|
||||||
|
socket: SocketAddrV4,
|
||||||
|
protocol_string: &'static str,
|
||||||
|
log: &slog::Logger,
|
||||||
|
) -> Result<(), ()> {
|
||||||
|
// We add specific port mappings rather than getting the router to arbitrary assign
|
||||||
|
// one.
|
||||||
|
// I've found this to be more reliable. If multiple users are behind a single
|
||||||
|
// router, they should ideally try to set different port numbers.
|
||||||
|
let mapping_string = &format!("lighthouse-{}", protocol_string);
|
||||||
|
for _ in 0..2 {
|
||||||
|
match gateway.add_port(protocol, socket.port(), socket, 0, mapping_string) {
|
||||||
|
Err(e) => {
|
||||||
|
match e {
|
||||||
|
igd::AddPortError::PortInUse => {
|
||||||
|
// Try and remove and re-create
|
||||||
|
debug!(log, "UPnP port in use, attempting to remap"; "protocol" => protocol_string, "port" => socket.port());
|
||||||
|
match gateway.remove_port(protocol, socket.port()) {
|
||||||
|
Ok(()) => {
|
||||||
|
debug!(log, "UPnP Removed port mapping"; "protocol" => protocol_string, "port" => socket.port())
|
||||||
|
}
|
||||||
|
Err(e) => {
|
||||||
|
debug!(log, "UPnP Port remove failure"; "protocol" => protocol_string, "port" => socket.port(), "error" => %e);
|
||||||
|
return Err(());
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
e => {
|
||||||
|
info!(log, "UPnP TCP route not set"; "error" => %e);
|
||||||
|
return Err(());
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
Ok(_) => {
|
||||||
|
return Ok(());
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
Err(())
|
||||||
|
}
|
||||||
|
|
||||||
/// Removes the specified TCP and UDP port mappings.
|
/// Removes the specified TCP and UDP port mappings.
|
||||||
pub fn remove_mappings(tcp_port: Option<u16>, udp_port: Option<u16>, log: &slog::Logger) {
|
pub fn remove_mappings(tcp_port: Option<u16>, udp_port: Option<u16>, log: &slog::Logger) {
|
||||||
if tcp_port.is_some() || udp_port.is_some() {
|
if tcp_port.is_some() || udp_port.is_some() {
|
||||||
|
@ -8,20 +8,16 @@ use crate::{error, metrics};
|
|||||||
use beacon_chain::{BeaconChain, BeaconChainError, BeaconChainTypes};
|
use beacon_chain::{BeaconChain, BeaconChainError, BeaconChainTypes};
|
||||||
use eth2_libp2p::{
|
use eth2_libp2p::{
|
||||||
rpc::{GoodbyeReason, RPCResponseErrorCode, RequestId},
|
rpc::{GoodbyeReason, RPCResponseErrorCode, RequestId},
|
||||||
Gossipsub, Libp2pEvent, PeerAction, PeerRequestId, PubsubMessage, Request, Response,
|
Libp2pEvent, PeerAction, PeerRequestId, PubsubMessage, Request, Response,
|
||||||
};
|
|
||||||
use eth2_libp2p::{
|
|
||||||
types::GossipKind, BehaviourEvent, GossipTopic, MessageId, NetworkGlobals, PeerId, TopicHash,
|
|
||||||
};
|
};
|
||||||
|
use eth2_libp2p::{types::GossipKind, BehaviourEvent, MessageId, NetworkGlobals, PeerId};
|
||||||
use eth2_libp2p::{MessageAcceptance, Service as LibP2PService};
|
use eth2_libp2p::{MessageAcceptance, Service as LibP2PService};
|
||||||
use fnv::FnvHashMap;
|
|
||||||
use futures::prelude::*;
|
use futures::prelude::*;
|
||||||
use slog::{debug, error, info, o, trace, warn};
|
use slog::{debug, error, info, o, trace, warn};
|
||||||
use std::{collections::HashMap, net::SocketAddr, sync::Arc, time::Duration};
|
use std::{net::SocketAddr, sync::Arc, time::Duration};
|
||||||
use store::HotColdDB;
|
use store::HotColdDB;
|
||||||
use tokio::sync::mpsc;
|
use tokio::sync::mpsc;
|
||||||
use tokio::time::Delay;
|
use tokio::time::Sleep;
|
||||||
use types::subnet_id::subnet_id_to_string;
|
|
||||||
use types::{EthSpec, RelativeEpoch, SubnetId, Unsigned, ValidatorSubscription};
|
use types::{EthSpec, RelativeEpoch, SubnetId, Unsigned, ValidatorSubscription};
|
||||||
|
|
||||||
mod tests;
|
mod tests;
|
||||||
@ -111,7 +107,7 @@ pub struct NetworkService<T: BeaconChainTypes> {
|
|||||||
/// update the UDP socket of discovery if the UPnP mappings get established.
|
/// update the UDP socket of discovery if the UPnP mappings get established.
|
||||||
discovery_auto_update: bool,
|
discovery_auto_update: bool,
|
||||||
/// A delay that expires when a new fork takes place.
|
/// A delay that expires when a new fork takes place.
|
||||||
next_fork_update: Option<Delay>,
|
next_fork_update: Option<Sleep>,
|
||||||
/// Subscribe to all the subnets once synced.
|
/// Subscribe to all the subnets once synced.
|
||||||
subscribe_all_subnets: bool,
|
subscribe_all_subnets: bool,
|
||||||
/// A timer for updating various network metrics.
|
/// A timer for updating various network metrics.
|
||||||
@ -274,12 +270,12 @@ fn spawn_service<T: BeaconChainTypes>(
|
|||||||
.as_ref()
|
.as_ref()
|
||||||
.map(|gauge| gauge.reset());
|
.map(|gauge| gauge.reset());
|
||||||
}
|
}
|
||||||
update_gossip_metrics::<T::EthSpec>(
|
metrics::update_gossip_metrics::<T::EthSpec>(
|
||||||
&service.libp2p.swarm.gs(),
|
&service.libp2p.swarm.gs(),
|
||||||
&service.network_globals,
|
&service.network_globals,
|
||||||
);
|
);
|
||||||
// update sync metrics
|
// update sync metrics
|
||||||
update_sync_metrics(&service.network_globals);
|
metrics::update_sync_metrics(&service.network_globals);
|
||||||
|
|
||||||
}
|
}
|
||||||
_ = service.gossipsub_parameter_update.next() => {
|
_ = service.gossipsub_parameter_update.next() => {
|
||||||
@ -382,7 +378,7 @@ fn spawn_service<T: BeaconChainTypes>(
|
|||||||
"count" => messages.len(),
|
"count" => messages.len(),
|
||||||
"topics" => format!("{:?}", topic_kinds)
|
"topics" => format!("{:?}", topic_kinds)
|
||||||
);
|
);
|
||||||
expose_publish_metrics(&messages);
|
metrics::expose_publish_metrics(&messages);
|
||||||
service.libp2p.swarm.publish(messages);
|
service.libp2p.swarm.publish(messages);
|
||||||
}
|
}
|
||||||
NetworkMessage::ReportPeer { peer_id, action } => service.libp2p.report_peer(&peer_id, action),
|
NetworkMessage::ReportPeer { peer_id, action } => service.libp2p.report_peer(&peer_id, action),
|
||||||
@ -512,7 +508,7 @@ fn spawn_service<T: BeaconChainTypes>(
|
|||||||
..
|
..
|
||||||
} => {
|
} => {
|
||||||
// Update prometheus metrics.
|
// Update prometheus metrics.
|
||||||
expose_receive_metrics(&message);
|
metrics::expose_receive_metrics(&message);
|
||||||
match message {
|
match message {
|
||||||
// attestation information gets processed in the attestation service
|
// attestation information gets processed in the attestation service
|
||||||
PubsubMessage::Attestation(ref subnet_and_attestation) => {
|
PubsubMessage::Attestation(ref subnet_and_attestation) => {
|
||||||
@ -566,399 +562,22 @@ fn spawn_service<T: BeaconChainTypes>(
|
|||||||
service.next_fork_update = next_fork_delay(&service.beacon_chain);
|
service.next_fork_update = next_fork_delay(&service.beacon_chain);
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
metrics::update_bandwidth_metrics(service.libp2p.bandwidth.clone());
|
||||||
}
|
}
|
||||||
}, "network");
|
}, "network");
|
||||||
|
|
||||||
Ok(())
|
Ok(())
|
||||||
}
|
}
|
||||||
|
|
||||||
/// Returns a `Delay` that triggers shortly after the next change in the beacon chain fork version.
|
/// Returns a `Sleep` that triggers shortly after the next change in the beacon chain fork version.
|
||||||
/// If there is no scheduled fork, `None` is returned.
|
/// If there is no scheduled fork, `None` is returned.
|
||||||
fn next_fork_delay<T: BeaconChainTypes>(
|
fn next_fork_delay<T: BeaconChainTypes>(
|
||||||
beacon_chain: &BeaconChain<T>,
|
beacon_chain: &BeaconChain<T>,
|
||||||
) -> Option<tokio::time::Delay> {
|
) -> Option<tokio::time::Sleep> {
|
||||||
beacon_chain.duration_to_next_fork().map(|until_fork| {
|
beacon_chain.duration_to_next_fork().map(|until_fork| {
|
||||||
// Add a short time-out to start within the new fork period.
|
// Add a short time-out to start within the new fork period.
|
||||||
let delay = Duration::from_millis(200);
|
let delay = Duration::from_millis(200);
|
||||||
tokio::time::delay_until(tokio::time::Instant::now() + until_fork + delay)
|
tokio::time::sleep_until(tokio::time::Instant::now() + until_fork + delay)
|
||||||
})
|
})
|
||||||
}
|
}
|
||||||
|
|
||||||
/// Inspects the `messages` that were being sent to the network and updates Prometheus metrics.
|
|
||||||
fn expose_publish_metrics<T: EthSpec>(messages: &[PubsubMessage<T>]) {
|
|
||||||
for message in messages {
|
|
||||||
match message {
|
|
||||||
PubsubMessage::BeaconBlock(_) => metrics::inc_counter(&metrics::GOSSIP_BLOCKS_TX),
|
|
||||||
PubsubMessage::Attestation(subnet_id) => {
|
|
||||||
metrics::inc_counter_vec(
|
|
||||||
&metrics::ATTESTATIONS_PUBLISHED_PER_SUBNET_PER_SLOT,
|
|
||||||
&[&subnet_id.0.as_ref()],
|
|
||||||
);
|
|
||||||
metrics::inc_counter(&metrics::GOSSIP_UNAGGREGATED_ATTESTATIONS_TX)
|
|
||||||
}
|
|
||||||
PubsubMessage::AggregateAndProofAttestation(_) => {
|
|
||||||
metrics::inc_counter(&metrics::GOSSIP_AGGREGATED_ATTESTATIONS_TX)
|
|
||||||
}
|
|
||||||
_ => {}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
/// Inspects a `message` received from the network and updates Prometheus metrics.
|
|
||||||
fn expose_receive_metrics<T: EthSpec>(message: &PubsubMessage<T>) {
|
|
||||||
match message {
|
|
||||||
PubsubMessage::BeaconBlock(_) => metrics::inc_counter(&metrics::GOSSIP_BLOCKS_RX),
|
|
||||||
PubsubMessage::Attestation(_) => {
|
|
||||||
metrics::inc_counter(&metrics::GOSSIP_UNAGGREGATED_ATTESTATIONS_RX)
|
|
||||||
}
|
|
||||||
PubsubMessage::AggregateAndProofAttestation(_) => {
|
|
||||||
metrics::inc_counter(&metrics::GOSSIP_AGGREGATED_ATTESTATIONS_RX)
|
|
||||||
}
|
|
||||||
_ => {}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
fn update_gossip_metrics<T: EthSpec>(
|
|
||||||
gossipsub: &Gossipsub,
|
|
||||||
network_globals: &Arc<NetworkGlobals<T>>,
|
|
||||||
) {
|
|
||||||
// Clear the metrics
|
|
||||||
let _ = metrics::PEERS_PER_PROTOCOL
|
|
||||||
.as_ref()
|
|
||||||
.map(|gauge| gauge.reset());
|
|
||||||
let _ = metrics::PEERS_PER_PROTOCOL
|
|
||||||
.as_ref()
|
|
||||||
.map(|gauge| gauge.reset());
|
|
||||||
let _ = metrics::MESH_PEERS_PER_MAIN_TOPIC
|
|
||||||
.as_ref()
|
|
||||||
.map(|gauge| gauge.reset());
|
|
||||||
let _ = metrics::AVG_GOSSIPSUB_PEER_SCORE_PER_MAIN_TOPIC
|
|
||||||
.as_ref()
|
|
||||||
.map(|gauge| gauge.reset());
|
|
||||||
let _ = metrics::AVG_GOSSIPSUB_PEER_SCORE_PER_SUBNET_TOPIC
|
|
||||||
.as_ref()
|
|
||||||
.map(|gauge| gauge.reset());
|
|
||||||
|
|
||||||
let _ = metrics::SCORES_BELOW_ZERO_PER_CLIENT
|
|
||||||
.as_ref()
|
|
||||||
.map(|gauge| gauge.reset());
|
|
||||||
let _ = metrics::SCORES_BELOW_GOSSIP_THRESHOLD_PER_CLIENT
|
|
||||||
.as_ref()
|
|
||||||
.map(|gauge| gauge.reset());
|
|
||||||
let _ = metrics::SCORES_BELOW_PUBLISH_THRESHOLD_PER_CLIENT
|
|
||||||
.as_ref()
|
|
||||||
.map(|gauge| gauge.reset());
|
|
||||||
let _ = metrics::SCORES_BELOW_GREYLIST_THRESHOLD_PER_CLIENT
|
|
||||||
.as_ref()
|
|
||||||
.map(|gauge| gauge.reset());
|
|
||||||
let _ = metrics::MIN_SCORES_PER_CLIENT
|
|
||||||
.as_ref()
|
|
||||||
.map(|gauge| gauge.reset());
|
|
||||||
let _ = metrics::MEDIAN_SCORES_PER_CLIENT
|
|
||||||
.as_ref()
|
|
||||||
.map(|gauge| gauge.reset());
|
|
||||||
let _ = metrics::MEAN_SCORES_PER_CLIENT
|
|
||||||
.as_ref()
|
|
||||||
.map(|gauge| gauge.reset());
|
|
||||||
let _ = metrics::MAX_SCORES_PER_CLIENT
|
|
||||||
.as_ref()
|
|
||||||
.map(|gauge| gauge.reset());
|
|
||||||
|
|
||||||
let _ = metrics::BEACON_BLOCK_MESH_PEERS_PER_CLIENT
|
|
||||||
.as_ref()
|
|
||||||
.map(|gauge| gauge.reset());
|
|
||||||
let _ = metrics::BEACON_AGGREGATE_AND_PROOF_MESH_PEERS_PER_CLIENT
|
|
||||||
.as_ref()
|
|
||||||
.map(|gauge| gauge.reset());
|
|
||||||
|
|
||||||
// reset the mesh peers, showing all subnets
|
|
||||||
for subnet_id in 0..T::default_spec().attestation_subnet_count {
|
|
||||||
let _ = metrics::get_int_gauge(
|
|
||||||
&metrics::MESH_PEERS_PER_SUBNET_TOPIC,
|
|
||||||
&[subnet_id_to_string(subnet_id)],
|
|
||||||
)
|
|
||||||
.map(|v| v.set(0));
|
|
||||||
|
|
||||||
let _ = metrics::get_int_gauge(
|
|
||||||
&metrics::GOSSIPSUB_SUBSCRIBED_SUBNET_TOPIC,
|
|
||||||
&[subnet_id_to_string(subnet_id)],
|
|
||||||
)
|
|
||||||
.map(|v| v.set(0));
|
|
||||||
|
|
||||||
let _ = metrics::get_int_gauge(
|
|
||||||
&metrics::GOSSIPSUB_SUBSCRIBED_PEERS_SUBNET_TOPIC,
|
|
||||||
&[subnet_id_to_string(subnet_id)],
|
|
||||||
)
|
|
||||||
.map(|v| v.set(0));
|
|
||||||
}
|
|
||||||
|
|
||||||
// Subnet topics subscribed to
|
|
||||||
for topic_hash in gossipsub.topics() {
|
|
||||||
if let Ok(topic) = GossipTopic::decode(topic_hash.as_str()) {
|
|
||||||
if let GossipKind::Attestation(subnet_id) = topic.kind() {
|
|
||||||
let _ = metrics::get_int_gauge(
|
|
||||||
&metrics::GOSSIPSUB_SUBSCRIBED_SUBNET_TOPIC,
|
|
||||||
&[subnet_id_to_string(subnet_id.into())],
|
|
||||||
)
|
|
||||||
.map(|v| v.set(1));
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// Peers per subscribed subnet
|
|
||||||
let mut peers_per_topic: HashMap<TopicHash, usize> = HashMap::new();
|
|
||||||
for (peer_id, topics) in gossipsub.all_peers() {
|
|
||||||
for topic_hash in topics {
|
|
||||||
*peers_per_topic.entry(topic_hash.clone()).or_default() += 1;
|
|
||||||
|
|
||||||
if let Ok(topic) = GossipTopic::decode(topic_hash.as_str()) {
|
|
||||||
match topic.kind() {
|
|
||||||
GossipKind::Attestation(subnet_id) => {
|
|
||||||
if let Some(v) = metrics::get_int_gauge(
|
|
||||||
&metrics::GOSSIPSUB_SUBSCRIBED_PEERS_SUBNET_TOPIC,
|
|
||||||
&[subnet_id_to_string(subnet_id.into())],
|
|
||||||
) {
|
|
||||||
v.inc()
|
|
||||||
};
|
|
||||||
|
|
||||||
// average peer scores
|
|
||||||
if let Some(score) = gossipsub.peer_score(peer_id) {
|
|
||||||
if let Some(v) = metrics::get_gauge(
|
|
||||||
&metrics::AVG_GOSSIPSUB_PEER_SCORE_PER_SUBNET_TOPIC,
|
|
||||||
&[subnet_id_to_string(subnet_id.into())],
|
|
||||||
) {
|
|
||||||
v.add(score)
|
|
||||||
};
|
|
||||||
}
|
|
||||||
}
|
|
||||||
kind => {
|
|
||||||
// main topics
|
|
||||||
if let Some(score) = gossipsub.peer_score(peer_id) {
|
|
||||||
if let Some(v) = metrics::get_gauge(
|
|
||||||
&metrics::AVG_GOSSIPSUB_PEER_SCORE_PER_MAIN_TOPIC,
|
|
||||||
&[kind.as_ref()],
|
|
||||||
) {
|
|
||||||
v.add(score)
|
|
||||||
};
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
// adjust to average scores by dividing by number of peers
|
|
||||||
for (topic_hash, peers) in peers_per_topic.iter() {
|
|
||||||
if let Ok(topic) = GossipTopic::decode(topic_hash.as_str()) {
|
|
||||||
match topic.kind() {
|
|
||||||
GossipKind::Attestation(subnet_id) => {
|
|
||||||
// average peer scores
|
|
||||||
if let Some(v) = metrics::get_gauge(
|
|
||||||
&metrics::AVG_GOSSIPSUB_PEER_SCORE_PER_SUBNET_TOPIC,
|
|
||||||
&[subnet_id_to_string(subnet_id.into())],
|
|
||||||
) {
|
|
||||||
v.set(v.get() / (*peers as f64))
|
|
||||||
};
|
|
||||||
}
|
|
||||||
kind => {
|
|
||||||
// main topics
|
|
||||||
if let Some(v) = metrics::get_gauge(
|
|
||||||
&metrics::AVG_GOSSIPSUB_PEER_SCORE_PER_MAIN_TOPIC,
|
|
||||||
&[kind.as_ref()],
|
|
||||||
) {
|
|
||||||
v.set(v.get() / (*peers as f64))
|
|
||||||
};
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// mesh peers
|
|
||||||
for topic_hash in gossipsub.topics() {
|
|
||||||
let peers = gossipsub.mesh_peers(&topic_hash).count();
|
|
||||||
if let Ok(topic) = GossipTopic::decode(topic_hash.as_str()) {
|
|
||||||
match topic.kind() {
|
|
||||||
GossipKind::Attestation(subnet_id) => {
|
|
||||||
if let Some(v) = metrics::get_int_gauge(
|
|
||||||
&metrics::MESH_PEERS_PER_SUBNET_TOPIC,
|
|
||||||
&[subnet_id_to_string(subnet_id.into())],
|
|
||||||
) {
|
|
||||||
v.set(peers as i64)
|
|
||||||
};
|
|
||||||
}
|
|
||||||
kind => {
|
|
||||||
// main topics
|
|
||||||
if let Some(v) = metrics::get_int_gauge(
|
|
||||||
&metrics::MESH_PEERS_PER_MAIN_TOPIC,
|
|
||||||
&[kind.as_ref()],
|
|
||||||
) {
|
|
||||||
v.set(peers as i64)
|
|
||||||
};
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// protocol peers
|
|
||||||
let mut peers_per_protocol: HashMap<&'static str, i64> = HashMap::new();
|
|
||||||
for (_peer, protocol) in gossipsub.peer_protocol() {
|
|
||||||
*peers_per_protocol
|
|
||||||
.entry(protocol.as_static_ref())
|
|
||||||
.or_default() += 1;
|
|
||||||
}
|
|
||||||
|
|
||||||
for (protocol, peers) in peers_per_protocol.iter() {
|
|
||||||
if let Some(v) = metrics::get_int_gauge(&metrics::PEERS_PER_PROTOCOL, &[protocol]) {
|
|
||||||
v.set(*peers)
|
|
||||||
};
|
|
||||||
}
|
|
||||||
|
|
||||||
let mut peer_to_client = HashMap::new();
|
|
||||||
let mut scores_per_client: HashMap<&'static str, Vec<f64>> = HashMap::new();
|
|
||||||
{
|
|
||||||
let peers = network_globals.peers.read();
|
|
||||||
for (peer_id, _) in gossipsub.all_peers() {
|
|
||||||
let client = peers
|
|
||||||
.peer_info(peer_id)
|
|
||||||
.map(|peer_info| peer_info.client.kind.as_static_ref())
|
|
||||||
.unwrap_or_else(|| "Unknown");
|
|
||||||
|
|
||||||
peer_to_client.insert(peer_id, client);
|
|
||||||
let score = gossipsub.peer_score(peer_id).unwrap_or(0.0);
|
|
||||||
scores_per_client.entry(client).or_default().push(score);
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// mesh peers per client
|
|
||||||
for topic_hash in gossipsub.topics() {
|
|
||||||
if let Ok(topic) = GossipTopic::decode(topic_hash.as_str()) {
|
|
||||||
match topic.kind() {
|
|
||||||
GossipKind::BeaconBlock => {
|
|
||||||
for peer in gossipsub.mesh_peers(&topic_hash) {
|
|
||||||
if let Some(client) = peer_to_client.get(peer) {
|
|
||||||
if let Some(v) = metrics::get_int_gauge(
|
|
||||||
&metrics::BEACON_BLOCK_MESH_PEERS_PER_CLIENT,
|
|
||||||
&[client],
|
|
||||||
) {
|
|
||||||
v.inc()
|
|
||||||
};
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
GossipKind::BeaconAggregateAndProof => {
|
|
||||||
for peer in gossipsub.mesh_peers(&topic_hash) {
|
|
||||||
if let Some(client) = peer_to_client.get(peer) {
|
|
||||||
if let Some(v) = metrics::get_int_gauge(
|
|
||||||
&metrics::BEACON_AGGREGATE_AND_PROOF_MESH_PEERS_PER_CLIENT,
|
|
||||||
&[client],
|
|
||||||
) {
|
|
||||||
v.inc()
|
|
||||||
};
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
_ => (),
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
for (client, scores) in scores_per_client.into_iter() {
|
|
||||||
let c = &[client];
|
|
||||||
let len = scores.len();
|
|
||||||
if len > 0 {
|
|
||||||
let mut below0 = 0;
|
|
||||||
let mut below_gossip_threshold = 0;
|
|
||||||
let mut below_publish_threshold = 0;
|
|
||||||
let mut below_greylist_threshold = 0;
|
|
||||||
let mut min = f64::INFINITY;
|
|
||||||
let mut sum = 0.0;
|
|
||||||
let mut max = f64::NEG_INFINITY;
|
|
||||||
|
|
||||||
let count = scores.len() as f64;
|
|
||||||
|
|
||||||
for &score in &scores {
|
|
||||||
if score < 0.0 {
|
|
||||||
below0 += 1;
|
|
||||||
}
|
|
||||||
if score < -4000.0 {
|
|
||||||
//TODO not hardcode
|
|
||||||
below_gossip_threshold += 1;
|
|
||||||
}
|
|
||||||
if score < -8000.0 {
|
|
||||||
//TODO not hardcode
|
|
||||||
below_publish_threshold += 1;
|
|
||||||
}
|
|
||||||
if score < -16000.0 {
|
|
||||||
//TODO not hardcode
|
|
||||||
below_greylist_threshold += 1;
|
|
||||||
}
|
|
||||||
if score < min {
|
|
||||||
min = score;
|
|
||||||
}
|
|
||||||
if score > max {
|
|
||||||
max = score;
|
|
||||||
}
|
|
||||||
sum += score;
|
|
||||||
}
|
|
||||||
|
|
||||||
let median = if len == 0 {
|
|
||||||
0.0
|
|
||||||
} else if len % 2 == 0 {
|
|
||||||
(scores[len / 2 - 1] + scores[len / 2]) / 2.0
|
|
||||||
} else {
|
|
||||||
scores[len / 2]
|
|
||||||
};
|
|
||||||
|
|
||||||
metrics::set_gauge_entry(
|
|
||||||
&metrics::SCORES_BELOW_ZERO_PER_CLIENT,
|
|
||||||
c,
|
|
||||||
below0 as f64 / count,
|
|
||||||
);
|
|
||||||
metrics::set_gauge_entry(
|
|
||||||
&metrics::SCORES_BELOW_GOSSIP_THRESHOLD_PER_CLIENT,
|
|
||||||
c,
|
|
||||||
below_gossip_threshold as f64 / count,
|
|
||||||
);
|
|
||||||
metrics::set_gauge_entry(
|
|
||||||
&metrics::SCORES_BELOW_PUBLISH_THRESHOLD_PER_CLIENT,
|
|
||||||
c,
|
|
||||||
below_publish_threshold as f64 / count,
|
|
||||||
);
|
|
||||||
metrics::set_gauge_entry(
|
|
||||||
&metrics::SCORES_BELOW_GREYLIST_THRESHOLD_PER_CLIENT,
|
|
||||||
c,
|
|
||||||
below_greylist_threshold as f64 / count,
|
|
||||||
);
|
|
||||||
|
|
||||||
metrics::set_gauge_entry(&metrics::MIN_SCORES_PER_CLIENT, c, min);
|
|
||||||
metrics::set_gauge_entry(&metrics::MEDIAN_SCORES_PER_CLIENT, c, median);
|
|
||||||
metrics::set_gauge_entry(&metrics::MEAN_SCORES_PER_CLIENT, c, sum / count);
|
|
||||||
metrics::set_gauge_entry(&metrics::MAX_SCORES_PER_CLIENT, c, max);
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
fn update_sync_metrics<T: EthSpec>(network_globals: &Arc<NetworkGlobals<T>>) {
|
|
||||||
// reset the counts
|
|
||||||
if metrics::PEERS_PER_SYNC_TYPE
|
|
||||||
.as_ref()
|
|
||||||
.map(|metric| metric.reset())
|
|
||||||
.is_err()
|
|
||||||
{
|
|
||||||
return;
|
|
||||||
};
|
|
||||||
|
|
||||||
// count per sync status, the number of connected peers
|
|
||||||
let mut peers_per_sync_type = FnvHashMap::default();
|
|
||||||
for sync_type in network_globals
|
|
||||||
.peers
|
|
||||||
.read()
|
|
||||||
.connected_peers()
|
|
||||||
.map(|(_peer_id, info)| info.sync_status.as_str())
|
|
||||||
{
|
|
||||||
*peers_per_sync_type.entry(sync_type).or_default() += 1;
|
|
||||||
}
|
|
||||||
|
|
||||||
for (sync_type, peer_count) in peers_per_sync_type {
|
|
||||||
metrics::set_gauge_entry(&metrics::PEERS_PER_SYNC_TYPE, &[sync_type], peer_count);
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
@ -5,6 +5,7 @@ mod tests {
|
|||||||
use crate::{NetworkConfig, NetworkService};
|
use crate::{NetworkConfig, NetworkService};
|
||||||
use beacon_chain::test_utils::BeaconChainHarness;
|
use beacon_chain::test_utils::BeaconChainHarness;
|
||||||
use eth2_libp2p::Enr;
|
use eth2_libp2p::Enr;
|
||||||
|
//use slog::{o, Drain, Level, Logger};
|
||||||
use slog::Logger;
|
use slog::Logger;
|
||||||
use sloggers::{null::NullLoggerBuilder, Build};
|
use sloggers::{null::NullLoggerBuilder, Build};
|
||||||
use std::str::FromStr;
|
use std::str::FromStr;
|
||||||
@ -14,6 +15,18 @@ mod tests {
|
|||||||
use types::{test_utils::generate_deterministic_keypairs, MinimalEthSpec};
|
use types::{test_utils::generate_deterministic_keypairs, MinimalEthSpec};
|
||||||
|
|
||||||
fn get_logger() -> Logger {
|
fn get_logger() -> Logger {
|
||||||
|
/* For emitting logs during the tests
|
||||||
|
let drain = {
|
||||||
|
let decorator = slog_term::TermDecorator::new().build();
|
||||||
|
let decorator =
|
||||||
|
logging::AlignedTermDecorator::new(decorator, logging::MAX_MESSAGE_WIDTH);
|
||||||
|
let drain = slog_term::FullFormat::new(decorator).build().fuse();
|
||||||
|
let drain = slog_async::Async::new(drain).chan_size(2048).build();
|
||||||
|
drain.filter_level(Level::Debug)
|
||||||
|
};
|
||||||
|
|
||||||
|
Logger::root(drain.fuse(), o!())
|
||||||
|
*/
|
||||||
let builder = NullLoggerBuilder;
|
let builder = NullLoggerBuilder;
|
||||||
builder.build().expect("should build logger")
|
builder.build().expect("should build logger")
|
||||||
}
|
}
|
||||||
@ -37,12 +50,12 @@ mod tests {
|
|||||||
let enr2 = Enr::from_str("enr:-IS4QJ2d11eu6dC7E7LoXeLMgMP3kom1u3SE8esFSWvaHoo0dP1jg8O3-nx9ht-EO3CmG7L6OkHcMmoIh00IYWB92QABgmlkgnY0gmlwhH8AAAGJc2VjcDI1NmsxoQIB_c-jQMOXsbjWkbN-Oj99H57gfId5pfb4wa1qxwV4CIN1ZHCCIyk").unwrap();
|
let enr2 = Enr::from_str("enr:-IS4QJ2d11eu6dC7E7LoXeLMgMP3kom1u3SE8esFSWvaHoo0dP1jg8O3-nx9ht-EO3CmG7L6OkHcMmoIh00IYWB92QABgmlkgnY0gmlwhH8AAAGJc2VjcDI1NmsxoQIB_c-jQMOXsbjWkbN-Oj99H57gfId5pfb4wa1qxwV4CIN1ZHCCIyk").unwrap();
|
||||||
let enrs = vec![enr1, enr2];
|
let enrs = vec![enr1, enr2];
|
||||||
|
|
||||||
let runtime = Runtime::new().unwrap();
|
let runtime = Arc::new(Runtime::new().unwrap());
|
||||||
|
|
||||||
let (signal, exit) = exit_future::signal();
|
let (signal, exit) = exit_future::signal();
|
||||||
let (shutdown_tx, _) = futures::channel::mpsc::channel(1);
|
let (shutdown_tx, _) = futures::channel::mpsc::channel(1);
|
||||||
let executor = task_executor::TaskExecutor::new(
|
let executor = task_executor::TaskExecutor::new(
|
||||||
runtime.handle().clone(),
|
Arc::downgrade(&runtime),
|
||||||
exit,
|
exit,
|
||||||
log.clone(),
|
log.clone(),
|
||||||
shutdown_tx,
|
shutdown_tx,
|
||||||
@ -50,9 +63,10 @@ mod tests {
|
|||||||
|
|
||||||
let mut config = NetworkConfig::default();
|
let mut config = NetworkConfig::default();
|
||||||
config.libp2p_port = 21212;
|
config.libp2p_port = 21212;
|
||||||
|
config.upnp_enabled = false;
|
||||||
config.discovery_port = 21212;
|
config.discovery_port = 21212;
|
||||||
config.boot_nodes_enr = enrs.clone();
|
config.boot_nodes_enr = enrs.clone();
|
||||||
runtime.spawn(async move {
|
runtime.block_on(async move {
|
||||||
// Create a new network service which implicitly gets dropped at the
|
// Create a new network service which implicitly gets dropped at the
|
||||||
// end of the block.
|
// end of the block.
|
||||||
|
|
||||||
@ -61,7 +75,9 @@ mod tests {
|
|||||||
.unwrap();
|
.unwrap();
|
||||||
drop(signal);
|
drop(signal);
|
||||||
});
|
});
|
||||||
runtime.shutdown_timeout(tokio::time::Duration::from_millis(300));
|
|
||||||
|
let raw_runtime = Arc::try_unwrap(runtime).unwrap();
|
||||||
|
raw_runtime.shutdown_timeout(tokio::time::Duration::from_secs(10));
|
||||||
|
|
||||||
// Load the persisted dht from the store
|
// Load the persisted dht from the store
|
||||||
let persisted_enrs = load_dht(store);
|
let persisted_enrs = load_dht(store);
|
||||||
|
@ -72,10 +72,8 @@ impl<E: EthSpec> ProductionBeaconNode<E> {
|
|||||||
let client_genesis = client_config.genesis.clone();
|
let client_genesis = client_config.genesis.clone();
|
||||||
let store_config = client_config.store.clone();
|
let store_config = client_config.store.clone();
|
||||||
let log = context.log().clone();
|
let log = context.log().clone();
|
||||||
|
|
||||||
let db_path = client_config.create_db_path()?;
|
let db_path = client_config.create_db_path()?;
|
||||||
let freezer_db_path_res = client_config.create_freezer_db_path();
|
let freezer_db_path_res = client_config.create_freezer_db_path();
|
||||||
|
|
||||||
let executor = context.executor.clone();
|
let executor = context.executor.clone();
|
||||||
|
|
||||||
let builder = ClientBuilder::new(context.eth_spec_instance.clone())
|
let builder = ClientBuilder::new(context.eth_spec_instance.clone())
|
||||||
|
@ -1,11 +1,14 @@
|
|||||||
#![cfg(test)]
|
#![cfg(test)]
|
||||||
|
|
||||||
|
//TODO: Drop compat library once reqwest and other libraries update to tokio 0.3
|
||||||
|
|
||||||
use beacon_chain::StateSkipConfig;
|
use beacon_chain::StateSkipConfig;
|
||||||
use node_test_rig::{
|
use node_test_rig::{
|
||||||
environment::{Environment, EnvironmentBuilder},
|
environment::{Environment, EnvironmentBuilder},
|
||||||
eth2::types::StateId,
|
eth2::types::StateId,
|
||||||
testing_client_config, LocalBeaconNode,
|
testing_client_config, LocalBeaconNode,
|
||||||
};
|
};
|
||||||
|
use tokio_compat_02::FutureExt;
|
||||||
use types::{EthSpec, MinimalEthSpec, Slot};
|
use types::{EthSpec, MinimalEthSpec, Slot};
|
||||||
|
|
||||||
fn env_builder() -> EnvironmentBuilder<MinimalEthSpec> {
|
fn env_builder() -> EnvironmentBuilder<MinimalEthSpec> {
|
||||||
@ -26,18 +29,26 @@ fn build_node<E: EthSpec>(env: &mut Environment<E>) -> LocalBeaconNode<E> {
|
|||||||
fn http_server_genesis_state() {
|
fn http_server_genesis_state() {
|
||||||
let mut env = env_builder()
|
let mut env = env_builder()
|
||||||
.null_logger()
|
.null_logger()
|
||||||
|
//.async_logger("debug", None)
|
||||||
.expect("should build env logger")
|
.expect("should build env logger")
|
||||||
.multi_threaded_tokio_runtime()
|
.multi_threaded_tokio_runtime()
|
||||||
.expect("should start tokio runtime")
|
.expect("should start tokio runtime")
|
||||||
.build()
|
.build()
|
||||||
.expect("environment should build");
|
.expect("environment should build");
|
||||||
|
|
||||||
|
// build a runtime guard
|
||||||
|
|
||||||
let node = build_node(&mut env);
|
let node = build_node(&mut env);
|
||||||
|
|
||||||
let remote_node = node.remote_node().expect("should produce remote node");
|
let remote_node = node.remote_node().expect("should produce remote node");
|
||||||
|
|
||||||
let api_state = env
|
let api_state = env
|
||||||
.runtime()
|
.runtime()
|
||||||
.block_on(remote_node.get_debug_beacon_states(StateId::Slot(Slot::new(0))))
|
.block_on(
|
||||||
|
remote_node
|
||||||
|
.get_debug_beacon_states(StateId::Slot(Slot::new(0)))
|
||||||
|
.compat(),
|
||||||
|
)
|
||||||
.expect("should fetch state from http api")
|
.expect("should fetch state from http api")
|
||||||
.unwrap()
|
.unwrap()
|
||||||
.data;
|
.data;
|
||||||
@ -54,5 +65,6 @@ fn http_server_genesis_state() {
|
|||||||
api_state, db_state,
|
api_state, db_state,
|
||||||
"genesis state from api should match that from the DB"
|
"genesis state from api should match that from the DB"
|
||||||
);
|
);
|
||||||
|
|
||||||
env.fire_signal();
|
env.fire_signal();
|
||||||
}
|
}
|
||||||
|
@ -8,8 +8,8 @@ edition = "2018"
|
|||||||
beacon_chain = { path = "../beacon_chain" }
|
beacon_chain = { path = "../beacon_chain" }
|
||||||
types = { path = "../../consensus/types" }
|
types = { path = "../../consensus/types" }
|
||||||
slot_clock = { path = "../../common/slot_clock" }
|
slot_clock = { path = "../../common/slot_clock" }
|
||||||
tokio = { version = "0.2.22", features = ["full"] }
|
tokio = { version = "0.3.2", features = ["full"] }
|
||||||
slog = "2.5.2"
|
slog = "2.5.2"
|
||||||
parking_lot = "0.11.0"
|
parking_lot = "0.11.0"
|
||||||
futures = "0.3.5"
|
futures = "0.3.7"
|
||||||
task_executor = { path = "../../common/task_executor" }
|
task_executor = { path = "../../common/task_executor" }
|
||||||
|
@ -7,11 +7,11 @@ edition = "2018"
|
|||||||
# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html
|
# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html
|
||||||
|
|
||||||
[dependencies]
|
[dependencies]
|
||||||
futures = "0.3.5"
|
futures = "0.3.7"
|
||||||
serde = "1.0.116"
|
serde = "1.0.116"
|
||||||
serde_derive = "1.0.116"
|
serde_derive = "1.0.116"
|
||||||
slog = "2.5.2"
|
slog = "2.5.2"
|
||||||
tokio = { version = "0.2.22", features = ["full"] }
|
tokio = { version = "0.3.2", features = ["full"] }
|
||||||
types = { path = "../../consensus/types" }
|
types = { path = "../../consensus/types" }
|
||||||
ws = "0.9.1"
|
ws = "0.9.1"
|
||||||
task_executor = { path = "../../common/task_executor" }
|
task_executor = { path = "../../common/task_executor" }
|
||||||
|
@ -79,11 +79,11 @@ pub fn start_server<T: EthSpec>(
|
|||||||
|
|
||||||
// Place a future on the handle that will shutdown the websocket server when the
|
// Place a future on the handle that will shutdown the websocket server when the
|
||||||
// application exits.
|
// application exits.
|
||||||
executor.runtime_handle().spawn(exit_future);
|
|
||||||
|
executor.spawn(exit_future, "Websocket exit");
|
||||||
|
|
||||||
let log_inner = log.clone();
|
let log_inner = log.clone();
|
||||||
|
let server_future = move || match server.run() {
|
||||||
let _ = std::thread::spawn(move || match server.run() {
|
|
||||||
Ok(_) => {
|
Ok(_) => {
|
||||||
debug!(
|
debug!(
|
||||||
log_inner,
|
log_inner,
|
||||||
@ -97,7 +97,9 @@ pub fn start_server<T: EthSpec>(
|
|||||||
"error" => format!("{:?}", e)
|
"error" => format!("{:?}", e)
|
||||||
);
|
);
|
||||||
}
|
}
|
||||||
});
|
};
|
||||||
|
|
||||||
|
executor.spawn_blocking(server_future, "Websocket server");
|
||||||
|
|
||||||
info!(
|
info!(
|
||||||
log,
|
log,
|
||||||
|
@ -13,12 +13,12 @@ eth2_testnet_config = { path = "../common/eth2_testnet_config" }
|
|||||||
eth2_ssz = "0.1.2"
|
eth2_ssz = "0.1.2"
|
||||||
slog = "2.5.2"
|
slog = "2.5.2"
|
||||||
sloggers = "1.0.1"
|
sloggers = "1.0.1"
|
||||||
tokio = "0.2.22"
|
tokio = "0.3.2"
|
||||||
log = "0.4.11"
|
log = "0.4.11"
|
||||||
slog-term = "2.6.0"
|
slog-term = "2.6.0"
|
||||||
logging = { path = "../common/logging" }
|
logging = { path = "../common/logging" }
|
||||||
slog-async = "2.5.0"
|
slog-async = "2.5.0"
|
||||||
slog-scope = "4.3.0"
|
slog-scope = "4.3.0"
|
||||||
slog-stdlog = "4.0.0"
|
slog-stdlog = "4.0.0"
|
||||||
futures = "0.3.5"
|
futures = "0.3.7"
|
||||||
hex = "0.4.2"
|
hex = "0.4.2"
|
||||||
|
@ -59,8 +59,7 @@ pub fn run(matches: &ArgMatches<'_>, eth_spec_id: EthSpecId, debug_level: String
|
|||||||
|
|
||||||
fn main<T: EthSpec>(matches: &ArgMatches<'_>, log: slog::Logger) -> Result<(), String> {
|
fn main<T: EthSpec>(matches: &ArgMatches<'_>, log: slog::Logger) -> Result<(), String> {
|
||||||
// Builds a custom executor for the bootnode
|
// Builds a custom executor for the bootnode
|
||||||
let mut runtime = tokio::runtime::Builder::new()
|
let runtime = tokio::runtime::Builder::new_multi_thread()
|
||||||
.threaded_scheduler()
|
|
||||||
.enable_all()
|
.enable_all()
|
||||||
.build()
|
.build()
|
||||||
.map_err(|e| format!("Failed to build runtime: {}", e))?;
|
.map_err(|e| format!("Failed to build runtime: {}", e))?;
|
||||||
|
@ -52,7 +52,7 @@ pub async fn run<T: EthSpec>(config: BootNodeConfig<T>, log: slog::Logger) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
// start the server
|
// start the server
|
||||||
if let Err(e) = discv5.start(config.listen_socket) {
|
if let Err(e) = discv5.start(config.listen_socket).await {
|
||||||
slog::crit!(log, "Could not start discv5 server"; "error" => e.to_string());
|
slog::crit!(log, "Could not start discv5 server"; "error" => e.to_string());
|
||||||
return;
|
return;
|
||||||
}
|
}
|
||||||
|
@ -52,9 +52,12 @@ fn uncompress_state(testnet: &Eth2NetArchiveAndDirectory<'static>) -> Result<(),
|
|||||||
.map_err(|e| format!("Error writing file {:?}: {}", path, e))?;
|
.map_err(|e| format!("Error writing file {:?}: {}", path, e))?;
|
||||||
} else {
|
} else {
|
||||||
// Create empty genesis.ssz if genesis is unknown
|
// Create empty genesis.ssz if genesis is unknown
|
||||||
File::create(testnet.dir().join(GENESIS_FILE_NAME))
|
let genesis_file = testnet.dir().join(GENESIS_FILE_NAME);
|
||||||
|
if !genesis_file.exists() {
|
||||||
|
File::create(genesis_file)
|
||||||
.map_err(|e| format!("Failed to create {}: {}", GENESIS_FILE_NAME, e))?;
|
.map_err(|e| format!("Failed to create {}: {}", GENESIS_FILE_NAME, e))?;
|
||||||
}
|
}
|
||||||
|
}
|
||||||
|
|
||||||
Ok(())
|
Ok(())
|
||||||
}
|
}
|
||||||
|
@ -5,8 +5,8 @@ authors = ["Sigma Prime <contact@sigmaprime.io>"]
|
|||||||
edition = "2018"
|
edition = "2018"
|
||||||
|
|
||||||
[dependencies]
|
[dependencies]
|
||||||
futures = "0.3.5"
|
futures = "0.3.7"
|
||||||
tokio = { version = "0.2.22", features = ["time"] }
|
tokio-util = { version = "0.4.0", features = ["time"] }
|
||||||
|
|
||||||
[dev-dependencies]
|
[dev-dependencies]
|
||||||
tokio = { version = "0.2.22", features = ["time", "rt-threaded", "macros"] }
|
tokio = { version = "0.3.2", features = ["time", "rt-multi-thread", "macros"] }
|
||||||
|
@ -12,7 +12,7 @@ use std::{
|
|||||||
task::{Context, Poll},
|
task::{Context, Poll},
|
||||||
time::{Duration, Instant},
|
time::{Duration, Instant},
|
||||||
};
|
};
|
||||||
use tokio::time::delay_queue::{self, DelayQueue};
|
use tokio_util::time::delay_queue::{self, DelayQueue};
|
||||||
|
|
||||||
pub struct HashSetDelay<K>
|
pub struct HashSetDelay<K>
|
||||||
where
|
where
|
||||||
|
@ -5,9 +5,10 @@ authors = ["Sigma Prime <contact@sigmaprime.io>"]
|
|||||||
edition = "2018"
|
edition = "2018"
|
||||||
|
|
||||||
[dependencies]
|
[dependencies]
|
||||||
tokio = { version = "0.2.22", features = ["rt-threaded", "macros", "blocking"] }
|
tokio = { version = "0.3.2", features = ["rt"] }
|
||||||
slog = "2.5.2"
|
slog = "2.5.2"
|
||||||
futures = "0.3.5"
|
futures = "0.3.7"
|
||||||
exit-future = "0.2.0"
|
exit-future = "0.2.0"
|
||||||
lazy_static = "1.4.0"
|
lazy_static = "1.4.0"
|
||||||
lighthouse_metrics = { path = "../lighthouse_metrics" }
|
lighthouse_metrics = { path = "../lighthouse_metrics" }
|
||||||
|
tokio-compat-02 = "0.1"
|
||||||
|
@ -3,13 +3,15 @@ mod metrics;
|
|||||||
use futures::channel::mpsc::Sender;
|
use futures::channel::mpsc::Sender;
|
||||||
use futures::prelude::*;
|
use futures::prelude::*;
|
||||||
use slog::{debug, o, trace};
|
use slog::{debug, o, trace};
|
||||||
use tokio::runtime::Handle;
|
use std::sync::Weak;
|
||||||
|
use tokio::runtime::Runtime;
|
||||||
|
use tokio_compat_02::FutureExt;
|
||||||
|
|
||||||
/// A wrapper over a runtime handle which can spawn async and blocking tasks.
|
/// A wrapper over a runtime handle which can spawn async and blocking tasks.
|
||||||
#[derive(Clone)]
|
#[derive(Clone)]
|
||||||
pub struct TaskExecutor {
|
pub struct TaskExecutor {
|
||||||
/// The handle to the runtime on which tasks are spawned
|
/// The handle to the runtime on which tasks are spawned
|
||||||
handle: Handle,
|
runtime: Weak<Runtime>,
|
||||||
/// The receiver exit future which on receiving shuts down the task
|
/// The receiver exit future which on receiving shuts down the task
|
||||||
exit: exit_future::Exit,
|
exit: exit_future::Exit,
|
||||||
/// Sender given to tasks, so that if they encounter a state in which execution cannot
|
/// Sender given to tasks, so that if they encounter a state in which execution cannot
|
||||||
@ -27,13 +29,13 @@ impl TaskExecutor {
|
|||||||
/// Note: this function is mainly useful in tests. A `TaskExecutor` should be normally obtained from
|
/// Note: this function is mainly useful in tests. A `TaskExecutor` should be normally obtained from
|
||||||
/// a [`RuntimeContext`](struct.RuntimeContext.html)
|
/// a [`RuntimeContext`](struct.RuntimeContext.html)
|
||||||
pub fn new(
|
pub fn new(
|
||||||
handle: Handle,
|
runtime: Weak<Runtime>,
|
||||||
exit: exit_future::Exit,
|
exit: exit_future::Exit,
|
||||||
log: slog::Logger,
|
log: slog::Logger,
|
||||||
signal_tx: Sender<&'static str>,
|
signal_tx: Sender<&'static str>,
|
||||||
) -> Self {
|
) -> Self {
|
||||||
Self {
|
Self {
|
||||||
handle,
|
runtime,
|
||||||
exit,
|
exit,
|
||||||
signal_tx,
|
signal_tx,
|
||||||
log,
|
log,
|
||||||
@ -43,7 +45,7 @@ impl TaskExecutor {
|
|||||||
/// Clones the task executor adding a service name.
|
/// Clones the task executor adding a service name.
|
||||||
pub fn clone_with_name(&self, service_name: String) -> Self {
|
pub fn clone_with_name(&self, service_name: String) -> Self {
|
||||||
TaskExecutor {
|
TaskExecutor {
|
||||||
handle: self.handle.clone(),
|
runtime: self.runtime.clone(),
|
||||||
exit: self.exit.clone(),
|
exit: self.exit.clone(),
|
||||||
signal_tx: self.signal_tx.clone(),
|
signal_tx: self.signal_tx.clone(),
|
||||||
log: self.log.new(o!("service" => service_name)),
|
log: self.log.new(o!("service" => service_name)),
|
||||||
@ -61,7 +63,7 @@ impl TaskExecutor {
|
|||||||
if let Some(int_gauge) = metrics::get_int_gauge(&metrics::ASYNC_TASKS_COUNT, &[name]) {
|
if let Some(int_gauge) = metrics::get_int_gauge(&metrics::ASYNC_TASKS_COUNT, &[name]) {
|
||||||
// Task is shutdown before it completes if `exit` receives
|
// Task is shutdown before it completes if `exit` receives
|
||||||
let int_gauge_1 = int_gauge.clone();
|
let int_gauge_1 = int_gauge.clone();
|
||||||
let future = future::select(Box::pin(task), exit).then(move |either| {
|
let future = future::select(Box::pin(task.compat()), exit).then(move |either| {
|
||||||
match either {
|
match either {
|
||||||
future::Either::Left(_) => trace!(log, "Async task completed"; "task" => name),
|
future::Either::Left(_) => trace!(log, "Async task completed"; "task" => name),
|
||||||
future::Either::Right(_) => {
|
future::Either::Right(_) => {
|
||||||
@ -73,7 +75,11 @@ impl TaskExecutor {
|
|||||||
});
|
});
|
||||||
|
|
||||||
int_gauge.inc();
|
int_gauge.inc();
|
||||||
self.handle.spawn(future);
|
if let Some(runtime) = self.runtime.upgrade() {
|
||||||
|
runtime.spawn(future);
|
||||||
|
} else {
|
||||||
|
debug!(self.log, "Couldn't spawn task. Runtime shutting down");
|
||||||
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -93,13 +99,19 @@ impl TaskExecutor {
|
|||||||
) {
|
) {
|
||||||
if let Some(int_gauge) = metrics::get_int_gauge(&metrics::ASYNC_TASKS_COUNT, &[name]) {
|
if let Some(int_gauge) = metrics::get_int_gauge(&metrics::ASYNC_TASKS_COUNT, &[name]) {
|
||||||
let int_gauge_1 = int_gauge.clone();
|
let int_gauge_1 = int_gauge.clone();
|
||||||
let future = task.then(move |_| {
|
let future = task
|
||||||
|
.then(move |_| {
|
||||||
int_gauge_1.dec();
|
int_gauge_1.dec();
|
||||||
futures::future::ready(())
|
futures::future::ready(())
|
||||||
});
|
})
|
||||||
|
.compat();
|
||||||
|
|
||||||
int_gauge.inc();
|
int_gauge.inc();
|
||||||
self.handle.spawn(future);
|
if let Some(runtime) = self.runtime.upgrade() {
|
||||||
|
runtime.spawn(future);
|
||||||
|
} else {
|
||||||
|
debug!(self.log, "Couldn't spawn task. Runtime shutting down");
|
||||||
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -109,7 +121,6 @@ impl TaskExecutor {
|
|||||||
where
|
where
|
||||||
F: FnOnce() + Send + 'static,
|
F: FnOnce() + Send + 'static,
|
||||||
{
|
{
|
||||||
let exit = self.exit.clone();
|
|
||||||
let log = self.log.clone();
|
let log = self.log.clone();
|
||||||
|
|
||||||
if let Some(metric) = metrics::get_histogram(&metrics::BLOCKING_TASKS_HISTOGRAM, &[name]) {
|
if let Some(metric) = metrics::get_histogram(&metrics::BLOCKING_TASKS_HISTOGRAM, &[name]) {
|
||||||
@ -117,31 +128,128 @@ impl TaskExecutor
{
let int_gauge_1 = int_gauge.clone();
let timer = metric.start_timer();
let join_handle = self.handle.spawn_blocking(task);
let join_handle = if let Some(runtime) = self.runtime.upgrade() {
runtime.spawn_blocking(task)
} else {
debug!(self.log, "Couldn't spawn task. Runtime shutting down");
return;
};

let future = future::select(join_handle, exit).then(move |either| {
let future = async move {
match either {
match join_handle.await {
future::Either::Left(_) => {
Ok(_) => trace!(log, "Blocking task completed"; "task" => name),
trace!(log, "Blocking task completed"; "task" => name)
Err(e) => debug!(log, "Blocking task failed"; "error" => %e),
}
};
future::Either::Right(_) => {
debug!(log, "Blocking task shutdown, exit received"; "task" => name)
}
}
timer.observe_duration();
int_gauge_1.dec();
futures::future::ready(())
};

int_gauge.inc();
if let Some(runtime) = self.runtime.upgrade() {
runtime.spawn(future);
} else {
debug!(self.log, "Couldn't spawn task. Runtime shutting down");
}
}
}
}

/// Spawn a future on the tokio runtime wrapped in an `exit_future::Exit` returning an optional
/// join handle to the future.
/// The task is canceled when the corresponding exit_future `Signal` is fired/dropped.
///
/// This function generates prometheus metrics on number of tasks and task duration.
pub fn spawn_handle<R: Send + 'static>(
&self,
task: impl Future<Output = R> + Send + 'static,
name: &'static str,
) -> Option<tokio::task::JoinHandle<Option<R>>> {
let exit = self.exit.clone();
let log = self.log.clone();

if let Some(int_gauge) = metrics::get_int_gauge(&metrics::ASYNC_TASKS_COUNT, &[name]) {
// Task is shutdown before it completes if `exit` receives
let int_gauge_1 = int_gauge.clone();
let future = future::select(Box::pin(task), exit).then(move |either| {
let result = match either {
future::Either::Left((task, _)) => {
trace!(log, "Async task completed"; "task" => name);
Some(task)
}
future::Either::Right(_) => {
debug!(log, "Async task shutdown, exit received"; "task" => name);
None
}
};
int_gauge_1.dec();
futures::future::ready(result)
});

int_gauge.inc();
self.handle.spawn(future);
if let Some(runtime) = self.runtime.upgrade() {
Some(runtime.spawn(future.compat()))
} else {
debug!(self.log, "Couldn't spawn task. Runtime shutting down");
None
}
} else {
None
}
}

/// Returns the underlying runtime handle.
/// Spawn a blocking task on a dedicated tokio thread pool wrapped in an exit future returning
pub fn runtime_handle(&self) -> Handle {
/// a join handle to the future.
self.handle.clone()
/// If the runtime doesn't exist, this will return None.
/// The Future returned behaves like the standard JoinHandle which can return an error if the
/// task failed.
/// This function generates prometheus metrics on number of tasks and task duration.
pub fn spawn_blocking_handle<F, R>(
&self,
task: F,
name: &'static str,
) -> Option<impl Future<Output = Result<R, tokio::task::JoinError>>>
where
F: FnOnce() -> R + Send + 'static,
R: Send + 'static,
{
let log = self.log.clone();

if let Some(metric) = metrics::get_histogram(&metrics::BLOCKING_TASKS_HISTOGRAM, &[name]) {
if let Some(int_gauge) = metrics::get_int_gauge(&metrics::BLOCKING_TASKS_COUNT, &[name])
{
let int_gauge_1 = int_gauge;
let timer = metric.start_timer();
let join_handle = if let Some(runtime) = self.runtime.upgrade() {
runtime.spawn_blocking(task)
} else {
debug!(self.log, "Couldn't spawn task. Runtime shutting down");
return None;
};

Some(async move {
let result = match join_handle.await {
Ok(result) => {
trace!(log, "Blocking task completed"; "task" => name);
Ok(result)
}
Err(e) => {
debug!(log, "Blocking task ended unexpectedly"; "error" => %e);
Err(e)
}
};
timer.observe_duration();
int_gauge_1.dec();
result
})
} else {
None
}
} else {
None
}
}

pub fn runtime(&self) -> Weak<Runtime> {
self.runtime.clone()
}

/// Returns a copy of the `exit_future::Exit`.
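
The recurring pattern in this executor change is worth calling out: the executor now holds a `Weak<Runtime>` and must upgrade it before every spawn, silently dropping work once shutdown has begun. Below is a minimal sketch of that pattern under tokio 0.3; the `WeakExecutor` type and its logging are illustrative, not the Lighthouse `TaskExecutor`, which additionally wires an `exit_future::Exit` and prometheus gauges around each spawned future as shown above.

```rust
use std::future::Future;
use std::sync::{Arc, Weak};
use tokio::runtime::Runtime;
use tokio::task::JoinHandle;

/// Hypothetical executor holding only a weak handle to the runtime, so it can
/// never keep the runtime alive and block shutdown.
#[derive(Clone)]
struct WeakExecutor {
    runtime: Weak<Runtime>,
}

impl WeakExecutor {
    /// Spawn a task if the runtime still exists; return `None` once it is gone.
    fn spawn<F>(&self, future: F) -> Option<JoinHandle<()>>
    where
        F: Future<Output = ()> + Send + 'static,
    {
        if let Some(runtime) = self.runtime.upgrade() {
            Some(runtime.spawn(future))
        } else {
            eprintln!("Couldn't spawn task. Runtime shutting down");
            None
        }
    }
}

fn main() {
    let runtime = Arc::new(Runtime::new().expect("should build runtime"));
    let executor = WeakExecutor {
        runtime: Arc::downgrade(&runtime),
    };

    if let Some(handle) = executor.spawn(async { println!("task ran") }) {
        runtime.block_on(handle).expect("task should not panic");
    }
    // Only the owner of the `Arc` decides when the runtime shuts down.
}
```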
@ -7,14 +7,14 @@ edition = "2018"
# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html

[dependencies]
warp = { git = "https://github.com/paulhauner/warp", branch = "cors-wildcard" }
warp = { git = "https://github.com/sigp/warp ", branch = "lighthouse" }
eth2 = { path = "../eth2" }
types = { path = "../../consensus/types" }
beacon_chain = { path = "../../beacon_node/beacon_chain" }
state_processing = { path = "../../consensus/state_processing" }
safe_arith = { path = "../../consensus/safe_arith" }
serde = { version = "1.0.116", features = ["derive"] }
tokio = { version = "0.2.22", features = ["sync"] }
tokio = { version = "0.3.2", features = ["sync"] }
headers = "0.3.2"
lighthouse_metrics = { path = "../lighthouse_metrics" }
lazy_static = "1.4.0"

@ -1,19 +1,21 @@
use serde::Serialize;

/// Execute some task in a tokio "blocking thread". These threads are ideal for long-running
/// A convenience wrapper around `blocking_task`.
/// (blocking) tasks since they don't jam up the core executor.
pub async fn blocking_task<F, T>(func: F) -> Result<T, warp::Rejection>
pub async fn blocking_task<F, T>(func: F) -> T
where
F: Fn() -> T,
F: FnOnce() -> Result<T, warp::Rejection> + Send + 'static,
T: Send + 'static,
{
tokio::task::block_in_place(func)
tokio::task::spawn_blocking(func)
.await
.unwrap_or_else(|_| Err(warp::reject::reject())) // This should really be a 500
}

/// A convenience wrapper around `blocking_task` for use with `warp` JSON responses.
pub async fn blocking_json_task<F, T>(func: F) -> Result<warp::reply::Json, warp::Rejection>
where
F: Fn() -> Result<T, warp::Rejection>,
F: FnOnce() -> Result<T, warp::Rejection> + Send + 'static,
T: Serialize,
T: Serialize + Send + 'static,
{
blocking_task(func)
.await

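For context on this change: `tokio::task::block_in_place` panics outside the multi-threaded scheduler in tokio 0.3, so the wrapper now uses `spawn_blocking`, which is why the closures have to become `FnOnce + Send + 'static`. A rough standalone sketch of the same pattern follows, using a plain `String` error instead of the `warp::Rejection` type used above.

```rust
use tokio::task;

/// Sketch of the `spawn_blocking`-based wrapper: run a blocking closure off the
/// async worker threads and turn a panicked or cancelled task into an error value.
async fn blocking_task<F, T>(func: F) -> Result<T, String>
where
    F: FnOnce() -> T + Send + 'static,
    T: Send + 'static,
{
    task::spawn_blocking(func)
        .await
        .map_err(|e| format!("blocking task failed: {}", e))
}

#[tokio::main]
async fn main() {
    // The closure runs on tokio's dedicated blocking thread pool.
    let sum = blocking_task(|| (0u64..1_000_000).sum::<u64>())
        .await
        .expect("task should complete");
    println!("sum = {}", sum);
}
```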
@ -20,7 +20,7 @@ types = { path = "../consensus/types" }
state_processing = { path = "../consensus/state_processing" }
eth2_ssz = "0.1.2"
regex = "1.3.9"
futures = { version = "0.3.5", features = ["compat"] }
futures = { version = "0.3.7", features = ["compat"] }
environment = { path = "../lighthouse/environment" }
web3 = "0.11.0"
eth2_testnet_config = { path = "../common/eth2_testnet_config" }

@ -28,7 +28,7 @@ dirs = "3.0.1"
genesis = { path = "../beacon_node/genesis" }
deposit_contract = { path = "../common/deposit_contract" }
tree_hash = "0.1.1"
tokio = { version = "0.2.22", features = ["full"] }
tokio = { version = "0.3.2", features = ["full"] }
clap_utils = { path = "../common/clap_utils" }
eth2_libp2p = { path = "../beacon_node/eth2_libp2p" }
validator_dir = { path = "../common/validator_dir", features = ["insecure_keys"] }

@ -36,3 +36,4 @@ rand = "0.7.3"
eth2_keystore = { path = "../crypto/eth2_keystore" }
lighthouse_version = { path = "../common/lighthouse_version" }
directory = { path = "../common/directory" }
tokio-compat-02 = "0.1"

@ -6,6 +6,7 @@ use deposit_contract::{
use environment::Environment;
use futures::compat::Future01CompatExt;
use std::path::PathBuf;
use tokio_compat_02::FutureExt;
use types::EthSpec;
use web3::{
contract::{Contract, Options},

@ -14,7 +15,7 @@ use web3::{
Web3,
};

pub fn run<T: EthSpec>(mut env: Environment<T>, matches: &ArgMatches<'_>) -> Result<(), String> {
pub fn run<T: EthSpec>(env: Environment<T>, matches: &ArgMatches<'_>) -> Result<(), String> {
let eth1_ipc_path: PathBuf = clap_utils::parse_required(matches, "eth1-ipc")?;
let from_address: Address = clap_utils::parse_required(matches, "from-address")?;
let confirmations: usize = clap_utils::parse_required(matches, "confirmations")?;

@ -30,7 +31,8 @@ pub fn run<T: EthSpec>(mut env: Environment<T>, matches: &ArgMatches<'_>) -> Res
)
})?;

env.runtime().block_on(async {
env.runtime().block_on(
async {
// It's unlikely that this will be the _actual_ deployment block, however it'll be close
// enough to serve our purposes.
//

@ -63,5 +65,7 @@ pub fn run<T: EthSpec>(mut env: Environment<T>, matches: &ArgMatches<'_>) -> Res
println!("deposit_contract_deploy_block: {}", deploy_block);

Ok(())
})
}
.compat(),
)
}

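lcli still drives web3 0.11 (and therefore hyper/tokio 0.2 internals) under the hood, so the async body is wrapped with `tokio_compat_02::FutureExt::compat()` before `block_on`: the compat layer sets up a tokio 0.2 context so that 0.2-era timers and I/O inside the future can find their reactor. A small illustration of the shape, where `fetch_block_number` is a hypothetical stand-in rather than the lcli code:

```rust
use tokio_compat_02::FutureExt;

// Stand-in for a call into a library that is still built against tokio 0.2,
// e.g. web3 0.11 talking to an eth1 node.
async fn fetch_block_number() -> Result<u64, String> {
    Ok(42)
}

fn main() -> Result<(), String> {
    let runtime = tokio::runtime::Runtime::new().map_err(|e| format!("{:?}", e))?;

    // `.compat()` runs the whole future inside a tokio 0.2 context.
    let block = runtime.block_on(
        async {
            let n = fetch_block_number().await?;
            Ok::<u64, String>(n)
        }
        .compat(),
    )?;

    println!("eth1 block number: {}", block);
    Ok(())
}
```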
@ -6,6 +6,7 @@ use ssz::Encode;
|
|||||||
use std::cmp::max;
|
use std::cmp::max;
|
||||||
use std::path::PathBuf;
|
use std::path::PathBuf;
|
||||||
use std::time::Duration;
|
use std::time::Duration;
|
||||||
|
use tokio_compat_02::FutureExt;
|
||||||
use types::EthSpec;
|
use types::EthSpec;
|
||||||
|
|
||||||
/// Interval between polling the eth1 node for genesis information.
|
/// Interval between polling the eth1 node for genesis information.
|
||||||
@ -58,7 +59,8 @@ pub fn run<T: EthSpec>(mut env: Environment<T>, matches: &ArgMatches<'_>) -> Res
|
|||||||
let genesis_service =
|
let genesis_service =
|
||||||
Eth1GenesisService::new(config, env.core_context().log().clone(), spec.clone());
|
Eth1GenesisService::new(config, env.core_context().log().clone(), spec.clone());
|
||||||
|
|
||||||
env.runtime().block_on(async {
|
env.runtime().block_on(
|
||||||
|
async {
|
||||||
let _ = genesis_service
|
let _ = genesis_service
|
||||||
.wait_for_genesis_state::<T>(ETH1_GENESIS_UPDATE_INTERVAL, spec)
|
.wait_for_genesis_state::<T>(ETH1_GENESIS_UPDATE_INTERVAL, spec)
|
||||||
.await
|
.await
|
||||||
@ -72,5 +74,7 @@ pub fn run<T: EthSpec>(mut env: Environment<T>, matches: &ArgMatches<'_>) -> Res
|
|||||||
info!("Connecting to eth1 http endpoints: {:?}", endpoints);
|
info!("Connecting to eth1 http endpoints: {:?}", endpoints);
|
||||||
|
|
||||||
Ok(())
|
Ok(())
|
||||||
})
|
}
|
||||||
|
.compat(),
|
||||||
|
)
|
||||||
}
|
}
|
||||||
|
@ -2,6 +2,7 @@ use clap::ArgMatches;
|
|||||||
use environment::Environment;
|
use environment::Environment;
|
||||||
use futures::compat::Future01CompatExt;
|
use futures::compat::Future01CompatExt;
|
||||||
use std::path::PathBuf;
|
use std::path::PathBuf;
|
||||||
|
use tokio_compat_02::FutureExt;
|
||||||
use types::EthSpec;
|
use types::EthSpec;
|
||||||
use web3::{
|
use web3::{
|
||||||
transports::Ipc,
|
transports::Ipc,
|
||||||
@ -12,7 +13,7 @@ use web3::{
|
|||||||
/// `keccak("steal()")[0..4]`
|
/// `keccak("steal()")[0..4]`
|
||||||
pub const STEAL_FN_SIGNATURE: &[u8] = &[0xcf, 0x7a, 0x89, 0x65];
|
pub const STEAL_FN_SIGNATURE: &[u8] = &[0xcf, 0x7a, 0x89, 0x65];
|
||||||
|
|
||||||
pub fn run<T: EthSpec>(mut env: Environment<T>, matches: &ArgMatches<'_>) -> Result<(), String> {
|
pub fn run<T: EthSpec>(env: Environment<T>, matches: &ArgMatches<'_>) -> Result<(), String> {
|
||||||
let eth1_ipc_path: PathBuf = clap_utils::parse_required(matches, "eth1-ipc")?;
|
let eth1_ipc_path: PathBuf = clap_utils::parse_required(matches, "eth1-ipc")?;
|
||||||
let from: Address = clap_utils::parse_required(matches, "from-address")?;
|
let from: Address = clap_utils::parse_required(matches, "from-address")?;
|
||||||
let contract_address: Address = clap_utils::parse_required(matches, "contract-address")?;
|
let contract_address: Address = clap_utils::parse_required(matches, "contract-address")?;
|
||||||
@ -21,7 +22,8 @@ pub fn run<T: EthSpec>(mut env: Environment<T>, matches: &ArgMatches<'_>) -> Res
|
|||||||
Ipc::new(eth1_ipc_path).map_err(|e| format!("Unable to connect to eth1 IPC: {:?}", e))?;
|
Ipc::new(eth1_ipc_path).map_err(|e| format!("Unable to connect to eth1 IPC: {:?}", e))?;
|
||||||
let web3 = Web3::new(transport);
|
let web3 = Web3::new(transport);
|
||||||
|
|
||||||
env.runtime().block_on(async {
|
env.runtime().block_on(
|
||||||
|
async {
|
||||||
let _ = web3
|
let _ = web3
|
||||||
.eth()
|
.eth()
|
||||||
.send_transaction(TransactionRequest {
|
.send_transaction(TransactionRequest {
|
||||||
@ -39,5 +41,7 @@ pub fn run<T: EthSpec>(mut env: Environment<T>, matches: &ArgMatches<'_>) -> Res
|
|||||||
.map_err(|e| format!("Failed to call steal fn: {:?}", e))?;
|
.map_err(|e| format!("Failed to call steal fn: {:?}", e))?;
|
||||||
|
|
||||||
Ok(())
|
Ok(())
|
||||||
})
|
}
|
||||||
|
.compat(),
|
||||||
|
)
|
||||||
}
|
}
|
||||||
|
@ -16,7 +16,7 @@ milagro = ["bls/milagro"]
|
|||||||
|
|
||||||
[dependencies]
|
[dependencies]
|
||||||
beacon_node = { "path" = "../beacon_node" }
|
beacon_node = { "path" = "../beacon_node" }
|
||||||
tokio = "0.2.22"
|
tokio = "0.3.2"
|
||||||
slog = { version = "2.5.2", features = ["max_level_trace"] }
|
slog = { version = "2.5.2", features = ["max_level_trace"] }
|
||||||
sloggers = "1.0.1"
|
sloggers = "1.0.1"
|
||||||
types = { "path" = "../consensus/types" }
|
types = { "path" = "../consensus/types" }
|
||||||
@ -28,7 +28,7 @@ slog-term = "2.6.0"
|
|||||||
slog-async = "2.5.0"
|
slog-async = "2.5.0"
|
||||||
environment = { path = "./environment" }
|
environment = { path = "./environment" }
|
||||||
boot_node = { path = "../boot_node" }
|
boot_node = { path = "../boot_node" }
|
||||||
futures = "0.3.5"
|
futures = "0.3.7"
|
||||||
validator_client = { "path" = "../validator_client" }
|
validator_client = { "path" = "../validator_client" }
|
||||||
account_manager = { "path" = "../account_manager" }
|
account_manager = { "path" = "../account_manager" }
|
||||||
clap_utils = { path = "../common/clap_utils" }
|
clap_utils = { path = "../common/clap_utils" }
|
||||||
@ -37,6 +37,7 @@ directory = { path = "../common/directory" }
|
|||||||
lighthouse_version = { path = "../common/lighthouse_version" }
|
lighthouse_version = { path = "../common/lighthouse_version" }
|
||||||
account_utils = { path = "../common/account_utils" }
|
account_utils = { path = "../common/account_utils" }
|
||||||
remote_signer = { "path" = "../remote_signer" }
|
remote_signer = { "path" = "../remote_signer" }
|
||||||
|
tokio-compat-02 = "0.1"
|
||||||
|
|
||||||
[dev-dependencies]
|
[dev-dependencies]
|
||||||
tempfile = "3.1.0"
|
tempfile = "3.1.0"
|
||||||
|
@ -5,7 +5,7 @@ authors = ["Paul Hauner <paul@paulhauner.com>"]
|
|||||||
edition = "2018"
|
edition = "2018"
|
||||||
|
|
||||||
[dependencies]
|
[dependencies]
|
||||||
tokio = { version = "0.2.22", features = ["macros"] }
|
tokio = { version = "0.3.2", features = ["macros", "rt", "rt-multi-thread" ] }
|
||||||
slog = { version = "2.5.2", features = ["max_level_trace"] }
|
slog = { version = "2.5.2", features = ["max_level_trace"] }
|
||||||
sloggers = "1.0.1"
|
sloggers = "1.0.1"
|
||||||
types = { "path" = "../../consensus/types" }
|
types = { "path" = "../../consensus/types" }
|
||||||
@ -16,7 +16,7 @@ logging = { path = "../../common/logging" }
|
|||||||
slog-term = "2.6.0"
|
slog-term = "2.6.0"
|
||||||
slog-async = "2.5.0"
|
slog-async = "2.5.0"
|
||||||
ctrlc = { version = "3.1.6", features = ["termination"] }
|
ctrlc = { version = "3.1.6", features = ["termination"] }
|
||||||
futures = "0.3.5"
|
futures = "0.3.7"
|
||||||
parking_lot = "0.11.0"
|
parking_lot = "0.11.0"
|
||||||
slog-json = "2.3.0"
|
slog-json = "2.3.0"
|
||||||
exit-future = "0.2.0"
|
exit-future = "0.2.0"
|
||||||
|
@ -15,12 +15,13 @@ use futures::channel::{
};
use futures::{future, StreamExt};

use slog::{error, info, o, Drain, Level, Logger};
use slog::{error, info, o, warn, Drain, Level, Logger};
use sloggers::{null::NullLoggerBuilder, Build};
use std::cell::RefCell;
use std::ffi::OsStr;
use std::fs::{rename as FsRename, OpenOptions};
use std::path::PathBuf;
use std::sync::Arc;
use std::time::{SystemTime, UNIX_EPOCH};
use task_executor::TaskExecutor;
use tokio::runtime::{Builder as RuntimeBuilder, Runtime};

@ -29,11 +30,11 @@ use types::{EthSpec, MainnetEthSpec, MinimalEthSpec, V012LegacyEthSpec};
pub const ETH2_CONFIG_FILENAME: &str = "eth2-spec.toml";
const LOG_CHANNEL_SIZE: usize = 2048;
/// The maximum time in seconds the client will wait for all internal tasks to shutdown.
const MAXIMUM_SHUTDOWN_TIME: u64 = 3;
const MAXIMUM_SHUTDOWN_TIME: u64 = 15;

/// Builds an `Environment`.
pub struct EnvironmentBuilder<E: EthSpec> {
runtime: Option<Runtime>,
runtime: Option<Arc<Runtime>>,
log: Option<Logger>,
eth_spec_instance: E,
eth2_config: Eth2Config,

@ -84,28 +85,12 @@ impl<E: EthSpec> EnvironmentBuilder<E> {
///
/// The `Runtime` used is just the standard tokio runtime.
pub fn multi_threaded_tokio_runtime(mut self) -> Result<Self, String> {
self.runtime = Some(
self.runtime = Some(Arc::new(
RuntimeBuilder::new()
RuntimeBuilder::new_multi_thread()
.threaded_scheduler()
.enable_all()
.build()
.map_err(|e| format!("Failed to start runtime: {:?}", e))?,
);
));
Ok(self)
}

/// Specifies that a single-threaded tokio runtime should be used. Ideal for testing purposes
/// where tests are already multi-threaded.
///
/// This can solve problems if "too many open files" errors are thrown during tests.
pub fn single_thread_tokio_runtime(mut self) -> Result<Self, String> {
self.runtime = Some(
RuntimeBuilder::new()
.basic_scheduler()
.enable_all()
.build()
.map_err(|e| format!("Failed to start runtime: {:?}", e))?,
);
Ok(self)
}

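The builder change above is forced by tokio 0.3's API: the scheduler is now chosen by the constructor (`Builder::new_multi_thread` / `Builder::new_current_thread`) rather than by the old `threaded_scheduler()` / `basic_scheduler()` methods, and the single-threaded helper is simply dropped here. A minimal sketch of both constructors (the error strings are illustrative):

```rust
use tokio::runtime::Builder;

fn main() -> Result<(), String> {
    // tokio 0.2: Builder::new().threaded_scheduler().enable_all().build()
    let multi = Builder::new_multi_thread()
        .enable_all()
        .build()
        .map_err(|e| format!("Failed to start runtime: {:?}", e))?;

    // tokio 0.2: Builder::new().basic_scheduler().enable_all().build()
    let single = Builder::new_current_thread()
        .enable_all()
        .build()
        .map_err(|e| format!("Failed to start runtime: {:?}", e))?;

    multi.block_on(async { println!("multi-thread runtime is up") });
    single.block_on(async { println!("current-thread runtime is up") });
    Ok(())
}
```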
@ -329,7 +314,7 @@ impl<E: EthSpec> RuntimeContext<E> {
/// An environment where Lighthouse services can run. Used to start a production beacon node or
/// validator client, or to run tests that involve logging and async task execution.
pub struct Environment<E: EthSpec> {
runtime: Runtime,
runtime: Arc<Runtime>,
/// Receiver side of an internal shutdown signal.
signal_rx: Option<Receiver<&'static str>>,
/// Sender to request shutting down.

@ -347,15 +332,15 @@ impl<E: EthSpec> Environment<E> {
///
/// Useful in the rare scenarios where it's necessary to block the current thread until a task
/// is finished (e.g., during testing).
pub fn runtime(&mut self) -> &mut Runtime {
pub fn runtime(&self) -> &Arc<Runtime> {
&mut self.runtime
&self.runtime
}

/// Returns a `Context` where no "service" has been added to the logger output.
pub fn core_context(&mut self) -> RuntimeContext<E> {
RuntimeContext {
executor: TaskExecutor::new(
self.runtime().handle().clone(),
Arc::downgrade(self.runtime()),
self.exit.clone(),
self.log.clone(),
self.signal_tx.clone(),

@ -369,7 +354,7 @@ impl<E: EthSpec> Environment<E> {
pub fn service_context(&mut self, service_name: String) -> RuntimeContext<E> {
RuntimeContext {
executor: TaskExecutor::new(
self.runtime().handle().clone(),
Arc::downgrade(self.runtime()),
self.exit.clone(),
self.log.new(o!("service" => service_name)),
self.signal_tx.clone(),

@ -425,8 +410,16 @@ impl<E: EthSpec> Environment<E> {

/// Shutdown the `tokio` runtime when all tasks are idle.
pub fn shutdown_on_idle(self) {
self.runtime
match Arc::try_unwrap(self.runtime) {
.shutdown_timeout(std::time::Duration::from_secs(MAXIMUM_SHUTDOWN_TIME))
Ok(runtime) => {
runtime.shutdown_timeout(std::time::Duration::from_secs(MAXIMUM_SHUTDOWN_TIME))
}
Err(e) => warn!(
self.log,
"Failed to obtain runtime access to shutdown gracefully";
"error" => ?e
),
}
}

/// Fire exit signal which shuts down all spawned services

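Because executors only ever receive `Weak<Runtime>` handles (via `Arc::downgrade` above), the environment is expected to hold the last strong reference by shutdown time; `Arc::try_unwrap` makes that assumption explicit and logs a warning instead of panicking when it does not hold. A compact sketch of the ownership flow, outside the Lighthouse types:

```rust
use std::sync::Arc;
use std::time::Duration;
use tokio::runtime::Runtime;

fn main() {
    let runtime = Arc::new(Runtime::new().expect("should build runtime"));

    // Services only see weak handles, so they cannot extend the runtime's
    // lifetime past the environment's shutdown.
    let weak = Arc::downgrade(&runtime);
    if let Some(rt) = weak.upgrade() {
        rt.spawn(async { /* background work */ });
    }

    // Graceful shutdown needs sole ownership back.
    match Arc::try_unwrap(runtime) {
        Ok(runtime) => runtime.shutdown_timeout(Duration::from_secs(15)),
        Err(_) => eprintln!("a strong runtime reference is still alive; skipping graceful shutdown"),
    }
}
```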
@ -7,7 +7,7 @@ use types::{V012LegacyEthSpec, YamlConfig};
|
|||||||
|
|
||||||
fn builder() -> EnvironmentBuilder<V012LegacyEthSpec> {
|
fn builder() -> EnvironmentBuilder<V012LegacyEthSpec> {
|
||||||
EnvironmentBuilder::v012_legacy()
|
EnvironmentBuilder::v012_legacy()
|
||||||
.single_thread_tokio_runtime()
|
.multi_threaded_tokio_runtime()
|
||||||
.expect("should set runtime")
|
.expect("should set runtime")
|
||||||
.null_logger()
|
.null_logger()
|
||||||
.expect("should set logger")
|
.expect("should set logger")
|
||||||
|
@ -7,6 +7,7 @@ use lighthouse_version::VERSION;
|
|||||||
use slog::{crit, info, warn};
|
use slog::{crit, info, warn};
|
||||||
use std::path::PathBuf;
|
use std::path::PathBuf;
|
||||||
use std::process::exit;
|
use std::process::exit;
|
||||||
|
use tokio_compat_02::FutureExt;
|
||||||
use types::{EthSpec, EthSpecId};
|
use types::{EthSpec, EthSpecId};
|
||||||
use validator_client::ProductionValidatorClient;
|
use validator_client::ProductionValidatorClient;
|
||||||
|
|
||||||
@ -280,7 +281,8 @@ fn run<E: EthSpec>(
|
|||||||
&context.eth2_config().spec,
|
&context.eth2_config().spec,
|
||||||
context.log().clone(),
|
context.log().clone(),
|
||||||
)?;
|
)?;
|
||||||
environment.runtime().spawn(async move {
|
environment.runtime().spawn(
|
||||||
|
async move {
|
||||||
if let Err(e) = ProductionBeaconNode::new(context.clone(), config).await {
|
if let Err(e) = ProductionBeaconNode::new(context.clone(), config).await {
|
||||||
crit!(log, "Failed to start beacon node"; "reason" => e);
|
crit!(log, "Failed to start beacon node"; "reason" => e);
|
||||||
// Ignore the error since it always occurs during normal operation when
|
// Ignore the error since it always occurs during normal operation when
|
||||||
@ -289,7 +291,9 @@ fn run<E: EthSpec>(
|
|||||||
.shutdown_sender()
|
.shutdown_sender()
|
||||||
.try_send("Failed to start beacon node");
|
.try_send("Failed to start beacon node");
|
||||||
}
|
}
|
||||||
});
|
}
|
||||||
|
.compat(),
|
||||||
|
);
|
||||||
}
|
}
|
||||||
("validator_client", Some(matches)) => {
|
("validator_client", Some(matches)) => {
|
||||||
let context = environment.core_context();
|
let context = environment.core_context();
|
||||||
@ -297,7 +301,8 @@ fn run<E: EthSpec>(
|
|||||||
let executor = context.executor.clone();
|
let executor = context.executor.clone();
|
||||||
let config = validator_client::Config::from_cli(&matches, context.log())
|
let config = validator_client::Config::from_cli(&matches, context.log())
|
||||||
.map_err(|e| format!("Unable to initialize validator config: {}", e))?;
|
.map_err(|e| format!("Unable to initialize validator config: {}", e))?;
|
||||||
environment.runtime().spawn(async move {
|
environment.runtime().spawn(
|
||||||
|
async move {
|
||||||
let run = async {
|
let run = async {
|
||||||
ProductionValidatorClient::new(context, config)
|
ProductionValidatorClient::new(context, config)
|
||||||
.await?
|
.await?
|
||||||
@ -313,7 +318,9 @@ fn run<E: EthSpec>(
|
|||||||
.shutdown_sender()
|
.shutdown_sender()
|
||||||
.try_send("Failed to start validator client");
|
.try_send("Failed to start validator client");
|
||||||
}
|
}
|
||||||
});
|
}
|
||||||
|
.compat(),
|
||||||
|
);
|
||||||
}
|
}
|
||||||
("remote_signer", Some(matches)) => {
|
("remote_signer", Some(matches)) => {
|
||||||
if let Err(e) = remote_signer::run(&mut environment, matches) {
|
if let Err(e) = remote_signer::run(&mut environment, matches) {
|
||||||
|
@ -9,7 +9,7 @@ clap = "2.33.3"
|
|||||||
client_backend = { path = "../backend", package = "remote_signer_backend" }
|
client_backend = { path = "../backend", package = "remote_signer_backend" }
|
||||||
environment = { path = "../../lighthouse/environment" }
|
environment = { path = "../../lighthouse/environment" }
|
||||||
futures = "0.3.6"
|
futures = "0.3.6"
|
||||||
hyper = "0.13.8"
|
hyper = { git = "https://github.com/sigp/hyper", branch = "lighthouse" }
|
||||||
lazy_static = "1.4.0"
|
lazy_static = "1.4.0"
|
||||||
regex = "1.3.9"
|
regex = "1.3.9"
|
||||||
serde = { version = "1.0.116", features = ["derive"] }
|
serde = { version = "1.0.116", features = ["derive"] }
|
||||||
|
@ -58,16 +58,14 @@ impl<E: EthSpec, S: 'static + Send + Sync> Handler<E, S> {
let (req_parts, _) = self.req.into_parts();
let req = Request::from_parts(req_parts, body);

// NOTE: The task executor now holds a weak reference to the global runtime. On shutdown
// there may be no runtime available.
// All these edge cases must be handled here.
let value = executor
.runtime_handle()
.spawn_blocking_handle(move || func(req, ctx), "remote_signer_request")
.spawn_blocking(move || func(req, ctx))
.ok_or_else(|| ApiError::ServerError("Runtime does not exist".to_string()))?
.await
.map_err(|e| {
.map_err(|_| ApiError::ServerError("Panic during execution".to_string()))??;
ApiError::ServerError(format!(
"Failed to get blocking join handle: {}",
e.to_string()
))
})??;

Ok(HandledRequest { value })
}

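The double `??` above unpacks two failure layers: `spawn_blocking_handle` returns `None` when the runtime has already gone away, and the awaited handle yields a `JoinError` if the blocking closure panicked. A sketch of the same shape, with a free function standing in for `TaskExecutor::spawn_blocking_handle` and illustrative error strings:

```rust
use std::sync::{Arc, Weak};
use tokio::runtime::Runtime;
use tokio::task::JoinError;

/// Stand-in for `spawn_blocking_handle`: `None` means "no runtime any more",
/// `Err(JoinError)` means the blocking task panicked or was cancelled.
fn spawn_blocking_handle<F, R>(
    runtime: &Weak<Runtime>,
    task: F,
) -> Option<impl std::future::Future<Output = Result<R, JoinError>>>
where
    F: FnOnce() -> R + Send + 'static,
    R: Send + 'static,
{
    Some(runtime.upgrade()?.spawn_blocking(task))
}

fn main() -> Result<(), String> {
    let runtime = Arc::new(Runtime::new().map_err(|e| format!("{:?}", e))?);
    let weak = Arc::downgrade(&runtime);

    let value: u32 = runtime.block_on(async {
        spawn_blocking_handle(&weak, || 21 * 2)
            .ok_or_else(|| "Runtime does not exist".to_string())?
            .await
            .map_err(|_| "Panic during execution".to_string())
    })?;

    println!("handled value: {}", value);
    Ok(())
}
```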
@ -5,9 +5,9 @@ authors = ["Paul Hauner <paul@paulhauner.com>"]
edition = "2018"

[dependencies]
tokio = { version = "0.2.22", features = ["time"] }
tokio = { version = "0.3.2", features = ["time"] }
web3 = "0.11.0"
futures = { version = "0.3.5", features = ["compat"] }
futures = { version = "0.3.7", features = ["compat"] }
types = { path = "../../consensus/types"}
serde_json = "1.0.58"
deposit_contract = { path = "../../common/deposit_contract"}

@ -13,7 +13,7 @@ use deposit_contract::{
use futures::compat::Future01CompatExt;
use ganache::GanacheInstance;
use std::time::Duration;
use tokio::time::delay_for;
use tokio::time::sleep;
use types::DepositData;
use types::{test_utils::generate_deterministic_keypair, EthSpec, Hash256, Keypair, Signature};
use web3::contract::{Contract, Options};

@ -220,7 +220,7 @@ impl DepositContract {
/// Peforms many deposits, each preceded by a delay.
pub async fn deposit_multiple(&self, deposits: Vec<DelayThenDeposit>) -> Result<(), String> {
for deposit in deposits.into_iter() {
delay_for(deposit.delay).await;
sleep(deposit.delay).await;
self.deposit_async(deposit.deposit).await?;
}
Ok(())

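Most of the churn in the test rigs and the validator client below is mechanical: tokio 0.3 renamed `delay_for` to `sleep` and `delay_until` to `sleep_until`. For reference:

```rust
use std::time::Duration;
use tokio::time::{sleep, sleep_until, Instant};

#[tokio::main]
async fn main() {
    // tokio 0.2: delay_for(duration)  ->  tokio 0.3: sleep(duration)
    sleep(Duration::from_millis(10)).await;

    // tokio 0.2: delay_until(instant) ->  tokio 0.3: sleep_until(instant)
    // An instant already in the past resolves immediately.
    sleep_until(Instant::now()).await;

    println!("both timers fired");
}
```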
@ -13,7 +13,7 @@ tempdir = "0.3.7"
|
|||||||
reqwest = { version = "0.10.8", features = ["native-tls-vendored"] }
|
reqwest = { version = "0.10.8", features = ["native-tls-vendored"] }
|
||||||
url = "2.1.1"
|
url = "2.1.1"
|
||||||
serde = "1.0.116"
|
serde = "1.0.116"
|
||||||
futures = "0.3.5"
|
futures = "0.3.7"
|
||||||
genesis = { path = "../../beacon_node/genesis" }
|
genesis = { path = "../../beacon_node/genesis" }
|
||||||
eth2 = { path = "../../common/eth2" }
|
eth2 = { path = "../../common/eth2" }
|
||||||
validator_client = { path = "../../validator_client" }
|
validator_client = { path = "../../validator_client" }
|
||||||
|
@ -86,6 +86,7 @@ pub fn testing_client_config() -> ClientConfig {
|
|||||||
// Setting ports to `0` means that the OS will choose some available port.
|
// Setting ports to `0` means that the OS will choose some available port.
|
||||||
client_config.network.libp2p_port = 0;
|
client_config.network.libp2p_port = 0;
|
||||||
client_config.network.discovery_port = 0;
|
client_config.network.discovery_port = 0;
|
||||||
|
client_config.network.upnp_enabled = false;
|
||||||
client_config.http_api.enabled = true;
|
client_config.http_api.enabled = true;
|
||||||
client_config.http_api.listen_port = 0;
|
client_config.http_api.listen_port = 0;
|
||||||
client_config.websocket_server.enabled = true;
|
client_config.websocket_server.enabled = true;
|
||||||
|
@ -12,9 +12,10 @@ eth1 = {path = "../../beacon_node/eth1"}
|
|||||||
types = { path = "../../consensus/types" }
|
types = { path = "../../consensus/types" }
|
||||||
validator_client = { path = "../../validator_client" }
|
validator_client = { path = "../../validator_client" }
|
||||||
parking_lot = "0.11.0"
|
parking_lot = "0.11.0"
|
||||||
futures = "0.3.5"
|
futures = "0.3.7"
|
||||||
tokio = "0.2.22"
|
tokio = "0.3.2"
|
||||||
eth1_test_rig = { path = "../eth1_test_rig" }
|
eth1_test_rig = { path = "../eth1_test_rig" }
|
||||||
env_logger = "0.7.1"
|
env_logger = "0.7.1"
|
||||||
clap = "2.33.3"
|
clap = "2.33.3"
|
||||||
rayon = "1.4.1"
|
rayon = "1.4.1"
|
||||||
|
tokio-compat-02 = "0.1"
|
||||||
|
@ -46,13 +46,13 @@ pub async fn verify_first_finalization<E: EthSpec>(
|
|||||||
/// Delays for `epochs`, plus half a slot extra.
|
/// Delays for `epochs`, plus half a slot extra.
|
||||||
pub async fn epoch_delay(epochs: Epoch, slot_duration: Duration, slots_per_epoch: u64) {
|
pub async fn epoch_delay(epochs: Epoch, slot_duration: Duration, slots_per_epoch: u64) {
|
||||||
let duration = slot_duration * (epochs.as_u64() * slots_per_epoch) as u32 + slot_duration / 2;
|
let duration = slot_duration * (epochs.as_u64() * slots_per_epoch) as u32 + slot_duration / 2;
|
||||||
tokio::time::delay_for(duration).await
|
tokio::time::sleep(duration).await
|
||||||
}
|
}
|
||||||
|
|
||||||
/// Delays for `slots`, plus half a slot extra.
|
/// Delays for `slots`, plus half a slot extra.
|
||||||
async fn slot_delay(slots: Slot, slot_duration: Duration) {
|
async fn slot_delay(slots: Slot, slot_duration: Duration) {
|
||||||
let duration = slot_duration * slots.as_u64() as u32 + slot_duration / 2;
|
let duration = slot_duration * slots.as_u64() as u32 + slot_duration / 2;
|
||||||
tokio::time::delay_for(duration).await;
|
tokio::time::sleep(duration).await;
|
||||||
}
|
}
|
||||||
|
|
||||||
/// Verifies that all beacon nodes in the given network have a head state that has a finalized
|
/// Verifies that all beacon nodes in the given network have a head state that has a finalized
|
||||||
|
@ -197,6 +197,12 @@ pub fn run_eth1_sim(matches: &ArgMatches) -> Result<(), String> {
|
|||||||
Ok::<(), String>(())
|
Ok::<(), String>(())
|
||||||
};
|
};
|
||||||
|
|
||||||
env.runtime().block_on(main_future).unwrap();
|
env.runtime()
|
||||||
|
.block_on(tokio_compat_02::FutureExt::compat(main_future))
|
||||||
|
.unwrap();
|
||||||
|
|
||||||
|
env.fire_signal();
|
||||||
|
env.shutdown_on_idle();
|
||||||
|
|
||||||
Ok(())
|
Ok(())
|
||||||
}
|
}
|
||||||
|
@ -8,7 +8,7 @@ use node_test_rig::{
|
|||||||
use rayon::prelude::*;
|
use rayon::prelude::*;
|
||||||
use std::net::{IpAddr, Ipv4Addr};
|
use std::net::{IpAddr, Ipv4Addr};
|
||||||
use std::time::{Duration, SystemTime, UNIX_EPOCH};
|
use std::time::{Duration, SystemTime, UNIX_EPOCH};
|
||||||
use tokio::time::{delay_until, Instant};
|
use tokio::time::{sleep_until, Instant};
|
||||||
use types::{Epoch, EthSpec, MainnetEthSpec};
|
use types::{Epoch, EthSpec, MainnetEthSpec};
|
||||||
|
|
||||||
pub fn run_no_eth1_sim(matches: &ArgMatches) -> Result<(), String> {
|
pub fn run_no_eth1_sim(matches: &ArgMatches) -> Result<(), String> {
|
||||||
@ -111,7 +111,7 @@ pub fn run_no_eth1_sim(matches: &ArgMatches) -> Result<(), String> {
|
|||||||
* The processes that will run checks on the network as it runs.
|
* The processes that will run checks on the network as it runs.
|
||||||
*/
|
*/
|
||||||
let checks_fut = async {
|
let checks_fut = async {
|
||||||
delay_until(genesis_instant).await;
|
sleep_until(genesis_instant).await;
|
||||||
|
|
||||||
let (finalization, block_prod) = futures::join!(
|
let (finalization, block_prod) = futures::join!(
|
||||||
// Check that the chain finalizes at the first given opportunity.
|
// Check that the chain finalizes at the first given opportunity.
|
||||||
@ -156,6 +156,11 @@ pub fn run_no_eth1_sim(matches: &ArgMatches) -> Result<(), String> {
|
|||||||
Ok::<(), String>(())
|
Ok::<(), String>(())
|
||||||
};
|
};
|
||||||
|
|
||||||
env.runtime().block_on(main_future).unwrap();
|
env.runtime()
|
||||||
|
.block_on(tokio_compat_02::FutureExt::compat(main_future))
|
||||||
|
.unwrap();
|
||||||
|
|
||||||
|
env.fire_signal();
|
||||||
|
env.shutdown_on_idle();
|
||||||
Ok(())
|
Ok(())
|
||||||
}
|
}
|
||||||
|
@ -9,9 +9,10 @@ name = "validator_client"
|
|||||||
path = "src/lib.rs"
|
path = "src/lib.rs"
|
||||||
|
|
||||||
[dev-dependencies]
|
[dev-dependencies]
|
||||||
tokio = { version = "0.2.22", features = ["time", "rt-threaded", "macros"] }
|
tokio = { version = "0.3.2", features = ["time", "rt-multi-thread", "macros"] }
|
||||||
tempfile = "3.1.0"
|
tempfile = "3.1.0"
|
||||||
deposit_contract = { path = "../common/deposit_contract" }
|
deposit_contract = { path = "../common/deposit_contract" }
|
||||||
|
tokio-compat-02 = "0.1"
|
||||||
|
|
||||||
[dependencies]
|
[dependencies]
|
||||||
eth2_ssz = "0.1.2"
|
eth2_ssz = "0.1.2"
|
||||||
@ -30,8 +31,8 @@ serde_yaml = "0.8.13"
|
|||||||
slog = { version = "2.5.2", features = ["max_level_trace", "release_max_level_trace"] }
|
slog = { version = "2.5.2", features = ["max_level_trace", "release_max_level_trace"] }
|
||||||
slog-async = "2.5.0"
|
slog-async = "2.5.0"
|
||||||
slog-term = "2.6.0"
|
slog-term = "2.6.0"
|
||||||
tokio = { version = "0.2.22", features = ["time"] }
|
tokio = { version = "0.3.2", features = ["time"] }
|
||||||
futures = { version = "0.3.5", features = ["compat"] }
|
futures = { version = "0.3.7", features = ["compat"] }
|
||||||
dirs = "3.0.1"
|
dirs = "3.0.1"
|
||||||
directory = { path = "../common/directory" }
|
directory = { path = "../common/directory" }
|
||||||
lockfile = { path = "../common/lockfile" }
|
lockfile = { path = "../common/lockfile" }
|
||||||
@ -53,7 +54,7 @@ eth2_keystore = { path = "../crypto/eth2_keystore" }
|
|||||||
account_utils = { path = "../common/account_utils" }
|
account_utils = { path = "../common/account_utils" }
|
||||||
lighthouse_version = { path = "../common/lighthouse_version" }
|
lighthouse_version = { path = "../common/lighthouse_version" }
|
||||||
warp_utils = { path = "../common/warp_utils" }
|
warp_utils = { path = "../common/warp_utils" }
|
||||||
warp = { git = "https://github.com/paulhauner/warp", branch = "cors-wildcard" }
|
warp = { git = "https://github.com/sigp/warp ", branch = "lighthouse" }
|
||||||
hyper = "0.13.8"
|
hyper = "0.13.8"
|
||||||
serde_utils = { path = "../consensus/serde_utils" }
|
serde_utils = { path = "../consensus/serde_utils" }
|
||||||
libsecp256k1 = "0.3.5"
|
libsecp256k1 = "0.3.5"
|
||||||
|
@ -5,13 +5,14 @@ use crate::{
|
|||||||
};
|
};
|
||||||
use environment::RuntimeContext;
|
use environment::RuntimeContext;
|
||||||
use eth2::BeaconNodeHttpClient;
|
use eth2::BeaconNodeHttpClient;
|
||||||
|
use futures::future::FutureExt;
|
||||||
use futures::StreamExt;
|
use futures::StreamExt;
|
||||||
use slog::{crit, error, info, trace};
|
use slog::{crit, error, info, trace};
|
||||||
use slot_clock::SlotClock;
|
use slot_clock::SlotClock;
|
||||||
use std::collections::HashMap;
|
use std::collections::HashMap;
|
||||||
use std::ops::Deref;
|
use std::ops::Deref;
|
||||||
use std::sync::Arc;
|
use std::sync::Arc;
|
||||||
use tokio::time::{delay_until, interval_at, Duration, Instant};
|
use tokio::time::{interval_at, sleep_until, Duration, Instant};
|
||||||
use tree_hash::TreeHash;
|
use tree_hash::TreeHash;
|
||||||
use types::{
|
use types::{
|
||||||
AggregateSignature, Attestation, AttestationData, BitList, ChainSpec, CommitteeIndex, EthSpec,
|
AggregateSignature, Attestation, AttestationData, BitList, ChainSpec, CommitteeIndex, EthSpec,
|
||||||
@ -211,13 +212,16 @@ impl<T: SlotClock + 'static, E: EthSpec> AttestationService<T, E> {
|
|||||||
.into_iter()
|
.into_iter()
|
||||||
.for_each(|(committee_index, validator_duties)| {
|
.for_each(|(committee_index, validator_duties)| {
|
||||||
// Spawn a separate task for each attestation.
|
// Spawn a separate task for each attestation.
|
||||||
self.inner.context.executor.runtime_handle().spawn(
|
self.inner.context.executor.spawn(
|
||||||
self.clone().publish_attestations_and_aggregates(
|
self.clone()
|
||||||
|
.publish_attestations_and_aggregates(
|
||||||
slot,
|
slot,
|
||||||
committee_index,
|
committee_index,
|
||||||
validator_duties,
|
validator_duties,
|
||||||
aggregate_production_instant,
|
aggregate_production_instant,
|
||||||
),
|
)
|
||||||
|
.map(|_| ()),
|
||||||
|
"attestation publish",
|
||||||
);
|
);
|
||||||
});
|
});
|
||||||
|
|
||||||
@ -278,7 +282,7 @@ impl<T: SlotClock + 'static, E: EthSpec> AttestationService<T, E> {
|
|||||||
// of the way though the slot). As verified in the
|
// of the way though the slot). As verified in the
|
||||||
// `delay_triggers_when_in_the_past` test, this code will still run
|
// `delay_triggers_when_in_the_past` test, this code will still run
|
||||||
// even if the instant has already elapsed.
|
// even if the instant has already elapsed.
|
||||||
delay_until(aggregate_production_instant).await;
|
sleep_until(aggregate_production_instant).await;
|
||||||
|
|
||||||
// Start the metrics timer *after* we've done the delay.
|
// Start the metrics timer *after* we've done the delay.
|
||||||
let _aggregates_timer = metrics::start_timer_vec(
|
let _aggregates_timer = metrics::start_timer_vec(
|
||||||
@ -552,7 +556,7 @@ mod tests {
|
|||||||
use futures::future::FutureExt;
|
use futures::future::FutureExt;
|
||||||
use parking_lot::RwLock;
|
use parking_lot::RwLock;
|
||||||
|
|
||||||
/// This test is to ensure that a `tokio_timer::Delay` with an instant in the past will still
|
/// This test is to ensure that a `tokio_timer::Sleep` with an instant in the past will still
|
||||||
/// trigger.
|
/// trigger.
|
||||||
#[tokio::test]
|
#[tokio::test]
|
||||||
async fn delay_triggers_when_in_the_past() {
|
async fn delay_triggers_when_in_the_past() {
|
||||||
@ -560,7 +564,7 @@ mod tests {
|
|||||||
let state_1 = Arc::new(RwLock::new(in_the_past));
|
let state_1 = Arc::new(RwLock::new(in_the_past));
|
||||||
let state_2 = state_1.clone();
|
let state_2 = state_1.clone();
|
||||||
|
|
||||||
delay_until(in_the_past)
|
sleep_until(in_the_past)
|
||||||
.map(move |()| *state_1.write() = Instant::now())
|
.map(move |()| *state_1.write() = Instant::now())
|
||||||
.await;
|
.await;
|
||||||
|
|
||||||
|
@ -188,21 +188,22 @@ impl<T: SlotClock + 'static, E: EthSpec> BlockService<T, E> {
|
|||||||
)
|
)
|
||||||
}
|
}
|
||||||
|
|
||||||
proposers.into_iter().for_each(|validator_pubkey| {
|
for validator_pubkey in proposers {
|
||||||
let service = self.clone();
|
let service = self.clone();
|
||||||
let log = log.clone();
|
let log = log.clone();
|
||||||
self.inner.context.executor.runtime_handle().spawn(
|
self.inner.context.executor.spawn(
|
||||||
service
|
service
|
||||||
.publish_block(slot, validator_pubkey)
|
.publish_block(slot, validator_pubkey)
|
||||||
.map_err(move |e| {
|
.unwrap_or_else(move |e| {
|
||||||
crit!(
|
crit!(
|
||||||
log,
|
log,
|
||||||
"Error whilst producing block";
|
"Error whilst producing block";
|
||||||
"message" => e
|
"message" => e
|
||||||
)
|
|
||||||
}),
|
|
||||||
);
|
);
|
||||||
});
|
}),
|
||||||
|
"block service",
|
||||||
|
);
|
||||||
|
}
|
||||||
|
|
||||||
Ok(())
|
Ok(())
|
||||||
}
|
}
|
||||||
|
@ -481,15 +481,14 @@ impl<T: SlotClock + 'static, E: EthSpec> DutiesService<T, E> {
|
|||||||
let duties_service = self.clone();
|
let duties_service = self.clone();
|
||||||
let mut block_service_tx_clone = block_service_tx.clone();
|
let mut block_service_tx_clone = block_service_tx.clone();
|
||||||
let inner_spec = spec.clone();
|
let inner_spec = spec.clone();
|
||||||
self.inner
|
self.inner.context.executor.spawn(
|
||||||
.context
|
async move {
|
||||||
.executor
|
|
||||||
.runtime_handle()
|
|
||||||
.spawn(async move {
|
|
||||||
duties_service
|
duties_service
|
||||||
.do_update(&mut block_service_tx_clone, &inner_spec)
|
.do_update(&mut block_service_tx_clone, &inner_spec)
|
||||||
.await
|
.await
|
||||||
});
|
},
|
||||||
|
"duties update",
|
||||||
|
);
|
||||||
|
|
||||||
let executor = self.inner.context.executor.clone();
|
let executor = self.inner.context.executor.clone();
|
||||||
|
|
||||||
|
@ -1,6 +1,7 @@
|
|||||||
use crate::http_metrics::metrics;
|
use crate::http_metrics::metrics;
|
||||||
use environment::RuntimeContext;
|
use environment::RuntimeContext;
|
||||||
use eth2::{types::StateId, BeaconNodeHttpClient};
|
use eth2::{types::StateId, BeaconNodeHttpClient};
|
||||||
|
use futures::future::FutureExt;
|
||||||
use futures::StreamExt;
|
use futures::StreamExt;
|
||||||
use parking_lot::RwLock;
|
use parking_lot::RwLock;
|
||||||
use slog::Logger;
|
use slog::Logger;
|
||||||
@ -144,8 +145,7 @@ impl<T: SlotClock + 'static> ForkService<T> {
|
|||||||
// Run an immediate update before starting the updater service.
|
// Run an immediate update before starting the updater service.
|
||||||
context
|
context
|
||||||
.executor
|
.executor
|
||||||
.runtime_handle()
|
.spawn(self.clone().do_update().map(|_| ()), "fork service update");
|
||||||
.spawn(self.clone().do_update());
|
|
||||||
|
|
||||||
let executor = context.executor.clone();
|
let executor = context.executor.clone();
|
||||||
|
|
||||||
|
@ -21,7 +21,7 @@ use validator_dir::Builder as ValidatorDirBuilder;
|
|||||||
///
|
///
|
||||||
/// If `key_derivation_path_offset` is supplied then the EIP-2334 validator index will start at
|
/// If `key_derivation_path_offset` is supplied then the EIP-2334 validator index will start at
|
||||||
/// this point.
|
/// this point.
|
||||||
pub fn create_validators<P: AsRef<Path>, T: 'static + SlotClock, E: EthSpec>(
|
pub async fn create_validators<P: AsRef<Path>, T: 'static + SlotClock, E: EthSpec>(
|
||||||
mnemonic_opt: Option<Mnemonic>,
|
mnemonic_opt: Option<Mnemonic>,
|
||||||
key_derivation_path_offset: Option<u32>,
|
key_derivation_path_offset: Option<u32>,
|
||||||
validator_requests: &[api_types::ValidatorRequest],
|
validator_requests: &[api_types::ValidatorRequest],
|
||||||
@ -129,12 +129,9 @@ pub fn create_validators<P: AsRef<Path>, T: 'static + SlotClock, E: EthSpec>(
|
|||||||
let voting_keystore_path = validator_dir.voting_keystore_path();
|
let voting_keystore_path = validator_dir.voting_keystore_path();
|
||||||
drop(validator_dir);
|
drop(validator_dir);
|
||||||
|
|
||||||
tokio::runtime::Handle::current()
|
validator_store
|
||||||
.block_on(validator_store.add_validator_keystore(
|
.add_validator_keystore(voting_keystore_path, voting_password_string, request.enable)
|
||||||
voting_keystore_path,
|
.await
|
||||||
voting_password_string,
|
|
||||||
request.enable,
|
|
||||||
))
|
|
||||||
.map_err(|e| {
|
.map_err(|e| {
|
||||||
warp_utils::reject::custom_server_error(format!(
|
warp_utils::reject::custom_server_error(format!(
|
||||||
"failed to initialize validator: {:?}",
|
"failed to initialize validator: {:?}",
|
||||||
|
@ -14,7 +14,8 @@ use std::future::Future;
|
|||||||
use std::marker::PhantomData;
|
use std::marker::PhantomData;
|
||||||
use std::net::{Ipv4Addr, SocketAddr, SocketAddrV4};
|
use std::net::{Ipv4Addr, SocketAddr, SocketAddrV4};
|
||||||
use std::path::PathBuf;
|
use std::path::PathBuf;
|
||||||
use std::sync::Arc;
|
use std::sync::{Arc, Weak};
|
||||||
|
use tokio::runtime::Runtime;
|
||||||
use types::{ChainSpec, EthSpec, YamlConfig};
|
use types::{ChainSpec, EthSpec, YamlConfig};
|
||||||
use validator_dir::Builder as ValidatorDirBuilder;
|
use validator_dir::Builder as ValidatorDirBuilder;
|
||||||
use warp::{
|
use warp::{
|
||||||
@ -50,6 +51,7 @@ impl From<String> for Error {
|
|||||||
///
|
///
|
||||||
/// The server will gracefully handle the case where any fields are `None`.
|
/// The server will gracefully handle the case where any fields are `None`.
|
||||||
pub struct Context<T: Clone, E: EthSpec> {
|
pub struct Context<T: Clone, E: EthSpec> {
|
||||||
|
pub runtime: Weak<Runtime>,
|
||||||
pub api_secret: ApiSecret,
|
pub api_secret: ApiSecret,
|
||||||
pub validator_store: Option<ValidatorStore<T, E>>,
|
pub validator_store: Option<ValidatorStore<T, E>>,
|
||||||
pub validator_dir: Option<PathBuf>,
|
pub validator_dir: Option<PathBuf>,
|
||||||
@ -138,6 +140,9 @@ pub fn serve<T: 'static + SlotClock + Clone, E: EthSpec>(
|
|||||||
})
|
})
|
||||||
});
|
});
|
||||||
|
|
||||||
|
let inner_runtime = ctx.runtime.clone();
|
||||||
|
let runtime_filter = warp::any().map(move || inner_runtime.clone());
|
||||||
|
|
||||||
let inner_validator_dir = ctx.validator_dir.clone();
|
let inner_validator_dir = ctx.validator_dir.clone();
|
||||||
let validator_dir_filter = warp::any()
|
let validator_dir_filter = warp::any()
|
||||||
.map(move || inner_validator_dir.clone())
|
.map(move || inner_validator_dir.clone())
|
||||||
@ -258,26 +263,34 @@ pub fn serve<T: 'static + SlotClock + Clone, E: EthSpec>(
|
|||||||
.and(validator_store_filter.clone())
|
.and(validator_store_filter.clone())
|
||||||
.and(spec_filter.clone())
|
.and(spec_filter.clone())
|
||||||
.and(signer.clone())
|
.and(signer.clone())
|
||||||
|
.and(runtime_filter.clone())
|
||||||
.and_then(
|
.and_then(
|
||||||
|body: Vec<api_types::ValidatorRequest>,
|
|body: Vec<api_types::ValidatorRequest>,
|
||||||
validator_dir: PathBuf,
|
validator_dir: PathBuf,
|
||||||
validator_store: ValidatorStore<T, E>,
|
validator_store: ValidatorStore<T, E>,
|
||||||
spec: Arc<ChainSpec>,
|
spec: Arc<ChainSpec>,
|
||||||
signer| {
|
signer,
|
||||||
|
runtime: Weak<Runtime>| {
|
||||||
blocking_signed_json_task(signer, move || {
|
blocking_signed_json_task(signer, move || {
|
||||||
let (validators, mnemonic) = create_validators(
|
if let Some(runtime) = runtime.upgrade() {
|
||||||
|
let (validators, mnemonic) = runtime.block_on(create_validators(
|
||||||
None,
|
None,
|
||||||
None,
|
None,
|
||||||
&body,
|
&body,
|
||||||
&validator_dir,
|
&validator_dir,
|
||||||
&validator_store,
|
&validator_store,
|
||||||
&spec,
|
&spec,
|
||||||
)?;
|
))?;
|
||||||
let response = api_types::PostValidatorsResponseData {
|
let response = api_types::PostValidatorsResponseData {
|
||||||
mnemonic: mnemonic.into_phrase().into(),
|
mnemonic: mnemonic.into_phrase().into(),
|
||||||
validators,
|
validators,
|
||||||
};
|
};
|
||||||
Ok(api_types::GenericResponse::from(response))
|
Ok(api_types::GenericResponse::from(response))
|
||||||
|
} else {
|
||||||
|
Err(warp_utils::reject::custom_server_error(
|
||||||
|
"Runtime shutdown".into(),
|
||||||
|
))
|
||||||
|
}
|
||||||
})
|
})
|
||||||
},
|
},
|
||||||
);
|
);
|
||||||
@ -292,25 +305,37 @@ pub fn serve<T: 'static + SlotClock + Clone, E: EthSpec>(
|
|||||||
.and(validator_store_filter.clone())
|
.and(validator_store_filter.clone())
|
||||||
.and(spec_filter)
|
.and(spec_filter)
|
||||||
.and(signer.clone())
|
.and(signer.clone())
|
||||||
|
.and(runtime_filter.clone())
|
||||||
.and_then(
|
.and_then(
|
||||||
|body: api_types::CreateValidatorsMnemonicRequest,
|
|body: api_types::CreateValidatorsMnemonicRequest,
|
||||||
validator_dir: PathBuf,
|
validator_dir: PathBuf,
|
||||||
validator_store: ValidatorStore<T, E>,
|
validator_store: ValidatorStore<T, E>,
|
||||||
spec: Arc<ChainSpec>,
|
spec: Arc<ChainSpec>,
|
||||||
signer| {
|
signer,
|
||||||
|
runtime: Weak<Runtime>| {
|
||||||
blocking_signed_json_task(signer, move || {
|
blocking_signed_json_task(signer, move || {
|
||||||
let mnemonic = mnemonic_from_phrase(body.mnemonic.as_str()).map_err(|e| {
|
if let Some(runtime) = runtime.upgrade() {
|
||||||
warp_utils::reject::custom_bad_request(format!("invalid mnemonic: {:?}", e))
|
let mnemonic =
|
||||||
|
mnemonic_from_phrase(body.mnemonic.as_str()).map_err(|e| {
|
||||||
|
warp_utils::reject::custom_bad_request(format!(
|
||||||
|
"invalid mnemonic: {:?}",
|
||||||
|
e
|
||||||
|
))
|
||||||
})?;
|
})?;
|
||||||
let (validators, _mnemonic) = create_validators(
|
let (validators, _mnemonic) = runtime.block_on(create_validators(
|
||||||
Some(mnemonic),
|
Some(mnemonic),
|
||||||
Some(body.key_derivation_path_offset),
|
Some(body.key_derivation_path_offset),
|
||||||
&body.validators,
|
&body.validators,
|
||||||
&validator_dir,
|
&validator_dir,
|
||||||
&validator_store,
|
&validator_store,
|
||||||
&spec,
|
&spec,
|
||||||
)?;
|
))?;
|
||||||
Ok(api_types::GenericResponse::from(validators))
|
Ok(api_types::GenericResponse::from(validators))
|
||||||
|
} else {
|
||||||
|
Err(warp_utils::reject::custom_server_error(
|
||||||
|
"Runtime shutdown".into(),
|
||||||
|
))
|
||||||
|
}
|
||||||
})
|
})
|
||||||
},
|
},
|
||||||
);
|
);
|
||||||
@ -324,11 +349,13 @@ pub fn serve<T: 'static + SlotClock + Clone, E: EthSpec>(
|
|||||||
.and(validator_dir_filter)
|
.and(validator_dir_filter)
|
||||||
.and(validator_store_filter.clone())
|
.and(validator_store_filter.clone())
|
||||||
.and(signer.clone())
|
.and(signer.clone())
|
||||||
|
.and(runtime_filter.clone())
|
||||||
.and_then(
|
.and_then(
|
||||||
|body: api_types::KeystoreValidatorsPostRequest,
|
|body: api_types::KeystoreValidatorsPostRequest,
|
||||||
validator_dir: PathBuf,
|
validator_dir: PathBuf,
|
||||||
validator_store: ValidatorStore<T, E>,
|
validator_store: ValidatorStore<T, E>,
|
||||||
signer| {
|
signer,
|
||||||
|
runtime: Weak<Runtime>| {
|
||||||
blocking_signed_json_task(signer, move || {
|
blocking_signed_json_task(signer, move || {
|
||||||
// Check to ensure the password is correct.
|
// Check to ensure the password is correct.
|
||||||
let keypair = body
|
let keypair = body
|
||||||
@ -357,7 +384,9 @@ pub fn serve<T: 'static + SlotClock + Clone, E: EthSpec>(
|
|||||||
drop(validator_dir);
|
drop(validator_dir);
|
||||||
let voting_password = body.password.clone();
|
let voting_password = body.password.clone();
|
||||||
|
|
||||||
let validator_def = tokio::runtime::Handle::current()
|
let validator_def = {
|
||||||
|
if let Some(runtime) = runtime.upgrade() {
|
||||||
|
runtime
|
||||||
.block_on(validator_store.add_validator_keystore(
|
.block_on(validator_store.add_validator_keystore(
|
||||||
voting_keystore_path,
|
voting_keystore_path,
|
||||||
voting_password,
|
voting_password,
|
||||||
@ -368,7 +397,13 @@ pub fn serve<T: 'static + SlotClock + Clone, E: EthSpec>(
|
|||||||
"failed to initialize validator: {:?}",
|
"failed to initialize validator: {:?}",
|
||||||
e
|
e
|
||||||
))
|
))
|
||||||
})?;
|
})?
|
||||||
|
} else {
|
||||||
|
return Err(warp_utils::reject::custom_server_error(
|
||||||
|
"Runtime shutdown".into(),
|
||||||
|
));
|
||||||
|
}
|
||||||
|
};
|
||||||
|
|
||||||
Ok(api_types::GenericResponse::from(api_types::ValidatorData {
|
Ok(api_types::GenericResponse::from(api_types::ValidatorData {
|
||||||
enabled: body.enable,
|
enabled: body.enable,
|
||||||
@@ -387,11 +422,13 @@ pub fn serve<T: 'static + SlotClock + Clone, E: EthSpec>(
        .and(warp::body::json())
        .and(validator_store_filter)
        .and(signer)
+       .and(runtime_filter)
        .and_then(
            |validator_pubkey: PublicKey,
             body: api_types::ValidatorPatchRequest,
             validator_store: ValidatorStore<T, E>,
-            signer| {
+            signer,
+            runtime: Weak<Runtime>| {
                blocking_signed_json_task(signer, move || {
                    let initialized_validators_rw_lock = validator_store.initialized_validators();
                    let mut initialized_validators = initialized_validators_rw_lock.write();
@@ -403,7 +440,8 @@ pub fn serve<T: 'static + SlotClock + Clone, E: EthSpec>(
                        ))),
                        Some(enabled) if enabled == body.enabled => Ok(()),
                        Some(_) => {
-                           tokio::runtime::Handle::current()
+                           if let Some(runtime) = runtime.upgrade() {
+                               runtime
                                    .block_on(
                                        initialized_validators
                                            .set_validator_status(&validator_pubkey, body.enabled),
@@ -414,8 +452,12 @@ pub fn serve<T: 'static + SlotClock + Clone, E: EthSpec>(
                                            e
                                        ))
                                    })?;

                                Ok(())
+                           } else {
+                               Err(warp_utils::reject::custom_server_error(
+                                   "Runtime shutdown".into(),
+                               ))
+                           }
                        }
                    }
                })
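Both endpoints pick the runtime up from a `runtime_filter`, whose definition is outside this excerpt. In warp, a value like this is usually injected with a trivial filter that clones it into every request; the following sketch is an assumption about that shape, not the actual definition from the PR:

```rust
// Hypothetical construction of a `runtime_filter`: any request extracts a
// clone of the Weak<Runtime>, which handlers can then try to upgrade.
use std::convert::Infallible;
use std::sync::Weak;
use tokio::runtime::Runtime;
use warp::Filter;

fn runtime_filter(
    runtime: Weak<Runtime>,
) -> impl Filter<Extract = (Weak<Runtime>,), Error = Infallible> + Clone {
    warp::any().map(move || runtime.clone())
}
```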
@@ -471,8 +513,8 @@ pub async fn blocking_signed_json_task<S, F, T>(
) -> Result<impl warp::Reply, warp::Rejection>
where
    S: Fn(&[u8]) -> String,
-   F: Fn() -> Result<T, warp::Rejection>,
-   T: Serialize,
+   F: Fn() -> Result<T, warp::Rejection> + Send + 'static,
+   T: Serialize + Send + 'static,
{
    warp_utils::task::blocking_task(func)
        .await
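The tightened `Send + 'static` bounds are what allow the closure and its result to be handed to tokio's blocking thread pool: `tokio::task::spawn_blocking` requires both. A self-contained sketch of a helper with that shape (assumed; the real one lives in `warp_utils::task` and takes `Fn` returning a `Result`):

```rust
// Why `Send + 'static` shows up in the bounds: spawn_blocking moves the
// closure to another thread and returns its output across threads.
async fn blocking_task<F, T>(func: F) -> T
where
    F: FnOnce() -> T + Send + 'static,
    T: Send + 'static,
{
    tokio::task::spawn_blocking(func)
        .await
        .expect("blocking task panicked or was cancelled")
}

#[tokio::main]
async fn main() {
    // 10! computed off the async worker threads.
    let n = blocking_task(|| (1..=10u64).product::<u64>()).await;
    assert_eq!(n, 3_628_800);
}
```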
@@ -23,7 +23,9 @@ use std::marker::PhantomData;
use std::net::Ipv4Addr;
use std::sync::Arc;
use tempfile::{tempdir, TempDir};
+use tokio::runtime::Runtime;
use tokio::sync::oneshot;
+use tokio_compat_02::FutureExt;

const PASSWORD_BYTES: &[u8] = &[42, 50, 37];

@@ -37,8 +39,18 @@ struct ApiTester {
    _validator_dir: TempDir,
}

+// Builds a runtime to be used in the testing configuration.
+fn build_runtime() -> Arc<Runtime> {
+    Arc::new(
+        tokio::runtime::Builder::new_multi_thread()
+            .enable_all()
+            .build()
+            .expect("Should be able to build a testing runtime"),
+    )
+}
+
impl ApiTester {
-   pub async fn new() -> Self {
+   pub async fn new(runtime: std::sync::Weak<Runtime>) -> Self {
        let log = null_logger().unwrap();

        let validator_dir = tempdir().unwrap();
@@ -80,6 +92,7 @@ impl ApiTester {
        let initialized_validators = validator_store.initialized_validators();

        let context: Arc<Context<TestingSlotClock, E>> = Arc::new(Context {
+           runtime,
            api_secret,
            validator_dir: Some(validator_dir.path().into()),
            validator_store: Some(validator_store),
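The `build_runtime` helper uses tokio 0.3's builder names: the scheduler choice that 0.2 expressed with `Builder::new().threaded_scheduler()` (and `core_threads`) moves into the constructor in 0.3 (`new_multi_thread`, `worker_threads`). A small sketch of the rename, mirroring the helper above:

```rust
// tokio 0.2:
//     tokio::runtime::Builder::new().threaded_scheduler().enable_all().build()
// tokio 0.3:
use std::sync::Arc;
use tokio::runtime::Runtime;

fn build_runtime() -> Arc<Runtime> {
    Arc::new(
        tokio::runtime::Builder::new_multi_thread()
            .enable_all()
            .build()
            .expect("Should be able to build a testing runtime"),
    )
}

fn main() {
    let runtime = build_runtime();
    runtime.block_on(async { println!("runtime is up") });
}
```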
@@ -420,18 +433,29 @@ struct KeystoreValidatorScenario {
    correct_password: bool,
}

-#[tokio::test(core_threads = 2)]
-async fn invalid_pubkey() {
-    ApiTester::new()
+#[test]
+fn invalid_pubkey() {
+    let runtime = build_runtime();
+    let weak_runtime = Arc::downgrade(&runtime);
+    runtime.block_on(
+        async {
+            ApiTester::new(weak_runtime)
                .await
                .invalidate_api_token()
                .test_get_lighthouse_version_invalid()
                .await;
        }
+        .compat(),
+    );
}

-#[tokio::test(core_threads = 2)]
-async fn simple_getters() {
-    ApiTester::new()
+#[test]
+fn simple_getters() {
+    let runtime = build_runtime();
+    let weak_runtime = Arc::downgrade(&runtime);
+    runtime.block_on(
+        async {
+            ApiTester::new(weak_runtime)
                .await
                .test_get_lighthouse_version()
                .await
@@ -440,10 +464,17 @@ async fn simple_getters() {
                .test_get_lighthouse_spec()
                .await;
        }
+        .compat(),
+    );
}

-#[tokio::test(core_threads = 2)]
-async fn hd_validator_creation() {
-    ApiTester::new()
+#[test]
+fn hd_validator_creation() {
+    let runtime = build_runtime();
+    let weak_runtime = Arc::downgrade(&runtime);
+    runtime.block_on(
+        async {
+            ApiTester::new(weak_runtime)
                .await
                .assert_enabled_validators_count(0)
                .assert_validators_count(0)
@@ -475,10 +506,17 @@ async fn hd_validator_creation() {
                .assert_enabled_validators_count(2)
                .assert_validators_count(3);
        }
+        .compat(),
+    );
}

-#[tokio::test(core_threads = 2)]
-async fn validator_enabling() {
-    ApiTester::new()
+#[test]
+fn validator_enabling() {
+    let runtime = build_runtime();
+    let weak_runtime = Arc::downgrade(&runtime);
+    runtime.block_on(
+        async {
+            ApiTester::new(weak_runtime)
                .await
                .create_hd_validators(HdValidatorScenario {
                    count: 2,
@@ -498,10 +536,17 @@ async fn validator_enabling() {
                .assert_enabled_validators_count(2)
                .assert_validators_count(2);
        }
+        .compat(),
+    );
}

-#[tokio::test(core_threads = 2)]
-async fn keystore_validator_creation() {
-    ApiTester::new()
+#[test]
+fn keystore_validator_creation() {
+    let runtime = build_runtime();
+    let weak_runtime = Arc::downgrade(&runtime);
+    runtime.block_on(
+        async {
+            ApiTester::new(weak_runtime)
                .await
                .assert_enabled_validators_count(0)
                .assert_validators_count(0)
@@ -527,3 +572,6 @@ async fn keystore_validator_creation() {
                .assert_enabled_validators_count(1)
                .assert_validators_count(2);
        }
+        .compat(),
+    );
}
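Each `#[tokio::test(core_threads = 2)]` becomes a plain `#[test]` that owns its runtime, hands the tester a `Weak` handle, and drives the async body with `block_on`. The trailing `.compat()` comes from `tokio-compat-02`, which polls the future inside a tokio 0.2 context so dependencies still on tokio 0.2 keep working under the 0.3 runtime. A condensed, self-contained sketch of that shape (the `ApiTester` chain is replaced by a stand-in assertion):

```rust
// Converted-test skeleton: plain #[test], manual runtime, Weak handle,
// and a tokio 0.2 compatibility wrapper around the async body.
use std::sync::Arc;
use tokio::runtime::Runtime;
use tokio_compat_02::FutureExt;

#[test]
fn example_conversion() {
    let runtime = Arc::new(Runtime::new().expect("runtime"));
    let weak_runtime = Arc::downgrade(&runtime);
    runtime.block_on(
        async {
            // Stand-in for `ApiTester::new(weak_runtime).await ... .await;`.
            assert!(weak_runtime.upgrade().is_some());
        }
        .compat(),
    );
}
```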
@@ -38,7 +38,7 @@ use std::marker::PhantomData;
use std::net::SocketAddr;
use std::sync::Arc;
use std::time::{SystemTime, UNIX_EPOCH};
-use tokio::time::{delay_for, Duration};
+use tokio::time::{sleep, Duration};
use types::{EthSpec, Hash256};
use validator_store::ValidatorStore;

@@ -337,6 +337,7 @@ impl<T: EthSpec> ProductionValidatorClient<T> {

        self.http_api_listen_addr = if self.config.http_api.enabled {
            let ctx: Arc<http_api::Context<SystemTimeSlotClock, T>> = Arc::new(http_api::Context {
+               runtime: self.context.executor.runtime(),
                api_secret,
                validator_store: Some(self.validator_store.clone()),
                validator_dir: Some(self.config.validator_dir.clone()),
@@ -415,7 +416,7 @@ async fn init_from_beacon_node<E: EthSpec>(
            }
        }

-       delay_for(RETRY_DELAY).await;
+       sleep(RETRY_DELAY).await;
    };

    Ok((genesis.genesis_time, genesis.genesis_validators_root))
@@ -447,7 +448,7 @@ async fn wait_for_genesis<E: EthSpec>(
    // timer runs out.
    tokio::select! {
        result = poll_whilst_waiting_for_genesis(beacon_node, genesis_time, context.log()) => result?,
-       () = delay_for(genesis_time - now) => ()
    };

    info!(
@@ -497,7 +498,7 @@ async fn wait_for_connectivity(
                "Unable to connect to beacon node";
                "error" => format!("{:?}", e),
            );
-           delay_for(RETRY_DELAY).await;
+           sleep(RETRY_DELAY).await;
        }
    }
}
@@ -546,6 +547,6 @@ async fn poll_whilst_waiting_for_genesis(
        }
    }

-   delay_for(WAITING_FOR_GENESIS_POLL_TIME).await;
+   sleep(WAITING_FOR_GENESIS_POLL_TIME).await;
    }
}
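The remaining replacements in this file are mechanical: tokio 0.3 renamed `tokio::time::delay_for` to `tokio::time::sleep` (and `delay_until` to `sleep_until`). A minimal sketch of a retry delay under the new name, with the constant defined locally for illustration:

```rust
// tokio 0.2: tokio::time::delay_for(RETRY_DELAY).await;
// tokio 0.3: the same call site uses `sleep`.
use std::time::Duration;
use tokio::time::sleep;

const RETRY_DELAY: Duration = Duration::from_secs(1);

#[tokio::main]
async fn main() {
    for attempt in 1..=3u32 {
        println!("attempt {} failed, retrying", attempt);
        sleep(RETRY_DELAY).await;
    }
}
```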