Update to tokio 1.1 (#2172)

## Issue Addressed

resolves #2129
resolves #2099 
addresses some of #1712
unblocks #2076
unblocks #2153 

## Proposed Changes

- Updates all the dependencies mentioned in #2129, except for web3. They haven't merged their tokio 1.0 update because they are waiting on some dependencies of their own. Since we only use web3 in tests, I think updating it in a separate issue is fine. If they are able to merge soon though, I can update in this PR. 

- Updates `tokio_util` to 0.6.2 and `bytes` to 1.0.1.

- We haven't made a discv5 release since merging the tokio 1.0 updates, so I'm pinning to a commit rather than a release for now. **Edit:** I think we should merge an update of `tokio_util` to 0.6.2 into discv5 before this release because it has panic fixes in `DelayQueue` --> PR in discv5: https://github.com/sigp/discv5/pull/58

## Additional Info

tokio 1.0 changes that required some changes in lighthouse:

- `interval.next().await.is_some()` -> `interval.tick().await`
- `sleep` future is now `!Unpin` -> https://github.com/tokio-rs/tokio/issues/3028
- `try_recv` has been temporarily removed from `mpsc` -> https://github.com/tokio-rs/tokio/issues/3350
- stream features have moved to `tokio-stream` and `broadcast::Receiver::into_stream()` has been temporarily removed -> https://github.com/tokio-rs/tokio/issues/2870
- I've copied over the `BroadcastStream` wrapper from this PR, but can update to use `tokio-stream` once it's merged https://github.com/tokio-rs/tokio/pull/3384

Co-authored-by: realbigsean <seananderson33@gmail.com>
This commit is contained in:
realbigsean 2021-02-10 23:29:49 +00:00
parent 6f4da9a5d2
commit e20f64b21a
74 changed files with 1146 additions and 1327 deletions

1457
Cargo.lock generated

File diff suppressed because it is too large Load Diff

View File

@ -27,14 +27,13 @@ eth2_wallet = { path = "../crypto/eth2_wallet" }
eth2_wallet_manager = { path = "../common/eth2_wallet_manager" } eth2_wallet_manager = { path = "../common/eth2_wallet_manager" }
rand = "0.7.3" rand = "0.7.3"
validator_dir = { path = "../common/validator_dir" } validator_dir = { path = "../common/validator_dir" }
tokio = { version = "0.3.5", features = ["full"] } tokio = { version = "1.1.0", features = ["full"] }
eth2_keystore = { path = "../crypto/eth2_keystore" } eth2_keystore = { path = "../crypto/eth2_keystore" }
account_utils = { path = "../common/account_utils" } account_utils = { path = "../common/account_utils" }
slashing_protection = { path = "../validator_client/slashing_protection" } slashing_protection = { path = "../validator_client/slashing_protection" }
eth2 = {path = "../common/eth2"} eth2 = {path = "../common/eth2"}
safe_arith = {path = "../consensus/safe_arith"} safe_arith = {path = "../consensus/safe_arith"}
slot_clock = { path = "../common/slot_clock" } slot_clock = { path = "../common/slot_clock" }
tokio-compat-02 = "0.1"
[dev-dependencies] [dev-dependencies]
tempfile = "3.1.0" tempfile = "3.1.0"

View File

@ -12,7 +12,6 @@ use safe_arith::SafeArith;
use slot_clock::{SlotClock, SystemTimeSlotClock}; use slot_clock::{SlotClock, SystemTimeSlotClock};
use std::path::PathBuf; use std::path::PathBuf;
use std::time::Duration; use std::time::Duration;
use tokio_compat_02::FutureExt;
use types::{ChainSpec, Epoch, EthSpec, Fork, VoluntaryExit}; use types::{ChainSpec, Epoch, EthSpec, Fork, VoluntaryExit};
pub const CMD: &str = "exit"; pub const CMD: &str = "exit";
@ -77,17 +76,14 @@ pub fn cli_run<E: EthSpec>(matches: &ArgMatches, env: Environment<E>) -> Result<
.clone() .clone()
.expect("network should have a valid config"); .expect("network should have a valid config");
env.runtime().block_on( env.runtime().block_on(publish_voluntary_exit::<E>(
publish_voluntary_exit::<E>( &keystore_path,
&keystore_path, password_file_path.as_ref(),
password_file_path.as_ref(), &client,
&client, &spec,
&spec, stdin_inputs,
stdin_inputs, &testnet_config,
&testnet_config, ))?;
)
.compat(),
)?;
Ok(()) Ok(())
} }

View File

@ -10,7 +10,6 @@ path = "src/lib.rs"
[dev-dependencies] [dev-dependencies]
node_test_rig = { path = "../testing/node_test_rig" } node_test_rig = { path = "../testing/node_test_rig" }
tokio-compat-02 = "0.1"
[features] [features]
write_ssz_files = ["beacon_chain/write_ssz_files"] # Writes debugging .ssz files to /tmp during block processing. write_ssz_files = ["beacon_chain/write_ssz_files"] # Writes debugging .ssz files to /tmp during block processing.
@ -27,7 +26,7 @@ slog = { version = "2.5.2", features = ["max_level_trace", "release_max_level_tr
slog-term = "2.6.0" slog-term = "2.6.0"
slog-async = "2.5.0" slog-async = "2.5.0"
ctrlc = { version = "3.1.6", features = ["termination"] } ctrlc = { version = "3.1.6", features = ["termination"] }
tokio = { version = "0.3.2", features = ["time"] } tokio = { version = "1.1.0", features = ["time"] }
exit-future = "0.2.0" exit-future = "0.2.0"
dirs = "3.0.1" dirs = "3.0.1"
logging = { path = "../common/logging" } logging = { path = "../common/logging" }
@ -41,7 +40,7 @@ eth2_libp2p = { path = "./eth2_libp2p" }
eth2_ssz = "0.1.2" eth2_ssz = "0.1.2"
serde = "1.0.116" serde = "1.0.116"
clap_utils = { path = "../common/clap_utils" } clap_utils = { path = "../common/clap_utils" }
hyper = "0.13.8" hyper = "0.14.4"
lighthouse_version = { path = "../common/lighthouse_version" } lighthouse_version = { path = "../common/lighthouse_version" }
hex = "0.4.2" hex = "0.4.2"
slasher = { path = "../slasher" } slasher = { path = "../slasher" }

View File

@ -40,7 +40,7 @@ eth2_ssz_derive = "0.1.0"
state_processing = { path = "../../consensus/state_processing" } state_processing = { path = "../../consensus/state_processing" }
tree_hash = "0.1.1" tree_hash = "0.1.1"
types = { path = "../../consensus/types" } types = { path = "../../consensus/types" }
tokio = "0.3.2" tokio = "1.1.0"
eth1 = { path = "../eth1" } eth1 = { path = "../eth1" }
futures = "0.3.7" futures = "0.3.7"
genesis = { path = "../genesis" } genesis = { path = "../genesis" }

View File

@ -26,10 +26,10 @@ error-chain = "0.12.4"
serde_yaml = "0.8.13" serde_yaml = "0.8.13"
slog = { version = "2.5.2", features = ["max_level_trace"] } slog = { version = "2.5.2", features = ["max_level_trace"] }
slog-async = "2.5.0" slog-async = "2.5.0"
tokio = "0.3.2" tokio = "1.1.0"
dirs = "3.0.1" dirs = "3.0.1"
futures = "0.3.7" futures = "0.3.7"
reqwest = { version = "0.10.8", features = ["native-tls-vendored"] } reqwest = { version = "0.11.0", features = ["native-tls-vendored"] }
url = "2.1.1" url = "2.1.1"
eth1 = { path = "../eth1" } eth1 = { path = "../eth1" }
genesis = { path = "../genesis" } genesis = { path = "../genesis" }

View File

@ -1,7 +1,6 @@
use crate::metrics; use crate::metrics;
use beacon_chain::{BeaconChain, BeaconChainTypes}; use beacon_chain::{BeaconChain, BeaconChainTypes};
use eth2_libp2p::NetworkGlobals; use eth2_libp2p::NetworkGlobals;
use futures::prelude::*;
use parking_lot::Mutex; use parking_lot::Mutex;
use slog::{debug, error, info, warn, Logger}; use slog::{debug, error, info, warn, Logger};
use slot_clock::SlotClock; use slot_clock::SlotClock;
@ -64,26 +63,32 @@ pub fn spawn_notifier<T: BeaconChainTypes>(
} }
// Perform post-genesis logging. // Perform post-genesis logging.
while interval.next().await.is_some() { loop {
interval.tick().await;
let connected_peer_count = network.connected_peers(); let connected_peer_count = network.connected_peers();
let sync_state = network.sync_state(); let sync_state = network.sync_state();
let head_info = beacon_chain.head_info().map_err(|e| { let head_info = match beacon_chain.head_info() {
error!( Ok(head_info) => head_info,
log, Err(e) => {
"Failed to get beacon chain head info"; error!(log, "Failed to get beacon chain head info"; "error" => format!("{:?}", e));
"error" => format!("{:?}", e) break;
) }
})?; };
let head_slot = head_info.slot; let head_slot = head_info.slot;
let current_slot = beacon_chain.slot().map_err(|e| { let current_slot = match beacon_chain.slot() {
error!( Ok(slot) => slot,
log, Err(e) => {
"Unable to read current slot"; error!(
"error" => format!("{:?}", e) log,
) "Unable to read current slot";
})?; "error" => format!("{:?}", e)
);
break;
}
};
let current_epoch = current_slot.epoch(T::EthSpec::slots_per_epoch()); let current_epoch = current_slot.epoch(T::EthSpec::slots_per_epoch());
let finalized_epoch = head_info.finalized_checkpoint.epoch; let finalized_epoch = head_info.finalized_checkpoint.epoch;
let finalized_root = head_info.finalized_checkpoint.root; let finalized_root = head_info.finalized_checkpoint.root;
@ -175,11 +180,10 @@ pub fn spawn_notifier<T: BeaconChainTypes>(
eth1_logging(&beacon_chain, &log); eth1_logging(&beacon_chain, &log);
} }
Ok::<(), ()>(())
}; };
// run the notifier on the current executor // run the notifier on the current executor
executor.spawn(interval_future.unwrap_or_else(|_| ()), "notifier"); executor.spawn(interval_future, "notifier");
Ok(()) Ok(())
} }

View File

@ -13,7 +13,7 @@ environment = { path = "../../lighthouse/environment" }
tokio-compat-02 = "0.1" tokio-compat-02 = "0.1"
[dependencies] [dependencies]
reqwest = { version = "0.10.8", features = ["native-tls-vendored"] } reqwest = { version = "0.11.0", features = ["native-tls-vendored"] }
futures = "0.3.7" futures = "0.3.7"
serde_json = "1.0.58" serde_json = "1.0.58"
serde = { version = "1.0.116", features = ["derive"] } serde = { version = "1.0.116", features = ["derive"] }
@ -26,7 +26,7 @@ tree_hash = "0.1.1"
eth2_hashing = "0.1.0" eth2_hashing = "0.1.0"
parking_lot = "0.11.0" parking_lot = "0.11.0"
slog = "2.5.2" slog = "2.5.2"
tokio = { version = "0.3.2", features = ["full"] } tokio = { version = "1.1.0", features = ["full"] }
state_processing = { path = "../../consensus/state_processing" } state_processing = { path = "../../consensus/state_processing" }
libflate = "1.0.2" libflate = "1.0.2"
lighthouse_metrics = { path = "../../common/lighthouse_metrics"} lighthouse_metrics = { path = "../../common/lighthouse_metrics"}

View File

@ -9,7 +9,7 @@ use crate::{
inner::{DepositUpdater, Inner}, inner::{DepositUpdater, Inner},
}; };
use fallback::{Fallback, FallbackError}; use fallback::{Fallback, FallbackError};
use futures::{future::TryFutureExt, StreamExt}; use futures::future::TryFutureExt;
use parking_lot::{RwLock, RwLockReadGuard}; use parking_lot::{RwLock, RwLockReadGuard};
use serde::{Deserialize, Serialize}; use serde::{Deserialize, Serialize};
use slog::{crit, debug, error, info, trace, warn, Logger}; use slog::{crit, debug, error, info, trace, warn, Logger};
@ -721,7 +721,8 @@ impl Service {
let mut interval = interval_at(Instant::now(), update_interval); let mut interval = interval_at(Instant::now(), update_interval);
let update_future = async move { let update_future = async move {
while interval.next().await.is_some() { loop {
interval.tick().await;
self.do_update(update_interval).await.ok(); self.do_update(update_interval).await.ok();
} }
}; };

View File

@ -5,8 +5,8 @@ authors = ["Sigma Prime <contact@sigmaprime.io>"]
edition = "2018" edition = "2018"
[dependencies] [dependencies]
discv5 = { version = "0.1.0-beta.2", features = ["libp2p"] } discv5 = { version = "0.1.0-beta.3", features = ["libp2p"] }
unsigned-varint = { git = "https://github.com/sigp/unsigned-varint", branch = "dep-update", features = ["codec"] } unsigned-varint = { version = "0.6.0", features = ["codec"] }
types = { path = "../../consensus/types" } types = { path = "../../consensus/types" }
hashset_delay = { path = "../../common/hashset_delay" } hashset_delay = { path = "../../common/hashset_delay" }
eth2_ssz_types = { path = "../../consensus/ssz_types" } eth2_ssz_types = { path = "../../consensus/ssz_types" }
@ -16,15 +16,16 @@ eth2_ssz = "0.1.2"
eth2_ssz_derive = "0.1.0" eth2_ssz_derive = "0.1.0"
slog = { version = "2.5.2", features = ["max_level_trace"] } slog = { version = "2.5.2", features = ["max_level_trace"] }
lighthouse_version = { path = "../../common/lighthouse_version" } lighthouse_version = { path = "../../common/lighthouse_version" }
tokio = { version = "0.3.2", features = ["time", "macros"] } tokio = { version = "1.1.0", features = ["time", "macros"] }
futures = "0.3.7" futures = "0.3.7"
futures-io = "0.3.7"
error-chain = "0.12.4" error-chain = "0.12.4"
dirs = "3.0.1" dirs = "3.0.1"
fnv = "1.0.7" fnv = "1.0.7"
lazy_static = "1.4.0" lazy_static = "1.4.0"
lighthouse_metrics = { path = "../../common/lighthouse_metrics" } lighthouse_metrics = { path = "../../common/lighthouse_metrics" }
smallvec = "1.6.1" smallvec = "1.6.1"
tokio-io-timeout = "0.5.0" tokio-io-timeout = "1.1.1"
lru = "0.6.0" lru = "0.6.0"
parking_lot = "0.11.0" parking_lot = "0.11.0"
sha2 = "0.9.1" sha2 = "0.9.1"
@ -32,7 +33,7 @@ base64 = "0.13.0"
snap = "1.0.1" snap = "1.0.1"
void = "1.0.2" void = "1.0.2"
hex = "0.4.2" hex = "0.4.2"
tokio-util = { version = "0.4.0", features = ["codec", "compat", "time"] } tokio-util = { version = "0.6.2", features = ["codec", "compat", "time"] }
tiny-keccak = "2.0.2" tiny-keccak = "2.0.2"
task_executor = { path = "../../common/task_executor" } task_executor = { path = "../../common/task_executor" }
rand = "0.7.3" rand = "0.7.3"
@ -41,14 +42,12 @@ regex = "1.3.9"
strum = { version = "0.20", features = ["derive"] } strum = { version = "0.20", features = ["derive"] }
[dependencies.libp2p] [dependencies.libp2p]
#version = "0.23.0" version = "0.34.0"
git = "https://github.com/sigp/rust-libp2p"
rev = "97000533e4710183124abde017c6c3d68287c1ae"
default-features = false default-features = false
features = ["websocket", "identify", "mplex", "yamux", "noise", "gossipsub", "dns", "tcp-tokio"] features = ["websocket", "identify", "mplex", "yamux", "noise", "gossipsub", "dns", "tcp-tokio"]
[dev-dependencies] [dev-dependencies]
tokio = { version = "0.3.2", features = ["full"] } tokio = { version = "1.1.0", features = ["full"] }
slog-term = "2.6.0" slog-term = "2.6.0"
slog-async = "2.5.0" slog-async = "2.5.0"
tempfile = "3.1.0" tempfile = "3.1.0"

View File

@ -832,7 +832,7 @@ impl<TSpec: EthSpec> Behaviour<TSpec> {
if let Some((peer_id, reason)) = self.peers_to_dc.pop_front() { if let Some((peer_id, reason)) = self.peers_to_dc.pop_front() {
return Poll::Ready(NBAction::NotifyHandler { return Poll::Ready(NBAction::NotifyHandler {
peer_id, peer_id,
handler: NotifyHandler::All, handler: NotifyHandler::Any,
event: BehaviourHandlerIn::Shutdown( event: BehaviourHandlerIn::Shutdown(
reason.map(|reason| (RequestId::Behaviour, RPCRequest::Goodbye(reason))), reason.map(|reason| (RequestId::Behaviour, RPCRequest::Goodbye(reason))),
), ),
@ -893,7 +893,7 @@ impl<TSpec: EthSpec> Behaviour<TSpec> {
} }
// perform gossipsub score updates when necessary // perform gossipsub score updates when necessary
while let Poll::Ready(Some(_)) = self.update_gossipsub_scores.poll_next_unpin(cx) { while let Poll::Ready(_) = self.update_gossipsub_scores.poll_tick(cx) {
self.peer_manager.update_gossipsub_scores(&self.gossipsub); self.peer_manager.update_gossipsub_scores(&self.gossipsub);
} }

View File

@ -221,8 +221,9 @@ impl CombinedKeyExt for CombinedKey {
fn from_libp2p(key: &libp2p::core::identity::Keypair) -> Result<CombinedKey, &'static str> { fn from_libp2p(key: &libp2p::core::identity::Keypair) -> Result<CombinedKey, &'static str> {
match key { match key {
Keypair::Secp256k1(key) => { Keypair::Secp256k1(key) => {
let secret = discv5::enr::k256::ecdsa::SigningKey::new(&key.secret().to_bytes()) let secret =
.expect("libp2p key must be valid"); discv5::enr::k256::ecdsa::SigningKey::from_bytes(&key.secret().to_bytes())
.expect("libp2p key must be valid");
Ok(CombinedKey::Secp256k1(secret)) Ok(CombinedKey::Secp256k1(secret))
} }
Keypair::Ed25519(key) => { Keypair::Ed25519(key) => {
@ -277,7 +278,7 @@ mod tests {
fn test_secp256k1_peer_id_conversion() { fn test_secp256k1_peer_id_conversion() {
let sk_hex = "df94a73d528434ce2309abb19c16aedb535322797dbd59c157b1e04095900f48"; let sk_hex = "df94a73d528434ce2309abb19c16aedb535322797dbd59c157b1e04095900f48";
let sk_bytes = hex::decode(sk_hex).unwrap(); let sk_bytes = hex::decode(sk_hex).unwrap();
let secret_key = discv5::enr::k256::ecdsa::SigningKey::new(&sk_bytes).unwrap(); let secret_key = discv5::enr::k256::ecdsa::SigningKey::from_bytes(&sk_bytes).unwrap();
let libp2p_sk = libp2p::identity::secp256k1::SecretKey::from_bytes(sk_bytes).unwrap(); let libp2p_sk = libp2p::identity::secp256k1::SecretKey::from_bytes(sk_bytes).unwrap();
let secp256k1_kp: libp2p::identity::secp256k1::Keypair = libp2p_sk.into(); let secp256k1_kp: libp2p::identity::secp256k1::Keypair = libp2p_sk.into();

View File

@ -896,7 +896,7 @@ impl<TSpec: EthSpec> Discovery<TSpec> {
} }
EventStream::InActive => {} // ignore checking the stream EventStream::InActive => {} // ignore checking the stream
EventStream::Present(ref mut stream) => { EventStream::Present(ref mut stream) => {
while let Ok(event) = stream.try_recv() { while let Poll::Ready(Some(event)) = stream.poll_recv(cx) {
match event { match event {
// We filter out unwanted discv5 events here and only propagate useful results to // We filter out unwanted discv5 events here and only propagate useful results to
// the peer manager. // the peer manager.

View File

@ -972,7 +972,7 @@ impl<TSpec: EthSpec> Stream for PeerManager<TSpec> {
fn poll_next(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Option<Self::Item>> { fn poll_next(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Option<Self::Item>> {
// perform the heartbeat when necessary // perform the heartbeat when necessary
while let Poll::Ready(Some(_)) = self.heartbeat.poll_next_unpin(cx) { while let Poll::Ready(_) = self.heartbeat.poll_tick(cx) {
self.heartbeat(); self.heartbeat();
} }
@ -1011,8 +1011,10 @@ impl<TSpec: EthSpec> Stream for PeerManager<TSpec> {
} }
} }
if !matches!(self.network_globals.sync_state(), SyncState::SyncingFinalized{..}|SyncState::SyncingHead{..}) if !matches!(
{ self.network_globals.sync_state(),
SyncState::SyncingFinalized { .. } | SyncState::SyncingHead { .. }
) {
loop { loop {
match self.status_peers.poll_next_unpin(cx) { match self.status_peers.poll_next_unpin(cx) {
Poll::Ready(Some(Ok(peer_id))) => { Poll::Ready(Some(Ok(peer_id))) => {

View File

@ -156,7 +156,10 @@ impl<T: EthSpec> PeerInfo<T> {
/// Checks if the status is connected. /// Checks if the status is connected.
pub fn is_connected(&self) -> bool { pub fn is_connected(&self) -> bool {
matches!(self.connection_status, PeerConnectionStatus::Connected { .. }) matches!(
self.connection_status,
PeerConnectionStatus::Connected { .. }
)
} }
/// Checks if the status is connected. /// Checks if the status is connected.

View File

@ -29,12 +29,20 @@ pub struct SyncInfo {
impl std::cmp::PartialEq for PeerSyncStatus { impl std::cmp::PartialEq for PeerSyncStatus {
fn eq(&self, other: &Self) -> bool { fn eq(&self, other: &Self) -> bool {
matches!((self, other), matches!(
(PeerSyncStatus::Synced { .. }, PeerSyncStatus::Synced { .. }) | (self, other),
(PeerSyncStatus::Advanced { .. }, PeerSyncStatus::Advanced { .. }) | (PeerSyncStatus::Synced { .. }, PeerSyncStatus::Synced { .. })
(PeerSyncStatus::Behind { .. }, PeerSyncStatus::Behind { .. }) | | (
(PeerSyncStatus::IrrelevantPeer, PeerSyncStatus::IrrelevantPeer) | PeerSyncStatus::Advanced { .. },
(PeerSyncStatus::Unknown, PeerSyncStatus::Unknown)) PeerSyncStatus::Advanced { .. }
)
| (PeerSyncStatus::Behind { .. }, PeerSyncStatus::Behind { .. })
| (
PeerSyncStatus::IrrelevantPeer,
PeerSyncStatus::IrrelevantPeer
)
| (PeerSyncStatus::Unknown, PeerSyncStatus::Unknown)
)
} }
} }

View File

@ -137,14 +137,20 @@ impl<TSpec: EthSpec> PeerDB<TSpec> {
/// If we are connected or currently dialing the peer returns true. /// If we are connected or currently dialing the peer returns true.
pub fn is_connected_or_dialing(&self, peer_id: &PeerId) -> bool { pub fn is_connected_or_dialing(&self, peer_id: &PeerId) -> bool {
matches!(self.connection_status(peer_id), Some(PeerConnectionStatus::Connected { .. }) matches!(
| Some(PeerConnectionStatus::Dialing { .. })) self.connection_status(peer_id),
Some(PeerConnectionStatus::Connected { .. })
| Some(PeerConnectionStatus::Dialing { .. })
)
} }
/// If we are connected or in the process of disconnecting /// If we are connected or in the process of disconnecting
pub fn is_connected_or_disconnecting(&self, peer_id: &PeerId) -> bool { pub fn is_connected_or_disconnecting(&self, peer_id: &PeerId) -> bool {
matches!(self.connection_status(peer_id), Some(PeerConnectionStatus::Connected { .. }) matches!(
| Some(PeerConnectionStatus::Disconnecting { .. })) self.connection_status(peer_id),
Some(PeerConnectionStatus::Connected { .. })
| Some(PeerConnectionStatus::Disconnecting { .. })
)
} }
/// Returns true if the peer is synced at least to our current head. /// Returns true if the peer is synced at least to our current head.

View File

@ -7,6 +7,7 @@ use super::{RPCReceived, RPCSend};
use crate::rpc::protocol::{InboundFramed, OutboundFramed}; use crate::rpc::protocol::{InboundFramed, OutboundFramed};
use fnv::FnvHashMap; use fnv::FnvHashMap;
use futures::prelude::*; use futures::prelude::*;
use futures::{Sink, SinkExt};
use libp2p::core::upgrade::{ use libp2p::core::upgrade::{
InboundUpgrade, NegotiationError, OutboundUpgrade, ProtocolError, UpgradeError, InboundUpgrade, NegotiationError, OutboundUpgrade, ProtocolError, UpgradeError,
}; };
@ -133,7 +134,7 @@ enum HandlerState {
/// ///
/// While in this state the handler rejects new requests but tries to finish existing ones. /// While in this state the handler rejects new requests but tries to finish existing ones.
/// Once the timer expires, all messages are killed. /// Once the timer expires, all messages are killed.
ShuttingDown(Sleep), ShuttingDown(Box<Sleep>),
/// The handler is deactivated. A goodbye has been sent and no more messages are sent or /// The handler is deactivated. A goodbye has been sent and no more messages are sent or
/// received. /// received.
Deactivated, Deactivated,
@ -239,9 +240,9 @@ where
self.dial_queue.push((id, req)); self.dial_queue.push((id, req));
} }
self.state = HandlerState::ShuttingDown(sleep_until( self.state = HandlerState::ShuttingDown(Box::new(sleep_until(
TInstant::now() + Duration::from_secs(SHUTDOWN_TIMEOUT_SECS as u64), TInstant::now() + Duration::from_secs(SHUTDOWN_TIMEOUT_SECS as u64),
)); )));
} }
} }

View File

@ -9,8 +9,8 @@ use crate::rpc::{
MaxRequestBlocks, MAX_REQUEST_BLOCKS, MaxRequestBlocks, MAX_REQUEST_BLOCKS,
}; };
use futures::future::BoxFuture; use futures::future::BoxFuture;
use futures::prelude::*;
use futures::prelude::{AsyncRead, AsyncWrite}; use futures::prelude::{AsyncRead, AsyncWrite};
use futures::{FutureExt, SinkExt, StreamExt};
use libp2p::core::{InboundUpgrade, OutboundUpgrade, ProtocolName, UpgradeInfo}; use libp2p::core::{InboundUpgrade, OutboundUpgrade, ProtocolName, UpgradeInfo};
use ssz::Encode; use ssz::Encode;
use ssz_types::VariableList; use ssz_types::VariableList;
@ -278,7 +278,7 @@ impl ProtocolName for ProtocolId {
pub type InboundOutput<TSocket, TSpec> = (RPCRequest<TSpec>, InboundFramed<TSocket, TSpec>); pub type InboundOutput<TSocket, TSpec> = (RPCRequest<TSpec>, InboundFramed<TSocket, TSpec>);
pub type InboundFramed<TSocket, TSpec> = pub type InboundFramed<TSocket, TSpec> =
Framed<TimeoutStream<Compat<TSocket>>, InboundCodec<TSpec>>; Framed<std::pin::Pin<Box<TimeoutStream<Compat<TSocket>>>>, InboundCodec<TSpec>>;
impl<TSocket, TSpec> InboundUpgrade<TSocket> for RPCProtocol<TSpec> impl<TSocket, TSpec> InboundUpgrade<TSocket> for RPCProtocol<TSpec>
where where
@ -304,7 +304,7 @@ where
let mut timed_socket = TimeoutStream::new(socket); let mut timed_socket = TimeoutStream::new(socket);
timed_socket.set_read_timeout(Some(Duration::from_secs(TTFB_TIMEOUT))); timed_socket.set_read_timeout(Some(Duration::from_secs(TTFB_TIMEOUT)));
let socket = Framed::new(timed_socket, codec); let socket = Framed::new(Box::pin(timed_socket), codec);
// MetaData requests should be empty, return the stream // MetaData requests should be empty, return the stream
match protocol_name { match protocol_name {

View File

@ -1,6 +1,5 @@
use crate::rpc::{Protocol, RPCRequest}; use crate::rpc::{Protocol, RPCRequest};
use fnv::FnvHashMap; use fnv::FnvHashMap;
use futures::StreamExt;
use libp2p::PeerId; use libp2p::PeerId;
use std::convert::TryInto; use std::convert::TryInto;
use std::future::Future; use std::future::Future;
@ -241,7 +240,7 @@ impl Future for RPCRateLimiter {
type Output = (); type Output = ();
fn poll(mut self: Pin<&mut Self>, cx: &mut Context) -> Poll<Self::Output> { fn poll(mut self: Pin<&mut Self>, cx: &mut Context) -> Poll<Self::Output> {
while let Poll::Ready(Some(_)) = self.prune_interval.poll_next_unpin(cx) { while let Poll::Ready(_) = self.prune_interval.poll_tick(cx) {
self.prune(); self.prune();
} }

View File

@ -23,12 +23,16 @@ pub enum SyncState {
impl PartialEq for SyncState { impl PartialEq for SyncState {
fn eq(&self, other: &Self) -> bool { fn eq(&self, other: &Self) -> bool {
matches!((self, other), matches!(
(SyncState::SyncingFinalized { .. }, SyncState::SyncingFinalized { .. }) | (self, other),
(SyncState::SyncingHead { .. }, SyncState::SyncingHead { .. }) | (
(SyncState::Synced, SyncState::Synced) | SyncState::SyncingFinalized { .. },
(SyncState::Stalled, SyncState::Stalled) | SyncState::SyncingFinalized { .. }
(SyncState::SyncTransition, SyncState::SyncTransition)) ) | (SyncState::SyncingHead { .. }, SyncState::SyncingHead { .. })
| (SyncState::Synced, SyncState::Synced)
| (SyncState::Stalled, SyncState::Stalled)
| (SyncState::SyncTransition, SyncState::SyncTransition)
)
} }
} }

View File

@ -315,7 +315,7 @@ fn test_blocks_by_range_chunked_rpc_terminates_correctly() {
// sent in the timeout // sent in the timeout
match futures::future::select( match futures::future::select(
Box::pin(receiver.next_event()), Box::pin(receiver.next_event()),
tokio::time::sleep(Duration::from_secs(1)), Box::pin(tokio::time::sleep(Duration::from_secs(1))),
) )
.await .await
{ {
@ -692,7 +692,7 @@ fn test_blocks_by_root_chunked_rpc_terminates_correctly() {
// sent in the timeout // sent in the timeout
match futures::future::select( match futures::future::select(
Box::pin(receiver.next_event()), Box::pin(receiver.next_event()),
tokio::time::sleep(Duration::from_millis(1000)), Box::pin(tokio::time::sleep(Duration::from_secs(1))),
) )
.await .await
{ {

View File

@ -19,7 +19,7 @@ merkle_proof = { path = "../../consensus/merkle_proof" }
eth2_ssz = "0.1.2" eth2_ssz = "0.1.2"
eth2_hashing = "0.1.0" eth2_hashing = "0.1.0"
tree_hash = "0.1.1" tree_hash = "0.1.1"
tokio = { version = "0.3.2", features = ["full"] } tokio = { version = "1.1.0", features = ["full"] }
parking_lot = "0.11.0" parking_lot = "0.11.0"
slog = "2.5.2" slog = "2.5.2"
exit-future = "0.2.0" exit-future = "0.2.0"

View File

@ -5,9 +5,11 @@ authors = ["Paul Hauner <paul@paulhauner.com>"]
edition = "2018" edition = "2018"
[dependencies] [dependencies]
warp = { git = "https://github.com/sigp/warp ", branch = "lighthouse" } warp = "0.3.0"
serde = { version = "1.0.116", features = ["derive"] } serde = { version = "1.0.116", features = ["derive"] }
tokio = { version = "0.3.2", features = ["macros","stream","sync"] } tokio = { version = "1.1.0", features = ["macros","sync"] }
tokio-stream = "0.1.2"
tokio-util = "0.6.3"
parking_lot = "0.11.0" parking_lot = "0.11.0"
types = { path = "../../consensus/types" } types = { path = "../../consensus/types" }
hex = "0.4.2" hex = "0.4.2"
@ -32,5 +34,4 @@ futures = "0.3.8"
store = { path = "../store" } store = { path = "../store" }
environment = { path = "../../lighthouse/environment" } environment = { path = "../../lighthouse/environment" }
tree_hash = "0.1.1" tree_hash = "0.1.1"
discv5 = { version = "0.1.0-beta.2", features = ["libp2p"] } discv5 = { version = "0.1.0-beta.3" }
tokio-compat-02 = "0.1"

View File

@ -0,0 +1,66 @@
// TODO: this should be replaced with the tokio's `BroadcastStream` once it's added to
// tokio-stream (https://github.com/tokio-rs/tokio/pull/3384)
use std::fmt;
use std::pin::Pin;
use std::task::{Context, Poll};
use tokio::sync::broadcast::error::RecvError;
use tokio::sync::broadcast::Receiver;
use tokio_stream::Stream;
use tokio_util::sync::ReusableBoxFuture;
/// A wrapper around [`tokio::sync::broadcast::Receiver`] that implements [`Stream`].
///
/// [`tokio::sync::broadcast::Receiver`]: struct@tokio::sync::broadcast::Receiver
/// [`Stream`]: trait@crate::Stream
pub struct BroadcastStream<T> {
inner: ReusableBoxFuture<(Result<T, RecvError>, Receiver<T>)>,
}
/// An error returned from the inner stream of a [`BroadcastStream`].
#[derive(Debug, PartialEq)]
pub enum BroadcastStreamRecvError {
/// The receiver lagged too far behind. Attempting to receive again will
/// return the oldest message still retained by the channel.
///
/// Includes the number of skipped messages.
Lagged(u64),
}
async fn make_future<T: Clone>(mut rx: Receiver<T>) -> (Result<T, RecvError>, Receiver<T>) {
let result = rx.recv().await;
(result, rx)
}
impl<T: 'static + Clone + Send> BroadcastStream<T> {
/// Create a new `BroadcastStream`.
pub fn new(rx: Receiver<T>) -> Self {
Self {
inner: ReusableBoxFuture::new(make_future(rx)),
}
}
}
impl<T: 'static + Clone + Send> Stream for BroadcastStream<T> {
type Item = Result<T, BroadcastStreamRecvError>;
fn poll_next(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Option<Self::Item>> {
let (result, rx) = match self.inner.poll(cx) {
std::task::Poll::Ready(t) => t,
std::task::Poll::Pending => return std::task::Poll::Pending,
};
self.inner.set(make_future(rx));
match result {
Ok(item) => Poll::Ready(Some(Ok(item))),
Err(RecvError::Closed) => Poll::Ready(None),
Err(RecvError::Lagged(n)) => {
Poll::Ready(Some(Err(BroadcastStreamRecvError::Lagged(n))))
}
}
}
}
impl<T> fmt::Debug for BroadcastStream<T> {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
f.debug_struct("BroadcastStream").finish()
}
}

View File

@ -7,6 +7,7 @@
mod beacon_proposer_cache; mod beacon_proposer_cache;
mod block_id; mod block_id;
mod broadcast_stream;
mod metrics; mod metrics;
mod state_id; mod state_id;
mod validator_inclusion; mod validator_inclusion;
@ -18,7 +19,7 @@ use beacon_chain::{
}; };
use beacon_proposer_cache::BeaconProposerCache; use beacon_proposer_cache::BeaconProposerCache;
use block_id::BlockId; use block_id::BlockId;
use eth2::types::{self as api_types, EventKind, ValidatorId}; use eth2::types::{self as api_types, ValidatorId};
use eth2_libp2p::{types::SyncState, EnrExt, NetworkGlobals, PeerId, PubsubMessage}; use eth2_libp2p::{types::SyncState, EnrExt, NetworkGlobals, PeerId, PubsubMessage};
use lighthouse_version::version_with_platform; use lighthouse_version::version_with_platform;
use network::NetworkMessage; use network::NetworkMessage;
@ -34,19 +35,17 @@ use std::convert::TryInto;
use std::future::Future; use std::future::Future;
use std::net::{Ipv4Addr, SocketAddr, SocketAddrV4}; use std::net::{Ipv4Addr, SocketAddr, SocketAddrV4};
use std::sync::Arc; use std::sync::Arc;
use tokio::stream::{StreamExt, StreamMap};
use tokio::sync::broadcast::error::RecvError;
use tokio::sync::mpsc::UnboundedSender; use tokio::sync::mpsc::UnboundedSender;
use tokio_stream::StreamExt;
use types::{ use types::{
Attestation, AttestationDuty, AttesterSlashing, CloneConfig, CommitteeCache, Epoch, EthSpec, Attestation, AttestationDuty, AttesterSlashing, CloneConfig, CommitteeCache, Epoch, EthSpec,
Hash256, ProposerSlashing, PublicKey, PublicKeyBytes, RelativeEpoch, SignedAggregateAndProof, Hash256, ProposerSlashing, PublicKey, PublicKeyBytes, RelativeEpoch, SignedAggregateAndProof,
SignedBeaconBlock, SignedVoluntaryExit, Slot, YamlConfig, SignedBeaconBlock, SignedVoluntaryExit, Slot, YamlConfig,
}; };
use warp::http::StatusCode; use warp::http::StatusCode;
use warp::sse::ServerSentEvent; use warp::sse::Event;
use warp::Reply; use warp::Reply;
use warp::{http::Response, Filter, Stream}; use warp::{http::Response, Filter};
use warp_utils::reject::ServerSentEventError;
use warp_utils::task::{blocking_json_task, blocking_task}; use warp_utils::task::{blocking_json_task, blocking_task};
const API_PREFIX: &str = "eth"; const API_PREFIX: &str = "eth";
@ -1610,9 +1609,9 @@ pub fn serve<T: BeaconChainTypes>(
.and(warp::path("duties")) .and(warp::path("duties"))
.and(warp::path("proposer")) .and(warp::path("proposer"))
.and(warp::path::param::<Epoch>().or_else(|_| async { .and(warp::path::param::<Epoch>().or_else(|_| async {
Err(warp_utils::reject::custom_bad_request( Err(warp_utils::reject::custom_bad_request(
"Invalid epoch".to_string(), "Invalid epoch".to_string(),
)) ))
})) }))
.and(warp::path::end()) .and(warp::path::end())
.and(not_while_syncing_filter.clone()) .and(not_while_syncing_filter.clone())
@ -1637,7 +1636,7 @@ pub fn serve<T: BeaconChainTypes>(
if epoch == current_epoch { if epoch == current_epoch {
let dependent_root_slot = current_epoch let dependent_root_slot = current_epoch
.start_slot(T::EthSpec::slots_per_epoch()) - 1; .start_slot(T::EthSpec::slots_per_epoch()) - 1;
let dependent_root = if dependent_root_slot > chain.best_slot().map_err(warp_utils::reject::beacon_chain_error)? { let dependent_root = if dependent_root_slot > chain.best_slot().map_err(warp_utils::reject::beacon_chain_error)? {
chain.head_beacon_block_root().map_err(warp_utils::reject::beacon_chain_error)? chain.head_beacon_block_root().map_err(warp_utils::reject::beacon_chain_error)?
} else { } else {
chain chain
@ -1649,7 +1648,7 @@ pub fn serve<T: BeaconChainTypes>(
beacon_proposer_cache beacon_proposer_cache
.lock() .lock()
.get_proposers(&chain, epoch) .get_proposers(&chain, epoch)
.map(|duties| api_types::DutiesResponse{ data: duties, dependent_root} ) .map(|duties| api_types::DutiesResponse { data: duties, dependent_root })
} else { } else {
let state = let state =
StateId::slot(epoch.start_slot(T::EthSpec::slots_per_epoch())) StateId::slot(epoch.start_slot(T::EthSpec::slots_per_epoch()))
@ -1657,7 +1656,7 @@ pub fn serve<T: BeaconChainTypes>(
let dependent_root_slot = state.current_epoch() let dependent_root_slot = state.current_epoch()
.start_slot(T::EthSpec::slots_per_epoch()) - 1; .start_slot(T::EthSpec::slots_per_epoch()) - 1;
let dependent_root = if dependent_root_slot > chain.best_slot().map_err(warp_utils::reject::beacon_chain_error)? { let dependent_root = if dependent_root_slot > chain.best_slot().map_err(warp_utils::reject::beacon_chain_error)? {
chain.head_beacon_block_root().map_err(warp_utils::reject::beacon_chain_error)? chain.head_beacon_block_root().map_err(warp_utils::reject::beacon_chain_error)?
} else { } else {
chain chain
@ -1691,8 +1690,7 @@ pub fn serve<T: BeaconChainTypes>(
}) })
.collect::<Result<Vec<api_types::ProposerData>, _>>() .collect::<Result<Vec<api_types::ProposerData>, _>>()
.map(|duties| { .map(|duties| {
api_types::DutiesResponse {
api_types::DutiesResponse{
dependent_root, dependent_root,
data: duties, data: duties,
} }
@ -2053,7 +2051,7 @@ pub fn serve<T: BeaconChainTypes>(
"attestation_slot" => aggregate.message.aggregate.data.slot, "attestation_slot" => aggregate.message.aggregate.data.slot,
); );
failures.push(api_types::Failure::new(index, format!("Verification: {:?}", e))); failures.push(api_types::Failure::new(index, format!("Verification: {:?}", e)));
}, }
} }
} }
@ -2087,7 +2085,7 @@ pub fn serve<T: BeaconChainTypes>(
if !failures.is_empty() { if !failures.is_empty() {
Err(warp_utils::reject::indexed_bad_request("error processing aggregate and proofs".to_string(), Err(warp_utils::reject::indexed_bad_request("error processing aggregate and proofs".to_string(),
failures failures,
)) ))
} else { } else {
Ok(()) Ok(())
@ -2358,24 +2356,6 @@ pub fn serve<T: BeaconChainTypes>(
}) })
}); });
fn merge_streams<T: EthSpec>(
stream_map: StreamMap<
String,
impl Stream<Item = Result<EventKind<T>, RecvError>> + Unpin + Send + 'static,
>,
) -> impl Stream<Item = Result<impl ServerSentEvent + Send + 'static, ServerSentEventError>>
+ Send
+ 'static {
// Convert messages into Server-Sent Events and return resulting stream.
stream_map.map(move |(topic_name, msg)| match msg {
Ok(data) => Ok((warp::sse::event(topic_name), warp::sse::json(data)).boxed()),
Err(e) => Err(warp_utils::reject::server_sent_event_error(format!(
"{:?}",
e
))),
})
}
let get_events = eth1_v1 let get_events = eth1_v1
.and(warp::path("events")) .and(warp::path("events"))
.and(warp::path::end()) .and(warp::path::end())
@ -2385,7 +2365,7 @@ pub fn serve<T: BeaconChainTypes>(
|topics: api_types::EventQuery, chain: Arc<BeaconChain<T>>| { |topics: api_types::EventQuery, chain: Arc<BeaconChain<T>>| {
blocking_task(move || { blocking_task(move || {
// for each topic subscribed spawn a new subscription // for each topic subscribed spawn a new subscription
let mut stream_map = StreamMap::with_capacity(topics.topics.0.len()); let mut receivers = Vec::with_capacity(topics.topics.0.len());
if let Some(event_handler) = chain.event_handler.as_ref() { if let Some(event_handler) = chain.event_handler.as_ref() {
for topic in topics.topics.0.clone() { for topic in topics.topics.0.clone() {
@ -2402,7 +2382,24 @@ pub fn serve<T: BeaconChainTypes>(
event_handler.subscribe_finalized() event_handler.subscribe_finalized()
} }
}; };
stream_map.insert(topic.to_string(), Box::pin(receiver.into_stream()));
receivers.push(broadcast_stream::BroadcastStream::new(receiver).map(
|msg| {
match msg {
Ok(data) => Event::default()
.event(data.topic_name())
.json_data(data)
.map_err(|e| {
warp_utils::reject::server_sent_event_error(
format!("{:?}", e),
)
}),
Err(e) => Err(warp_utils::reject::server_sent_event_error(
format!("{:?}", e),
)),
}
},
));
} }
} else { } else {
return Err(warp_utils::reject::custom_server_error( return Err(warp_utils::reject::custom_server_error(
@ -2410,11 +2407,9 @@ pub fn serve<T: BeaconChainTypes>(
)); ));
} }
let stream = merge_streams(stream_map); let s = futures::stream::select_all(receivers);
Ok::<_, warp::Rejection>(warp::sse::reply( Ok::<_, warp::Rejection>(warp::sse::reply(warp::sse::keep_alive().stream(s)))
warp::sse::keep_alive().stream(stream),
))
}) })
}, },
); );

View File

@ -15,6 +15,7 @@ use eth2_libp2p::{
Enr, EnrExt, NetworkGlobals, PeerId, Enr, EnrExt, NetworkGlobals, PeerId,
}; };
use futures::stream::{Stream, StreamExt}; use futures::stream::{Stream, StreamExt};
use futures::FutureExt;
use http_api::{Config, Context}; use http_api::{Config, Context};
use network::NetworkMessage; use network::NetworkMessage;
use state_processing::per_slot_processing; use state_processing::per_slot_processing;
@ -25,7 +26,6 @@ use std::sync::Arc;
use tokio::sync::mpsc; use tokio::sync::mpsc;
use tokio::sync::oneshot; use tokio::sync::oneshot;
use tokio::time::Duration; use tokio::time::Duration;
use tokio_compat_02::FutureExt;
use tree_hash::TreeHash; use tree_hash::TreeHash;
use types::{ use types::{
test_utils::generate_deterministic_keypairs, AggregateSignature, BeaconState, BitList, Domain, test_utils::generate_deterministic_keypairs, AggregateSignature, BeaconState, BitList, Domain,
@ -933,7 +933,7 @@ impl ApiTester {
self.client.post_beacon_blocks(next_block).await.unwrap(); self.client.post_beacon_blocks(next_block).await.unwrap();
assert!( assert!(
self.network_rx.try_recv().is_ok(), self.network_rx.recv().await.is_some(),
"valid blocks should be sent to network" "valid blocks should be sent to network"
); );
@ -947,7 +947,7 @@ impl ApiTester {
assert!(self.client.post_beacon_blocks(&next_block).await.is_err()); assert!(self.client.post_beacon_blocks(&next_block).await.is_err());
assert!( assert!(
self.network_rx.try_recv().is_ok(), self.network_rx.recv().await.is_some(),
"invalid blocks should be sent to network" "invalid blocks should be sent to network"
); );
@ -997,7 +997,7 @@ impl ApiTester {
.unwrap(); .unwrap();
assert!( assert!(
self.network_rx.try_recv().is_ok(), self.network_rx.recv().await.is_some(),
"valid attestation should be sent to network" "valid attestation should be sent to network"
); );
@ -1034,7 +1034,7 @@ impl ApiTester {
} }
assert!( assert!(
self.network_rx.try_recv().is_ok(), self.network_rx.recv().await.is_some(),
"if some attestations are valid, we should send them to the network" "if some attestations are valid, we should send them to the network"
); );
@ -1064,7 +1064,7 @@ impl ApiTester {
.unwrap(); .unwrap();
assert!( assert!(
self.network_rx.try_recv().is_ok(), self.network_rx.recv().await.is_some(),
"valid attester slashing should be sent to network" "valid attester slashing should be sent to network"
); );
@ -1081,7 +1081,7 @@ impl ApiTester {
.unwrap_err(); .unwrap_err();
assert!( assert!(
self.network_rx.try_recv().is_err(), self.network_rx.recv().now_or_never().is_none(),
"invalid attester slashing should not be sent to network" "invalid attester slashing should not be sent to network"
); );
@ -1110,7 +1110,7 @@ impl ApiTester {
.unwrap(); .unwrap();
assert!( assert!(
self.network_rx.try_recv().is_ok(), self.network_rx.recv().await.is_some(),
"valid proposer slashing should be sent to network" "valid proposer slashing should be sent to network"
); );
@ -1127,7 +1127,7 @@ impl ApiTester {
.unwrap_err(); .unwrap_err();
assert!( assert!(
self.network_rx.try_recv().is_err(), self.network_rx.recv().now_or_never().is_none(),
"invalid proposer slashing should not be sent to network" "invalid proposer slashing should not be sent to network"
); );
@ -1156,7 +1156,7 @@ impl ApiTester {
.unwrap(); .unwrap();
assert!( assert!(
self.network_rx.try_recv().is_ok(), self.network_rx.recv().await.is_some(),
"valid exit should be sent to network" "valid exit should be sent to network"
); );
@ -1173,7 +1173,7 @@ impl ApiTester {
.unwrap_err(); .unwrap_err();
assert!( assert!(
self.network_rx.try_recv().is_err(), self.network_rx.recv().now_or_never().is_none(),
"invalid exit should not be sent to network" "invalid exit should not be sent to network"
); );
@ -1822,7 +1822,7 @@ impl ApiTester {
.await .await
.unwrap(); .unwrap();
assert!(self.network_rx.try_recv().is_ok()); assert!(self.network_rx.recv().await.is_some());
self self
} }
@ -1837,7 +1837,7 @@ impl ApiTester {
.await .await
.unwrap_err(); .unwrap_err();
assert!(self.network_rx.try_recv().is_err()); assert!(self.network_rx.recv().now_or_never().is_none());
self self
} }
@ -1856,7 +1856,7 @@ impl ApiTester {
.await .await
.unwrap(); .unwrap();
self.network_rx.try_recv().unwrap(); self.network_rx.recv().now_or_never().unwrap();
self self
} }
@ -2127,83 +2127,71 @@ async fn poll_events<S: Stream<Item = Result<EventKind<T>, eth2::Error>> + Unpin
#[tokio::test(flavor = "multi_thread", worker_threads = 2)] #[tokio::test(flavor = "multi_thread", worker_threads = 2)]
async fn get_events() { async fn get_events() {
ApiTester::new().test_get_events().compat().await; ApiTester::new().test_get_events().await;
} }
#[tokio::test(flavor = "multi_thread", worker_threads = 2)] #[tokio::test(flavor = "multi_thread", worker_threads = 2)]
async fn get_events_from_genesis() { async fn get_events_from_genesis() {
ApiTester::new_from_genesis() ApiTester::new_from_genesis()
.test_get_events_from_genesis() .test_get_events_from_genesis()
.compat()
.await; .await;
} }
#[tokio::test(flavor = "multi_thread", worker_threads = 2)] #[tokio::test(flavor = "multi_thread", worker_threads = 2)]
async fn beacon_get() { async fn beacon_get() {
async { ApiTester::new()
ApiTester::new() .test_beacon_genesis()
.test_beacon_genesis() .await
.await .test_beacon_states_root()
.test_beacon_states_root() .await
.await .test_beacon_states_fork()
.test_beacon_states_fork() .await
.await .test_beacon_states_finality_checkpoints()
.test_beacon_states_finality_checkpoints() .await
.await .test_beacon_states_validators()
.test_beacon_states_validators() .await
.await .test_beacon_states_validator_balances()
.test_beacon_states_validator_balances() .await
.await .test_beacon_states_committees()
.test_beacon_states_committees() .await
.await .test_beacon_states_validator_id()
.test_beacon_states_validator_id() .await
.await .test_beacon_headers_all_slots()
.test_beacon_headers_all_slots() .await
.await .test_beacon_headers_all_parents()
.test_beacon_headers_all_parents() .await
.await .test_beacon_headers_block_id()
.test_beacon_headers_block_id() .await
.await .test_beacon_blocks()
.test_beacon_blocks() .await
.await .test_beacon_blocks_attestations()
.test_beacon_blocks_attestations() .await
.await .test_beacon_blocks_root()
.test_beacon_blocks_root() .await
.await .test_get_beacon_pool_attestations()
.test_get_beacon_pool_attestations() .await
.await .test_get_beacon_pool_attester_slashings()
.test_get_beacon_pool_attester_slashings() .await
.await .test_get_beacon_pool_proposer_slashings()
.test_get_beacon_pool_proposer_slashings() .await
.await .test_get_beacon_pool_voluntary_exits()
.test_get_beacon_pool_voluntary_exits() .await;
.await;
}
.compat()
.await;
} }
#[tokio::test(flavor = "multi_thread", worker_threads = 2)] #[tokio::test(flavor = "multi_thread", worker_threads = 2)]
async fn post_beacon_blocks_valid() { async fn post_beacon_blocks_valid() {
ApiTester::new() ApiTester::new().test_post_beacon_blocks_valid().await;
.test_post_beacon_blocks_valid()
.compat()
.await;
} }
#[tokio::test(flavor = "multi_thread", worker_threads = 2)] #[tokio::test(flavor = "multi_thread", worker_threads = 2)]
async fn post_beacon_blocks_invalid() { async fn post_beacon_blocks_invalid() {
ApiTester::new() ApiTester::new().test_post_beacon_blocks_invalid().await;
.test_post_beacon_blocks_invalid()
.compat()
.await;
} }
#[tokio::test(flavor = "multi_thread", worker_threads = 2)] #[tokio::test(flavor = "multi_thread", worker_threads = 2)]
async fn beacon_pools_post_attestations_valid() { async fn beacon_pools_post_attestations_valid() {
ApiTester::new() ApiTester::new()
.test_post_beacon_pool_attestations_valid() .test_post_beacon_pool_attestations_valid()
.compat()
.await; .await;
} }
@ -2211,7 +2199,6 @@ async fn beacon_pools_post_attestations_valid() {
async fn beacon_pools_post_attestations_invalid() { async fn beacon_pools_post_attestations_invalid() {
ApiTester::new() ApiTester::new()
.test_post_beacon_pool_attestations_invalid() .test_post_beacon_pool_attestations_invalid()
.compat()
.await; .await;
} }
@ -2219,7 +2206,6 @@ async fn beacon_pools_post_attestations_invalid() {
async fn beacon_pools_post_attester_slashings_valid() { async fn beacon_pools_post_attester_slashings_valid() {
ApiTester::new() ApiTester::new()
.test_post_beacon_pool_attester_slashings_valid() .test_post_beacon_pool_attester_slashings_valid()
.compat()
.await; .await;
} }
@ -2227,7 +2213,6 @@ async fn beacon_pools_post_attester_slashings_valid() {
async fn beacon_pools_post_attester_slashings_invalid() { async fn beacon_pools_post_attester_slashings_invalid() {
ApiTester::new() ApiTester::new()
.test_post_beacon_pool_attester_slashings_invalid() .test_post_beacon_pool_attester_slashings_invalid()
.compat()
.await; .await;
} }
@ -2235,7 +2220,6 @@ async fn beacon_pools_post_attester_slashings_invalid() {
async fn beacon_pools_post_proposer_slashings_valid() { async fn beacon_pools_post_proposer_slashings_valid() {
ApiTester::new() ApiTester::new()
.test_post_beacon_pool_proposer_slashings_valid() .test_post_beacon_pool_proposer_slashings_valid()
.compat()
.await; .await;
} }
@ -2243,7 +2227,6 @@ async fn beacon_pools_post_proposer_slashings_valid() {
async fn beacon_pools_post_proposer_slashings_invalid() { async fn beacon_pools_post_proposer_slashings_invalid() {
ApiTester::new() ApiTester::new()
.test_post_beacon_pool_proposer_slashings_invalid() .test_post_beacon_pool_proposer_slashings_invalid()
.compat()
.await; .await;
} }
@ -2251,7 +2234,6 @@ async fn beacon_pools_post_proposer_slashings_invalid() {
async fn beacon_pools_post_voluntary_exits_valid() { async fn beacon_pools_post_voluntary_exits_valid() {
ApiTester::new() ApiTester::new()
.test_post_beacon_pool_voluntary_exits_valid() .test_post_beacon_pool_voluntary_exits_valid()
.compat()
.await; .await;
} }
@ -2259,7 +2241,6 @@ async fn beacon_pools_post_voluntary_exits_valid() {
async fn beacon_pools_post_voluntary_exits_invalid() { async fn beacon_pools_post_voluntary_exits_invalid() {
ApiTester::new() ApiTester::new()
.test_post_beacon_pool_voluntary_exits_invalid() .test_post_beacon_pool_voluntary_exits_invalid()
.compat()
.await; .await;
} }
@ -2267,13 +2248,10 @@ async fn beacon_pools_post_voluntary_exits_invalid() {
async fn config_get() { async fn config_get() {
ApiTester::new() ApiTester::new()
.test_get_config_fork_schedule() .test_get_config_fork_schedule()
.compat()
.await .await
.test_get_config_spec() .test_get_config_spec()
.compat()
.await .await
.test_get_config_deposit_contract() .test_get_config_deposit_contract()
.compat()
.await; .await;
} }
@ -2281,10 +2259,8 @@ async fn config_get() {
async fn debug_get() { async fn debug_get() {
ApiTester::new() ApiTester::new()
.test_get_debug_beacon_states() .test_get_debug_beacon_states()
.compat()
.await .await
.test_get_debug_beacon_heads() .test_get_debug_beacon_heads()
.compat()
.await; .await;
} }
@ -2292,34 +2268,24 @@ async fn debug_get() {
async fn node_get() { async fn node_get() {
ApiTester::new() ApiTester::new()
.test_get_node_version() .test_get_node_version()
.compat()
.await .await
.test_get_node_syncing() .test_get_node_syncing()
.compat()
.await .await
.test_get_node_identity() .test_get_node_identity()
.compat()
.await .await
.test_get_node_health() .test_get_node_health()
.compat()
.await .await
.test_get_node_peers_by_id() .test_get_node_peers_by_id()
.compat()
.await .await
.test_get_node_peers() .test_get_node_peers()
.compat()
.await .await
.test_get_node_peer_count() .test_get_node_peer_count()
.compat()
.await; .await;
} }
#[tokio::test(flavor = "multi_thread", worker_threads = 2)] #[tokio::test(flavor = "multi_thread", worker_threads = 2)]
async fn get_validator_duties_attester() { async fn get_validator_duties_attester() {
ApiTester::new() ApiTester::new().test_get_validator_duties_attester().await;
.test_get_validator_duties_attester()
.compat()
.await;
} }
#[tokio::test(flavor = "multi_thread", worker_threads = 2)] #[tokio::test(flavor = "multi_thread", worker_threads = 2)]
@ -2327,16 +2293,12 @@ async fn get_validator_duties_attester_with_skip_slots() {
ApiTester::new() ApiTester::new()
.skip_slots(E::slots_per_epoch() * 2) .skip_slots(E::slots_per_epoch() * 2)
.test_get_validator_duties_attester() .test_get_validator_duties_attester()
.compat()
.await; .await;
} }
#[tokio::test(flavor = "multi_thread", worker_threads = 2)] #[tokio::test(flavor = "multi_thread", worker_threads = 2)]
async fn get_validator_duties_proposer() { async fn get_validator_duties_proposer() {
ApiTester::new() ApiTester::new().test_get_validator_duties_proposer().await;
.test_get_validator_duties_proposer()
.compat()
.await;
} }
#[tokio::test(flavor = "multi_thread", worker_threads = 2)] #[tokio::test(flavor = "multi_thread", worker_threads = 2)]
@ -2344,13 +2306,12 @@ async fn get_validator_duties_proposer_with_skip_slots() {
ApiTester::new() ApiTester::new()
.skip_slots(E::slots_per_epoch() * 2) .skip_slots(E::slots_per_epoch() * 2)
.test_get_validator_duties_proposer() .test_get_validator_duties_proposer()
.compat()
.await; .await;
} }
#[tokio::test(flavor = "multi_thread", worker_threads = 2)] #[tokio::test(flavor = "multi_thread", worker_threads = 2)]
async fn block_production() { async fn block_production() {
ApiTester::new().test_block_production().compat().await; ApiTester::new().test_block_production().await;
} }
#[tokio::test(flavor = "multi_thread", worker_threads = 2)] #[tokio::test(flavor = "multi_thread", worker_threads = 2)]
@ -2358,16 +2319,12 @@ async fn block_production_with_skip_slots() {
ApiTester::new() ApiTester::new()
.skip_slots(E::slots_per_epoch() * 2) .skip_slots(E::slots_per_epoch() * 2)
.test_block_production() .test_block_production()
.compat()
.await; .await;
} }
#[tokio::test(flavor = "multi_thread", worker_threads = 2)] #[tokio::test(flavor = "multi_thread", worker_threads = 2)]
async fn get_validator_attestation_data() { async fn get_validator_attestation_data() {
ApiTester::new() ApiTester::new().test_get_validator_attestation_data().await;
.test_get_validator_attestation_data()
.compat()
.await;
} }
#[tokio::test(flavor = "multi_thread", worker_threads = 2)] #[tokio::test(flavor = "multi_thread", worker_threads = 2)]
@ -2375,7 +2332,6 @@ async fn get_validator_attestation_data_with_skip_slots() {
ApiTester::new() ApiTester::new()
.skip_slots(E::slots_per_epoch() * 2) .skip_slots(E::slots_per_epoch() * 2)
.test_get_validator_attestation_data() .test_get_validator_attestation_data()
.compat()
.await; .await;
} }
@ -2383,7 +2339,6 @@ async fn get_validator_attestation_data_with_skip_slots() {
async fn get_validator_aggregate_attestation() { async fn get_validator_aggregate_attestation() {
ApiTester::new() ApiTester::new()
.test_get_validator_aggregate_attestation() .test_get_validator_aggregate_attestation()
.compat()
.await; .await;
} }
@ -2392,7 +2347,6 @@ async fn get_validator_aggregate_attestation_with_skip_slots() {
ApiTester::new() ApiTester::new()
.skip_slots(E::slots_per_epoch() * 2) .skip_slots(E::slots_per_epoch() * 2)
.test_get_validator_aggregate_attestation() .test_get_validator_aggregate_attestation()
.compat()
.await; .await;
} }
@ -2400,7 +2354,6 @@ async fn get_validator_aggregate_attestation_with_skip_slots() {
async fn get_validator_aggregate_and_proofs_valid() { async fn get_validator_aggregate_and_proofs_valid() {
ApiTester::new() ApiTester::new()
.test_get_validator_aggregate_and_proofs_valid() .test_get_validator_aggregate_and_proofs_valid()
.compat()
.await; .await;
} }
@ -2409,7 +2362,6 @@ async fn get_validator_aggregate_and_proofs_valid_with_skip_slots() {
ApiTester::new() ApiTester::new()
.skip_slots(E::slots_per_epoch() * 2) .skip_slots(E::slots_per_epoch() * 2)
.test_get_validator_aggregate_and_proofs_valid() .test_get_validator_aggregate_and_proofs_valid()
.compat()
.await; .await;
} }
@ -2417,7 +2369,6 @@ async fn get_validator_aggregate_and_proofs_valid_with_skip_slots() {
async fn get_validator_aggregate_and_proofs_invalid() { async fn get_validator_aggregate_and_proofs_invalid() {
ApiTester::new() ApiTester::new()
.test_get_validator_aggregate_and_proofs_invalid() .test_get_validator_aggregate_and_proofs_invalid()
.compat()
.await; .await;
} }
@ -2426,7 +2377,6 @@ async fn get_validator_aggregate_and_proofs_invalid_with_skip_slots() {
ApiTester::new() ApiTester::new()
.skip_slots(E::slots_per_epoch() * 2) .skip_slots(E::slots_per_epoch() * 2)
.test_get_validator_aggregate_and_proofs_invalid() .test_get_validator_aggregate_and_proofs_invalid()
.compat()
.await; .await;
} }
@ -2434,7 +2384,6 @@ async fn get_validator_aggregate_and_proofs_invalid_with_skip_slots() {
async fn get_validator_beacon_committee_subscriptions() { async fn get_validator_beacon_committee_subscriptions() {
ApiTester::new() ApiTester::new()
.test_get_validator_beacon_committee_subscriptions() .test_get_validator_beacon_committee_subscriptions()
.compat()
.await; .await;
} }
@ -2442,33 +2391,23 @@ async fn get_validator_beacon_committee_subscriptions() {
async fn lighthouse_endpoints() { async fn lighthouse_endpoints() {
ApiTester::new() ApiTester::new()
.test_get_lighthouse_health() .test_get_lighthouse_health()
.compat()
.await .await
.test_get_lighthouse_syncing() .test_get_lighthouse_syncing()
.compat()
.await .await
.test_get_lighthouse_proto_array() .test_get_lighthouse_proto_array()
.compat()
.await .await
.test_get_lighthouse_validator_inclusion() .test_get_lighthouse_validator_inclusion()
.compat()
.await .await
.test_get_lighthouse_validator_inclusion_global() .test_get_lighthouse_validator_inclusion_global()
.compat()
.await .await
.test_get_lighthouse_eth1_syncing() .test_get_lighthouse_eth1_syncing()
.compat()
.await .await
.test_get_lighthouse_eth1_block_cache() .test_get_lighthouse_eth1_block_cache()
.compat()
.await .await
.test_get_lighthouse_eth1_deposit_cache() .test_get_lighthouse_eth1_deposit_cache()
.compat()
.await .await
.test_get_lighthouse_beacon_states_ssz() .test_get_lighthouse_beacon_states_ssz()
.compat()
.await .await
.test_get_lighthouse_staking() .test_get_lighthouse_staking()
.compat()
.await; .await;
} }

View File

@ -8,7 +8,7 @@ edition = "2018"
[dependencies] [dependencies]
prometheus = "0.11.0" prometheus = "0.11.0"
warp = { git = "https://github.com/sigp/warp ", branch = "lighthouse" } warp = "0.3.0"
serde = { version = "1.0.116", features = ["derive"] } serde = { version = "1.0.116", features = ["derive"] }
slog = "2.5.2" slog = "2.5.2"
beacon_chain = { path = "../beacon_chain" } beacon_chain = { path = "../beacon_chain" }
@ -22,8 +22,7 @@ lighthouse_version = { path = "../../common/lighthouse_version" }
warp_utils = { path = "../../common/warp_utils" } warp_utils = { path = "../../common/warp_utils" }
[dev-dependencies] [dev-dependencies]
tokio = { version = "0.3.2", features = ["sync"] } tokio = { version = "1.1.0", features = ["sync"] }
reqwest = { version = "0.10.8", features = ["json"] } reqwest = { version = "0.11.0", features = ["json"] }
environment = { path = "../../lighthouse/environment" } environment = { path = "../../lighthouse/environment" }
types = { path = "../../consensus/types" } types = { path = "../../consensus/types" }
tokio-compat-02 = "0.1"

View File

@ -5,7 +5,6 @@ use reqwest::StatusCode;
use std::net::Ipv4Addr; use std::net::Ipv4Addr;
use std::sync::Arc; use std::sync::Arc;
use tokio::sync::oneshot; use tokio::sync::oneshot;
use tokio_compat_02::FutureExt;
use types::MainnetEthSpec; use types::MainnetEthSpec;
type Context = http_metrics::Context<EphemeralHarnessType<MainnetEthSpec>>; type Context = http_metrics::Context<EphemeralHarnessType<MainnetEthSpec>>;
@ -46,6 +45,5 @@ async fn returns_200_ok() {
assert_eq!(reqwest::get(&url).await.unwrap().status(), StatusCode::OK); assert_eq!(reqwest::get(&url).await.unwrap().status(), StatusCode::OK);
} }
.compat()
.await .await
} }

View File

@ -30,12 +30,13 @@ eth2_ssz_types = { path = "../../consensus/ssz_types" }
tree_hash = "0.1.1" tree_hash = "0.1.1"
futures = "0.3.7" futures = "0.3.7"
error-chain = "0.12.4" error-chain = "0.12.4"
tokio = { version = "0.3.2", features = ["full"] } tokio = { version = "1.1.0", features = ["full"] }
tokio-stream = "0.1.2"
parking_lot = "0.11.0" parking_lot = "0.11.0"
smallvec = "1.6.1" smallvec = "1.6.1"
rand = "0.7.3" rand = "0.7.3"
fnv = "1.0.7" fnv = "1.0.7"
rlp = "0.4.6" rlp = "0.5.0"
lazy_static = "1.4.0" lazy_static = "1.4.0"
lighthouse_metrics = { path = "../../common/lighthouse_metrics" } lighthouse_metrics = { path = "../../common/lighthouse_metrics" }
task_executor = { path = "../../common/task_executor" } task_executor = { path = "../../common/task_executor" }

View File

@ -38,7 +38,7 @@ impl StoreItem for PersistedDht {
} }
fn as_store_bytes(&self) -> Vec<u8> { fn as_store_bytes(&self) -> Vec<u8> {
rlp::encode_list(&self.enrs) rlp::encode_list(&self.enrs).to_vec()
} }
fn from_store_bytes(bytes: &[u8]) -> Result<Self, StoreError> { fn from_store_bytes(bytes: &[u8]) -> Result<Self, StoreError> {

View File

@ -19,6 +19,7 @@ use processor::Processor;
use slog::{debug, o, trace}; use slog::{debug, o, trace};
use std::sync::Arc; use std::sync::Arc;
use tokio::sync::mpsc; use tokio::sync::mpsc;
use tokio_stream::wrappers::UnboundedReceiverStream;
use types::EthSpec; use types::EthSpec;
/// Handles messages received from the network and client and organises syncing. This /// Handles messages received from the network and client and organises syncing. This
@ -101,7 +102,7 @@ impl<T: BeaconChainTypes> Router<T> {
executor.spawn( executor.spawn(
async move { async move {
debug!(log, "Network message router started"); debug!(log, "Network message router started");
handler_recv UnboundedReceiverStream::new(handler_recv)
.for_each(move |msg| future::ready(handler.handle_message(msg))) .for_each(move |msg| future::ready(handler.handle_message(msg)))
.await; .await;
}, },

View File

@ -266,7 +266,7 @@ fn spawn_service<T: BeaconChainTypes>(
info!(service.log, "Network service shutdown"); info!(service.log, "Network service shutdown");
return; return;
} }
_ = service.metrics_update.next() => { _ = service.metrics_update.tick() => {
// update various network metrics // update various network metrics
metric_update_counter +=1; metric_update_counter +=1;
if metric_update_counter % T::EthSpec::default_spec().seconds_per_slot == 0 { if metric_update_counter % T::EthSpec::default_spec().seconds_per_slot == 0 {
@ -283,7 +283,7 @@ fn spawn_service<T: BeaconChainTypes>(
metrics::update_sync_metrics(&service.network_globals); metrics::update_sync_metrics(&service.network_globals);
} }
_ = service.gossipsub_parameter_update.next() => { _ = service.gossipsub_parameter_update.tick() => {
if let Ok(slot) = service.beacon_chain.slot() { if let Ok(slot) = service.beacon_chain.slot() {
if let Some(active_validators) = service.beacon_chain.with_head(|head| { if let Some(active_validators) = service.beacon_chain.with_head(|head| {
Ok::<_, BeaconChainError>( Ok::<_, BeaconChainError>(

View File

@ -1,14 +1,11 @@
#![cfg(test)] #![cfg(test)]
//TODO: Drop compat library once reqwest and other libraries update to tokio 0.3
use beacon_chain::StateSkipConfig; use beacon_chain::StateSkipConfig;
use node_test_rig::{ use node_test_rig::{
environment::{Environment, EnvironmentBuilder}, environment::{Environment, EnvironmentBuilder},
eth2::types::StateId, eth2::types::StateId,
testing_client_config, LocalBeaconNode, testing_client_config, LocalBeaconNode,
}; };
use tokio_compat_02::FutureExt;
use types::{EthSpec, MinimalEthSpec, Slot}; use types::{EthSpec, MinimalEthSpec, Slot};
fn env_builder() -> EnvironmentBuilder<MinimalEthSpec> { fn env_builder() -> EnvironmentBuilder<MinimalEthSpec> {
@ -44,11 +41,7 @@ fn http_server_genesis_state() {
let api_state = env let api_state = env
.runtime() .runtime()
.block_on( .block_on(remote_node.get_debug_beacon_states(StateId::Slot(Slot::new(0))))
remote_node
.get_debug_beacon_states(StateId::Slot(Slot::new(0)))
.compat(),
)
.expect("should fetch state from http api") .expect("should fetch state from http api")
.unwrap() .unwrap()
.data; .data;

View File

@ -8,7 +8,7 @@ edition = "2018"
beacon_chain = { path = "../beacon_chain" } beacon_chain = { path = "../beacon_chain" }
types = { path = "../../consensus/types" } types = { path = "../../consensus/types" }
slot_clock = { path = "../../common/slot_clock" } slot_clock = { path = "../../common/slot_clock" }
tokio = { version = "0.3.2", features = ["full"] } tokio = { version = "1.1.0", features = ["full"] }
slog = "2.5.2" slog = "2.5.2"
parking_lot = "0.11.0" parking_lot = "0.11.0"
futures = "0.3.7" futures = "0.3.7"

View File

@ -3,7 +3,6 @@
//! This service allows task execution on the beacon node for various functionality. //! This service allows task execution on the beacon node for various functionality.
use beacon_chain::{BeaconChain, BeaconChainTypes}; use beacon_chain::{BeaconChain, BeaconChainTypes};
use futures::stream::StreamExt;
use slog::info; use slog::info;
use slot_clock::SlotClock; use slot_clock::SlotClock;
use std::sync::Arc; use std::sync::Arc;
@ -26,7 +25,8 @@ pub fn spawn_timer<T: BeaconChainTypes>(
// Warning: `interval_at` panics if `seconds_per_slot` = 0. // Warning: `interval_at` panics if `seconds_per_slot` = 0.
let mut interval = interval_at(start_instant, Duration::from_secs(seconds_per_slot)); let mut interval = interval_at(start_instant, Duration::from_secs(seconds_per_slot));
let timer_future = async move { let timer_future = async move {
while interval.next().await.is_some() { loop {
interval.tick().await;
beacon_chain.per_slot_task(); beacon_chain.per_slot_task();
} }
}; };

View File

@ -13,7 +13,7 @@ eth2_network_config = { path = "../common/eth2_network_config" }
eth2_ssz = "0.1.2" eth2_ssz = "0.1.2"
slog = "2.5.2" slog = "2.5.2"
sloggers = "1.0.1" sloggers = "1.0.1"
tokio = "0.3.2" tokio = "1.1.0"
log = "0.4.11" log = "0.4.11"
slog-term = "2.6.0" slog-term = "2.6.0"
logging = { path = "../common/logging" } logging = { path = "../common/logging" }

View File

@ -5,7 +5,6 @@ use eth2_libp2p::{
discv5::{enr::NodeId, Discv5, Discv5ConfigBuilder, Discv5Event}, discv5::{enr::NodeId, Discv5, Discv5ConfigBuilder, Discv5Event},
EnrExt, Eth2Enr, EnrExt, Eth2Enr,
}; };
use futures::prelude::*;
use slog::info; use slog::info;
use types::EthSpec; use types::EthSpec;
@ -78,7 +77,7 @@ pub async fn run<T: EthSpec>(config: BootNodeConfig<T>, log: slog::Logger) {
// listen for events // listen for events
loop { loop {
tokio::select! { tokio::select! {
_ = metric_interval.next() => { _ = metric_interval.tick() => {
// display server metrics // display server metrics
let metrics = discv5.metrics(); let metrics = discv5.metrics();
info!(log, "Server metrics"; "connected_peers" => discv5.connected_peers(), "active_sessions" => metrics.active_sessions, "requests/s" => format!("{:.2}", metrics.unsolicited_requests_per_second)); info!(log, "Server metrics"; "connected_peers" => discv5.connected_peers(), "active_sessions" => metrics.active_sessions, "requests/s" => format!("{:.2}", metrics.unsolicited_requests_per_second));

View File

@ -7,7 +7,7 @@ edition = "2018"
build = "build.rs" build = "build.rs"
[build-dependencies] [build-dependencies]
reqwest = { version = "0.10.8", features = ["blocking", "json", "native-tls-vendored"] } reqwest = { version = "0.11.0", features = ["blocking", "json", "native-tls-vendored"] }
serde_json = "1.0.58" serde_json = "1.0.58"
sha2 = "0.9.1" sha2 = "0.9.1"
hex = "0.4.2" hex = "0.4.2"

View File

@ -11,7 +11,7 @@ serde = { version = "1.0.116", features = ["derive"] }
serde_json = "1.0.58" serde_json = "1.0.58"
types = { path = "../../consensus/types" } types = { path = "../../consensus/types" }
hex = "0.4.2" hex = "0.4.2"
reqwest = { version = "0.10.8", features = ["json","stream"] } reqwest = { version = "0.11.0", features = ["json","stream"] }
eth2_libp2p = { path = "../../beacon_node/eth2_libp2p" } eth2_libp2p = { path = "../../beacon_node/eth2_libp2p" }
proto_array = { path = "../../consensus/proto_array", optional = true } proto_array = { path = "../../consensus/proto_array", optional = true }
serde_utils = { path = "../../consensus/serde_utils" } serde_utils = { path = "../../consensus/serde_utils" }
@ -19,7 +19,7 @@ zeroize = { version = "1.1.1", features = ["zeroize_derive"] }
eth2_keystore = { path = "../../crypto/eth2_keystore" } eth2_keystore = { path = "../../crypto/eth2_keystore" }
libsecp256k1 = "0.3.5" libsecp256k1 = "0.3.5"
ring = "0.16.19" ring = "0.16.19"
bytes = "0.5.6" bytes = "1.0.1"
account_utils = { path = "../../common/account_utils" } account_utils = { path = "../../common/account_utils" }
eth2_ssz = "0.1.2" eth2_ssz = "0.1.2"
eth2_ssz_derive = "0.1.0" eth2_ssz_derive = "0.1.0"

View File

@ -682,6 +682,16 @@ pub enum EventKind<T: EthSpec> {
} }
impl<T: EthSpec> EventKind<T> { impl<T: EthSpec> EventKind<T> {
pub fn topic_name(&self) -> &str {
match self {
EventKind::Head(_) => "head",
EventKind::Block(_) => "block",
EventKind::Attestation(_) => "attestation",
EventKind::VoluntaryExit(_) => "voluntary_exit",
EventKind::FinalizedCheckpoint(_) => "finalized_checkpoint",
}
}
pub fn from_sse_bytes(message: &[u8]) -> Result<Self, ServerError> { pub fn from_sse_bytes(message: &[u8]) -> Result<Self, ServerError> {
let s = from_utf8(message) let s = from_utf8(message)
.map_err(|e| ServerError::InvalidServerSentEvent(format!("{:?}", e)))?; .map_err(|e| ServerError::InvalidServerSentEvent(format!("{:?}", e)))?;

View File

@ -19,4 +19,4 @@ serde_yaml = "0.8.13"
types = { path = "../../consensus/types"} types = { path = "../../consensus/types"}
eth2_ssz = "0.1.2" eth2_ssz = "0.1.2"
eth2_config = { path = "../eth2_config"} eth2_config = { path = "../eth2_config"}
enr = { version = "0.4.0", features = ["ed25519", "k256"] } enr = { version = "0.5.0", features = ["ed25519", "k256"] }

View File

@ -6,7 +6,7 @@ edition = "2018"
[dependencies] [dependencies]
futures = "0.3.7" futures = "0.3.7"
tokio-util = { version = "0.5.0", features = ["time"] } tokio-util = { version = "0.6.2", features = ["time"] }
[dev-dependencies] [dev-dependencies]
tokio = { version = "0.3.2", features = ["time", "rt-multi-thread", "macros"] } tokio = { version = "1.1.0", features = ["time", "rt-multi-thread", "macros"] }

View File

@ -9,7 +9,7 @@ rand = "0.7.3"
remote_signer_test = { path = "../../testing/remote_signer_test" } remote_signer_test = { path = "../../testing/remote_signer_test" }
[dependencies] [dependencies]
reqwest = { version = "0.10.8", features = ["json"] } reqwest = { version = "0.11.0", features = ["json"] }
serde = { version = "1.0.116", features = ["derive"] } serde = { version = "1.0.116", features = ["derive"] }
tokio = { version = "0.3.5", features = ["time"] } tokio = { version = "1.1.0", features = ["time"] }
types = { path = "../../consensus/types" } types = { path = "../../consensus/types" }

View File

@ -14,8 +14,8 @@ state_processing = { path = "../../consensus/state_processing" }
bls = { path = "../../crypto/bls" } bls = { path = "../../crypto/bls" }
serde = { version = "1.0.110", features = ["derive"] } serde = { version = "1.0.110", features = ["derive"] }
rayon = "1.3.0" rayon = "1.3.0"
hyper = "0.13.5" hyper = "0.14.4"
tokio = { version = "0.3.5", features = ["sync"] } tokio = { version = "1.1.0", features = ["sync"] }
environment = { path = "../../lighthouse/environment" } environment = { path = "../../lighthouse/environment" }
store = { path = "../../beacon_node/store" } store = { path = "../../beacon_node/store" }
beacon_chain = { path = "../../beacon_node/beacon_chain" } beacon_chain = { path = "../../beacon_node/beacon_chain" }

View File

@ -5,10 +5,9 @@ authors = ["Sigma Prime <contact@sigmaprime.io>"]
edition = "2018" edition = "2018"
[dependencies] [dependencies]
tokio = { version = "0.3.2", features = ["rt"] } tokio = { version = "1.1.0", features = ["rt"] }
slog = "2.5.2" slog = "2.5.2"
futures = "0.3.7" futures = "0.3.7"
exit-future = "0.2.0" exit-future = "0.2.0"
lazy_static = "1.4.0" lazy_static = "1.4.0"
lighthouse_metrics = { path = "../lighthouse_metrics" } lighthouse_metrics = { path = "../lighthouse_metrics" }
tokio-compat-02 = "0.1"

View File

@ -5,7 +5,6 @@ use futures::prelude::*;
use slog::{debug, o, trace}; use slog::{debug, o, trace};
use std::sync::Weak; use std::sync::Weak;
use tokio::runtime::Runtime; use tokio::runtime::Runtime;
use tokio_compat_02::FutureExt;
/// A wrapper over a runtime handle which can spawn async and blocking tasks. /// A wrapper over a runtime handle which can spawn async and blocking tasks.
#[derive(Clone)] #[derive(Clone)]
@ -63,7 +62,7 @@ impl TaskExecutor {
if let Some(int_gauge) = metrics::get_int_gauge(&metrics::ASYNC_TASKS_COUNT, &[name]) { if let Some(int_gauge) = metrics::get_int_gauge(&metrics::ASYNC_TASKS_COUNT, &[name]) {
// Task is shutdown before it completes if `exit` receives // Task is shutdown before it completes if `exit` receives
let int_gauge_1 = int_gauge.clone(); let int_gauge_1 = int_gauge.clone();
let future = future::select(Box::pin(task.compat()), exit).then(move |either| { let future = future::select(Box::pin(task), exit).then(move |either| {
match either { match either {
future::Either::Left(_) => trace!(log, "Async task completed"; "task" => name), future::Either::Left(_) => trace!(log, "Async task completed"; "task" => name),
future::Either::Right(_) => { future::Either::Right(_) => {
@ -99,12 +98,10 @@ impl TaskExecutor {
) { ) {
if let Some(int_gauge) = metrics::get_int_gauge(&metrics::ASYNC_TASKS_COUNT, &[name]) { if let Some(int_gauge) = metrics::get_int_gauge(&metrics::ASYNC_TASKS_COUNT, &[name]) {
let int_gauge_1 = int_gauge.clone(); let int_gauge_1 = int_gauge.clone();
let future = task let future = task.then(move |_| {
.then(move |_| { int_gauge_1.dec();
int_gauge_1.dec(); futures::future::ready(())
futures::future::ready(()) });
})
.compat();
int_gauge.inc(); int_gauge.inc();
if let Some(runtime) = self.runtime.upgrade() { if let Some(runtime) = self.runtime.upgrade() {
@ -186,7 +183,7 @@ impl TaskExecutor {
int_gauge.inc(); int_gauge.inc();
if let Some(runtime) = self.runtime.upgrade() { if let Some(runtime) = self.runtime.upgrade() {
Some(runtime.spawn(future.compat())) Some(runtime.spawn(future))
} else { } else {
debug!(self.log, "Couldn't spawn task. Runtime shutting down"); debug!(self.log, "Couldn't spawn task. Runtime shutting down");
None None

View File

@ -7,14 +7,14 @@ edition = "2018"
# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html
[dependencies] [dependencies]
warp = { git = "https://github.com/sigp/warp ", branch = "lighthouse" } warp = "0.3.0"
eth2 = { path = "../eth2" } eth2 = { path = "../eth2" }
types = { path = "../../consensus/types" } types = { path = "../../consensus/types" }
beacon_chain = { path = "../../beacon_node/beacon_chain" } beacon_chain = { path = "../../beacon_node/beacon_chain" }
state_processing = { path = "../../consensus/state_processing" } state_processing = { path = "../../consensus/state_processing" }
safe_arith = { path = "../../consensus/safe_arith" } safe_arith = { path = "../../consensus/safe_arith" }
serde = { version = "1.0.116", features = ["derive"] } serde = { version = "1.0.116", features = ["derive"] }
tokio = { version = "0.3.2", features = ["sync"] } tokio = { version = "1.1.0", features = ["sync"] }
headers = "0.3.2" headers = "0.3.2"
lighthouse_metrics = { path = "../lighthouse_metrics" } lighthouse_metrics = { path = "../lighthouse_metrics" }
lazy_static = "1.4.0" lazy_static = "1.4.0"

View File

@ -635,12 +635,7 @@ fn invalid_block_future_slot() {
|block, _| { |block, _| {
block.slot = block.slot + 1; block.slot = block.slot + 1;
}, },
|err| { |err| assert_invalid_block!(err, InvalidBlock::FutureSlot { .. }),
assert_invalid_block!(
err,
InvalidBlock::FutureSlot { .. }
)
},
); );
} }

View File

@ -5,7 +5,7 @@ authors = ["Paul Hauner <paul@paulhauner.com>"]
edition = "2018" edition = "2018"
[dependencies] [dependencies]
bytes = "0.6.0" bytes = "1.0.1"
[dev-dependencies] [dev-dependencies]
yaml-rust = "0.4.4" yaml-rust = "0.4.4"

View File

@ -56,7 +56,7 @@ pub trait EthSpec: 'static + Default + Sync + Send + Clone + Debug + PartialEq +
/* /*
* Misc * Misc
*/ */
type MaxValidatorsPerCommittee: Unsigned + Clone + Sync + Send + Debug + PartialEq + Eq; type MaxValidatorsPerCommittee: Unsigned + Clone + Sync + Send + Debug + PartialEq + Eq + Unpin;
/* /*
* Time parameters * Time parameters
*/ */

View File

@ -27,7 +27,7 @@ dirs = "3.0.1"
genesis = { path = "../beacon_node/genesis" } genesis = { path = "../beacon_node/genesis" }
deposit_contract = { path = "../common/deposit_contract" } deposit_contract = { path = "../common/deposit_contract" }
tree_hash = "0.1.1" tree_hash = "0.1.1"
tokio = { version = "0.3.2", features = ["full"] } tokio = { version = "1.1.0", features = ["full"] }
clap_utils = { path = "../common/clap_utils" } clap_utils = { path = "../common/clap_utils" }
eth2_libp2p = { path = "../beacon_node/eth2_libp2p" } eth2_libp2p = { path = "../beacon_node/eth2_libp2p" }
validator_dir = { path = "../common/validator_dir", features = ["insecure_keys"] } validator_dir = { path = "../common/validator_dir", features = ["insecure_keys"] }
@ -37,4 +37,3 @@ lighthouse_version = { path = "../common/lighthouse_version" }
directory = { path = "../common/directory" } directory = { path = "../common/directory" }
account_utils = { path = "../common/account_utils" } account_utils = { path = "../common/account_utils" }
eth2_wallet = { path = "../crypto/eth2_wallet" } eth2_wallet = { path = "../crypto/eth2_wallet" }
tokio-compat-02 = "0.1"

View File

@ -6,7 +6,6 @@ use ssz::Encode;
use std::cmp::max; use std::cmp::max;
use std::path::PathBuf; use std::path::PathBuf;
use std::time::Duration; use std::time::Duration;
use tokio_compat_02::FutureExt;
use types::EthSpec; use types::EthSpec;
/// Interval between polling the eth1 node for genesis information. /// Interval between polling the eth1 node for genesis information.
@ -62,22 +61,19 @@ pub fn run<T: EthSpec>(mut env: Environment<T>, matches: &ArgMatches<'_>) -> Res
let genesis_service = let genesis_service =
Eth1GenesisService::new(config, env.core_context().log().clone(), spec.clone()); Eth1GenesisService::new(config, env.core_context().log().clone(), spec.clone());
env.runtime().block_on( env.runtime().block_on(async {
async { let _ = genesis_service
let _ = genesis_service .wait_for_genesis_state::<T>(ETH1_GENESIS_UPDATE_INTERVAL, spec)
.wait_for_genesis_state::<T>(ETH1_GENESIS_UPDATE_INTERVAL, spec) .await
.await .map(move |genesis_state| {
.map(move |genesis_state| { eth2_network_config.genesis_state_bytes = Some(genesis_state.as_ssz_bytes());
eth2_network_config.genesis_state_bytes = Some(genesis_state.as_ssz_bytes()); eth2_network_config.force_write_to_file(testnet_dir)
eth2_network_config.force_write_to_file(testnet_dir) })
}) .map_err(|e| format!("Failed to find genesis: {}", e))?;
.map_err(|e| format!("Failed to find genesis: {}", e))?;
info!("Starting service to produce genesis BeaconState from eth1"); info!("Starting service to produce genesis BeaconState from eth1");
info!("Connecting to eth1 http endpoints: {:?}", endpoints); info!("Connecting to eth1 http endpoints: {:?}", endpoints);
Ok(()) Ok(())
} })
.compat(),
)
} }

View File

@ -20,7 +20,7 @@ spec-v12 = []
[dependencies] [dependencies]
beacon_node = { "path" = "../beacon_node" } beacon_node = { "path" = "../beacon_node" }
tokio = "0.3.2" tokio = "1.1.0"
slog = { version = "2.5.2", features = ["max_level_trace"] } slog = { version = "2.5.2", features = ["max_level_trace"] }
sloggers = "1.0.1" sloggers = "1.0.1"
types = { "path" = "../consensus/types" } types = { "path" = "../consensus/types" }
@ -41,7 +41,6 @@ directory = { path = "../common/directory" }
lighthouse_version = { path = "../common/lighthouse_version" } lighthouse_version = { path = "../common/lighthouse_version" }
account_utils = { path = "../common/account_utils" } account_utils = { path = "../common/account_utils" }
remote_signer = { "path" = "../remote_signer" } remote_signer = { "path" = "../remote_signer" }
tokio-compat-02 = "0.1"
[dev-dependencies] [dev-dependencies]
tempfile = "3.1.0" tempfile = "3.1.0"

View File

@ -5,7 +5,7 @@ authors = ["Paul Hauner <paul@paulhauner.com>"]
edition = "2018" edition = "2018"
[dependencies] [dependencies]
tokio = { version = "0.3.2", features = ["macros", "rt", "rt-multi-thread" ] } tokio = { version = "1.1.0", features = ["macros", "rt", "rt-multi-thread" ] }
slog = { version = "2.5.2", features = ["max_level_trace"] } slog = { version = "2.5.2", features = ["max_level_trace"] }
sloggers = "1.0.1" sloggers = "1.0.1"
types = { "path" = "../../consensus/types" } types = { "path" = "../../consensus/types" }

View File

@ -7,7 +7,6 @@ use lighthouse_version::VERSION;
use slog::{crit, info, warn}; use slog::{crit, info, warn};
use std::path::PathBuf; use std::path::PathBuf;
use std::process::exit; use std::process::exit;
use tokio_compat_02::FutureExt;
use types::{EthSpec, EthSpecId}; use types::{EthSpec, EthSpecId};
use validator_client::ProductionValidatorClient; use validator_client::ProductionValidatorClient;
@ -281,19 +280,16 @@ fn run<E: EthSpec>(
&context.eth2_config().spec, &context.eth2_config().spec,
context.log().clone(), context.log().clone(),
)?; )?;
environment.runtime().spawn( environment.runtime().spawn(async move {
async move { if let Err(e) = ProductionBeaconNode::new(context.clone(), config).await {
if let Err(e) = ProductionBeaconNode::new(context.clone(), config).await { crit!(log, "Failed to start beacon node"; "reason" => e);
crit!(log, "Failed to start beacon node"; "reason" => e); // Ignore the error since it always occurs during normal operation when
// Ignore the error since it always occurs during normal operation when // shutting down.
// shutting down. let _ = executor
let _ = executor .shutdown_sender()
.shutdown_sender() .try_send("Failed to start beacon node");
.try_send("Failed to start beacon node");
}
} }
.compat(), });
);
} }
("validator_client", Some(matches)) => { ("validator_client", Some(matches)) => {
let context = environment.core_context(); let context = environment.core_context();
@ -301,26 +297,23 @@ fn run<E: EthSpec>(
let executor = context.executor.clone(); let executor = context.executor.clone();
let config = validator_client::Config::from_cli(&matches, context.log()) let config = validator_client::Config::from_cli(&matches, context.log())
.map_err(|e| format!("Unable to initialize validator config: {}", e))?; .map_err(|e| format!("Unable to initialize validator config: {}", e))?;
environment.runtime().spawn( environment.runtime().spawn(async move {
async move { let run = async {
let run = async { ProductionValidatorClient::new(context, config)
ProductionValidatorClient::new(context, config) .await?
.await? .start_service()?;
.start_service()?;
Ok::<(), String>(()) Ok::<(), String>(())
}; };
if let Err(e) = run.await { if let Err(e) = run.await {
crit!(log, "Failed to start validator client"; "reason" => e); crit!(log, "Failed to start validator client"; "reason" => e);
// Ignore the error since it always occurs during normal operation when // Ignore the error since it always occurs during normal operation when
// shutting down. // shutting down.
let _ = executor let _ = executor
.shutdown_sender() .shutdown_sender()
.try_send("Failed to start validator client"); .try_send("Failed to start validator client");
}
} }
.compat(), });
);
} }
("remote_signer", Some(matches)) => { ("remote_signer", Some(matches)) => {
if let Err(e) = remote_signer::run(&mut environment, matches) { if let Err(e) = remote_signer::run(&mut environment, matches) {

View File

@ -9,7 +9,7 @@ clap = "2.33.3"
client_backend = { path = "../backend", package = "remote_signer_backend" } client_backend = { path = "../backend", package = "remote_signer_backend" }
environment = { path = "../../lighthouse/environment" } environment = { path = "../../lighthouse/environment" }
futures = "0.3.6" futures = "0.3.6"
hyper = "0.13.8" hyper = "0.14.4"
lazy_static = "1.4.0" lazy_static = "1.4.0"
regex = "1.3.9" regex = "1.3.9"
serde = { version = "1.0.116", features = ["derive"] } serde = { version = "1.0.116", features = ["derive"] }

View File

@ -14,5 +14,6 @@ slog = "2.5.2"
slot_clock = { path = "../../common/slot_clock" } slot_clock = { path = "../../common/slot_clock" }
state_processing = { path = "../../consensus/state_processing" } state_processing = { path = "../../consensus/state_processing" }
task_executor = { path = "../../common/task_executor" } task_executor = { path = "../../common/task_executor" }
tokio = { version = "0.3.5", features = ["full"] } tokio = { version = "1.1.0", features = ["full"] }
tokio-stream = "0.1.2"
types = { path = "../../consensus/types" } types = { path = "../../consensus/types" }

View File

@ -19,7 +19,6 @@ use state_processing::{
use std::sync::mpsc::{sync_channel, Receiver, SyncSender, TrySendError}; use std::sync::mpsc::{sync_channel, Receiver, SyncSender, TrySendError};
use std::sync::Arc; use std::sync::Arc;
use task_executor::TaskExecutor; use task_executor::TaskExecutor;
use tokio::stream::StreamExt;
use tokio::sync::mpsc::UnboundedSender; use tokio::sync::mpsc::UnboundedSender;
use tokio::time::{interval_at, Duration, Instant}; use tokio::time::{interval_at, Duration, Instant};
use types::{AttesterSlashing, Epoch, EthSpec, ProposerSlashing}; use types::{AttesterSlashing, Epoch, EthSpec, ProposerSlashing};
@ -83,7 +82,8 @@ impl<T: BeaconChainTypes> SlasherService<T> {
// https://github.com/sigp/lighthouse/issues/1861 // https://github.com/sigp/lighthouse/issues/1861
let mut interval = interval_at(Instant::now(), Duration::from_secs(update_period)); let mut interval = interval_at(Instant::now(), Duration::from_secs(update_period));
while interval.next().await.is_some() { loop {
interval.tick().await;
if let Some(current_slot) = beacon_chain.slot_clock.now() { if let Some(current_slot) = beacon_chain.slot_clock.now() {
let current_epoch = current_slot.epoch(T::EthSpec::slots_per_epoch()); let current_epoch = current_slot.epoch(T::EthSpec::slots_per_epoch());
if let Err(TrySendError::Disconnected(_)) = notif_sender.try_send(current_epoch) { if let Err(TrySendError::Disconnected(_)) = notif_sender.try_send(current_epoch) {

View File

@ -5,7 +5,7 @@ authors = ["Paul Hauner <paul@paulhauner.com>"]
edition = "2018" edition = "2018"
[dependencies] [dependencies]
tokio = { version = "0.3.2", features = ["time"] } tokio = { version = "1.1.0", features = ["time"] }
tokio-compat-02 = "0.1" tokio-compat-02 = "0.1"
web3 = "0.14.0" web3 = "0.14.0"
futures = "0.3.7" futures = "0.3.7"

View File

@ -10,7 +10,7 @@ beacon_node = { path = "../../beacon_node" }
types = { path = "../../consensus/types" } types = { path = "../../consensus/types" }
eth2_config = { path = "../../common/eth2_config" } eth2_config = { path = "../../common/eth2_config" }
tempfile = "3.1.0" tempfile = "3.1.0"
reqwest = { version = "0.10.8", features = ["native-tls-vendored"] } reqwest = { version = "0.11.0", features = ["native-tls-vendored"] }
url = "2.1.1" url = "2.1.1"
serde = "1.0.116" serde = "1.0.116"
futures = "0.3.7" futures = "0.3.7"

View File

@ -11,10 +11,9 @@ hex = "0.4.2"
httpmock = "0.5.1" httpmock = "0.5.1"
remote_signer_client = { path = "../../remote_signer/client" } remote_signer_client = { path = "../../remote_signer/client" }
remote_signer_consumer = { path = "../../common/remote_signer_consumer" } remote_signer_consumer = { path = "../../common/remote_signer_consumer" }
reqwest = { version = "0.10.8", features = ["blocking", "json"] } reqwest = { version = "0.11.0", features = ["blocking", "json"] }
serde = { version = "1.0.116", features = ["derive"] } serde = { version = "1.0.116", features = ["derive"] }
serde_json = "1.0.58" serde_json = "1.0.58"
tempfile = "3.1.0" tempfile = "3.1.0"
tokio = { version = "0.3.5", features = ["time"] } tokio = { version = "1.1.0", features = ["time"] }
types = { path = "../../consensus/types" } types = { path = "../../consensus/types" }
tokio-compat-02 = "0.1"

View File

@ -26,10 +26,7 @@ impl ApiTestSigner<E> {
let client = environment let client = environment
.runtime() .runtime()
.block_on(tokio_compat_02::FutureExt::compat(Client::new( .block_on(Client::new(runtime_context, &matches))
runtime_context,
&matches,
)))
.map_err(|e| format!("Failed to init Rest API: {}", e)) .map_err(|e| format!("Failed to init Rest API: {}", e))
.unwrap(); .unwrap();

View File

@ -30,13 +30,13 @@ pub fn do_sign_request<E: EthSpec, T: RemoteSignerObject>(
) -> Result<String, Error> { ) -> Result<String, Error> {
let runtime = Builder::new_multi_thread().enable_all().build().unwrap(); let runtime = Builder::new_multi_thread().enable_all().build().unwrap();
runtime.block_on(tokio_compat_02::FutureExt::compat(test_client.sign( runtime.block_on(test_client.sign(
&test_input.public_key, &test_input.public_key,
test_input.bls_domain, test_input.bls_domain,
test_input.data, test_input.data,
test_input.fork, test_input.fork,
test_input.genesis_validators_root, test_input.genesis_validators_root,
))) ))
} }
#[derive(Serialize)] #[derive(Serialize)]

View File

@ -13,9 +13,8 @@ types = { path = "../../consensus/types" }
validator_client = { path = "../../validator_client" } validator_client = { path = "../../validator_client" }
parking_lot = "0.11.0" parking_lot = "0.11.0"
futures = "0.3.7" futures = "0.3.7"
tokio = "0.3.2" tokio = "1.1.0"
eth1_test_rig = { path = "../eth1_test_rig" } eth1_test_rig = { path = "../eth1_test_rig" }
env_logger = "0.8.2" env_logger = "0.8.2"
clap = "2.33.3" clap = "2.33.3"
rayon = "1.4.1" rayon = "1.4.1"
tokio-compat-02 = "0.1"

View File

@ -90,7 +90,8 @@ pub fn run_eth1_sim(matches: &ArgMatches) -> Result<(), String> {
// Start a timer that produces eth1 blocks on an interval. // Start a timer that produces eth1 blocks on an interval.
tokio::spawn(async move { tokio::spawn(async move {
let mut interval = tokio::time::interval(eth1_block_time); let mut interval = tokio::time::interval(eth1_block_time);
while interval.next().await.is_some() { loop {
interval.tick().await;
let _ = ganache.evm_mine().await; let _ = ganache.evm_mine().await;
} }
}); });
@ -219,9 +220,7 @@ pub fn run_eth1_sim(matches: &ArgMatches) -> Result<(), String> {
Ok::<(), String>(()) Ok::<(), String>(())
}; };
env.runtime() env.runtime().block_on(main_future).unwrap();
.block_on(tokio_compat_02::FutureExt::compat(main_future))
.unwrap();
env.fire_signal(); env.fire_signal();
env.shutdown_on_idle(); env.shutdown_on_idle();

View File

@ -158,9 +158,7 @@ pub fn run_no_eth1_sim(matches: &ArgMatches) -> Result<(), String> {
Ok::<(), String>(()) Ok::<(), String>(())
}; };
env.runtime() env.runtime().block_on(main_future).unwrap();
.block_on(tokio_compat_02::FutureExt::compat(main_future))
.unwrap();
env.fire_signal(); env.fire_signal();
env.shutdown_on_idle(); env.shutdown_on_idle();

View File

@ -130,9 +130,7 @@ fn syncing_sim(
Ok::<(), String>(()) Ok::<(), String>(())
}; };
env.runtime() env.runtime().block_on(main_future).unwrap();
.block_on(tokio_compat_02::FutureExt::compat(main_future))
.unwrap();
env.fire_signal(); env.fire_signal();
env.shutdown_on_idle(); env.shutdown_on_idle();
@ -217,7 +215,8 @@ pub async fn verify_one_node_sync<E: EthSpec>(
// limited to at most `sync_timeout` epochs // limited to at most `sync_timeout` epochs
let mut interval = tokio::time::interval(epoch_duration); let mut interval = tokio::time::interval(epoch_duration);
let mut count = 0; let mut count = 0;
while interval.next().await.is_some() { loop {
interval.tick().await;
if count >= sync_timeout || !check_still_syncing(&network_c).await? { if count >= sync_timeout || !check_still_syncing(&network_c).await? {
break; break;
} }
@ -254,7 +253,8 @@ pub async fn verify_two_nodes_sync<E: EthSpec>(
// limited to at most `sync_timeout` epochs // limited to at most `sync_timeout` epochs
let mut interval = tokio::time::interval(epoch_duration); let mut interval = tokio::time::interval(epoch_duration);
let mut count = 0; let mut count = 0;
while interval.next().await.is_some() { loop {
interval.tick().await;
if count >= sync_timeout || !check_still_syncing(&network_c).await? { if count >= sync_timeout || !check_still_syncing(&network_c).await? {
break; break;
} }
@ -302,7 +302,8 @@ pub async fn verify_in_between_sync<E: EthSpec>(
// limited to at most `sync_timeout` epochs // limited to at most `sync_timeout` epochs
let mut interval = tokio::time::interval(epoch_duration); let mut interval = tokio::time::interval(epoch_duration);
let mut count = 0; let mut count = 0;
while interval.next().await.is_some() { loop {
interval.tick().await;
if count >= sync_timeout || !check_still_syncing(&network_c).await? { if count >= sync_timeout || !check_still_syncing(&network_c).await? {
break; break;
} }

View File

@ -9,9 +9,8 @@ name = "validator_client"
path = "src/lib.rs" path = "src/lib.rs"
[dev-dependencies] [dev-dependencies]
tokio = { version = "0.3.2", features = ["time", "rt-multi-thread", "macros"] } tokio = { version = "1.1.0", features = ["time", "rt-multi-thread", "macros"] }
deposit_contract = { path = "../common/deposit_contract" } deposit_contract = { path = "../common/deposit_contract" }
tokio-compat-02 = "0.1"
[dependencies] [dependencies]
eth2_ssz = "0.1.2" eth2_ssz = "0.1.2"
@ -30,7 +29,7 @@ serde_yaml = "0.8.13"
slog = { version = "2.5.2", features = ["max_level_trace", "release_max_level_trace"] } slog = { version = "2.5.2", features = ["max_level_trace", "release_max_level_trace"] }
slog-async = "2.5.0" slog-async = "2.5.0"
slog-term = "2.6.0" slog-term = "2.6.0"
tokio = { version = "0.3.2", features = ["time"] } tokio = { version = "1.1.0", features = ["time"] }
futures = "0.3.7" futures = "0.3.7"
dirs = "3.0.1" dirs = "3.0.1"
directory = { path = "../common/directory" } directory = { path = "../common/directory" }
@ -53,8 +52,8 @@ eth2_keystore = { path = "../crypto/eth2_keystore" }
account_utils = { path = "../common/account_utils" } account_utils = { path = "../common/account_utils" }
lighthouse_version = { path = "../common/lighthouse_version" } lighthouse_version = { path = "../common/lighthouse_version" }
warp_utils = { path = "../common/warp_utils" } warp_utils = { path = "../common/warp_utils" }
warp = { git = "https://github.com/sigp/warp ", branch = "lighthouse" } warp = "0.3.0"
hyper = "0.13.8" hyper = "0.14.4"
serde_utils = { path = "../consensus/serde_utils" } serde_utils = { path = "../consensus/serde_utils" }
libsecp256k1 = "0.3.5" libsecp256k1 = "0.3.5"
ring = "0.16.19" ring = "0.16.19"

View File

@ -6,7 +6,6 @@ use crate::{
}; };
use environment::RuntimeContext; use environment::RuntimeContext;
use futures::future::FutureExt; use futures::future::FutureExt;
use futures::StreamExt;
use slog::{crit, error, info, trace}; use slog::{crit, error, info, trace};
use slot_clock::SlotClock; use slot_clock::SlotClock;
use std::collections::HashMap; use std::collections::HashMap;
@ -149,7 +148,8 @@ impl<T: SlotClock + 'static, E: EthSpec> AttestationService<T, E> {
let executor = self.context.executor.clone(); let executor = self.context.executor.clone();
let interval_fut = async move { let interval_fut = async move {
while interval.next().await.is_some() { loop {
interval.tick().await;
let log = self.context.log(); let log = self.context.log();
if let Err(e) = self.spawn_attestation_tasks(slot_duration) { if let Err(e) = self.spawn_attestation_tasks(slot_duration) {

View File

@ -5,7 +5,7 @@ use crate::{
}; };
use environment::RuntimeContext; use environment::RuntimeContext;
use futures::channel::mpsc::Sender; use futures::channel::mpsc::Sender;
use futures::{SinkExt, StreamExt}; use futures::SinkExt;
use parking_lot::RwLock; use parking_lot::RwLock;
use slog::{debug, error, trace, warn}; use slog::{debug, error, trace, warn};
use slot_clock::SlotClock; use slot_clock::SlotClock;
@ -490,7 +490,8 @@ impl<T: SlotClock + 'static, E: EthSpec> DutiesService<T, E> {
let executor = self.inner.context.executor.clone(); let executor = self.inner.context.executor.clone();
let interval_fut = async move { let interval_fut = async move {
while interval.next().await.is_some() { loop {
interval.tick().await;
self.clone().do_update(&mut block_service_tx, &spec).await; self.clone().do_update(&mut block_service_tx, &spec).await;
} }
}; };

View File

@ -3,7 +3,6 @@ use crate::http_metrics::metrics;
use environment::RuntimeContext; use environment::RuntimeContext;
use eth2::types::StateId; use eth2::types::StateId;
use futures::future::FutureExt; use futures::future::FutureExt;
use futures::StreamExt;
use parking_lot::RwLock; use parking_lot::RwLock;
use slog::Logger; use slog::Logger;
use slog::{debug, trace}; use slog::{debug, trace};
@ -164,7 +163,8 @@ impl<T: SlotClock + 'static, E: EthSpec> ForkService<T, E> {
let executor = context.executor.clone(); let executor = context.executor.clone();
let interval_fut = async move { let interval_fut = async move {
while interval.next().await.is_some() { loop {
interval.tick().await;
self.clone().do_update().await.ok(); self.clone().do_update().await.ok();
} }
}; };

View File

@ -25,7 +25,6 @@ use std::sync::Arc;
use tempfile::{tempdir, TempDir}; use tempfile::{tempdir, TempDir};
use tokio::runtime::Runtime; use tokio::runtime::Runtime;
use tokio::sync::oneshot; use tokio::sync::oneshot;
use tokio_compat_02::FutureExt;
const PASSWORD_BYTES: &[u8] = &[42, 50, 37]; const PASSWORD_BYTES: &[u8] = &[42, 50, 37];
@ -439,141 +438,126 @@ struct KeystoreValidatorScenario {
fn invalid_pubkey() { fn invalid_pubkey() {
let runtime = build_runtime(); let runtime = build_runtime();
let weak_runtime = Arc::downgrade(&runtime); let weak_runtime = Arc::downgrade(&runtime);
runtime.block_on( runtime.block_on(async {
async { ApiTester::new(weak_runtime)
ApiTester::new(weak_runtime) .await
.await .invalidate_api_token()
.invalidate_api_token() .test_get_lighthouse_version_invalid()
.test_get_lighthouse_version_invalid() .await;
.await; });
}
.compat(),
);
} }
#[test] #[test]
fn simple_getters() { fn simple_getters() {
let runtime = build_runtime(); let runtime = build_runtime();
let weak_runtime = Arc::downgrade(&runtime); let weak_runtime = Arc::downgrade(&runtime);
runtime.block_on( runtime.block_on(async {
async { ApiTester::new(weak_runtime)
ApiTester::new(weak_runtime) .await
.await .test_get_lighthouse_version()
.test_get_lighthouse_version() .await
.await .test_get_lighthouse_health()
.test_get_lighthouse_health() .await
.await .test_get_lighthouse_spec()
.test_get_lighthouse_spec() .await;
.await; });
}
.compat(),
);
} }
#[test] #[test]
fn hd_validator_creation() { fn hd_validator_creation() {
let runtime = build_runtime(); let runtime = build_runtime();
let weak_runtime = Arc::downgrade(&runtime); let weak_runtime = Arc::downgrade(&runtime);
runtime.block_on( runtime.block_on(async {
async { ApiTester::new(weak_runtime)
ApiTester::new(weak_runtime) .await
.await .assert_enabled_validators_count(0)
.assert_enabled_validators_count(0) .assert_validators_count(0)
.assert_validators_count(0) .create_hd_validators(HdValidatorScenario {
.create_hd_validators(HdValidatorScenario { count: 2,
count: 2, specify_mnemonic: true,
specify_mnemonic: true, key_derivation_path_offset: 0,
key_derivation_path_offset: 0, disabled: vec![],
disabled: vec![], })
}) .await
.await .assert_enabled_validators_count(2)
.assert_enabled_validators_count(2) .assert_validators_count(2)
.assert_validators_count(2) .create_hd_validators(HdValidatorScenario {
.create_hd_validators(HdValidatorScenario { count: 1,
count: 1, specify_mnemonic: false,
specify_mnemonic: false, key_derivation_path_offset: 0,
key_derivation_path_offset: 0, disabled: vec![0],
disabled: vec![0], })
}) .await
.await .assert_enabled_validators_count(2)
.assert_enabled_validators_count(2) .assert_validators_count(3)
.assert_validators_count(3) .create_hd_validators(HdValidatorScenario {
.create_hd_validators(HdValidatorScenario { count: 0,
count: 0, specify_mnemonic: true,
specify_mnemonic: true, key_derivation_path_offset: 4,
key_derivation_path_offset: 4, disabled: vec![],
disabled: vec![], })
}) .await
.await .assert_enabled_validators_count(2)
.assert_enabled_validators_count(2) .assert_validators_count(3);
.assert_validators_count(3); });
}
.compat(),
);
} }
#[test] #[test]
fn validator_enabling() { fn validator_enabling() {
let runtime = build_runtime(); let runtime = build_runtime();
let weak_runtime = Arc::downgrade(&runtime); let weak_runtime = Arc::downgrade(&runtime);
runtime.block_on( runtime.block_on(async {
async { ApiTester::new(weak_runtime)
ApiTester::new(weak_runtime) .await
.await .create_hd_validators(HdValidatorScenario {
.create_hd_validators(HdValidatorScenario { count: 2,
count: 2, specify_mnemonic: false,
specify_mnemonic: false, key_derivation_path_offset: 0,
key_derivation_path_offset: 0, disabled: vec![],
disabled: vec![], })
}) .await
.await .assert_enabled_validators_count(2)
.assert_enabled_validators_count(2) .assert_validators_count(2)
.assert_validators_count(2) .set_validator_enabled(0, false)
.set_validator_enabled(0, false) .await
.await .assert_enabled_validators_count(1)
.assert_enabled_validators_count(1) .assert_validators_count(2)
.assert_validators_count(2) .set_validator_enabled(0, true)
.set_validator_enabled(0, true) .await
.await .assert_enabled_validators_count(2)
.assert_enabled_validators_count(2) .assert_validators_count(2);
.assert_validators_count(2); });
}
.compat(),
);
} }
#[test] #[test]
fn keystore_validator_creation() { fn keystore_validator_creation() {
let runtime = build_runtime(); let runtime = build_runtime();
let weak_runtime = Arc::downgrade(&runtime); let weak_runtime = Arc::downgrade(&runtime);
runtime.block_on( runtime.block_on(async {
async { ApiTester::new(weak_runtime)
ApiTester::new(weak_runtime) .await
.await .assert_enabled_validators_count(0)
.assert_enabled_validators_count(0) .assert_validators_count(0)
.assert_validators_count(0) .create_keystore_validators(KeystoreValidatorScenario {
.create_keystore_validators(KeystoreValidatorScenario { correct_password: true,
correct_password: true, enabled: true,
enabled: true, })
}) .await
.await .assert_enabled_validators_count(1)
.assert_enabled_validators_count(1) .assert_validators_count(1)
.assert_validators_count(1) .create_keystore_validators(KeystoreValidatorScenario {
.create_keystore_validators(KeystoreValidatorScenario { correct_password: false,
correct_password: false, enabled: true,
enabled: true, })
}) .await
.await .assert_enabled_validators_count(1)
.assert_enabled_validators_count(1) .assert_validators_count(1)
.assert_validators_count(1) .create_keystore_validators(KeystoreValidatorScenario {
.create_keystore_validators(KeystoreValidatorScenario { correct_password: true,
correct_password: true, enabled: false,
enabled: false, })
}) .await
.await .assert_enabled_validators_count(1)
.assert_enabled_validators_count(1) .assert_validators_count(2);
.assert_validators_count(2); });
}
.compat(),
);
} }

View File

@ -1,5 +1,4 @@
use crate::ProductionValidatorClient; use crate::ProductionValidatorClient;
use futures::StreamExt;
use slog::{error, info}; use slog::{error, info};
use slot_clock::SlotClock; use slot_clock::SlotClock;
use tokio::time::{interval_at, Duration, Instant}; use tokio::time::{interval_at, Duration, Instant};
@ -24,7 +23,8 @@ pub fn spawn_notifier<T: EthSpec>(client: &ProductionValidatorClient<T>) -> Resu
let interval_fut = async move { let interval_fut = async move {
let log = context.log(); let log = context.log();
while interval.next().await.is_some() { loop {
interval.tick().await;
let num_available = duties_service.beacon_nodes.num_available().await; let num_available = duties_service.beacon_nodes.num_available().await;
let num_synced = duties_service.beacon_nodes.num_synced().await; let num_synced = duties_service.beacon_nodes.num_synced().await;
let num_total = duties_service.beacon_nodes.num_total().await; let num_total = duties_service.beacon_nodes.num_total().await;