Update to tokio 1.1 (#2172)

## Issue Addressed

resolves #2129
resolves #2099 
addresses some of #1712
unblocks #2076
unblocks #2153 

## Proposed Changes

- Updates all the dependencies mentioned in #2129, except for web3. They haven't merged their tokio 1.0 update because they are waiting on some dependencies of their own. Since we only use web3 in tests, I think updating it in a separate issue is fine. If they are able to merge soon though, I can update in this PR. 

- Updates `tokio_util` to 0.6.2 and `bytes` to 1.0.1.

- We haven't made a discv5 release since merging tokio 1.0 updates so I'm using a commit rather than release atm. **Edit:** I think we should merge an update of `tokio_util` to 0.6.2 into discv5 before this release because it has panic fixes in `DelayQueue`  --> PR in discv5:  https://github.com/sigp/discv5/pull/58

## Additional Info

tokio 1.0 changes that required some changes in lighthouse:

- `interval.next().await.is_some()` -> `interval.tick().await`
- `sleep` future is now `!Unpin` -> https://github.com/tokio-rs/tokio/issues/3028
- `try_recv` has been temporarily removed from `mpsc` -> https://github.com/tokio-rs/tokio/issues/3350
- stream features have moved to `tokio-stream` and `broadcast::Receiver::into_stream()` has been temporarily removed -> https://github.com/tokio-rs/tokio/issues/2870
- I've copied over the `BroadcastStream` wrapper from this PR, but can update to use `tokio-stream` once it's merged https://github.com/tokio-rs/tokio/pull/3384

Co-authored-by: realbigsean <seananderson33@gmail.com>
This commit is contained in:
realbigsean 2021-02-10 23:29:49 +00:00
parent 6f4da9a5d2
commit e20f64b21a
74 changed files with 1146 additions and 1327 deletions

1457
Cargo.lock generated

File diff suppressed because it is too large Load Diff

View File

@ -27,14 +27,13 @@ eth2_wallet = { path = "../crypto/eth2_wallet" }
eth2_wallet_manager = { path = "../common/eth2_wallet_manager" }
rand = "0.7.3"
validator_dir = { path = "../common/validator_dir" }
tokio = { version = "0.3.5", features = ["full"] }
tokio = { version = "1.1.0", features = ["full"] }
eth2_keystore = { path = "../crypto/eth2_keystore" }
account_utils = { path = "../common/account_utils" }
slashing_protection = { path = "../validator_client/slashing_protection" }
eth2 = {path = "../common/eth2"}
safe_arith = {path = "../consensus/safe_arith"}
slot_clock = { path = "../common/slot_clock" }
tokio-compat-02 = "0.1"
[dev-dependencies]
tempfile = "3.1.0"

View File

@ -12,7 +12,6 @@ use safe_arith::SafeArith;
use slot_clock::{SlotClock, SystemTimeSlotClock};
use std::path::PathBuf;
use std::time::Duration;
use tokio_compat_02::FutureExt;
use types::{ChainSpec, Epoch, EthSpec, Fork, VoluntaryExit};
pub const CMD: &str = "exit";
@ -77,17 +76,14 @@ pub fn cli_run<E: EthSpec>(matches: &ArgMatches, env: Environment<E>) -> Result<
.clone()
.expect("network should have a valid config");
env.runtime().block_on(
publish_voluntary_exit::<E>(
&keystore_path,
password_file_path.as_ref(),
&client,
&spec,
stdin_inputs,
&testnet_config,
)
.compat(),
)?;
env.runtime().block_on(publish_voluntary_exit::<E>(
&keystore_path,
password_file_path.as_ref(),
&client,
&spec,
stdin_inputs,
&testnet_config,
))?;
Ok(())
}

View File

@ -10,7 +10,6 @@ path = "src/lib.rs"
[dev-dependencies]
node_test_rig = { path = "../testing/node_test_rig" }
tokio-compat-02 = "0.1"
[features]
write_ssz_files = ["beacon_chain/write_ssz_files"] # Writes debugging .ssz files to /tmp during block processing.
@ -27,7 +26,7 @@ slog = { version = "2.5.2", features = ["max_level_trace", "release_max_level_tr
slog-term = "2.6.0"
slog-async = "2.5.0"
ctrlc = { version = "3.1.6", features = ["termination"] }
tokio = { version = "0.3.2", features = ["time"] }
tokio = { version = "1.1.0", features = ["time"] }
exit-future = "0.2.0"
dirs = "3.0.1"
logging = { path = "../common/logging" }
@ -41,7 +40,7 @@ eth2_libp2p = { path = "./eth2_libp2p" }
eth2_ssz = "0.1.2"
serde = "1.0.116"
clap_utils = { path = "../common/clap_utils" }
hyper = "0.13.8"
hyper = "0.14.4"
lighthouse_version = { path = "../common/lighthouse_version" }
hex = "0.4.2"
slasher = { path = "../slasher" }

View File

@ -40,7 +40,7 @@ eth2_ssz_derive = "0.1.0"
state_processing = { path = "../../consensus/state_processing" }
tree_hash = "0.1.1"
types = { path = "../../consensus/types" }
tokio = "0.3.2"
tokio = "1.1.0"
eth1 = { path = "../eth1" }
futures = "0.3.7"
genesis = { path = "../genesis" }

View File

@ -26,10 +26,10 @@ error-chain = "0.12.4"
serde_yaml = "0.8.13"
slog = { version = "2.5.2", features = ["max_level_trace"] }
slog-async = "2.5.0"
tokio = "0.3.2"
tokio = "1.1.0"
dirs = "3.0.1"
futures = "0.3.7"
reqwest = { version = "0.10.8", features = ["native-tls-vendored"] }
reqwest = { version = "0.11.0", features = ["native-tls-vendored"] }
url = "2.1.1"
eth1 = { path = "../eth1" }
genesis = { path = "../genesis" }

View File

@ -1,7 +1,6 @@
use crate::metrics;
use beacon_chain::{BeaconChain, BeaconChainTypes};
use eth2_libp2p::NetworkGlobals;
use futures::prelude::*;
use parking_lot::Mutex;
use slog::{debug, error, info, warn, Logger};
use slot_clock::SlotClock;
@ -64,26 +63,32 @@ pub fn spawn_notifier<T: BeaconChainTypes>(
}
// Perform post-genesis logging.
while interval.next().await.is_some() {
loop {
interval.tick().await;
let connected_peer_count = network.connected_peers();
let sync_state = network.sync_state();
let head_info = beacon_chain.head_info().map_err(|e| {
error!(
log,
"Failed to get beacon chain head info";
"error" => format!("{:?}", e)
)
})?;
let head_info = match beacon_chain.head_info() {
Ok(head_info) => head_info,
Err(e) => {
error!(log, "Failed to get beacon chain head info"; "error" => format!("{:?}", e));
break;
}
};
let head_slot = head_info.slot;
let current_slot = beacon_chain.slot().map_err(|e| {
error!(
log,
"Unable to read current slot";
"error" => format!("{:?}", e)
)
})?;
let current_slot = match beacon_chain.slot() {
Ok(slot) => slot,
Err(e) => {
error!(
log,
"Unable to read current slot";
"error" => format!("{:?}", e)
);
break;
}
};
let current_epoch = current_slot.epoch(T::EthSpec::slots_per_epoch());
let finalized_epoch = head_info.finalized_checkpoint.epoch;
let finalized_root = head_info.finalized_checkpoint.root;
@ -175,11 +180,10 @@ pub fn spawn_notifier<T: BeaconChainTypes>(
eth1_logging(&beacon_chain, &log);
}
Ok::<(), ()>(())
};
// run the notifier on the current executor
executor.spawn(interval_future.unwrap_or_else(|_| ()), "notifier");
executor.spawn(interval_future, "notifier");
Ok(())
}

View File

@ -13,7 +13,7 @@ environment = { path = "../../lighthouse/environment" }
tokio-compat-02 = "0.1"
[dependencies]
reqwest = { version = "0.10.8", features = ["native-tls-vendored"] }
reqwest = { version = "0.11.0", features = ["native-tls-vendored"] }
futures = "0.3.7"
serde_json = "1.0.58"
serde = { version = "1.0.116", features = ["derive"] }
@ -26,7 +26,7 @@ tree_hash = "0.1.1"
eth2_hashing = "0.1.0"
parking_lot = "0.11.0"
slog = "2.5.2"
tokio = { version = "0.3.2", features = ["full"] }
tokio = { version = "1.1.0", features = ["full"] }
state_processing = { path = "../../consensus/state_processing" }
libflate = "1.0.2"
lighthouse_metrics = { path = "../../common/lighthouse_metrics"}

View File

@ -9,7 +9,7 @@ use crate::{
inner::{DepositUpdater, Inner},
};
use fallback::{Fallback, FallbackError};
use futures::{future::TryFutureExt, StreamExt};
use futures::future::TryFutureExt;
use parking_lot::{RwLock, RwLockReadGuard};
use serde::{Deserialize, Serialize};
use slog::{crit, debug, error, info, trace, warn, Logger};
@ -721,7 +721,8 @@ impl Service {
let mut interval = interval_at(Instant::now(), update_interval);
let update_future = async move {
while interval.next().await.is_some() {
loop {
interval.tick().await;
self.do_update(update_interval).await.ok();
}
};

View File

@ -5,8 +5,8 @@ authors = ["Sigma Prime <contact@sigmaprime.io>"]
edition = "2018"
[dependencies]
discv5 = { version = "0.1.0-beta.2", features = ["libp2p"] }
unsigned-varint = { git = "https://github.com/sigp/unsigned-varint", branch = "dep-update", features = ["codec"] }
discv5 = { version = "0.1.0-beta.3", features = ["libp2p"] }
unsigned-varint = { version = "0.6.0", features = ["codec"] }
types = { path = "../../consensus/types" }
hashset_delay = { path = "../../common/hashset_delay" }
eth2_ssz_types = { path = "../../consensus/ssz_types" }
@ -16,15 +16,16 @@ eth2_ssz = "0.1.2"
eth2_ssz_derive = "0.1.0"
slog = { version = "2.5.2", features = ["max_level_trace"] }
lighthouse_version = { path = "../../common/lighthouse_version" }
tokio = { version = "0.3.2", features = ["time", "macros"] }
tokio = { version = "1.1.0", features = ["time", "macros"] }
futures = "0.3.7"
futures-io = "0.3.7"
error-chain = "0.12.4"
dirs = "3.0.1"
fnv = "1.0.7"
lazy_static = "1.4.0"
lighthouse_metrics = { path = "../../common/lighthouse_metrics" }
smallvec = "1.6.1"
tokio-io-timeout = "0.5.0"
tokio-io-timeout = "1.1.1"
lru = "0.6.0"
parking_lot = "0.11.0"
sha2 = "0.9.1"
@ -32,7 +33,7 @@ base64 = "0.13.0"
snap = "1.0.1"
void = "1.0.2"
hex = "0.4.2"
tokio-util = { version = "0.4.0", features = ["codec", "compat", "time"] }
tokio-util = { version = "0.6.2", features = ["codec", "compat", "time"] }
tiny-keccak = "2.0.2"
task_executor = { path = "../../common/task_executor" }
rand = "0.7.3"
@ -41,14 +42,12 @@ regex = "1.3.9"
strum = { version = "0.20", features = ["derive"] }
[dependencies.libp2p]
#version = "0.23.0"
git = "https://github.com/sigp/rust-libp2p"
rev = "97000533e4710183124abde017c6c3d68287c1ae"
version = "0.34.0"
default-features = false
features = ["websocket", "identify", "mplex", "yamux", "noise", "gossipsub", "dns", "tcp-tokio"]
[dev-dependencies]
tokio = { version = "0.3.2", features = ["full"] }
tokio = { version = "1.1.0", features = ["full"] }
slog-term = "2.6.0"
slog-async = "2.5.0"
tempfile = "3.1.0"

View File

@ -832,7 +832,7 @@ impl<TSpec: EthSpec> Behaviour<TSpec> {
if let Some((peer_id, reason)) = self.peers_to_dc.pop_front() {
return Poll::Ready(NBAction::NotifyHandler {
peer_id,
handler: NotifyHandler::All,
handler: NotifyHandler::Any,
event: BehaviourHandlerIn::Shutdown(
reason.map(|reason| (RequestId::Behaviour, RPCRequest::Goodbye(reason))),
),
@ -893,7 +893,7 @@ impl<TSpec: EthSpec> Behaviour<TSpec> {
}
// perform gossipsub score updates when necessary
while let Poll::Ready(Some(_)) = self.update_gossipsub_scores.poll_next_unpin(cx) {
while let Poll::Ready(_) = self.update_gossipsub_scores.poll_tick(cx) {
self.peer_manager.update_gossipsub_scores(&self.gossipsub);
}

View File

@ -221,8 +221,9 @@ impl CombinedKeyExt for CombinedKey {
fn from_libp2p(key: &libp2p::core::identity::Keypair) -> Result<CombinedKey, &'static str> {
match key {
Keypair::Secp256k1(key) => {
let secret = discv5::enr::k256::ecdsa::SigningKey::new(&key.secret().to_bytes())
.expect("libp2p key must be valid");
let secret =
discv5::enr::k256::ecdsa::SigningKey::from_bytes(&key.secret().to_bytes())
.expect("libp2p key must be valid");
Ok(CombinedKey::Secp256k1(secret))
}
Keypair::Ed25519(key) => {
@ -277,7 +278,7 @@ mod tests {
fn test_secp256k1_peer_id_conversion() {
let sk_hex = "df94a73d528434ce2309abb19c16aedb535322797dbd59c157b1e04095900f48";
let sk_bytes = hex::decode(sk_hex).unwrap();
let secret_key = discv5::enr::k256::ecdsa::SigningKey::new(&sk_bytes).unwrap();
let secret_key = discv5::enr::k256::ecdsa::SigningKey::from_bytes(&sk_bytes).unwrap();
let libp2p_sk = libp2p::identity::secp256k1::SecretKey::from_bytes(sk_bytes).unwrap();
let secp256k1_kp: libp2p::identity::secp256k1::Keypair = libp2p_sk.into();

View File

@ -896,7 +896,7 @@ impl<TSpec: EthSpec> Discovery<TSpec> {
}
EventStream::InActive => {} // ignore checking the stream
EventStream::Present(ref mut stream) => {
while let Ok(event) = stream.try_recv() {
while let Poll::Ready(Some(event)) = stream.poll_recv(cx) {
match event {
// We filter out unwanted discv5 events here and only propagate useful results to
// the peer manager.

View File

@ -972,7 +972,7 @@ impl<TSpec: EthSpec> Stream for PeerManager<TSpec> {
fn poll_next(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Option<Self::Item>> {
// perform the heartbeat when necessary
while let Poll::Ready(Some(_)) = self.heartbeat.poll_next_unpin(cx) {
while let Poll::Ready(_) = self.heartbeat.poll_tick(cx) {
self.heartbeat();
}
@ -1011,8 +1011,10 @@ impl<TSpec: EthSpec> Stream for PeerManager<TSpec> {
}
}
if !matches!(self.network_globals.sync_state(), SyncState::SyncingFinalized{..}|SyncState::SyncingHead{..})
{
if !matches!(
self.network_globals.sync_state(),
SyncState::SyncingFinalized { .. } | SyncState::SyncingHead { .. }
) {
loop {
match self.status_peers.poll_next_unpin(cx) {
Poll::Ready(Some(Ok(peer_id))) => {

View File

@ -156,7 +156,10 @@ impl<T: EthSpec> PeerInfo<T> {
/// Checks if the status is connected.
pub fn is_connected(&self) -> bool {
matches!(self.connection_status, PeerConnectionStatus::Connected { .. })
matches!(
self.connection_status,
PeerConnectionStatus::Connected { .. }
)
}
/// Checks if the status is connected.

View File

@ -29,12 +29,20 @@ pub struct SyncInfo {
impl std::cmp::PartialEq for PeerSyncStatus {
fn eq(&self, other: &Self) -> bool {
matches!((self, other),
(PeerSyncStatus::Synced { .. }, PeerSyncStatus::Synced { .. }) |
(PeerSyncStatus::Advanced { .. }, PeerSyncStatus::Advanced { .. }) |
(PeerSyncStatus::Behind { .. }, PeerSyncStatus::Behind { .. }) |
(PeerSyncStatus::IrrelevantPeer, PeerSyncStatus::IrrelevantPeer) |
(PeerSyncStatus::Unknown, PeerSyncStatus::Unknown))
matches!(
(self, other),
(PeerSyncStatus::Synced { .. }, PeerSyncStatus::Synced { .. })
| (
PeerSyncStatus::Advanced { .. },
PeerSyncStatus::Advanced { .. }
)
| (PeerSyncStatus::Behind { .. }, PeerSyncStatus::Behind { .. })
| (
PeerSyncStatus::IrrelevantPeer,
PeerSyncStatus::IrrelevantPeer
)
| (PeerSyncStatus::Unknown, PeerSyncStatus::Unknown)
)
}
}

View File

@ -137,14 +137,20 @@ impl<TSpec: EthSpec> PeerDB<TSpec> {
/// If we are connected or currently dialing the peer returns true.
pub fn is_connected_or_dialing(&self, peer_id: &PeerId) -> bool {
matches!(self.connection_status(peer_id), Some(PeerConnectionStatus::Connected { .. })
| Some(PeerConnectionStatus::Dialing { .. }))
matches!(
self.connection_status(peer_id),
Some(PeerConnectionStatus::Connected { .. })
| Some(PeerConnectionStatus::Dialing { .. })
)
}
/// If we are connected or in the process of disconnecting
pub fn is_connected_or_disconnecting(&self, peer_id: &PeerId) -> bool {
matches!(self.connection_status(peer_id), Some(PeerConnectionStatus::Connected { .. })
| Some(PeerConnectionStatus::Disconnecting { .. }))
matches!(
self.connection_status(peer_id),
Some(PeerConnectionStatus::Connected { .. })
| Some(PeerConnectionStatus::Disconnecting { .. })
)
}
/// Returns true if the peer is synced at least to our current head.

View File

@ -7,6 +7,7 @@ use super::{RPCReceived, RPCSend};
use crate::rpc::protocol::{InboundFramed, OutboundFramed};
use fnv::FnvHashMap;
use futures::prelude::*;
use futures::{Sink, SinkExt};
use libp2p::core::upgrade::{
InboundUpgrade, NegotiationError, OutboundUpgrade, ProtocolError, UpgradeError,
};
@ -133,7 +134,7 @@ enum HandlerState {
///
/// While in this state the handler rejects new requests but tries to finish existing ones.
/// Once the timer expires, all messages are killed.
ShuttingDown(Sleep),
ShuttingDown(Box<Sleep>),
/// The handler is deactivated. A goodbye has been sent and no more messages are sent or
/// received.
Deactivated,
@ -239,9 +240,9 @@ where
self.dial_queue.push((id, req));
}
self.state = HandlerState::ShuttingDown(sleep_until(
self.state = HandlerState::ShuttingDown(Box::new(sleep_until(
TInstant::now() + Duration::from_secs(SHUTDOWN_TIMEOUT_SECS as u64),
));
)));
}
}

View File

@ -9,8 +9,8 @@ use crate::rpc::{
MaxRequestBlocks, MAX_REQUEST_BLOCKS,
};
use futures::future::BoxFuture;
use futures::prelude::*;
use futures::prelude::{AsyncRead, AsyncWrite};
use futures::{FutureExt, SinkExt, StreamExt};
use libp2p::core::{InboundUpgrade, OutboundUpgrade, ProtocolName, UpgradeInfo};
use ssz::Encode;
use ssz_types::VariableList;
@ -278,7 +278,7 @@ impl ProtocolName for ProtocolId {
pub type InboundOutput<TSocket, TSpec> = (RPCRequest<TSpec>, InboundFramed<TSocket, TSpec>);
pub type InboundFramed<TSocket, TSpec> =
Framed<TimeoutStream<Compat<TSocket>>, InboundCodec<TSpec>>;
Framed<std::pin::Pin<Box<TimeoutStream<Compat<TSocket>>>>, InboundCodec<TSpec>>;
impl<TSocket, TSpec> InboundUpgrade<TSocket> for RPCProtocol<TSpec>
where
@ -304,7 +304,7 @@ where
let mut timed_socket = TimeoutStream::new(socket);
timed_socket.set_read_timeout(Some(Duration::from_secs(TTFB_TIMEOUT)));
let socket = Framed::new(timed_socket, codec);
let socket = Framed::new(Box::pin(timed_socket), codec);
// MetaData requests should be empty, return the stream
match protocol_name {

View File

@ -1,6 +1,5 @@
use crate::rpc::{Protocol, RPCRequest};
use fnv::FnvHashMap;
use futures::StreamExt;
use libp2p::PeerId;
use std::convert::TryInto;
use std::future::Future;
@ -241,7 +240,7 @@ impl Future for RPCRateLimiter {
type Output = ();
fn poll(mut self: Pin<&mut Self>, cx: &mut Context) -> Poll<Self::Output> {
while let Poll::Ready(Some(_)) = self.prune_interval.poll_next_unpin(cx) {
while let Poll::Ready(_) = self.prune_interval.poll_tick(cx) {
self.prune();
}

View File

@ -23,12 +23,16 @@ pub enum SyncState {
impl PartialEq for SyncState {
fn eq(&self, other: &Self) -> bool {
matches!((self, other),
(SyncState::SyncingFinalized { .. }, SyncState::SyncingFinalized { .. }) |
(SyncState::SyncingHead { .. }, SyncState::SyncingHead { .. }) |
(SyncState::Synced, SyncState::Synced) |
(SyncState::Stalled, SyncState::Stalled) |
(SyncState::SyncTransition, SyncState::SyncTransition))
matches!(
(self, other),
(
SyncState::SyncingFinalized { .. },
SyncState::SyncingFinalized { .. }
) | (SyncState::SyncingHead { .. }, SyncState::SyncingHead { .. })
| (SyncState::Synced, SyncState::Synced)
| (SyncState::Stalled, SyncState::Stalled)
| (SyncState::SyncTransition, SyncState::SyncTransition)
)
}
}

View File

@ -315,7 +315,7 @@ fn test_blocks_by_range_chunked_rpc_terminates_correctly() {
// sent in the timeout
match futures::future::select(
Box::pin(receiver.next_event()),
tokio::time::sleep(Duration::from_secs(1)),
Box::pin(tokio::time::sleep(Duration::from_secs(1))),
)
.await
{
@ -692,7 +692,7 @@ fn test_blocks_by_root_chunked_rpc_terminates_correctly() {
// sent in the timeout
match futures::future::select(
Box::pin(receiver.next_event()),
tokio::time::sleep(Duration::from_millis(1000)),
Box::pin(tokio::time::sleep(Duration::from_secs(1))),
)
.await
{

View File

@ -19,7 +19,7 @@ merkle_proof = { path = "../../consensus/merkle_proof" }
eth2_ssz = "0.1.2"
eth2_hashing = "0.1.0"
tree_hash = "0.1.1"
tokio = { version = "0.3.2", features = ["full"] }
tokio = { version = "1.1.0", features = ["full"] }
parking_lot = "0.11.0"
slog = "2.5.2"
exit-future = "0.2.0"

View File

@ -5,9 +5,11 @@ authors = ["Paul Hauner <paul@paulhauner.com>"]
edition = "2018"
[dependencies]
warp = { git = "https://github.com/sigp/warp ", branch = "lighthouse" }
warp = "0.3.0"
serde = { version = "1.0.116", features = ["derive"] }
tokio = { version = "0.3.2", features = ["macros","stream","sync"] }
tokio = { version = "1.1.0", features = ["macros","sync"] }
tokio-stream = "0.1.2"
tokio-util = "0.6.3"
parking_lot = "0.11.0"
types = { path = "../../consensus/types" }
hex = "0.4.2"
@ -32,5 +34,4 @@ futures = "0.3.8"
store = { path = "../store" }
environment = { path = "../../lighthouse/environment" }
tree_hash = "0.1.1"
discv5 = { version = "0.1.0-beta.2", features = ["libp2p"] }
tokio-compat-02 = "0.1"
discv5 = { version = "0.1.0-beta.3" }

View File

@ -0,0 +1,66 @@
// TODO: this should be replaced with tokio's `BroadcastStream` once it's added to
// tokio-stream (https://github.com/tokio-rs/tokio/pull/3384)
use std::fmt;
use std::pin::Pin;
use std::task::{Context, Poll};
use tokio::sync::broadcast::error::RecvError;
use tokio::sync::broadcast::Receiver;
use tokio_stream::Stream;
use tokio_util::sync::ReusableBoxFuture;
/// A wrapper around [`tokio::sync::broadcast::Receiver`] that implements [`Stream`].
///
/// Yields `Ok(item)` for each received message and
/// `Err(BroadcastStreamRecvError::Lagged(_))` when the receiver has fallen
/// behind; a closed channel ends the stream.
///
/// [`tokio::sync::broadcast::Receiver`]: struct@tokio::sync::broadcast::Receiver
/// [`Stream`]: trait@tokio_stream::Stream
pub struct BroadcastStream<T> {
    // Stores the in-flight `recv` future together with the receiver itself so
    // the receiver can be recovered and the future re-armed after each item
    // (see `make_future`). `ReusableBoxFuture` reuses the box across polls
    // instead of allocating a fresh future per message.
    inner: ReusableBoxFuture<(Result<T, RecvError>, Receiver<T>)>,
}
/// An error returned from the inner stream of a [`BroadcastStream`].
///
/// Note that a closed channel is not reported through this error type: the
/// stream simply terminates (`poll_next` returns `None`).
#[derive(Debug, PartialEq)]
pub enum BroadcastStreamRecvError {
    /// The receiver lagged too far behind. Attempting to receive again will
    /// return the oldest message still retained by the channel.
    ///
    /// Includes the number of skipped messages.
    Lagged(u64),
}
/// Awaits a single message from `rx`, then hands back both the outcome and
/// the receiver itself so the caller can re-arm the future for the next
/// message without losing the receiver.
async fn make_future<T: Clone>(mut rx: Receiver<T>) -> (Result<T, RecvError>, Receiver<T>) {
    let outcome = rx.recv().await;
    (outcome, rx)
}
impl<T: 'static + Clone + Send> BroadcastStream<T> {
/// Create a new `BroadcastStream`.
pub fn new(rx: Receiver<T>) -> Self {
Self {
inner: ReusableBoxFuture::new(make_future(rx)),
}
}
}
impl<T: 'static + Clone + Send> Stream for BroadcastStream<T> {
    type Item = Result<T, BroadcastStreamRecvError>;

    /// Polls the stored receive future; once it completes, re-arms it with the
    /// returned receiver and translates the broadcast result into a stream item.
    fn poll_next(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Option<Self::Item>> {
        match self.inner.poll(cx) {
            Poll::Pending => Poll::Pending,
            Poll::Ready((result, rx)) => {
                // Put a fresh `recv` future back so the next poll can receive again.
                self.inner.set(make_future(rx));
                let item = match result {
                    Ok(value) => Some(Ok(value)),
                    // A closed channel terminates the stream rather than erroring.
                    Err(RecvError::Closed) => None,
                    // Lagging is surfaced as a recoverable per-item error.
                    Err(RecvError::Lagged(skipped)) => {
                        Some(Err(BroadcastStreamRecvError::Lagged(skipped)))
                    }
                };
                Poll::Ready(item)
            }
        }
    }
}
impl<T> fmt::Debug for BroadcastStream<T> {
    /// Opaque debug output — the boxed inner future has nothing inspectable,
    /// so only the type name is printed.
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        write!(f, "BroadcastStream")
    }
}

View File

@ -7,6 +7,7 @@
mod beacon_proposer_cache;
mod block_id;
mod broadcast_stream;
mod metrics;
mod state_id;
mod validator_inclusion;
@ -18,7 +19,7 @@ use beacon_chain::{
};
use beacon_proposer_cache::BeaconProposerCache;
use block_id::BlockId;
use eth2::types::{self as api_types, EventKind, ValidatorId};
use eth2::types::{self as api_types, ValidatorId};
use eth2_libp2p::{types::SyncState, EnrExt, NetworkGlobals, PeerId, PubsubMessage};
use lighthouse_version::version_with_platform;
use network::NetworkMessage;
@ -34,19 +35,17 @@ use std::convert::TryInto;
use std::future::Future;
use std::net::{Ipv4Addr, SocketAddr, SocketAddrV4};
use std::sync::Arc;
use tokio::stream::{StreamExt, StreamMap};
use tokio::sync::broadcast::error::RecvError;
use tokio::sync::mpsc::UnboundedSender;
use tokio_stream::StreamExt;
use types::{
Attestation, AttestationDuty, AttesterSlashing, CloneConfig, CommitteeCache, Epoch, EthSpec,
Hash256, ProposerSlashing, PublicKey, PublicKeyBytes, RelativeEpoch, SignedAggregateAndProof,
SignedBeaconBlock, SignedVoluntaryExit, Slot, YamlConfig,
};
use warp::http::StatusCode;
use warp::sse::ServerSentEvent;
use warp::sse::Event;
use warp::Reply;
use warp::{http::Response, Filter, Stream};
use warp_utils::reject::ServerSentEventError;
use warp::{http::Response, Filter};
use warp_utils::task::{blocking_json_task, blocking_task};
const API_PREFIX: &str = "eth";
@ -1610,9 +1609,9 @@ pub fn serve<T: BeaconChainTypes>(
.and(warp::path("duties"))
.and(warp::path("proposer"))
.and(warp::path::param::<Epoch>().or_else(|_| async {
Err(warp_utils::reject::custom_bad_request(
"Invalid epoch".to_string(),
))
Err(warp_utils::reject::custom_bad_request(
"Invalid epoch".to_string(),
))
}))
.and(warp::path::end())
.and(not_while_syncing_filter.clone())
@ -1637,7 +1636,7 @@ pub fn serve<T: BeaconChainTypes>(
if epoch == current_epoch {
let dependent_root_slot = current_epoch
.start_slot(T::EthSpec::slots_per_epoch()) - 1;
let dependent_root = if dependent_root_slot > chain.best_slot().map_err(warp_utils::reject::beacon_chain_error)? {
let dependent_root = if dependent_root_slot > chain.best_slot().map_err(warp_utils::reject::beacon_chain_error)? {
chain.head_beacon_block_root().map_err(warp_utils::reject::beacon_chain_error)?
} else {
chain
@ -1649,7 +1648,7 @@ pub fn serve<T: BeaconChainTypes>(
beacon_proposer_cache
.lock()
.get_proposers(&chain, epoch)
.map(|duties| api_types::DutiesResponse{ data: duties, dependent_root} )
.map(|duties| api_types::DutiesResponse { data: duties, dependent_root })
} else {
let state =
StateId::slot(epoch.start_slot(T::EthSpec::slots_per_epoch()))
@ -1657,7 +1656,7 @@ pub fn serve<T: BeaconChainTypes>(
let dependent_root_slot = state.current_epoch()
.start_slot(T::EthSpec::slots_per_epoch()) - 1;
let dependent_root = if dependent_root_slot > chain.best_slot().map_err(warp_utils::reject::beacon_chain_error)? {
let dependent_root = if dependent_root_slot > chain.best_slot().map_err(warp_utils::reject::beacon_chain_error)? {
chain.head_beacon_block_root().map_err(warp_utils::reject::beacon_chain_error)?
} else {
chain
@ -1691,8 +1690,7 @@ pub fn serve<T: BeaconChainTypes>(
})
.collect::<Result<Vec<api_types::ProposerData>, _>>()
.map(|duties| {
api_types::DutiesResponse{
api_types::DutiesResponse {
dependent_root,
data: duties,
}
@ -2053,7 +2051,7 @@ pub fn serve<T: BeaconChainTypes>(
"attestation_slot" => aggregate.message.aggregate.data.slot,
);
failures.push(api_types::Failure::new(index, format!("Verification: {:?}", e)));
},
}
}
}
@ -2087,7 +2085,7 @@ pub fn serve<T: BeaconChainTypes>(
if !failures.is_empty() {
Err(warp_utils::reject::indexed_bad_request("error processing aggregate and proofs".to_string(),
failures
failures,
))
} else {
Ok(())
@ -2358,24 +2356,6 @@ pub fn serve<T: BeaconChainTypes>(
})
});
fn merge_streams<T: EthSpec>(
stream_map: StreamMap<
String,
impl Stream<Item = Result<EventKind<T>, RecvError>> + Unpin + Send + 'static,
>,
) -> impl Stream<Item = Result<impl ServerSentEvent + Send + 'static, ServerSentEventError>>
+ Send
+ 'static {
// Convert messages into Server-Sent Events and return resulting stream.
stream_map.map(move |(topic_name, msg)| match msg {
Ok(data) => Ok((warp::sse::event(topic_name), warp::sse::json(data)).boxed()),
Err(e) => Err(warp_utils::reject::server_sent_event_error(format!(
"{:?}",
e
))),
})
}
let get_events = eth1_v1
.and(warp::path("events"))
.and(warp::path::end())
@ -2385,7 +2365,7 @@ pub fn serve<T: BeaconChainTypes>(
|topics: api_types::EventQuery, chain: Arc<BeaconChain<T>>| {
blocking_task(move || {
// for each topic subscribed spawn a new subscription
let mut stream_map = StreamMap::with_capacity(topics.topics.0.len());
let mut receivers = Vec::with_capacity(topics.topics.0.len());
if let Some(event_handler) = chain.event_handler.as_ref() {
for topic in topics.topics.0.clone() {
@ -2402,7 +2382,24 @@ pub fn serve<T: BeaconChainTypes>(
event_handler.subscribe_finalized()
}
};
stream_map.insert(topic.to_string(), Box::pin(receiver.into_stream()));
receivers.push(broadcast_stream::BroadcastStream::new(receiver).map(
|msg| {
match msg {
Ok(data) => Event::default()
.event(data.topic_name())
.json_data(data)
.map_err(|e| {
warp_utils::reject::server_sent_event_error(
format!("{:?}", e),
)
}),
Err(e) => Err(warp_utils::reject::server_sent_event_error(
format!("{:?}", e),
)),
}
},
));
}
} else {
return Err(warp_utils::reject::custom_server_error(
@ -2410,11 +2407,9 @@ pub fn serve<T: BeaconChainTypes>(
));
}
let stream = merge_streams(stream_map);
let s = futures::stream::select_all(receivers);
Ok::<_, warp::Rejection>(warp::sse::reply(
warp::sse::keep_alive().stream(stream),
))
Ok::<_, warp::Rejection>(warp::sse::reply(warp::sse::keep_alive().stream(s)))
})
},
);

View File

@ -15,6 +15,7 @@ use eth2_libp2p::{
Enr, EnrExt, NetworkGlobals, PeerId,
};
use futures::stream::{Stream, StreamExt};
use futures::FutureExt;
use http_api::{Config, Context};
use network::NetworkMessage;
use state_processing::per_slot_processing;
@ -25,7 +26,6 @@ use std::sync::Arc;
use tokio::sync::mpsc;
use tokio::sync::oneshot;
use tokio::time::Duration;
use tokio_compat_02::FutureExt;
use tree_hash::TreeHash;
use types::{
test_utils::generate_deterministic_keypairs, AggregateSignature, BeaconState, BitList, Domain,
@ -933,7 +933,7 @@ impl ApiTester {
self.client.post_beacon_blocks(next_block).await.unwrap();
assert!(
self.network_rx.try_recv().is_ok(),
self.network_rx.recv().await.is_some(),
"valid blocks should be sent to network"
);
@ -947,7 +947,7 @@ impl ApiTester {
assert!(self.client.post_beacon_blocks(&next_block).await.is_err());
assert!(
self.network_rx.try_recv().is_ok(),
self.network_rx.recv().await.is_some(),
"invalid blocks should be sent to network"
);
@ -997,7 +997,7 @@ impl ApiTester {
.unwrap();
assert!(
self.network_rx.try_recv().is_ok(),
self.network_rx.recv().await.is_some(),
"valid attestation should be sent to network"
);
@ -1034,7 +1034,7 @@ impl ApiTester {
}
assert!(
self.network_rx.try_recv().is_ok(),
self.network_rx.recv().await.is_some(),
"if some attestations are valid, we should send them to the network"
);
@ -1064,7 +1064,7 @@ impl ApiTester {
.unwrap();
assert!(
self.network_rx.try_recv().is_ok(),
self.network_rx.recv().await.is_some(),
"valid attester slashing should be sent to network"
);
@ -1081,7 +1081,7 @@ impl ApiTester {
.unwrap_err();
assert!(
self.network_rx.try_recv().is_err(),
self.network_rx.recv().now_or_never().is_none(),
"invalid attester slashing should not be sent to network"
);
@ -1110,7 +1110,7 @@ impl ApiTester {
.unwrap();
assert!(
self.network_rx.try_recv().is_ok(),
self.network_rx.recv().await.is_some(),
"valid proposer slashing should be sent to network"
);
@ -1127,7 +1127,7 @@ impl ApiTester {
.unwrap_err();
assert!(
self.network_rx.try_recv().is_err(),
self.network_rx.recv().now_or_never().is_none(),
"invalid proposer slashing should not be sent to network"
);
@ -1156,7 +1156,7 @@ impl ApiTester {
.unwrap();
assert!(
self.network_rx.try_recv().is_ok(),
self.network_rx.recv().await.is_some(),
"valid exit should be sent to network"
);
@ -1173,7 +1173,7 @@ impl ApiTester {
.unwrap_err();
assert!(
self.network_rx.try_recv().is_err(),
self.network_rx.recv().now_or_never().is_none(),
"invalid exit should not be sent to network"
);
@ -1822,7 +1822,7 @@ impl ApiTester {
.await
.unwrap();
assert!(self.network_rx.try_recv().is_ok());
assert!(self.network_rx.recv().await.is_some());
self
}
@ -1837,7 +1837,7 @@ impl ApiTester {
.await
.unwrap_err();
assert!(self.network_rx.try_recv().is_err());
assert!(self.network_rx.recv().now_or_never().is_none());
self
}
@ -1856,7 +1856,7 @@ impl ApiTester {
.await
.unwrap();
self.network_rx.try_recv().unwrap();
self.network_rx.recv().now_or_never().unwrap();
self
}
@ -2127,83 +2127,71 @@ async fn poll_events<S: Stream<Item = Result<EventKind<T>, eth2::Error>> + Unpin
#[tokio::test(flavor = "multi_thread", worker_threads = 2)]
async fn get_events() {
ApiTester::new().test_get_events().compat().await;
ApiTester::new().test_get_events().await;
}
#[tokio::test(flavor = "multi_thread", worker_threads = 2)]
async fn get_events_from_genesis() {
ApiTester::new_from_genesis()
.test_get_events_from_genesis()
.compat()
.await;
}
#[tokio::test(flavor = "multi_thread", worker_threads = 2)]
async fn beacon_get() {
async {
ApiTester::new()
.test_beacon_genesis()
.await
.test_beacon_states_root()
.await
.test_beacon_states_fork()
.await
.test_beacon_states_finality_checkpoints()
.await
.test_beacon_states_validators()
.await
.test_beacon_states_validator_balances()
.await
.test_beacon_states_committees()
.await
.test_beacon_states_validator_id()
.await
.test_beacon_headers_all_slots()
.await
.test_beacon_headers_all_parents()
.await
.test_beacon_headers_block_id()
.await
.test_beacon_blocks()
.await
.test_beacon_blocks_attestations()
.await
.test_beacon_blocks_root()
.await
.test_get_beacon_pool_attestations()
.await
.test_get_beacon_pool_attester_slashings()
.await
.test_get_beacon_pool_proposer_slashings()
.await
.test_get_beacon_pool_voluntary_exits()
.await;
}
.compat()
.await;
ApiTester::new()
.test_beacon_genesis()
.await
.test_beacon_states_root()
.await
.test_beacon_states_fork()
.await
.test_beacon_states_finality_checkpoints()
.await
.test_beacon_states_validators()
.await
.test_beacon_states_validator_balances()
.await
.test_beacon_states_committees()
.await
.test_beacon_states_validator_id()
.await
.test_beacon_headers_all_slots()
.await
.test_beacon_headers_all_parents()
.await
.test_beacon_headers_block_id()
.await
.test_beacon_blocks()
.await
.test_beacon_blocks_attestations()
.await
.test_beacon_blocks_root()
.await
.test_get_beacon_pool_attestations()
.await
.test_get_beacon_pool_attester_slashings()
.await
.test_get_beacon_pool_proposer_slashings()
.await
.test_get_beacon_pool_voluntary_exits()
.await;
}
#[tokio::test(flavor = "multi_thread", worker_threads = 2)]
async fn post_beacon_blocks_valid() {
ApiTester::new()
.test_post_beacon_blocks_valid()
.compat()
.await;
ApiTester::new().test_post_beacon_blocks_valid().await;
}
#[tokio::test(flavor = "multi_thread", worker_threads = 2)]
async fn post_beacon_blocks_invalid() {
ApiTester::new()
.test_post_beacon_blocks_invalid()
.compat()
.await;
ApiTester::new().test_post_beacon_blocks_invalid().await;
}
#[tokio::test(flavor = "multi_thread", worker_threads = 2)]
async fn beacon_pools_post_attestations_valid() {
ApiTester::new()
.test_post_beacon_pool_attestations_valid()
.compat()
.await;
}
@ -2211,7 +2199,6 @@ async fn beacon_pools_post_attestations_valid() {
async fn beacon_pools_post_attestations_invalid() {
ApiTester::new()
.test_post_beacon_pool_attestations_invalid()
.compat()
.await;
}
@ -2219,7 +2206,6 @@ async fn beacon_pools_post_attestations_invalid() {
async fn beacon_pools_post_attester_slashings_valid() {
ApiTester::new()
.test_post_beacon_pool_attester_slashings_valid()
.compat()
.await;
}
@ -2227,7 +2213,6 @@ async fn beacon_pools_post_attester_slashings_valid() {
async fn beacon_pools_post_attester_slashings_invalid() {
ApiTester::new()
.test_post_beacon_pool_attester_slashings_invalid()
.compat()
.await;
}
@ -2235,7 +2220,6 @@ async fn beacon_pools_post_attester_slashings_invalid() {
async fn beacon_pools_post_proposer_slashings_valid() {
ApiTester::new()
.test_post_beacon_pool_proposer_slashings_valid()
.compat()
.await;
}
@ -2243,7 +2227,6 @@ async fn beacon_pools_post_proposer_slashings_valid() {
async fn beacon_pools_post_proposer_slashings_invalid() {
ApiTester::new()
.test_post_beacon_pool_proposer_slashings_invalid()
.compat()
.await;
}
@ -2251,7 +2234,6 @@ async fn beacon_pools_post_proposer_slashings_invalid() {
async fn beacon_pools_post_voluntary_exits_valid() {
ApiTester::new()
.test_post_beacon_pool_voluntary_exits_valid()
.compat()
.await;
}
@ -2259,7 +2241,6 @@ async fn beacon_pools_post_voluntary_exits_valid() {
async fn beacon_pools_post_voluntary_exits_invalid() {
ApiTester::new()
.test_post_beacon_pool_voluntary_exits_invalid()
.compat()
.await;
}
@ -2267,13 +2248,10 @@ async fn beacon_pools_post_voluntary_exits_invalid() {
async fn config_get() {
ApiTester::new()
.test_get_config_fork_schedule()
.compat()
.await
.test_get_config_spec()
.compat()
.await
.test_get_config_deposit_contract()
.compat()
.await;
}
@ -2281,10 +2259,8 @@ async fn config_get() {
async fn debug_get() {
ApiTester::new()
.test_get_debug_beacon_states()
.compat()
.await
.test_get_debug_beacon_heads()
.compat()
.await;
}
@ -2292,34 +2268,24 @@ async fn debug_get() {
async fn node_get() {
ApiTester::new()
.test_get_node_version()
.compat()
.await
.test_get_node_syncing()
.compat()
.await
.test_get_node_identity()
.compat()
.await
.test_get_node_health()
.compat()
.await
.test_get_node_peers_by_id()
.compat()
.await
.test_get_node_peers()
.compat()
.await
.test_get_node_peer_count()
.compat()
.await;
}
#[tokio::test(flavor = "multi_thread", worker_threads = 2)]
async fn get_validator_duties_attester() {
ApiTester::new()
.test_get_validator_duties_attester()
.compat()
.await;
ApiTester::new().test_get_validator_duties_attester().await;
}
#[tokio::test(flavor = "multi_thread", worker_threads = 2)]
@ -2327,16 +2293,12 @@ async fn get_validator_duties_attester_with_skip_slots() {
ApiTester::new()
.skip_slots(E::slots_per_epoch() * 2)
.test_get_validator_duties_attester()
.compat()
.await;
}
#[tokio::test(flavor = "multi_thread", worker_threads = 2)]
async fn get_validator_duties_proposer() {
ApiTester::new()
.test_get_validator_duties_proposer()
.compat()
.await;
ApiTester::new().test_get_validator_duties_proposer().await;
}
#[tokio::test(flavor = "multi_thread", worker_threads = 2)]
@ -2344,13 +2306,12 @@ async fn get_validator_duties_proposer_with_skip_slots() {
ApiTester::new()
.skip_slots(E::slots_per_epoch() * 2)
.test_get_validator_duties_proposer()
.compat()
.await;
}
#[tokio::test(flavor = "multi_thread", worker_threads = 2)]
async fn block_production() {
ApiTester::new().test_block_production().compat().await;
ApiTester::new().test_block_production().await;
}
#[tokio::test(flavor = "multi_thread", worker_threads = 2)]
@ -2358,16 +2319,12 @@ async fn block_production_with_skip_slots() {
ApiTester::new()
.skip_slots(E::slots_per_epoch() * 2)
.test_block_production()
.compat()
.await;
}
#[tokio::test(flavor = "multi_thread", worker_threads = 2)]
async fn get_validator_attestation_data() {
ApiTester::new()
.test_get_validator_attestation_data()
.compat()
.await;
ApiTester::new().test_get_validator_attestation_data().await;
}
#[tokio::test(flavor = "multi_thread", worker_threads = 2)]
@ -2375,7 +2332,6 @@ async fn get_validator_attestation_data_with_skip_slots() {
ApiTester::new()
.skip_slots(E::slots_per_epoch() * 2)
.test_get_validator_attestation_data()
.compat()
.await;
}
@ -2383,7 +2339,6 @@ async fn get_validator_attestation_data_with_skip_slots() {
async fn get_validator_aggregate_attestation() {
ApiTester::new()
.test_get_validator_aggregate_attestation()
.compat()
.await;
}
@ -2392,7 +2347,6 @@ async fn get_validator_aggregate_attestation_with_skip_slots() {
ApiTester::new()
.skip_slots(E::slots_per_epoch() * 2)
.test_get_validator_aggregate_attestation()
.compat()
.await;
}
@ -2400,7 +2354,6 @@ async fn get_validator_aggregate_attestation_with_skip_slots() {
async fn get_validator_aggregate_and_proofs_valid() {
ApiTester::new()
.test_get_validator_aggregate_and_proofs_valid()
.compat()
.await;
}
@ -2409,7 +2362,6 @@ async fn get_validator_aggregate_and_proofs_valid_with_skip_slots() {
ApiTester::new()
.skip_slots(E::slots_per_epoch() * 2)
.test_get_validator_aggregate_and_proofs_valid()
.compat()
.await;
}
@ -2417,7 +2369,6 @@ async fn get_validator_aggregate_and_proofs_valid_with_skip_slots() {
async fn get_validator_aggregate_and_proofs_invalid() {
ApiTester::new()
.test_get_validator_aggregate_and_proofs_invalid()
.compat()
.await;
}
@ -2426,7 +2377,6 @@ async fn get_validator_aggregate_and_proofs_invalid_with_skip_slots() {
ApiTester::new()
.skip_slots(E::slots_per_epoch() * 2)
.test_get_validator_aggregate_and_proofs_invalid()
.compat()
.await;
}
@ -2434,7 +2384,6 @@ async fn get_validator_aggregate_and_proofs_invalid_with_skip_slots() {
async fn get_validator_beacon_committee_subscriptions() {
ApiTester::new()
.test_get_validator_beacon_committee_subscriptions()
.compat()
.await;
}
@ -2442,33 +2391,23 @@ async fn get_validator_beacon_committee_subscriptions() {
async fn lighthouse_endpoints() {
ApiTester::new()
.test_get_lighthouse_health()
.compat()
.await
.test_get_lighthouse_syncing()
.compat()
.await
.test_get_lighthouse_proto_array()
.compat()
.await
.test_get_lighthouse_validator_inclusion()
.compat()
.await
.test_get_lighthouse_validator_inclusion_global()
.compat()
.await
.test_get_lighthouse_eth1_syncing()
.compat()
.await
.test_get_lighthouse_eth1_block_cache()
.compat()
.await
.test_get_lighthouse_eth1_deposit_cache()
.compat()
.await
.test_get_lighthouse_beacon_states_ssz()
.compat()
.await
.test_get_lighthouse_staking()
.compat()
.await;
}

View File

@ -8,7 +8,7 @@ edition = "2018"
[dependencies]
prometheus = "0.11.0"
warp = { git = "https://github.com/sigp/warp ", branch = "lighthouse" }
warp = "0.3.0"
serde = { version = "1.0.116", features = ["derive"] }
slog = "2.5.2"
beacon_chain = { path = "../beacon_chain" }
@ -22,8 +22,7 @@ lighthouse_version = { path = "../../common/lighthouse_version" }
warp_utils = { path = "../../common/warp_utils" }
[dev-dependencies]
tokio = { version = "0.3.2", features = ["sync"] }
reqwest = { version = "0.10.8", features = ["json"] }
tokio = { version = "1.1.0", features = ["sync"] }
reqwest = { version = "0.11.0", features = ["json"] }
environment = { path = "../../lighthouse/environment" }
types = { path = "../../consensus/types" }
tokio-compat-02 = "0.1"

View File

@ -5,7 +5,6 @@ use reqwest::StatusCode;
use std::net::Ipv4Addr;
use std::sync::Arc;
use tokio::sync::oneshot;
use tokio_compat_02::FutureExt;
use types::MainnetEthSpec;
type Context = http_metrics::Context<EphemeralHarnessType<MainnetEthSpec>>;
@ -46,6 +45,5 @@ async fn returns_200_ok() {
assert_eq!(reqwest::get(&url).await.unwrap().status(), StatusCode::OK);
}
.compat()
.await
}

View File

@ -30,12 +30,13 @@ eth2_ssz_types = { path = "../../consensus/ssz_types" }
tree_hash = "0.1.1"
futures = "0.3.7"
error-chain = "0.12.4"
tokio = { version = "0.3.2", features = ["full"] }
tokio = { version = "1.1.0", features = ["full"] }
tokio-stream = "0.1.2"
parking_lot = "0.11.0"
smallvec = "1.6.1"
rand = "0.7.3"
fnv = "1.0.7"
rlp = "0.4.6"
rlp = "0.5.0"
lazy_static = "1.4.0"
lighthouse_metrics = { path = "../../common/lighthouse_metrics" }
task_executor = { path = "../../common/task_executor" }

View File

@ -38,7 +38,7 @@ impl StoreItem for PersistedDht {
}
fn as_store_bytes(&self) -> Vec<u8> {
rlp::encode_list(&self.enrs)
rlp::encode_list(&self.enrs).to_vec()
}
fn from_store_bytes(bytes: &[u8]) -> Result<Self, StoreError> {

View File

@ -19,6 +19,7 @@ use processor::Processor;
use slog::{debug, o, trace};
use std::sync::Arc;
use tokio::sync::mpsc;
use tokio_stream::wrappers::UnboundedReceiverStream;
use types::EthSpec;
/// Handles messages received from the network and client and organises syncing. This
@ -101,7 +102,7 @@ impl<T: BeaconChainTypes> Router<T> {
executor.spawn(
async move {
debug!(log, "Network message router started");
handler_recv
UnboundedReceiverStream::new(handler_recv)
.for_each(move |msg| future::ready(handler.handle_message(msg)))
.await;
},

View File

@ -266,7 +266,7 @@ fn spawn_service<T: BeaconChainTypes>(
info!(service.log, "Network service shutdown");
return;
}
_ = service.metrics_update.next() => {
_ = service.metrics_update.tick() => {
// update various network metrics
metric_update_counter +=1;
if metric_update_counter % T::EthSpec::default_spec().seconds_per_slot == 0 {
@ -283,7 +283,7 @@ fn spawn_service<T: BeaconChainTypes>(
metrics::update_sync_metrics(&service.network_globals);
}
_ = service.gossipsub_parameter_update.next() => {
_ = service.gossipsub_parameter_update.tick() => {
if let Ok(slot) = service.beacon_chain.slot() {
if let Some(active_validators) = service.beacon_chain.with_head(|head| {
Ok::<_, BeaconChainError>(

View File

@ -1,14 +1,11 @@
#![cfg(test)]
//TODO: Drop compat library once reqwest and other libraries update to tokio 0.3
use beacon_chain::StateSkipConfig;
use node_test_rig::{
environment::{Environment, EnvironmentBuilder},
eth2::types::StateId,
testing_client_config, LocalBeaconNode,
};
use tokio_compat_02::FutureExt;
use types::{EthSpec, MinimalEthSpec, Slot};
fn env_builder() -> EnvironmentBuilder<MinimalEthSpec> {
@ -44,11 +41,7 @@ fn http_server_genesis_state() {
let api_state = env
.runtime()
.block_on(
remote_node
.get_debug_beacon_states(StateId::Slot(Slot::new(0)))
.compat(),
)
.block_on(remote_node.get_debug_beacon_states(StateId::Slot(Slot::new(0))))
.expect("should fetch state from http api")
.unwrap()
.data;

View File

@ -8,7 +8,7 @@ edition = "2018"
beacon_chain = { path = "../beacon_chain" }
types = { path = "../../consensus/types" }
slot_clock = { path = "../../common/slot_clock" }
tokio = { version = "0.3.2", features = ["full"] }
tokio = { version = "1.1.0", features = ["full"] }
slog = "2.5.2"
parking_lot = "0.11.0"
futures = "0.3.7"

View File

@ -3,7 +3,6 @@
//! This service allows task execution on the beacon node for various functionality.
use beacon_chain::{BeaconChain, BeaconChainTypes};
use futures::stream::StreamExt;
use slog::info;
use slot_clock::SlotClock;
use std::sync::Arc;
@ -26,7 +25,8 @@ pub fn spawn_timer<T: BeaconChainTypes>(
// Warning: `interval_at` panics if `seconds_per_slot` = 0.
let mut interval = interval_at(start_instant, Duration::from_secs(seconds_per_slot));
let timer_future = async move {
while interval.next().await.is_some() {
loop {
interval.tick().await;
beacon_chain.per_slot_task();
}
};

View File

@ -13,7 +13,7 @@ eth2_network_config = { path = "../common/eth2_network_config" }
eth2_ssz = "0.1.2"
slog = "2.5.2"
sloggers = "1.0.1"
tokio = "0.3.2"
tokio = "1.1.0"
log = "0.4.11"
slog-term = "2.6.0"
logging = { path = "../common/logging" }

View File

@ -5,7 +5,6 @@ use eth2_libp2p::{
discv5::{enr::NodeId, Discv5, Discv5ConfigBuilder, Discv5Event},
EnrExt, Eth2Enr,
};
use futures::prelude::*;
use slog::info;
use types::EthSpec;
@ -78,7 +77,7 @@ pub async fn run<T: EthSpec>(config: BootNodeConfig<T>, log: slog::Logger) {
// listen for events
loop {
tokio::select! {
_ = metric_interval.next() => {
_ = metric_interval.tick() => {
// display server metrics
let metrics = discv5.metrics();
info!(log, "Server metrics"; "connected_peers" => discv5.connected_peers(), "active_sessions" => metrics.active_sessions, "requests/s" => format!("{:.2}", metrics.unsolicited_requests_per_second));

View File

@ -7,7 +7,7 @@ edition = "2018"
build = "build.rs"
[build-dependencies]
reqwest = { version = "0.10.8", features = ["blocking", "json", "native-tls-vendored"] }
reqwest = { version = "0.11.0", features = ["blocking", "json", "native-tls-vendored"] }
serde_json = "1.0.58"
sha2 = "0.9.1"
hex = "0.4.2"

View File

@ -11,7 +11,7 @@ serde = { version = "1.0.116", features = ["derive"] }
serde_json = "1.0.58"
types = { path = "../../consensus/types" }
hex = "0.4.2"
reqwest = { version = "0.10.8", features = ["json","stream"] }
reqwest = { version = "0.11.0", features = ["json","stream"] }
eth2_libp2p = { path = "../../beacon_node/eth2_libp2p" }
proto_array = { path = "../../consensus/proto_array", optional = true }
serde_utils = { path = "../../consensus/serde_utils" }
@ -19,7 +19,7 @@ zeroize = { version = "1.1.1", features = ["zeroize_derive"] }
eth2_keystore = { path = "../../crypto/eth2_keystore" }
libsecp256k1 = "0.3.5"
ring = "0.16.19"
bytes = "0.5.6"
bytes = "1.0.1"
account_utils = { path = "../../common/account_utils" }
eth2_ssz = "0.1.2"
eth2_ssz_derive = "0.1.0"

View File

@ -682,6 +682,16 @@ pub enum EventKind<T: EthSpec> {
}
impl<T: EthSpec> EventKind<T> {
pub fn topic_name(&self) -> &str {
match self {
EventKind::Head(_) => "head",
EventKind::Block(_) => "block",
EventKind::Attestation(_) => "attestation",
EventKind::VoluntaryExit(_) => "voluntary_exit",
EventKind::FinalizedCheckpoint(_) => "finalized_checkpoint",
}
}
pub fn from_sse_bytes(message: &[u8]) -> Result<Self, ServerError> {
let s = from_utf8(message)
.map_err(|e| ServerError::InvalidServerSentEvent(format!("{:?}", e)))?;

View File

@ -19,4 +19,4 @@ serde_yaml = "0.8.13"
types = { path = "../../consensus/types"}
eth2_ssz = "0.1.2"
eth2_config = { path = "../eth2_config"}
enr = { version = "0.4.0", features = ["ed25519", "k256"] }
enr = { version = "0.5.0", features = ["ed25519", "k256"] }

View File

@ -6,7 +6,7 @@ edition = "2018"
[dependencies]
futures = "0.3.7"
tokio-util = { version = "0.5.0", features = ["time"] }
tokio-util = { version = "0.6.2", features = ["time"] }
[dev-dependencies]
tokio = { version = "0.3.2", features = ["time", "rt-multi-thread", "macros"] }
tokio = { version = "1.1.0", features = ["time", "rt-multi-thread", "macros"] }

View File

@ -9,7 +9,7 @@ rand = "0.7.3"
remote_signer_test = { path = "../../testing/remote_signer_test" }
[dependencies]
reqwest = { version = "0.10.8", features = ["json"] }
reqwest = { version = "0.11.0", features = ["json"] }
serde = { version = "1.0.116", features = ["derive"] }
tokio = { version = "0.3.5", features = ["time"] }
tokio = { version = "1.1.0", features = ["time"] }
types = { path = "../../consensus/types" }

View File

@ -14,8 +14,8 @@ state_processing = { path = "../../consensus/state_processing" }
bls = { path = "../../crypto/bls" }
serde = { version = "1.0.110", features = ["derive"] }
rayon = "1.3.0"
hyper = "0.13.5"
tokio = { version = "0.3.5", features = ["sync"] }
hyper = "0.14.4"
tokio = { version = "1.1.0", features = ["sync"] }
environment = { path = "../../lighthouse/environment" }
store = { path = "../../beacon_node/store" }
beacon_chain = { path = "../../beacon_node/beacon_chain" }

View File

@ -5,10 +5,9 @@ authors = ["Sigma Prime <contact@sigmaprime.io>"]
edition = "2018"
[dependencies]
tokio = { version = "0.3.2", features = ["rt"] }
tokio = { version = "1.1.0", features = ["rt"] }
slog = "2.5.2"
futures = "0.3.7"
exit-future = "0.2.0"
lazy_static = "1.4.0"
lighthouse_metrics = { path = "../lighthouse_metrics" }
tokio-compat-02 = "0.1"

View File

@ -5,7 +5,6 @@ use futures::prelude::*;
use slog::{debug, o, trace};
use std::sync::Weak;
use tokio::runtime::Runtime;
use tokio_compat_02::FutureExt;
/// A wrapper over a runtime handle which can spawn async and blocking tasks.
#[derive(Clone)]
@ -63,7 +62,7 @@ impl TaskExecutor {
if let Some(int_gauge) = metrics::get_int_gauge(&metrics::ASYNC_TASKS_COUNT, &[name]) {
// Task is shutdown before it completes if `exit` receives
let int_gauge_1 = int_gauge.clone();
let future = future::select(Box::pin(task.compat()), exit).then(move |either| {
let future = future::select(Box::pin(task), exit).then(move |either| {
match either {
future::Either::Left(_) => trace!(log, "Async task completed"; "task" => name),
future::Either::Right(_) => {
@ -99,12 +98,10 @@ impl TaskExecutor {
) {
if let Some(int_gauge) = metrics::get_int_gauge(&metrics::ASYNC_TASKS_COUNT, &[name]) {
let int_gauge_1 = int_gauge.clone();
let future = task
.then(move |_| {
int_gauge_1.dec();
futures::future::ready(())
})
.compat();
let future = task.then(move |_| {
int_gauge_1.dec();
futures::future::ready(())
});
int_gauge.inc();
if let Some(runtime) = self.runtime.upgrade() {
@ -186,7 +183,7 @@ impl TaskExecutor {
int_gauge.inc();
if let Some(runtime) = self.runtime.upgrade() {
Some(runtime.spawn(future.compat()))
Some(runtime.spawn(future))
} else {
debug!(self.log, "Couldn't spawn task. Runtime shutting down");
None

View File

@ -7,14 +7,14 @@ edition = "2018"
# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html
[dependencies]
warp = { git = "https://github.com/sigp/warp ", branch = "lighthouse" }
warp = "0.3.0"
eth2 = { path = "../eth2" }
types = { path = "../../consensus/types" }
beacon_chain = { path = "../../beacon_node/beacon_chain" }
state_processing = { path = "../../consensus/state_processing" }
safe_arith = { path = "../../consensus/safe_arith" }
serde = { version = "1.0.116", features = ["derive"] }
tokio = { version = "0.3.2", features = ["sync"] }
tokio = { version = "1.1.0", features = ["sync"] }
headers = "0.3.2"
lighthouse_metrics = { path = "../lighthouse_metrics" }
lazy_static = "1.4.0"

View File

@ -635,12 +635,7 @@ fn invalid_block_future_slot() {
|block, _| {
block.slot = block.slot + 1;
},
|err| {
assert_invalid_block!(
err,
InvalidBlock::FutureSlot { .. }
)
},
|err| assert_invalid_block!(err, InvalidBlock::FutureSlot { .. }),
);
}

View File

@ -5,7 +5,7 @@ authors = ["Paul Hauner <paul@paulhauner.com>"]
edition = "2018"
[dependencies]
bytes = "0.6.0"
bytes = "1.0.1"
[dev-dependencies]
yaml-rust = "0.4.4"

View File

@ -56,7 +56,7 @@ pub trait EthSpec: 'static + Default + Sync + Send + Clone + Debug + PartialEq +
/*
* Misc
*/
type MaxValidatorsPerCommittee: Unsigned + Clone + Sync + Send + Debug + PartialEq + Eq;
type MaxValidatorsPerCommittee: Unsigned + Clone + Sync + Send + Debug + PartialEq + Eq + Unpin;
/*
* Time parameters
*/

View File

@ -27,7 +27,7 @@ dirs = "3.0.1"
genesis = { path = "../beacon_node/genesis" }
deposit_contract = { path = "../common/deposit_contract" }
tree_hash = "0.1.1"
tokio = { version = "0.3.2", features = ["full"] }
tokio = { version = "1.1.0", features = ["full"] }
clap_utils = { path = "../common/clap_utils" }
eth2_libp2p = { path = "../beacon_node/eth2_libp2p" }
validator_dir = { path = "../common/validator_dir", features = ["insecure_keys"] }
@ -37,4 +37,3 @@ lighthouse_version = { path = "../common/lighthouse_version" }
directory = { path = "../common/directory" }
account_utils = { path = "../common/account_utils" }
eth2_wallet = { path = "../crypto/eth2_wallet" }
tokio-compat-02 = "0.1"

View File

@ -6,7 +6,6 @@ use ssz::Encode;
use std::cmp::max;
use std::path::PathBuf;
use std::time::Duration;
use tokio_compat_02::FutureExt;
use types::EthSpec;
/// Interval between polling the eth1 node for genesis information.
@ -62,22 +61,19 @@ pub fn run<T: EthSpec>(mut env: Environment<T>, matches: &ArgMatches<'_>) -> Res
let genesis_service =
Eth1GenesisService::new(config, env.core_context().log().clone(), spec.clone());
env.runtime().block_on(
async {
let _ = genesis_service
.wait_for_genesis_state::<T>(ETH1_GENESIS_UPDATE_INTERVAL, spec)
.await
.map(move |genesis_state| {
eth2_network_config.genesis_state_bytes = Some(genesis_state.as_ssz_bytes());
eth2_network_config.force_write_to_file(testnet_dir)
})
.map_err(|e| format!("Failed to find genesis: {}", e))?;
env.runtime().block_on(async {
let _ = genesis_service
.wait_for_genesis_state::<T>(ETH1_GENESIS_UPDATE_INTERVAL, spec)
.await
.map(move |genesis_state| {
eth2_network_config.genesis_state_bytes = Some(genesis_state.as_ssz_bytes());
eth2_network_config.force_write_to_file(testnet_dir)
})
.map_err(|e| format!("Failed to find genesis: {}", e))?;
info!("Starting service to produce genesis BeaconState from eth1");
info!("Connecting to eth1 http endpoints: {:?}", endpoints);
info!("Starting service to produce genesis BeaconState from eth1");
info!("Connecting to eth1 http endpoints: {:?}", endpoints);
Ok(())
}
.compat(),
)
Ok(())
})
}

View File

@ -20,7 +20,7 @@ spec-v12 = []
[dependencies]
beacon_node = { "path" = "../beacon_node" }
tokio = "0.3.2"
tokio = "1.1.0"
slog = { version = "2.5.2", features = ["max_level_trace"] }
sloggers = "1.0.1"
types = { "path" = "../consensus/types" }
@ -41,7 +41,6 @@ directory = { path = "../common/directory" }
lighthouse_version = { path = "../common/lighthouse_version" }
account_utils = { path = "../common/account_utils" }
remote_signer = { "path" = "../remote_signer" }
tokio-compat-02 = "0.1"
[dev-dependencies]
tempfile = "3.1.0"

View File

@ -5,7 +5,7 @@ authors = ["Paul Hauner <paul@paulhauner.com>"]
edition = "2018"
[dependencies]
tokio = { version = "0.3.2", features = ["macros", "rt", "rt-multi-thread" ] }
tokio = { version = "1.1.0", features = ["macros", "rt", "rt-multi-thread" ] }
slog = { version = "2.5.2", features = ["max_level_trace"] }
sloggers = "1.0.1"
types = { "path" = "../../consensus/types" }

View File

@ -7,7 +7,6 @@ use lighthouse_version::VERSION;
use slog::{crit, info, warn};
use std::path::PathBuf;
use std::process::exit;
use tokio_compat_02::FutureExt;
use types::{EthSpec, EthSpecId};
use validator_client::ProductionValidatorClient;
@ -281,19 +280,16 @@ fn run<E: EthSpec>(
&context.eth2_config().spec,
context.log().clone(),
)?;
environment.runtime().spawn(
async move {
if let Err(e) = ProductionBeaconNode::new(context.clone(), config).await {
crit!(log, "Failed to start beacon node"; "reason" => e);
// Ignore the error since it always occurs during normal operation when
// shutting down.
let _ = executor
.shutdown_sender()
.try_send("Failed to start beacon node");
}
environment.runtime().spawn(async move {
if let Err(e) = ProductionBeaconNode::new(context.clone(), config).await {
crit!(log, "Failed to start beacon node"; "reason" => e);
// Ignore the error since it always occurs during normal operation when
// shutting down.
let _ = executor
.shutdown_sender()
.try_send("Failed to start beacon node");
}
.compat(),
);
});
}
("validator_client", Some(matches)) => {
let context = environment.core_context();
@ -301,26 +297,23 @@ fn run<E: EthSpec>(
let executor = context.executor.clone();
let config = validator_client::Config::from_cli(&matches, context.log())
.map_err(|e| format!("Unable to initialize validator config: {}", e))?;
environment.runtime().spawn(
async move {
let run = async {
ProductionValidatorClient::new(context, config)
.await?
.start_service()?;
environment.runtime().spawn(async move {
let run = async {
ProductionValidatorClient::new(context, config)
.await?
.start_service()?;
Ok::<(), String>(())
};
if let Err(e) = run.await {
crit!(log, "Failed to start validator client"; "reason" => e);
// Ignore the error since it always occurs during normal operation when
// shutting down.
let _ = executor
.shutdown_sender()
.try_send("Failed to start validator client");
}
Ok::<(), String>(())
};
if let Err(e) = run.await {
crit!(log, "Failed to start validator client"; "reason" => e);
// Ignore the error since it always occurs during normal operation when
// shutting down.
let _ = executor
.shutdown_sender()
.try_send("Failed to start validator client");
}
.compat(),
);
});
}
("remote_signer", Some(matches)) => {
if let Err(e) = remote_signer::run(&mut environment, matches) {

View File

@ -9,7 +9,7 @@ clap = "2.33.3"
client_backend = { path = "../backend", package = "remote_signer_backend" }
environment = { path = "../../lighthouse/environment" }
futures = "0.3.6"
hyper = "0.13.8"
hyper = "0.14.4"
lazy_static = "1.4.0"
regex = "1.3.9"
serde = { version = "1.0.116", features = ["derive"] }

View File

@ -14,5 +14,6 @@ slog = "2.5.2"
slot_clock = { path = "../../common/slot_clock" }
state_processing = { path = "../../consensus/state_processing" }
task_executor = { path = "../../common/task_executor" }
tokio = { version = "0.3.5", features = ["full"] }
tokio = { version = "1.1.0", features = ["full"] }
tokio-stream = "0.1.2"
types = { path = "../../consensus/types" }

View File

@ -19,7 +19,6 @@ use state_processing::{
use std::sync::mpsc::{sync_channel, Receiver, SyncSender, TrySendError};
use std::sync::Arc;
use task_executor::TaskExecutor;
use tokio::stream::StreamExt;
use tokio::sync::mpsc::UnboundedSender;
use tokio::time::{interval_at, Duration, Instant};
use types::{AttesterSlashing, Epoch, EthSpec, ProposerSlashing};
@ -83,7 +82,8 @@ impl<T: BeaconChainTypes> SlasherService<T> {
// https://github.com/sigp/lighthouse/issues/1861
let mut interval = interval_at(Instant::now(), Duration::from_secs(update_period));
while interval.next().await.is_some() {
loop {
interval.tick().await;
if let Some(current_slot) = beacon_chain.slot_clock.now() {
let current_epoch = current_slot.epoch(T::EthSpec::slots_per_epoch());
if let Err(TrySendError::Disconnected(_)) = notif_sender.try_send(current_epoch) {

View File

@ -5,7 +5,7 @@ authors = ["Paul Hauner <paul@paulhauner.com>"]
edition = "2018"
[dependencies]
tokio = { version = "0.3.2", features = ["time"] }
tokio = { version = "1.1.0", features = ["time"] }
tokio-compat-02 = "0.1"
web3 = "0.14.0"
futures = "0.3.7"

View File

@ -10,7 +10,7 @@ beacon_node = { path = "../../beacon_node" }
types = { path = "../../consensus/types" }
eth2_config = { path = "../../common/eth2_config" }
tempfile = "3.1.0"
reqwest = { version = "0.10.8", features = ["native-tls-vendored"] }
reqwest = { version = "0.11.0", features = ["native-tls-vendored"] }
url = "2.1.1"
serde = "1.0.116"
futures = "0.3.7"

View File

@ -11,10 +11,9 @@ hex = "0.4.2"
httpmock = "0.5.1"
remote_signer_client = { path = "../../remote_signer/client" }
remote_signer_consumer = { path = "../../common/remote_signer_consumer" }
reqwest = { version = "0.10.8", features = ["blocking", "json"] }
reqwest = { version = "0.11.0", features = ["blocking", "json"] }
serde = { version = "1.0.116", features = ["derive"] }
serde_json = "1.0.58"
tempfile = "3.1.0"
tokio = { version = "0.3.5", features = ["time"] }
tokio = { version = "1.1.0", features = ["time"] }
types = { path = "../../consensus/types" }
tokio-compat-02 = "0.1"

View File

@ -26,10 +26,7 @@ impl ApiTestSigner<E> {
let client = environment
.runtime()
.block_on(tokio_compat_02::FutureExt::compat(Client::new(
runtime_context,
&matches,
)))
.block_on(Client::new(runtime_context, &matches))
.map_err(|e| format!("Failed to init Rest API: {}", e))
.unwrap();

View File

@ -30,13 +30,13 @@ pub fn do_sign_request<E: EthSpec, T: RemoteSignerObject>(
) -> Result<String, Error> {
let runtime = Builder::new_multi_thread().enable_all().build().unwrap();
runtime.block_on(tokio_compat_02::FutureExt::compat(test_client.sign(
runtime.block_on(test_client.sign(
&test_input.public_key,
test_input.bls_domain,
test_input.data,
test_input.fork,
test_input.genesis_validators_root,
)))
))
}
#[derive(Serialize)]

View File

@ -13,9 +13,8 @@ types = { path = "../../consensus/types" }
validator_client = { path = "../../validator_client" }
parking_lot = "0.11.0"
futures = "0.3.7"
tokio = "0.3.2"
tokio = "1.1.0"
eth1_test_rig = { path = "../eth1_test_rig" }
env_logger = "0.8.2"
clap = "2.33.3"
rayon = "1.4.1"
tokio-compat-02 = "0.1"

View File

@ -90,7 +90,8 @@ pub fn run_eth1_sim(matches: &ArgMatches) -> Result<(), String> {
// Start a timer that produces eth1 blocks on an interval.
tokio::spawn(async move {
let mut interval = tokio::time::interval(eth1_block_time);
while interval.next().await.is_some() {
loop {
interval.tick().await;
let _ = ganache.evm_mine().await;
}
});
@ -219,9 +220,7 @@ pub fn run_eth1_sim(matches: &ArgMatches) -> Result<(), String> {
Ok::<(), String>(())
};
env.runtime()
.block_on(tokio_compat_02::FutureExt::compat(main_future))
.unwrap();
env.runtime().block_on(main_future).unwrap();
env.fire_signal();
env.shutdown_on_idle();

View File

@ -158,9 +158,7 @@ pub fn run_no_eth1_sim(matches: &ArgMatches) -> Result<(), String> {
Ok::<(), String>(())
};
env.runtime()
.block_on(tokio_compat_02::FutureExt::compat(main_future))
.unwrap();
env.runtime().block_on(main_future).unwrap();
env.fire_signal();
env.shutdown_on_idle();

View File

@ -130,9 +130,7 @@ fn syncing_sim(
Ok::<(), String>(())
};
env.runtime()
.block_on(tokio_compat_02::FutureExt::compat(main_future))
.unwrap();
env.runtime().block_on(main_future).unwrap();
env.fire_signal();
env.shutdown_on_idle();
@ -217,7 +215,8 @@ pub async fn verify_one_node_sync<E: EthSpec>(
// limited to at most `sync_timeout` epochs
let mut interval = tokio::time::interval(epoch_duration);
let mut count = 0;
while interval.next().await.is_some() {
loop {
interval.tick().await;
if count >= sync_timeout || !check_still_syncing(&network_c).await? {
break;
}
@ -254,7 +253,8 @@ pub async fn verify_two_nodes_sync<E: EthSpec>(
// limited to at most `sync_timeout` epochs
let mut interval = tokio::time::interval(epoch_duration);
let mut count = 0;
while interval.next().await.is_some() {
loop {
interval.tick().await;
if count >= sync_timeout || !check_still_syncing(&network_c).await? {
break;
}
@ -302,7 +302,8 @@ pub async fn verify_in_between_sync<E: EthSpec>(
// limited to at most `sync_timeout` epochs
let mut interval = tokio::time::interval(epoch_duration);
let mut count = 0;
while interval.next().await.is_some() {
loop {
interval.tick().await;
if count >= sync_timeout || !check_still_syncing(&network_c).await? {
break;
}

View File

@ -9,9 +9,8 @@ name = "validator_client"
path = "src/lib.rs"
[dev-dependencies]
tokio = { version = "0.3.2", features = ["time", "rt-multi-thread", "macros"] }
tokio = { version = "1.1.0", features = ["time", "rt-multi-thread", "macros"] }
deposit_contract = { path = "../common/deposit_contract" }
tokio-compat-02 = "0.1"
[dependencies]
eth2_ssz = "0.1.2"
@ -30,7 +29,7 @@ serde_yaml = "0.8.13"
slog = { version = "2.5.2", features = ["max_level_trace", "release_max_level_trace"] }
slog-async = "2.5.0"
slog-term = "2.6.0"
tokio = { version = "0.3.2", features = ["time"] }
tokio = { version = "1.1.0", features = ["time"] }
futures = "0.3.7"
dirs = "3.0.1"
directory = { path = "../common/directory" }
@ -53,8 +52,8 @@ eth2_keystore = { path = "../crypto/eth2_keystore" }
account_utils = { path = "../common/account_utils" }
lighthouse_version = { path = "../common/lighthouse_version" }
warp_utils = { path = "../common/warp_utils" }
warp = { git = "https://github.com/sigp/warp ", branch = "lighthouse" }
hyper = "0.13.8"
warp = "0.3.0"
hyper = "0.14.4"
serde_utils = { path = "../consensus/serde_utils" }
libsecp256k1 = "0.3.5"
ring = "0.16.19"

View File

@ -6,7 +6,6 @@ use crate::{
};
use environment::RuntimeContext;
use futures::future::FutureExt;
use futures::StreamExt;
use slog::{crit, error, info, trace};
use slot_clock::SlotClock;
use std::collections::HashMap;
@ -149,7 +148,8 @@ impl<T: SlotClock + 'static, E: EthSpec> AttestationService<T, E> {
let executor = self.context.executor.clone();
let interval_fut = async move {
while interval.next().await.is_some() {
loop {
interval.tick().await;
let log = self.context.log();
if let Err(e) = self.spawn_attestation_tasks(slot_duration) {

View File

@ -5,7 +5,7 @@ use crate::{
};
use environment::RuntimeContext;
use futures::channel::mpsc::Sender;
use futures::{SinkExt, StreamExt};
use futures::SinkExt;
use parking_lot::RwLock;
use slog::{debug, error, trace, warn};
use slot_clock::SlotClock;
@ -490,7 +490,8 @@ impl<T: SlotClock + 'static, E: EthSpec> DutiesService<T, E> {
let executor = self.inner.context.executor.clone();
let interval_fut = async move {
while interval.next().await.is_some() {
loop {
interval.tick().await;
self.clone().do_update(&mut block_service_tx, &spec).await;
}
};

View File

@ -3,7 +3,6 @@ use crate::http_metrics::metrics;
use environment::RuntimeContext;
use eth2::types::StateId;
use futures::future::FutureExt;
use futures::StreamExt;
use parking_lot::RwLock;
use slog::Logger;
use slog::{debug, trace};
@ -164,7 +163,8 @@ impl<T: SlotClock + 'static, E: EthSpec> ForkService<T, E> {
let executor = context.executor.clone();
let interval_fut = async move {
while interval.next().await.is_some() {
loop {
interval.tick().await;
self.clone().do_update().await.ok();
}
};

View File

@ -25,7 +25,6 @@ use std::sync::Arc;
use tempfile::{tempdir, TempDir};
use tokio::runtime::Runtime;
use tokio::sync::oneshot;
use tokio_compat_02::FutureExt;
const PASSWORD_BYTES: &[u8] = &[42, 50, 37];
@ -439,141 +438,126 @@ struct KeystoreValidatorScenario {
fn invalid_pubkey() {
let runtime = build_runtime();
let weak_runtime = Arc::downgrade(&runtime);
runtime.block_on(
async {
ApiTester::new(weak_runtime)
.await
.invalidate_api_token()
.test_get_lighthouse_version_invalid()
.await;
}
.compat(),
);
runtime.block_on(async {
ApiTester::new(weak_runtime)
.await
.invalidate_api_token()
.test_get_lighthouse_version_invalid()
.await;
});
}
#[test]
fn simple_getters() {
let runtime = build_runtime();
let weak_runtime = Arc::downgrade(&runtime);
runtime.block_on(
async {
ApiTester::new(weak_runtime)
.await
.test_get_lighthouse_version()
.await
.test_get_lighthouse_health()
.await
.test_get_lighthouse_spec()
.await;
}
.compat(),
);
runtime.block_on(async {
ApiTester::new(weak_runtime)
.await
.test_get_lighthouse_version()
.await
.test_get_lighthouse_health()
.await
.test_get_lighthouse_spec()
.await;
});
}
#[test]
fn hd_validator_creation() {
let runtime = build_runtime();
let weak_runtime = Arc::downgrade(&runtime);
runtime.block_on(
async {
ApiTester::new(weak_runtime)
.await
.assert_enabled_validators_count(0)
.assert_validators_count(0)
.create_hd_validators(HdValidatorScenario {
count: 2,
specify_mnemonic: true,
key_derivation_path_offset: 0,
disabled: vec![],
})
.await
.assert_enabled_validators_count(2)
.assert_validators_count(2)
.create_hd_validators(HdValidatorScenario {
count: 1,
specify_mnemonic: false,
key_derivation_path_offset: 0,
disabled: vec![0],
})
.await
.assert_enabled_validators_count(2)
.assert_validators_count(3)
.create_hd_validators(HdValidatorScenario {
count: 0,
specify_mnemonic: true,
key_derivation_path_offset: 4,
disabled: vec![],
})
.await
.assert_enabled_validators_count(2)
.assert_validators_count(3);
}
.compat(),
);
runtime.block_on(async {
ApiTester::new(weak_runtime)
.await
.assert_enabled_validators_count(0)
.assert_validators_count(0)
.create_hd_validators(HdValidatorScenario {
count: 2,
specify_mnemonic: true,
key_derivation_path_offset: 0,
disabled: vec![],
})
.await
.assert_enabled_validators_count(2)
.assert_validators_count(2)
.create_hd_validators(HdValidatorScenario {
count: 1,
specify_mnemonic: false,
key_derivation_path_offset: 0,
disabled: vec![0],
})
.await
.assert_enabled_validators_count(2)
.assert_validators_count(3)
.create_hd_validators(HdValidatorScenario {
count: 0,
specify_mnemonic: true,
key_derivation_path_offset: 4,
disabled: vec![],
})
.await
.assert_enabled_validators_count(2)
.assert_validators_count(3);
});
}
#[test]
fn validator_enabling() {
let runtime = build_runtime();
let weak_runtime = Arc::downgrade(&runtime);
runtime.block_on(
async {
ApiTester::new(weak_runtime)
.await
.create_hd_validators(HdValidatorScenario {
count: 2,
specify_mnemonic: false,
key_derivation_path_offset: 0,
disabled: vec![],
})
.await
.assert_enabled_validators_count(2)
.assert_validators_count(2)
.set_validator_enabled(0, false)
.await
.assert_enabled_validators_count(1)
.assert_validators_count(2)
.set_validator_enabled(0, true)
.await
.assert_enabled_validators_count(2)
.assert_validators_count(2);
}
.compat(),
);
runtime.block_on(async {
ApiTester::new(weak_runtime)
.await
.create_hd_validators(HdValidatorScenario {
count: 2,
specify_mnemonic: false,
key_derivation_path_offset: 0,
disabled: vec![],
})
.await
.assert_enabled_validators_count(2)
.assert_validators_count(2)
.set_validator_enabled(0, false)
.await
.assert_enabled_validators_count(1)
.assert_validators_count(2)
.set_validator_enabled(0, true)
.await
.assert_enabled_validators_count(2)
.assert_validators_count(2);
});
}
#[test]
fn keystore_validator_creation() {
let runtime = build_runtime();
let weak_runtime = Arc::downgrade(&runtime);
runtime.block_on(
async {
ApiTester::new(weak_runtime)
.await
.assert_enabled_validators_count(0)
.assert_validators_count(0)
.create_keystore_validators(KeystoreValidatorScenario {
correct_password: true,
enabled: true,
})
.await
.assert_enabled_validators_count(1)
.assert_validators_count(1)
.create_keystore_validators(KeystoreValidatorScenario {
correct_password: false,
enabled: true,
})
.await
.assert_enabled_validators_count(1)
.assert_validators_count(1)
.create_keystore_validators(KeystoreValidatorScenario {
correct_password: true,
enabled: false,
})
.await
.assert_enabled_validators_count(1)
.assert_validators_count(2);
}
.compat(),
);
runtime.block_on(async {
ApiTester::new(weak_runtime)
.await
.assert_enabled_validators_count(0)
.assert_validators_count(0)
.create_keystore_validators(KeystoreValidatorScenario {
correct_password: true,
enabled: true,
})
.await
.assert_enabled_validators_count(1)
.assert_validators_count(1)
.create_keystore_validators(KeystoreValidatorScenario {
correct_password: false,
enabled: true,
})
.await
.assert_enabled_validators_count(1)
.assert_validators_count(1)
.create_keystore_validators(KeystoreValidatorScenario {
correct_password: true,
enabled: false,
})
.await
.assert_enabled_validators_count(1)
.assert_validators_count(2);
});
}

View File

@ -1,5 +1,4 @@
use crate::ProductionValidatorClient;
use futures::StreamExt;
use slog::{error, info};
use slot_clock::SlotClock;
use tokio::time::{interval_at, Duration, Instant};
@ -24,7 +23,8 @@ pub fn spawn_notifier<T: EthSpec>(client: &ProductionValidatorClient<T>) -> Resu
let interval_fut = async move {
let log = context.log();
while interval.next().await.is_some() {
loop {
interval.tick().await;
let num_available = duties_service.beacon_nodes.num_available().await;
let num_synced = duties_service.beacon_nodes.num_synced().await;
let num_total = duties_service.beacon_nodes.num_total().await;