Merge pull request #2389 from sigp/network-1.5

Network Updates for 1.5
Age Manning 2021-07-15 18:15:07 +10:00 committed by GitHub
commit 9a8320beaa
51 changed files with 1857 additions and 1999 deletions

Cargo.lock (generated, 1115 changed lines)

File diff suppressed because it is too large.

View File

@@ -27,7 +27,7 @@ eth2_wallet = { path = "../crypto/eth2_wallet" }
eth2_wallet_manager = { path = "../common/eth2_wallet_manager" }
rand = "0.7.3"
validator_dir = { path = "../common/validator_dir" }
tokio = { version = "1.1.0", features = ["full"] }
tokio = { version = "1.7.1", features = ["full"] }
eth2_keystore = { path = "../crypto/eth2_keystore" }
account_utils = { path = "../common/account_utils" }
slashing_protection = { path = "../validator_client/slashing_protection" }

View File

@@ -26,7 +26,7 @@ slog = { version = "2.5.2", features = ["max_level_trace", "release_max_level_tr
slog-term = "2.6.0"
slog-async = "2.5.0"
ctrlc = { version = "3.1.6", features = ["termination"] }
-tokio = { version = "1.1.0", features = ["time"] }
+tokio = { version = "1.7.1", features = ["time"] }
exit-future = "0.2.0"
dirs = "3.0.1"
logging = { path = "../common/logging" }

View File

@@ -40,7 +40,7 @@ eth2_ssz_derive = "0.1.0"
state_processing = { path = "../../consensus/state_processing" }
tree_hash = "0.1.1"
types = { path = "../../consensus/types" }
tokio = "1.1.0"
tokio = "1.7.1"
eth1 = { path = "../eth1" }
futures = "0.3.7"
genesis = { path = "../genesis" }

View File

@@ -26,7 +26,7 @@ error-chain = "0.12.4"
serde_yaml = "0.8.13"
slog = { version = "2.5.2", features = ["max_level_trace"] }
slog-async = "2.5.0"
tokio = "1.1.0"
tokio = "1.7.1"
dirs = "3.0.1"
futures = "0.3.7"
reqwest = { version = "0.11.0", features = ["native-tls-vendored"] }

View File

@@ -26,7 +26,7 @@ tree_hash = "0.1.1"
eth2_hashing = "0.1.0"
parking_lot = "0.11.0"
slog = "2.5.2"
-tokio = { version = "1.1.0", features = ["full"] }
+tokio = { version = "1.7.1", features = ["full"] }
state_processing = { path = "../../consensus/state_processing" }
libflate = "1.0.2"
lighthouse_metrics = { path = "../../common/lighthouse_metrics"}

View File

@@ -5,7 +5,7 @@ authors = ["Sigma Prime <contact@sigmaprime.io>"]
edition = "2018"
[dependencies]
-discv5 = { version = "0.1.0-beta.5", features = ["libp2p"] }
+discv5 = { version = "0.1.0-beta.7", features = ["libp2p"] }
unsigned-varint = { version = "0.6.0", features = ["codec"] }
types = { path = "../../consensus/types" }
hashset_delay = { path = "../../common/hashset_delay" }
@@ -16,7 +16,7 @@ eth2_ssz = "0.1.2"
eth2_ssz_derive = "0.1.0"
slog = { version = "2.5.2", features = ["max_level_trace"] }
lighthouse_version = { path = "../../common/lighthouse_version" }
-tokio = { version = "1.1.0", features = ["time", "macros"] }
+tokio = { version = "1.7.1", features = ["time", "macros"] }
futures = "0.3.7"
futures-io = "0.3.7"
error-chain = "0.12.4"
@@ -42,12 +42,14 @@ regex = "1.3.9"
strum = { version = "0.20", features = ["derive"] }
[dependencies.libp2p]
version = "0.35.1"
default-features = false
features = ["websocket", "identify", "mplex", "yamux", "noise", "gossipsub", "dns", "tcp-tokio"]
#version = "0.39.1"
#default-features = false
git = "https://github.com/sigp/rust-libp2p"
rev = "323cae1d08112052740834aa1fb262ae43e6f783"
features = ["websocket", "identify", "mplex", "yamux", "noise", "gossipsub", "dns-tokio", "tcp-tokio"]
[dev-dependencies]
-tokio = { version = "1.1.0", features = ["full"] }
+tokio = { version = "1.7.1", features = ["full"] }
slog-term = "2.6.0"
slog-async = "2.5.0"
tempfile = "3.1.0"

View File

@@ -17,6 +17,23 @@ const VOLUNTARY_EXIT_WEIGHT: f64 = 0.05;
const PROPOSER_SLASHING_WEIGHT: f64 = 0.05;
const ATTESTER_SLASHING_WEIGHT: f64 = 0.05;
/// The time window (seconds) that we expect messages to be forwarded to us in the mesh.
const MESH_MESSAGE_DELIVERIES_WINDOW: u64 = 2;
// Const as this is used in the peer manager to prevent gossip from disconnecting peers.
pub const GREYLIST_THRESHOLD: f64 = -16000.0;
/// Builds the peer score thresholds.
pub fn lighthouse_gossip_thresholds() -> PeerScoreThresholds {
PeerScoreThresholds {
gossip_threshold: -4000.0,
publish_threshold: -8000.0,
graylist_threshold: GREYLIST_THRESHOLD,
accept_px_threshold: 100.0,
opportunistic_graft_threshold: 5.0,
}
}
pub struct PeerScoreSettings<TSpec: EthSpec> {
slot: Duration,
epoch: Duration,
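The thresholds built above map onto concrete gossipsub v1.1 behaviour: below `gossip_threshold` we stop exchanging gossip with the peer, below `publish_threshold` we also stop publishing our own messages to it, and below `graylist_threshold` all of its RPCs are ignored. A rough sketch of that gating; the behaviour descriptions are paraphrased from the gossipsub v1.1 spec, not taken from this diff:

```rust
/// Illustrative only: how gossipsub v1.1 consumes the thresholds built above.
fn treatment_for_score(score: f64) -> &'static str {
    if score < -16_000.0 {
        // GREYLIST_THRESHOLD: ignore all RPCs to/from this peer.
        "greylisted"
    } else if score < -8_000.0 {
        // publish_threshold: also stop publishing our own messages to it.
        "no publish, no gossip"
    } else if score < -4_000.0 {
        // gossip_threshold: stop emitting/accepting gossip (IHAVE/IWANT).
        "no gossip"
    } else {
        "normal"
    }
}
```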
@@ -75,7 +92,7 @@ impl<TSpec: EthSpec> PeerScoreSettings<TSpec> {
decay_to_zero: self.decay_to_zero,
retain_score: self.epoch * 100,
app_specific_weight: 1.0,
-ip_colocation_factor_threshold: 3.0,
+ip_colocation_factor_threshold: 8.0, // Allow up to 8 nodes per IP
behaviour_penalty_threshold: 6.0,
behaviour_penalty_decay: self.score_parameter_decay(self.epoch * 10),
..Default::default()
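For reference, `score_parameter_decay` (not shown in this hunk) picks a per-decay-interval multiplier so that a counter decays to roughly `decay_to_zero` of its value over the given period. A minimal sketch, assuming the conventional gossipsub formulation:

```rust
// Minimal sketch: a factor such that x * factor^ticks ~= decay_to_zero * x
// after `decay_time`, with decay applied once per `decay_interval`.
fn score_parameter_decay(decay_time_secs: f64, decay_interval_secs: f64, decay_to_zero: f64) -> f64 {
    let ticks = decay_time_secs / decay_interval_secs;
    decay_to_zero.powf(1.0 / ticks)
}
```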
@@ -313,10 +330,10 @@ impl<TSpec: EthSpec> PeerScoreSettings<TSpec> {
cap_factor * t_params.mesh_message_deliveries_threshold
};
t_params.mesh_message_deliveries_activation = activation_window;
-t_params.mesh_message_deliveries_window = Duration::from_secs(2);
+t_params.mesh_message_deliveries_window =
+Duration::from_secs(MESH_MESSAGE_DELIVERIES_WINDOW);
t_params.mesh_failure_penalty_decay = t_params.mesh_message_deliveries_decay;
-t_params.mesh_message_deliveries_weight = -self.max_positive_score
-/ (t_params.topic_weight * t_params.mesh_message_deliveries_threshold.powi(2));
+t_params.mesh_message_deliveries_weight = -t_params.topic_weight;
t_params.mesh_failure_penalty_weight = t_params.mesh_message_deliveries_weight;
if decay_slots >= current_slot.as_u64() {
t_params.mesh_message_deliveries_threshold = 0.0;
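The weight change above caps the mesh-delivery penalty: with the old weight a full delivery deficit drove the topic score all the way to `-max_positive_score`, while `mesh_message_deliveries_weight = -topic_weight` bounds the quadratic penalty by the topic's own weight. A sketch of the P3 term from the gossipsub v1.1 scoring function that this hunk tunes (function name illustrative):

```rust
// Sketch of the gossipsub v1.1 "mesh message deliveries" penalty (P3).
fn mesh_deliveries_penalty(topic_weight: f64, threshold: f64, deliveries: f64) -> f64 {
    // Only a shortfall against the threshold is penalised.
    let deficit = (threshold - deliveries).max(0.0);
    // After this change the per-topic weight is simply -topic_weight.
    let weight = -topic_weight;
    topic_weight * weight * deficit * deficit
}
```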

View File

@@ -1,368 +0,0 @@
use crate::behaviour::Gossipsub;
use crate::rpc::*;
use libp2p::{
core::either::{EitherError, EitherOutput},
core::upgrade::{EitherUpgrade, InboundUpgrade, OutboundUpgrade, SelectUpgrade, UpgradeError},
identify::Identify,
swarm::{
protocols_handler::{
KeepAlive, ProtocolsHandlerEvent, ProtocolsHandlerUpgrErr, SubstreamProtocol,
},
NegotiatedSubstream, NetworkBehaviour, ProtocolsHandler,
},
};
use std::task::{Context, Poll};
use types::EthSpec;
/* Auxiliary types for simplicity */
type GossipHandler = <Gossipsub as NetworkBehaviour>::ProtocolsHandler;
type RPCHandler<TSpec> = <RPC<TSpec> as NetworkBehaviour>::ProtocolsHandler;
type IdentifyHandler = <Identify as NetworkBehaviour>::ProtocolsHandler;
/// Handler that combines Lighthouse's Behaviours' handlers in a delegating manner.
pub(super) struct DelegatingHandler<TSpec: EthSpec> {
/// Handler for the Gossipsub protocol.
gossip_handler: GossipHandler,
/// Handler for the RPC protocol.
rpc_handler: RPCHandler<TSpec>,
/// Handler for the Identify protocol.
identify_handler: IdentifyHandler,
}
impl<TSpec: EthSpec> DelegatingHandler<TSpec> {
pub fn new(gossipsub: &mut Gossipsub, rpc: &mut RPC<TSpec>, identify: &mut Identify) -> Self {
DelegatingHandler {
gossip_handler: gossipsub.new_handler(),
rpc_handler: rpc.new_handler(),
identify_handler: identify.new_handler(),
}
}
/// Gives mutable access to the rpc handler.
pub fn rpc_mut(&mut self) -> &mut RPCHandler<TSpec> {
&mut self.rpc_handler
}
/// Gives access to the rpc handler.
pub fn rpc(&self) -> &RPCHandler<TSpec> {
&self.rpc_handler
}
/// Gives access to identify's handler.
pub fn _identify(&self) -> &IdentifyHandler {
&self.identify_handler
}
}
/// Wrapper around the `ProtocolsHandler::InEvent` types of the handlers.
/// Simply delegated to the corresponding behaviour's handler.
#[derive(Debug, Clone)]
pub enum DelegateIn<TSpec: EthSpec> {
Gossipsub(<GossipHandler as ProtocolsHandler>::InEvent),
RPC(<RPCHandler<TSpec> as ProtocolsHandler>::InEvent),
Identify(<IdentifyHandler as ProtocolsHandler>::InEvent),
}
/// Wrapper around the `ProtocolsHandler::OutEvent` types of the handlers.
/// Simply delegated to the corresponding behaviour's handler.
pub enum DelegateOut<TSpec: EthSpec> {
Gossipsub(<GossipHandler as ProtocolsHandler>::OutEvent),
RPC(<RPCHandler<TSpec> as ProtocolsHandler>::OutEvent),
Identify(Box<<IdentifyHandler as ProtocolsHandler>::OutEvent>),
}
/// Wrapper around the `ProtocolsHandler::Error` types of the handlers.
/// Simply delegated to the corresponding behaviour's handler.
#[derive(Debug)]
pub enum DelegateError<TSpec: EthSpec> {
Gossipsub(<GossipHandler as ProtocolsHandler>::Error),
RPC(<RPCHandler<TSpec> as ProtocolsHandler>::Error),
Identify(<IdentifyHandler as ProtocolsHandler>::Error),
Disconnected,
}
impl<TSpec: EthSpec> std::error::Error for DelegateError<TSpec> {}
impl<TSpec: EthSpec> std::fmt::Display for DelegateError<TSpec> {
fn fmt(
&self,
formater: &mut std::fmt::Formatter<'_>,
) -> std::result::Result<(), std::fmt::Error> {
match self {
DelegateError::Gossipsub(err) => err.fmt(formater),
DelegateError::RPC(err) => err.fmt(formater),
DelegateError::Identify(err) => err.fmt(formater),
DelegateError::Disconnected => write!(formater, "Disconnected"),
}
}
}
pub type DelegateInProto<TSpec> = SelectUpgrade<
<GossipHandler as ProtocolsHandler>::InboundProtocol,
SelectUpgrade<
<RPCHandler<TSpec> as ProtocolsHandler>::InboundProtocol,
<IdentifyHandler as ProtocolsHandler>::InboundProtocol,
>,
>;
pub type DelegateOutProto<TSpec> = EitherUpgrade<
<GossipHandler as ProtocolsHandler>::OutboundProtocol,
EitherUpgrade<
<RPCHandler<TSpec> as ProtocolsHandler>::OutboundProtocol,
<IdentifyHandler as ProtocolsHandler>::OutboundProtocol,
>,
>;
pub type DelegateOutInfo<TSpec> = EitherOutput<
<GossipHandler as ProtocolsHandler>::OutboundOpenInfo,
EitherOutput<
<RPCHandler<TSpec> as ProtocolsHandler>::OutboundOpenInfo,
<IdentifyHandler as ProtocolsHandler>::OutboundOpenInfo,
>,
>;
impl<TSpec: EthSpec> ProtocolsHandler for DelegatingHandler<TSpec> {
type InEvent = DelegateIn<TSpec>;
type OutEvent = DelegateOut<TSpec>;
type Error = DelegateError<TSpec>;
type InboundProtocol = DelegateInProto<TSpec>;
type OutboundProtocol = DelegateOutProto<TSpec>;
type OutboundOpenInfo = DelegateOutInfo<TSpec>;
type InboundOpenInfo = ();
fn listen_protocol(&self) -> SubstreamProtocol<Self::InboundProtocol, ()> {
let gossip_proto = self.gossip_handler.listen_protocol();
let rpc_proto = self.rpc_handler.listen_protocol();
let identify_proto = self.identify_handler.listen_protocol();
let timeout = *gossip_proto
.timeout()
.max(rpc_proto.timeout())
.max(identify_proto.timeout());
let select = SelectUpgrade::new(
gossip_proto.into_upgrade().1,
SelectUpgrade::new(rpc_proto.into_upgrade().1, identify_proto.into_upgrade().1),
);
SubstreamProtocol::new(select, ()).with_timeout(timeout)
}
fn inject_fully_negotiated_inbound(
&mut self,
out: <Self::InboundProtocol as InboundUpgrade<NegotiatedSubstream>>::Output,
_info: Self::InboundOpenInfo,
) {
match out {
// Gossipsub
EitherOutput::First(out) => {
self.gossip_handler.inject_fully_negotiated_inbound(out, ())
}
// RPC
EitherOutput::Second(EitherOutput::First(out)) => {
self.rpc_handler.inject_fully_negotiated_inbound(out, ())
}
// Identify
EitherOutput::Second(EitherOutput::Second(out)) => self
.identify_handler
.inject_fully_negotiated_inbound(out, ()),
}
}
fn inject_fully_negotiated_outbound(
&mut self,
protocol: <Self::OutboundProtocol as OutboundUpgrade<NegotiatedSubstream>>::Output,
info: Self::OutboundOpenInfo,
) {
match (protocol, info) {
// Gossipsub
(EitherOutput::First(protocol), EitherOutput::First(info)) => self
.gossip_handler
.inject_fully_negotiated_outbound(protocol, info),
// RPC
(
EitherOutput::Second(EitherOutput::First(protocol)),
EitherOutput::Second(EitherOutput::First(info)),
) => self
.rpc_handler
.inject_fully_negotiated_outbound(protocol, info),
// Identify
(
EitherOutput::Second(EitherOutput::Second(protocol)),
EitherOutput::Second(EitherOutput::Second(())),
) => self
.identify_handler
.inject_fully_negotiated_outbound(protocol, ()),
// Reaching here means we got a protocol and info for different behaviours
_ => unreachable!("output and protocol don't match"),
}
}
fn inject_event(&mut self, event: Self::InEvent) {
match event {
DelegateIn::Gossipsub(ev) => self.gossip_handler.inject_event(ev),
DelegateIn::RPC(ev) => self.rpc_handler.inject_event(ev),
DelegateIn::Identify(()) => self.identify_handler.inject_event(()),
}
}
fn inject_dial_upgrade_error(
&mut self,
info: Self::OutboundOpenInfo,
error: ProtocolsHandlerUpgrErr<
<Self::OutboundProtocol as OutboundUpgrade<NegotiatedSubstream>>::Error,
>,
) {
match info {
// Gossipsub
EitherOutput::First(info) => match error {
ProtocolsHandlerUpgrErr::Upgrade(UpgradeError::Select(err)) => {
self.gossip_handler.inject_dial_upgrade_error(
info,
ProtocolsHandlerUpgrErr::Upgrade(UpgradeError::Select(err)),
)
}
ProtocolsHandlerUpgrErr::Timer => self
.gossip_handler
.inject_dial_upgrade_error(info, ProtocolsHandlerUpgrErr::Timer),
ProtocolsHandlerUpgrErr::Timeout => self
.gossip_handler
.inject_dial_upgrade_error(info, ProtocolsHandlerUpgrErr::Timeout),
ProtocolsHandlerUpgrErr::Upgrade(UpgradeError::Apply(EitherError::A(err))) => {
self.gossip_handler.inject_dial_upgrade_error(
info,
ProtocolsHandlerUpgrErr::Upgrade(UpgradeError::Apply(err)),
)
}
ProtocolsHandlerUpgrErr::Upgrade(UpgradeError::Apply(_)) => {
unreachable!("info and error don't match")
}
},
// RPC
EitherOutput::Second(EitherOutput::First(info)) => match error {
ProtocolsHandlerUpgrErr::Upgrade(UpgradeError::Select(err)) => {
self.rpc_handler.inject_dial_upgrade_error(
info,
ProtocolsHandlerUpgrErr::Upgrade(UpgradeError::Select(err)),
)
}
ProtocolsHandlerUpgrErr::Timer => self
.rpc_handler
.inject_dial_upgrade_error(info, ProtocolsHandlerUpgrErr::Timer),
ProtocolsHandlerUpgrErr::Timeout => self
.rpc_handler
.inject_dial_upgrade_error(info, ProtocolsHandlerUpgrErr::Timeout),
ProtocolsHandlerUpgrErr::Upgrade(UpgradeError::Apply(EitherError::B(
EitherError::A(err),
))) => self.rpc_handler.inject_dial_upgrade_error(
info,
ProtocolsHandlerUpgrErr::Upgrade(UpgradeError::Apply(err)),
),
ProtocolsHandlerUpgrErr::Upgrade(UpgradeError::Apply(_)) => {
unreachable!("info and error don't match")
}
},
// Identify
EitherOutput::Second(EitherOutput::Second(())) => match error {
ProtocolsHandlerUpgrErr::Upgrade(UpgradeError::Select(err)) => {
self.identify_handler.inject_dial_upgrade_error(
(),
ProtocolsHandlerUpgrErr::Upgrade(UpgradeError::Select(err)),
)
}
ProtocolsHandlerUpgrErr::Timer => self
.identify_handler
.inject_dial_upgrade_error((), ProtocolsHandlerUpgrErr::Timer),
ProtocolsHandlerUpgrErr::Timeout => self
.identify_handler
.inject_dial_upgrade_error((), ProtocolsHandlerUpgrErr::Timeout),
ProtocolsHandlerUpgrErr::Upgrade(UpgradeError::Apply(EitherError::B(
EitherError::B(err),
))) => self.identify_handler.inject_dial_upgrade_error(
(),
ProtocolsHandlerUpgrErr::Upgrade(UpgradeError::Apply(err)),
),
ProtocolsHandlerUpgrErr::Upgrade(UpgradeError::Apply(_)) => {
unreachable!("info and error don't match")
}
},
}
}
fn connection_keep_alive(&self) -> KeepAlive {
self.gossip_handler
.connection_keep_alive()
.max(self.rpc_handler.connection_keep_alive())
.max(self.identify_handler.connection_keep_alive())
}
#[allow(clippy::type_complexity)]
fn poll(
&mut self,
cx: &mut Context,
) -> Poll<
ProtocolsHandlerEvent<
Self::OutboundProtocol,
Self::OutboundOpenInfo,
Self::OutEvent,
Self::Error,
>,
> {
match self.gossip_handler.poll(cx) {
Poll::Ready(ProtocolsHandlerEvent::Custom(event)) => {
return Poll::Ready(ProtocolsHandlerEvent::Custom(DelegateOut::Gossipsub(event)));
}
Poll::Ready(ProtocolsHandlerEvent::Close(event)) => {
return Poll::Ready(ProtocolsHandlerEvent::Close(DelegateError::Gossipsub(
event,
)));
}
Poll::Ready(ProtocolsHandlerEvent::OutboundSubstreamRequest { protocol }) => {
return Poll::Ready(ProtocolsHandlerEvent::OutboundSubstreamRequest {
protocol: protocol
.map_upgrade(EitherUpgrade::A)
.map_info(EitherOutput::First),
});
}
Poll::Pending => (),
};
match self.rpc_handler.poll(cx) {
Poll::Ready(ProtocolsHandlerEvent::Custom(event)) => {
return Poll::Ready(ProtocolsHandlerEvent::Custom(DelegateOut::RPC(event)));
}
Poll::Ready(ProtocolsHandlerEvent::Close(event)) => {
return Poll::Ready(ProtocolsHandlerEvent::Close(DelegateError::RPC(event)));
}
Poll::Ready(ProtocolsHandlerEvent::OutboundSubstreamRequest { protocol }) => {
return Poll::Ready(ProtocolsHandlerEvent::OutboundSubstreamRequest {
protocol: protocol
.map_upgrade(|u| EitherUpgrade::B(EitherUpgrade::A(u)))
.map_info(|info| EitherOutput::Second(EitherOutput::First(info))),
});
}
Poll::Pending => (),
};
match self.identify_handler.poll(cx) {
Poll::Ready(ProtocolsHandlerEvent::Custom(event)) => {
return Poll::Ready(ProtocolsHandlerEvent::Custom(DelegateOut::Identify(
Box::new(event),
)));
}
Poll::Ready(ProtocolsHandlerEvent::Close(event)) => {
return Poll::Ready(ProtocolsHandlerEvent::Close(DelegateError::Identify(event)));
}
Poll::Ready(ProtocolsHandlerEvent::OutboundSubstreamRequest { protocol }) => {
return Poll::Ready(ProtocolsHandlerEvent::OutboundSubstreamRequest {
protocol: protocol
.map_upgrade(|u| EitherUpgrade::B(EitherUpgrade::B(u)))
.map_info(|_| EitherOutput::Second(EitherOutput::Second(()))),
});
}
Poll::Pending => (),
};
Poll::Pending
}
}

View File

@@ -1,132 +0,0 @@
use crate::behaviour::Gossipsub;
use crate::rpc::*;
use delegate::DelegatingHandler;
pub(super) use delegate::{
DelegateError, DelegateIn, DelegateInProto, DelegateOut, DelegateOutInfo, DelegateOutProto,
};
use libp2p::{
core::upgrade::{InboundUpgrade, OutboundUpgrade},
identify::Identify,
swarm::protocols_handler::{
KeepAlive, ProtocolsHandlerEvent, ProtocolsHandlerUpgrErr, SubstreamProtocol,
},
swarm::{NegotiatedSubstream, ProtocolsHandler},
};
use std::task::{Context, Poll};
use types::EthSpec;
mod delegate;
/// Handler that combines Lighthouse's Behaviours' handlers in a delegating manner.
pub struct BehaviourHandler<TSpec: EthSpec> {
/// Handler combining all sub behaviour's handlers.
delegate: DelegatingHandler<TSpec>,
/// Flag indicating if the handler is shutting down.
shutting_down: bool,
}
impl<TSpec: EthSpec> BehaviourHandler<TSpec> {
pub fn new(gossipsub: &mut Gossipsub, rpc: &mut RPC<TSpec>, identify: &mut Identify) -> Self {
BehaviourHandler {
delegate: DelegatingHandler::new(gossipsub, rpc, identify),
shutting_down: false,
}
}
}
#[derive(Clone)]
pub enum BehaviourHandlerIn<TSpec: EthSpec> {
Delegate(DelegateIn<TSpec>),
/// Start the shutdown process.
Shutdown(Option<(RequestId, OutboundRequest<TSpec>)>),
}
impl<TSpec: EthSpec> ProtocolsHandler for BehaviourHandler<TSpec> {
type InEvent = BehaviourHandlerIn<TSpec>;
type OutEvent = DelegateOut<TSpec>;
type Error = DelegateError<TSpec>;
type InboundProtocol = DelegateInProto<TSpec>;
type OutboundProtocol = DelegateOutProto<TSpec>;
type OutboundOpenInfo = DelegateOutInfo<TSpec>;
type InboundOpenInfo = ();
fn listen_protocol(&self) -> SubstreamProtocol<Self::InboundProtocol, ()> {
self.delegate.listen_protocol()
}
fn inject_fully_negotiated_inbound(
&mut self,
out: <Self::InboundProtocol as InboundUpgrade<NegotiatedSubstream>>::Output,
_info: Self::InboundOpenInfo,
) {
self.delegate.inject_fully_negotiated_inbound(out, ())
}
fn inject_fully_negotiated_outbound(
&mut self,
out: <Self::OutboundProtocol as OutboundUpgrade<NegotiatedSubstream>>::Output,
info: Self::OutboundOpenInfo,
) {
self.delegate.inject_fully_negotiated_outbound(out, info)
}
fn inject_event(&mut self, event: Self::InEvent) {
match event {
BehaviourHandlerIn::Delegate(delegated_ev) => self.delegate.inject_event(delegated_ev),
/* Events coming from the behaviour */
BehaviourHandlerIn::Shutdown(last_message) => {
self.shutting_down = true;
self.delegate.rpc_mut().shutdown(last_message);
}
}
}
fn inject_dial_upgrade_error(
&mut self,
info: Self::OutboundOpenInfo,
err: ProtocolsHandlerUpgrErr<
<Self::OutboundProtocol as OutboundUpgrade<NegotiatedSubstream>>::Error,
>,
) {
self.delegate.inject_dial_upgrade_error(info, err)
}
// We don't use the keep alive to disconnect. This is handled in the poll
fn connection_keep_alive(&self) -> KeepAlive {
KeepAlive::Yes
}
#[allow(clippy::type_complexity)]
fn poll(
&mut self,
cx: &mut Context,
) -> Poll<
ProtocolsHandlerEvent<
Self::OutboundProtocol,
Self::OutboundOpenInfo,
Self::OutEvent,
Self::Error,
>,
> {
// Disconnect if the sub-handlers are ready.
// Currently we only respect the RPC handler.
if self.shutting_down && KeepAlive::No == self.delegate.rpc().connection_keep_alive() {
return Poll::Ready(ProtocolsHandlerEvent::Close(DelegateError::Disconnected));
}
match self.delegate.poll(cx) {
Poll::Ready(ProtocolsHandlerEvent::Custom(event)) => {
return Poll::Ready(ProtocolsHandlerEvent::Custom(event))
}
Poll::Ready(ProtocolsHandlerEvent::Close(err)) => {
return Poll::Ready(ProtocolsHandlerEvent::Close(err))
}
Poll::Ready(ProtocolsHandlerEvent::OutboundSubstreamRequest { protocol }) => {
return Poll::Ready(ProtocolsHandlerEvent::OutboundSubstreamRequest { protocol });
}
Poll::Pending => (),
}
Poll::Pending
}
}

File diff suppressed because it is too large.

View File

@@ -14,13 +14,15 @@ use sha2::{Digest, Sha256};
use std::path::PathBuf;
use std::time::Duration;
/// The maximum transmit size of gossip messages in bytes.
pub const GOSSIP_MAX_SIZE: usize = 1_048_576;
+/// This is a constant to be used in discovery. The lower bound of the gossipsub mesh.
+pub const MESH_N_LOW: usize = 6;
// We treat uncompressed messages as invalid and never use the INVALID_SNAPPY_DOMAIN as in the
// specification. We leave it here for posterity.
// const MESSAGE_DOMAIN_INVALID_SNAPPY: [u8; 4] = [0, 0, 0, 0];
const MESSAGE_DOMAIN_VALID_SNAPPY: [u8; 4] = [1, 0, 0, 0];
-pub const MESH_N_LOW: usize = 6;
#[derive(Clone, Debug, Serialize, Deserialize)]
#[serde(default)]
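`MESSAGE_DOMAIN_VALID_SNAPPY` feeds the gossipsub message-id function: per the consensus networking spec, the id of a snappy-compressed message is the SHA-256 of the domain prefix concatenated with the decompressed payload, truncated to 20 bytes. A sketch under that assumption, using the `sha2` crate already imported above:

```rust
use sha2::{Digest, Sha256};

// Sketch of the message-id derivation that consumes MESSAGE_DOMAIN_VALID_SNAPPY.
fn gossip_message_id(domain: [u8; 4], decompressed_payload: &[u8]) -> [u8; 20] {
    let mut hasher = Sha256::new();
    hasher.update(domain);
    hasher.update(decompressed_payload);
    let digest = hasher.finalize();
    let mut id = [0u8; 20];
    id.copy_from_slice(&digest[..20]);
    id
}
```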
@@ -138,8 +140,8 @@ impl Default for Config {
.mesh_n_high(12)
.gossip_lazy(6)
.fanout_ttl(Duration::from_secs(60))
-.history_length(6)
-.max_messages_per_rpc(Some(10))
+.history_length(12)
+.max_messages_per_rpc(Some(500)) // Responses to IWANT can be quite large
.history_gossip(3)
.validate_messages() // require validation before propagation
.validation_mode(ValidationMode::Anonymous)
@@ -151,10 +153,20 @@
.build()
.expect("valid gossipsub configuration");
// Discv5 Unsolicited Packet Rate Limiter
let filter_rate_limiter = Some(
discv5::RateLimiterBuilder::new()
.total_n_every(10, Duration::from_secs(1)) // Allow bursts, average 10 per second
.ip_n_every(9, Duration::from_secs(1)) // Allow bursts, average 9 per second
.node_n_every(8, Duration::from_secs(1)) // Allow bursts, average 8 per second
.build()
.expect("The total rate limit has been specified"),
);
// discv5 configuration
let discv5_config = Discv5ConfigBuilder::new()
.enable_packet_filter()
-.session_cache_capacity(1000)
+.session_cache_capacity(5000)
.request_timeout(Duration::from_secs(1))
.query_peer_timeout(Duration::from_secs(2))
.query_timeout(Duration::from_secs(30))
@@ -163,6 +175,11 @@
.query_parallelism(5)
.disable_report_discovered_peers()
.ip_limit() // limits /24 IP's in buckets.
.incoming_bucket_limit(8) // half the bucket size
.filter_rate_limiter(filter_rate_limiter)
.filter_max_bans_per_ip(Some(5))
.filter_max_nodes_per_ip(Some(10))
.ban_duration(Some(Duration::from_secs(3600)))
.ping_interval(Duration::from_secs(300))
.build();
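The `*_n_every` builder calls above express a sliding-window budget: bursts are allowed, but the long-run average is capped at n packets per interval, enforced globally, per IP, and per node id respectively. An illustrative re-implementation of that semantic, not discv5's actual code:

```rust
use std::time::{Duration, Instant};

// Illustrative "allow at most n events per interval" limiter.
struct NEvery {
    n: usize,
    interval: Duration,
    window: Vec<Instant>, // timestamps of recently allowed events
}

impl NEvery {
    fn allow(&mut self, now: Instant) -> bool {
        // Drop timestamps that have aged out of the window.
        self.window.retain(|t| now.duration_since(*t) < self.interval);
        if self.window.len() < self.n {
            self.window.push(now); // burst permitted, average still bounded
            true
        } else {
            false // over budget: the unsolicited packet would be dropped
        }
    }
}
```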

View File

@@ -1,22 +1,34 @@
-///! This manages the discovery and management of peers.
+//! The discovery sub-behaviour of Lighthouse.
+//!
+//! This module creates a libp2p dummy-behaviour built around the discv5 protocol. It handles
+//! queries and manages access to the discovery routing table.
pub(crate) mod enr;
pub mod enr_ext;
// Allow external use of the lighthouse ENR builder
use crate::{config, metrics};
use crate::{error, Enr, NetworkConfig, NetworkGlobals, SubnetDiscovery};
use discv5::{enr::NodeId, Discv5, Discv5Event};
pub use enr::{
build_enr, create_enr_builder_from_config, load_enr_from_disk, use_or_load_enr, CombinedKey,
Eth2Enr,
};
pub use enr_ext::{peer_id_to_node_id, CombinedKeyExt, EnrExt};
pub use libp2p::core::identity::{Keypair, PublicKey};
use crate::{config, metrics};
use crate::{error, Enr, NetworkConfig, NetworkGlobals, SubnetDiscovery};
use discv5::{enr::NodeId, Discv5, Discv5Event};
use enr::{BITFIELD_ENR_KEY, ETH2_ENR_KEY};
pub use enr_ext::{peer_id_to_node_id, CombinedKeyExt, EnrExt};
use futures::prelude::*;
use futures::stream::FuturesUnordered;
use libp2p::core::PeerId;
pub use libp2p::{
core::{
connection::ConnectionId,
identity::{Keypair, PublicKey},
ConnectedPoint, Multiaddr, PeerId,
},
swarm::{
protocols_handler::ProtocolsHandler, NetworkBehaviour, NetworkBehaviourAction as NBAction,
NotifyHandler, PollParameters, SubstreamProtocol,
},
};
use lru::LruCache;
use slog::{crit, debug, error, info, warn};
use ssz::{Decode, Encode};
@@ -295,6 +307,11 @@ impl<TSpec: EthSpec> Discovery<TSpec> {
self.cached_enrs.iter()
}
/// Removes a cached ENR from the list.
pub fn remove_cached_enr(&mut self, peer_id: &PeerId) -> Option<Enr> {
self.cached_enrs.pop(peer_id)
}
/// This adds a new `FindPeers` query to the queue if one doesn't already exist.
pub fn discover_peers(&mut self) {
// If the discv5 service isn't running or we are in the process of a query, don't bother queuing a new one.
@@ -492,33 +509,38 @@
// first try and convert the peer_id to a node_id.
if let Ok(node_id) = peer_id_to_node_id(peer_id) {
// If we could convert this peer id, remove it from the DHT and ban it from discovery.
-self.discv5.ban_node(&node_id);
+self.discv5.ban_node(&node_id, None);
// Remove the node from the routing table.
self.discv5.remove_node(&node_id);
}
for ip_address in ip_addresses {
-self.discv5.ban_ip(ip_address);
+self.discv5.ban_ip(ip_address, None);
}
}
/// Unbans the peer in discovery.
pub fn unban_peer(&mut self, peer_id: &PeerId, ip_addresses: Vec<IpAddr>) {
// first try and convert the peer_id to a node_id.
if let Ok(node_id) = peer_id_to_node_id(peer_id) {
// If we could convert this peer id, remove it from the DHT and ban it from discovery.
-self.discv5.permit_node(&node_id);
+self.discv5.ban_node_remove(&node_id);
}
for ip_address in ip_addresses {
-self.discv5.permit_ip(ip_address);
+self.discv5.ban_ip_remove(&ip_address);
}
}
-// mark node as disconnected in DHT, freeing up space for other nodes
+/// Marks the node as disconnected in the DHT, freeing up space for other nodes; this also removes
+/// nodes from the cached ENR list.
pub fn disconnect_peer(&mut self, peer_id: &PeerId) {
if let Ok(node_id) = peer_id_to_node_id(peer_id) {
self.discv5.disconnect_node(&node_id);
}
// Remove the peer from the cached list, to prevent redialing disconnected
// peers.
self.cached_enrs.pop(peer_id);
}
/* Internal Functions */
@@ -727,7 +749,11 @@
};
// predicate for finding nodes with a matching fork and valid tcp port
let eth2_fork_predicate = move |enr: &Enr| {
-enr.eth2() == Ok(enr_fork_id.clone()) && (enr.tcp().is_some() || enr.tcp6().is_some())
+// `next_fork_epoch` and `next_fork_version` can be different so that
+// we can connect to peers who aren't compatible with an upcoming fork.
+// `fork_digest` **must** be the same.
+enr.eth2().map(|e| e.fork_digest) == Ok(enr_fork_id.fork_digest)
+&& (enr.tcp().is_some() || enr.tcp6().is_some())
};
// General predicate
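The relaxed predicate only pins the current `fork_digest`; the `next_fork_*` fields may differ, so peers that have not yet scheduled an upcoming fork still match. A reduced model of the check, with illustrative types:

```rust
// Reduced model of the relaxed fork check (types illustrative, not from this diff).
#[derive(Clone, Copy, PartialEq)]
struct ForkDigest([u8; 4]);

fn fork_compatible(local_digest: ForkDigest, remote: Result<ForkDigest, ()>) -> bool {
    // next_fork_version / next_fork_epoch are deliberately ignored.
    remote == Ok(local_digest)
}
```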
@@ -871,9 +897,68 @@
}
None
}
}
// Main execution loop to be driven by the peer manager.
pub fn poll(&mut self, cx: &mut Context) -> Poll<DiscoveryEvent> {
/* NetworkBehaviour Implementation */
impl<TSpec: EthSpec> NetworkBehaviour for Discovery<TSpec> {
// Discovery is not a real NetworkBehaviour...
type ProtocolsHandler = libp2p::swarm::protocols_handler::DummyProtocolsHandler;
type OutEvent = DiscoveryEvent;
fn new_handler(&mut self) -> Self::ProtocolsHandler {
libp2p::swarm::protocols_handler::DummyProtocolsHandler::default()
}
// Handles the libp2p request to obtain multiaddrs for peer_id's in order to dial them.
fn addresses_of_peer(&mut self, peer_id: &PeerId) -> Vec<Multiaddr> {
if let Some(enr) = self.enr_of_peer(peer_id) {
// ENR's may have multiple Multiaddrs. The multi-addr associated with the UDP
// port is removed, which is assumed to be associated with the discv5 protocol (and
// therefore irrelevant for other libp2p components).
enr.multiaddr_tcp()
} else {
// PeerId is not known
Vec::new()
}
}
fn inject_connected(&mut self, _peer_id: &PeerId) {}
fn inject_disconnected(&mut self, _peer_id: &PeerId) {}
fn inject_connection_established(
&mut self,
_: &PeerId,
_: &ConnectionId,
_connected_point: &ConnectedPoint,
) {
}
fn inject_connection_closed(
&mut self,
_: &PeerId,
_: &ConnectionId,
_connected_point: &ConnectedPoint,
) {
}
fn inject_event(
&mut self,
_: PeerId,
_: ConnectionId,
_: <Self::ProtocolsHandler as ProtocolsHandler>::OutEvent,
) {
}
fn inject_dial_failure(&mut self, peer_id: &PeerId) {
// set peer as disconnected in discovery DHT
debug!(self.log, "Marking peer disconnected in DHT"; "peer_id" => %peer_id);
self.disconnect_peer(peer_id);
}
// Main execution loop to drive the behaviour
fn poll(
&mut self,
cx: &mut Context,
_: &mut impl PollParameters,
) -> Poll<NBAction<<Self::ProtocolsHandler as ProtocolsHandler>::InEvent, Self::OutEvent>> {
if !self.started {
return Poll::Pending;
}
@@ -884,7 +969,9 @@
// Drive the queries and return any results from completed queries
if let Some(results) = self.poll_queries(cx) {
// return the result to the peer manager
-return Poll::Ready(DiscoveryEvent::QueryResult(results));
+return Poll::Ready(NBAction::GenerateEvent(DiscoveryEvent::QueryResult(
+results,
+)));
}
// Process the server event stream
@@ -932,9 +1019,13 @@
enr::save_enr_to_disk(Path::new(&self.enr_dir), &enr, &self.log);
// update network globals
*self.network_globals.local_enr.write() = enr;
-return Poll::Ready(DiscoveryEvent::SocketUpdated(socket));
+return Poll::Ready(NBAction::GenerateEvent(
+DiscoveryEvent::SocketUpdated(socket),
+));
}
-_ => {} // Ignore all other discv5 server events
+Discv5Event::EnrAdded { .. }
+| Discv5Event::TalkRequest(_)
+| Discv5Event::NodeInserted { .. } => {} // Ignore all other discv5 server events
}
}
}

View File

@@ -1,20 +1,19 @@
-//! Implementation of a Lighthouse's peer management system.
+//! Implementation of Lighthouse's peer management system.
pub use self::peerdb::*;
use crate::discovery::{subnet_predicate, Discovery, DiscoveryEvent, TARGET_SUBNET_PEERS};
use crate::rpc::{GoodbyeReason, MetaData, Protocol, RPCError, RPCResponseErrorCode};
use crate::types::SyncState;
use crate::{error, metrics, Gossipsub};
-use crate::{EnrExt, NetworkConfig, NetworkGlobals, PeerId, SubnetDiscovery};
+use crate::{NetworkConfig, NetworkGlobals, PeerId};
use discv5::Enr;
use futures::prelude::*;
use futures::Stream;
use hashset_delay::HashSetDelay;
use libp2p::core::multiaddr::Protocol as MProtocol;
use libp2p::core::ConnectedPoint;
use libp2p::identify::IdentifyInfo;
-use slog::{crit, debug, error, trace, warn};
+use slog::{crit, debug, error, warn};
use smallvec::SmallVec;
use std::{
net::SocketAddr,
pin::Pin,
sync::Arc,
task::{Context, Poll},
@@ -36,6 +35,7 @@ pub use peer_sync_status::{PeerSyncStatus, SyncInfo};
use score::{PeerAction, ReportSource, ScoreState};
use std::cmp::Ordering;
use std::collections::HashMap;
use std::net::IpAddr;
/// The time in seconds between re-status's peers.
const STATUS_INTERVAL: u64 = 300;
@@ -54,15 +54,14 @@ const HEARTBEAT_INTERVAL: u64 = 30;
/// A fraction of `PeerManager::target_peers` that we allow to connect to us in excess of
/// `PeerManager::target_peers`. For clarity, if `PeerManager::target_peers` is 50 and
/// PEER_EXCESS_FACTOR = 0.1 we allow 10% more nodes, i.e 55.
-const PEER_EXCESS_FACTOR: f32 = 0.1;
+pub const PEER_EXCESS_FACTOR: f32 = 0.1;
+/// A fraction of `PeerManager::target_peers` that need to be outbound-only connections.
+pub const MIN_OUTBOUND_ONLY_FACTOR: f32 = 0.1;
/// Relative factor of peers that are allowed to have a negative gossipsub score without penalizing
/// them in lighthouse.
const ALLOWED_NEGATIVE_GOSSIPSUB_FACTOR: f32 = 0.1;
-/// A fraction of `PeerManager::target_peers` that need to be outbound-only connections.
-const MIN_OUTBOUND_ONLY_FACTOR: f32 = 0.1;
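Worked through with the documented example: for `target_peers = 50`, `PEER_EXCESS_FACTOR = 0.1` yields `max_peers = 55`, and `MIN_OUTBOUND_ONLY_FACTOR = 0.1` requires at least 5 outbound-only peers. The `max_peers` expression below mirrors the one used in `PeerManager::new`; the helper itself is illustrative:

```rust
// Illustrative helper showing how the two factors above bound the peer set.
fn peer_limits(target_peers: usize) -> (usize, usize) {
    let max_peers = (target_peers as f32 * (1.0 + PEER_EXCESS_FACTOR)).ceil() as usize;
    let min_outbound_only = (target_peers as f32 * MIN_OUTBOUND_ONLY_FACTOR).ceil() as usize;
    (max_peers, min_outbound_only) // e.g. 50 -> (55, 5)
}
```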
/// The main struct that handles peer's reputation and connection status.
pub struct PeerManager<TSpec: EthSpec> {
/// Storage of network globals to access the `PeerDB`.
@@ -79,20 +78,22 @@
target_peers: usize,
/// The maximum number of peers we allow (exceptions for subnet peers)
max_peers: usize,
/// The discovery service.
discovery: Discovery<TSpec>,
/// The heartbeat interval to perform routine maintenance.
heartbeat: tokio::time::Interval,
/// Keeps track of whether the discovery service is enabled or not.
discovery_enabled: bool,
/// The logger associated with the `PeerManager`.
log: slog::Logger,
}
/// The events that the `PeerManager` outputs (requests).
pub enum PeerManagerEvent {
/// Dial a PeerId.
Dial(PeerId),
/// Inform libp2p that our external socket addr has been updated.
SocketUpdated(Multiaddr),
/// A peer has dialed us.
PeerConnectedIncoming(PeerId),
/// A peer has been dialed.
PeerConnectedOutgoing(PeerId),
/// A peer has disconnected.
PeerDisconnected(PeerId),
/// Sends a STATUS to a peer.
Status(PeerId),
/// Sends a PING to a peer.
@@ -101,22 +102,22 @@
MetaData(PeerId),
/// The peer should be disconnected.
DisconnectPeer(PeerId, GoodbyeReason),
/// Inform the behaviour to ban this peer and associated ip addresses.
Banned(PeerId, Vec<IpAddr>),
/// The peer should be unbanned with the associated ip addresses.
UnBanned(PeerId, Vec<IpAddr>),
/// Request the behaviour to discover more peers.
DiscoverPeers,
}
impl<TSpec: EthSpec> PeerManager<TSpec> {
// NOTE: Must be run inside a tokio executor.
pub async fn new(
local_key: &Keypair,
config: &NetworkConfig,
network_globals: Arc<NetworkGlobals<TSpec>>,
log: &slog::Logger,
) -> error::Result<Self> {
// start the discovery service
let mut discovery = Discovery::new(local_key, config, network_globals.clone(), log).await?;
// start searching for peers
discovery.discover_peers();
// Set up the peer manager heartbeat interval
let heartbeat = tokio::time::interval(tokio::time::Duration::from_secs(HEARTBEAT_INTERVAL));
Ok(PeerManager {
@@ -127,22 +128,14 @@
status_peers: HashSetDelay::new(Duration::from_secs(STATUS_INTERVAL)),
target_peers: config.target_peers,
max_peers: (config.target_peers as f32 * (1.0 + PEER_EXCESS_FACTOR)).ceil() as usize,
discovery,
heartbeat,
discovery_enabled: !config.disable_discovery,
log: log.clone(),
})
}
/* Public accessible functions */
/// Attempts to connect to a peer.
///
/// Returns true if the peer was accepted into the database.
pub fn dial_peer(&mut self, peer_id: &PeerId) -> bool {
self.events.push(PeerManagerEvent::Dial(*peer_id));
self.connect_peer(peer_id, ConnectingType::Dialing)
}
/// The application layer wants to disconnect from a peer for a particular reason.
///
/// All instant disconnections are fatal and we ban the associated peer.
@@ -217,66 +210,52 @@
self.ban_and_unban_peers(to_ban_peers, to_unban_peers);
}
/* Discovery Requests */
/// Peers that have been returned by discovery requests that are suitable for dialing are
/// returned here.
///
/// NOTE: By dialing `PeerId`s and not multiaddrs, libp2p requests the multiaddr associated
/// with a new `PeerId` which involves a discovery routing table lookup. We could dial the
/// multiaddr here, however this could relate to duplicate PeerId's etc. If the lookup
/// proves resource constraining, we should switch to multiaddr dialling here.
#[allow(clippy::mutable_key_type)]
pub fn peers_discovered(&mut self, results: HashMap<PeerId, Option<Instant>>) -> Vec<PeerId> {
let mut to_dial_peers = Vec::new();
/// Provides a reference to the underlying discovery service.
pub fn discovery(&self) -> &Discovery<TSpec> {
&self.discovery
}
/// Provides a mutable reference to the underlying discovery service.
pub fn discovery_mut(&mut self) -> &mut Discovery<TSpec> {
&mut self.discovery
}
/// A request to find peers on a given subnet.
pub fn discover_subnet_peers(&mut self, subnets_to_discover: Vec<SubnetDiscovery>) {
// If discovery is not started or disabled, ignore the request
if !self.discovery.started {
return;
}
let filtered: Vec<SubnetDiscovery> = subnets_to_discover
.into_iter()
.filter(|s| {
// Extend min_ttl of connected peers on required subnets
if let Some(min_ttl) = s.min_ttl {
let connected_or_dialing = self.network_globals.connected_or_dialing_peers();
for (peer_id, min_ttl) in results {
// we attempt a connection if this peer is a subnet peer or if the max peer count
// is not yet filled (including dialing peers)
if (min_ttl.is_some() || connected_or_dialing + to_dial_peers.len() < self.max_peers)
&& self.network_globals.peers.read().should_dial(&peer_id)
{
// This should be updated with the peer dialing. In fact created once the peer is
// dialed
if let Some(min_ttl) = min_ttl {
self.network_globals
.peers
.write()
.extend_peers_on_subnet(s.subnet_id, min_ttl);
.update_min_ttl(&peer_id, min_ttl);
}
// Already have target number of peers, no need for subnet discovery
let peers_on_subnet = self
.network_globals
.peers
.read()
.good_peers_on_subnet(s.subnet_id)
.count();
if peers_on_subnet >= TARGET_SUBNET_PEERS {
trace!(
self.log,
"Discovery query ignored";
"subnet_id" => ?s.subnet_id,
"reason" => "Already connected to desired peers",
"connected_peers_on_subnet" => peers_on_subnet,
"target_subnet_peers" => TARGET_SUBNET_PEERS,
);
false
// Queue an outgoing connection request to the cached peers that are on `s.subnet_id`.
// If we connect to the cached peers before the discovery query starts, then we potentially
// save a costly discovery query.
} else {
self.dial_cached_enrs_in_subnet(s.subnet_id);
true
}
})
.collect();
// request the subnet query from discovery
if !filtered.is_empty() {
self.discovery.discover_subnet_peers(filtered);
to_dial_peers.push(peer_id);
}
}
// Queue another discovery if we need to
let peer_count = self.network_globals.connected_or_dialing_peers();
let outbound_only_peer_count = self.network_globals.connected_outbound_only_peers();
let min_outbound_only_target =
(self.target_peers as f32 * MIN_OUTBOUND_ONLY_FACTOR).ceil() as usize;
if self.discovery_enabled
&& (peer_count < self.target_peers.saturating_sub(to_dial_peers.len())
|| outbound_only_peer_count < min_outbound_only_target)
{
// We need more peers, re-queue a discovery lookup.
debug!(self.log, "Starting a new peer discovery query"; "connected_peers" => peer_count, "target_peers" => self.target_peers);
self.events.push(PeerManagerEvent::DiscoverPeers);
}
to_dial_peers
}
/// A STATUS message has been received from a peer. This resets the status timer.
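The dialing rule in `peers_discovered` above condenses to: dial a discovered peer if it is wanted for a subnet (a `min_ttl` is present) or if there is still headroom below `max_peers`, and only if the peer DB agrees it should be dialed. As a predicate (illustrative, not code from this diff):

```rust
// Illustrative condensation of the dialing decision above.
fn should_queue_dial(
    has_min_ttl: bool,           // subnet peers are dialed even at capacity
    connected_or_dialing: usize, // current peers plus in-flight dials
    already_queued: usize,       // peers queued for dialing this round
    max_peers: usize,
    peerdb_should_dial: bool,    // not banned/connected/dialing already
) -> bool {
    (has_min_ttl || connected_or_dialing + already_queued < max_peers) && peerdb_should_dial
}
```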
@@ -307,19 +286,144 @@
/* Notifications from the Swarm */
/// Updates the state of the peer as disconnected.
///
/// This is also called when dialing a peer fails.
pub fn notify_disconnect(&mut self, peer_id: &PeerId) {
self.network_globals
.peers
.write()
.notify_disconnect(peer_id);
// A peer is being dialed.
pub fn inject_dialing(&mut self, peer_id: &PeerId, enr: Option<Enr>) {
self.inject_peer_connection(peer_id, ConnectingType::Dialing, enr);
}
// remove the ping and status timer for the peer
self.inbound_ping_peers.remove(peer_id);
self.outbound_ping_peers.remove(peer_id);
self.status_peers.remove(peer_id);
pub fn inject_connection_established(
&mut self,
peer_id: PeerId,
endpoint: ConnectedPoint,
num_established: std::num::NonZeroU32,
enr: Option<Enr>,
) {
// Log the connection
match &endpoint {
ConnectedPoint::Listener { .. } => {
debug!(self.log, "Connection established"; "peer_id" => %peer_id, "connection" => "Incoming", "connections" => %num_established);
}
ConnectedPoint::Dialer { .. } => {
debug!(self.log, "Connection established"; "peer_id" => %peer_id, "connection" => "Outgoing", "connections" => %num_established);
}
}
// Should not be able to connect to a banned peer. Double check here
if self.is_banned(&peer_id) {
warn!(self.log, "Connected to a banned peer"; "peer_id" => %peer_id);
self.events.push(PeerManagerEvent::DisconnectPeer(
peer_id,
GoodbyeReason::Banned,
));
self.network_globals
.peers
.write()
.notify_disconnecting(peer_id, true);
return;
}
// Check the connection limits
if self.peer_limit_reached()
&& self
.network_globals
.peers
.read()
.peer_info(&peer_id)
.map_or(true, |peer| !peer.has_future_duty())
{
self.events.push(PeerManagerEvent::DisconnectPeer(
peer_id,
GoodbyeReason::TooManyPeers,
));
self.network_globals
.peers
.write()
.notify_disconnecting(peer_id, false);
return;
}
// Register the newly connected peer (regardless if we are about to disconnect them).
// NOTE: We don't register peers that we are disconnecting immediately. The network service
// does not need to know about these peers.
match endpoint {
ConnectedPoint::Listener { send_back_addr, .. } => {
self.inject_connect_ingoing(&peer_id, send_back_addr, enr);
if num_established == std::num::NonZeroU32::new(1).expect("valid") {
self.events
.push(PeerManagerEvent::PeerConnectedIncoming(peer_id));
}
}
ConnectedPoint::Dialer { address } => {
self.inject_connect_outgoing(&peer_id, address, enr);
if num_established == std::num::NonZeroU32::new(1).expect("valid") {
self.events
.push(PeerManagerEvent::PeerConnectedOutgoing(peer_id));
}
}
}
// increment prometheus metrics
metrics::inc_counter(&metrics::PEER_CONNECT_EVENT_COUNT);
metrics::set_gauge(
&metrics::PEERS_CONNECTED,
self.network_globals.connected_peers() as i64,
);
}
pub fn inject_connection_closed(
&mut self,
peer_id: PeerId,
_endpoint: ConnectedPoint,
num_established: u32,
) {
if num_established == 0 {
// There are no more connections
// Remove all subnet subscriptions from the peer_db
self.remove_all_subscriptions(&peer_id);
if self
.network_globals
.peers
.read()
.is_connected_or_disconnecting(&peer_id)
{
// We are disconnecting the peer or the peer has already been connected.
// Both these cases, the peer has been previously registered by the peer manager and
// potentially the application layer.
// Inform the application.
self.events
.push(PeerManagerEvent::PeerDisconnected(peer_id));
debug!(self.log, "Peer disconnected"; "peer_id" => %peer_id);
// Decrement the PEERS_PER_CLIENT metric
if let Some(kind) = self
.network_globals
.peers
.read()
.peer_info(&peer_id)
.map(|info| info.client.kind.clone())
{
if let Some(v) =
metrics::get_int_gauge(&metrics::PEERS_PER_CLIENT, &[&kind.to_string()])
{
v.dec()
};
}
}
// NOTE: It may be the case that a rejected node, due to too many peers is disconnected
// here and the peer manager has no knowledge of its connection. We insert it here for
// reference so that peer manager can track this peer.
self.inject_disconnect(&peer_id);
// Update the prometheus metrics
metrics::inc_counter(&metrics::PEER_DISCONNECT_EVENT_COUNT);
metrics::set_gauge(
&metrics::PEERS_CONNECTED,
self.network_globals.connected_peers() as i64,
);
}
}
/// A dial attempt has failed.
@@ -327,27 +431,12 @@
/// NOTE: It can be the case that we are dialing a peer and during the dialing process the peer
/// connects and the dial attempt later fails. To handle this, we only update the peer_db if
/// the peer is not already connected.
-pub fn notify_dial_failure(&mut self, peer_id: &PeerId) {
+pub fn inject_dial_failure(&mut self, peer_id: &PeerId) {
if !self.network_globals.peers.read().is_connected(peer_id) {
self.notify_disconnect(peer_id);
// set peer as disconnected in discovery DHT
debug!(self.log, "Marking peer disconnected in DHT"; "peer_id" => %peer_id);
self.discovery.disconnect_peer(peer_id);
self.inject_disconnect(peer_id);
}
}
/// Sets a peer as connected as long as their reputation allows it
/// Informs if the peer was accepted
pub fn connect_ingoing(&mut self, peer_id: &PeerId, multiaddr: Multiaddr) -> bool {
self.connect_peer(peer_id, ConnectingType::IngoingConnected { multiaddr })
}
/// Sets a peer as connected as long as their reputation allows it
/// Informs if the peer was accepted
pub fn connect_outgoing(&mut self, peer_id: &PeerId, multiaddr: Multiaddr) -> bool {
self.connect_peer(peer_id, ConnectingType::OutgoingConnected { multiaddr })
}
/// Reports if a peer is banned or not.
///
/// This is used to determine if we should accept incoming connections.
@@ -483,6 +572,7 @@
},
},
RPCError::NegotiationTimeout => PeerAction::LowToleranceError,
RPCError::Disconnected => return, // No penalty for a graceful disconnection
};
self.report_peer(peer_id, peer_action, ReportSource::RPC);
@@ -574,22 +664,6 @@
}
}
// Handles the libp2p request to obtain multiaddrs for peer_id's in order to dial them.
pub fn addresses_of_peer(&mut self, peer_id: &PeerId) -> Vec<Multiaddr> {
if let Some(enr) = self.discovery.enr_of_peer(peer_id) {
// ENR's may have multiple Multiaddrs. The multi-addr associated with the UDP
// port is removed, which is assumed to be associated with the discv5 protocol (and
// therefore irrelevant for other libp2p components).
let mut out_list = enr.multiaddr();
out_list.retain(|addr| !addr.iter().any(|v| matches!(v, MProtocol::Udp(_))));
out_list
} else {
// PeerId is not known
Vec::new()
}
}
pub(crate) fn update_gossipsub_scores(&mut self, gossipsub: &Gossipsub) {
let mut to_ban_peers = Vec::new();
let mut to_unban_peers = Vec::new();
@@ -645,71 +719,49 @@
/* Internal functions */
// The underlying discovery server has updated our external IP address. We send this up to
// notify libp2p.
fn socket_updated(&mut self, socket: SocketAddr) {
// Build a multiaddr to report to libp2p
let mut multiaddr = Multiaddr::from(socket.ip());
// NOTE: This doesn't actually track the external TCP port. More sophisticated NAT handling
// should handle this.
multiaddr.push(MProtocol::Tcp(self.network_globals.listen_port_tcp()));
self.events.push(PeerManagerEvent::SocketUpdated(multiaddr));
/// Sets a peer as connected as long as their reputation allows it
/// Informs if the peer was accepted
fn inject_connect_ingoing(
&mut self,
peer_id: &PeerId,
multiaddr: Multiaddr,
enr: Option<Enr>,
) -> bool {
self.inject_peer_connection(peer_id, ConnectingType::IngoingConnected { multiaddr }, enr)
}
/// Dial cached enrs in discovery service that are in the given `subnet_id` and aren't
/// in Connected, Dialing or Banned state.
fn dial_cached_enrs_in_subnet(&mut self, subnet_id: SubnetId) {
let predicate = subnet_predicate::<TSpec>(vec![subnet_id], &self.log);
let peers_to_dial: Vec<PeerId> = self
.discovery()
.cached_enrs()
.filter_map(|(peer_id, enr)| {
let peers = self.network_globals.peers.read();
if predicate(enr) && peers.should_dial(peer_id) {
Some(*peer_id)
} else {
None
}
})
.collect();
for peer_id in &peers_to_dial {
debug!(self.log, "Dialing cached ENR peer"; "peer_id" => %peer_id);
self.dial_peer(peer_id);
}
/// Sets a peer as connected as long as their reputation allows it
/// Informs if the peer was accepted
fn inject_connect_outgoing(
&mut self,
peer_id: &PeerId,
multiaddr: Multiaddr,
enr: Option<Enr>,
) -> bool {
self.inject_peer_connection(
peer_id,
ConnectingType::OutgoingConnected { multiaddr },
enr,
)
}
/// Peers that have been returned by discovery requests are dialed here if they are suitable.
/// Updates the state of the peer as disconnected.
///
/// NOTE: By dialing `PeerId`s and not multiaddrs, libp2p requests the multiaddr associated
/// with a new `PeerId` which involves a discovery routing table lookup. We could dial the
/// multiaddr here, however this could relate to duplicate PeerId's etc. If the lookup
/// proves resource constraining, we should switch to multiaddr dialling here.
#[allow(clippy::mutable_key_type)]
fn peers_discovered(&mut self, results: HashMap<PeerId, Option<Instant>>) {
let mut to_dial_peers = Vec::new();
/// This is also called when dialing a peer fails.
fn inject_disconnect(&mut self, peer_id: &PeerId) {
if self
.network_globals
.peers
.write()
.inject_disconnect(peer_id)
{
self.ban_peer(peer_id);
}
let connected_or_dialing = self.network_globals.connected_or_dialing_peers();
for (peer_id, min_ttl) in results {
// we attempt a connection if this peer is a subnet peer or if the max peer count
// is not yet filled (including dialing peers)
if (min_ttl.is_some() || connected_or_dialing + to_dial_peers.len() < self.max_peers)
&& self.network_globals.peers.read().should_dial(&peer_id)
{
// This should be updated with the peer dialing. In fact created once the peer is
// dialed
if let Some(min_ttl) = min_ttl {
self.network_globals
.peers
.write()
.update_min_ttl(&peer_id, min_ttl);
}
to_dial_peers.push(peer_id);
}
}
for peer_id in to_dial_peers {
debug!(self.log, "Dialing discovered peer"; "peer_id" => %peer_id);
self.dial_peer(&peer_id);
}
// remove the ping and status timer for the peer
self.inbound_ping_peers.remove(peer_id);
self.outbound_ping_peers.remove(peer_id);
self.status_peers.remove(peer_id);
}
/// Registers a peer as connected. The `ingoing` parameter determines if the peer is being
@@ -718,7 +770,12 @@
/// This is called by `connect_ingoing` and `connect_outgoing`.
///
/// Informs if the peer was accepted in to the db or not.
fn connect_peer(&mut self, peer_id: &PeerId, connection: ConnectingType) -> bool {
fn inject_peer_connection(
&mut self,
peer_id: &PeerId,
connection: ConnectingType,
enr: Option<Enr>,
) -> bool {
{
let mut peerdb = self.network_globals.peers.write();
if peerdb.is_banned(&peer_id) {
@@ -726,8 +783,6 @@
slog::crit!(self.log, "Connection has been allowed to a banned peer"; "peer_id" => %peer_id);
}
let enr = self.discovery.enr_of_peer(peer_id);
match connection {
ConnectingType::Dialing => {
peerdb.dialing_peer(peer_id, enr);
@@ -774,6 +829,8 @@
true
}
/// This handles score transitions between states. It transitions peers states from
/// disconnected/banned/connected.
fn handle_score_transitions(
previous_state: ScoreState,
peer_id: &PeerId,
@@ -814,6 +871,7 @@
}
}
/// Updates the state of banned peers.
fn ban_and_unban_peers(&mut self, to_ban_peers: Vec<PeerId>, to_unban_peers: Vec<PeerId>) {
// process banning peers
for peer_id in to_ban_peers {
@@ -883,7 +941,9 @@
})
.unwrap_or_default();
self.discovery.ban_peer(&peer_id, banned_ip_addresses);
// Inform the Swarm to ban the peer
self.events
.push(PeerManagerEvent::Banned(*peer_id, banned_ip_addresses));
}
/// Unbans a peer.
@@ -899,7 +959,9 @@
.map(|info| info.seen_addresses().collect::<Vec<_>>())
.unwrap_or_default();
self.discovery.unban_peer(&peer_id, seen_ip_addresses);
// Inform the Swarm to unban the peer
self.events
.push(PeerManagerEvent::UnBanned(*peer_id, seen_ip_addresses));
Ok(())
}
@@ -915,12 +977,13 @@
let min_outbound_only_target =
(self.target_peers as f32 * MIN_OUTBOUND_ONLY_FACTOR).ceil() as usize;
if peer_count < self.target_peers || outbound_only_peer_count < min_outbound_only_target {
if self.discovery_enabled
&& (peer_count < self.target_peers
|| outbound_only_peer_count < min_outbound_only_target)
{
// If we need more peers, queue a discovery lookup.
if self.discovery.started {
debug!(self.log, "Starting a new peer discovery query"; "connected_peers" => peer_count, "target_peers" => self.target_peers);
self.discovery.discover_peers();
}
debug!(self.log, "Starting a new peer discovery query"; "connected_peers" => peer_count, "target_peers" => self.target_peers);
self.events.push(PeerManagerEvent::DiscoverPeers);
}
// Updates peer's scores.
@@ -959,7 +1022,7 @@
let mut peer_db = self.network_globals.peers.write();
for peer_id in disconnecting_peers {
-peer_db.notify_disconnecting(&peer_id);
+peer_db.notify_disconnecting(peer_id, false);
self.events.push(PeerManagerEvent::DisconnectPeer(
peer_id,
GoodbyeReason::TooManyPeers,
@@ -977,14 +1040,6 @@
self.heartbeat();
}
// handle any discovery events
while let Poll::Ready(event) = self.discovery.poll(cx) {
match event {
DiscoveryEvent::SocketUpdated(socket_addr) => self.socket_updated(socket_addr),
DiscoveryEvent::QueryResult(results) => self.peers_discovered(results),
}
}
// poll the timeouts for pings and status'
loop {
match self.inbound_ping_peers.poll_next_unpin(cx) {
@@ -1108,7 +1163,7 @@ mod tests {
vec![],
&log,
);
-PeerManager::new(&keypair, &config, Arc::new(globals), &log)
+PeerManager::new(&config, Arc::new(globals), &log)
.await
.unwrap()
}
@@ -1125,11 +1180,19 @@
let outbound_only_peer1 = PeerId::random();
let outbound_only_peer2 = PeerId::random();
peer_manager.connect_ingoing(&peer0, "/ip4/0.0.0.0".parse().unwrap());
peer_manager.connect_ingoing(&peer1, "/ip4/0.0.0.0".parse().unwrap());
peer_manager.connect_ingoing(&peer2, "/ip4/0.0.0.0".parse().unwrap());
peer_manager.connect_outgoing(&outbound_only_peer1, "/ip4/0.0.0.0".parse().unwrap());
peer_manager.connect_outgoing(&outbound_only_peer2, "/ip4/0.0.0.0".parse().unwrap());
peer_manager.inject_connect_ingoing(&peer0, "/ip4/0.0.0.0".parse().unwrap(), None);
peer_manager.inject_connect_ingoing(&peer1, "/ip4/0.0.0.0".parse().unwrap(), None);
peer_manager.inject_connect_ingoing(&peer2, "/ip4/0.0.0.0".parse().unwrap(), None);
peer_manager.inject_connect_outgoing(
&outbound_only_peer1,
"/ip4/0.0.0.0".parse().unwrap(),
None,
);
peer_manager.inject_connect_outgoing(
&outbound_only_peer2,
"/ip4/0.0.0.0".parse().unwrap(),
None,
);
// Set the outbound-only peers to have the lowest score.
peer_manager
@@ -1181,13 +1244,17 @@
// Connect to 20 ingoing-only peers.
for _i in 0..19 {
let peer = PeerId::random();
peer_manager.connect_ingoing(&peer, "/ip4/0.0.0.0".parse().unwrap());
peer_manager.inject_connect_ingoing(&peer, "/ip4/0.0.0.0".parse().unwrap(), None);
}
// Connect an outbound-only peer.
// Give it the lowest score so that it is evaluated first in the disconnect list iterator.
let outbound_only_peer = PeerId::random();
peer_manager.connect_ingoing(&outbound_only_peer, "/ip4/0.0.0.0".parse().unwrap());
peer_manager.inject_connect_ingoing(
&outbound_only_peer,
"/ip4/0.0.0.0".parse().unwrap(),
None,
);
peer_manager
.network_globals
.peers
@@ -1213,12 +1280,20 @@
let inbound_only_peer1 = PeerId::random();
let outbound_only_peer1 = PeerId::random();
peer_manager.connect_ingoing(&peer0, "/ip4/0.0.0.0".parse().unwrap());
peer_manager.connect_outgoing(&peer0, "/ip4/0.0.0.0".parse().unwrap());
peer_manager.inject_connect_ingoing(&peer0, "/ip4/0.0.0.0".parse().unwrap(), None);
peer_manager.inject_connect_outgoing(&peer0, "/ip4/0.0.0.0".parse().unwrap(), None);
// Connect to two peers that are on the threshold of being disconnected.
peer_manager.connect_ingoing(&inbound_only_peer1, "/ip4/0.0.0.0".parse().unwrap());
peer_manager.connect_outgoing(&outbound_only_peer1, "/ip4/0.0.0.0".parse().unwrap());
peer_manager.inject_connect_ingoing(
&inbound_only_peer1,
"/ip4/0.0.0.0".parse().unwrap(),
None,
);
peer_manager.inject_connect_outgoing(
&outbound_only_peer1,
"/ip4/0.0.0.0".parse().unwrap(),
None,
);
peer_manager
.network_globals
.peers
@@ -1268,12 +1343,20 @@
let inbound_only_peer1 = PeerId::random();
let outbound_only_peer1 = PeerId::random();
peer_manager.connect_ingoing(&peer0, "/ip4/0.0.0.0".parse().unwrap());
peer_manager.connect_ingoing(&peer1, "/ip4/0.0.0.0".parse().unwrap());
peer_manager.inject_connect_ingoing(&peer0, "/ip4/0.0.0.0".parse().unwrap(), None);
peer_manager.inject_connect_ingoing(&peer1, "/ip4/0.0.0.0".parse().unwrap(), None);
// Connect to two peers that are on the threshold of being disconnected.
peer_manager.connect_ingoing(&inbound_only_peer1, "/ip4/0.0.0.0".parse().unwrap());
peer_manager.connect_outgoing(&outbound_only_peer1, "/ip4/0.0.0.0".parse().unwrap());
peer_manager.inject_connect_ingoing(
&inbound_only_peer1,
"/ip4/0.0.0.0".parse().unwrap(),
None,
);
peer_manager.inject_connect_outgoing(
&outbound_only_peer1,
"/ip4/0.0.0.0".parse().unwrap(),
None,
);
peer_manager
.network_globals
.peers
@@ -1320,12 +1403,20 @@
let inbound_only_peer1 = PeerId::random();
let outbound_only_peer1 = PeerId::random();
peer_manager.connect_ingoing(&peer0, "/ip4/0.0.0.0".parse().unwrap());
peer_manager.connect_ingoing(&peer1, "/ip4/0.0.0.0".parse().unwrap());
peer_manager.connect_ingoing(&peer2, "/ip4/0.0.0.0".parse().unwrap());
peer_manager.connect_outgoing(&outbound_only_peer1, "/ip4/0.0.0.0".parse().unwrap());
peer_manager.inject_connect_ingoing(&peer0, "/ip4/0.0.0.0".parse().unwrap(), None);
peer_manager.inject_connect_ingoing(&peer1, "/ip4/0.0.0.0".parse().unwrap(), None);
peer_manager.inject_connect_ingoing(&peer2, "/ip4/0.0.0.0".parse().unwrap(), None);
peer_manager.inject_connect_outgoing(
&outbound_only_peer1,
"/ip4/0.0.0.0".parse().unwrap(),
None,
);
// Have one peer be on the verge of disconnection.
peer_manager.connect_ingoing(&inbound_only_peer1, "/ip4/0.0.0.0".parse().unwrap());
peer_manager.inject_connect_ingoing(
&inbound_only_peer1,
"/ip4/0.0.0.0".parse().unwrap(),
None,
);
peer_manager
.network_globals
.peers

View File

@ -198,25 +198,16 @@ impl<T: EthSpec> PeerInfo<T> {
// Setters
/// Modifies the status to Disconnected and sets the last seen instant to now. Returns None if
/// no changes were made. Returns Some(bool) where the bool represents if peer became banned or
/// simply just disconnected.
/// no changes were made. Returns Some(bool) where the bool represents whether the peer is now
/// to be banned.
pub fn notify_disconnect(&mut self) -> Option<bool> {
match self.connection_status {
Banned { .. } | Disconnected { .. } => None,
Disconnecting { to_ban } => {
// If we are disconnecting this peer in the process of banning, we now ban the
// peer.
if to_ban {
self.connection_status = Banned {
since: Instant::now(),
};
Some(true)
} else {
self.connection_status = Disconnected {
since: Instant::now(),
};
Some(false)
}
self.connection_status = Disconnected {
since: Instant::now(),
};
Some(to_ban)
}
Connected { .. } | Dialing { .. } | Unknown => {
self.connection_status = Disconnected {
@ -227,11 +218,8 @@ impl<T: EthSpec> PeerInfo<T> {
}
}
/// Notify the we are currently disconnecting this peer, after which the peer will be
/// considered banned.
// This intermediate state is required to inform the network behaviours that the sub-protocols
// are aware this peer exists and it is in the process of being banned. Compared to nodes that
// try to connect to us and are already banned (sub protocols do not know of these peers).
/// Notify that we are currently disconnecting this peer. Optionally ban the peer after the
/// disconnect.
pub fn disconnecting(&mut self, to_ban: bool) {
self.connection_status = Disconnecting { to_ban }
}
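For readers following the state changes, here is a minimal, self-contained sketch of the simplified transition above. The variant and field names mirror the diff, but the Dialing/Unknown states and per-state metadata are elided, so this is illustrative rather than the real PeerInfo:

use std::time::Instant;

// Illustrative subset of the connection states touched above.
enum ConnectionStatus {
    Connected,
    Disconnecting { to_ban: bool },
    Disconnected { since: Instant },
    Banned { since: Instant },
}

impl ConnectionStatus {
    // Mirrors the simplified notify_disconnect: the peer always ends up
    // Disconnected; the returned bool only tells the caller (the peer db)
    // whether a ban should now be applied.
    fn notify_disconnect(&mut self) -> Option<bool> {
        match *self {
            ConnectionStatus::Banned { .. } | ConnectionStatus::Disconnected { .. } => None,
            ConnectionStatus::Disconnecting { to_ban } => {
                *self = ConnectionStatus::Disconnected { since: Instant::now() };
                Some(to_ban)
            }
            ConnectionStatus::Connected => {
                *self = ConnectionStatus::Disconnected { since: Instant::now() };
                Some(false)
            }
        }
    }
}

fn main() {
    let mut status = ConnectionStatus::Connected;
    assert_eq!(status.notify_disconnect(), Some(false)); // plain disconnect
    status = ConnectionStatus::Disconnecting { to_ban: true };
    assert_eq!(status.notify_disconnect(), Some(true)); // caller must now ban
    assert_eq!(status.notify_disconnect(), None); // already disconnected
}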

View File

@ -453,29 +453,33 @@ impl<TSpec: EthSpec> PeerDB<TSpec> {
self.connect(peer_id, multiaddr, enr, ConnectionDirection::Outgoing)
}
/// Sets the peer as disconnected. A banned peer remains banned
pub fn notify_disconnect(&mut self, peer_id: &PeerId) {
/// Sets the peer as disconnected. A banned peer remains banned. Returns true if the peer is
/// now to be banned, otherwise false.
pub fn inject_disconnect(&mut self, peer_id: &PeerId) -> bool {
// Note that it could be the case we prevent new nodes from joining. In this instance,
// we don't bother tracking the new node.
if let Some(info) = self.peers.get_mut(peer_id) {
if let Some(became_banned) = info.notify_disconnect() {
if became_banned {
self.banned_peers_count
.add_banned_peer(info.seen_addresses());
} else {
self.disconnected_peers += 1;
}
if !matches!(
info.connection_status(),
PeerConnectionStatus::Disconnected { .. } | PeerConnectionStatus::Banned { .. }
) {
self.disconnected_peers += 1;
}
let result = info.notify_disconnect().unwrap_or(false);
self.shrink_to_fit();
result
} else {
false
}
}
/// Notifies the peer manager that the peer is undergoing a normal disconnect (without banning
/// afterwards.
pub fn notify_disconnecting(&mut self, peer_id: &PeerId) {
if let Some(info) = self.peers.get_mut(peer_id) {
info.disconnecting(false);
}
/// Notifies the peer manager that the peer is undergoing a normal disconnect. Optionally tag
/// the peer to be banned after the disconnect.
pub fn notify_disconnecting(&mut self, peer_id: PeerId, to_ban_afterwards: bool) {
self.peers
.entry(peer_id)
.or_default()
.disconnecting(to_ban_afterwards);
}
/// Marks a peer to be disconnected and then banned.
@ -505,15 +509,17 @@ impl<TSpec: EthSpec> PeerDB<TSpec> {
PeerConnectionStatus::Disconnected { .. } => {
// It is possible to ban a peer that is already disconnected if many events that score it
// poorly are processed after it has disconnected.
debug!(log_ref, "Banning a disconnected peer"; "peer_id" => %peer_id);
self.disconnected_peers = self.disconnected_peers.saturating_sub(1);
info.ban();
self.banned_peers_count
.add_banned_peer(info.seen_addresses());
self.shrink_to_fit();
false
}
PeerConnectionStatus::Disconnecting { .. } => {
warn!(log_ref, "Banning peer that is currently disconnecting"; "peer_id" => %peer_id);
// NOTE: This can occur due to a rapid downscore of a peer. It goes through the
// disconnection phase and straight into banning in a short time-frame.
debug!(log_ref, "Banning peer that is currently disconnecting"; "peer_id" => %peer_id);
info.disconnecting(true);
false
}
@ -532,6 +538,7 @@ impl<TSpec: EthSpec> PeerDB<TSpec> {
self.banned_peers_count
.add_banned_peer(info.seen_addresses());
info.ban();
self.shrink_to_fit();
false
}
}
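The updated tests below exercise the new two-phase ban flow: disconnect_and_ban on a connected peer only tags it Disconnecting { to_ban: true }, inject_disconnect performs the actual disconnect and reports whether a ban is due, and a second disconnect_and_ban call applies it. A toy stand-in (not the real PeerDB) showing that call order:

// Toy stand-in for PeerDB, just to show the call order the tests rely on;
// the real type also tracks counters, addresses and ban metadata.
enum State {
    Connected,
    Disconnecting { to_ban: bool },
    Disconnected,
    Banned,
}

struct MiniPeerDb {
    state: State,
}

impl MiniPeerDb {
    // Phase 1 (peer still connected): tag the ban. Phase 2 (peer already
    // disconnected): apply it.
    fn disconnect_and_ban(&mut self) {
        let next = match self.state {
            State::Connected | State::Disconnecting { .. } => {
                State::Disconnecting { to_ban: true }
            }
            State::Disconnected | State::Banned => State::Banned,
        };
        self.state = next;
    }

    // The swarm reports the disconnect; returns true if a ban is now due.
    fn inject_disconnect(&mut self) -> bool {
        let to_ban = matches!(self.state, State::Disconnecting { to_ban: true });
        if !matches!(self.state, State::Banned) {
            self.state = State::Disconnected;
        }
        to_ban
    }
}

fn main() {
    let mut pdb = MiniPeerDb { state: State::Connected };
    pdb.disconnect_and_ban(); // tag: ban once the disconnect lands
    assert!(pdb.inject_disconnect()); // disconnect arrives, ban is due
    pdb.disconnect_and_ban(); // apply the ban
    assert!(matches!(pdb.state, State::Banned));
}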
@ -726,7 +733,7 @@ mod tests {
assert_eq!(pdb.disconnected_peers, 0);
for p in pdb.connected_peer_ids().cloned().collect::<Vec<_>>() {
pdb.notify_disconnect(&p);
pdb.inject_disconnect(&p);
}
assert_eq!(pdb.disconnected_peers, MAX_DC_PEERS);
@ -744,7 +751,8 @@ mod tests {
for p in pdb.connected_peer_ids().cloned().collect::<Vec<_>>() {
pdb.disconnect_and_ban(&p);
pdb.notify_disconnect(&p);
pdb.inject_disconnect(&p);
pdb.disconnect_and_ban(&p);
}
assert_eq!(pdb.banned_peers_count.banned_peers(), MAX_BANNED_PEERS);
@ -804,23 +812,24 @@ mod tests {
pdb.connect_ingoing(&random_peer, "/ip4/0.0.0.0".parse().unwrap(), None);
assert_eq!(pdb.disconnected_peers, pdb.disconnected_peers().count());
pdb.notify_disconnect(&random_peer);
pdb.inject_disconnect(&random_peer);
assert_eq!(pdb.disconnected_peers, pdb.disconnected_peers().count());
pdb.connect_outgoing(&random_peer, "/ip4/0.0.0.0".parse().unwrap(), None);
assert_eq!(pdb.disconnected_peers, pdb.disconnected_peers().count());
pdb.notify_disconnect(&random_peer);
pdb.inject_disconnect(&random_peer);
assert_eq!(pdb.disconnected_peers, pdb.disconnected_peers().count());
pdb.disconnect_and_ban(&random_peer);
pdb.notify_disconnect(&random_peer);
pdb.inject_disconnect(&random_peer);
pdb.disconnect_and_ban(&random_peer);
assert_eq!(pdb.disconnected_peers, pdb.disconnected_peers().count());
pdb.notify_disconnect(&random_peer);
pdb.inject_disconnect(&random_peer);
assert_eq!(pdb.disconnected_peers, pdb.disconnected_peers().count());
pdb.notify_disconnect(&random_peer);
pdb.inject_disconnect(&random_peer);
assert_eq!(pdb.disconnected_peers, pdb.disconnected_peers().count());
pdb.notify_disconnect(&random_peer);
pdb.inject_disconnect(&random_peer);
assert_eq!(pdb.disconnected_peers, pdb.disconnected_peers().count());
}
@ -835,6 +844,10 @@ mod tests {
let random_peer1 = PeerId::random();
let random_peer2 = PeerId::random();
let random_peer3 = PeerId::random();
println!("{}", random_peer);
println!("{}", random_peer1);
println!("{}", random_peer2);
println!("{}", random_peer3);
pdb.connect_ingoing(&random_peer, multiaddr.clone(), None);
pdb.connect_ingoing(&random_peer1, multiaddr.clone(), None);
@ -846,10 +859,17 @@ mod tests {
pdb.banned_peers().count()
);
println!("1:{}", pdb.disconnected_peers);
pdb.connect_ingoing(&random_peer, multiaddr.clone(), None);
pdb.notify_disconnect(&random_peer1);
pdb.inject_disconnect(&random_peer1);
println!("2:{}", pdb.disconnected_peers);
pdb.disconnect_and_ban(&random_peer2);
pdb.notify_disconnect(&random_peer2);
println!("3:{}", pdb.disconnected_peers);
pdb.inject_disconnect(&random_peer2);
println!("4:{}", pdb.disconnected_peers);
pdb.disconnect_and_ban(&random_peer2);
println!("5:{}", pdb.disconnected_peers);
pdb.connect_ingoing(&random_peer3, multiaddr.clone(), None);
assert_eq!(pdb.disconnected_peers, pdb.disconnected_peers().count());
assert_eq!(
@ -857,7 +877,16 @@ mod tests {
pdb.banned_peers().count()
);
pdb.disconnect_and_ban(&random_peer1);
pdb.notify_disconnect(&random_peer1);
println!("6:{}", pdb.disconnected_peers);
pdb.inject_disconnect(&random_peer1);
println!("7:{}", pdb.disconnected_peers);
pdb.disconnect_and_ban(&random_peer1);
println!("8:{}", pdb.disconnected_peers);
println!(
"{}, {:?}",
pdb.disconnected_peers,
pdb.disconnected_peers().collect::<Vec<_>>()
);
assert_eq!(pdb.disconnected_peers, pdb.disconnected_peers().count());
assert_eq!(
pdb.banned_peers_count.banned_peers(),
@ -871,7 +900,8 @@ mod tests {
pdb.banned_peers().count()
);
pdb.disconnect_and_ban(&random_peer3);
pdb.notify_disconnect(&random_peer3);
pdb.inject_disconnect(&random_peer3);
pdb.disconnect_and_ban(&random_peer3);
assert_eq!(pdb.disconnected_peers, pdb.disconnected_peers().count());
assert_eq!(
pdb.banned_peers_count.banned_peers(),
@ -879,32 +909,34 @@ mod tests {
);
pdb.disconnect_and_ban(&random_peer3);
pdb.notify_disconnect(&random_peer3);
pdb.connect_ingoing(&random_peer1, multiaddr.clone(), None);
pdb.notify_disconnect(&random_peer2);
pdb.inject_disconnect(&random_peer3);
pdb.disconnect_and_ban(&random_peer3);
pdb.connect_ingoing(&random_peer1, multiaddr.clone(), None);
pdb.inject_disconnect(&random_peer2);
pdb.disconnect_and_ban(&random_peer3);
pdb.inject_disconnect(&random_peer3);
pdb.disconnect_and_ban(&random_peer3);
pdb.notify_disconnect(&random_peer3);
pdb.connect_ingoing(&random_peer, multiaddr, None);
assert_eq!(pdb.disconnected_peers, pdb.disconnected_peers().count());
assert_eq!(
pdb.banned_peers_count.banned_peers(),
pdb.banned_peers().count()
);
pdb.notify_disconnect(&random_peer);
pdb.inject_disconnect(&random_peer);
assert_eq!(pdb.disconnected_peers, pdb.disconnected_peers().count());
assert_eq!(
pdb.banned_peers_count.banned_peers(),
pdb.banned_peers().count()
);
pdb.notify_disconnect(&random_peer);
pdb.inject_disconnect(&random_peer);
assert_eq!(pdb.disconnected_peers, pdb.disconnected_peers().count());
assert_eq!(
pdb.banned_peers_count.banned_peers(),
pdb.banned_peers().count()
);
pdb.disconnect_and_ban(&random_peer);
pdb.notify_disconnect(&random_peer);
pdb.inject_disconnect(&random_peer);
assert_eq!(pdb.disconnected_peers, pdb.disconnected_peers().count());
}
@ -950,7 +982,8 @@ mod tests {
for p in &peers[..BANNED_PEERS_PER_IP_THRESHOLD + 1] {
pdb.disconnect_and_ban(p);
pdb.notify_disconnect(p);
pdb.inject_disconnect(p);
pdb.disconnect_and_ban(p);
}
// Check that ip1 and ip2 are banned but ip3-5 are not
@ -962,7 +995,8 @@ mod tests {
// Also ban the last peer in peers
pdb.disconnect_and_ban(&peers[BANNED_PEERS_PER_IP_THRESHOLD + 1]);
pdb.notify_disconnect(&peers[BANNED_PEERS_PER_IP_THRESHOLD + 1]);
pdb.inject_disconnect(&peers[BANNED_PEERS_PER_IP_THRESHOLD + 1]);
pdb.disconnect_and_ban(&peers[BANNED_PEERS_PER_IP_THRESHOLD + 1]);
// Check that ip1-ip4 are banned but ip5 is not
assert!(pdb.is_banned(&p1));
@ -1012,7 +1046,8 @@ mod tests {
// ban all peers
for p in &peers {
pdb.disconnect_and_ban(p);
pdb.notify_disconnect(p);
pdb.inject_disconnect(p);
pdb.disconnect_and_ban(p);
}
// check ip is banned
@ -1033,7 +1068,8 @@ mod tests {
for p in &peers {
pdb.connect_ingoing(&p, socker_addr.clone(), None);
pdb.disconnect_and_ban(p);
pdb.notify_disconnect(p);
pdb.inject_disconnect(p);
pdb.disconnect_and_ban(p);
}
// Both IPs are now banned
@ -1049,7 +1085,8 @@ mod tests {
// reban every peer except one
for p in &peers[1..] {
pdb.disconnect_and_ban(p);
pdb.notify_disconnect(p);
pdb.inject_disconnect(p);
pdb.disconnect_and_ban(p);
}
// nothing is banned
@ -1058,7 +1095,8 @@ mod tests {
//reban last peer
pdb.disconnect_and_ban(&peers[0]);
pdb.notify_disconnect(&peers[0]);
pdb.inject_disconnect(&peers[0]);
pdb.disconnect_and_ban(&peers[0]);
// IPs are banned again
assert!(pdb.is_banned(&p1));

View File

@ -5,7 +5,7 @@
//! As the logic develops this documentation will advance.
//!
//! The scoring algorithms are currently experimental.
use crate::behaviour::GOSSIPSUB_GREYLIST_THRESHOLD;
use crate::behaviour::gossipsub_scoring_parameters::GREYLIST_THRESHOLD as GOSSIPSUB_GREYLIST_THRESHOLD;
use serde::Serialize;
use std::time::Instant;
use strum::AsRefStr;
@ -31,7 +31,7 @@ const MIN_SCORE: f64 = -100.0;
/// The halflife of a peer's score, i.e. the number of seconds it takes for the score to decay to half its value.
const SCORE_HALFLIFE: f64 = 600.0;
/// The number of seconds we ban a peer for before their score begins to decay.
const BANNED_BEFORE_DECAY: Duration = Duration::from_secs(1800);
const BANNED_BEFORE_DECAY: Duration = Duration::from_secs(12 * 3600); // 12 hours
/// We weight negative gossipsub scores in such a way that they never result in a disconnect by
/// themselves. This "solves" the problem of non-decaying gossipsub scores for disconnected peers.
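With the constants above, a banned peer's score is now frozen for 12 hours (up from 30 minutes) and then decays with a 10-minute halflife. A small sketch of the arithmetic; the constants mirror this file, but the exponential form is the standard halflife decay and is an assumption, since the decay code itself is outside this diff:

// Constants mirror this file; the formula below is assumed.
const SCORE_HALFLIFE: f64 = 600.0; // seconds
const BANNED_BEFORE_DECAY_SECS: f64 = 12.0 * 3600.0; // 12 hours

// Score after `elapsed_secs` since the ban, with no further events.
fn decayed_score(score_at_ban: f64, elapsed_secs: f64) -> f64 {
    let decaying_for = (elapsed_secs - BANNED_BEFORE_DECAY_SECS).max(0.0);
    score_at_ban * 0.5_f64.powf(decaying_for / SCORE_HALFLIFE)
}

fn main() {
    // Frozen for the first 12 hours...
    assert_eq!(decayed_score(-100.0, 6.0 * 3600.0), -100.0);
    // ...then halving every 10 minutes: one hour later it has halved 6 times.
    let one_hour_in = decayed_score(-100.0, 13.0 * 3600.0);
    assert!((one_hour_in - (-100.0 / 64.0)).abs() < 1e-9);
}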

View File

@ -1,8 +1,10 @@
#![allow(clippy::type_complexity)]
#![allow(clippy::cognitive_complexity)]
use super::methods::{RPCCodedResponse, RPCResponseErrorCode, RequestId, ResponseTermination};
use super::protocol::{Protocol, RPCError, RPCProtocol};
use super::methods::{
GoodbyeReason, RPCCodedResponse, RPCResponseErrorCode, RequestId, ResponseTermination,
};
use super::protocol::{InboundRequest, Protocol, RPCError, RPCProtocol};
use super::{RPCReceived, RPCSend};
use crate::rpc::outbound::{OutboundFramed, OutboundRequest};
use crate::rpc::protocol::InboundFramed;
@ -221,13 +223,14 @@ where
}
}
/// Initiates the handler's shutdown process, sending an optional last message to the peer.
pub fn shutdown(&mut self, final_msg: Option<(RequestId, OutboundRequest<TSpec>)>) {
/// Initiates the handler's shutdown process, sending an optional Goodbye message to the
/// peer.
fn shutdown(&mut self, goodbye_reason: Option<GoodbyeReason>) {
if matches!(self.state, HandlerState::Active) {
if !self.dial_queue.is_empty() {
debug!(self.log, "Starting handler shutdown"; "unsent_queued_requests" => self.dial_queue.len());
}
// we now drive to completion communications already dialed/established
// We now drive to completion any communications that were already dialed/established
while let Some((id, req)) = self.dial_queue.pop() {
self.events_out.push(Err(HandlerErr::Outbound {
error: RPCError::HandlerRejected,
@ -236,9 +239,10 @@ where
}));
}
// Queue our final message, if any
if let Some((id, req)) = final_msg {
self.dial_queue.push((id, req));
// Queue our goodbye message.
if let Some(reason) = goodbye_reason {
self.dial_queue
.push((RequestId::Router, OutboundRequest::Goodbye(reason)));
}
self.state = HandlerState::ShuttingDown(Box::new(sleep_until(
@ -345,6 +349,11 @@ where
);
}
// If we received a goodbye, shut down the connection.
if let InboundRequest::Goodbye(_) = req {
self.shutdown(None);
}
self.events_out.push(Ok(RPCReceived::Request(
self.current_inbound_substream_id,
req,
@ -412,6 +421,7 @@ where
match rpc_event {
RPCSend::Request(id, req) => self.send_request(id, req),
RPCSend::Response(inbound_id, response) => self.send_response(inbound_id, response),
RPCSend::Shutdown(reason) => self.shutdown(Some(reason)),
}
}
@ -512,6 +522,9 @@ where
if delay.is_elapsed() {
self.state = HandlerState::Deactivated;
debug!(self.log, "Handler deactivated");
return Poll::Ready(ProtocolsHandlerEvent::Close(RPCError::InternalError(
"Shutdown timeout",
)));
}
}
@ -864,6 +877,19 @@ where
protocol: SubstreamProtocol::new(req.clone(), ()).map_info(|()| (id, req)),
});
}
// If we have completed sending a goodbye and drained all substreams, disconnect.
if let HandlerState::ShuttingDown(_) = self.state {
if self.dial_queue.is_empty()
&& self.outbound_substreams.is_empty()
&& self.inbound_substreams.is_empty()
&& self.events_out.is_empty()
&& self.dial_negotiated == 0
{
return Poll::Ready(ProtocolsHandlerEvent::Close(RPCError::Disconnected));
}
}
Poll::Pending
}
}
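The close condition above amounts to "all work is drained": no queued dials, no live substreams in either direction, no pending events, and nothing mid-negotiation. Restated as a standalone predicate, with illustrative parameter names rather than the handler's real fields:

// Illustrative restatement of the shutdown check above.
fn ready_to_close(
    dial_queue: usize,
    outbound_substreams: usize,
    inbound_substreams: usize,
    events_out: usize,
    dial_negotiated: u32,
) -> bool {
    dial_queue == 0
        && outbound_substreams == 0
        && inbound_substreams == 0
        && events_out == 0
        && dial_negotiated == 0
}

fn main() {
    // One outbound substream still flushing the goodbye: keep polling.
    assert!(!ready_to_close(0, 1, 0, 0, 0));
    // Everything drained: safe to return Close and drop the connection.
    assert!(ready_to_close(0, 0, 0, 0, 0));
}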

View File

@ -149,9 +149,9 @@ impl From<u64> for GoodbyeReason {
}
}
impl Into<u64> for GoodbyeReason {
fn into(self) -> u64 {
self as u64
impl From<GoodbyeReason> for u64 {
fn from(reason: GoodbyeReason) -> u64 {
reason as u64
}
}
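This is the idiomatic direction for conversions in Rust: implementing From<GoodbyeReason> for u64 provides Into<u64> for free via the standard library's blanket impl, whereas a hand-written Into provides no From. A minimal sketch with a stand-in enum (the discriminant values here are illustrative):

// Stand-in enum; the real GoodbyeReason uses protocol-defined codes.
#[derive(Clone, Copy)]
enum GoodbyeReason {
    ClientShutdown = 1,
    IrrelevantNetwork = 2,
}

impl From<GoodbyeReason> for u64 {
    fn from(reason: GoodbyeReason) -> u64 {
        reason as u64
    }
}

fn main() {
    // `From` is usable directly...
    assert_eq!(u64::from(GoodbyeReason::ClientShutdown), 1);
    // ...and the blanket `impl<T, U: From<T>> Into<U> for T` gives .into() too.
    let code: u64 = GoodbyeReason::IrrelevantNetwork.into();
    assert_eq!(code, 2);
}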

View File

@ -52,6 +52,8 @@ pub enum RPCSend<TSpec: EthSpec> {
/// peer. The second parameter is a single chunk of a response. These go over *inbound*
/// connections.
Response(SubstreamId, RPCCodedResponse<TSpec>),
/// Lighthouse has requested to terminate the connection with a goodbye message.
Shutdown(GoodbyeReason),
}
/// RPC events received from outside Lighthouse.
@ -77,6 +79,7 @@ impl<T: EthSpec> std::fmt::Display for RPCSend<T> {
match self {
RPCSend::Request(id, req) => write!(f, "RPC Request(id: {:?}, {})", id, req),
RPCSend::Response(id, res) => write!(f, "RPC Response(id: {:?}, {})", id, res),
RPCSend::Shutdown(reason) => write!(f, "Sending Goodbye: {}", reason),
}
}
}
@ -115,11 +118,7 @@ impl<TSpec: EthSpec> RPC<TSpec> {
methods::MAX_REQUEST_BLOCKS,
Duration::from_secs(10),
)
.n_every(
Protocol::BlocksByRoot,
methods::MAX_REQUEST_BLOCKS,
Duration::from_secs(10),
)
.n_every(Protocol::BlocksByRoot, 128, Duration::from_secs(10))
.build()
.expect("Configuration parameters are valid");
RPC {
@ -160,6 +159,16 @@ impl<TSpec: EthSpec> RPC<TSpec> {
event: RPCSend::Request(request_id, event),
});
}
/// Lighthouse wishes to disconnect from this peer. This gracefully terminates the RPC
/// behaviour by sending a Goodbye message before closing the connection.
pub fn shutdown(&mut self, peer_id: PeerId, reason: GoodbyeReason) {
self.events.push(NetworkBehaviourAction::NotifyHandler {
peer_id,
handler: NotifyHandler::Any,
event: RPCSend::Shutdown(reason),
});
}
}
impl<TSpec> NetworkBehaviour for RPC<TSpec>

View File

@ -452,6 +452,8 @@ pub enum RPCError {
NegotiationTimeout,
/// Handler rejected this request.
HandlerRejected,
/// We have intentionally disconnected.
Disconnected,
}
impl From<ssz::DecodeError> for RPCError {
@ -490,6 +492,7 @@ impl std::fmt::Display for RPCError {
RPCError::InternalError(ref err) => write!(f, "Internal error: {}", err),
RPCError::NegotiationTimeout => write!(f, "Negotiation timeout"),
RPCError::HandlerRejected => write!(f, "Handler rejected the request"),
RPCError::Disconnected => write!(f, "Gracefully Disconnected"),
}
}
}
@ -508,6 +511,7 @@ impl std::error::Error for RPCError {
RPCError::ErrorResponse(_, _) => None,
RPCError::NegotiationTimeout => None,
RPCError::HandlerRejected => None,
RPCError::Disconnected => None,
}
}
}

View File

@ -27,6 +27,8 @@ use std::sync::Arc;
use std::time::Duration;
use types::{ChainSpec, EnrForkId, EthSpec};
use crate::peer_manager::{MIN_OUTBOUND_ONLY_FACTOR, PEER_EXCESS_FACTOR};
pub const NETWORK_KEY_FILENAME: &str = "key";
/// The maximum simultaneous libp2p connections per peer.
const MAX_CONNECTIONS_PER_PEER: u32 = 1;
@ -129,8 +131,17 @@ impl<TSpec: EthSpec> Service<TSpec> {
let limits = ConnectionLimits::default()
.with_max_pending_incoming(Some(5))
.with_max_pending_outgoing(Some(16))
.with_max_established_incoming(Some((config.target_peers as f64 * 1.2) as u32))
.with_max_established_outgoing(Some((config.target_peers as f64 * 1.2) as u32))
.with_max_established_incoming(Some(
(config.target_peers as f32
* (1.0 + PEER_EXCESS_FACTOR - MIN_OUTBOUND_ONLY_FACTOR))
as u32,
))
.with_max_established_outgoing(Some(
(config.target_peers as f32 * (1.0 + PEER_EXCESS_FACTOR)) as u32,
))
.with_max_established_total(Some(
(config.target_peers as f32 * (1.0 + PEER_EXCESS_FACTOR)) as u32,
))
.with_max_established_per_peer(Some(MAX_CONNECTIONS_PER_PEER));
(
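The limits are now derived from the peer manager's headroom constants instead of a hard-coded 1.2 multiplier. Assuming for illustration PEER_EXCESS_FACTOR = 0.1 and MIN_OUTBOUND_ONLY_FACTOR = 0.2 (their actual values live in the peer manager, outside this hunk), a target of 50 peers works out as follows:

// Assumed values for illustration; the real constants may differ.
const PEER_EXCESS_FACTOR: f32 = 0.1;
const MIN_OUTBOUND_ONLY_FACTOR: f32 = 0.2;

fn main() {
    let target_peers = 50_f32;
    // Inbound slots leave headroom for the outbound-only quota.
    let max_in = (target_peers * (1.0 + PEER_EXCESS_FACTOR - MIN_OUTBOUND_ONLY_FACTOR)) as u32;
    // Outbound and total slots share the same excess headroom.
    let max_out = (target_peers * (1.0 + PEER_EXCESS_FACTOR)) as u32;
    assert_eq!((max_in, max_out), (45, 55)); // 50 * 0.9 and 50 * 1.1
}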
@ -221,7 +232,7 @@ impl<TSpec: EthSpec> Service<TSpec> {
let mut subscribed_topics: Vec<GossipKind> = vec![];
for topic_kind in &config.topics {
if swarm.subscribe_kind(topic_kind.clone()) {
if swarm.behaviour_mut().subscribe_kind(topic_kind.clone()) {
subscribed_topics.push(topic_kind.clone());
} else {
warn!(log, "Could not subscribe to topic"; "topic" => %topic_kind);
@ -244,7 +255,9 @@ impl<TSpec: EthSpec> Service<TSpec> {
/// Sends a request to a peer, with a given Id.
pub fn send_request(&mut self, peer_id: PeerId, request_id: RequestId, request: Request) {
self.swarm.send_request(peer_id, request_id, request);
self.swarm
.behaviour_mut()
.send_request(peer_id, request_id, request);
}
/// Informs the peer that their request failed.
@ -255,42 +268,80 @@ impl<TSpec: EthSpec> Service<TSpec> {
error: RPCResponseErrorCode,
reason: String,
) {
self.swarm._send_error_reponse(peer_id, id, error, reason);
self.swarm
.behaviour_mut()
._send_error_reponse(peer_id, id, error, reason);
}
/// Report a peer's action.
pub fn report_peer(&mut self, peer_id: &PeerId, action: PeerAction, source: ReportSource) {
self.swarm.report_peer(peer_id, action, source);
self.swarm
.behaviour_mut()
.peer_manager_mut()
.report_peer(peer_id, action, source);
}
/// Disconnect and ban a peer, providing a reason.
pub fn goodbye_peer(&mut self, peer_id: &PeerId, reason: GoodbyeReason, source: ReportSource) {
self.swarm.goodbye_peer(peer_id, reason, source);
self.swarm
.behaviour_mut()
.goodbye_peer(peer_id, reason, source);
}
/// Sends a response to a peer's request.
pub fn send_response(&mut self, peer_id: PeerId, id: PeerRequestId, response: Response<TSpec>) {
self.swarm.send_successful_response(peer_id, id, response);
self.swarm
.behaviour_mut()
.send_successful_response(peer_id, id, response);
}
pub async fn next_event(&mut self) -> Libp2pEvent<TSpec> {
loop {
match self.swarm.next_event().await {
SwarmEvent::Behaviour(behaviour) => return Libp2pEvent::Behaviour(behaviour),
SwarmEvent::ConnectionEstablished { .. } => {
// A connection could be established with a banned peer. This is
// handled inside the behaviour.
match self.swarm.select_next_some().await {
SwarmEvent::Behaviour(behaviour) => {
// Handle banning here
match &behaviour {
BehaviourEvent::PeerBanned(peer_id) => {
self.swarm.ban_peer_id(*peer_id);
}
BehaviourEvent::PeerUnbanned(peer_id) => {
self.swarm.unban_peer_id(*peer_id);
}
_ => {}
}
return Libp2pEvent::Behaviour(behaviour);
}
SwarmEvent::ConnectionEstablished {
peer_id,
endpoint,
num_established,
} => {
// Inform the peer manager.
// We require the peer's ENR, if it exists, to inject into the peer db.
let enr = self
.swarm
.behaviour_mut()
.discovery_mut()
.enr_of_peer(&peer_id);
self.swarm
.behaviour_mut()
.peer_manager_mut()
.inject_connection_established(peer_id, endpoint, num_established, enr);
}
SwarmEvent::ConnectionClosed {
peer_id,
cause,
endpoint: _,
cause: _,
endpoint,
num_established,
} => {
trace!(self.log, "Connection closed"; "peer_id" => %peer_id, "cause" => ?cause, "connections" => num_established);
// Inform the peer manager.
self.swarm
.behaviour_mut()
.peer_manager_mut()
.inject_connection_closed(peer_id, endpoint, num_established);
}
SwarmEvent::NewListenAddr(multiaddr) => {
return Libp2pEvent::NewListenAddr(multiaddr)
SwarmEvent::NewListenAddr { address, .. } => {
return Libp2pEvent::NewListenAddr(address)
}
SwarmEvent::IncomingConnection {
local_addr,
@ -303,10 +354,10 @@ impl<TSpec: EthSpec> Service<TSpec> {
send_back_addr,
error,
} => {
debug!(self.log, "Failed incoming connection"; "our_addr" => %local_addr, "from" => %send_back_addr, "error" => %error)
debug!(self.log, "Failed incoming connection"; "our_addr" => %local_addr, "from" => %send_back_addr, "error" => %error);
}
SwarmEvent::BannedPeer { .. } => {
// We do not ban peers at the swarm layer, so this should never occur.
SwarmEvent::BannedPeer { peer_id, .. } => {
debug!(self.log, "Banned peer connection rejected"; "peer_id" => %peer_id);
}
SwarmEvent::UnreachableAddr {
peer_id,
@ -315,20 +366,26 @@ impl<TSpec: EthSpec> Service<TSpec> {
attempts_remaining,
} => {
debug!(self.log, "Failed to dial address"; "peer_id" => %peer_id, "address" => %address, "error" => %error, "attempts_remaining" => attempts_remaining);
self.swarm
.behaviour_mut()
.peer_manager_mut()
.inject_dial_failure(&peer_id);
}
SwarmEvent::UnknownPeerUnreachableAddr { address, error } => {
debug!(self.log, "Peer not known at dialed address"; "address" => %address, "error" => %error);
}
SwarmEvent::ExpiredListenAddr(multiaddr) => {
debug!(self.log, "Listen address expired"; "multiaddr" => %multiaddr)
SwarmEvent::ExpiredListenAddr { address, .. } => {
debug!(self.log, "Listen address expired"; "address" => %address)
}
SwarmEvent::ListenerClosed { addresses, reason } => {
SwarmEvent::ListenerClosed {
addresses, reason, ..
} => {
crit!(self.log, "Listener closed"; "addresses" => ?addresses, "reason" => ?reason);
if Swarm::listeners(&self.swarm).count() == 0 {
return Libp2pEvent::ZeroListeners;
}
}
SwarmEvent::ListenerError { error } => {
SwarmEvent::ListenerError { error, .. } => {
// This is non-fatal, but we still check
warn!(self.log, "Listener error"; "error" => ?error);
if Swarm::listeners(&self.swarm).count() == 0 {
@ -336,7 +393,16 @@ impl<TSpec: EthSpec> Service<TSpec> {
}
}
SwarmEvent::Dialing(peer_id) => {
debug!(self.log, "Dialing peer"; "peer_id" => %peer_id);
// We require the peer's ENR, if it exists, to inject into the peer db.
let enr = self
.swarm
.behaviour_mut()
.discovery_mut()
.enr_of_peer(&peer_id);
self.swarm
.behaviour_mut()
.peer_manager_mut()
.inject_dialing(&peer_id, enr);
}
}
}
@ -350,8 +416,8 @@ type BoxedTransport = Boxed<(PeerId, StreamMuxerBox)>;
fn build_transport(
local_private_key: Keypair,
) -> std::io::Result<(BoxedTransport, Arc<BandwidthSinks>)> {
let transport = libp2p::tcp::TokioTcpConfig::new().nodelay(true);
let transport = libp2p::dns::DnsConfig::new(transport)?;
let tcp = libp2p::tcp::TokioTcpConfig::new().nodelay(true);
let transport = libp2p::dns::TokioDnsConfig::system(tcp)?;
#[cfg(feature = "libp2p-websocket")]
let transport = {
let trans_clone = transport.clone();
@ -365,13 +431,17 @@ fn build_transport(
mplex_config.set_max_buffer_size(256);
mplex_config.set_max_buffer_behaviour(libp2p::mplex::MaxBufferBehaviour::Block);
// yamux config
let mut yamux_config = libp2p::yamux::YamuxConfig::default();
yamux_config.set_window_update_mode(libp2p::yamux::WindowUpdateMode::on_read());
// Authentication
Ok((
transport
.upgrade(core::upgrade::Version::V1)
.authenticate(generate_noise_config(&local_private_key))
.multiplex(core::upgrade::SelectUpgrade::new(
libp2p::yamux::YamuxConfig::default(),
yamux_config,
mplex_config,
))
.timeout(Duration::from_secs(10))

View File

@ -144,19 +144,19 @@ impl GossipTopic {
}
}
impl Into<Topic> for GossipTopic {
fn into(self) -> Topic {
Topic::new(self)
impl From<GossipTopic> for Topic {
fn from(topic: GossipTopic) -> Topic {
Topic::new(topic)
}
}
impl Into<String> for GossipTopic {
fn into(self) -> String {
let encoding = match self.encoding {
impl From<GossipTopic> for String {
fn from(topic: GossipTopic) -> String {
let encoding = match topic.encoding {
GossipEncoding::SSZSnappy => SSZ_SNAPPY_ENCODING_POSTFIX,
};
let kind = match self.kind {
let kind = match topic.kind {
GossipKind::BeaconBlock => BEACON_BLOCK_TOPIC.into(),
GossipKind::BeaconAggregateAndProof => BEACON_AGGREGATE_AND_PROOF_TOPIC.into(),
GossipKind::VoluntaryExit => VOLUNTARY_EXIT_TOPIC.into(),
@ -167,7 +167,7 @@ impl Into<String> for GossipTopic {
format!(
"/{}/{}/{}/{}",
TOPIC_PREFIX,
hex::encode(self.fork_digest),
hex::encode(topic.fork_digest),
kind,
encoding
)
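The resulting topic strings keep the /{prefix}/{fork_digest}/{kind}/{encoding} shape. A quick illustration of that shape with placeholder values (not real fork data; the real code uses TOPIC_PREFIX, the live fork digest and constants such as BEACON_BLOCK_TOPIC):

fn main() {
    // Placeholder fork digest, hex-encoded byte by byte.
    let fork_digest: [u8; 4] = [0xb5, 0x30, 0x3f, 0x2a];
    let hex: String = fork_digest.iter().map(|b| format!("{:02x}", b)).collect();
    let topic = format!("/{}/{}/{}/{}", "eth2", hex, "beacon_block", "ssz_snappy");
    assert_eq!(topic, "/eth2/b5303f2a/beacon_block/ssz_snappy");
}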

View File

@ -126,7 +126,7 @@ pub async fn build_libp2p_instance(
#[allow(dead_code)]
pub fn get_enr(node: &LibP2PService<E>) -> Enr {
node.swarm.local_enr()
node.swarm.behaviour().local_enr()
}
// Returns `n` libp2p peers in fully connected topology.
@ -171,7 +171,7 @@ pub async fn build_node_pair(
let mut sender = build_libp2p_instance(rt.clone(), vec![], sender_log).await;
let mut receiver = build_libp2p_instance(rt, vec![], receiver_log).await;
let receiver_multiaddr = receiver.swarm.local_enr().multiaddr()[1].clone();
let receiver_multiaddr = receiver.swarm.behaviour_mut().local_enr().multiaddr()[1].clone();
// let the two nodes set up listeners
let sender_fut = async {

View File

@ -53,10 +53,10 @@ fn test_status_rpc() {
let sender_future = async {
loop {
match sender.next_event().await {
Libp2pEvent::Behaviour(BehaviourEvent::PeerDialed(peer_id)) => {
Libp2pEvent::Behaviour(BehaviourEvent::PeerConnectedOutgoing(peer_id)) => {
// Send a STATUS message
debug!(log, "Sending RPC");
sender.swarm.send_request(
sender.swarm.behaviour_mut().send_request(
peer_id,
RequestId::Sync(10),
rpc_request.clone(),
@ -90,7 +90,7 @@ fn test_status_rpc() {
if request == rpc_request {
// send the response
debug!(log, "Receiver Received");
receiver.swarm.send_successful_response(
receiver.swarm.behaviour_mut().send_successful_response(
peer_id,
id,
rpc_response.clone(),
@ -149,10 +149,10 @@ fn test_blocks_by_range_chunked_rpc() {
let sender_future = async {
loop {
match sender.next_event().await {
Libp2pEvent::Behaviour(BehaviourEvent::PeerDialed(peer_id)) => {
Libp2pEvent::Behaviour(BehaviourEvent::PeerConnectedOutgoing(peer_id)) => {
// Send a BlocksByRange request
debug!(log, "Sending RPC");
sender.swarm.send_request(
sender.swarm.behaviour_mut().send_request(
peer_id,
RequestId::Sync(10),
rpc_request.clone(),
@ -197,14 +197,14 @@ fn test_blocks_by_range_chunked_rpc() {
// send the response
warn!(log, "Receiver got request");
for _ in 1..=messages_to_send {
receiver.swarm.send_successful_response(
receiver.swarm.behaviour_mut().send_successful_response(
peer_id,
id,
rpc_response.clone(),
);
}
// send the stream termination
receiver.swarm.send_successful_response(
receiver.swarm.behaviour_mut().send_successful_response(
peer_id,
id,
Response::BlocksByRange(None),
@ -263,10 +263,10 @@ fn test_blocks_by_range_chunked_rpc_terminates_correctly() {
let sender_future = async {
loop {
match sender.next_event().await {
Libp2pEvent::Behaviour(BehaviourEvent::PeerDialed(peer_id)) => {
Libp2pEvent::Behaviour(BehaviourEvent::PeerConnectedOutgoing(peer_id)) => {
// Send a BlocksByRange request
debug!(log, "Sending RPC");
sender.swarm.send_request(
sender.swarm.behaviour_mut().send_request(
peer_id,
RequestId::Sync(10),
rpc_request.clone(),
@ -335,7 +335,7 @@ fn test_blocks_by_range_chunked_rpc_terminates_correctly() {
if message_info.is_some() {
messages_sent += 1;
let (peer_id, stream_id) = message_info.as_ref().unwrap();
receiver.swarm.send_successful_response(
receiver.swarm.behaviour_mut().send_successful_response(
*peer_id,
*stream_id,
rpc_response.clone(),
@ -395,10 +395,10 @@ fn test_blocks_by_range_single_empty_rpc() {
let sender_future = async {
loop {
match sender.next_event().await {
Libp2pEvent::Behaviour(BehaviourEvent::PeerDialed(peer_id)) => {
Libp2pEvent::Behaviour(BehaviourEvent::PeerConnectedOutgoing(peer_id)) => {
// Send a BlocksByRange request
debug!(log, "Sending RPC");
sender.swarm.send_request(
sender.swarm.behaviour_mut().send_request(
peer_id,
RequestId::Sync(10),
rpc_request.clone(),
@ -441,14 +441,14 @@ fn test_blocks_by_range_single_empty_rpc() {
warn!(log, "Receiver got request");
for _ in 1..=messages_to_send {
receiver.swarm.send_successful_response(
receiver.swarm.behaviour_mut().send_successful_response(
peer_id,
id,
rpc_response.clone(),
);
}
// send the stream termination
receiver.swarm.send_successful_response(
receiver.swarm.behaviour_mut().send_successful_response(
peer_id,
id,
Response::BlocksByRange(None),
@ -510,10 +510,10 @@ fn test_blocks_by_root_chunked_rpc() {
let sender_future = async {
loop {
match sender.next_event().await {
Libp2pEvent::Behaviour(BehaviourEvent::PeerDialed(peer_id)) => {
Libp2pEvent::Behaviour(BehaviourEvent::PeerConnectedOutgoing(peer_id)) => {
// Send a BlocksByRoot request
debug!(log, "Sending RPC");
sender.swarm.send_request(
sender.swarm.behaviour_mut().send_request(
peer_id,
RequestId::Sync(10),
rpc_request.clone(),
@ -556,7 +556,7 @@ fn test_blocks_by_root_chunked_rpc() {
debug!(log, "Receiver got request");
for _ in 1..=messages_to_send {
receiver.swarm.send_successful_response(
receiver.swarm.behaviour_mut().send_successful_response(
peer_id,
id,
rpc_response.clone(),
@ -564,7 +564,7 @@ fn test_blocks_by_root_chunked_rpc() {
debug!(log, "Sending message");
}
// send the stream termination
receiver.swarm.send_successful_response(
receiver.swarm.behaviour_mut().send_successful_response(
peer_id,
id,
Response::BlocksByRange(None),
@ -631,10 +631,10 @@ fn test_blocks_by_root_chunked_rpc_terminates_correctly() {
let sender_future = async {
loop {
match sender.next_event().await {
Libp2pEvent::Behaviour(BehaviourEvent::PeerDialed(peer_id)) => {
Libp2pEvent::Behaviour(BehaviourEvent::PeerConnectedOutgoing(peer_id)) => {
// Send a BlocksByRoot request
debug!(log, "Sending RPC");
sender.swarm.send_request(
sender.swarm.behaviour_mut().send_request(
peer_id,
RequestId::Sync(10),
rpc_request.clone(),
@ -703,7 +703,7 @@ fn test_blocks_by_root_chunked_rpc_terminates_correctly() {
if message_info.is_some() {
messages_sent += 1;
let (peer_id, stream_id) = message_info.as_ref().unwrap();
receiver.swarm.send_successful_response(
receiver.swarm.behaviour_mut().send_successful_response(
*peer_id,
*stream_id,
rpc_response.clone(),
@ -746,10 +746,10 @@ fn test_goodbye_rpc() {
let sender_future = async {
loop {
match sender.next_event().await {
Libp2pEvent::Behaviour(BehaviourEvent::PeerDialed(peer_id)) => {
Libp2pEvent::Behaviour(BehaviourEvent::PeerConnectedOutgoing(peer_id)) => {
// Send a goodbye and disconnect
debug!(log, "Sending RPC");
sender.swarm.goodbye_peer(
sender.swarm.behaviour_mut().goodbye_peer(
&peer_id,
GoodbyeReason::IrrelevantNetwork,
ReportSource::SyncService,

View File

@ -20,7 +20,7 @@ merkle_proof = { path = "../../consensus/merkle_proof" }
eth2_ssz = "0.1.2"
eth2_hashing = "0.1.0"
tree_hash = "0.1.1"
tokio = { version = "1.1.0", features = ["full"] }
tokio = { version = "1.7.1", features = ["full"] }
parking_lot = "0.11.0"
slog = "2.5.2"
exit-future = "0.2.0"

View File

@ -7,7 +7,7 @@ edition = "2018"
[dependencies]
warp = { git = "https://github.com/paulhauner/warp ", branch = "cors-wildcard" }
serde = { version = "1.0.116", features = ["derive"] }
tokio = { version = "1.1.0", features = ["macros","sync"] }
tokio = { version = "1.7.1", features = ["macros","sync"] }
tokio-stream = { version = "0.1.3", features = ["sync"] }
tokio-util = "0.6.3"
parking_lot = "0.11.0"
@ -34,5 +34,4 @@ futures = "0.3.8"
store = { path = "../store" }
environment = { path = "../../lighthouse/environment" }
tree_hash = "0.1.1"
discv5 = { version = "0.1.0-beta.5", features = ["libp2p"] }
sensitive_url = { path = "../../common/sensitive_url" }

View File

@ -5,11 +5,11 @@ use beacon_chain::{
test_utils::{AttestationStrategy, BeaconChainHarness, BlockStrategy, EphemeralHarnessType},
BeaconChain, StateSkipConfig, WhenSlotSkipped, MAXIMUM_GOSSIP_CLOCK_DISPARITY,
};
use discv5::enr::{CombinedKey, EnrBuilder};
use environment::null_logger;
use eth2::Error;
use eth2::StatusCode;
use eth2::{types::*, BeaconNodeHttpClient, Timeouts};
use eth2_libp2p::discv5::enr::{CombinedKey, EnrBuilder};
use eth2_libp2p::{
rpc::methods::MetaData,
types::{EnrBitfield, SyncState},

View File

@ -23,7 +23,7 @@ warp_utils = { path = "../../common/warp_utils" }
malloc_utils = { path = "../../common/malloc_utils" }
[dev-dependencies]
tokio = { version = "1.1.0", features = ["sync"] }
tokio = { version = "1.7.1", features = ["sync"] }
reqwest = { version = "0.11.0", features = ["json"] }
environment = { path = "../../lighthouse/environment" }
types = { path = "../../consensus/types" }

View File

@ -15,7 +15,6 @@ slog-term = "2.6.0"
slog-async = "2.5.0"
logging = { path = "../../common/logging" }
environment = { path = "../../lighthouse/environment" }
discv5 = { version = "0.1.0-beta.3" }
[dependencies]
beacon_chain = { path = "../beacon_chain" }
@ -32,7 +31,7 @@ eth2_ssz_types = { path = "../../consensus/ssz_types" }
tree_hash = "0.1.1"
futures = "0.3.7"
error-chain = "0.12.4"
tokio = { version = "1.1.0", features = ["full"] }
tokio = { version = "1.7.1", features = ["full"] }
tokio-stream = "0.1.3"
parking_lot = "0.11.0"
smallvec = "1.6.1"

View File

@ -8,8 +8,8 @@ use beacon_chain::test_utils::{
AttestationStrategy, BeaconChainHarness, BlockStrategy, EphemeralHarnessType,
};
use beacon_chain::{BeaconChain, MAXIMUM_GOSSIP_CLOCK_DISPARITY};
use discv5::enr::{CombinedKey, EnrBuilder};
use environment::{null_logger, Environment, EnvironmentBuilder};
use eth2_libp2p::discv5::enr::{CombinedKey, EnrBuilder};
use eth2_libp2p::{rpc::methods::MetaData, types::EnrBitfield, MessageId, NetworkGlobals, PeerId};
use slot_clock::SlotClock;
use std::cmp;

View File

@ -1,7 +1,7 @@
use super::work_reprocessing_queue::ReprocessQueueMessage;
use crate::{service::NetworkMessage, sync::SyncMessage};
use beacon_chain::{BeaconChain, BeaconChainTypes};
use slog::{error, Logger};
use slog::{debug, Logger};
use std::sync::Arc;
use tokio::sync::mpsc;
@ -27,7 +27,7 @@ impl<T: BeaconChainTypes> Worker<T> {
/// Creates a log if there is an internal error.
fn send_sync_message(&self, message: SyncMessage<T::EthSpec>) {
self.sync_tx.send(message).unwrap_or_else(|e| {
error!(self.log, "Could not send message to the sync service";
debug!(self.log, "Could not send message to the sync service, likely shutdown";
"error" => %e)
});
}
@ -37,7 +37,7 @@ impl<T: BeaconChainTypes> Worker<T> {
/// Creates a log if there is an internal error.
fn send_network_message(&self, message: NetworkMessage<T::EthSpec>) {
self.network_tx.send(message).unwrap_or_else(|e| {
error!(self.log, "Could not send message to the network service";
debug!(self.log, "Could not send message to the network service, likely shutdown";
"error" => %e)
});
}

View File

@ -27,6 +27,13 @@ pub fn persist_dht<E: EthSpec, Hot: ItemStore<E>, Cold: ItemStore<E>>(
store.put_item(&DHT_DB_KEY, &PersistedDht { enrs })
}
/// Attempts to clear any DHT entries.
pub fn clear_dht<E: EthSpec, Hot: ItemStore<E>, Cold: ItemStore<E>>(
store: Arc<HotColdDB<E, Hot, Cold>>,
) -> Result<(), store::Error> {
store.hot_db.delete::<PersistedDht>(&DHT_DB_KEY)
}
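On shutdown, the network service now clears any stale DHT record before persisting the current ENRs (see the Drop impl change further down). A toy clear-then-persist sequence against a plain map; the store and key here are stand-ins, not the real HotColdDB API:

use std::collections::HashMap;

const DHT_DB_KEY: &str = "dht";

fn main() {
    // Stand-in for the hot database: key -> serialized ENR list.
    let mut hot_db: HashMap<&str, Vec<String>> = HashMap::new();
    hot_db.insert(DHT_DB_KEY, vec!["enr-from-previous-run".to_string()]);

    // clear_dht: drop whatever a previous run persisted...
    hot_db.remove(DHT_DB_KEY);
    // ...then persist_dht: write the ENRs currently in the routing table.
    hot_db.insert(DHT_DB_KEY, vec!["enr-currently-in-table".to_string()]);

    assert_eq!(hot_db[DHT_DB_KEY], vec!["enr-currently-in-table".to_string()]);
}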
/// Wrapper around DHT for persistence to disk.
pub struct PersistedDht {
pub enrs: Vec<Enr>,

View File

@ -1,4 +1,4 @@
use crate::persisted_dht::{load_dht, persist_dht};
use crate::persisted_dht::{clear_dht, load_dht, persist_dht};
use crate::router::{Router, RouterMessage};
use crate::{
attestation_service::{AttServiceMessage, AttestationService},
@ -178,7 +178,7 @@ impl<T: BeaconChainTypes> NetworkService<T> {
"Loading peers into the routing table"; "peers" => enrs_to_load.len()
);
for enr in enrs_to_load {
libp2p.swarm.add_enr(enr.clone());
libp2p.swarm.behaviour_mut().add_enr(enr.clone());
}
}
@ -251,7 +251,7 @@ fn spawn_service<T: BeaconChainTypes>(
.map(|gauge| gauge.reset());
}
metrics::update_gossip_metrics::<T::EthSpec>(
&service.libp2p.swarm.gs(),
&service.libp2p.swarm.behaviour_mut().gs(),
&service.network_globals,
);
// update sync metrics
@ -287,8 +287,7 @@ fn spawn_service<T: BeaconChainTypes>(
})
)
}).unwrap_or(None) {
if (*service.libp2p.swarm)
.update_gossipsub_parameters(active_validators, slot).is_err() {
if service.libp2p.swarm.behaviour_mut().update_gossipsub_parameters(active_validators, slot).is_err() {
error!(
service.log,
"Failed to update gossipsub parameters";
@ -314,7 +313,7 @@ fn spawn_service<T: BeaconChainTypes>(
service.upnp_mappings = (tcp_socket.map(|s| s.port()), udp_socket.map(|s| s.port()));
// If there is an external TCP port update, modify our local ENR.
if let Some(tcp_socket) = tcp_socket {
if let Err(e) = service.libp2p.swarm.peer_manager().discovery_mut().update_enr_tcp_port(tcp_socket.port()) {
if let Err(e) = service.libp2p.swarm.behaviour_mut().discovery_mut().update_enr_tcp_port(tcp_socket.port()) {
warn!(service.log, "Failed to update ENR"; "error" => e);
}
}
@ -322,7 +321,7 @@ fn spawn_service<T: BeaconChainTypes>(
// UPnP mappings
if !service.discovery_auto_update {
if let Some(udp_socket) = udp_socket {
if let Err(e) = service.libp2p.swarm.peer_manager().discovery_mut().update_enr_udp_socket(udp_socket) {
if let Err(e) = service.libp2p.swarm.behaviour_mut().discovery_mut().update_enr_udp_socket(udp_socket) {
warn!(service.log, "Failed to update ENR"; "error" => e);
}
}
@ -341,6 +340,7 @@ fn spawn_service<T: BeaconChainTypes>(
service
.libp2p
.swarm
.behaviour_mut()
.report_message_validation_result(
&propagation_source, message_id, validation_result
);
@ -359,7 +359,7 @@ fn spawn_service<T: BeaconChainTypes>(
"topics" => ?topic_kinds
);
metrics::expose_publish_metrics(&messages);
service.libp2p.swarm.publish(messages);
service.libp2p.swarm.behaviour_mut().publish(messages);
}
NetworkMessage::ReportPeer { peer_id, action, source } => service.libp2p.report_peer(&peer_id, action, source),
NetworkMessage::GoodbyePeer { peer_id, reason, source } => service.libp2p.goodbye_peer(&peer_id, reason, source),
@ -375,7 +375,7 @@ fn spawn_service<T: BeaconChainTypes>(
let already_subscribed = service.network_globals.gossipsub_subscriptions.read().clone();
let already_subscribed = already_subscribed.iter().map(|x| x.kind()).collect::<std::collections::HashSet<_>>();
for topic_kind in eth2_libp2p::types::CORE_TOPICS.iter().filter(|topic| already_subscribed.get(topic).is_none()) {
if service.libp2p.swarm.subscribe_kind(topic_kind.clone()) {
if service.libp2p.swarm.behaviour_mut().subscribe_kind(topic_kind.clone()) {
subscribed_topics.push(topic_kind.clone());
} else {
warn!(service.log, "Could not subscribe to topic"; "topic" => %topic_kind);
@ -387,9 +387,9 @@ fn spawn_service<T: BeaconChainTypes>(
for subnet_id in 0..<<T as BeaconChainTypes>::EthSpec as EthSpec>::SubnetBitfieldLength::to_u64() {
let subnet_id = SubnetId::new(subnet_id);
let topic_kind = eth2_libp2p::types::GossipKind::Attestation(subnet_id);
if service.libp2p.swarm.subscribe_kind(topic_kind.clone()) {
if service.libp2p.swarm.behaviour_mut().subscribe_kind(topic_kind.clone()) {
// Update the ENR bitfield.
service.libp2p.swarm.update_enr_subnet(subnet_id, true);
service.libp2p.swarm.behaviour_mut().update_enr_subnet(subnet_id, true);
subscribed_topics.push(topic_kind.clone());
} else {
warn!(service.log, "Could not subscribe to topic"; "topic" => %topic_kind);
@ -407,19 +407,19 @@ fn spawn_service<T: BeaconChainTypes>(
Some(attestation_service_message) = service.attestation_service.next() => {
match attestation_service_message {
AttServiceMessage::Subscribe(subnet_id) => {
service.libp2p.swarm.subscribe_to_subnet(subnet_id);
service.libp2p.swarm.behaviour_mut().subscribe_to_subnet(subnet_id);
}
AttServiceMessage::Unsubscribe(subnet_id) => {
service.libp2p.swarm.unsubscribe_from_subnet(subnet_id);
service.libp2p.swarm.behaviour_mut().unsubscribe_from_subnet(subnet_id);
}
AttServiceMessage::EnrAdd(subnet_id) => {
service.libp2p.swarm.update_enr_subnet(subnet_id, true);
service.libp2p.swarm.behaviour_mut().update_enr_subnet(subnet_id, true);
}
AttServiceMessage::EnrRemove(subnet_id) => {
service.libp2p.swarm.update_enr_subnet(subnet_id, false);
service.libp2p.swarm.behaviour_mut().update_enr_subnet(subnet_id, false);
}
AttServiceMessage::DiscoverPeers(subnets_to_discover) => {
service.libp2p.swarm.discover_subnet_peers(subnets_to_discover);
service.libp2p.swarm.behaviour_mut().discover_subnet_peers(subnets_to_discover);
}
}
}
@ -427,17 +427,15 @@ fn spawn_service<T: BeaconChainTypes>(
// poll the swarm
match libp2p_event {
Libp2pEvent::Behaviour(event) => match event {
BehaviourEvent::PeerDialed(peer_id) => {
BehaviourEvent::PeerConnectedOutgoing(peer_id) => {
let _ = service
.router_send
.send(RouterMessage::PeerDialed(peer_id))
.map_err(|_| {
debug!(service.log, "Failed to send peer dialed to router"); });
},
BehaviourEvent::PeerConnected(_peer_id) => {
// A peer has connected to us
// We currently do not perform any action here.
BehaviourEvent::PeerConnectedIncoming(_) | BehaviourEvent::PeerBanned(_) | BehaviourEvent::PeerUnbanned(_) => {
// No action required for these events.
},
BehaviourEvent::PeerDisconnected(peer_id) => {
let _ = service
@ -541,6 +539,7 @@ fn spawn_service<T: BeaconChainTypes>(
service
.libp2p
.swarm
.behaviour_mut()
.update_fork_version(service.beacon_chain.enr_fork_id());
service.next_fork_update = next_fork_delay(&service.beacon_chain);
}
@ -566,12 +565,16 @@ fn next_fork_delay<T: BeaconChainTypes>(
impl<T: BeaconChainTypes> Drop for NetworkService<T> {
fn drop(&mut self) {
// network thread is terminating
let enrs = self.libp2p.swarm.enr_entries();
let enrs = self.libp2p.swarm.behaviour_mut().enr_entries();
debug!(
self.log,
"Persisting DHT to store";
"Number of peers" => enrs.len(),
);
if let Err(e) = clear_dht::<T::EthSpec, T::HotStore, T::ColdStore>(self.store.clone()) {
error!(self.log, "Failed to clear old DHT entries"; "error" => ?e);
}
// Still try to update new entries
match persist_dht::<T::EthSpec, T::HotStore, T::ColdStore>(self.store.clone(), enrs) {
Err(e) => error!(
self.log,

View File

@ -8,7 +8,7 @@ edition = "2018"
beacon_chain = { path = "../beacon_chain" }
types = { path = "../../consensus/types" }
slot_clock = { path = "../../common/slot_clock" }
tokio = { version = "1.1.0", features = ["full"] }
tokio = { version = "1.7.1", features = ["full"] }
slog = "2.5.2"
parking_lot = "0.11.0"
futures = "0.3.7"

View File

@ -13,7 +13,7 @@ eth2_network_config = { path = "../common/eth2_network_config" }
eth2_ssz = "0.1.2"
slog = "2.5.2"
sloggers = "1.0.1"
tokio = "1.1.0"
tokio = "1.7.1"
log = "0.4.11"
slog-term = "2.6.0"
logging = { path = "../common/logging" }

View File

@ -89,6 +89,7 @@ pub async fn run<T: EthSpec>(config: BootNodeConfig<T>, log: slog::Logger) {
// Ignore these events here
}
Discv5Event::EnrAdded { .. } => {} // Ignore
Discv5Event::TalkRequest(_) => {} // Ignore
Discv5Event::NodeInserted { .. } => {} // Ignore
Discv5Event::SocketUpdated(socket_addr) => {
info!(log, "External socket address updated"; "socket_addr" => format!("{:?}", socket_addr));

View File

@ -19,4 +19,4 @@ serde_yaml = "0.8.13"
types = { path = "../../consensus/types"}
eth2_ssz = "0.1.2"
eth2_config = { path = "../eth2_config"}
enr = { version = "0.5.0", features = ["ed25519", "k256"] }
enr = { version = "0.5.1", features = ["ed25519", "k256"] }

View File

@ -9,4 +9,4 @@ futures = "0.3.7"
tokio-util = { version = "0.6.2", features = ["time"] }
[dev-dependencies]
tokio = { version = "1.1.0", features = ["time", "rt-multi-thread", "macros"] }
tokio = { version = "1.7.1", features = ["time", "rt-multi-thread", "macros"] }

View File

@ -10,7 +10,7 @@ edition = "2018"
reqwest = { version = "0.11.0", features = ["json","stream"] }
futures = "0.3.7"
task_executor = { path = "../task_executor" }
tokio = "1.1.0"
tokio = "1.7.1"
eth2 = {path = "../eth2"}
serde_json = "1.0.58"
serde = "1.0.116"

View File

@ -11,6 +11,6 @@ remote_signer_test = { path = "../../testing/remote_signer_test" }
[dependencies]
reqwest = { version = "0.11.0", features = ["json"] }
serde = { version = "1.0.116", features = ["derive"] }
tokio = { version = "1.1.0", features = ["time"] }
tokio = { version = "1.7.1", features = ["time"] }
types = { path = "../../consensus/types" }
sensitive_url = { path = "../sensitive_url" }

View File

@ -5,7 +5,7 @@ authors = ["Sigma Prime <contact@sigmaprime.io>"]
edition = "2018"
[dependencies]
tokio = { version = "1.1.0", features = ["rt"] }
tokio = { version = "1.7.1", features = ["rt"] }
slog = "2.5.2"
futures = "0.3.7"
exit-future = "0.2.0"

View File

@ -14,7 +14,7 @@ beacon_chain = { path = "../../beacon_node/beacon_chain" }
state_processing = { path = "../../consensus/state_processing" }
safe_arith = { path = "../../consensus/safe_arith" }
serde = { version = "1.0.116", features = ["derive"] }
tokio = { version = "1.1.0", features = ["sync"] }
tokio = { version = "1.7.1", features = ["sync"] }
headers = "0.3.2"
lighthouse_metrics = { path = "../lighthouse_metrics" }
lazy_static = "1.4.0"

View File

@ -27,7 +27,7 @@ dirs = "3.0.1"
genesis = { path = "../beacon_node/genesis" }
deposit_contract = { path = "../common/deposit_contract" }
tree_hash = "0.1.1"
tokio = { version = "1.1.0", features = ["full"] }
tokio = { version = "1.7.1", features = ["full"] }
clap_utils = { path = "../common/clap_utils" }
eth2_libp2p = { path = "../beacon_node/eth2_libp2p" }
validator_dir = { path = "../common/validator_dir", features = ["insecure_keys"] }

View File

@ -19,7 +19,7 @@ spec-minimal = []
[dependencies]
beacon_node = { "path" = "../beacon_node" }
tokio = "1.1.0"
tokio = "1.7.1"
slog = { version = "2.5.2", features = ["max_level_trace"] }
sloggers = "1.0.1"
types = { "path" = "../consensus/types" }

View File

@ -5,7 +5,7 @@ authors = ["Paul Hauner <paul@paulhauner.com>"]
edition = "2018"
[dependencies]
tokio = { version = "1.1.0", features = ["macros", "rt", "rt-multi-thread" ] }
tokio = { version = "1.7.1", features = ["macros", "rt", "rt-multi-thread" ] }
slog = { version = "2.5.2", features = ["max_level_trace"] }
sloggers = "1.0.1"
types = { "path" = "../../consensus/types" }

View File

@ -14,6 +14,6 @@ slog = "2.5.2"
slot_clock = { path = "../../common/slot_clock" }
state_processing = { path = "../../consensus/state_processing" }
task_executor = { path = "../../common/task_executor" }
tokio = { version = "1.1.0", features = ["full"] }
tokio = { version = "1.7.1", features = ["full"] }
tokio-stream = "0.1.3"
types = { path = "../../consensus/types" }

View File

@ -5,7 +5,7 @@ authors = ["Paul Hauner <paul@paulhauner.com>"]
edition = "2018"
[dependencies]
tokio = { version = "1.1.0", features = ["time"] }
tokio = { version = "1.7.1", features = ["time"] }
tokio-compat-02 = "0.2.0"
web3 = { version = "0.16.0", default-features = false, features = ["http-tls", "signing", "ws-tls-tokio"] }
futures = "0.3.7"

View File

@ -15,7 +15,7 @@ reqwest = { version = "0.11.0", features = ["blocking", "json"] }
serde = { version = "1.0.116", features = ["derive"] }
serde_json = "1.0.58"
tempfile = "3.1.0"
tokio = { version = "1.1.0", features = ["time"] }
tokio = { version = "1.7.1", features = ["time"] }
types = { path = "../../consensus/types" }
sensitive_url = { path = "../../common/sensitive_url" }

View File

@ -13,7 +13,7 @@ types = { path = "../../consensus/types" }
validator_client = { path = "../../validator_client" }
parking_lot = "0.11.0"
futures = "0.3.7"
tokio = "1.1.0"
tokio = "1.7.1"
eth1_test_rig = { path = "../eth1_test_rig" }
env_logger = "0.8.2"
clap = "2.33.3"

View File

@ -9,7 +9,7 @@ name = "validator_client"
path = "src/lib.rs"
[dev-dependencies]
tokio = { version = "1.1.0", features = ["time", "rt-multi-thread", "macros"] }
tokio = { version = "1.7.1", features = ["time", "rt-multi-thread", "macros"] }
deposit_contract = { path = "../common/deposit_contract" }
[dependencies]
@ -30,7 +30,7 @@ serde_yaml = "0.8.13"
slog = { version = "2.5.2", features = ["max_level_trace", "release_max_level_trace"] }
slog-async = "2.5.0"
slog-term = "2.6.0"
tokio = { version = "1.1.0", features = ["time"] }
tokio = { version = "1.7.1", features = ["time"] }
futures = "0.3.7"
dirs = "3.0.1"
directory = { path = "../common/directory" }