Add gossipsub as a Lighthouse behaviour (#5066)

* Move gossipsub as a lighthouse behaviour

* Update dependencies, pin to corrected libp2p version

* Merge latest unstable

* Fix test

* Remove unused dep

* Fix cargo.lock

* Re-order behaviour, pin upstream libp2p

* Pin discv5 to latest version
This commit is contained in:
Age Manning 2024-01-31 17:32:31 +00:00 committed by GitHub
parent b9c519d565
commit 4273004bd9
No known key found for this signature in database
GPG Key ID: B5690EEEBB952194
41 changed files with 17722 additions and 184 deletions

618
Cargo.lock generated

File diff suppressed because it is too large Load Diff

View File

@ -105,7 +105,8 @@ criterion = "0.3"
delay_map = "0.3" delay_map = "0.3"
derivative = "2" derivative = "2"
dirs = "3" dirs = "3"
discv5 = { git="https://github.com/sigp/discv5", rev="e30a2c31b7ac0c57876458b971164654dfa4513b", features = ["libp2p"] } either = "1.9"
discv5 = { version = "0.4.1", features = ["libp2p"] }
env_logger = "0.9" env_logger = "0.9"
error-chain = "0.12" error-chain = "0.12"
ethereum-types = "0.14" ethereum-types = "0.14"
@ -160,6 +161,7 @@ tempfile = "3"
tokio = { version = "1", features = ["rt-multi-thread", "sync", "signal"] } tokio = { version = "1", features = ["rt-multi-thread", "sync", "signal"] }
tokio-stream = { version = "0.1", features = ["sync"] } tokio-stream = { version = "0.1", features = ["sync"] }
tokio-util = { version = "0.6", features = ["codec", "compat", "time"] } tokio-util = { version = "0.6", features = ["codec", "compat", "time"] }
tracing = "0.1.40"
tracing-appender = "0.2" tracing-appender = "0.2"
tracing-core = "0.1" tracing-core = "0.1"
tracing-log = "0.2" tracing-log = "0.2"

View File

@ -42,14 +42,29 @@ superstruct = { workspace = true }
prometheus-client = "0.22.0" prometheus-client = "0.22.0"
unused_port = { workspace = true } unused_port = { workspace = true }
delay_map = { workspace = true } delay_map = { workspace = true }
void = "1" tracing = { workspace = true }
libp2p-mplex = { git = "https://github.com/sigp/rust-libp2p/", rev = "cfa3275ca17e502799ed56e555b6c0611752e369" } byteorder = { workspace = true }
bytes = { workspace = true }
either = { workspace = true }
# Local dependencies
futures-ticker = "0.0.3"
futures-timer = "3.0.2"
getrandom = "0.2.11"
hex_fmt = "0.3.0"
instant = "0.1.12"
quick-protobuf = "0.8"
void = "1.0.2"
async-channel = "1.9.0"
asynchronous-codec = "0.7.0"
base64 = "0.21.5"
libp2p-mplex = "0.41"
quick-protobuf-codec = "0.3"
[dependencies.libp2p] [dependencies.libp2p]
git = "https://github.com/sigp/rust-libp2p/" version = "0.53"
rev = "cfa3275ca17e502799ed56e555b6c0611752e369"
default-features = false default-features = false
features = ["identify", "yamux", "noise", "gossipsub", "dns", "tcp", "tokio", "plaintext", "secp256k1", "macros", "ecdsa", "metrics", "quic"] features = ["identify", "yamux", "noise", "dns", "tcp", "tokio", "plaintext", "secp256k1", "macros", "ecdsa", "metrics", "quic"]
[dev-dependencies] [dev-dependencies]
slog-term = { workspace = true } slog-term = { workspace = true }
@ -58,6 +73,7 @@ tempfile = { workspace = true }
exit-future = { workspace = true } exit-future = { workspace = true }
quickcheck = { workspace = true } quickcheck = { workspace = true }
quickcheck_macros = { workspace = true } quickcheck_macros = { workspace = true }
async-std = { version = "1.6.3", features = ["unstable"] }
[features] [features]
libp2p-websocket = [] libp2p-websocket = []

View File

@ -1,3 +1,4 @@
use crate::gossipsub;
use crate::listen_addr::{ListenAddr, ListenAddress}; use crate::listen_addr::{ListenAddr, ListenAddress};
use crate::rpc::config::{InboundRateLimiterConfig, OutboundRateLimiterConfig}; use crate::rpc::config::{InboundRateLimiterConfig, OutboundRateLimiterConfig};
use crate::types::GossipKind; use crate::types::GossipKind;
@ -5,7 +6,6 @@ use crate::{Enr, PeerIdSerialized};
use directory::{ use directory::{
DEFAULT_BEACON_NODE_DIR, DEFAULT_HARDCODED_NETWORK, DEFAULT_NETWORK_DIR, DEFAULT_ROOT_DIR, DEFAULT_BEACON_NODE_DIR, DEFAULT_HARDCODED_NETWORK, DEFAULT_NETWORK_DIR, DEFAULT_ROOT_DIR,
}; };
use libp2p::gossipsub;
use libp2p::Multiaddr; use libp2p::Multiaddr;
use serde::{Deserialize, Serialize}; use serde::{Deserialize, Serialize};
use sha2::{Digest, Sha256}; use sha2::{Digest, Sha256};
@ -158,7 +158,7 @@ pub struct Config {
/// Configuration for the inbound rate limiter (requests received by this node). /// Configuration for the inbound rate limiter (requests received by this node).
pub inbound_rate_limiter_config: Option<InboundRateLimiterConfig>, pub inbound_rate_limiter_config: Option<InboundRateLimiterConfig>,
/// Whether to disable logging duplicate gossip messages as WARN. If set to true, duplicate /// Whether to disable logging duplicate gossip messages as WARN. If set to true, duplicate
/// errors will be logged at DEBUG level. /// errors will be logged at DEBUG level.
pub disable_duplicate_warn_logs: bool, pub disable_duplicate_warn_logs: bool,
} }

View File

@ -0,0 +1,175 @@
// Copyright 2020 Sigma Prime Pty Ltd.
//
// Permission is hereby granted, free of charge, to any person obtaining a
// copy of this software and associated documentation files (the "Software"),
// to deal in the Software without restriction, including without limitation
// the rights to use, copy, modify, merge, publish, distribute, sublicense,
// and/or sell copies of the Software, and to permit persons to whom the
// Software is furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in
// all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
// OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
// DEALINGS IN THE SOFTWARE.
//! Data structure for efficiently storing known back-off's when pruning peers.
use crate::gossipsub::topic::TopicHash;
use instant::Instant;
use libp2p::identity::PeerId;
use std::collections::{
hash_map::{Entry, HashMap},
HashSet,
};
use std::time::Duration;
/// Index of a slot in the cyclic `backoffs_by_heartbeat` buffer.
#[derive(Copy, Clone)]
struct HeartbeatIndex(usize);
/// Stores backoffs in an efficient manner.
pub(crate) struct BackoffStorage {
    /// Stores backoffs and the index in backoffs_by_heartbeat per peer per topic.
    backoffs: HashMap<TopicHash, HashMap<PeerId, (Instant, HeartbeatIndex)>>,
    /// Stores peer topic pairs per heartbeat (this is cyclic the current index is
    /// heartbeat_index).
    backoffs_by_heartbeat: Vec<HashSet<(TopicHash, PeerId)>>,
    /// The index in the backoffs_by_heartbeat vector corresponding to the current heartbeat.
    heartbeat_index: HeartbeatIndex,
    /// The heartbeat interval duration from the config.
    heartbeat_interval: Duration,
    /// Backoff slack from the config.
    backoff_slack: u32,
}
impl BackoffStorage {
    /// Number of heartbeat intervals needed to cover the duration `d`
    /// (ceiling division on nanoseconds).
    fn heartbeats(d: &Duration, heartbeat_interval: &Duration) -> usize {
        ((d.as_nanos() + heartbeat_interval.as_nanos() - 1) / heartbeat_interval.as_nanos())
            as usize
    }
    /// Creates storage whose cyclic buffer is sized to hold the maximum prune
    /// backoff plus the configured slack.
    pub(crate) fn new(
        prune_backoff: &Duration,
        heartbeat_interval: Duration,
        backoff_slack: u32,
    ) -> BackoffStorage {
        // We add one additional slot for partial heartbeat
        let max_heartbeats =
            Self::heartbeats(prune_backoff, &heartbeat_interval) + backoff_slack as usize + 1;
        BackoffStorage {
            backoffs: HashMap::new(),
            backoffs_by_heartbeat: vec![HashSet::new(); max_heartbeats],
            heartbeat_index: HeartbeatIndex(0),
            heartbeat_interval,
            backoff_slack,
        }
    }
    /// Updates the backoff for a peer (if there is already a more restrictive backoff then this call
    /// doesn't change anything).
    pub(crate) fn update_backoff(&mut self, topic: &TopicHash, peer: &PeerId, time: Duration) {
        let instant = Instant::now() + time;
        // Registers the (topic, peer) pair in the cyclic-buffer slot at which
        // the backoff plus slack will have elapsed, returning that slot's index.
        let insert_into_backoffs_by_heartbeat =
            |heartbeat_index: HeartbeatIndex,
             backoffs_by_heartbeat: &mut Vec<HashSet<_>>,
             heartbeat_interval,
             backoff_slack| {
                let pair = (topic.clone(), *peer);
                let index = (heartbeat_index.0
                    + Self::heartbeats(&time, heartbeat_interval)
                    + backoff_slack as usize)
                    % backoffs_by_heartbeat.len();
                backoffs_by_heartbeat[index].insert(pair);
                HeartbeatIndex(index)
            };
        match self.backoffs.entry(topic.clone()).or_default().entry(*peer) {
            Entry::Occupied(mut o) => {
                let (backoff, index) = o.get();
                // Only extend the backoff; a less restrictive update is ignored.
                if backoff < &instant {
                    let pair = (topic.clone(), *peer);
                    // Drop the pair from its previous cyclic-buffer slot before
                    // re-inserting it at the slot for the new expiry.
                    if let Some(s) = self.backoffs_by_heartbeat.get_mut(index.0) {
                        s.remove(&pair);
                    }
                    let index = insert_into_backoffs_by_heartbeat(
                        self.heartbeat_index,
                        &mut self.backoffs_by_heartbeat,
                        &self.heartbeat_interval,
                        self.backoff_slack,
                    );
                    o.insert((instant, index));
                }
            }
            Entry::Vacant(v) => {
                let index = insert_into_backoffs_by_heartbeat(
                    self.heartbeat_index,
                    &mut self.backoffs_by_heartbeat,
                    &self.heartbeat_interval,
                    self.backoff_slack,
                );
                v.insert((instant, index));
            }
        };
    }
    /// Checks if a given peer is backoffed for the given topic. This method respects the
    /// configured BACKOFF_SLACK and may return true even if the backup is already over.
    /// It is guaranteed to return false if the backoff is not over and eventually if enough time
    /// passed true if the backoff is over.
    ///
    /// This method should be used for deciding if we can already send a GRAFT to a previously
    /// backoffed peer.
    pub(crate) fn is_backoff_with_slack(&self, topic: &TopicHash, peer: &PeerId) -> bool {
        // Key presence suffices: `heartbeat` only evicts an entry once its
        // backoff plus the configured slack has elapsed.
        self.backoffs
            .get(topic)
            .map_or(false, |m| m.contains_key(peer))
    }
    /// Returns the exact instant at which the recorded backoff (without slack)
    /// expires, if any.
    pub(crate) fn get_backoff_time(&self, topic: &TopicHash, peer: &PeerId) -> Option<Instant> {
        Self::get_backoff_time_from_backoffs(&self.backoffs, topic, peer)
    }
    // Associated function (not a method) so `heartbeat` can call it while other
    // fields of `self` are mutably borrowed.
    fn get_backoff_time_from_backoffs(
        backoffs: &HashMap<TopicHash, HashMap<PeerId, (Instant, HeartbeatIndex)>>,
        topic: &TopicHash,
        peer: &PeerId,
    ) -> Option<Instant> {
        backoffs
            .get(topic)
            .and_then(|m| m.get(peer).map(|(i, _)| *i))
    }
    /// Applies a heartbeat. That should be called regularly in intervals of length
    /// `heartbeat_interval`.
    pub(crate) fn heartbeat(&mut self) {
        // Clean up backoffs_by_heartbeat
        if let Some(s) = self.backoffs_by_heartbeat.get_mut(self.heartbeat_index.0) {
            let backoffs = &mut self.backoffs;
            let slack = self.heartbeat_interval * self.backoff_slack;
            let now = Instant::now();
            s.retain(|(topic, peer)| {
                // Keep the pair while its backoff extended by the slack has not
                // yet elapsed; pairs without a map entry are stale.
                let keep = match Self::get_backoff_time_from_backoffs(backoffs, topic, peer) {
                    Some(backoff_time) => backoff_time + slack > now,
                    None => false,
                };
                if !keep {
                    //remove from backoffs
                    if let Entry::Occupied(mut m) = backoffs.entry(topic.clone()) {
                        if m.get_mut().remove(peer).is_some() && m.get().is_empty() {
                            m.remove();
                        }
                    }
                }
                keep
            });
        }
        // Increase heartbeat index
        self.heartbeat_index =
            HeartbeatIndex((self.heartbeat_index.0 + 1) % self.backoffs_by_heartbeat.len());
    }
}

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

View File

@ -0,0 +1,156 @@
// Copyright 2020 Sigma Prime Pty Ltd.
//
// Permission is hereby granted, free of charge, to any person obtaining a
// copy of this software and associated documentation files (the "Software"),
// to deal in the Software without restriction, including without limitation
// the rights to use, copy, modify, merge, publish, distribute, sublicense,
// and/or sell copies of the Software, and to permit persons to whom the
// Software is furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in
// all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
// OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
// DEALINGS IN THE SOFTWARE.
//! Error types that can result from gossipsub.
use libp2p::identity::SigningError;
/// Error associated with publishing a gossipsub message.
///
/// `SubscriptionError::PublishError` (below) wraps this type when the failure
/// occurs while publishing a subscription update.
#[derive(Debug)]
pub enum PublishError {
    /// This message has already been published.
    Duplicate,
    /// An error occurred whilst signing the message.
    SigningError(SigningError),
    /// There were no peers to send this message to.
    InsufficientPeers,
    /// The overall message was too large. This could be due to excessive topics or an excessive
    /// message size.
    MessageTooLarge,
    /// The compression algorithm failed.
    TransformFailed(std::io::Error),
}
impl std::fmt::Display for PublishError {
    /// Human-readable form; delegates to the derived `Debug` representation.
    fn fmt(&self, formatter: &mut std::fmt::Formatter) -> std::fmt::Result {
        write!(formatter, "{:?}", self)
    }
}
impl std::error::Error for PublishError {
    fn source(&self) -> Option<&(dyn std::error::Error + 'static)> {
        // Only the two wrapping variants carry an underlying cause; the match
        // is exhaustive so a new variant forces an explicit decision here.
        match self {
            Self::SigningError(err) => Some(err),
            Self::TransformFailed(err) => Some(err),
            Self::Duplicate | Self::InsufficientPeers | Self::MessageTooLarge => None,
        }
    }
}
/// Error associated with subscribing to a topic.
#[derive(Debug)]
pub enum SubscriptionError {
    /// Couldn't publish our subscription
    /// (wraps the underlying [`PublishError`]).
    PublishError(PublishError),
    /// We are not allowed to subscribe to this topic by the subscription filter
    NotAllowed,
}
impl std::fmt::Display for SubscriptionError {
    /// Human-readable form; delegates to the derived `Debug` representation.
    fn fmt(&self, formatter: &mut std::fmt::Formatter) -> std::fmt::Result {
        write!(formatter, "{:?}", self)
    }
}
impl std::error::Error for SubscriptionError {
    fn source(&self) -> Option<&(dyn std::error::Error + 'static)> {
        // Exhaustive match: only the wrapping variant exposes a cause.
        match self {
            Self::PublishError(err) => Some(err),
            Self::NotAllowed => None,
        }
    }
}
impl From<SigningError> for PublishError {
fn from(error: SigningError) -> Self {
PublishError::SigningError(error)
}
}
/// Reasons a received gossipsub message can fail validation.
#[derive(Debug, Clone, Copy)]
pub enum ValidationError {
    /// The message has an invalid signature,
    InvalidSignature,
    /// The sequence number was empty, expected a value.
    EmptySequenceNumber,
    /// The sequence number was the incorrect size
    InvalidSequenceNumber,
    /// The PeerId was invalid
    InvalidPeerId,
    /// Signature existed when validation has been sent to
    /// [`crate::behaviour::MessageAuthenticity::Anonymous`].
    SignaturePresent,
    /// Sequence number existed when validation has been sent to
    /// [`crate::behaviour::MessageAuthenticity::Anonymous`].
    SequenceNumberPresent,
    /// Message source existed when validation has been sent to
    /// [`crate::behaviour::MessageAuthenticity::Anonymous`].
    MessageSourcePresent,
    /// The data transformation failed.
    TransformFailed,
}
impl std::fmt::Display for ValidationError {
    /// Human-readable form; delegates to the derived `Debug` representation.
    fn fmt(&self, formatter: &mut std::fmt::Formatter) -> std::fmt::Result {
        write!(formatter, "{:?}", self)
    }
}
// No variant carries a payload, so the default `source` (None) is correct.
impl std::error::Error for ValidationError {}
impl From<std::io::Error> for PublishError {
fn from(error: std::io::Error) -> PublishError {
PublishError::TransformFailed(error)
}
}
/// Error associated with Config building.
#[derive(Debug)]
pub enum ConfigBuilderError {
    /// Maximum transmission size is too small.
    MaxTransmissionSizeTooSmall,
    /// History length less than history gossip length.
    HistoryLengthTooSmall,
    /// The inequality doesn't hold mesh_outbound_min <= mesh_n_low <= mesh_n <= mesh_n_high
    MeshParametersInvalid,
    /// The inequality doesn't hold mesh_outbound_min <= self.config.mesh_n / 2
    MeshOutboundInvalid,
    /// unsubscribe_backoff is zero
    UnsubscribeBackoffIsZero,
    /// Invalid protocol
    InvalidProtocol,
}
// No variant carries a payload, so the default `source` (None) is correct.
impl std::error::Error for ConfigBuilderError {}
impl std::fmt::Display for ConfigBuilderError {
    // Fixed typos in the user-facing messages ("Histroy" -> "History",
    // "ineauality" -> "inequality").
    fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result {
        match self {
            Self::MaxTransmissionSizeTooSmall => {
                write!(f, "Maximum transmission size is too small")
            }
            Self::HistoryLengthTooSmall => write!(f, "History length less than history gossip length"),
            Self::MeshParametersInvalid => write!(f, "The inequality doesn't hold mesh_outbound_min <= mesh_n_low <= mesh_n <= mesh_n_high"),
            Self::MeshOutboundInvalid => write!(f, "The inequality doesn't hold mesh_outbound_min <= self.config.mesh_n / 2"),
            Self::UnsubscribeBackoffIsZero => write!(f, "unsubscribe_backoff is zero"),
            Self::InvalidProtocol => write!(f, "Invalid protocol"),
        }
    }
}

View File

@ -0,0 +1,12 @@
// Legacy wire format kept for compatibility with older pubsub peers.
syntax = "proto2";
package compat.pb;
message Message {
    optional bytes from = 1;       // presumably the sender's peer id bytes — confirm against callers
    optional bytes data = 2;       // opaque message payload
    optional bytes seqno = 3;      // sequence number bytes
    repeated string topic_ids = 4; // topics the message is published on
    optional bytes signature = 5;  // message signature bytes
    optional bytes key = 6;        // sender public key bytes
}

View File

@ -0,0 +1,2 @@
// Automatically generated mod.rs
// Re-exports the pb-rs generated protobuf types.
pub mod pb;

View File

@ -0,0 +1,67 @@
// Automatically generated rust module for 'compat.proto' file
#![allow(non_snake_case)]
#![allow(non_upper_case_globals)]
#![allow(non_camel_case_types)]
#![allow(unused_imports)]
#![allow(unknown_lints)]
#![allow(clippy::all)]
#![cfg_attr(rustfmt, rustfmt_skip)]
use quick_protobuf::{MessageInfo, MessageRead, MessageWrite, BytesReader, Writer, WriterBackend, Result};
use quick_protobuf::sizeofs::*;
use super::super::*;
// Generated mirror of `compat.proto`'s `Message`; edits here are lost on
// regeneration — change the .proto instead.
#[allow(clippy::derive_partial_eq_without_eq)]
#[derive(Debug, Default, PartialEq, Clone)]
pub struct Message {
    pub from: Option<Vec<u8>>,
    pub data: Option<Vec<u8>>,
    pub seqno: Option<Vec<u8>>,
    pub topic_ids: Vec<String>,
    pub signature: Option<Vec<u8>>,
    pub key: Option<Vec<u8>>,
}
impl<'a> MessageRead<'a> for Message {
    fn from_reader(r: &mut BytesReader, bytes: &'a [u8]) -> Result<Self> {
        let mut msg = Self::default();
        while !r.is_eof() {
            // Protobuf tags encode (field_number << 3) | wire_type; unknown
            // fields are skipped for forward compatibility.
            match r.next_tag(bytes) {
                Ok(10) => msg.from = Some(r.read_bytes(bytes)?.to_owned()),
                Ok(18) => msg.data = Some(r.read_bytes(bytes)?.to_owned()),
                Ok(26) => msg.seqno = Some(r.read_bytes(bytes)?.to_owned()),
                Ok(34) => msg.topic_ids.push(r.read_string(bytes)?.to_owned()),
                Ok(42) => msg.signature = Some(r.read_bytes(bytes)?.to_owned()),
                Ok(50) => msg.key = Some(r.read_bytes(bytes)?.to_owned()),
                Ok(t) => { r.read_unknown(bytes, t)?; }
                Err(e) => return Err(e),
            }
        }
        Ok(msg)
    }
}
impl MessageWrite for Message {
    // Size of the encoded message: 1 tag byte plus length-delimited payload
    // per present field.
    fn get_size(&self) -> usize {
        0
        + self.from.as_ref().map_or(0, |m| 1 + sizeof_len((m).len()))
        + self.data.as_ref().map_or(0, |m| 1 + sizeof_len((m).len()))
        + self.seqno.as_ref().map_or(0, |m| 1 + sizeof_len((m).len()))
        + self.topic_ids.iter().map(|s| 1 + sizeof_len((s).len())).sum::<usize>()
        + self.signature.as_ref().map_or(0, |m| 1 + sizeof_len((m).len()))
        + self.key.as_ref().map_or(0, |m| 1 + sizeof_len((m).len()))
    }
    fn write_message<W: WriterBackend>(&self, w: &mut Writer<W>) -> Result<()> {
        if let Some(ref s) = self.from { w.write_with_tag(10, |w| w.write_bytes(&**s))?; }
        if let Some(ref s) = self.data { w.write_with_tag(18, |w| w.write_bytes(&**s))?; }
        if let Some(ref s) = self.seqno { w.write_with_tag(26, |w| w.write_bytes(&**s))?; }
        for s in &self.topic_ids { w.write_with_tag(34, |w| w.write_string(&**s))?; }
        if let Some(ref s) = self.signature { w.write_with_tag(42, |w| w.write_bytes(&**s))?; }
        if let Some(ref s) = self.key { w.write_with_tag(50, |w| w.write_bytes(&**s))?; }
        Ok(())
    }
}

View File

@ -0,0 +1,2 @@
// Automatically generated mod.rs
// Re-exports the pb-rs generated protobuf types.
pub mod pb;

View File

@ -0,0 +1,567 @@
// Automatically generated rust module for 'rpc.proto' file
#![allow(non_snake_case)]
#![allow(non_upper_case_globals)]
#![allow(non_camel_case_types)]
#![allow(unused_imports)]
#![allow(unknown_lints)]
#![allow(clippy::all)]
#![cfg_attr(rustfmt, rustfmt_skip)]
use quick_protobuf::{MessageInfo, MessageRead, MessageWrite, BytesReader, Writer, WriterBackend, Result};
use quick_protobuf::sizeofs::*;
use super::super::*;
// Generated from `rpc.proto`; edits here are lost on regeneration.
// `RPC` is the top-level frame exchanged between gossipsub peers: subscription
// updates, published messages, and an optional control section.
#[allow(clippy::derive_partial_eq_without_eq)]
#[derive(Debug, Default, PartialEq, Clone)]
pub struct RPC {
    pub subscriptions: Vec<gossipsub::pb::mod_RPC::SubOpts>,
    pub publish: Vec<gossipsub::pb::Message>,
    pub control: Option<gossipsub::pb::ControlMessage>,
}
impl<'a> MessageRead<'a> for RPC {
    fn from_reader(r: &mut BytesReader, bytes: &'a [u8]) -> Result<Self> {
        let mut msg = Self::default();
        while !r.is_eof() {
            // Tags encode (field_number << 3) | wire_type; unknown fields are skipped.
            match r.next_tag(bytes) {
                Ok(10) => msg.subscriptions.push(r.read_message::<gossipsub::pb::mod_RPC::SubOpts>(bytes)?),
                Ok(18) => msg.publish.push(r.read_message::<gossipsub::pb::Message>(bytes)?),
                Ok(26) => msg.control = Some(r.read_message::<gossipsub::pb::ControlMessage>(bytes)?),
                Ok(t) => { r.read_unknown(bytes, t)?; }
                Err(e) => return Err(e),
            }
        }
        Ok(msg)
    }
}
impl MessageWrite for RPC {
    fn get_size(&self) -> usize {
        0
        + self.subscriptions.iter().map(|s| 1 + sizeof_len((s).get_size())).sum::<usize>()
        + self.publish.iter().map(|s| 1 + sizeof_len((s).get_size())).sum::<usize>()
        + self.control.as_ref().map_or(0, |m| 1 + sizeof_len((m).get_size()))
    }
    fn write_message<W: WriterBackend>(&self, w: &mut Writer<W>) -> Result<()> {
        for s in &self.subscriptions { w.write_with_tag(10, |w| w.write_message(s))?; }
        for s in &self.publish { w.write_with_tag(18, |w| w.write_message(s))?; }
        if let Some(ref s) = self.control { w.write_with_tag(26, |w| w.write_message(s))?; }
        Ok(())
    }
}
pub mod mod_RPC {
    use super::*;
    // A single subscribe/unsubscribe announcement for one topic.
    #[allow(clippy::derive_partial_eq_without_eq)]
    #[derive(Debug, Default, PartialEq, Clone)]
    pub struct SubOpts {
        pub subscribe: Option<bool>,
        pub topic_id: Option<String>,
    }
    impl<'a> MessageRead<'a> for SubOpts {
        fn from_reader(r: &mut BytesReader, bytes: &'a [u8]) -> Result<Self> {
            let mut msg = Self::default();
            while !r.is_eof() {
                match r.next_tag(bytes) {
                    Ok(8) => msg.subscribe = Some(r.read_bool(bytes)?),
                    Ok(18) => msg.topic_id = Some(r.read_string(bytes)?.to_owned()),
                    Ok(t) => { r.read_unknown(bytes, t)?; }
                    Err(e) => return Err(e),
                }
            }
            Ok(msg)
        }
    }
    impl MessageWrite for SubOpts {
        fn get_size(&self) -> usize {
            0
            + self.subscribe.as_ref().map_or(0, |m| 1 + sizeof_varint(*(m) as u64))
            + self.topic_id.as_ref().map_or(0, |m| 1 + sizeof_len((m).len()))
        }
        fn write_message<W: WriterBackend>(&self, w: &mut Writer<W>) -> Result<()> {
            if let Some(ref s) = self.subscribe { w.write_with_tag(8, |w| w.write_bool(*s))?; }
            if let Some(ref s) = self.topic_id { w.write_with_tag(18, |w| w.write_string(&**s))?; }
            Ok(())
        }
    }
}
// An application message. Unlike the compat format, `topic` is a required
// single string rather than a repeated list.
#[allow(clippy::derive_partial_eq_without_eq)]
#[derive(Debug, Default, PartialEq, Clone)]
pub struct Message {
    pub from: Option<Vec<u8>>,
    pub data: Option<Vec<u8>>,
    pub seqno: Option<Vec<u8>>,
    pub topic: String,
    pub signature: Option<Vec<u8>>,
    pub key: Option<Vec<u8>>,
}
impl<'a> MessageRead<'a> for Message {
    fn from_reader(r: &mut BytesReader, bytes: &'a [u8]) -> Result<Self> {
        let mut msg = Self::default();
        while !r.is_eof() {
            match r.next_tag(bytes) {
                Ok(10) => msg.from = Some(r.read_bytes(bytes)?.to_owned()),
                Ok(18) => msg.data = Some(r.read_bytes(bytes)?.to_owned()),
                Ok(26) => msg.seqno = Some(r.read_bytes(bytes)?.to_owned()),
                Ok(34) => msg.topic = r.read_string(bytes)?.to_owned(),
                Ok(42) => msg.signature = Some(r.read_bytes(bytes)?.to_owned()),
                Ok(50) => msg.key = Some(r.read_bytes(bytes)?.to_owned()),
                Ok(t) => { r.read_unknown(bytes, t)?; }
                Err(e) => return Err(e),
            }
        }
        Ok(msg)
    }
}
impl MessageWrite for Message {
    fn get_size(&self) -> usize {
        0
        + self.from.as_ref().map_or(0, |m| 1 + sizeof_len((m).len()))
        + self.data.as_ref().map_or(0, |m| 1 + sizeof_len((m).len()))
        + self.seqno.as_ref().map_or(0, |m| 1 + sizeof_len((m).len()))
        + 1 + sizeof_len((&self.topic).len())
        + self.signature.as_ref().map_or(0, |m| 1 + sizeof_len((m).len()))
        + self.key.as_ref().map_or(0, |m| 1 + sizeof_len((m).len()))
    }
    fn write_message<W: WriterBackend>(&self, w: &mut Writer<W>) -> Result<()> {
        if let Some(ref s) = self.from { w.write_with_tag(10, |w| w.write_bytes(&**s))?; }
        if let Some(ref s) = self.data { w.write_with_tag(18, |w| w.write_bytes(&**s))?; }
        if let Some(ref s) = self.seqno { w.write_with_tag(26, |w| w.write_bytes(&**s))?; }
        // `topic` is a required field, so it is always written.
        w.write_with_tag(34, |w| w.write_string(&**&self.topic))?;
        if let Some(ref s) = self.signature { w.write_with_tag(42, |w| w.write_bytes(&**s))?; }
        if let Some(ref s) = self.key { w.write_with_tag(50, |w| w.write_bytes(&**s))?; }
        Ok(())
    }
}
// Generated from `rpc.proto`; edits here are lost on regeneration.
// The control section of an RPC frame: IHAVE/IWANT gossip plus GRAFT/PRUNE
// mesh maintenance.
#[allow(clippy::derive_partial_eq_without_eq)]
#[derive(Debug, Default, PartialEq, Clone)]
pub struct ControlMessage {
    pub ihave: Vec<gossipsub::pb::ControlIHave>,
    pub iwant: Vec<gossipsub::pb::ControlIWant>,
    pub graft: Vec<gossipsub::pb::ControlGraft>,
    pub prune: Vec<gossipsub::pb::ControlPrune>,
}
impl<'a> MessageRead<'a> for ControlMessage {
    fn from_reader(r: &mut BytesReader, bytes: &'a [u8]) -> Result<Self> {
        let mut msg = Self::default();
        while !r.is_eof() {
            match r.next_tag(bytes) {
                Ok(10) => msg.ihave.push(r.read_message::<gossipsub::pb::ControlIHave>(bytes)?),
                Ok(18) => msg.iwant.push(r.read_message::<gossipsub::pb::ControlIWant>(bytes)?),
                Ok(26) => msg.graft.push(r.read_message::<gossipsub::pb::ControlGraft>(bytes)?),
                Ok(34) => msg.prune.push(r.read_message::<gossipsub::pb::ControlPrune>(bytes)?),
                Ok(t) => { r.read_unknown(bytes, t)?; }
                Err(e) => return Err(e),
            }
        }
        Ok(msg)
    }
}
impl MessageWrite for ControlMessage {
    fn get_size(&self) -> usize {
        0
        + self.ihave.iter().map(|s| 1 + sizeof_len((s).get_size())).sum::<usize>()
        + self.iwant.iter().map(|s| 1 + sizeof_len((s).get_size())).sum::<usize>()
        + self.graft.iter().map(|s| 1 + sizeof_len((s).get_size())).sum::<usize>()
        + self.prune.iter().map(|s| 1 + sizeof_len((s).get_size())).sum::<usize>()
    }
    fn write_message<W: WriterBackend>(&self, w: &mut Writer<W>) -> Result<()> {
        for s in &self.ihave { w.write_with_tag(10, |w| w.write_message(s))?; }
        for s in &self.iwant { w.write_with_tag(18, |w| w.write_message(s))?; }
        for s in &self.graft { w.write_with_tag(26, |w| w.write_message(s))?; }
        for s in &self.prune { w.write_with_tag(34, |w| w.write_message(s))?; }
        Ok(())
    }
}
// IHAVE: advertises message ids we hold for a topic.
#[allow(clippy::derive_partial_eq_without_eq)]
#[derive(Debug, Default, PartialEq, Clone)]
pub struct ControlIHave {
    pub topic_id: Option<String>,
    pub message_ids: Vec<Vec<u8>>,
}
impl<'a> MessageRead<'a> for ControlIHave {
    fn from_reader(r: &mut BytesReader, bytes: &'a [u8]) -> Result<Self> {
        let mut msg = Self::default();
        while !r.is_eof() {
            match r.next_tag(bytes) {
                Ok(10) => msg.topic_id = Some(r.read_string(bytes)?.to_owned()),
                Ok(18) => msg.message_ids.push(r.read_bytes(bytes)?.to_owned()),
                Ok(t) => { r.read_unknown(bytes, t)?; }
                Err(e) => return Err(e),
            }
        }
        Ok(msg)
    }
}
impl MessageWrite for ControlIHave {
    fn get_size(&self) -> usize {
        0
        + self.topic_id.as_ref().map_or(0, |m| 1 + sizeof_len((m).len()))
        + self.message_ids.iter().map(|s| 1 + sizeof_len((s).len())).sum::<usize>()
    }
    fn write_message<W: WriterBackend>(&self, w: &mut Writer<W>) -> Result<()> {
        if let Some(ref s) = self.topic_id { w.write_with_tag(10, |w| w.write_string(&**s))?; }
        for s in &self.message_ids { w.write_with_tag(18, |w| w.write_bytes(&**s))?; }
        Ok(())
    }
}
// IWANT: requests full messages by id.
#[allow(clippy::derive_partial_eq_without_eq)]
#[derive(Debug, Default, PartialEq, Clone)]
pub struct ControlIWant {
    pub message_ids: Vec<Vec<u8>>,
}
impl<'a> MessageRead<'a> for ControlIWant {
    fn from_reader(r: &mut BytesReader, bytes: &'a [u8]) -> Result<Self> {
        let mut msg = Self::default();
        while !r.is_eof() {
            match r.next_tag(bytes) {
                Ok(10) => msg.message_ids.push(r.read_bytes(bytes)?.to_owned()),
                Ok(t) => { r.read_unknown(bytes, t)?; }
                Err(e) => return Err(e),
            }
        }
        Ok(msg)
    }
}
impl MessageWrite for ControlIWant {
    fn get_size(&self) -> usize {
        0
        + self.message_ids.iter().map(|s| 1 + sizeof_len((s).len())).sum::<usize>()
    }
    fn write_message<W: WriterBackend>(&self, w: &mut Writer<W>) -> Result<()> {
        for s in &self.message_ids { w.write_with_tag(10, |w| w.write_bytes(&**s))?; }
        Ok(())
    }
}
// GRAFT: asks the peer to add us to its mesh for a topic.
#[allow(clippy::derive_partial_eq_without_eq)]
#[derive(Debug, Default, PartialEq, Clone)]
pub struct ControlGraft {
    pub topic_id: Option<String>,
}
impl<'a> MessageRead<'a> for ControlGraft {
    fn from_reader(r: &mut BytesReader, bytes: &'a [u8]) -> Result<Self> {
        let mut msg = Self::default();
        while !r.is_eof() {
            match r.next_tag(bytes) {
                Ok(10) => msg.topic_id = Some(r.read_string(bytes)?.to_owned()),
                Ok(t) => { r.read_unknown(bytes, t)?; }
                Err(e) => return Err(e),
            }
        }
        Ok(msg)
    }
}
impl MessageWrite for ControlGraft {
    fn get_size(&self) -> usize {
        0
        + self.topic_id.as_ref().map_or(0, |m| 1 + sizeof_len((m).len()))
    }
    fn write_message<W: WriterBackend>(&self, w: &mut Writer<W>) -> Result<()> {
        if let Some(ref s) = self.topic_id { w.write_with_tag(10, |w| w.write_string(&**s))?; }
        Ok(())
    }
}
// PRUNE: removes us from the peer's mesh, optionally with alternative peers
// and a backoff (seconds) before re-grafting.
#[allow(clippy::derive_partial_eq_without_eq)]
#[derive(Debug, Default, PartialEq, Clone)]
pub struct ControlPrune {
    pub topic_id: Option<String>,
    pub peers: Vec<gossipsub::pb::PeerInfo>,
    pub backoff: Option<u64>,
}
impl<'a> MessageRead<'a> for ControlPrune {
    fn from_reader(r: &mut BytesReader, bytes: &'a [u8]) -> Result<Self> {
        let mut msg = Self::default();
        while !r.is_eof() {
            match r.next_tag(bytes) {
                Ok(10) => msg.topic_id = Some(r.read_string(bytes)?.to_owned()),
                Ok(18) => msg.peers.push(r.read_message::<gossipsub::pb::PeerInfo>(bytes)?),
                Ok(24) => msg.backoff = Some(r.read_uint64(bytes)?),
                Ok(t) => { r.read_unknown(bytes, t)?; }
                Err(e) => return Err(e),
            }
        }
        Ok(msg)
    }
}
impl MessageWrite for ControlPrune {
    fn get_size(&self) -> usize {
        0
        + self.topic_id.as_ref().map_or(0, |m| 1 + sizeof_len((m).len()))
        + self.peers.iter().map(|s| 1 + sizeof_len((s).get_size())).sum::<usize>()
        + self.backoff.as_ref().map_or(0, |m| 1 + sizeof_varint(*(m) as u64))
    }
    fn write_message<W: WriterBackend>(&self, w: &mut Writer<W>) -> Result<()> {
        if let Some(ref s) = self.topic_id { w.write_with_tag(10, |w| w.write_string(&**s))?; }
        for s in &self.peers { w.write_with_tag(18, |w| w.write_message(s))?; }
        if let Some(ref s) = self.backoff { w.write_with_tag(24, |w| w.write_uint64(*s))?; }
        Ok(())
    }
}
// Generated from `rpc.proto`; edits here are lost on regeneration.
// Peer exchange record carried inside PRUNE messages.
#[allow(clippy::derive_partial_eq_without_eq)]
#[derive(Debug, Default, PartialEq, Clone)]
pub struct PeerInfo {
    pub peer_id: Option<Vec<u8>>,
    pub signed_peer_record: Option<Vec<u8>>,
}
impl<'a> MessageRead<'a> for PeerInfo {
    fn from_reader(r: &mut BytesReader, bytes: &'a [u8]) -> Result<Self> {
        let mut msg = Self::default();
        while !r.is_eof() {
            match r.next_tag(bytes) {
                Ok(10) => msg.peer_id = Some(r.read_bytes(bytes)?.to_owned()),
                Ok(18) => msg.signed_peer_record = Some(r.read_bytes(bytes)?.to_owned()),
                Ok(t) => { r.read_unknown(bytes, t)?; }
                Err(e) => return Err(e),
            }
        }
        Ok(msg)
    }
}
impl MessageWrite for PeerInfo {
    fn get_size(&self) -> usize {
        0
        + self.peer_id.as_ref().map_or(0, |m| 1 + sizeof_len((m).len()))
        + self.signed_peer_record.as_ref().map_or(0, |m| 1 + sizeof_len((m).len()))
    }
    fn write_message<W: WriterBackend>(&self, w: &mut Writer<W>) -> Result<()> {
        if let Some(ref s) = self.peer_id { w.write_with_tag(10, |w| w.write_bytes(&**s))?; }
        if let Some(ref s) = self.signed_peer_record { w.write_with_tag(18, |w| w.write_bytes(&**s))?; }
        Ok(())
    }
}
// Topic metadata with optional auth/encryption options (legacy pubsub schema).
#[allow(clippy::derive_partial_eq_without_eq)]
#[derive(Debug, Default, PartialEq, Clone)]
pub struct TopicDescriptor {
    pub name: Option<String>,
    pub auth: Option<gossipsub::pb::mod_TopicDescriptor::AuthOpts>,
    pub enc: Option<gossipsub::pb::mod_TopicDescriptor::EncOpts>,
}
impl<'a> MessageRead<'a> for TopicDescriptor {
    fn from_reader(r: &mut BytesReader, bytes: &'a [u8]) -> Result<Self> {
        let mut msg = Self::default();
        while !r.is_eof() {
            match r.next_tag(bytes) {
                Ok(10) => msg.name = Some(r.read_string(bytes)?.to_owned()),
                Ok(18) => msg.auth = Some(r.read_message::<gossipsub::pb::mod_TopicDescriptor::AuthOpts>(bytes)?),
                Ok(26) => msg.enc = Some(r.read_message::<gossipsub::pb::mod_TopicDescriptor::EncOpts>(bytes)?),
                Ok(t) => { r.read_unknown(bytes, t)?; }
                Err(e) => return Err(e),
            }
        }
        Ok(msg)
    }
}
impl MessageWrite for TopicDescriptor {
    fn get_size(&self) -> usize {
        0
        + self.name.as_ref().map_or(0, |m| 1 + sizeof_len((m).len()))
        + self.auth.as_ref().map_or(0, |m| 1 + sizeof_len((m).get_size()))
        + self.enc.as_ref().map_or(0, |m| 1 + sizeof_len((m).get_size()))
    }
    fn write_message<W: WriterBackend>(&self, w: &mut Writer<W>) -> Result<()> {
        if let Some(ref s) = self.name { w.write_with_tag(10, |w| w.write_string(&**s))?; }
        if let Some(ref s) = self.auth { w.write_with_tag(18, |w| w.write_message(s))?; }
        if let Some(ref s) = self.enc { w.write_with_tag(26, |w| w.write_message(s))?; }
        Ok(())
    }
}
pub mod mod_TopicDescriptor {
use super::*;
/// Authentication options for a topic (pb-rs generated nested message).
#[allow(clippy::derive_partial_eq_without_eq)]
#[derive(Debug, Default, PartialEq, Clone)]
pub struct AuthOpts {
    /// Authentication mode (proto field 1).
    pub mode: Option<gossipsub::pb::mod_TopicDescriptor::mod_AuthOpts::AuthMode>,
    /// Root keys to trust (proto field 2).
    pub keys: Vec<Vec<u8>>,
}
impl<'a> MessageRead<'a> for AuthOpts {
    /// Decodes `AuthOpts`: tag 8 is the `mode` enum (field 1, varint),
    /// tag 18 appends a key (field 2, repeated bytes). Unknown fields skipped.
    fn from_reader(r: &mut BytesReader, bytes: &'a [u8]) -> Result<Self> {
        let mut msg = Self::default();
        while !r.is_eof() {
            match r.next_tag(bytes) {
                Ok(8) => msg.mode = Some(r.read_enum(bytes)?),
                Ok(18) => msg.keys.push(r.read_bytes(bytes)?.to_owned()),
                Ok(t) => { r.read_unknown(bytes, t)?; }
                Err(e) => return Err(e),
            }
        }
        Ok(msg)
    }
}
impl MessageWrite for AuthOpts {
    /// Encoded size: optional varint `mode` plus one length-delimited entry
    /// per key.
    fn get_size(&self) -> usize {
        0
        + self.mode.as_ref().map_or(0, |m| 1 + sizeof_varint(*(m) as u64))
        + self.keys.iter().map(|s| 1 + sizeof_len((s).len())).sum::<usize>()
    }
    /// Writes `mode` (tag 8) if set, then every key (tag 18).
    fn write_message<W: WriterBackend>(&self, w: &mut Writer<W>) -> Result<()> {
        if let Some(ref s) = self.mode { w.write_with_tag(8, |w| w.write_enum(*s as i32))?; }
        for s in &self.keys { w.write_with_tag(18, |w| w.write_bytes(&**s))?; }
        Ok(())
    }
}
pub mod mod_AuthOpts {
    /// How publishers to a topic are authenticated (pb-rs generated enum).
    #[derive(Debug, PartialEq, Eq, Clone, Copy)]
    pub enum AuthMode {
        NONE = 0,
        KEY = 1,
        WOT = 2,
    }

    impl Default for AuthMode {
        fn default() -> Self {
            // Protobuf enums default to the zero-valued variant.
            AuthMode::NONE
        }
    }

    impl From<i32> for AuthMode {
        fn from(i: i32) -> Self {
            // Unknown discriminants fall back to the default (NONE),
            // which also covers the explicit 0 case.
            match i {
                1 => AuthMode::KEY,
                2 => AuthMode::WOT,
                _ => AuthMode::NONE,
            }
        }
    }

    impl<'a> From<&'a str> for AuthMode {
        fn from(s: &'a str) -> Self {
            // Unrecognised names fall back to the default (NONE).
            match s {
                "KEY" => AuthMode::KEY,
                "WOT" => AuthMode::WOT,
                _ => AuthMode::NONE,
            }
        }
    }
}
/// Encryption options for a topic (pb-rs generated nested message).
#[allow(clippy::derive_partial_eq_without_eq)]
#[derive(Debug, Default, PartialEq, Clone)]
pub struct EncOpts {
    /// Encryption mode (proto field 1).
    pub mode: Option<gossipsub::pb::mod_TopicDescriptor::mod_EncOpts::EncMode>,
    /// Hashes of the (salted) shared keys used (proto field 2).
    pub key_hashes: Vec<Vec<u8>>,
}
impl<'a> MessageRead<'a> for EncOpts {
    /// Decodes `EncOpts`: tag 8 is the `mode` enum (field 1, varint),
    /// tag 18 appends a key hash (field 2, repeated bytes). Unknown fields skipped.
    fn from_reader(r: &mut BytesReader, bytes: &'a [u8]) -> Result<Self> {
        let mut msg = Self::default();
        while !r.is_eof() {
            match r.next_tag(bytes) {
                Ok(8) => msg.mode = Some(r.read_enum(bytes)?),
                Ok(18) => msg.key_hashes.push(r.read_bytes(bytes)?.to_owned()),
                Ok(t) => { r.read_unknown(bytes, t)?; }
                Err(e) => return Err(e),
            }
        }
        Ok(msg)
    }
}
impl MessageWrite for EncOpts {
    /// Encoded size: optional varint `mode` plus one length-delimited entry
    /// per key hash.
    fn get_size(&self) -> usize {
        0
        + self.mode.as_ref().map_or(0, |m| 1 + sizeof_varint(*(m) as u64))
        + self.key_hashes.iter().map(|s| 1 + sizeof_len((s).len())).sum::<usize>()
    }
    /// Writes `mode` (tag 8) if set, then every key hash (tag 18).
    fn write_message<W: WriterBackend>(&self, w: &mut Writer<W>) -> Result<()> {
        if let Some(ref s) = self.mode { w.write_with_tag(8, |w| w.write_enum(*s as i32))?; }
        for s in &self.key_hashes { w.write_with_tag(18, |w| w.write_bytes(&**s))?; }
        Ok(())
    }
}
pub mod mod_EncOpts {
    /// How topic payloads are encrypted (pb-rs generated enum).
    #[derive(Debug, PartialEq, Eq, Clone, Copy)]
    pub enum EncMode {
        NONE = 0,
        SHAREDKEY = 1,
        WOT = 2,
    }

    impl Default for EncMode {
        fn default() -> Self {
            // Protobuf enums default to the zero-valued variant.
            EncMode::NONE
        }
    }

    impl From<i32> for EncMode {
        fn from(i: i32) -> Self {
            // Unknown discriminants fall back to the default (NONE),
            // which also covers the explicit 0 case.
            match i {
                1 => EncMode::SHAREDKEY,
                2 => EncMode::WOT,
                _ => EncMode::NONE,
            }
        }
    }

    impl<'a> From<&'a str> for EncMode {
        fn from(s: &'a str) -> Self {
            // Unrecognised names fall back to the default (NONE).
            match s {
                "SHAREDKEY" => EncMode::SHAREDKEY,
                "WOT" => EncMode::WOT,
                _ => EncMode::NONE,
            }
        }
    }
}
}

View File

@ -0,0 +1,3 @@
// Automatically generated mod.rs
pub mod compat;
pub mod gossipsub;

View File

@ -0,0 +1,84 @@
// Gossipsub wire protocol schema (proto2). The Rust structs in rpc_proto are
// generated from this file with pb-rs; regenerate rather than hand-editing them.
syntax = "proto2";
package gossipsub.pb;

// Top-level frame exchanged on a gossipsub substream: subscription changes,
// published messages, and control traffic, all optional and batchable.
message RPC {
    repeated SubOpts subscriptions = 1;
    repeated Message publish = 2;
    message SubOpts {
        optional bool subscribe = 1; // subscribe or unsubscribe
        optional string topic_id = 2;
    }
    optional ControlMessage control = 3;
}

// A single application message. Only `topic` is required; `from`, `seqno`,
// `signature` and `key` depend on the configured message-signing policy.
message Message {
    optional bytes from = 1;
    optional bytes data = 2;
    optional bytes seqno = 3;
    required string topic = 4;
    optional bytes signature = 5;
    optional bytes key = 6;
}

// Container for the four gossipsub control message types.
message ControlMessage {
    repeated ControlIHave ihave = 1;
    repeated ControlIWant iwant = 2;
    repeated ControlGraft graft = 3;
    repeated ControlPrune prune = 4;
}

// Advertises message ids we hold for a topic (gossip).
message ControlIHave {
    optional string topic_id = 1;
    repeated bytes message_ids = 2;
}

// Requests full messages previously advertised via IHAVE.
message ControlIWant {
    repeated bytes message_ids= 1;
}

// Asks the peer to add us to its mesh for a topic.
message ControlGraft {
    optional string topic_id = 1;
}

// Removes a peer from the mesh, optionally suggesting alternatives (PX).
message ControlPrune {
    optional string topic_id = 1;
    repeated PeerInfo peers = 2; // gossipsub v1.1 PX
    optional uint64 backoff = 3; // gossipsub v1.1 backoff time (in seconds)
}

// Peer suggested during peer exchange (PX) in a PRUNE.
message PeerInfo {
    optional bytes peer_id = 1;
    optional bytes signed_peer_record = 2;
}

// topicID = hash(topicDescriptor); (not the topic.name)
message TopicDescriptor {
    optional string name = 1;
    optional AuthOpts auth = 2;
    optional EncOpts enc = 3;
    message AuthOpts {
        optional AuthMode mode = 1;
        repeated bytes keys = 2; // root keys to trust
        enum AuthMode {
            NONE = 0; // no authentication, anyone can publish
            KEY = 1; // only messages signed by keys in the topic descriptor are accepted
            WOT = 2; // web of trust, certificates can allow publisher set to grow
        }
    }
    message EncOpts {
        optional EncMode mode = 1;
        repeated bytes key_hashes = 2; // the hashes of the shared keys used (salted)
        enum EncMode {
            NONE = 0; // no encryption, anyone can read
            SHAREDKEY = 1; // messages are encrypted with shared key
            WOT = 2; // web of trust, certificates can allow publisher set to grow
        }
    }
}

View File

@ -0,0 +1,101 @@
// Copyright 2020 Sigma Prime Pty Ltd.
//
// Permission is hereby granted, free of charge, to any person obtaining a
// copy of this software and associated documentation files (the "Software"),
// to deal in the Software without restriction, including without limitation
// the rights to use, copy, modify, merge, publish, distribute, sublicense,
// and/or sell copies of the Software, and to permit persons to whom the
// Software is furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in
// all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
// OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
// DEALINGS IN THE SOFTWARE.
use super::peer_score::RejectReason;
use super::MessageId;
use super::ValidationError;
use instant::Instant;
use libp2p::identity::PeerId;
use std::collections::HashMap;
/// Tracks recently sent `IWANT` messages and checks if peers respond to them.
#[derive(Default)]
pub(crate) struct GossipPromises {
    /// Stores for each tracked message id and peer the instant when this promise expires.
    ///
    /// If the peer didn't respond until then we consider the promise as broken and penalize the
    /// peer.
    // Outer key: requested message id; inner map: one promise deadline per peer asked.
    promises: HashMap<MessageId, HashMap<PeerId, Instant>>,
}
impl GossipPromises {
    /// Whether any promise is currently being tracked for this message id.
    pub(crate) fn contains(&self, message: &MessageId) -> bool {
        self.promises.contains_key(message)
    }

    /// Track a promise to deliver a message from a list of [`MessageId`]s we are requesting.
    pub(crate) fn add_promise(&mut self, peer: PeerId, messages: &[MessageId], expires: Instant) {
        for id in messages {
            // An existing promise for this (message, peer) pair keeps its
            // original expiry; only brand-new promises record `expires`.
            self.promises
                .entry(id.clone())
                .or_default()
                .entry(peer)
                .or_insert(expires);
        }
    }

    /// A message was delivered: stop tracking every promise made for it.
    pub(crate) fn message_delivered(&mut self, message_id: &MessageId) {
        self.promises.remove(message_id);
    }

    /// A message was rejected: usually we stop tracking promises and let the
    /// invalid-message-delivery score penalty apply instead. The exception is
    /// an obviously invalid delivery (bad signature or self-origin), where we
    /// keep tracking so the broken-promise penalty still applies.
    pub(crate) fn reject_message(&mut self, message_id: &MessageId, reason: &RejectReason) {
        let keep_tracking = matches!(
            reason,
            RejectReason::ValidationError(ValidationError::InvalidSignature)
                | RejectReason::SelfOrigin
        );
        if !keep_tracking {
            self.promises.remove(message_id);
        }
    }

    /// Returns the number of broken promises for each peer who didn't follow up on an IWANT
    /// request.
    /// This should be called not too often relative to the expire times, since it iterates over
    /// the whole stored data.
    pub(crate) fn get_broken_promises(&mut self) -> HashMap<PeerId, usize> {
        let now = Instant::now();
        let mut broken = HashMap::new();
        self.promises.retain(|msg, peers| {
            // Drop expired promises, counting one break per expired entry.
            peers.retain(|peer_id, expires| {
                let expired = *expires < now;
                if expired {
                    *broken.entry(*peer_id).or_insert(0) += 1;
                    tracing::debug!(
                        peer=%peer_id,
                        message=%msg,
                        "[Penalty] The peer broke the promise to deliver message in time!"
                    );
                }
                !expired
            });
            // Forget message ids with no live promises left.
            !peers.is_empty()
        });
        broken
    }
}

View File

@ -0,0 +1,570 @@
// Copyright 2020 Sigma Prime Pty Ltd.
//
// Permission is hereby granted, free of charge, to any person obtaining a
// copy of this software and associated documentation files (the "Software"),
// to deal in the Software without restriction, including without limitation
// the rights to use, copy, modify, merge, publish, distribute, sublicense,
// and/or sell copies of the Software, and to permit persons to whom the
// Software is furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in
// all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
// OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
// DEALINGS IN THE SOFTWARE.
use super::protocol::{GossipsubCodec, ProtocolConfig};
use super::rpc_proto::proto;
use super::types::{PeerKind, RawMessage, Rpc, RpcOut, RpcReceiver};
use super::ValidationError;
use asynchronous_codec::Framed;
use futures::future::Either;
use futures::prelude::*;
use futures::StreamExt;
use instant::Instant;
use libp2p::core::upgrade::DeniedUpgrade;
use libp2p::swarm::handler::{
ConnectionEvent, ConnectionHandler, ConnectionHandlerEvent, DialUpgradeError,
FullyNegotiatedInbound, FullyNegotiatedOutbound, StreamUpgradeError, SubstreamProtocol,
};
use libp2p::swarm::Stream;
use std::{
pin::Pin,
task::{Context, Poll},
};
/// The event emitted by the Handler. This informs the behaviour of various events created
/// by the handler.
#[derive(Debug)]
pub enum HandlerEvent {
    /// A GossipsubRPC message has been received. This also contains a list of invalid messages (if
    /// any) that were received.
    Message {
        /// The GossipsubRPC message excluding any invalid messages.
        rpc: Rpc,
        /// Any invalid messages that were received in the RPC, along with the associated
        /// validation error.
        invalid_messages: Vec<(RawMessage, ValidationError)>,
    },
    /// An inbound or outbound substream has been established with the peer and this informs over
    /// which protocol. This message only occurs once per connection.
    PeerKind(PeerKind),
    /// A message to be published was dropped because it could not be sent in time.
    MessageDropped(RpcOut),
}
/// A message sent from the behaviour to the handler.
// Only mesh-membership changes flow this way; outbound RPCs travel through the
// `RpcReceiver` queue handed to the handler at construction.
#[allow(clippy::large_enum_variant)]
#[derive(Debug)]
pub enum HandlerIn {
    /// The peer has joined the mesh.
    JoinedMesh,
    /// The peer has left the mesh.
    LeftMesh,
}
/// The maximum number of inbound or outbound substream attempts we allow.
///
/// Gossipsub is supposed to have a single long-lived inbound and outbound substream. On failure we
/// attempt to recreate these. This imposes an upper bound of new substreams before we consider the
/// connection faulty and disable the handler. This also protects against potential substream
/// creation loops.
const MAX_SUBSTREAM_ATTEMPTS: usize = 5;
/// The connection handler: either fully functional, or disabled after the peer
/// proved unsupportive or exhausted its substream attempts.
#[allow(clippy::large_enum_variant)]
pub enum Handler {
    /// Normal operation over the gossipsub protocol.
    Enabled(EnabledHandler),
    /// All substream activity is refused; see [`DisabledHandler`] for why.
    Disabled(DisabledHandler),
}
/// Protocol Handler that manages a single long-lived substream with a peer.
pub struct EnabledHandler {
    /// Upgrade configuration for the gossipsub protocol.
    listen_protocol: ProtocolConfig,

    /// The single long-lived outbound substream.
    outbound_substream: Option<OutboundSubstreamState>,

    /// The single long-lived inbound substream.
    inbound_substream: Option<InboundSubstreamState>,

    /// Queue of values that we want to send to the remote
    send_queue: RpcReceiver,

    /// Flag indicating that an outbound substream is being established to prevent duplicate
    /// requests.
    outbound_substream_establishing: bool,

    /// The number of outbound substreams we have requested.
    outbound_substream_attempts: usize,

    /// The number of inbound substreams that have been created by the peer.
    inbound_substream_attempts: usize,

    /// The type of peer this handler is associated to.
    peer_kind: Option<PeerKind>,

    /// Keeps track on whether we have sent the peer kind to the behaviour.
    //
    // NOTE: Use this flag rather than checking the substream count each poll.
    peer_kind_sent: bool,

    // Timestamp of the last successful read or flush; available for keep-alive decisions.
    last_io_activity: Instant,

    /// Keeps track of whether this connection is for a peer in the mesh. This is used to make
    /// decisions about the keep alive state for this connection.
    in_mesh: bool,
}
/// Reasons a handler refuses further substream activity.
pub enum DisabledHandler {
    /// If the peer doesn't support the gossipsub protocol we do not immediately disconnect.
    /// Rather, we disable the handler and prevent any incoming or outgoing substreams from being
    /// established.
    ProtocolUnsupported {
        /// Keeps track on whether we have sent the peer kind to the behaviour.
        peer_kind_sent: bool,
    },
    /// The maximum number of inbound or outbound substream attempts have happened and thereby the
    /// handler has been disabled.
    MaxSubstreamAttempts,
}
/// State of the inbound substream, opened either by us or by the remote.
enum InboundSubstreamState {
    /// Waiting for a message from the remote. The idle state for an inbound substream.
    WaitingInput(Framed<Stream, GossipsubCodec>),
    /// The substream is being closed.
    Closing(Framed<Stream, GossipsubCodec>),
    /// An error occurred during processing.
    // Placeholder installed while the state is moved out in `poll`; reaching it is a bug.
    Poisoned,
}
/// State of the outbound substream, opened either by us or by the remote.
enum OutboundSubstreamState {
    /// Waiting for the user to send a message. The idle state for an outbound substream.
    WaitingOutput(Framed<Stream, GossipsubCodec>),
    /// Waiting to send a message to the remote.
    PendingSend(Framed<Stream, GossipsubCodec>, proto::RPC),
    /// Waiting to flush the substream so that the data arrives to the remote.
    PendingFlush(Framed<Stream, GossipsubCodec>),
    /// An error occurred during processing.
    // Placeholder installed while the state is moved out in `poll`; reaching it is a bug.
    Poisoned,
}
impl Handler {
    /// Builds a new [`Handler`] in the enabled state with no substreams yet
    /// established; `message_queue` is the channel the behaviour pushes
    /// outbound RPCs through.
    pub fn new(protocol_config: ProtocolConfig, message_queue: RpcReceiver) -> Self {
        let enabled = EnabledHandler {
            listen_protocol: protocol_config,
            outbound_substream: None,
            inbound_substream: None,
            send_queue: message_queue,
            outbound_substream_establishing: false,
            outbound_substream_attempts: 0,
            inbound_substream_attempts: 0,
            peer_kind: None,
            peer_kind_sent: false,
            last_io_activity: Instant::now(),
            in_mesh: false,
        };
        Handler::Enabled(enabled)
    }
}
impl EnabledHandler {
    #[cfg(test)]
    /// For testing purposes: obtain a clone of the RPC receiver (the send queue).
    pub fn receiver(&mut self) -> RpcReceiver {
        self.send_queue.clone()
    }

    /// Handles a fully negotiated inbound substream, replacing any existing one.
    fn on_fully_negotiated_inbound(
        &mut self,
        (substream, peer_kind): (Framed<Stream, GossipsubCodec>, PeerKind),
    ) {
        // update the known kind of peer
        if self.peer_kind.is_none() {
            self.peer_kind = Some(peer_kind);
        }

        // new inbound substream. Replace the current one, if it exists.
        tracing::trace!("New inbound substream request");
        self.inbound_substream = Some(InboundSubstreamState::WaitingInput(substream));
    }

    /// Handles a fully negotiated outbound substream; there must not already be one.
    fn on_fully_negotiated_outbound(
        &mut self,
        FullyNegotiatedOutbound { protocol, .. }: FullyNegotiatedOutbound<
            <Handler as ConnectionHandler>::OutboundProtocol,
            <Handler as ConnectionHandler>::OutboundOpenInfo,
        >,
    ) {
        let (substream, peer_kind) = protocol;

        // update the known kind of peer
        if self.peer_kind.is_none() {
            self.peer_kind = Some(peer_kind);
        }

        assert!(
            self.outbound_substream.is_none(),
            "Established an outbound substream with one already available"
        );
        self.outbound_substream = Some(OutboundSubstreamState::WaitingOutput(substream));
    }

    /// Drives both substream state machines: reports the peer kind once,
    /// requests an outbound substream on demand, sends queued RPCs (dropping
    /// timed-out publishes/forwards), and reads inbound RPCs.
    fn poll(
        &mut self,
        cx: &mut Context<'_>,
    ) -> Poll<
        ConnectionHandlerEvent<
            <Handler as ConnectionHandler>::OutboundProtocol,
            <Handler as ConnectionHandler>::OutboundOpenInfo,
            <Handler as ConnectionHandler>::ToBehaviour,
        >,
    > {
        // Report the negotiated peer kind to the behaviour exactly once.
        if !self.peer_kind_sent {
            if let Some(peer_kind) = self.peer_kind.as_ref() {
                self.peer_kind_sent = true;
                return Poll::Ready(ConnectionHandlerEvent::NotifyBehaviour(
                    HandlerEvent::PeerKind(peer_kind.clone()),
                ));
            }
        }

        // determine if we need to create the outbound stream
        if !self.send_queue.is_empty()
            && self.outbound_substream.is_none()
            && !self.outbound_substream_establishing
        {
            self.outbound_substream_establishing = true;
            return Poll::Ready(ConnectionHandlerEvent::OutboundSubstreamRequest {
                protocol: SubstreamProtocol::new(self.listen_protocol.clone(), ()),
            });
        }

        // We may need to inform the behaviour if we have dropped a message. This gets set if that
        // is the case.
        let mut dropped_message = None;

        // process outbound stream
        // The state is moved out via mem::replace (leaving `Poisoned`) so the
        // substream can be consumed by value; every arm must restore a state.
        loop {
            match std::mem::replace(
                &mut self.outbound_substream,
                Some(OutboundSubstreamState::Poisoned),
            ) {
                // outbound idle state
                Some(OutboundSubstreamState::WaitingOutput(substream)) => {
                    if let Poll::Ready(Some(mut message)) = self.send_queue.poll_next_unpin(cx) {
                        match message {
                            RpcOut::Publish {
                                message: _,
                                ref mut timeout,
                            }
                            | RpcOut::Forward {
                                message: _,
                                ref mut timeout,
                            } => {
                                // Time-bound messages whose deadline elapsed are dropped,
                                // not sent late.
                                if Pin::new(timeout).poll(cx).is_ready() {
                                    // Inform the behaviour and end the poll.
                                    dropped_message = Some(HandlerEvent::MessageDropped(message));
                                    self.outbound_substream =
                                        Some(OutboundSubstreamState::WaitingOutput(substream));
                                    break;
                                }
                            }
                            _ => {} // All other messages are not time-bound.
                        }

                        self.outbound_substream = Some(OutboundSubstreamState::PendingSend(
                            substream,
                            message.into_protobuf(),
                        ));
                        continue;
                    }

                    self.outbound_substream =
                        Some(OutboundSubstreamState::WaitingOutput(substream));
                    break;
                }
                Some(OutboundSubstreamState::PendingSend(mut substream, message)) => {
                    match Sink::poll_ready(Pin::new(&mut substream), cx) {
                        Poll::Ready(Ok(())) => {
                            match Sink::start_send(Pin::new(&mut substream), message) {
                                Ok(()) => {
                                    self.outbound_substream =
                                        Some(OutboundSubstreamState::PendingFlush(substream))
                                }
                                Err(e) => {
                                    tracing::debug!(
                                        "Failed to send message on outbound stream: {e}"
                                    );
                                    // Drop the substream; it will be recreated on demand.
                                    self.outbound_substream = None;
                                    break;
                                }
                            }
                        }
                        Poll::Ready(Err(e)) => {
                            tracing::debug!("Failed to send message on outbound stream: {e}");
                            self.outbound_substream = None;
                            break;
                        }
                        Poll::Pending => {
                            self.outbound_substream =
                                Some(OutboundSubstreamState::PendingSend(substream, message));
                            break;
                        }
                    }
                }
                Some(OutboundSubstreamState::PendingFlush(mut substream)) => {
                    match Sink::poll_flush(Pin::new(&mut substream), cx) {
                        Poll::Ready(Ok(())) => {
                            self.last_io_activity = Instant::now();
                            self.outbound_substream =
                                Some(OutboundSubstreamState::WaitingOutput(substream))
                        }
                        Poll::Ready(Err(e)) => {
                            tracing::debug!("Failed to flush outbound stream: {e}");
                            self.outbound_substream = None;
                            break;
                        }
                        Poll::Pending => {
                            self.outbound_substream =
                                Some(OutboundSubstreamState::PendingFlush(substream));
                            break;
                        }
                    }
                }
                None => {
                    self.outbound_substream = None;
                    break;
                }
                Some(OutboundSubstreamState::Poisoned) => {
                    unreachable!("Error occurred during outbound stream processing")
                }
            }
        }

        // If there was a timeout in sending a message, inform the behaviour before restarting the
        // poll
        if let Some(handler_event) = dropped_message {
            return Poll::Ready(ConnectionHandlerEvent::NotifyBehaviour(handler_event));
        }

        // Handle inbound messages
        loop {
            match std::mem::replace(
                &mut self.inbound_substream,
                Some(InboundSubstreamState::Poisoned),
            ) {
                // inbound idle state
                Some(InboundSubstreamState::WaitingInput(mut substream)) => {
                    match substream.poll_next_unpin(cx) {
                        Poll::Ready(Some(Ok(message))) => {
                            self.last_io_activity = Instant::now();
                            self.inbound_substream =
                                Some(InboundSubstreamState::WaitingInput(substream));
                            return Poll::Ready(ConnectionHandlerEvent::NotifyBehaviour(message));
                        }
                        Poll::Ready(Some(Err(error))) => {
                            tracing::debug!("Failed to read from inbound stream: {error}");
                            // Close this side of the stream. If the
                            // peer is still around, they will re-establish their
                            // outbound stream i.e. our inbound stream.
                            self.inbound_substream =
                                Some(InboundSubstreamState::Closing(substream));
                        }
                        // peer closed the stream
                        Poll::Ready(None) => {
                            tracing::debug!("Inbound stream closed by remote");
                            self.inbound_substream =
                                Some(InboundSubstreamState::Closing(substream));
                        }
                        Poll::Pending => {
                            self.inbound_substream =
                                Some(InboundSubstreamState::WaitingInput(substream));
                            break;
                        }
                    }
                }
                Some(InboundSubstreamState::Closing(mut substream)) => {
                    match Sink::poll_close(Pin::new(&mut substream), cx) {
                        Poll::Ready(res) => {
                            if let Err(e) = res {
                                // Don't close the connection but just drop the inbound substream.
                                // In case the remote has more to send, they will open up a new
                                // substream.
                                tracing::debug!("Inbound substream error while closing: {e}");
                            }
                            self.inbound_substream = None;
                            break;
                        }
                        Poll::Pending => {
                            self.inbound_substream =
                                Some(InboundSubstreamState::Closing(substream));
                            break;
                        }
                    }
                }
                None => {
                    self.inbound_substream = None;
                    break;
                }
                Some(InboundSubstreamState::Poisoned) => {
                    unreachable!("Error occurred during inbound stream processing")
                }
            }
        }

        Poll::Pending
    }
}
impl ConnectionHandler for Handler {
    type FromBehaviour = HandlerIn;
    type ToBehaviour = HandlerEvent;
    type InboundOpenInfo = ();
    // A disabled handler advertises `DeniedUpgrade` so no inbound substream
    // can be negotiated at all.
    type InboundProtocol = either::Either<ProtocolConfig, DeniedUpgrade>;
    type OutboundOpenInfo = ();
    type OutboundProtocol = ProtocolConfig;

    /// Advertises the gossipsub protocol when enabled, or denies all upgrades
    /// when disabled.
    fn listen_protocol(&self) -> SubstreamProtocol<Self::InboundProtocol, Self::InboundOpenInfo> {
        match self {
            Handler::Enabled(handler) => {
                SubstreamProtocol::new(either::Either::Left(handler.listen_protocol.clone()), ())
            }
            Handler::Disabled(_) => {
                SubstreamProtocol::new(either::Either::Right(DeniedUpgrade), ())
            }
        }
    }

    /// Records mesh membership changes sent by the behaviour; a disabled
    /// handler drops them.
    fn on_behaviour_event(&mut self, message: HandlerIn) {
        match self {
            Handler::Enabled(handler) => match message {
                HandlerIn::JoinedMesh => {
                    handler.in_mesh = true;
                }
                HandlerIn::LeftMesh => {
                    handler.in_mesh = false;
                }
            },
            Handler::Disabled(_) => {
                tracing::debug!(?message, "Handler is disabled. Dropping message");
            }
        }
    }

    /// The connection is kept alive only while the peer is in our mesh.
    fn connection_keep_alive(&self) -> bool {
        matches!(self, Handler::Enabled(h) if h.in_mesh)
    }

    #[tracing::instrument(level = "trace", name = "ConnectionHandler::poll", skip(self, cx))]
    fn poll(
        &mut self,
        cx: &mut Context<'_>,
    ) -> Poll<
        ConnectionHandlerEvent<Self::OutboundProtocol, Self::OutboundOpenInfo, Self::ToBehaviour>,
    > {
        match self {
            Handler::Enabled(handler) => handler.poll(cx),
            Handler::Disabled(DisabledHandler::ProtocolUnsupported { peer_kind_sent }) => {
                // Still report `NotSupported` to the behaviour exactly once.
                if !*peer_kind_sent {
                    *peer_kind_sent = true;
                    return Poll::Ready(ConnectionHandlerEvent::NotifyBehaviour(
                        HandlerEvent::PeerKind(PeerKind::NotSupported),
                    ));
                }
                Poll::Pending
            }
            Handler::Disabled(DisabledHandler::MaxSubstreamAttempts) => Poll::Pending,
        }
    }

    /// Reacts to connection events: counts substream attempts (disabling the
    /// handler past `MAX_SUBSTREAM_ATTEMPTS`), wires up negotiated substreams,
    /// and handles dial upgrade errors.
    fn on_connection_event(
        &mut self,
        event: ConnectionEvent<
            Self::InboundProtocol,
            Self::OutboundProtocol,
            Self::InboundOpenInfo,
            Self::OutboundOpenInfo,
        >,
    ) {
        match self {
            Handler::Enabled(handler) => {
                if event.is_inbound() {
                    handler.inbound_substream_attempts += 1;

                    if handler.inbound_substream_attempts == MAX_SUBSTREAM_ATTEMPTS {
                        tracing::warn!(
                            "The maximum number of inbound substreams attempts has been exceeded"
                        );
                        *self = Handler::Disabled(DisabledHandler::MaxSubstreamAttempts);
                        return;
                    }
                }

                if event.is_outbound() {
                    handler.outbound_substream_establishing = false;

                    handler.outbound_substream_attempts += 1;

                    if handler.outbound_substream_attempts == MAX_SUBSTREAM_ATTEMPTS {
                        tracing::warn!(
                            "The maximum number of outbound substream attempts has been exceeded"
                        );
                        *self = Handler::Disabled(DisabledHandler::MaxSubstreamAttempts);
                        return;
                    }
                }

                match event {
                    ConnectionEvent::FullyNegotiatedInbound(FullyNegotiatedInbound {
                        protocol,
                        ..
                    }) => match protocol {
                        Either::Left(protocol) => handler.on_fully_negotiated_inbound(protocol),
                        // The Right (DeniedUpgrade) branch never yields a value.
                        Either::Right(v) => void::unreachable(v),
                    },
                    ConnectionEvent::FullyNegotiatedOutbound(fully_negotiated_outbound) => {
                        handler.on_fully_negotiated_outbound(fully_negotiated_outbound)
                    }
                    ConnectionEvent::DialUpgradeError(DialUpgradeError {
                        error: StreamUpgradeError::Timeout,
                        ..
                    }) => {
                        tracing::debug!("Dial upgrade error: Protocol negotiation timeout");
                    }
                    ConnectionEvent::DialUpgradeError(DialUpgradeError {
                        error: StreamUpgradeError::Apply(e),
                        ..
                    }) => void::unreachable(e),
                    ConnectionEvent::DialUpgradeError(DialUpgradeError {
                        error: StreamUpgradeError::NegotiationFailed,
                        ..
                    }) => {
                        // The protocol is not supported
                        tracing::debug!(
                            "The remote peer does not support gossipsub on this connection"
                        );
                        *self = Handler::Disabled(DisabledHandler::ProtocolUnsupported {
                            peer_kind_sent: false,
                        });
                    }
                    ConnectionEvent::DialUpgradeError(DialUpgradeError {
                        error: StreamUpgradeError::Io(e),
                        ..
                    }) => {
                        tracing::debug!("Protocol negotiation failed: {e}")
                    }
                    _ => {}
                }
            }
            Handler::Disabled(_) => {}
        }
    }
}

View File

@ -0,0 +1,387 @@
// Copyright 2020 Sigma Prime Pty Ltd.
//
// Permission is hereby granted, free of charge, to any person obtaining a
// copy of this software and associated documentation files (the "Software"),
// to deal in the Software without restriction, including without limitation
// the rights to use, copy, modify, merge, publish, distribute, sublicense,
// and/or sell copies of the Software, and to permit persons to whom the
// Software is furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in
// all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
// OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
// DEALINGS IN THE SOFTWARE.
use super::topic::TopicHash;
use super::types::{MessageId, RawMessage};
use libp2p::identity::PeerId;
use std::collections::hash_map::Entry;
use std::fmt::Debug;
use std::{
collections::{HashMap, HashSet},
fmt,
};
/// CacheEntry stored in the history.
// Lightweight (id, topic) pair kept per history round; the full message lives
// in `MessageCache::msgs`.
#[derive(Debug, Clone, PartialEq, Eq, Hash)]
pub(crate) struct CacheEntry {
    /// Message id of the cached message.
    mid: MessageId,
    /// Topic the message was published on.
    topic: TopicHash,
}
/// MessageCache struct holding history of messages.
#[derive(Clone)]
pub(crate) struct MessageCache {
    // Message id -> (message, peers we know already received it).
    msgs: HashMap<MessageId, (RawMessage, HashSet<PeerId>)>,
    /// For every message and peer the number of times this peer asked for the message
    iwant_counts: HashMap<MessageId, HashMap<PeerId, u32>>,
    // One slot per round; index 0 is the current round, shifted by `shift()`.
    history: Vec<Vec<CacheEntry>>,
    /// The number of indices in the cache history used for gossiping. That means that a message
    /// won't get gossiped anymore when shift got called `gossip` many times after inserting the
    /// message in the cache.
    gossip: usize,
}
impl fmt::Debug for MessageCache {
    /// Debug output omits `iwant_counts`, which is transient bookkeeping.
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        f.debug_struct("MessageCache")
            .field("msgs", &self.msgs)
            .field("history", &self.history)
            .field("gossip", &self.gossip)
            .finish()
    }
}
/// Implementation of the MessageCache.
impl MessageCache {
    /// Creates a cache that keeps `history_capacity` rounds of messages, of
    /// which only the most recent `gossip` rounds are advertised via gossip.
    pub(crate) fn new(gossip: usize, history_capacity: usize) -> Self {
        MessageCache {
            gossip,
            msgs: HashMap::default(),
            iwant_counts: HashMap::default(),
            history: vec![Vec::new(); history_capacity],
        }
    }

    /// Put a message into the memory cache.
    ///
    /// Returns true if the message didn't already exist in the cache.
    pub(crate) fn put(&mut self, message_id: &MessageId, msg: RawMessage) -> bool {
        match self.msgs.entry(message_id.clone()) {
            Entry::Occupied(_) => {
                // Don't add duplicate entries to the cache.
                false
            }
            Entry::Vacant(entry) => {
                let cache_entry = CacheEntry {
                    mid: message_id.clone(),
                    topic: msg.topic.clone(),
                };
                entry.insert((msg, HashSet::default()));
                // New messages always join the current (index 0) round.
                self.history[0].push(cache_entry);

                tracing::trace!(message=?message_id, "Put message in mcache");
                true
            }
        }
    }

    /// Keeps track of peers we know have received the message to prevent forwarding to said peers.
    pub(crate) fn observe_duplicate(&mut self, message_id: &MessageId, source: &PeerId) {
        if let Some((message, originating_peers)) = self.msgs.get_mut(message_id) {
            // if the message is already validated, we don't need to store extra peers sending us
            // duplicates as the message has already been forwarded
            if message.validated {
                return;
            }

            originating_peers.insert(*source);
        }
    }

    /// Get a message with `message_id`
    #[cfg(test)]
    pub(crate) fn get(&self, message_id: &MessageId) -> Option<&RawMessage> {
        self.msgs.get(message_id).map(|(message, _)| message)
    }

    /// Increases the iwant count for the given message by one and returns the message together
    /// with the iwant if the message exists.
    // Only validated messages are handed out, so unvalidated content is never
    // served in response to an IWANT.
    pub(crate) fn get_with_iwant_counts(
        &mut self,
        message_id: &MessageId,
        peer: &PeerId,
    ) -> Option<(&RawMessage, u32)> {
        let iwant_counts = &mut self.iwant_counts;
        self.msgs.get(message_id).and_then(|(message, _)| {
            if !message.validated {
                None
            } else {
                Some((message, {
                    let count = iwant_counts
                        .entry(message_id.clone())
                        .or_default()
                        .entry(*peer)
                        .or_default();
                    *count += 1;
                    *count
                }))
            }
        })
    }

    /// Gets a message with [`MessageId`] and tags it as validated.
    /// This function also returns the known peers that have sent us this message. This is used to
    /// prevent us sending redundant messages to peers who have already propagated it.
    pub(crate) fn validate(
        &mut self,
        message_id: &MessageId,
    ) -> Option<(&RawMessage, HashSet<PeerId>)> {
        self.msgs.get_mut(message_id).map(|(message, known_peers)| {
            message.validated = true;
            // Clear the known peers list (after a message is validated, it is forwarded and we no
            // longer need to store the originating peers).
            let originating_peers = std::mem::take(known_peers);
            (&*message, originating_peers)
        })
    }

    /// Get a list of [`MessageId`]s for a given topic.
    // Scans only the `gossip` most recent rounds; older rounds are retained
    // for IWANT responses but no longer advertised.
    pub(crate) fn get_gossip_message_ids(&self, topic: &TopicHash) -> Vec<MessageId> {
        self.history[..self.gossip]
            .iter()
            .fold(vec![], |mut current_entries, entries| {
                // search for entries with desired topic
                let mut found_entries: Vec<MessageId> = entries
                    .iter()
                    .filter_map(|entry| {
                        if &entry.topic == topic {
                            let mid = &entry.mid;
                            // Only gossip validated messages
                            if let Some(true) = self.msgs.get(mid).map(|(msg, _)| msg.validated) {
                                Some(mid.clone())
                            } else {
                                None
                            }
                        } else {
                            None
                        }
                    })
                    .collect();

                // generate the list
                current_entries.append(&mut found_entries);
                current_entries
            })
    }

    /// Shift the history array down one and delete messages associated with the
    /// last entry.
    pub(crate) fn shift(&mut self) {
        for entry in self.history.pop().expect("history is always > 1") {
            if let Some((msg, _)) = self.msgs.remove(&entry.mid) {
                if !msg.validated {
                    // If GossipsubConfig::validate_messages is true, the implementing
                    // application has to ensure that Gossipsub::validate_message gets called for
                    // each received message within the cache timeout time."
                    tracing::debug!(
                        message=%&entry.mid,
                        "The message got removed from the cache without being validated."
                    );
                }
            }
            tracing::trace!(message=%&entry.mid, "Remove message from the cache");

            self.iwant_counts.remove(&entry.mid);
        }

        // Insert an empty vec in position 0
        self.history.insert(0, Vec::new());
    }

    /// Removes a message from the cache and returns it if existent
    pub(crate) fn remove(
        &mut self,
        message_id: &MessageId,
    ) -> Option<(RawMessage, HashSet<PeerId>)> {
        // We only remove the message from msgs and iwant_count and keep the message_id in the
        // history vector. The id in the history vector will simply be ignored on popping.
        self.iwant_counts.remove(message_id);
        self.msgs.remove(message_id)
    }
}
#[cfg(test)]
mod tests {
    use super::*;
    use crate::gossipsub::types::RawMessage;
    use crate::{IdentTopic as Topic, TopicHash};
    use libp2p::identity::PeerId;
    /// Build a deterministic test message and its id for the given topic.
    /// `x` seeds both the single payload byte and the sequence number.
    fn gen_testm(x: u64, topic: TopicHash) -> (MessageId, RawMessage) {
        let default_id = |message: &RawMessage| {
            // default message id is: source + sequence number
            let mut source_string = message.source.as_ref().unwrap().to_base58();
            source_string.push_str(&message.sequence_number.unwrap().to_string());
            MessageId::from(source_string)
        };
        let u8x: u8 = x as u8;
        let source = Some(PeerId::random());
        let data: Vec<u8> = vec![u8x];
        let sequence_number = Some(x);
        let m = RawMessage {
            source,
            data,
            sequence_number,
            topic,
            signature: None,
            key: None,
            validated: false,
        };
        let id = default_id(&m);
        (id, m)
    }
    /// Convenience wrapper around `MessageCache::new`.
    fn new_cache(gossip_size: usize, history: usize) -> MessageCache {
        MessageCache::new(gossip_size, history)
    }
    #[test]
    /// Test that the message cache can be created.
    fn test_new_cache() {
        let x: usize = 3;
        let mc = new_cache(x, 5);
        assert_eq!(mc.gossip, x);
    }
    #[test]
    /// Test you can put one message and get one.
    fn test_put_get_one() {
        let mut mc = new_cache(10, 15);
        let topic1_hash = Topic::new("topic1").hash();
        let (id, m) = gen_testm(10, topic1_hash);
        mc.put(&id, m.clone());
        // The message lands in the newest history window.
        assert_eq!(mc.history[0].len(), 1);
        let fetched = mc.get(&id);
        assert_eq!(fetched.unwrap(), &m);
    }
    #[test]
    /// Test attempting to 'get' with a wrong id.
    fn test_get_wrong() {
        let mut mc = new_cache(10, 15);
        let topic1_hash = Topic::new("topic1").hash();
        let (id, m) = gen_testm(10, topic1_hash);
        mc.put(&id, m);
        // Try to get an incorrect ID
        let wrong_id = MessageId::new(b"wrongid");
        let fetched = mc.get(&wrong_id);
        assert!(fetched.is_none());
    }
    #[test]
    /// Test attempting to 'get' empty message cache.
    fn test_get_empty() {
        let mc = new_cache(10, 15);
        // Try to get an incorrect ID
        let wrong_string = MessageId::new(b"imempty");
        let fetched = mc.get(&wrong_string);
        assert!(fetched.is_none());
    }
    #[test]
    /// Test shift mechanism.
    fn test_shift() {
        let mut mc = new_cache(1, 5);
        let topic1_hash = Topic::new("topic1").hash();
        // Build the message
        for i in 0..10 {
            let (id, m) = gen_testm(i, topic1_hash.clone());
            mc.put(&id, m.clone());
        }
        mc.shift();
        // Ensure the shift occurred
        assert!(mc.history[0].is_empty());
        assert!(mc.history[1].len() == 10);
        // Make sure no messages deleted
        assert!(mc.msgs.len() == 10);
    }
    #[test]
    /// Test Shift with no additions.
    fn test_empty_shift() {
        let mut mc = new_cache(1, 5);
        let topic1_hash = Topic::new("topic1").hash();
        // Build the message
        for i in 0..10 {
            let (id, m) = gen_testm(i, topic1_hash.clone());
            mc.put(&id, m.clone());
        }
        mc.shift();
        // Ensure the shift occurred
        assert!(mc.history[0].is_empty());
        assert!(mc.history[1].len() == 10);
        // A second shift with no new puts moves the same window one slot back.
        mc.shift();
        assert!(mc.history[2].len() == 10);
        assert!(mc.history[1].is_empty());
        assert!(mc.history[0].is_empty());
    }
    #[test]
    /// Test shift to see if the last history messages are removed.
    fn test_remove_last_from_shift() {
        let mut mc = new_cache(4, 5);
        let topic1_hash = Topic::new("topic1").hash();
        // Build the message
        for i in 0..10 {
            let (id, m) = gen_testm(i, topic1_hash.clone());
            mc.put(&id, m.clone());
        }
        // Shift right until deleting messages
        mc.shift();
        mc.shift();
        mc.shift();
        mc.shift();
        assert_eq!(mc.history[mc.history.len() - 1].len(), 10);
        // Shift and delete the messages
        mc.shift();
        assert_eq!(mc.history[mc.history.len() - 1].len(), 0);
        assert_eq!(mc.history[0].len(), 0);
        assert_eq!(mc.msgs.len(), 0);
    }
}

View File

@ -0,0 +1,672 @@
// Copyright 2020 Sigma Prime Pty Ltd.
//
// Permission is hereby granted, free of charge, to any person obtaining a
// copy of this software and associated documentation files (the "Software"),
// to deal in the Software without restriction, including without limitation
// the rights to use, copy, modify, merge, publish, distribute, sublicense,
// and/or sell copies of the Software, and to permit persons to whom the
// Software is furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in
// all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
// OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
// DEALINGS IN THE SOFTWARE.
//! A set of metrics used to help track and diagnose the network behaviour of the gossipsub
//! protocol.
use std::collections::HashMap;
use prometheus_client::encoding::{EncodeLabelSet, EncodeLabelValue};
use prometheus_client::metrics::counter::Counter;
use prometheus_client::metrics::family::{Family, MetricConstructor};
use prometheus_client::metrics::gauge::Gauge;
use prometheus_client::metrics::histogram::{linear_buckets, Histogram};
use prometheus_client::registry::Registry;
use super::topic::TopicHash;
use super::types::{MessageAcceptance, PeerKind};
// Default value that limits for how many topics do we store metrics.
const DEFAULT_MAX_TOPICS: usize = 300;
// Default value that limits how many topics for which there has never been a subscription do we
// store metrics.
const DEFAULT_MAX_NEVER_SUBSCRIBED_TOPICS: usize = 50;
/// Configuration bounds and histogram buckets for the gossipsub metrics.
#[derive(Debug, Clone)]
pub struct Config {
    /// This provides an upper bound to the number of mesh topics we create metrics for. It
    /// prevents unbounded labels being created in the metrics.
    pub max_topics: usize,
    /// Mesh topics are controlled by the user via subscriptions whereas non-mesh topics are
    /// determined by users on the network. This limit permits a fixed amount of topics to allow,
    /// in-addition to the mesh topics.
    pub max_never_subscribed_topics: usize,
    /// Buckets used for the score histograms.
    pub score_buckets: Vec<f64>,
}
impl Config {
    /// Create buckets for the score histograms based on score thresholds.
    pub fn buckets_using_scoring_thresholds(&mut self, params: &super::PeerScoreThresholds) {
        // Anchor the low buckets at the configured thresholds, then append a
        // few fixed buckets for neutral/healthy scores.
        let gossip = params.gossip_threshold;
        let mut buckets = Vec::with_capacity(9);
        buckets.push(params.graylist_threshold);
        buckets.push(params.publish_threshold);
        buckets.push(gossip);
        buckets.push(gossip / 2.0);
        buckets.push(gossip / 4.0);
        buckets.extend([0.0, 1.0, 10.0, 100.0]);
        self.score_buckets = buckets;
    }
}
impl Default for Config {
    fn default() -> Self {
        // Some sensible default score thresholds to derive the buckets from.
        const GOSSIP_THRESHOLD: f64 = -4000.0;
        const PUBLISH_THRESHOLD: f64 = -8000.0;
        const GRAYLIST_THRESHOLD: f64 = -16000.0;
        Config {
            max_topics: DEFAULT_MAX_TOPICS,
            max_never_subscribed_topics: DEFAULT_MAX_NEVER_SUBSCRIBED_TOPICS,
            score_buckets: vec![
                GRAYLIST_THRESHOLD,
                PUBLISH_THRESHOLD,
                GOSSIP_THRESHOLD,
                GOSSIP_THRESHOLD / 2.0,
                GOSSIP_THRESHOLD / 4.0,
                0.0,
                1.0,
                10.0,
                100.0,
            ],
        }
    }
}
/// Whether we have ever been subscribed to this topic.
type EverSubscribed = bool;
/// A collection of metrics used throughout the Gossipsub behaviour.
pub(crate) struct Metrics {
    /* Configuration parameters */
    /// Maximum number of topics for which we store metrics. This helps keep the metrics bounded.
    max_topics: usize,
    /// Maximum number of topics for which we store metrics, where the topic is not one to which we
    /// have subscribed at some point. This helps keep the metrics bounded, since these topics come
    /// from received messages and not explicit application subscriptions.
    max_never_subscribed_topics: usize,
    /* Auxiliary variables */
    /// Information needed to decide if a topic is allowed or not.
    topic_info: HashMap<TopicHash, EverSubscribed>,
    /* Metrics per known topic */
    /// Status of our subscription to this topic. This metric allows analyzing other topic metrics
    /// filtered by our current subscription status.
    topic_subscription_status: Family<TopicHash, Gauge>,
    /// Number of peers subscribed to each topic. This allows us to analyze a topic's behaviour
    /// regardless of our subscription status.
    topic_peers_count: Family<TopicHash, Gauge>,
    /// The number of invalid messages received for a given topic.
    invalid_messages: Family<TopicHash, Counter>,
    /// The number of messages accepted by the application (validation result).
    accepted_messages: Family<TopicHash, Counter>,
    /// The number of messages ignored by the application (validation result).
    ignored_messages: Family<TopicHash, Counter>,
    /// The number of messages rejected by the application (validation result).
    rejected_messages: Family<TopicHash, Counter>,
    /// The number of publish messages dropped by the sender.
    publish_messages_dropped: Family<TopicHash, Counter>,
    /// The number of forward messages dropped by the sender.
    forward_messages_dropped: Family<TopicHash, Counter>,
    /* Metrics regarding mesh state */
    /// Number of peers in our mesh. This metric should be updated with the count of peers for a
    /// topic in the mesh regardless of inclusion and churn events.
    mesh_peer_counts: Family<TopicHash, Gauge>,
    /// Number of times we include peers in a topic mesh for different reasons.
    mesh_peer_inclusion_events: Family<InclusionLabel, Counter>,
    /// Number of times we remove peers in a topic mesh for different reasons.
    mesh_peer_churn_events: Family<ChurnLabel, Counter>,
    /* Metrics regarding messages sent/received */
    /// Number of gossip messages sent to each topic.
    topic_msg_sent_counts: Family<TopicHash, Counter>,
    /// Bytes from gossip messages sent to each topic.
    topic_msg_sent_bytes: Family<TopicHash, Counter>,
    /// Number of gossipsub messages published to each topic.
    topic_msg_published: Family<TopicHash, Counter>,
    /// Number of gossipsub messages received on each topic (without filtering duplicates).
    topic_msg_recv_counts_unfiltered: Family<TopicHash, Counter>,
    /// Number of gossipsub messages received on each topic (after filtering duplicates).
    topic_msg_recv_counts: Family<TopicHash, Counter>,
    /// Bytes received from gossip messages for each topic.
    topic_msg_recv_bytes: Family<TopicHash, Counter>,
    /* Metrics related to scoring */
    /// Histogram of the scores for each mesh topic.
    score_per_mesh: Family<TopicHash, Histogram, HistBuilder>,
    /// A counter of the kind of penalties being applied to peers.
    scoring_penalties: Family<PenaltyLabel, Counter>,
    /* General Metrics */
    /// Gossipsub supports floodsub, gossipsub v1.0 and gossipsub v1.1. Peers are classified based
    /// on which protocol they support. This metric keeps track of the number of peers that are
    /// connected of each type.
    peers_per_protocol: Family<ProtocolLabel, Gauge>,
    /// The time it takes to complete one iteration of the heartbeat.
    heartbeat_duration: Histogram,
    /* Performance metrics */
    /// When the user validates a message, it tries to re propagate it to its mesh peers. If the
    /// message expires from the memcache before it can be validated, we count this as a cache miss
    /// and it is an indicator that the memcache size should be increased.
    memcache_misses: Counter,
    /// The number of times we have decided that an IWANT control message is required for this
    /// topic. A very high metric might indicate an underperforming network.
    topic_iwant_msgs: Family<TopicHash, Counter>,
    /// The size of the priority queue.
    priority_queue_size: Histogram,
    /// The size of the non-priority queue.
    non_priority_queue_size: Histogram,
}
impl Metrics {
/// Create the gossipsub [`Metrics`], registering every metric on `registry`
/// and applying the bounds and histogram buckets from `config`.
pub(crate) fn new(registry: &mut Registry, config: Config) -> Self {
    // Destructure the config to be sure everything is used.
    let Config {
        max_topics,
        max_never_subscribed_topics,
        score_buckets,
    } = config;
    // Helper: build a default-constructed metric family and register it
    // under the given name/help text.
    macro_rules! register_family {
        ($name:expr, $help:expr) => {{
            let fam = Family::default();
            registry.register($name, $help, fam.clone());
            fam
        }};
    }
    let topic_subscription_status = register_family!(
        "topic_subscription_status",
        "Subscription status per known topic"
    );
    let topic_peers_count = register_family!(
        "topic_peers_counts",
        "Number of peers subscribed to each topic"
    );
    let invalid_messages = register_family!(
        "invalid_messages_per_topic",
        "Number of invalid messages received for each topic"
    );
    let accepted_messages = register_family!(
        "accepted_messages_per_topic",
        "Number of accepted messages received for each topic"
    );
    let ignored_messages = register_family!(
        "ignored_messages_per_topic",
        "Number of ignored messages received for each topic"
    );
    let rejected_messages = register_family!(
        "rejected_messages_per_topic",
        "Number of rejected messages received for each topic"
    );
    let publish_messages_dropped = register_family!(
        "publish_messages_dropped_per_topic",
        "Number of publish messages dropped per topic"
    );
    let forward_messages_dropped = register_family!(
        "forward_messages_dropped_per_topic",
        "Number of forward messages dropped per topic"
    );
    let mesh_peer_counts = register_family!(
        "mesh_peer_counts",
        "Number of peers in each topic in our mesh"
    );
    let mesh_peer_inclusion_events = register_family!(
        "mesh_peer_inclusion_events",
        "Number of times a peer gets added to our mesh for different reasons"
    );
    let mesh_peer_churn_events = register_family!(
        "mesh_peer_churn_events",
        "Number of times a peer gets removed from our mesh for different reasons"
    );
    let topic_msg_sent_counts = register_family!(
        "topic_msg_sent_counts",
        "Number of gossip messages sent to each topic"
    );
    let topic_msg_published = register_family!(
        "topic_msg_published",
        "Number of gossip messages published to each topic"
    );
    let topic_msg_sent_bytes = register_family!(
        "topic_msg_sent_bytes",
        "Bytes from gossip messages sent to each topic"
    );
    let topic_msg_recv_counts_unfiltered = register_family!(
        "topic_msg_recv_counts_unfiltered",
        "Number of gossip messages received on each topic (without duplicates being filtered)"
    );
    let topic_msg_recv_counts = register_family!(
        "topic_msg_recv_counts",
        "Number of gossip messages received on each topic (after duplicates have been filtered)"
    );
    let topic_msg_recv_bytes = register_family!(
        "topic_msg_recv_bytes",
        "Bytes received from gossip messages for each topic"
    );
    // Score histograms use the configured buckets, so they need an explicit
    // constructor rather than `Family::default`.
    let hist_builder = HistBuilder {
        buckets: score_buckets,
    };
    let score_per_mesh: Family<_, _, HistBuilder> = Family::new_with_constructor(hist_builder);
    registry.register(
        "score_per_mesh",
        "Histogram of scores per mesh topic",
        score_per_mesh.clone(),
    );
    let scoring_penalties = register_family!(
        "scoring_penalties",
        "Counter of types of scoring penalties given to peers"
    );
    let peers_per_protocol = register_family!(
        "peers_per_protocol",
        "Number of connected peers by protocol type"
    );
    let heartbeat_duration = Histogram::new(linear_buckets(0.0, 50.0, 10));
    registry.register(
        "heartbeat_duration",
        "Histogram of observed heartbeat durations",
        heartbeat_duration.clone(),
    );
    let topic_iwant_msgs = register_family!(
        "topic_iwant_msgs",
        "Number of times we have decided an IWANT is required for this topic"
    );
    let memcache_misses = {
        let metric = Counter::default();
        registry.register(
            "memcache_misses",
            "Number of times a message is not found in the duplicate cache when validating",
            metric.clone(),
        );
        metric
    };
    let priority_queue_size = Histogram::new(linear_buckets(0.0, 25.0, 100));
    registry.register(
        "priority_queue_size",
        "Histogram of observed priority queue sizes",
        priority_queue_size.clone(),
    );
    let non_priority_queue_size = Histogram::new(linear_buckets(0.0, 25.0, 100));
    registry.register(
        "non_priority_queue_size",
        "Histogram of observed non-priority queue sizes",
        non_priority_queue_size.clone(),
    );
    Self {
        max_topics,
        max_never_subscribed_topics,
        topic_info: HashMap::default(),
        topic_subscription_status,
        topic_peers_count,
        invalid_messages,
        accepted_messages,
        ignored_messages,
        rejected_messages,
        publish_messages_dropped,
        forward_messages_dropped,
        mesh_peer_counts,
        mesh_peer_inclusion_events,
        mesh_peer_churn_events,
        topic_msg_sent_counts,
        topic_msg_sent_bytes,
        topic_msg_published,
        topic_msg_recv_counts_unfiltered,
        topic_msg_recv_counts,
        topic_msg_recv_bytes,
        score_per_mesh,
        scoring_penalties,
        peers_per_protocol,
        heartbeat_duration,
        memcache_misses,
        topic_iwant_msgs,
        priority_queue_size,
        non_priority_queue_size,
    }
}
/// Number of known topics to which we have never subscribed.
fn non_subscription_topics_count(&self) -> usize {
    // Count entries whose `EverSubscribed` flag is still false.
    self.topic_info
        .iter()
        .filter(|(_, &ever_subscribed)| !ever_subscribed)
        .count()
}
/// Registers a topic if not already known and if the bounds allow it.
///
/// Returns `Err(())` when the topic is unknown and the configured limits
/// (`max_topics` / `max_never_subscribed_topics`) leave no room to track it.
fn register_topic(&mut self, topic: &TopicHash) -> Result<(), ()> {
    if self.topic_info.contains_key(topic) {
        Ok(())
    } else if self.topic_info.len() < self.max_topics
        && self.non_subscription_topics_count() < self.max_never_subscribed_topics
    {
        // This is a topic without an explicit subscription and we register it if we are within
        // the configured bounds.
        self.topic_info.entry(topic.clone()).or_insert(false);
        self.topic_subscription_status.get_or_create(topic).set(0);
        Ok(())
    } else {
        // We don't know this topic and there is no space left to store it
        Err(())
    }
}
/// Increase the number of peers we know are subscribed to this topic.
pub(crate) fn inc_topic_peers(&mut self, topic: &TopicHash) {
    if self.register_topic(topic).is_ok() {
        self.topic_peers_count.get_or_create(topic).inc();
    }
}
/// Decrease the number of peers we know are subscribed to this topic.
pub(crate) fn dec_topic_peers(&mut self, topic: &TopicHash) {
    if self.register_topic(topic).is_ok() {
        self.topic_peers_count.get_or_create(topic).dec();
    }
}
/* Mesh related methods */
/// Registers the subscription to a topic if the configured limits allow it.
/// Sets the registered number of peers in the mesh to 0.
pub(crate) fn joined(&mut self, topic: &TopicHash) {
    if self.topic_info.contains_key(topic) || self.topic_info.len() < self.max_topics {
        self.topic_info.insert(topic.clone(), true);
        // `set` returns the previous gauge value; we expect to have been
        // unsubscribed (0) before joining.
        let was_subscribed = self.topic_subscription_status.get_or_create(topic).set(1);
        debug_assert_eq!(was_subscribed, 0);
        self.mesh_peer_counts.get_or_create(topic).set(0);
    }
}
/// Registers the unsubscription to a topic if the topic was previously allowed.
/// Sets the registered number of peers in the mesh to 0.
pub(crate) fn left(&mut self, topic: &TopicHash) {
    if self.topic_info.contains_key(topic) {
        // Depending on the configured topic bounds we could miss a mesh topic.
        // So, check first if the topic was previously allowed.
        let was_subscribed = self.topic_subscription_status.get_or_create(topic).set(0);
        debug_assert_eq!(was_subscribed, 1);
        self.mesh_peer_counts.get_or_create(topic).set(0);
    }
}
/// Register the inclusion of `count` peers in the mesh of `topic` for `reason`.
pub(crate) fn peers_included(&mut self, topic: &TopicHash, reason: Inclusion, count: usize) {
    // Skip topics we cannot track within the configured bounds.
    if self.register_topic(topic).is_err() {
        return;
    }
    let label = InclusionLabel {
        hash: topic.to_string(),
        reason,
    };
    self.mesh_peer_inclusion_events
        .get_or_create(&label)
        .inc_by(count as u64);
}
/// Register the removal of `count` peers from the mesh of `topic` for `reason`.
pub(crate) fn peers_removed(&mut self, topic: &TopicHash, reason: Churn, count: usize) {
    // Skip topics we cannot track within the configured bounds.
    if self.register_topic(topic).is_err() {
        return;
    }
    let label = ChurnLabel {
        hash: topic.to_string(),
        reason,
    };
    self.mesh_peer_churn_events
        .get_or_create(&label)
        .inc_by(count as u64);
}
/// Register the current number of peers in our mesh for this topic.
pub(crate) fn set_mesh_peers(&mut self, topic: &TopicHash, count: usize) {
    // Due to limits, this topic could have not been allowed, so we check.
    if self.register_topic(topic).is_err() {
        return;
    }
    self.mesh_peer_counts.get_or_create(topic).set(count as i64);
}
/// Register that an invalid message was received on a specific topic.
pub(crate) fn register_invalid_message(&mut self, topic: &TopicHash) {
    if self.register_topic(topic).is_err() {
        return;
    }
    self.invalid_messages.get_or_create(topic).inc();
}
/// Register a score penalty.
pub(crate) fn register_score_penalty(&mut self, penalty: Penalty) {
    let label = PenaltyLabel { penalty };
    self.scoring_penalties.get_or_create(&label).inc();
}
/// Registers that a message was published on a specific topic.
pub(crate) fn register_published_message(&mut self, topic: &TopicHash) {
    if self.register_topic(topic).is_ok() {
        self.topic_msg_published.get_or_create(topic).inc();
    }
}
/// Register sending a message over a topic (count and byte volume).
pub(crate) fn msg_sent(&mut self, topic: &TopicHash, bytes: usize) {
    if self.register_topic(topic).is_ok() {
        self.topic_msg_sent_counts.get_or_create(topic).inc();
        self.topic_msg_sent_bytes
            .get_or_create(topic)
            .inc_by(bytes as u64);
    }
}
/// Register that a publish message was dropped by the sender for this topic.
pub(crate) fn publish_msg_dropped(&mut self, topic: &TopicHash) {
    if self.register_topic(topic).is_ok() {
        self.publish_messages_dropped.get_or_create(topic).inc();
    }
}
/// Register that a forward message was dropped by the sender for this topic.
pub(crate) fn forward_msg_dropped(&mut self, topic: &TopicHash) {
    if self.register_topic(topic).is_ok() {
        self.forward_messages_dropped.get_or_create(topic).inc();
    }
}
/// Register that a message was received (and was not a duplicate).
pub(crate) fn msg_recvd(&mut self, topic: &TopicHash) {
    if self.register_topic(topic).is_ok() {
        self.topic_msg_recv_counts.get_or_create(topic).inc();
    }
}
/// Register that a message was received (could have been a duplicate).
pub(crate) fn msg_recvd_unfiltered(&mut self, topic: &TopicHash, bytes: usize) {
    if self.register_topic(topic).is_ok() {
        self.topic_msg_recv_counts_unfiltered
            .get_or_create(topic)
            .inc();
        self.topic_msg_recv_bytes
            .get_or_create(topic)
            .inc_by(bytes as u64);
    }
}
/// Register the application's validation verdict (accept/ignore/reject) for a
/// message received on the given topic.
pub(crate) fn register_msg_validation(
    &mut self,
    topic: &TopicHash,
    validation: &MessageAcceptance,
) {
    if self.register_topic(topic).is_ok() {
        match validation {
            MessageAcceptance::Accept => self.accepted_messages.get_or_create(topic).inc(),
            MessageAcceptance::Ignore => self.ignored_messages.get_or_create(topic).inc(),
            MessageAcceptance::Reject => self.rejected_messages.get_or_create(topic).inc(),
        };
    }
}
/// Register a memcache miss.
pub(crate) fn memcache_miss(&mut self) {
    self.memcache_misses.inc();
}
/// Register sending an IWANT msg for this topic.
pub(crate) fn register_iwant(&mut self, topic: &TopicHash) {
    if self.register_topic(topic).is_ok() {
        self.topic_iwant_msgs.get_or_create(topic).inc();
    }
}
/// Observes a heartbeat duration, in milliseconds.
pub(crate) fn observe_heartbeat_duration(&mut self, millis: u64) {
    self.heartbeat_duration.observe(millis as f64);
}
/// Observes a priority queue size.
pub(crate) fn observe_priority_queue_size(&mut self, len: usize) {
    self.priority_queue_size.observe(len as f64);
}
/// Observes a non-priority queue size.
pub(crate) fn observe_non_priority_queue_size(&mut self, len: usize) {
    self.non_priority_queue_size.observe(len as f64);
}
/// Observe a score of a mesh peer.
pub(crate) fn observe_mesh_peers_score(&mut self, topic: &TopicHash, score: f64) {
    // Due to limits, this topic could have not been allowed, so we check.
    if self.register_topic(topic).is_err() {
        return;
    }
    self.score_per_mesh.get_or_create(topic).observe(score);
}
/// Register a new peer connection based on its protocol.
pub(crate) fn peer_protocol_connected(&mut self, kind: PeerKind) {
    let label = ProtocolLabel { protocol: kind };
    self.peers_per_protocol.get_or_create(&label).inc();
}
/// Removes a peer from the counter based on its protocol when it disconnects.
pub(crate) fn peer_protocol_disconnected(&mut self, kind: PeerKind) {
    let label = ProtocolLabel { protocol: kind };
    let metric = self.peers_per_protocol.get_or_create(&label);
    // Only decrement a non-zero gauge, so a disconnect without a matching
    // connect cannot underflow from 0.
    match metric.get() {
        0 => {}
        n => {
            metric.set(n - 1);
        }
    }
}
}
/// Reasons why a peer was included in the mesh.
#[derive(PartialEq, Eq, Hash, EncodeLabelValue, Clone, Debug)]
pub(crate) enum Inclusion {
    /// Peer was a fanout peer.
    Fanout,
    /// Included from random selection.
    Random,
    /// Peer subscribed.
    Subscribed,
    /// Peer was included to fill the outbound quota.
    Outbound,
}
/// Reasons why a peer was removed from the mesh.
#[derive(PartialEq, Eq, Hash, EncodeLabelValue, Clone, Debug)]
pub(crate) enum Churn {
    /// Peer disconnected.
    Dc,
    /// Peer had a bad score.
    BadScore,
    /// Peer sent a PRUNE.
    Prune,
    /// Peer unsubscribed.
    Unsub,
    /// Removed to trim an over-full mesh (too many peers).
    Excess,
}
/// Kinds of reasons a peer's score has been penalized.
#[derive(PartialEq, Eq, Hash, EncodeLabelValue, Clone, Debug)]
pub(crate) enum Penalty {
    /// A peer grafted before waiting the back-off time.
    GraftBackoff,
    /// A Peer did not respond to an IWANT request in time.
    BrokenPromise,
    /// A Peer did not send enough messages as expected.
    MessageDeficit,
    /// Too many peers under one IP address.
    IPColocation,
}
/// Label for the mesh inclusion event metrics.
#[derive(PartialEq, Eq, Hash, EncodeLabelSet, Clone, Debug)]
struct InclusionLabel {
    // Stringified topic hash.
    hash: String,
    reason: Inclusion,
}
/// Label for the mesh churn event metrics.
#[derive(PartialEq, Eq, Hash, EncodeLabelSet, Clone, Debug)]
struct ChurnLabel {
    // Stringified topic hash.
    hash: String,
    reason: Churn,
}
/// Label for the kinds of protocols peers can connect as.
#[derive(PartialEq, Eq, Hash, EncodeLabelSet, Clone, Debug)]
struct ProtocolLabel {
    protocol: PeerKind,
}
/// Label for the kinds of scoring penalties that can occur.
#[derive(PartialEq, Eq, Hash, EncodeLabelSet, Clone, Debug)]
struct PenaltyLabel {
    penalty: Penalty,
}
/// Constructs score histograms with a configurable set of buckets.
#[derive(Clone)]
struct HistBuilder {
    buckets: Vec<f64>,
}
impl MetricConstructor<Histogram> for HistBuilder {
    /// Build a `Histogram` using this builder's score buckets.
    fn new_metric(&self) -> Histogram {
        // Iterate the buckets by copy instead of cloning the whole `Vec`
        // every time a new histogram is constructed.
        Histogram::new(self.buckets.iter().copied())
    }
}

View File

@ -0,0 +1,111 @@
//! Implementation of the [Gossipsub](https://github.com/libp2p/specs/blob/master/pubsub/gossipsub/README.md) protocol.
//!
//! Gossipsub is a P2P pubsub (publish/subscription) routing layer designed to extend upon
//! floodsub and meshsub routing protocols.
//!
//! # Overview
//!
//! *Note: The gossipsub protocol specifications
//! (<https://github.com/libp2p/specs/tree/master/pubsub/gossipsub>) provide an outline for the
//! routing protocol. They should be consulted for further detail.*
//!
//! Gossipsub is a blend of meshsub for data and randomsub for mesh metadata. It provides bounded
//! degree and amplification factor with the meshsub construction and augments it using gossip
//! propagation of metadata with the randomsub technique.
//!
//! The router maintains an overlay mesh network of peers on which to efficiently send messages and
//! metadata. Peers use control messages to broadcast and request known messages and
//! subscribe/unsubscribe from topics in the mesh network.
//!
//! # Important Discrepancies
//!
//! This section outlines the current implementation's potential discrepancies from that of other
//! implementations, due to undefined elements in the current specification.
//!
//! - **Topics** - In gossipsub, topics are configurable by the `hash_topics` configuration parameter.
//! Topics are of type [`TopicHash`]. The current go implementation uses raw utf-8 strings, and this
//! is default configuration in rust-libp2p. Topics can be hashed (SHA256 hashed then base64
//! encoded) by setting the `hash_topics` configuration parameter to true.
//!
//! - **Sequence Numbers** - A message on the gossipsub network is identified by the source
//! [`PeerId`](libp2p_identity::PeerId) and a nonce (sequence number) of the message. The sequence numbers in
//! this implementation are sent as raw bytes across the wire. They are 64-bit big-endian unsigned
//! integers. When messages are signed, they are monotonically increasing integers starting from a
//! random value and wrapping around u64::MAX. When messages are unsigned, they are chosen at random.
//! NOTE: These numbers are sequential in the current go implementation.
//!
//! # Peer Discovery
//!
//! Gossipsub does not provide peer discovery by itself. Peer discovery is the process by which
//! peers in a p2p network exchange information about each other among other reasons to become resistant
//! against the failure or replacement of the
//! [boot nodes](https://docs.libp2p.io/reference/glossary/#boot-node) of the network.
//!
//! Peer
//! discovery can e.g. be implemented with the help of the [Kademlia](https://github.com/libp2p/specs/blob/master/kad-dht/README.md) protocol
//! in combination with the [Identify](https://github.com/libp2p/specs/tree/master/identify) protocol. See the
//! Kademlia implementation documentation for more information.
//!
//! # Using Gossipsub
//!
//! ## Gossipsub Config
//!
//! The [`Config`] struct specifies various network performance/tuning configuration
//! parameters. Specifically it specifies:
//!
//! [`Config`]: struct.Config.html
//!
//! This struct implements the [`Default`] trait and can be initialised via
//! [`Config::default()`].
//!
//!
//! ## Behaviour
//!
//! The [`Behaviour`] struct implements the [`libp2p_swarm::NetworkBehaviour`] trait allowing it to
//! act as the routing behaviour in a [`libp2p_swarm::Swarm`]. This struct requires an instance of
//! [`PeerId`](libp2p_identity::PeerId) and [`Config`].
//!
//! [`Behaviour`]: struct.Behaviour.html
//! ## Example
//!
//! For an example on how to use gossipsub, see the [chat-example](https://github.com/libp2p/rust-libp2p/tree/master/examples/chat).
#![cfg_attr(docsrs, feature(doc_cfg, doc_auto_cfg))]
mod backoff;
mod behaviour;
mod config;
mod error;
mod gossip_promises;
mod handler;
mod mcache;
mod metrics;
mod peer_score;
mod protocol;
mod rpc_proto;
mod subscription_filter;
mod time_cache;
mod topic;
mod transform;
mod types;
pub use self::behaviour::{Behaviour, Event, MessageAuthenticity};
pub use self::config::{Config, ConfigBuilder, ValidationMode, Version};
pub use self::error::{ConfigBuilderError, PublishError, SubscriptionError, ValidationError};
pub use self::metrics::Config as MetricsConfig;
pub use self::peer_score::{
score_parameter_decay, score_parameter_decay_with_base, PeerScoreParams, PeerScoreThresholds,
TopicScoreParams,
};
pub use self::subscription_filter::{
AllowAllSubscriptionFilter, CallbackSubscriptionFilter, CombinedSubscriptionFilters,
MaxCountSubscriptionFilter, RegexSubscriptionFilter, TopicSubscriptionFilter,
WhitelistSubscriptionFilter,
};
pub use self::topic::{Hasher, Topic, TopicHash};
pub use self::transform::{DataTransform, IdentityTransform};
pub use self::types::{Message, MessageAcceptance, MessageId, RawMessage};
pub type IdentTopic = Topic<self::topic::IdentityHash>;
pub type Sha256Topic = Topic<self::topic::Sha256Hash>;
pub use self::types::FailedMessages;

View File

@ -0,0 +1,937 @@
// Copyright 2020 Sigma Prime Pty Ltd.
//
// Permission is hereby granted, free of charge, to any person obtaining a
// copy of this software and associated documentation files (the "Software"),
// to deal in the Software without restriction, including without limitation
// the rights to use, copy, modify, merge, publish, distribute, sublicense,
// and/or sell copies of the Software, and to permit persons to whom the
// Software is furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in
// all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
// OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
// DEALINGS IN THE SOFTWARE.
//!
//! Manages and stores the Scoring logic of a particular peer on the gossipsub behaviour.
use super::metrics::{Metrics, Penalty};
use super::time_cache::TimeCache;
use super::{MessageId, TopicHash};
use instant::Instant;
use libp2p::identity::PeerId;
use std::collections::{hash_map, HashMap, HashSet};
use std::net::IpAddr;
use std::time::Duration;
mod params;
use super::ValidationError;
pub use params::{
score_parameter_decay, score_parameter_decay_with_base, PeerScoreParams, PeerScoreThresholds,
TopicScoreParams,
};
#[cfg(test)]
mod tests;
/// The number of seconds delivery messages are stored in the cache.
const TIME_CACHE_DURATION: u64 = 120;
pub(crate) struct PeerScore {
    /// The score parameters.
    params: PeerScoreParams,
    /// Per-peer scoring state.
    peer_stats: HashMap<PeerId, PeerStats>,
    /// Tracking peers per IP.
    peer_ips: HashMap<IpAddr, HashSet<PeerId>>,
    /// Message delivery tracking. This is a time-cache of [`DeliveryRecord`]s.
    deliveries: TimeCache<MessageId, DeliveryRecord>,
    /// Callback for monitoring message delivery times.
    message_delivery_time_callback: Option<fn(&PeerId, &TopicHash, f64)>,
}
/// General statistics for a given gossipsub peer.
struct PeerStats {
    /// Connection status of the peer.
    status: ConnectionStatus,
    /// Stats per topic.
    topics: HashMap<TopicHash, TopicStats>,
    /// IP tracking for individual peers.
    known_ips: HashSet<IpAddr>,
    /// Behaviour penalty that is applied to the peer, assigned by the behaviour.
    behaviour_penalty: f64,
    /// Application specific score. Can be manipulated by calling PeerScore::set_application_score
    application_score: f64,
    /// Penalty based on whether this peer consumes messages fast enough or not
    /// (incremented via `PeerScore::failed_message_slow_peer`).
    slow_peer_penalty: f64,
}
/// Connection state of a scored peer.
enum ConnectionStatus {
    /// The peer is connected.
    Connected,
    /// The peer is disconnected.
    Disconnected {
        /// Expiration time of the score state for disconnected peers.
        expire: Instant,
    },
}
impl Default for PeerStats {
    /// A freshly tracked peer starts out connected with all counters at zero.
    fn default() -> Self {
        Self {
            status: ConnectionStatus::Connected,
            topics: HashMap::new(),
            known_ips: HashSet::new(),
            behaviour_penalty: 0.0,
            application_score: 0.0,
            slow_peer_penalty: 0.0,
        }
    }
}
impl PeerStats {
    /// Returns a mutable reference to the stats for `topic_hash`.
    ///
    /// If no stats exist yet but the topic is scored by `params`, default stats are
    /// inserted and returned. For unscored topics, any existing stats are returned
    /// without inserting (i.e. `None` if the topic was never tracked).
    pub(crate) fn stats_or_default_mut(
        &mut self,
        topic_hash: TopicHash,
        params: &PeerScoreParams,
    ) -> Option<&mut TopicStats> {
        // `contains_key` states the intent directly (clippy: unnecessary `get(..).is_some()`).
        if params.topics.contains_key(&topic_hash) {
            Some(self.topics.entry(topic_hash).or_default())
        } else {
            self.topics.get_mut(&topic_hash)
        }
    }
}
/// Stats assigned to peer for each topic.
struct TopicStats {
    /// Whether the peer is currently grafted into the mesh for this topic, and
    /// since when.
    mesh_status: MeshStatus,
    /// Number of first message deliveries.
    first_message_deliveries: f64,
    /// True if the peer has been in the mesh for enough time to activate mesh message deliveries.
    mesh_message_deliveries_active: bool,
    /// Number of message deliveries from the mesh.
    mesh_message_deliveries: f64,
    /// Mesh rate failure penalty.
    mesh_failure_penalty: f64,
    /// Invalid message counter.
    invalid_message_deliveries: f64,
}
impl TopicStats {
    /// Whether this peer is currently grafted into the topic mesh.
    pub(crate) fn in_mesh(&self) -> bool {
        match self.mesh_status {
            MeshStatus::Active { .. } => true,
            MeshStatus::InActive => false,
        }
    }
}
/// Status defining a peer's inclusion in the mesh and associated parameters.
enum MeshStatus {
    /// The peer is part of the mesh for the topic.
    Active {
        /// The time the peer was last GRAFTed.
        graft_time: Instant,
        /// The time the peer has been in the mesh.
        mesh_time: Duration,
    },
    /// The peer is not part of the mesh.
    InActive,
}
impl MeshStatus {
/// Initialises a new [`MeshStatus::Active`] mesh status.
pub(crate) fn new_active() -> Self {
MeshStatus::Active {
graft_time: Instant::now(),
mesh_time: Duration::from_secs(0),
}
}
}
impl Default for TopicStats {
    /// Stats for a newly tracked topic: not in the mesh, all counters at zero.
    fn default() -> Self {
        Self {
            mesh_status: MeshStatus::InActive,
            first_message_deliveries: 0.0,
            mesh_message_deliveries_active: false,
            mesh_message_deliveries: 0.0,
            mesh_failure_penalty: 0.0,
            invalid_message_deliveries: 0.0,
        }
    }
}
/// Tracks the validation outcome of a single message and the peers that
/// delivered it.
#[derive(PartialEq, Debug)]
struct DeliveryRecord {
    /// Current validation status of the message.
    status: DeliveryStatus,
    /// When the message was first seen.
    first_seen: Instant,
    /// Peers from which this message has been received.
    peers: HashSet<PeerId>,
}
/// Validation state of a tracked message.
#[derive(PartialEq, Debug)]
enum DeliveryStatus {
    /// Don't know (yet) if the message is valid.
    Unknown,
    /// The message is valid together with the validated time.
    Valid(Instant),
    /// The message is invalid.
    Invalid,
    /// Instructed by the validator to ignore the message.
    Ignored,
}
impl Default for DeliveryRecord {
    /// A record for a message first seen now: validity unknown, no peers yet.
    fn default() -> Self {
        Self {
            status: DeliveryStatus::Unknown,
            first_seen: Instant::now(),
            peers: HashSet::new(),
        }
    }
}
impl PeerScore {
    /// Creates a new [`PeerScore`] using a given set of peer scoring parameters.
    #[allow(dead_code)]
    pub(crate) fn new(params: PeerScoreParams) -> Self {
        // No message-delivery-time callback is registered.
        Self::new_with_message_delivery_time_callback(params, None)
    }
    /// Creates a new [`PeerScore`] with an optional `callback` that is invoked
    /// with the observed delivery time whenever a message arrives from a peer
    /// that is in the mesh for the message's topic.
    pub(crate) fn new_with_message_delivery_time_callback(
        params: PeerScoreParams,
        callback: Option<fn(&PeerId, &TopicHash, f64)>,
    ) -> Self {
        PeerScore {
            params,
            peer_stats: HashMap::new(),
            peer_ips: HashMap::new(),
            deliveries: TimeCache::new(Duration::from_secs(TIME_CACHE_DURATION)),
            message_delivery_time_callback: callback,
        }
    }
    /// Returns the score for a peer.
    pub(crate) fn score(&self, peer_id: &PeerId) -> f64 {
        // Delegates without a metrics sink, so no penalty counters are incremented.
        self.metric_score(peer_id, None)
    }
    /// Returns the score for a peer, logging metrics. This is called from the heartbeat and
    /// increments the metric counts for penalties.
    ///
    /// The score is the weighted sum of the per-topic components P1-P4 (capped by
    /// `topic_score_cap`), plus P5 (application-specific), P6 (IP colocation),
    /// P7 (behaviour penalty) and the slow-peer penalty. Unknown peers score 0.
    pub(crate) fn metric_score(&self, peer_id: &PeerId, mut metrics: Option<&mut Metrics>) -> f64 {
        let Some(peer_stats) = self.peer_stats.get(peer_id) else {
            return 0.0;
        };
        let mut score = 0.0;
        // topic scores
        for (topic, topic_stats) in peer_stats.topics.iter() {
            // only topics with registered parameters contribute to the score
            if let Some(topic_params) = self.params.topics.get(topic) {
                // the accumulated score for this single topic
                let mut topic_score = 0.0;
                // P1: time in mesh (capped at `time_in_mesh_cap`)
                if let MeshStatus::Active { mesh_time, .. } = topic_stats.mesh_status {
                    let p1 = {
                        let v = mesh_time.as_secs_f64()
                            / topic_params.time_in_mesh_quantum.as_secs_f64();
                        if v < topic_params.time_in_mesh_cap {
                            v
                        } else {
                            topic_params.time_in_mesh_cap
                        }
                    };
                    topic_score += p1 * topic_params.time_in_mesh_weight;
                }
                // P2: first message deliveries (capped)
                let p2 = {
                    let v = topic_stats.first_message_deliveries;
                    if v < topic_params.first_message_deliveries_cap {
                        v
                    } else {
                        topic_params.first_message_deliveries_cap
                    }
                };
                topic_score += p2 * topic_params.first_message_deliveries_weight;
                // P3: mesh message deliveries. Quadratic in the deficit below the
                // threshold; only applies once the activation window has passed.
                if topic_stats.mesh_message_deliveries_active
                    && topic_stats.mesh_message_deliveries
                        < topic_params.mesh_message_deliveries_threshold
                {
                    let deficit = topic_params.mesh_message_deliveries_threshold
                        - topic_stats.mesh_message_deliveries;
                    let p3 = deficit * deficit;
                    topic_score += p3 * topic_params.mesh_message_deliveries_weight;
                    if let Some(metrics) = metrics.as_mut() {
                        metrics.register_score_penalty(Penalty::MessageDeficit);
                    }
                    tracing::debug!(
                        peer=%peer_id,
                        %topic,
                        %deficit,
                        penalty=%topic_score,
                        "[Penalty] The peer has a mesh deliveries deficit and will be penalized"
                    );
                }
                // P3b: sticky mesh failure penalty accumulated on prune/removal.
                // NOTE: the weight of P3b is negative (validated in TopicScoreParams.validate), so this detracts.
                let p3b = topic_stats.mesh_failure_penalty;
                topic_score += p3b * topic_params.mesh_failure_penalty_weight;
                // P4: invalid messages, quadratic in the counter.
                // NOTE: the weight of P4 is negative (validated in TopicScoreParams.validate), so this detracts.
                let p4 =
                    topic_stats.invalid_message_deliveries * topic_stats.invalid_message_deliveries;
                topic_score += p4 * topic_params.invalid_message_deliveries_weight;
                // update score, mixing with topic weight
                score += topic_score * topic_params.topic_weight;
            }
        }
        // apply the topic score cap, if any (a cap of 0 means no cap)
        if self.params.topic_score_cap > 0f64 && score > self.params.topic_score_cap {
            score = self.params.topic_score_cap;
        }
        // P5: application-specific score
        let p5 = peer_stats.application_score;
        score += p5 * self.params.app_specific_weight;
        // P6: IP collocation factor
        for ip in peer_stats.known_ips.iter() {
            if self.params.ip_colocation_factor_whitelist.get(ip).is_some() {
                continue;
            }
            // P6 has a cliff (ip_colocation_factor_threshold); it's only applied iff
            // at least that many peers are connected to us from that source IP
            // addr. It is quadratic, and the weight is negative (validated by
            // peer_score_params.validate()).
            if let Some(peers_in_ip) = self.peer_ips.get(ip).map(|peers| peers.len()) {
                if (peers_in_ip as f64) > self.params.ip_colocation_factor_threshold {
                    let surplus = (peers_in_ip as f64) - self.params.ip_colocation_factor_threshold;
                    let p6 = surplus * surplus;
                    if let Some(metrics) = metrics.as_mut() {
                        metrics.register_score_penalty(Penalty::IPColocation);
                    }
                    tracing::debug!(
                        peer=%peer_id,
                        surplus_ip=%ip,
                        surplus=%surplus,
                        "[Penalty] The peer gets penalized because of too many peers with the same ip"
                    );
                    score += p6 * self.params.ip_colocation_factor_weight;
                }
            }
        }
        // P7: behavioural pattern penalty, quadratic in the excess over the threshold
        if peer_stats.behaviour_penalty > self.params.behaviour_penalty_threshold {
            let excess = peer_stats.behaviour_penalty - self.params.behaviour_penalty_threshold;
            let p7 = excess * excess;
            score += p7 * self.params.behaviour_penalty_weight;
        }
        // Slow peer weighting, linear in the excess over the threshold
        if peer_stats.slow_peer_penalty > self.params.slow_peer_threshold {
            let excess = peer_stats.slow_peer_penalty - self.params.slow_peer_threshold;
            score += excess * self.params.slow_peer_weight;
        }
        score
    }
pub(crate) fn add_penalty(&mut self, peer_id: &PeerId, count: usize) {
if let Some(peer_stats) = self.peer_stats.get_mut(peer_id) {
tracing::debug!(
peer=%peer_id,
%count,
"[Penalty] Behavioral penalty for peer"
);
peer_stats.behaviour_penalty += count as f64;
}
}
fn remove_ips_for_peer(
peer_stats: &PeerStats,
peer_ips: &mut HashMap<IpAddr, HashSet<PeerId>>,
peer_id: &PeerId,
) {
for ip in peer_stats.known_ips.iter() {
if let Some(peer_set) = peer_ips.get_mut(ip) {
peer_set.remove(peer_id);
}
}
}
    /// Decays all counters and prunes expired entries for disconnected peers.
    /// Intended to be invoked periodically (e.g. from the heartbeat).
    pub(crate) fn refresh_scores(&mut self) {
        let now = Instant::now();
        // split the borrows so the `retain` closure can use both the params and the IP index
        let params_ref = &self.params;
        let peer_ips_ref = &mut self.peer_ips;
        self.peer_stats.retain(|peer_id, peer_stats| {
            if let ConnectionStatus::Disconnected { expire } = peer_stats.status {
                // has the retention period expired?
                if now > expire {
                    // yes, throw it away (but clean up the IP tracking first)
                    Self::remove_ips_for_peer(peer_stats, peer_ips_ref, peer_id);
                    // returning false drops the entry from `peer_stats`
                    return false;
                }
                // we don't decay retained scores, as the peer is not active.
                // this way the peer cannot reset a negative score by simply disconnecting and reconnecting,
                // unless the retention period has elapsed.
                // similarly, a well behaved peer does not lose its score by getting disconnected.
                return true;
            }
            for (topic, topic_stats) in peer_stats.topics.iter_mut() {
                // the topic parameters
                if let Some(topic_params) = params_ref.topics.get(topic) {
                    // decay counters, snapping to zero once below `decay_to_zero`
                    topic_stats.first_message_deliveries *=
                        topic_params.first_message_deliveries_decay;
                    if topic_stats.first_message_deliveries < params_ref.decay_to_zero {
                        topic_stats.first_message_deliveries = 0.0;
                    }
                    topic_stats.mesh_message_deliveries *=
                        topic_params.mesh_message_deliveries_decay;
                    if topic_stats.mesh_message_deliveries < params_ref.decay_to_zero {
                        topic_stats.mesh_message_deliveries = 0.0;
                    }
                    topic_stats.mesh_failure_penalty *= topic_params.mesh_failure_penalty_decay;
                    if topic_stats.mesh_failure_penalty < params_ref.decay_to_zero {
                        topic_stats.mesh_failure_penalty = 0.0;
                    }
                    topic_stats.invalid_message_deliveries *=
                        topic_params.invalid_message_deliveries_decay;
                    if topic_stats.invalid_message_deliveries < params_ref.decay_to_zero {
                        topic_stats.invalid_message_deliveries = 0.0;
                    }
                    // update mesh time and activate mesh message delivery parameter if need be
                    if let MeshStatus::Active {
                        ref mut mesh_time,
                        ref mut graft_time,
                    } = topic_stats.mesh_status
                    {
                        *mesh_time = now.duration_since(*graft_time);
                        if *mesh_time > topic_params.mesh_message_deliveries_activation {
                            topic_stats.mesh_message_deliveries_active = true;
                        }
                    }
                }
            }
            // decay P7 counter
            peer_stats.behaviour_penalty *= params_ref.behaviour_penalty_decay;
            if peer_stats.behaviour_penalty < params_ref.decay_to_zero {
                peer_stats.behaviour_penalty = 0.0;
            }
            // decay slow peer score
            peer_stats.slow_peer_penalty *= params_ref.slow_peer_decay;
            if peer_stats.slow_peer_penalty < params_ref.decay_to_zero {
                peer_stats.slow_peer_penalty = 0.0;
            }
            true
        });
    }
/// Adds a connected peer to [`PeerScore`], initialising with empty ips (ips get added later
/// through add_ip.
pub(crate) fn add_peer(&mut self, peer_id: PeerId) {
let peer_stats = self.peer_stats.entry(peer_id).or_default();
// mark the peer as connected
peer_stats.status = ConnectionStatus::Connected;
}
/// Adds a new ip to a peer, if the peer is not yet known creates a new peer_stats entry for it
pub(crate) fn add_ip(&mut self, peer_id: &PeerId, ip: IpAddr) {
tracing::trace!(peer=%peer_id, %ip, "Add ip for peer");
let peer_stats = self.peer_stats.entry(*peer_id).or_default();
// Mark the peer as connected (currently the default is connected, but we don't want to
// rely on the default).
peer_stats.status = ConnectionStatus::Connected;
// Insert the ip
peer_stats.known_ips.insert(ip);
self.peer_ips.entry(ip).or_default().insert(*peer_id);
}
/// Indicate that a peer has been too slow to consume a message.
pub(crate) fn failed_message_slow_peer(&mut self, peer_id: &PeerId) {
if let Some(peer_stats) = self.peer_stats.get_mut(peer_id) {
peer_stats.slow_peer_penalty += 1.0;
tracing::debug!(peer=%peer_id, %peer_stats.slow_peer_penalty, "[Penalty] Expired message penalty.");
}
}
/// Removes an ip from a peer
pub(crate) fn remove_ip(&mut self, peer_id: &PeerId, ip: &IpAddr) {
if let Some(peer_stats) = self.peer_stats.get_mut(peer_id) {
peer_stats.known_ips.remove(ip);
if let Some(peer_ids) = self.peer_ips.get_mut(ip) {
tracing::trace!(peer=%peer_id, %ip, "Remove ip for peer");
peer_ids.remove(peer_id);
} else {
tracing::trace!(
peer=%peer_id,
%ip,
"No entry in peer_ips for ip which should get removed for peer"
);
}
} else {
tracing::trace!(
peer=%peer_id,
%ip,
"No peer_stats for peer which should remove the ip"
);
}
}
    /// Removes a peer from the score table. This retains peer statistics if their score is
    /// non-positive, so a badly behaved peer cannot reset its score by reconnecting.
    pub(crate) fn remove_peer(&mut self, peer_id: &PeerId) {
        // we only retain non-positive scores of peers
        if self.score(peer_id) > 0f64 {
            if let hash_map::Entry::Occupied(entry) = self.peer_stats.entry(*peer_id) {
                Self::remove_ips_for_peer(entry.get(), &mut self.peer_ips, peer_id);
                entry.remove();
            }
            return;
        }
        // if the peer is retained (including its score) the `first_message_delivery` counters
        // are reset to 0 and mesh delivery penalties applied.
        if let Some(peer_stats) = self.peer_stats.get_mut(peer_id) {
            for (topic, topic_stats) in peer_stats.topics.iter_mut() {
                topic_stats.first_message_deliveries = 0f64;
                if let Some(threshold) = self
                    .params
                    .topics
                    .get(topic)
                    .map(|param| param.mesh_message_deliveries_threshold)
                {
                    if topic_stats.in_mesh()
                        && topic_stats.mesh_message_deliveries_active
                        && topic_stats.mesh_message_deliveries < threshold
                    {
                        // sticky mesh-failure penalty (P3b) for leaving with a deficit
                        let deficit = threshold - topic_stats.mesh_message_deliveries;
                        topic_stats.mesh_failure_penalty += deficit * deficit;
                    }
                }
                topic_stats.mesh_status = MeshStatus::InActive;
                topic_stats.mesh_message_deliveries_active = false;
            }
            // keep the stats around until `retain_score` elapses (pruned in `refresh_scores`)
            peer_stats.status = ConnectionStatus::Disconnected {
                expire: Instant::now() + self.params.retain_score,
            };
        }
    }
/// Handles scoring functionality as a peer GRAFTs to a topic.
pub(crate) fn graft(&mut self, peer_id: &PeerId, topic: impl Into<TopicHash>) {
let topic = topic.into();
if let Some(peer_stats) = self.peer_stats.get_mut(peer_id) {
// if we are scoring the topic, update the mesh status.
if let Some(topic_stats) = peer_stats.stats_or_default_mut(topic, &self.params) {
topic_stats.mesh_status = MeshStatus::new_active();
topic_stats.mesh_message_deliveries_active = false;
}
}
}
    /// Handles scoring functionality as a peer PRUNEs from a topic.
    ///
    /// Applies the sticky mesh-failure penalty (P3b) if the peer leaves the mesh
    /// with an unmet delivery threshold, then marks the topic inactive.
    pub(crate) fn prune(&mut self, peer_id: &PeerId, topic: TopicHash) {
        if let Some(peer_stats) = self.peer_stats.get_mut(peer_id) {
            // if we are scoring the topic, update the mesh status.
            if let Some(topic_stats) = peer_stats.stats_or_default_mut(topic.clone(), &self.params)
            {
                // sticky mesh delivery rate failure penalty
                let threshold = self
                    .params
                    .topics
                    .get(&topic)
                    .expect("Topic must exist in order for there to be topic stats")
                    .mesh_message_deliveries_threshold;
                if topic_stats.mesh_message_deliveries_active
                    && topic_stats.mesh_message_deliveries < threshold
                {
                    let deficit = threshold - topic_stats.mesh_message_deliveries;
                    topic_stats.mesh_failure_penalty += deficit * deficit;
                }
                topic_stats.mesh_message_deliveries_active = false;
                topic_stats.mesh_status = MeshStatus::InActive;
            }
        }
    }
pub(crate) fn validate_message(
&mut self,
from: &PeerId,
msg_id: &MessageId,
topic_hash: &TopicHash,
) {
// adds an empty record with the message id
self.deliveries.entry(msg_id.clone()).or_default();
if let Some(callback) = self.message_delivery_time_callback {
if self
.peer_stats
.get(from)
.and_then(|s| s.topics.get(topic_hash))
.map(|ts| ts.in_mesh())
.unwrap_or(false)
{
callback(from, topic_hash, 0.0);
}
}
}
    /// Records a validated first delivery of a message from `from`.
    ///
    /// Credits the sender with a first-message delivery and retroactively
    /// credits every other peer that forwarded the message while it was still
    /// being validated with a duplicate delivery.
    pub(crate) fn deliver_message(
        &mut self,
        from: &PeerId,
        msg_id: &MessageId,
        topic_hash: &TopicHash,
    ) {
        self.mark_first_message_delivery(from, topic_hash);
        let record = self.deliveries.entry(msg_id.clone()).or_default();
        // this should be the first delivery trace
        if record.status != DeliveryStatus::Unknown {
            tracing::warn!(
                peer=%from,
                status=?record.status,
                first_seen=?record.first_seen.elapsed().as_secs(),
                "Unexpected delivery trace"
            );
            return;
        }
        // mark the message as valid and reward mesh peers that have already forwarded it to us
        record.status = DeliveryStatus::Valid(Instant::now());
        // collect into a Vec to end the borrow of `record` before re-borrowing `self` mutably
        for peer in record.peers.iter().cloned().collect::<Vec<_>>() {
            // this check is to make sure a peer can't send us a message twice and get a double
            // count if it is a first delivery
            if &peer != from {
                self.mark_duplicate_message_delivery(&peer, topic_hash, None);
            }
        }
    }
    /// Similar to `reject_message` except does not require the message id or reason for an invalid message.
    pub(crate) fn reject_invalid_message(&mut self, from: &PeerId, topic_hash: &TopicHash) {
        tracing::debug!(
            peer=%from,
            "[Penalty] Message from peer rejected because of ValidationError or SelfOrigin"
        );
        // only the immediate sender is penalized; no delivery record is consulted
        self.mark_invalid_message_delivery(from, topic_hash);
    }
    /// Rejects a message, penalizing the sender and — depending on `reason` —
    /// every peer that forwarded it while validation was still pending.
    pub(crate) fn reject_message(
        &mut self,
        from: &PeerId,
        msg_id: &MessageId,
        topic_hash: &TopicHash,
        reason: RejectReason,
    ) {
        match reason {
            // these messages are not tracked, but the peer is penalized as they are invalid
            RejectReason::ValidationError(_) | RejectReason::SelfOrigin => {
                self.reject_invalid_message(from, topic_hash);
                return;
            }
            // we ignore those messages, so do nothing.
            RejectReason::BlackListedPeer | RejectReason::BlackListedSource => {
                return;
            }
            _ => {} // the rest are handled after record creation
        }
        let peers: Vec<_> = {
            let record = self.deliveries.entry(msg_id.clone()).or_default();
            // Multiple peers can now reject the same message as we track which peers send us the
            // message. If we have already updated the status, return.
            if record.status != DeliveryStatus::Unknown {
                return;
            }
            if let RejectReason::ValidationIgnored = reason {
                // we were explicitly instructed by the validator to ignore the message but not penalize
                // the peer
                record.status = DeliveryStatus::Ignored;
                record.peers.clear();
                return;
            }
            // mark the message as invalid and penalize peers that have already forwarded it.
            record.status = DeliveryStatus::Invalid;
            // drain the tracked forwarders (also frees this memory early)
            record.peers.drain().collect()
        };
        self.mark_invalid_message_delivery(from, topic_hash);
        for peer_id in peers.iter() {
            self.mark_invalid_message_delivery(peer_id, topic_hash)
        }
    }
pub(crate) fn duplicated_message(
&mut self,
from: &PeerId,
msg_id: &MessageId,
topic_hash: &TopicHash,
) {
let record = self.deliveries.entry(msg_id.clone()).or_default();
if record.peers.get(from).is_some() {
// we have already seen this duplicate!
return;
}
if let Some(callback) = self.message_delivery_time_callback {
let time = if let DeliveryStatus::Valid(validated) = record.status {
validated.elapsed().as_secs_f64()
} else {
0.0
};
if self
.peer_stats
.get(from)
.and_then(|s| s.topics.get(topic_hash))
.map(|ts| ts.in_mesh())
.unwrap_or(false)
{
callback(from, topic_hash, time);
}
}
match record.status {
DeliveryStatus::Unknown => {
// the message is being validated; track the peer delivery and wait for
// the Deliver/Reject notification.
record.peers.insert(*from);
}
DeliveryStatus::Valid(validated) => {
// mark the peer delivery time to only count a duplicate delivery once.
record.peers.insert(*from);
self.mark_duplicate_message_delivery(from, topic_hash, Some(validated));
}
DeliveryStatus::Invalid => {
// we no longer track delivery time
self.mark_invalid_message_delivery(from, topic_hash);
}
DeliveryStatus::Ignored => {
// the message was ignored; do nothing (we don't know if it was valid)
}
}
}
/// Sets the application specific score for a peer. Returns true if the peer is the peer is
/// connected or if the score of the peer is not yet expired and false otherwise.
pub(crate) fn set_application_score(&mut self, peer_id: &PeerId, new_score: f64) -> bool {
if let Some(peer_stats) = self.peer_stats.get_mut(peer_id) {
peer_stats.application_score = new_score;
true
} else {
false
}
}
/// Sets scoring parameters for a topic.
pub(crate) fn set_topic_params(&mut self, topic_hash: TopicHash, params: TopicScoreParams) {
use hash_map::Entry::*;
match self.params.topics.entry(topic_hash.clone()) {
Occupied(mut entry) => {
let first_message_deliveries_cap = params.first_message_deliveries_cap;
let mesh_message_deliveries_cap = params.mesh_message_deliveries_cap;
let old_params = entry.insert(params);
if old_params.first_message_deliveries_cap > first_message_deliveries_cap {
for stats in &mut self.peer_stats.values_mut() {
if let Some(tstats) = stats.topics.get_mut(&topic_hash) {
if tstats.first_message_deliveries > first_message_deliveries_cap {
tstats.first_message_deliveries = first_message_deliveries_cap;
}
}
}
}
if old_params.mesh_message_deliveries_cap > mesh_message_deliveries_cap {
for stats in self.peer_stats.values_mut() {
if let Some(tstats) = stats.topics.get_mut(&topic_hash) {
if tstats.mesh_message_deliveries > mesh_message_deliveries_cap {
tstats.mesh_message_deliveries = mesh_message_deliveries_cap;
}
}
}
}
}
Vacant(entry) => {
entry.insert(params);
}
}
}
    /// Returns the scoring parameters for a topic, if they exist.
    pub(crate) fn get_topic_params(&self, topic_hash: &TopicHash) -> Option<&TopicScoreParams> {
        self.params.topics.get(topic_hash)
    }
    /// Increments the "invalid message deliveries" counter for all scored topics the message
    /// is published in.
    fn mark_invalid_message_delivery(&mut self, peer_id: &PeerId, topic_hash: &TopicHash) {
        if let Some(peer_stats) = self.peer_stats.get_mut(peer_id) {
            // only scored topics get stats (see `stats_or_default_mut`)
            if let Some(topic_stats) =
                peer_stats.stats_or_default_mut(topic_hash.clone(), &self.params)
            {
                tracing::debug!(
                    peer=%peer_id,
                    topic=%topic_hash,
                    "[Penalty] Peer delivered an invalid message in topic and gets penalized \
                    for it",
                );
                topic_stats.invalid_message_deliveries += 1f64;
            }
        }
    }
/// Increments the "first message deliveries" counter for all scored topics the message is
/// published in, as well as the "mesh message deliveries" counter, if the peer is in the
/// mesh for the topic.
fn mark_first_message_delivery(&mut self, peer_id: &PeerId, topic_hash: &TopicHash) {
if let Some(peer_stats) = self.peer_stats.get_mut(peer_id) {
if let Some(topic_stats) =
peer_stats.stats_or_default_mut(topic_hash.clone(), &self.params)
{
let cap = self
.params
.topics
.get(topic_hash)
.expect("Topic must exist if there are known topic_stats")
.first_message_deliveries_cap;
topic_stats.first_message_deliveries =
if topic_stats.first_message_deliveries + 1f64 > cap {
cap
} else {
topic_stats.first_message_deliveries + 1f64
};
if let MeshStatus::Active { .. } = topic_stats.mesh_status {
let cap = self
.params
.topics
.get(topic_hash)
.expect("Topic must exist if there are known topic_stats")
.mesh_message_deliveries_cap;
topic_stats.mesh_message_deliveries =
if topic_stats.mesh_message_deliveries + 1f64 > cap {
cap
} else {
topic_stats.mesh_message_deliveries + 1f64
};
}
}
}
}
    /// Increments the "mesh message deliveries" counter for messages we've seen before, as long the
    /// message was received within the P3 window.
    fn mark_duplicate_message_delivery(
        &mut self,
        peer_id: &PeerId,
        topic_hash: &TopicHash,
        validated_time: Option<Instant>,
    ) {
        if let Some(peer_stats) = self.peer_stats.get_mut(peer_id) {
            // only sample `now` when we will actually compare against the window
            let now = if validated_time.is_some() {
                Some(Instant::now())
            } else {
                None
            };
            if let Some(topic_stats) =
                peer_stats.stats_or_default_mut(topic_hash.clone(), &self.params)
            {
                // only mesh peers are credited with mesh deliveries
                if let MeshStatus::Active { .. } = topic_stats.mesh_status {
                    let topic_params = self
                        .params
                        .topics
                        .get(topic_hash)
                        .expect("Topic must exist if there are known topic_stats");
                    // check against the mesh delivery window -- if the validated time is passed as 0, then
                    // the message was received before we finished validation and thus falls within the mesh
                    // delivery window.
                    let mut falls_in_mesh_deliver_window = true;
                    if let Some(validated_time) = validated_time {
                        if let Some(now) = &now {
                            //should always be true
                            let window_time = validated_time
                                .checked_add(topic_params.mesh_message_deliveries_window)
                                .unwrap_or(*now);
                            if now > &window_time {
                                falls_in_mesh_deliver_window = false;
                            }
                        }
                    }
                    if falls_in_mesh_deliver_window {
                        // capped increment of the P3 counter
                        let cap = topic_params.mesh_message_deliveries_cap;
                        topic_stats.mesh_message_deliveries =
                            if topic_stats.mesh_message_deliveries + 1f64 > cap {
                                cap
                            } else {
                                topic_stats.mesh_message_deliveries + 1f64
                            };
                    }
                }
            }
        }
    }
pub(crate) fn mesh_message_deliveries(&self, peer: &PeerId, topic: &TopicHash) -> Option<f64> {
self.peer_stats
.get(peer)
.and_then(|s| s.topics.get(topic))
.map(|t| t.mesh_message_deliveries)
}
}
/// The reason a Gossipsub message has been rejected.
#[derive(Clone, Copy)]
pub(crate) enum RejectReason {
    /// The message failed the configured validation during decoding.
    ValidationError(ValidationError),
    /// The message source is us.
    SelfOrigin,
    /// The peer that sent the message was blacklisted.
    BlackListedPeer,
    /// The source (from field) of the message was blacklisted.
    BlackListedSource,
    /// The validation was ignored (the peer is not penalized).
    ValidationIgnored,
    /// The validation failed.
    ValidationFailed,
}

View File

@ -0,0 +1,404 @@
// Copyright 2020 Sigma Prime Pty Ltd.
//
// Permission is hereby granted, free of charge, to any person obtaining a
// copy of this software and associated documentation files (the "Software"),
// to deal in the Software without restriction, including without limitation
// the rights to use, copy, modify, merge, publish, distribute, sublicense,
// and/or sell copies of the Software, and to permit persons to whom the
// Software is furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in
// all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
// OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
// DEALINGS IN THE SOFTWARE.
use crate::gossipsub::TopicHash;
use std::collections::{HashMap, HashSet};
use std::net::IpAddr;
use std::time::Duration;
/// The default number of seconds for a decay interval.
const DEFAULT_DECAY_INTERVAL: u64 = 1;
/// The default value below which a decaying counter is considered to have
/// reached zero.
const DEFAULT_DECAY_TO_ZERO: f64 = 0.1;
/// Computes the decay factor for a parameter, assuming the `decay_interval` is 1s
/// and that the value is considered zero once it drops below 0.1
/// (`DEFAULT_DECAY_TO_ZERO`).
pub fn score_parameter_decay(decay: Duration) -> f64 {
    score_parameter_decay_with_base(
        decay,
        Duration::from_secs(DEFAULT_DECAY_INTERVAL),
        DEFAULT_DECAY_TO_ZERO,
    )
}
/// Computes the decay factor for a parameter using `base` as the `decay_interval`.
///
/// The decay is geometric: after `n = decay / base` ticks the counter has been
/// multiplied by the returned factor `n` times, so `factor^n = decay_to_zero`
/// and hence `factor = decay_to_zero^(1/n)`.
pub fn score_parameter_decay_with_base(decay: Duration, base: Duration, decay_to_zero: f64) -> f64 {
    // number of decay ticks that fit into the requested decay duration
    let ticks = decay.as_secs_f64() / base.as_secs_f64();
    decay_to_zero.powf(1f64 / ticks)
}
/// Score cutoffs that gate gossip, publishing, message processing, peer
/// exchange and opportunistic grafting. See [`PeerScoreThresholds::validate`]
/// for the invariants between them.
#[derive(Debug, Clone)]
pub struct PeerScoreThresholds {
    /// The score threshold below which gossip propagation is suppressed;
    /// should be negative.
    pub gossip_threshold: f64,
    /// The score threshold below which we shouldn't publish when using flood
    /// publishing (also applies to fanout peers); should be negative and <= `gossip_threshold`.
    pub publish_threshold: f64,
    /// The score threshold below which message processing is suppressed altogether,
    /// implementing an effective graylist according to peer score; should be negative and
    /// <= `publish_threshold`.
    pub graylist_threshold: f64,
    /// The score threshold below which px will be ignored; this should be positive
    /// and limited to scores attainable by bootstrappers and other trusted nodes.
    pub accept_px_threshold: f64,
    /// The median mesh score threshold before triggering opportunistic
    /// grafting; this should have a small positive value.
    pub opportunistic_graft_threshold: f64,
}
impl Default for PeerScoreThresholds {
fn default() -> Self {
PeerScoreThresholds {
gossip_threshold: -10.0,
publish_threshold: -50.0,
graylist_threshold: -80.0,
accept_px_threshold: 10.0,
opportunistic_graft_threshold: 20.0,
}
}
}
impl PeerScoreThresholds {
    /// Checks the invariants the thresholds must satisfy, returning a
    /// description of the first violated one.
    pub fn validate(&self) -> Result<(), &'static str> {
        // Each entry pairs a violation predicate with its error message;
        // checks run in order and the first failure wins.
        let checks = [
            (
                self.gossip_threshold > 0f64,
                "invalid gossip threshold; it must be <= 0",
            ),
            (
                self.publish_threshold > 0f64 || self.publish_threshold > self.gossip_threshold,
                "Invalid publish threshold; it must be <= 0 and <= gossip threshold",
            ),
            (
                self.graylist_threshold > 0f64 || self.graylist_threshold > self.publish_threshold,
                "Invalid graylist threshold; it must be <= 0 and <= publish threshold",
            ),
            (
                self.accept_px_threshold < 0f64,
                "Invalid accept px threshold; it must be >= 0",
            ),
            (
                self.opportunistic_graft_threshold < 0f64,
                "Invalid opportunistic grafting threshold; it must be >= 0",
            ),
        ];
        for (violated, message) in checks {
            if violated {
                return Err(message);
            }
        }
        Ok(())
    }
}
/// Global and per-topic parameters that drive the peer score computation.
#[derive(Debug, Clone)]
pub struct PeerScoreParams {
    /// Score parameters per topic.
    pub topics: HashMap<TopicHash, TopicScoreParams>,
    /// Aggregate topic score cap; this limits the total contribution of topics towards a positive
    /// score. It must be positive (or 0 for no cap).
    pub topic_score_cap: f64,
    /// P5: Application-specific peer scoring
    pub app_specific_weight: f64,
    /// P6: IP-colocation factor.
    /// The parameter has an associated counter which counts the number of peers with the same IP.
    /// If the number of peers in the same IP exceeds `ip_colocation_factor_threshold`, then the value
    /// is the square of the difference, i.e. `(peers_in_same_ip - ip_colocation_threshold)^2`.
    /// If the number of peers in the same IP is less than the threshold, then the value is 0.
    /// The weight of the parameter MUST be negative, unless you want to disable for testing.
    /// Note: In order to simulate many IPs in a manageable manner when testing, you can set the weight to 0
    /// thus disabling the IP colocation penalty.
    pub ip_colocation_factor_weight: f64,
    pub ip_colocation_factor_threshold: f64,
    pub ip_colocation_factor_whitelist: HashSet<IpAddr>,
    /// P7: behavioural pattern penalties.
    /// This parameter has an associated counter which tracks misbehaviour as detected by the
    /// router. The router currently applies penalties for the following behaviors:
    /// - attempting to re-graft before the prune backoff time has elapsed.
    /// - not following up in IWANT requests for messages advertised with IHAVE.
    ///
    /// The value of the parameter is the square of the counter over the threshold, which decays
    /// with `behaviour_penalty_decay`.
    /// The weight of the parameter MUST be negative (or zero to disable).
    pub behaviour_penalty_weight: f64,
    pub behaviour_penalty_threshold: f64,
    pub behaviour_penalty_decay: f64,
    /// The decay interval for parameter counters.
    pub decay_interval: Duration,
    /// Counter value below which it is considered 0.
    pub decay_to_zero: f64,
    /// Time to remember counters for a disconnected peer.
    pub retain_score: Duration,
    /// Slow peer penalty conditions
    pub slow_peer_weight: f64,
    pub slow_peer_threshold: f64,
    pub slow_peer_decay: f64,
}
impl Default for PeerScoreParams {
    /// Default weights/decays: no topics registered, positive app-specific
    /// weight, negative colocation and behaviour-penalty weights, and a
    /// one-hour topic score cap and score retention.
    fn default() -> Self {
        Self {
            topics: HashMap::new(),
            topic_score_cap: 3600.0,
            app_specific_weight: 10.0,
            // P6 (IP colocation) — negative weight, cliff at 10 peers per IP
            ip_colocation_factor_weight: -5.0,
            ip_colocation_factor_threshold: 10.0,
            ip_colocation_factor_whitelist: HashSet::new(),
            // P7 (behaviour penalty)
            behaviour_penalty_weight: -10.0,
            behaviour_penalty_threshold: 0.0,
            behaviour_penalty_decay: 0.2,
            // decay bookkeeping
            decay_interval: Duration::from_secs(DEFAULT_DECAY_INTERVAL),
            decay_to_zero: DEFAULT_DECAY_TO_ZERO,
            retain_score: Duration::from_secs(3600),
            // slow peer penalty
            slow_peer_weight: -0.2,
            slow_peer_threshold: 0.0,
            slow_peer_decay: 0.2,
        }
    }
}
/// Peer score parameter validation
impl PeerScoreParams {
    /// Checks every parameter against its documented range, returning a
    /// human-readable description of the first violation encountered.
    pub fn validate(&self) -> Result<(), String> {
        // Each topic's parameter set must be internally consistent.
        for (topic, params) in &self.topics {
            params
                .validate()
                .map_err(|e| format!("Invalid score parameters for topic {topic}: {e}"))?;
        }
        // The topic score cap must be 0 (disabled) or positive.
        if self.topic_score_cap < 0f64 {
            return Err("Invalid topic score cap; must be positive (or 0 for no cap)".into());
        }
        // P6: IP colocation factor must be a penalty (negative weight) or disabled.
        if self.ip_colocation_factor_weight > 0f64 {
            return Err(
                "Invalid ip_colocation_factor_weight; must be negative (or 0 to disable)".into(),
            );
        }
        let ip_colocation_enabled = self.ip_colocation_factor_weight != 0f64;
        if ip_colocation_enabled && self.ip_colocation_factor_threshold < 1f64 {
            return Err("Invalid ip_colocation_factor_threshold; must be at least 1".into());
        }
        // P7: behaviour penalty must also be a penalty or disabled.
        if self.behaviour_penalty_weight > 0f64 {
            return Err(
                "Invalid behaviour_penalty_weight; must be negative (or 0 to disable)".into(),
            );
        }
        let behaviour_penalty_enabled = self.behaviour_penalty_weight != 0f64;
        let decay = self.behaviour_penalty_decay;
        if behaviour_penalty_enabled && !(decay > 0f64 && decay < 1f64) {
            return Err("invalid behaviour_penalty_decay; must be between 0 and 1".into());
        }
        if self.behaviour_penalty_threshold < 0f64 {
            return Err("invalid behaviour_penalty_threshold; must be >= 0".into());
        }
        // Global decay parameters.
        if self.decay_interval < Duration::from_secs(1) {
            return Err("Invalid decay_interval; must be at least 1s".into());
        }
        if self.decay_to_zero <= 0f64 || self.decay_to_zero >= 1f64 {
            return Err("Invalid decay_to_zero; must be between 0 and 1".into());
        }
        // no need to check the score retention; a value of 0 means that we don't retain scores
        Ok(())
    }
}
/// Per-topic scoring parameters (P1-P4 of the gossipsub v1.1 scoring function).
/// All weights multiply their parameter's counter value into the topic score,
/// and the topic score in turn is scaled by `topic_weight`.
#[derive(Debug, Clone)]
pub struct TopicScoreParams {
    /// The weight of the topic.
    pub topic_weight: f64,
    /// P1: time in the mesh
    /// This is the time the peer has been grafted in the mesh.
    /// The value of the parameter is the `time/time_in_mesh_quantum`, capped by `time_in_mesh_cap`
    /// The weight of the parameter must be positive (or zero to disable).
    pub time_in_mesh_weight: f64,
    /// Quantum used to convert elapsed mesh time into P1 counter units.
    pub time_in_mesh_quantum: Duration,
    /// Maximum value the P1 counter may reach.
    pub time_in_mesh_cap: f64,
    /// P2: first message deliveries
    /// This is the number of message deliveries in the topic.
    /// The value of the parameter is a counter, decaying with `first_message_deliveries_decay`, and capped
    /// by `first_message_deliveries_cap`.
    /// The weight of the parameter MUST be positive (or zero to disable).
    pub first_message_deliveries_weight: f64,
    /// Decay factor applied to the P2 counter each decay interval.
    pub first_message_deliveries_decay: f64,
    /// Maximum value the P2 counter may reach.
    pub first_message_deliveries_cap: f64,
    /// P3: mesh message deliveries
    /// This is the number of message deliveries in the mesh, within the
    /// `mesh_message_deliveries_window` of message validation; deliveries during validation also
    /// count and are retroactively applied when validation succeeds.
    /// This window accounts for the minimum time before a hostile mesh peer trying to game the
    /// score could replay back a valid message we just sent them.
    /// It effectively tracks first and near-first deliveries, ie a message seen from a mesh peer
    /// before we have forwarded it to them.
    /// The parameter has an associated counter, decaying with `mesh_message_deliveries_decay`.
    /// If the counter exceeds the threshold, its value is 0.
    /// If the counter is below the `mesh_message_deliveries_threshold`, the value is the square of
    /// the deficit, ie `(message_deliveries_threshold - counter)^2`
    /// The penalty is only activated after `mesh_message_deliveries_activation` time in the mesh.
    /// The weight of the parameter MUST be negative (or zero to disable).
    pub mesh_message_deliveries_weight: f64,
    /// Decay factor applied to the P3 counter each decay interval.
    pub mesh_message_deliveries_decay: f64,
    /// Maximum value the P3 counter may reach.
    pub mesh_message_deliveries_cap: f64,
    /// Delivery count below which the squared-deficit penalty applies.
    pub mesh_message_deliveries_threshold: f64,
    /// Window after first delivery in which duplicates still count as near-first.
    pub mesh_message_deliveries_window: Duration,
    /// Grace period in the mesh before the P3 penalty can activate.
    pub mesh_message_deliveries_activation: Duration,
    /// P3b: sticky mesh propagation failures
    /// This is a sticky penalty that applies when a peer gets pruned from the mesh with an active
    /// mesh message delivery penalty.
    /// The weight of the parameter MUST be negative (or zero to disable)
    pub mesh_failure_penalty_weight: f64,
    /// Decay factor applied to the P3b counter each decay interval.
    pub mesh_failure_penalty_decay: f64,
    /// P4: invalid messages
    /// This is the number of invalid messages in the topic.
    /// The value of the parameter is the square of the counter, decaying with
    /// `invalid_message_deliveries_decay`.
    /// The weight of the parameter MUST be negative (or zero to disable).
    pub invalid_message_deliveries_weight: f64,
    /// Decay factor applied to the P4 counter each decay interval.
    pub invalid_message_deliveries_decay: f64,
}
/// NOTE: The topic score parameters are very network specific.
/// For any production system, these values should be manually set.
impl Default for TopicScoreParams {
fn default() -> Self {
TopicScoreParams {
topic_weight: 0.5,
// P1
time_in_mesh_weight: 1.0,
time_in_mesh_quantum: Duration::from_millis(1),
time_in_mesh_cap: 3600.0,
// P2
first_message_deliveries_weight: 1.0,
first_message_deliveries_decay: 0.5,
first_message_deliveries_cap: 2000.0,
// P3
mesh_message_deliveries_weight: -1.0,
mesh_message_deliveries_decay: 0.5,
mesh_message_deliveries_cap: 100.0,
mesh_message_deliveries_threshold: 20.0,
mesh_message_deliveries_window: Duration::from_millis(10),
mesh_message_deliveries_activation: Duration::from_secs(5),
// P3b
mesh_failure_penalty_weight: -1.0,
mesh_failure_penalty_decay: 0.5,
// P4
invalid_message_deliveries_weight: -1.0,
invalid_message_deliveries_decay: 0.3,
}
}
}
impl TopicScoreParams {
    /// Returns true when `decay` is a usable decay factor, i.e. strictly
    /// between 0 and 1.
    fn valid_decay(decay: f64) -> bool {
        decay > 0f64 && decay < 1f64
    }

    /// Validates this topic's parameters against their documented ranges,
    /// returning a description of the first violation found. Checks for a
    /// parameter are skipped when its weight is 0 (disabled).
    pub fn validate(&self) -> Result<(), &'static str> {
        // The topic weight scales the whole topic score and must not be negative.
        if self.topic_weight < 0f64 {
            return Err("invalid topic weight; must be >= 0");
        }
        // P1: time in mesh.
        if self.time_in_mesh_quantum == Duration::from_secs(0) {
            return Err("Invalid time_in_mesh_quantum; must be non zero");
        }
        if self.time_in_mesh_weight < 0f64 {
            return Err("Invalid time_in_mesh_weight; must be positive (or 0 to disable)");
        }
        let p1_enabled = self.time_in_mesh_weight != 0f64;
        if p1_enabled && self.time_in_mesh_cap <= 0f64 {
            return Err("Invalid time_in_mesh_cap must be positive");
        }
        // P2: first message deliveries.
        if self.first_message_deliveries_weight < 0f64 {
            return Err(
                "Invalid first_message_deliveries_weight; must be positive (or 0 to disable)",
            );
        }
        let p2_enabled = self.first_message_deliveries_weight != 0f64;
        if p2_enabled && !Self::valid_decay(self.first_message_deliveries_decay) {
            return Err("Invalid first_message_deliveries_decay; must be between 0 and 1");
        }
        if p2_enabled && self.first_message_deliveries_cap <= 0f64 {
            return Err("Invalid first_message_deliveries_cap must be positive");
        }
        // P3: mesh message deliveries (a penalty, hence negative weight).
        if self.mesh_message_deliveries_weight > 0f64 {
            return Err(
                "Invalid mesh_message_deliveries_weight; must be negative (or 0 to disable)",
            );
        }
        let p3_enabled = self.mesh_message_deliveries_weight != 0f64;
        if p3_enabled && !Self::valid_decay(self.mesh_message_deliveries_decay) {
            return Err("Invalid mesh_message_deliveries_decay; must be between 0 and 1");
        }
        if p3_enabled && self.mesh_message_deliveries_cap <= 0f64 {
            return Err("Invalid mesh_message_deliveries_cap must be positive");
        }
        if p3_enabled && self.mesh_message_deliveries_threshold <= 0f64 {
            return Err("Invalid mesh_message_deliveries_threshold; must be positive");
        }
        if p3_enabled && self.mesh_message_deliveries_activation < Duration::from_secs(1) {
            return Err("Invalid mesh_message_deliveries_activation; must be at least 1s");
        }
        // P3b: sticky mesh failure penalty.
        if self.mesh_failure_penalty_weight > 0f64 {
            return Err("Invalid mesh_failure_penalty_weight; must be negative (or 0 to disable)");
        }
        if self.mesh_failure_penalty_weight != 0f64
            && !Self::valid_decay(self.mesh_failure_penalty_decay)
        {
            return Err("Invalid mesh_failure_penalty_decay; must be between 0 and 1");
        }
        // P4: invalid messages. Note the decay is validated even when the
        // weight is 0, matching the upstream behaviour.
        if self.invalid_message_deliveries_weight > 0f64 {
            return Err(
                "Invalid invalid_message_deliveries_weight; must be negative (or 0 to disable)",
            );
        }
        if !Self::valid_decay(self.invalid_message_deliveries_decay) {
            return Err("Invalid invalid_message_deliveries_decay; must be between 0 and 1");
        }
        Ok(())
    }
}

View File

@ -0,0 +1,978 @@
// Copyright 2020 Sigma Prime Pty Ltd.
//
// Permission is hereby granted, free of charge, to any person obtaining a
// copy of this software and associated documentation files (the "Software"),
// to deal in the Software without restriction, including without limitation
// the rights to use, copy, modify, merge, publish, distribute, sublicense,
// and/or sell copies of the Software, and to permit persons to whom the
// Software is furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in
// all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
// OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
// DEALINGS IN THE SOFTWARE.
/// A collection of unit tests mostly ported from the go implementation.
use super::*;
use crate::gossipsub::types::RawMessage;
use crate::gossipsub::{IdentTopic as Topic, Message};
// estimates a value within variance
/// Returns true when `value` lies strictly inside the relative band
/// `expected * (1 ± variance)`, flipping the bounds when `expected` is
/// negative so the band stays oriented correctly.
fn within_variance(value: f64, expected: f64, variance: f64) -> bool {
    let (lower, upper) = if expected >= 0.0 {
        (expected * (1.0 - variance), expected * (1.0 + variance))
    } else {
        (expected * (1.0 + variance), expected * (1.0 - variance))
    };
    value > lower && value < upper
}
// generates a random gossipsub message with sequence number i
/// Builds a pre-validated gossipsub message on the "test" topic from a random
/// source peer, carrying sequence number `seq`, and returns it together with
/// its default message id.
fn make_test_message(seq: u64) -> (MessageId, RawMessage) {
    let raw_message = RawMessage {
        source: Some(PeerId::random()),
        data: vec![12, 34, 56],
        sequence_number: Some(seq),
        topic: Topic::new("test").hash(),
        signature: None,
        key: None,
        validated: true,
    };
    // The compact `Message` mirrors the raw message's identifying fields so the
    // id computed here matches what the router would compute.
    let message = Message {
        source: raw_message.source,
        data: raw_message.data.clone(),
        sequence_number: raw_message.sequence_number,
        topic: raw_message.topic.clone(),
    };
    let message_id = default_message_id()(&message);
    (message_id, raw_message)
}
/// Returns the default message-id function: base58 of the source peer id
/// (or of a fixed placeholder id when the source is absent) concatenated with
/// the sequence number (0 when absent).
fn default_message_id() -> fn(&Message) -> MessageId {
    |message| {
        let source = match message.source.as_ref() {
            Some(peer_id) => peer_id.to_base58(),
            // No source: substitute a fixed, valid placeholder peer id.
            None => PeerId::from_bytes(&[0, 1, 0])
                .expect("Valid peer id")
                .to_base58(),
        };
        let seq = message.sequence_number.unwrap_or_default();
        MessageId::from(format!("{source}{seq}"))
    }
}
// P1 (time in mesh): after grafting, the score should accrue roughly one
// counter unit per `time_in_mesh_quantum` elapsed, scaled by the P1 and
// topic weights.
#[test]
fn test_score_time_in_mesh() {
    // Create parameters with reasonable default values
    let topic = Topic::new("test");
    let topic_hash = topic.hash();
    let mut params = PeerScoreParams {
        topic_score_cap: 1000.0,
        ..Default::default()
    };
    // Cap is high enough (3600) that it cannot clip the ~200 units expected here.
    let topic_params = TopicScoreParams {
        topic_weight: 0.5,
        time_in_mesh_weight: 1.0,
        time_in_mesh_quantum: Duration::from_millis(1),
        time_in_mesh_cap: 3600.0,
        ..Default::default()
    };
    params.topics.insert(topic_hash, topic_params.clone());
    let peer_id = PeerId::random();
    let mut peer_score = PeerScore::new(params);
    // Peer score should start at 0
    peer_score.add_peer(peer_id);
    let score = peer_score.score(&peer_id);
    assert!(
        score == 0.0,
        "expected score to start at zero. Score found: {score}"
    );
    // The time in mesh depends on how long the peer has been grafted
    peer_score.graft(&peer_id, topic);
    let elapsed = topic_params.time_in_mesh_quantum * 200;
    std::thread::sleep(elapsed);
    peer_score.refresh_scores();
    let score = peer_score.score(&peer_id);
    let expected = topic_params.topic_weight
        * topic_params.time_in_mesh_weight
        * (elapsed.as_millis() / topic_params.time_in_mesh_quantum.as_millis()) as f64;
    // Sleep may overshoot, so only a lower bound is asserted, not equality.
    assert!(
        score >= expected,
        "The score: {score} should be greater than or equal to: {expected}"
    );
}
// P1 cap: with a low `time_in_mesh_cap`, sleeping well past the cap should
// yield a score pinned near `topic_weight * weight * cap` rather than growing
// with elapsed time.
#[test]
fn test_score_time_in_mesh_cap() {
    // Create parameters with reasonable default values
    let topic = Topic::new("test");
    let topic_hash = topic.hash();
    let mut params = PeerScoreParams::default();
    // Cap of 10 is deliberately below the ~40 quantum units we will sleep for.
    let topic_params = TopicScoreParams {
        topic_weight: 0.5,
        time_in_mesh_weight: 1.0,
        time_in_mesh_quantum: Duration::from_millis(1),
        time_in_mesh_cap: 10.0,
        ..Default::default()
    };
    params.topics.insert(topic_hash, topic_params.clone());
    let peer_id = PeerId::random();
    let mut peer_score = PeerScore::new(params);
    // Peer score should start at 0
    peer_score.add_peer(peer_id);
    let score = peer_score.score(&peer_id);
    assert!(
        score == 0.0,
        "expected score to start at zero. Score found: {score}"
    );
    // The time in mesh depends on how long the peer has been grafted
    peer_score.graft(&peer_id, topic);
    let elapsed = topic_params.time_in_mesh_quantum * 40;
    std::thread::sleep(elapsed);
    peer_score.refresh_scores();
    let score = peer_score.score(&peer_id);
    let expected = topic_params.topic_weight
        * topic_params.time_in_mesh_weight
        * topic_params.time_in_mesh_cap;
    // Timing jitter makes an exact match unreliable; accept a 50% band.
    let variance = 0.5;
    assert!(
        within_variance(score, expected, variance),
        "The score: {} should be within {} of {}",
        score,
        score * variance,
        expected
    );
}
// P2 (first message deliveries): each first delivery increments the counter
// by one, so with decay 1.0 (no decay) and all other weights zeroed, the
// score equals `topic_weight * p2_weight * message_count`.
#[test]
fn test_score_first_message_deliveries() {
    // Create parameters with reasonable default values
    let topic = Topic::new("test");
    let topic_hash = topic.hash();
    let mut params = PeerScoreParams::default();
    let topic_params = TopicScoreParams {
        topic_weight: 1.0,
        first_message_deliveries_weight: 1.0,
        first_message_deliveries_decay: 1.0,
        first_message_deliveries_cap: 2000.0,
        // Zero out P1 so only P2 contributes to the score.
        time_in_mesh_weight: 0.0,
        ..Default::default()
    };
    params.topics.insert(topic_hash, topic_params.clone());
    let peer_id = PeerId::random();
    let mut peer_score = PeerScore::new(params);
    // Peer score should start at 0
    peer_score.add_peer(peer_id);
    peer_score.graft(&peer_id, topic);
    // deliver a bunch of messages from the peer
    let messages = 100;
    for seq in 0..messages {
        let (id, msg) = make_test_message(seq);
        peer_score.validate_message(&peer_id, &id, &msg.topic);
        peer_score.deliver_message(&peer_id, &id, &msg.topic);
    }
    peer_score.refresh_scores();
    let score = peer_score.score(&peer_id);
    let expected =
        topic_params.topic_weight * topic_params.first_message_deliveries_weight * messages as f64;
    assert!(score == expected, "The score: {score} should be {expected}");
}
// P2 cap: deliver more messages (100) than `first_message_deliveries_cap`
// (50); the counter must clip at the cap, so the score reflects the cap and
// not the delivery count.
#[test]
fn test_score_first_message_deliveries_cap() {
    // Create parameters with reasonable default values
    let topic = Topic::new("test");
    let topic_hash = topic.hash();
    let mut params = PeerScoreParams::default();
    let topic_params = TopicScoreParams {
        topic_weight: 1.0,
        first_message_deliveries_weight: 1.0,
        first_message_deliveries_decay: 1.0, // test without decay
        first_message_deliveries_cap: 50.0,
        // Zero out P1 so only P2 contributes to the score.
        time_in_mesh_weight: 0.0,
        ..Default::default()
    };
    params.topics.insert(topic_hash, topic_params.clone());
    let peer_id = PeerId::random();
    let mut peer_score = PeerScore::new(params);
    // Peer score should start at 0
    peer_score.add_peer(peer_id);
    peer_score.graft(&peer_id, topic);
    // deliver a bunch of messages from the peer
    let messages = 100;
    for seq in 0..messages {
        let (id, msg) = make_test_message(seq);
        peer_score.validate_message(&peer_id, &id, &msg.topic);
        peer_score.deliver_message(&peer_id, &id, &msg.topic);
    }
    peer_score.refresh_scores();
    let score = peer_score.score(&peer_id);
    let expected = topic_params.topic_weight
        * topic_params.first_message_deliveries_weight
        * topic_params.first_message_deliveries_cap;
    assert!(score == expected, "The score: {score} should be {expected}");
}
// P2 decay: every `refresh_scores` call multiplies the P2 counter by the
// decay factor, so after the initial refresh plus N more refreshes the score
// is `count * decay^(N+1)` scaled by the weights.
#[test]
fn test_score_first_message_deliveries_decay() {
    // Create parameters with reasonable default values
    let topic = Topic::new("test");
    let topic_hash = topic.hash();
    let mut params = PeerScoreParams::default();
    let topic_params = TopicScoreParams {
        topic_weight: 1.0,
        first_message_deliveries_weight: 1.0,
        first_message_deliveries_decay: 0.9, // decay 10% per decay interval
        first_message_deliveries_cap: 2000.0,
        // Zero out P1 so only P2 contributes to the score.
        time_in_mesh_weight: 0.0,
        ..Default::default()
    };
    params.topics.insert(topic_hash, topic_params.clone());
    let peer_id = PeerId::random();
    let mut peer_score = PeerScore::new(params);
    peer_score.add_peer(peer_id);
    peer_score.graft(&peer_id, topic);
    // deliver a bunch of messages from the peer
    let messages = 100;
    for seq in 0..messages {
        let (id, msg) = make_test_message(seq);
        peer_score.validate_message(&peer_id, &id, &msg.topic);
        peer_score.deliver_message(&peer_id, &id, &msg.topic);
    }
    peer_score.refresh_scores();
    let score = peer_score.score(&peer_id);
    // The first refresh already applied the decay once.
    let mut expected = topic_params.topic_weight
        * topic_params.first_message_deliveries_weight
        * topic_params.first_message_deliveries_decay
        * messages as f64;
    assert!(score == expected, "The score: {score} should be {expected}");
    // refreshing the scores applies the decay param
    let decay_intervals = 10;
    for _ in 0..decay_intervals {
        peer_score.refresh_scores();
        expected *= topic_params.first_message_deliveries_decay;
    }
    let score = peer_score.score(&peer_id);
    assert!(score == expected, "The score: {score} should be {expected}");
}
// P3 (mesh message deliveries): first and within-window duplicate deliveries
// count towards the mesh-delivery counter; duplicates outside the window do
// not, leaving that peer with the full squared-deficit penalty.
#[test]
fn test_score_mesh_message_deliveries() {
    // Create parameters with reasonable default values
    let topic = Topic::new("test");
    let topic_hash = topic.hash();
    let mut params = PeerScoreParams::default();
    // Only P3 is active; all other parameter weights are zeroed.
    let topic_params = TopicScoreParams {
        topic_weight: 1.0,
        mesh_message_deliveries_weight: -1.0,
        mesh_message_deliveries_activation: Duration::from_secs(1),
        mesh_message_deliveries_window: Duration::from_millis(10),
        mesh_message_deliveries_threshold: 20.0,
        mesh_message_deliveries_cap: 100.0,
        mesh_message_deliveries_decay: 1.0,
        first_message_deliveries_weight: 0.0,
        time_in_mesh_weight: 0.0,
        mesh_failure_penalty_weight: 0.0,
        ..Default::default()
    };
    params.topics.insert(topic_hash, topic_params.clone());
    let mut peer_score = PeerScore::new(params);
    // peer A always delivers the message first.
    // peer B delivers next (within the delivery window).
    // peer C delivers outside the delivery window.
    // we expect peers A and B to have a score of zero, since all other parameter weights are zero.
    // Peer C should have a negative score.
    let peer_id_a = PeerId::random();
    let peer_id_b = PeerId::random();
    let peer_id_c = PeerId::random();
    let peers = vec![peer_id_a, peer_id_b, peer_id_c];
    for peer_id in &peers {
        peer_score.add_peer(*peer_id);
        peer_score.graft(peer_id, topic.clone());
    }
    // assert that nobody has been penalized yet for not delivering messages before activation time
    peer_score.refresh_scores();
    for peer_id in &peers {
        let score = peer_score.score(peer_id);
        assert!(
            score >= 0.0,
            "expected no mesh delivery penalty before activation time, got score {score}"
        );
    }
    // wait for the activation time to kick in
    std::thread::sleep(topic_params.mesh_message_deliveries_activation);
    // deliver a bunch of messages from peer A, with duplicates within the window from peer B,
    // and duplicates outside the window from peer C.
    let messages = 100;
    let mut messages_to_send = Vec::new();
    for seq in 0..messages {
        let (id, msg) = make_test_message(seq);
        peer_score.validate_message(&peer_id_a, &id, &msg.topic);
        peer_score.deliver_message(&peer_id_a, &id, &msg.topic);
        peer_score.duplicated_message(&peer_id_b, &id, &msg.topic);
        messages_to_send.push((id, msg));
    }
    // Sleep past the delivery window (plus margin) so C's duplicates arrive late.
    std::thread::sleep(topic_params.mesh_message_deliveries_window + Duration::from_millis(20));
    for (id, msg) in messages_to_send {
        peer_score.duplicated_message(&peer_id_c, &id, &msg.topic);
    }
    peer_score.refresh_scores();
    let score_a = peer_score.score(&peer_id_a);
    let score_b = peer_score.score(&peer_id_b);
    let score_c = peer_score.score(&peer_id_c);
    assert!(
        score_a >= 0.0,
        "expected non-negative score for Peer A, got score {score_a}"
    );
    assert!(
        score_b >= 0.0,
        "expected non-negative score for Peer B, got score {score_b}"
    );
    // the penalty is the difference between the threshold and the actual mesh deliveries, squared.
    // since we didn't deliver anything, this is just the value of the threshold
    let penalty = topic_params.mesh_message_deliveries_threshold
        * topic_params.mesh_message_deliveries_threshold;
    let expected =
        topic_params.topic_weight * topic_params.mesh_message_deliveries_weight * penalty;
    assert!(score_c == expected, "Score: {score_c}. Expected {expected}");
}
// P3 decay: an initially healthy mesh-delivery counter decays below the
// threshold after enough refreshes, at which point the squared deficit
// becomes a penalty.
#[test]
fn test_score_mesh_message_deliveries_decay() {
    // Create parameters with reasonable default values
    let topic = Topic::new("test");
    let topic_hash = topic.hash();
    let mut params = PeerScoreParams::default();
    // Activation of 0 means the P3 penalty can apply immediately.
    let topic_params = TopicScoreParams {
        topic_weight: 1.0,
        mesh_message_deliveries_weight: -1.0,
        mesh_message_deliveries_activation: Duration::from_secs(0),
        mesh_message_deliveries_window: Duration::from_millis(10),
        mesh_message_deliveries_threshold: 20.0,
        mesh_message_deliveries_cap: 100.0,
        mesh_message_deliveries_decay: 0.9,
        first_message_deliveries_weight: 0.0,
        time_in_mesh_weight: 0.0,
        time_in_mesh_quantum: Duration::from_secs(1),
        mesh_failure_penalty_weight: 0.0,
        ..Default::default()
    };
    params.topics.insert(topic_hash, topic_params.clone());
    let mut peer_score = PeerScore::new(params);
    let peer_id_a = PeerId::random();
    peer_score.add_peer(peer_id_a);
    peer_score.graft(&peer_id_a, topic);
    // deliver a bunch of messages from peer A
    let messages = 100;
    for seq in 0..messages {
        let (id, msg) = make_test_message(seq);
        peer_score.validate_message(&peer_id_a, &id, &msg.topic);
        peer_score.deliver_message(&peer_id_a, &id, &msg.topic);
    }
    // we should have a positive score, since we delivered more messages than the threshold
    peer_score.refresh_scores();
    let score_a = peer_score.score(&peer_id_a);
    assert!(
        score_a >= 0.0,
        "expected non-negative score for Peer A, got score {score_a}"
    );
    // The first refresh above already decayed the counter once; the loop below
    // tracks 20 further refreshes, i.e. decay^21 in total.
    let mut decayed_delivery_count = (messages as f64) * topic_params.mesh_message_deliveries_decay;
    for _ in 0..20 {
        peer_score.refresh_scores();
        decayed_delivery_count *= topic_params.mesh_message_deliveries_decay;
    }
    let score_a = peer_score.score(&peer_id_a);
    // the penalty is the difference between the threshold and the (decayed) mesh deliveries, squared.
    let deficit = topic_params.mesh_message_deliveries_threshold - decayed_delivery_count;
    let penalty = deficit * deficit;
    let expected =
        topic_params.topic_weight * topic_params.mesh_message_deliveries_weight * penalty;
    assert_eq!(score_a, expected, "Invalid score");
}
// P3b (mesh failure penalty): pruning a peer whose mesh deliveries are under
// the threshold converts the squared deficit into a sticky penalty under
// `mesh_failure_penalty_weight`.
#[test]
fn test_score_mesh_failure_penalty() {
    // Create parameters with reasonable default values
    let topic = Topic::new("test");
    let topic_hash = topic.hash();
    let mut params = PeerScoreParams::default();
    let topic_params = TopicScoreParams {
        // the mesh failure penalty is applied when a peer is pruned while their
        // mesh deliveries are under the threshold.
        // for this test, we set the mesh delivery threshold, but set
        // mesh_message_deliveries to zero, so the only affect on the score
        // is from the mesh failure penalty
        topic_weight: 1.0,
        mesh_message_deliveries_weight: 0.0,
        mesh_message_deliveries_activation: Duration::from_secs(0),
        mesh_message_deliveries_window: Duration::from_millis(10),
        mesh_message_deliveries_threshold: 20.0,
        mesh_message_deliveries_cap: 100.0,
        mesh_message_deliveries_decay: 1.0,
        first_message_deliveries_weight: 0.0,
        time_in_mesh_weight: 0.0,
        time_in_mesh_quantum: Duration::from_secs(1),
        mesh_failure_penalty_weight: -1.0,
        mesh_failure_penalty_decay: 1.0,
        ..Default::default()
    };
    params.topics.insert(topic_hash, topic_params.clone());
    let mut peer_score = PeerScore::new(params);
    let peer_id_a = PeerId::random();
    let peer_id_b = PeerId::random();
    let peers = vec![peer_id_a, peer_id_b];
    for peer_id in &peers {
        peer_score.add_peer(*peer_id);
        peer_score.graft(peer_id, topic.clone());
    }
    // deliver a bunch of messages from peer A
    let messages = 100;
    for seq in 0..messages {
        let (id, msg) = make_test_message(seq);
        peer_score.validate_message(&peer_id_a, &id, &msg.topic);
        peer_score.deliver_message(&peer_id_a, &id, &msg.topic);
    }
    // peers A and B should both have zero scores, since the failure penalty hasn't been applied yet
    peer_score.refresh_scores();
    let score_a = peer_score.score(&peer_id_a);
    let score_b = peer_score.score(&peer_id_b);
    assert!(
        score_a >= 0.0,
        "expected non-negative score for Peer A, got score {score_a}"
    );
    assert!(
        score_b >= 0.0,
        "expected non-negative score for Peer B, got score {score_b}"
    );
    // prune peer B to apply the penalty
    peer_score.prune(&peer_id_b, topic.hash());
    peer_score.refresh_scores();
    let score_a = peer_score.score(&peer_id_a);
    // Peer A was not pruned, so no penalty applies to it.
    assert_eq!(score_a, 0.0, "expected Peer A to have a 0");
    // penalty calculation is the same as for mesh_message_deliveries, but multiplied by
    // mesh_failure_penalty_weigh
    // instead of mesh_message_deliveries_weight
    let penalty = topic_params.mesh_message_deliveries_threshold
        * topic_params.mesh_message_deliveries_threshold;
    let expected = topic_params.topic_weight * topic_params.mesh_failure_penalty_weight * penalty;
    let score_b = peer_score.score(&peer_id_b);
    assert_eq!(score_b, expected, "Peer B should have expected score",);
}
// P4 (invalid message deliveries): the penalty is quadratic in the number of
// rejected messages, so 100 rejections contribute `-(100^2)` scaled by the
// weights.
#[test]
fn test_score_invalid_message_deliveries() {
    // Create parameters with reasonable default values
    let topic = Topic::new("test");
    let topic_hash = topic.hash();
    let mut params = PeerScoreParams::default();
    // Only P4 is active; all other parameter weights are zeroed.
    let topic_params = TopicScoreParams {
        topic_weight: 1.0,
        mesh_message_deliveries_weight: 0.0,
        mesh_message_deliveries_activation: Duration::from_secs(1),
        mesh_message_deliveries_window: Duration::from_millis(10),
        mesh_message_deliveries_threshold: 20.0,
        mesh_message_deliveries_cap: 100.0,
        mesh_message_deliveries_decay: 1.0,
        first_message_deliveries_weight: 0.0,
        time_in_mesh_weight: 0.0,
        mesh_failure_penalty_weight: 0.0,
        invalid_message_deliveries_weight: -1.0,
        invalid_message_deliveries_decay: 1.0,
        ..Default::default()
    };
    params.topics.insert(topic_hash, topic_params.clone());
    let mut peer_score = PeerScore::new(params);
    let peer_id_a = PeerId::random();
    peer_score.add_peer(peer_id_a);
    peer_score.graft(&peer_id_a, topic);
    // reject a bunch of messages from peer A
    let messages = 100;
    for seq in 0..messages {
        let (id, msg) = make_test_message(seq);
        peer_score.reject_message(&peer_id_a, &id, &msg.topic, RejectReason::ValidationFailed);
    }
    peer_score.refresh_scores();
    let score_a = peer_score.score(&peer_id_a);
    let expected = topic_params.topic_weight
        * topic_params.invalid_message_deliveries_weight
        * (messages * messages) as f64;
    assert_eq!(score_a, expected, "Peer has unexpected score",);
}
// P4 decay: the counter is decayed before squaring, so after the first
// refresh the score is `-(decay * count)^2`, and each further refresh scales
// the score by `decay^2`.
// NOTE: renamed from `test_score_invalid_message_deliveris_decay` to fix the
// "deliveris" typo; test functions have no callers, so this is safe.
#[test]
fn test_score_invalid_message_deliveries_decay() {
    // Create parameters with reasonable default values
    let topic = Topic::new("test");
    let topic_hash = topic.hash();
    let mut params = PeerScoreParams::default();
    // Only P4 is active; all other parameter weights are zeroed.
    let topic_params = TopicScoreParams {
        topic_weight: 1.0,
        mesh_message_deliveries_weight: 0.0,
        mesh_message_deliveries_activation: Duration::from_secs(1),
        mesh_message_deliveries_window: Duration::from_millis(10),
        mesh_message_deliveries_threshold: 20.0,
        mesh_message_deliveries_cap: 100.0,
        mesh_message_deliveries_decay: 1.0,
        first_message_deliveries_weight: 0.0,
        time_in_mesh_weight: 0.0,
        mesh_failure_penalty_weight: 0.0,
        invalid_message_deliveries_weight: -1.0,
        invalid_message_deliveries_decay: 0.9,
        ..Default::default()
    };
    params.topics.insert(topic_hash, topic_params.clone());
    let mut peer_score = PeerScore::new(params);
    let peer_id_a = PeerId::random();
    peer_score.add_peer(peer_id_a);
    peer_score.graft(&peer_id_a, topic);
    // reject a bunch of messages from peer A
    let messages = 100;
    for seq in 0..messages {
        let (id, msg) = make_test_message(seq);
        peer_score.reject_message(&peer_id_a, &id, &msg.topic, RejectReason::ValidationFailed);
    }
    peer_score.refresh_scores();
    // The refresh decayed the counter once before the square is taken.
    let decay = topic_params.invalid_message_deliveries_decay * messages as f64;
    let mut expected =
        topic_params.topic_weight * topic_params.invalid_message_deliveries_weight * decay * decay;
    let score_a = peer_score.score(&peer_id_a);
    assert_eq!(score_a, expected, "Peer has unexpected score");
    // refresh scores a few times to apply decay
    for _ in 0..10 {
        peer_score.refresh_scores();
        // Squared counter means each decay interval scales the score by decay^2.
        expected *= topic_params.invalid_message_deliveries_decay
            * topic_params.invalid_message_deliveries_decay;
    }
    let score_a = peer_score.score(&peer_id_a);
    assert_eq!(score_a, expected, "Peer has unexpected score");
}
// Rejection edge cases: blacklist/ignored rejections must not affect scores,
// while a genuine validation failure penalizes both the sender and every
// peer that sent a duplicate (whether the duplicate arrived before or after
// the rejection).
#[test]
fn test_score_reject_message_deliveries() {
    // This tests adds coverage for the dark corners of rejection tracing
    // Create parameters with reasonable default values
    let topic = Topic::new("test");
    let topic_hash = topic.hash();
    let mut params = PeerScoreParams::default();
    // Only P4 (invalid deliveries) is active, with no decay, so each rejection
    // contributes exactly -1 per counted peer (squared counter, count of 1).
    let topic_params = TopicScoreParams {
        topic_weight: 1.0,
        mesh_message_deliveries_weight: 0.0,
        first_message_deliveries_weight: 0.0,
        mesh_failure_penalty_weight: 0.0,
        time_in_mesh_weight: 0.0,
        time_in_mesh_quantum: Duration::from_secs(1),
        invalid_message_deliveries_weight: -1.0,
        invalid_message_deliveries_decay: 1.0,
        ..Default::default()
    };
    params.topics.insert(topic_hash, topic_params);
    let mut peer_score = PeerScore::new(params);
    let peer_id_a = PeerId::random();
    let peer_id_b = PeerId::random();
    let peers = vec![peer_id_a, peer_id_b];
    for peer_id in &peers {
        peer_score.add_peer(*peer_id);
    }
    let (id, msg) = make_test_message(1);
    // these should have no effect in the score
    peer_score.reject_message(&peer_id_a, &id, &msg.topic, RejectReason::BlackListedPeer);
    peer_score.reject_message(&peer_id_a, &id, &msg.topic, RejectReason::BlackListedSource);
    peer_score.reject_message(&peer_id_a, &id, &msg.topic, RejectReason::ValidationIgnored);
    peer_score.refresh_scores();
    let score_a = peer_score.score(&peer_id_a);
    let score_b = peer_score.score(&peer_id_b);
    assert_eq!(score_a, 0.0, "Should have no effect on the score");
    assert_eq!(score_b, 0.0, "Should have no effect on the score");
    // insert a record in the message deliveries
    peer_score.validate_message(&peer_id_a, &id, &msg.topic);
    // this should have no effect in the score, and subsequent duplicate messages should have no
    // effect either
    peer_score.reject_message(&peer_id_a, &id, &msg.topic, RejectReason::ValidationIgnored);
    peer_score.duplicated_message(&peer_id_b, &id, &msg.topic);
    peer_score.refresh_scores();
    let score_a = peer_score.score(&peer_id_a);
    let score_b = peer_score.score(&peer_id_b);
    assert_eq!(score_a, 0.0, "Should have no effect on the score");
    assert_eq!(score_b, 0.0, "Should have no effect on the score");
    // now clear the delivery record
    peer_score.deliveries.clear();
    // insert a record in the message deliveries
    peer_score.validate_message(&peer_id_a, &id, &msg.topic);
    // this should have no effect in the score, and subsequent duplicate messages should have no
    // effect either
    peer_score.reject_message(&peer_id_a, &id, &msg.topic, RejectReason::ValidationIgnored);
    peer_score.duplicated_message(&peer_id_b, &id, &msg.topic);
    peer_score.refresh_scores();
    let score_a = peer_score.score(&peer_id_a);
    let score_b = peer_score.score(&peer_id_b);
    assert_eq!(score_a, 0.0, "Should have no effect on the score");
    assert_eq!(score_b, 0.0, "Should have no effect on the score");
    // now clear the delivery record
    peer_score.deliveries.clear();
    // insert a new record in the message deliveries
    peer_score.validate_message(&peer_id_a, &id, &msg.topic);
    // and reject the message to make sure duplicates are also penalized
    peer_score.reject_message(&peer_id_a, &id, &msg.topic, RejectReason::ValidationFailed);
    peer_score.duplicated_message(&peer_id_b, &id, &msg.topic);
    peer_score.refresh_scores();
    let score_a = peer_score.score(&peer_id_a);
    let score_b = peer_score.score(&peer_id_b);
    // One invalid delivery each: -(1^2) = -1.
    assert_eq!(score_a, -1.0, "Score should be effected");
    assert_eq!(score_b, -1.0, "Score should be effected");
    // now clear the delivery record again
    peer_score.deliveries.clear();
    // insert a new record in the message deliveries
    peer_score.validate_message(&peer_id_a, &id, &msg.topic);
    // and reject the message after a duplicate has arrived
    peer_score.duplicated_message(&peer_id_b, &id, &msg.topic);
    peer_score.reject_message(&peer_id_a, &id, &msg.topic, RejectReason::ValidationFailed);
    peer_score.refresh_scores();
    let score_a = peer_score.score(&peer_id_a);
    let score_b = peer_score.score(&peer_id_b);
    // Counters accumulate across rounds: two invalid deliveries each, -(2^2) = -4.
    assert_eq!(score_a, -4.0, "Score should be effected");
    assert_eq!(score_b, -4.0, "Score should be effected");
}
#[test]
fn test_application_score() {
    // Give the application-specific component a weight of 0.5 and zero out
    // every topic-level contribution, so the final score is driven purely by
    // the value set via `set_application_score`.
    let app_specific_weight = 0.5;
    let topic = Topic::new("test");
    let topic_hash = topic.hash();
    let mut params = PeerScoreParams {
        app_specific_weight,
        ..Default::default()
    };
    params.topics.insert(
        topic_hash,
        TopicScoreParams {
            topic_weight: 1.0,
            mesh_message_deliveries_weight: 0.0,
            first_message_deliveries_weight: 0.0,
            mesh_failure_penalty_weight: 0.0,
            time_in_mesh_weight: 0.0,
            time_in_mesh_quantum: Duration::from_secs(1),
            invalid_message_deliveries_weight: 0.0,
            invalid_message_deliveries_decay: 1.0,
            ..Default::default()
        },
    );

    let mut peer_score = PeerScore::new(params);
    let peer = PeerId::random();
    peer_score.add_peer(peer);
    peer_score.graft(&peer, topic);

    // Sweep the application score over [-100, 100) and verify that the
    // peer's score always equals `app_score * app_specific_weight`.
    for value in -100..100 {
        let app_score = value as f64;
        peer_score.set_application_score(&peer, app_score);
        peer_score.refresh_scores();
        assert_eq!(
            peer_score.score(&peer),
            app_score * app_specific_weight,
            "Peer has unexpected score"
        );
    }
}
#[test]
fn test_score_ip_colocation() {
    // Penalise peers that share an IP address once more than one peer uses it.
    let ip_colocation_factor_weight = -1.0;
    let ip_colocation_factor_threshold = 1.0;
    let topic = Topic::new("test");
    let topic_hash = topic.hash();
    let mut params = PeerScoreParams {
        ip_colocation_factor_weight,
        ip_colocation_factor_threshold,
        ..Default::default()
    };
    params.topics.insert(
        topic_hash,
        TopicScoreParams {
            topic_weight: 1.0,
            mesh_message_deliveries_weight: 0.0,
            first_message_deliveries_weight: 0.0,
            mesh_failure_penalty_weight: 0.0,
            time_in_mesh_weight: 0.0,
            time_in_mesh_quantum: Duration::from_secs(1),
            invalid_message_deliveries_weight: 0.0,
            ..Default::default()
        },
    );

    let mut peer_score = PeerScore::new(params);
    let peer_id_a = PeerId::random();
    let peer_id_b = PeerId::random();
    let peer_id_c = PeerId::random();
    let peer_id_d = PeerId::random();
    for peer_id in [&peer_id_a, &peer_id_b, &peer_id_c, &peer_id_d] {
        peer_score.add_peer(*peer_id);
        peer_score.graft(peer_id, topic.clone());
    }

    // A has a unique IP; B, C and D all share 2.3.4.5. C's additional unique
    // address must not change its penalty.
    peer_score.add_ip(&peer_id_a, "1.2.3.4".parse().unwrap());
    peer_score.add_ip(&peer_id_b, "2.3.4.5".parse().unwrap());
    peer_score.add_ip(&peer_id_c, "2.3.4.5".parse().unwrap());
    peer_score.add_ip(&peer_id_c, "3.4.5.6".parse().unwrap());
    peer_score.add_ip(&peer_id_d, "2.3.4.5".parse().unwrap());
    peer_score.refresh_scores();

    assert_eq!(
        peer_score.score(&peer_id_a),
        0.0,
        "Peer A should be unaffected"
    );

    // Three peers share the IP. The penalty is the square of the surplus over
    // the threshold, scaled by the (negative) colocation weight.
    let surplus = 3.0 - ip_colocation_factor_threshold;
    let expected = ip_colocation_factor_weight * surplus * surplus;
    assert_eq!(
        peer_score.score(&peer_id_b),
        expected,
        "Peer B should have expected score"
    );
    assert_eq!(
        peer_score.score(&peer_id_c),
        expected,
        "Peer C should have expected score"
    );
    assert_eq!(
        peer_score.score(&peer_id_d),
        expected,
        "Peer D should have expected score"
    );
}
#[test]
fn test_score_behaviour_penality() {
    // NOTE(review): "penality" in the test name is a typo for "penalty"; left
    // unchanged so the test name stays stable.
    //
    // Exercises the behavioural-penalty component: the penalty counter is
    // squared and scaled by `behaviour_penalty_weight`, and the counter decays
    // by `behaviour_penalty_decay` on every `refresh_scores` call.
    // Create parameters with reasonable default values
    let behaviour_penalty_weight = -1.0;
    let behaviour_penalty_decay = 0.99;
    let topic = Topic::new("test");
    let topic_hash = topic.hash();
    let mut params = PeerScoreParams {
        behaviour_penalty_decay,
        behaviour_penalty_weight,
        ..Default::default()
    };
    // All topic-level weights are zeroed so only the behaviour penalty
    // contributes to the score.
    let topic_params = TopicScoreParams {
        topic_weight: 1.0,
        mesh_message_deliveries_weight: 0.0,
        first_message_deliveries_weight: 0.0,
        mesh_failure_penalty_weight: 0.0,
        time_in_mesh_weight: 0.0,
        time_in_mesh_quantum: Duration::from_secs(1),
        invalid_message_deliveries_weight: 0.0,
        ..Default::default()
    };
    params.topics.insert(topic_hash, topic_params);
    let mut peer_score = PeerScore::new(params);
    let peer_id_a = PeerId::random();
    // add a penalty to a non-existent peer.
    peer_score.add_penalty(&peer_id_a, 1);
    let score_a = peer_score.score(&peer_id_a);
    assert_eq!(score_a, 0.0, "Peer A should be unaffected");
    // add the peer and test penalties
    peer_score.add_peer(peer_id_a);
    // `score_a` is still the value read above; merely adding the peer must not
    // have produced a penalty.
    assert_eq!(score_a, 0.0, "Peer A should be unaffected");
    peer_score.add_penalty(&peer_id_a, 1);
    let score_a = peer_score.score(&peer_id_a);
    // One penalty unit with weight -1: -(1^2) = -1.
    assert_eq!(score_a, -1.0, "Peer A should have been penalized");
    peer_score.add_penalty(&peer_id_a, 1);
    let score_a = peer_score.score(&peer_id_a);
    // Two penalty units: -(2^2) = -4 (the penalty grows quadratically).
    assert_eq!(score_a, -4.0, "Peer A should have been penalized");
    peer_score.refresh_scores();
    let score_a = peer_score.score(&peer_id_a);
    // After one decay step the counter is 2 * 0.99, so the score is
    // -(2 * 0.99)^2 = -3.9204.
    assert_eq!(score_a, -3.9204, "Peer A should have been penalized");
}
#[test]
fn test_score_retention() {
    // A peer's (negative) score must be retained for `retain_score` after it
    // disconnects, and only be reset to zero once that window has elapsed.
    let topic = Topic::new("test");
    let topic_hash = topic.hash();
    let app_specific_weight = 1.0;
    let app_score_value = -1000.0;
    let retain_score = Duration::from_secs(1);
    let mut params = PeerScoreParams {
        app_specific_weight,
        retain_score,
        ..Default::default()
    };
    // Topic contributions are zeroed so only the application score matters.
    let topic_params = TopicScoreParams {
        topic_weight: 0.0,
        mesh_message_deliveries_weight: 0.0,
        mesh_message_deliveries_activation: Duration::from_secs(0),
        first_message_deliveries_weight: 0.0,
        time_in_mesh_weight: 0.0,
        ..Default::default()
    };
    params.topics.insert(topic_hash, topic_params);
    let mut peer_score = PeerScore::new(params);
    let peer_id_a = PeerId::random();
    peer_score.add_peer(peer_id_a);
    peer_score.graft(&peer_id_a, topic);
    peer_score.set_application_score(&peer_id_a, app_score_value);
    // score should equal -1000 (app specific score)
    peer_score.refresh_scores();
    let score_a = peer_score.score(&peer_id_a);
    assert_eq!(
        score_a, app_score_value,
        "Score should be the application specific score"
    );
    // disconnect & wait half of RetainScore time. Should still have negative score
    peer_score.remove_peer(&peer_id_a);
    std::thread::sleep(retain_score / 2);
    peer_score.refresh_scores();
    let score_a = peer_score.score(&peer_id_a);
    assert_eq!(
        score_a, app_score_value,
        "Score should be the application specific score"
    );
    // wait remaining time (plus a little slop) and the score should reset to zero
    std::thread::sleep(retain_score / 2 + Duration::from_millis(50));
    peer_score.refresh_scores();
    let score_a = peer_score.score(&peer_id_a);
    // Fixed: the failure message previously said "Score should be the
    // application specific score", which contradicted the asserted value of 0.
    assert_eq!(
        score_a, 0.0,
        "Score should have been reset to zero after the retain_score period"
    );
}

View File

@ -0,0 +1,625 @@
// Copyright 2020 Sigma Prime Pty Ltd.
//
// Permission is hereby granted, free of charge, to any person obtaining a
// copy of this software and associated documentation files (the "Software"),
// to deal in the Software without restriction, including without limitation
// the rights to use, copy, modify, merge, publish, distribute, sublicense,
// and/or sell copies of the Software, and to permit persons to whom the
// Software is furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in
// all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
// OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
// DEALINGS IN THE SOFTWARE.
use super::config::ValidationMode;
use super::handler::HandlerEvent;
use super::rpc_proto::proto;
use super::topic::TopicHash;
use super::types::{
ControlAction, Graft, IHave, IWant, MessageId, PeerInfo, PeerKind, Prune, RawMessage, Rpc,
Subscription, SubscriptionAction,
};
use super::ValidationError;
use asynchronous_codec::{Decoder, Encoder, Framed};
use byteorder::{BigEndian, ByteOrder};
use bytes::BytesMut;
use futures::future;
use futures::prelude::*;
use libp2p::core::{InboundUpgrade, OutboundUpgrade, UpgradeInfo};
use libp2p::identity::{PeerId, PublicKey};
use libp2p::swarm::StreamProtocol;
use quick_protobuf::Writer;
use std::pin::Pin;
use void::Void;
/// Prefix prepended to the serialized message bytes before signing and before
/// signature verification (see `GossipsubCodec::verify_signature`).
pub(crate) const SIGNING_PREFIX: &[u8] = b"libp2p-pubsub:";

/// Protocol id for gossipsub v1.1 ("/meshsub/1.1.0").
pub(crate) const GOSSIPSUB_1_1_0_PROTOCOL: ProtocolId = ProtocolId {
    protocol: StreamProtocol::new("/meshsub/1.1.0"),
    kind: PeerKind::Gossipsubv1_1,
};
/// Protocol id for gossipsub v1.0 ("/meshsub/1.0.0").
pub(crate) const GOSSIPSUB_1_0_0_PROTOCOL: ProtocolId = ProtocolId {
    protocol: StreamProtocol::new("/meshsub/1.0.0"),
    kind: PeerKind::Gossipsub,
};
/// Protocol id for the legacy floodsub protocol ("/floodsub/1.0.0").
pub(crate) const FLOODSUB_PROTOCOL: ProtocolId = ProtocolId {
    protocol: StreamProtocol::new("/floodsub/1.0.0"),
    kind: PeerKind::Floodsub,
};
/// Implementation of [`InboundUpgrade`] and [`OutboundUpgrade`] for the Gossipsub protocol.
#[derive(Debug, Clone)]
pub struct ProtocolConfig {
    /// The Gossipsub protocol id to listen on.
    // NOTE(review): ids are advertised in this order during negotiation;
    // presumably earlier entries are preferred — confirm against the swarm's
    // upgrade-selection behaviour.
    pub(crate) protocol_ids: Vec<ProtocolId>,
    /// The maximum transmit size for a packet.
    pub(crate) max_transmit_size: usize,
    /// Determines the level of validation to be done on incoming messages.
    pub(crate) validation_mode: ValidationMode,
}
impl Default for ProtocolConfig {
fn default() -> Self {
Self {
max_transmit_size: 65536,
validation_mode: ValidationMode::Strict,
protocol_ids: vec![GOSSIPSUB_1_1_0_PROTOCOL, GOSSIPSUB_1_0_0_PROTOCOL],
}
}
}
/// The protocol ID
#[derive(Clone, Debug, PartialEq)]
pub struct ProtocolId {
    /// The protocol name/id negotiated on the wire (e.g. "/meshsub/1.1.0").
    pub protocol: StreamProtocol,
    /// The type of protocol we support
    pub kind: PeerKind,
}
impl AsRef<str> for ProtocolId {
    /// Exposes the protocol id as a string slice by delegating to the
    /// underlying [`StreamProtocol`].
    fn as_ref(&self) -> &str {
        self.protocol.as_ref()
    }
}
impl UpgradeInfo for ProtocolConfig {
    type Info = ProtocolId;
    type InfoIter = Vec<Self::Info>;

    /// Advertises every configured protocol id for negotiation.
    fn protocol_info(&self) -> Self::InfoIter {
        self.protocol_ids.to_vec()
    }
}
impl<TSocket> InboundUpgrade<TSocket> for ProtocolConfig
where
    TSocket: AsyncRead + AsyncWrite + Unpin + Send + 'static,
{
    type Output = (Framed<TSocket, GossipsubCodec>, PeerKind);
    type Error = Void;
    type Future = Pin<Box<dyn Future<Output = Result<Self::Output, Self::Error>> + Send>>;

    /// Wraps the inbound substream in a [`GossipsubCodec`]-framed transport
    /// and reports the peer kind of the negotiated protocol. Never fails.
    fn upgrade_inbound(self, socket: TSocket, protocol_id: Self::Info) -> Self::Future {
        let codec = GossipsubCodec::new(self.max_transmit_size, self.validation_mode);
        let framed = Framed::new(socket, codec);
        Box::pin(future::ok((framed, protocol_id.kind)))
    }
}
impl<TSocket> OutboundUpgrade<TSocket> for ProtocolConfig
where
    TSocket: AsyncWrite + AsyncRead + Unpin + Send + 'static,
{
    type Output = (Framed<TSocket, GossipsubCodec>, PeerKind);
    type Error = Void;
    type Future = Pin<Box<dyn Future<Output = Result<Self::Output, Self::Error>> + Send>>;

    /// Mirrors `upgrade_inbound`: frames the outbound substream with a
    /// [`GossipsubCodec`] and reports the negotiated peer kind. Never fails.
    fn upgrade_outbound(self, socket: TSocket, protocol_id: Self::Info) -> Self::Future {
        let codec = GossipsubCodec::new(self.max_transmit_size, self.validation_mode);
        let framed = Framed::new(socket, codec);
        Box::pin(future::ok((framed, protocol_id.kind)))
    }
}
/* Gossip codec for the framing */

/// Encoder/decoder for length-delimited protobuf gossipsub RPCs.
pub struct GossipsubCodec {
    /// Determines the level of validation performed on incoming messages.
    validation_mode: ValidationMode,
    /// The codec to handle common encoding/decoding of protobuf messages
    codec: quick_protobuf_codec::Codec<proto::RPC>,
}
impl GossipsubCodec {
    /// Creates a codec that enforces `max_length` as the maximum encoded RPC
    /// size and applies `validation_mode` when decoding incoming messages.
    pub fn new(max_length: usize, validation_mode: ValidationMode) -> GossipsubCodec {
        let codec = quick_protobuf_codec::Codec::new(max_length);
        GossipsubCodec {
            validation_mode,
            codec,
        }
    }

    /// Verifies a gossipsub message. This returns either a success or failure. All errors
    /// are logged, which prevents error handling in the codec and handler. We simply drop invalid
    /// messages and log warnings, rather than propagating errors through the codec.
    fn verify_signature(message: &proto::Message) -> bool {
        use quick_protobuf::MessageWrite;

        // The `from` field must decode to a valid peer id; it identifies the signer.
        let Some(from) = message.from.as_ref() else {
            tracing::debug!("Signature verification failed: No source id given");
            return false;
        };

        let Ok(source) = PeerId::from_bytes(from) else {
            tracing::debug!("Signature verification failed: Invalid Peer Id");
            return false;
        };

        let Some(signature) = message.signature.as_ref() else {
            tracing::debug!("Signature verification failed: No signature provided");
            return false;
        };

        // If there is a key value in the protobuf, use that key otherwise the key must be
        // obtained from the inlined source peer_id.
        // NOTE(review): `&source.to_bytes()[2..]` appears to skip a two-byte
        // multihash header to reach the inlined public-key bytes — confirm
        // against the multihash identity-hash layout.
        let public_key = match message.key.as_deref().map(PublicKey::try_decode_protobuf) {
            Some(Ok(key)) => key,
            _ => match PublicKey::try_decode_protobuf(&source.to_bytes()[2..]) {
                Ok(v) => v,
                Err(_) => {
                    tracing::warn!("Signature verification failed: No valid public key supplied");
                    return false;
                }
            },
        };

        // The key must match the peer_id
        if source != public_key.to_peer_id() {
            tracing::warn!(
                "Signature verification failed: Public key doesn't match source peer id"
            );
            return false;
        }

        // Construct the signature bytes: re-encode the message with the
        // signature and key fields cleared, then prepend SIGNING_PREFIX.
        let mut message_sig = message.clone();
        message_sig.signature = None;
        message_sig.key = None;
        let mut buf = Vec::with_capacity(message_sig.get_size());
        let mut writer = Writer::new(&mut buf);
        message_sig
            .write_message(&mut writer)
            .expect("Encoding to succeed");
        let mut signature_bytes = SIGNING_PREFIX.to_vec();
        signature_bytes.extend_from_slice(&buf);
        public_key.verify(&signature_bytes, signature)
    }
}
impl Encoder for GossipsubCodec {
    type Item<'a> = proto::RPC;
    type Error = quick_protobuf_codec::Error;

    /// Length-delimited protobuf encoding; delegates entirely to the inner
    /// `quick_protobuf_codec::Codec`.
    fn encode(&mut self, item: Self::Item<'_>, dst: &mut BytesMut) -> Result<(), Self::Error> {
        self.codec.encode(item, dst)
    }
}
impl Decoder for GossipsubCodec {
    type Item = HandlerEvent;
    type Error = quick_protobuf_codec::Error;

    /// Decodes a protobuf RPC from `src` and applies the configured
    /// [`ValidationMode`] to every published message. Messages that fail a
    /// check are not silently dropped: they are collected together with their
    /// [`ValidationError`] so the behaviour can penalise the sender.
    fn decode(&mut self, src: &mut BytesMut) -> Result<Option<Self::Item>, Self::Error> {
        let Some(rpc) = self.codec.decode(src)? else {
            return Ok(None);
        };
        // Store valid messages.
        let mut messages = Vec::with_capacity(rpc.publish.len());
        // Store any invalid messages.
        let mut invalid_messages = Vec::new();

        for message in rpc.publish.into_iter() {
            // Keep track of the type of invalid message.
            let mut invalid_kind = None;
            let mut verify_signature = false;
            let mut verify_sequence_no = false;
            let mut verify_source = false;

            // Decide which checks apply under the configured validation mode.
            match self.validation_mode {
                ValidationMode::Strict => {
                    // Validate everything
                    verify_signature = true;
                    verify_sequence_no = true;
                    verify_source = true;
                }
                ValidationMode::Permissive => {
                    // If the fields exist, validate them
                    if message.signature.is_some() {
                        verify_signature = true;
                    }
                    if message.seqno.is_some() {
                        verify_sequence_no = true;
                    }
                    if message.from.is_some() {
                        verify_source = true;
                    }
                }
                ValidationMode::Anonymous => {
                    // Anonymous mode requires these fields to be absent.
                    if message.signature.is_some() {
                        tracing::warn!(
                            "Signature field was non-empty and anonymous validation mode is set"
                        );
                        invalid_kind = Some(ValidationError::SignaturePresent);
                    } else if message.seqno.is_some() {
                        tracing::warn!(
                            "Sequence number was non-empty and anonymous validation mode is set"
                        );
                        invalid_kind = Some(ValidationError::SequenceNumberPresent);
                    } else if message.from.is_some() {
                        tracing::warn!("Message dropped. Message source was non-empty and anonymous validation mode is set");
                        invalid_kind = Some(ValidationError::MessageSourcePresent);
                    }
                }
                ValidationMode::None => {}
            }

            // If the initial validation logic failed, add the message to invalid messages and
            // continue processing the others.
            if let Some(validation_error) = invalid_kind.take() {
                let message = RawMessage {
                    source: None, // don't bother inform the application
                    data: message.data.unwrap_or_default(),
                    sequence_number: None, // don't inform the application
                    topic: TopicHash::from_raw(message.topic),
                    signature: None, // don't inform the application
                    key: message.key,
                    validated: false,
                };
                invalid_messages.push((message, validation_error));
                // proceed to the next message
                continue;
            }

            // verify message signatures if required
            if verify_signature && !GossipsubCodec::verify_signature(&message) {
                tracing::warn!("Invalid signature for received message");

                // Build the invalid message (ignoring further validation of sequence number
                // and source)
                let message = RawMessage {
                    source: None, // don't bother inform the application
                    data: message.data.unwrap_or_default(),
                    sequence_number: None, // don't inform the application
                    topic: TopicHash::from_raw(message.topic),
                    signature: None, // don't inform the application
                    key: message.key,
                    validated: false,
                };
                invalid_messages.push((message, ValidationError::InvalidSignature));
                // proceed to the next message
                continue;
            }

            // ensure the sequence number is a u64
            let sequence_number = if verify_sequence_no {
                if let Some(seq_no) = message.seqno {
                    if seq_no.is_empty() {
                        None
                    } else if seq_no.len() != 8 {
                        // Sequence numbers must be exactly 8 big-endian bytes.
                        tracing::debug!(
                            sequence_number=?seq_no,
                            sequence_length=%seq_no.len(),
                            "Invalid sequence number length for received message"
                        );
                        let message = RawMessage {
                            source: None, // don't bother inform the application
                            data: message.data.unwrap_or_default(),
                            sequence_number: None, // don't inform the application
                            topic: TopicHash::from_raw(message.topic),
                            signature: message.signature, // don't inform the application
                            key: message.key,
                            validated: false,
                        };
                        invalid_messages.push((message, ValidationError::InvalidSequenceNumber));
                        // proceed to the next message
                        continue;
                    } else {
                        // valid sequence number
                        Some(BigEndian::read_u64(&seq_no))
                    }
                } else {
                    // sequence number was not present
                    tracing::debug!("Sequence number not present but expected");
                    let message = RawMessage {
                        source: None, // don't bother inform the application
                        data: message.data.unwrap_or_default(),
                        sequence_number: None, // don't inform the application
                        topic: TopicHash::from_raw(message.topic),
                        signature: message.signature, // don't inform the application
                        key: message.key,
                        validated: false,
                    };
                    invalid_messages.push((message, ValidationError::EmptySequenceNumber));
                    continue;
                }
            } else {
                // Do not verify the sequence number, consider it empty
                None
            };

            // Verify the message source if required
            let source = if verify_source {
                if let Some(bytes) = message.from {
                    if !bytes.is_empty() {
                        match PeerId::from_bytes(&bytes) {
                            Ok(peer_id) => Some(peer_id), // valid peer id
                            Err(_) => {
                                // invalid peer id, add to invalid messages
                                tracing::debug!("Message source has an invalid PeerId");
                                let message = RawMessage {
                                    source: None, // don't bother inform the application
                                    data: message.data.unwrap_or_default(),
                                    sequence_number,
                                    topic: TopicHash::from_raw(message.topic),
                                    signature: message.signature, // don't inform the application
                                    key: message.key,
                                    validated: false,
                                };
                                invalid_messages.push((message, ValidationError::InvalidPeerId));
                                continue;
                            }
                        }
                    } else {
                        None
                    }
                } else {
                    None
                }
            } else {
                None
            };

            // This message has passed all validation, add it to the validated messages.
            messages.push(RawMessage {
                source,
                data: message.data.unwrap_or_default(),
                sequence_number,
                topic: TopicHash::from_raw(message.topic),
                signature: message.signature,
                key: message.key,
                validated: false,
            });
        }

        let mut control_msgs = Vec::new();

        if let Some(rpc_control) = rpc.control {
            // Collect the gossipsub control messages
            let ihave_msgs: Vec<ControlAction> = rpc_control
                .ihave
                .into_iter()
                .map(|ihave| {
                    ControlAction::IHave(IHave {
                        topic_hash: TopicHash::from_raw(ihave.topic_id.unwrap_or_default()),
                        message_ids: ihave
                            .message_ids
                            .into_iter()
                            .map(MessageId::from)
                            .collect::<Vec<_>>(),
                    })
                })
                .collect();

            let iwant_msgs: Vec<ControlAction> = rpc_control
                .iwant
                .into_iter()
                .map(|iwant| {
                    ControlAction::IWant(IWant {
                        message_ids: iwant
                            .message_ids
                            .into_iter()
                            .map(MessageId::from)
                            .collect::<Vec<_>>(),
                    })
                })
                .collect();

            let graft_msgs: Vec<ControlAction> = rpc_control
                .graft
                .into_iter()
                .map(|graft| {
                    ControlAction::Graft(Graft {
                        topic_hash: TopicHash::from_raw(graft.topic_id.unwrap_or_default()),
                    })
                })
                .collect();

            let mut prune_msgs = Vec::new();

            for prune in rpc_control.prune {
                // filter out invalid peers
                let peers = prune
                    .peers
                    .into_iter()
                    .filter_map(|info| {
                        info.peer_id
                            .as_ref()
                            .and_then(|id| PeerId::from_bytes(id).ok())
                            .map(|peer_id|
                                //TODO signedPeerRecord, see https://github.com/libp2p/specs/pull/217
                                PeerInfo {
                                    peer_id: Some(peer_id),
                                })
                    })
                    .collect::<Vec<PeerInfo>>();

                let topic_hash = TopicHash::from_raw(prune.topic_id.unwrap_or_default());
                prune_msgs.push(ControlAction::Prune(Prune {
                    topic_hash,
                    peers,
                    backoff: prune.backoff,
                }));
            }

            control_msgs.extend(ihave_msgs);
            control_msgs.extend(iwant_msgs);
            control_msgs.extend(graft_msgs);
            control_msgs.extend(prune_msgs);
        }

        Ok(Some(HandlerEvent::Message {
            rpc: Rpc {
                messages,
                subscriptions: rpc
                    .subscriptions
                    .into_iter()
                    .map(|sub| Subscription {
                        action: if Some(true) == sub.subscribe {
                            SubscriptionAction::Subscribe
                        } else {
                            SubscriptionAction::Unsubscribe
                        },
                        topic_hash: TopicHash::from_raw(sub.topic_id.unwrap_or_default()),
                    })
                    .collect(),
                control_msgs,
            },
            invalid_messages,
        }))
    }
}
#[cfg(test)]
mod tests {
use super::*;
use crate::gossipsub::config::Config;
use crate::gossipsub::protocol::{BytesMut, GossipsubCodec, HandlerEvent};
use crate::gossipsub::*;
use crate::gossipsub::{IdentTopic as Topic, Version};
use libp2p::identity::Keypair;
use quickcheck::*;
#[derive(Clone, Debug)]
struct Message(RawMessage);
impl Arbitrary for Message {
fn arbitrary(g: &mut Gen) -> Self {
let keypair = TestKeypair::arbitrary(g);
// generate an arbitrary GossipsubMessage using the behaviour signing functionality
let config = Config::default();
let mut gs: Behaviour =
Behaviour::new(MessageAuthenticity::Signed(keypair.0), config).unwrap();
let mut data_g = quickcheck::Gen::new(10024);
let data = (0..u8::arbitrary(&mut data_g))
.map(|_| u8::arbitrary(g))
.collect::<Vec<_>>();
let topic_id = TopicId::arbitrary(g).0;
Message(gs.build_raw_message(topic_id, data).unwrap())
}
}
#[derive(Clone, Debug)]
struct TopicId(TopicHash);
impl Arbitrary for TopicId {
fn arbitrary(g: &mut Gen) -> Self {
let mut data_g = quickcheck::Gen::new(1024);
let topic_string: String = (0..u8::arbitrary(&mut data_g))
.map(|_| char::arbitrary(g))
.collect::<String>();
TopicId(Topic::new(topic_string).into())
}
}
#[derive(Clone)]
struct TestKeypair(Keypair);
impl Arbitrary for TestKeypair {
#[cfg(feature = "rsa")]
fn arbitrary(g: &mut Gen) -> Self {
let keypair = if bool::arbitrary(g) {
// Small enough to be inlined.
Keypair::generate_ed25519()
} else {
// Too large to be inlined.
let mut rsa_key = hex::decode("308204bd020100300d06092a864886f70d0101010500048204a7308204a30201000282010100ef930f41a71288b643c1cbecbf5f72ab53992249e2b00835bf07390b6745419f3848cbcc5b030faa127bc88cdcda1c1d6f3ff699f0524c15ab9d2c9d8015f5d4bd09881069aad4e9f91b8b0d2964d215cdbbae83ddd31a7622a8228acee07079f6e501aea95508fa26c6122816ef7b00ac526d422bd12aed347c37fff6c1c307f3ba57bb28a7f28609e0bdcc839da4eedca39f5d2fa855ba4b0f9c763e9764937db929a1839054642175312a3de2d3405c9d27bdf6505ef471ce85c5e015eee85bf7874b3d512f715de58d0794fd8afe021c197fbd385bb88a930342fac8da31c27166e2edab00fa55dc1c3814448ba38363077f4e8fe2bdea1c081f85f1aa6f02030100010282010028ff427a1aac1a470e7b4879601a6656193d3857ea79f33db74df61e14730e92bf9ffd78200efb0c40937c3356cbe049cd32e5f15be5c96d5febcaa9bd3484d7fded76a25062d282a3856a1b3b7d2c525cdd8434beae147628e21adf241dd64198d5819f310d033743915ba40ea0b6acdbd0533022ad6daa1ff42de51885f9e8bab2306c6ef1181902d1cd7709006eba1ab0587842b724e0519f295c24f6d848907f772ae9a0953fc931f4af16a07df450fb8bfa94572562437056613647818c238a6ff3f606cffa0533e4b8755da33418dfbc64a85110b1a036623c947400a536bb8df65e5ebe46f2dfd0cfc86e7aeeddd7574c253e8fbf755562b3669525d902818100f9fff30c6677b78dd31ec7a634361438457e80be7a7faf390903067ea8355faa78a1204a82b6e99cb7d9058d23c1ecf6cfe4a900137a00cecc0113fd68c5931602980267ea9a95d182d48ba0a6b4d5dd32fdac685cb2e5d8b42509b2eb59c9579ea6a67ccc7547427e2bd1fb1f23b0ccb4dd6ba7d206c8dd93253d70a451701302818100f5530dfef678d73ce6a401ae47043af10a2e3f224c71ae933035ecd68ccbc4df52d72bc6ca2b17e8faf3e548b483a2506c0369ab80df3b137b54d53fac98f95547c2bc245b416e650ce617e0d29db36066f1335a9ba02ad3e0edf9dc3d58fd835835042663edebce81803972696c789012847cb1f854ab2ac0a1bd3867ac7fb502818029c53010d456105f2bf52a9a8482bca2224a5eac74bf3cc1a4d5d291fafcdffd15a6a6448cce8efdd661f6617ca5fc37c8c885cc3374e109ac6049bcbf72b37eabf44602a2da2d4a1237fd145c863e6d75059976de762d9d258c42b0984e2a2befa01c95217c3ee9c736ff209c355466ff99375194eff943bc402ea1d172a1ed02818027175bf493bbbfb8719c12b47d967bf9eac061c90a5b5711172e909
5c38bb8cc493c063abffe4bea110b0a2f22ac9311b3947ba31b7ef6bfecf8209eebd6d86c316a2366bbafda7279b2b47d5bb24b6202254f249205dcad347b574433f6593733b806f84316276c1990a016ce1bbdbe5f650325acc7791aefe515ecc60063bd02818100b6a2077f4adcf15a17092d9c4a346d6022ac48f3861b73cf714f84c440a07419a7ce75a73b9cbff4597c53c128bf81e87b272d70428a272d99f90cd9b9ea1033298e108f919c6477400145a102df3fb5601ffc4588203cf710002517bfa24e6ad32f4d09c6b1a995fa28a3104131bedd9072f3b4fb4a5c2056232643d310453f").unwrap();
Keypair::rsa_from_pkcs8(&mut rsa_key).unwrap()
};
TestKeypair(keypair)
}
#[cfg(not(feature = "rsa"))]
fn arbitrary(_g: &mut Gen) -> Self {
// Small enough to be inlined.
TestKeypair(Keypair::generate_ed25519())
}
}
impl std::fmt::Debug for TestKeypair {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
f.debug_struct("TestKeypair")
.field("public", &self.0.public())
.finish()
}
}
#[test]
/// Test that RPC messages can be encoded and decoded successfully.
fn encode_decode() {
fn prop(message: Message) {
let message = message.0;
let rpc = crate::gossipsub::types::Rpc {
messages: vec![message.clone()],
subscriptions: vec![],
control_msgs: vec![],
};
let mut codec = GossipsubCodec::new(u32::MAX as usize, ValidationMode::Strict);
let mut buf = BytesMut::new();
codec.encode(rpc.into_protobuf(), &mut buf).unwrap();
let decoded_rpc = codec.decode(&mut buf).unwrap().unwrap();
// mark as validated as its a published message
match decoded_rpc {
HandlerEvent::Message { mut rpc, .. } => {
rpc.messages[0].validated = true;
assert_eq!(vec![message], rpc.messages);
}
_ => panic!("Must decode a message"),
}
}
QuickCheck::new().quickcheck(prop as fn(_) -> _)
}
#[test]
fn support_floodsub_with_custom_protocol() {
let protocol_config = ConfigBuilder::default()
.protocol_id("/foosub", Version::V1_1)
.support_floodsub()
.build()
.unwrap()
.protocol_config();
assert_eq!(protocol_config.protocol_ids[0].protocol, "/foosub");
assert_eq!(protocol_config.protocol_ids[1].protocol, "/floodsub/1.0.0");
}
}

View File

@ -0,0 +1,92 @@
// Copyright 2020 Sigma Prime Pty Ltd.
//
// Permission is hereby granted, free of charge, to any person obtaining a
// copy of this software and associated documentation files (the "Software"),
// to deal in the Software without restriction, including without limitation
// the rights to use, copy, modify, merge, publish, distribute, sublicense,
// and/or sell copies of the Software, and to permit persons to whom the
// Software is furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in
// all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
// OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
// DEALINGS IN THE SOFTWARE.
/// Generated protobuf bindings for the gossipsub RPC wire format.
pub(crate) mod proto {
    #![allow(unreachable_pub)]
    include!("generated/mod.rs");
    pub use self::gossipsub::pb::{mod_RPC::SubOpts, *};
}
#[cfg(test)]
mod test {
    use crate::gossipsub::rpc_proto::proto::compat;
    use crate::gossipsub::IdentTopic as Topic;
    use libp2p::identity::PeerId;
    use quick_protobuf::{BytesReader, MessageRead, MessageWrite, Writer};
    use rand::Rng;

    #[test]
    fn test_multi_topic_message_compatibility() {
        // Checks wire compatibility between the new single-topic `Message`
        // and the legacy multi-topic (`topic_ids`) `compat::pb::Message`:
        // each encoding must be decodable as the other.
        let topic1 = Topic::new("t1").hash();
        let topic2 = Topic::new("t2").hash();

        let new_message1 = super::proto::Message {
            from: Some(PeerId::random().to_bytes()),
            data: Some(rand::thread_rng().gen::<[u8; 32]>().to_vec()),
            seqno: Some(rand::thread_rng().gen::<[u8; 8]>().to_vec()),
            topic: topic1.clone().into_string(),
            signature: Some(rand::thread_rng().gen::<[u8; 32]>().to_vec()),
            key: Some(rand::thread_rng().gen::<[u8; 32]>().to_vec()),
        };
        let old_message1 = compat::pb::Message {
            from: Some(PeerId::random().to_bytes()),
            data: Some(rand::thread_rng().gen::<[u8; 32]>().to_vec()),
            seqno: Some(rand::thread_rng().gen::<[u8; 8]>().to_vec()),
            topic_ids: vec![topic1.clone().into_string()],
            signature: Some(rand::thread_rng().gen::<[u8; 32]>().to_vec()),
            key: Some(rand::thread_rng().gen::<[u8; 32]>().to_vec()),
        };
        let old_message2 = compat::pb::Message {
            from: Some(PeerId::random().to_bytes()),
            data: Some(rand::thread_rng().gen::<[u8; 32]>().to_vec()),
            seqno: Some(rand::thread_rng().gen::<[u8; 8]>().to_vec()),
            topic_ids: vec![topic1.clone().into_string(), topic2.clone().into_string()],
            signature: Some(rand::thread_rng().gen::<[u8; 32]>().to_vec()),
            key: Some(rand::thread_rng().gen::<[u8; 32]>().to_vec()),
        };

        let mut new_message1b = Vec::with_capacity(new_message1.get_size());
        let mut writer = Writer::new(&mut new_message1b);
        new_message1.write_message(&mut writer).unwrap();

        let mut old_message1b = Vec::with_capacity(old_message1.get_size());
        let mut writer = Writer::new(&mut old_message1b);
        old_message1.write_message(&mut writer).unwrap();

        let mut old_message2b = Vec::with_capacity(old_message2.get_size());
        let mut writer = Writer::new(&mut old_message2b);
        old_message2.write_message(&mut writer).unwrap();

        // Old single-topic message decodes to the new format with that topic.
        let mut reader = BytesReader::from_bytes(&old_message1b[..]);
        let new_message =
            super::proto::Message::from_reader(&mut reader, &old_message1b[..]).unwrap();
        assert_eq!(new_message.topic, topic1.clone().into_string());

        // Old multi-topic message decodes with the *last* listed topic
        // (protobuf last-value-wins for a repeated field read as singular).
        let mut reader = BytesReader::from_bytes(&old_message2b[..]);
        let new_message =
            super::proto::Message::from_reader(&mut reader, &old_message2b[..]).unwrap();
        assert_eq!(new_message.topic, topic2.into_string());

        // New message decodes to the old format as a single-element topic list.
        let mut reader = BytesReader::from_bytes(&new_message1b[..]);
        let old_message =
            compat::pb::Message::from_reader(&mut reader, &new_message1b[..]).unwrap();
        assert_eq!(old_message.topic_ids, vec![topic1.into_string()]);
    }
}

View File

@ -0,0 +1,436 @@
// Copyright 2020 Sigma Prime Pty Ltd.
//
// Permission is hereby granted, free of charge, to any person obtaining a
// copy of this software and associated documentation files (the "Software"),
// to deal in the Software without restriction, including without limitation
// the rights to use, copy, modify, merge, publish, distribute, sublicense,
// and/or sell copies of the Software, and to permit persons to whom the
// Software is furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in
// all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
// OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
// DEALINGS IN THE SOFTWARE.
use crate::gossipsub::types::Subscription;
use crate::gossipsub::TopicHash;
use std::collections::{BTreeSet, HashMap, HashSet};
/// Filter applied to topic subscriptions, both our own and those received
/// from remote peers.
pub trait TopicSubscriptionFilter {
    /// Returns true iff the topic is of interest and we can subscribe to it.
    fn can_subscribe(&mut self, topic_hash: &TopicHash) -> bool;

    /// Filters a list of incoming subscriptions and returns a filtered set
    /// By default this deduplicates the subscriptions and calls
    /// [`Self::filter_incoming_subscription_set`] on the filtered set.
    fn filter_incoming_subscriptions<'a>(
        &mut self,
        subscriptions: &'a [Subscription],
        currently_subscribed_topics: &BTreeSet<TopicHash>,
    ) -> Result<HashSet<&'a Subscription>, String> {
        // Deduplicate per topic: a repeat of the *same* action is dropped
        // (first wins); an *opposite* action removes the stored entry, so a
        // contradictory pair cancels out. Note that a third subscription for
        // the same topic would then be re-inserted as a fresh entry.
        let mut filtered_subscriptions: HashMap<TopicHash, &Subscription> = HashMap::new();
        for subscription in subscriptions {
            use std::collections::hash_map::Entry::*;
            match filtered_subscriptions.entry(subscription.topic_hash.clone()) {
                Occupied(entry) => {
                    if entry.get().action != subscription.action {
                        entry.remove();
                    }
                }
                Vacant(entry) => {
                    entry.insert(subscription);
                }
            }
        }
        self.filter_incoming_subscription_set(
            filtered_subscriptions.into_values().collect(),
            currently_subscribed_topics,
        )
    }

    /// Filters a set of deduplicated subscriptions
    /// By default this filters the elements based on [`Self::allow_incoming_subscription`].
    fn filter_incoming_subscription_set<'a>(
        &mut self,
        mut subscriptions: HashSet<&'a Subscription>,
        _currently_subscribed_topics: &BTreeSet<TopicHash>,
    ) -> Result<HashSet<&'a Subscription>, String> {
        subscriptions.retain(|s| {
            if self.allow_incoming_subscription(s) {
                true
            } else {
                tracing::debug!(subscription=?s, "Filtered incoming subscription");
                false
            }
        });
        Ok(subscriptions)
    }

    /// Returns true iff we allow an incoming subscription.
    /// This is used by the default implementation of filter_incoming_subscription_set to decide
    /// whether to filter out a subscription or not.
    /// By default this uses can_subscribe to decide the same for incoming subscriptions as for
    /// outgoing ones.
    fn allow_incoming_subscription(&mut self, subscription: &Subscription) -> bool {
        self.can_subscribe(&subscription.topic_hash)
    }
}
// Some useful implementers of `TopicSubscriptionFilter`.
/// Allows all subscriptions
#[derive(Default, Clone)]
pub struct AllowAllSubscriptionFilter {}
impl TopicSubscriptionFilter for AllowAllSubscriptionFilter {
    // Every topic is of interest; nothing is ever filtered.
    fn can_subscribe(&mut self, _: &TopicHash) -> bool {
        true
    }
}
/// Allows only whitelisted subscriptions
#[derive(Default, Clone)]
pub struct WhitelistSubscriptionFilter(pub HashSet<TopicHash>);
impl TopicSubscriptionFilter for WhitelistSubscriptionFilter {
    // Accept only topics present in the whitelist set.
    fn can_subscribe(&mut self, topic_hash: &TopicHash) -> bool {
        self.0.contains(topic_hash)
    }
}
/// Adds a max count to a given subscription filter
pub struct MaxCountSubscriptionFilter<T: TopicSubscriptionFilter> {
    /// The wrapped filter that decides whether a topic is of interest.
    pub filter: T,
    /// Upper bound on the number of topics we may be subscribed to at once.
    pub max_subscribed_topics: usize,
    /// Upper bound on the number of subscription entries in a single request.
    pub max_subscriptions_per_request: usize,
}
impl<T: TopicSubscriptionFilter> TopicSubscriptionFilter for MaxCountSubscriptionFilter<T> {
    fn can_subscribe(&mut self, topic_hash: &TopicHash) -> bool {
        self.filter.can_subscribe(topic_hash)
    }
    /// Enforces both limits: rejects over-long requests outright, then checks
    /// that the net effect of the surviving subscriptions keeps the subscribed
    /// topic count within `max_subscribed_topics`.
    fn filter_incoming_subscriptions<'a>(
        &mut self,
        subscriptions: &'a [Subscription],
        currently_subscribed_topics: &BTreeSet<TopicHash>,
    ) -> Result<HashSet<&'a Subscription>, String> {
        if subscriptions.len() > self.max_subscriptions_per_request {
            return Err("too many subscriptions per request".into());
        }
        let filtered = self
            .filter
            .filter_incoming_subscriptions(subscriptions, currently_subscribed_topics)?;
        use crate::gossipsub::types::SubscriptionAction::*;
        // Only count subscribes to topics we are not yet in, and unsubscribes
        // from topics we are currently in; everything else is a no-op.
        let mut unsubscribed = 0;
        let mut new_subscribed = 0;
        for sub in filtered.iter() {
            let already_subscribed = currently_subscribed_topics.contains(&sub.topic_hash);
            match sub.action {
                Subscribe if !already_subscribed => new_subscribed += 1,
                Unsubscribe if already_subscribed => unsubscribed += 1,
                _ => {}
            }
        }
        if new_subscribed + currently_subscribed_topics.len()
            > self.max_subscribed_topics + unsubscribed
        {
            return Err("too many subscribed topics".into());
        }
        Ok(filtered)
    }
}
/// Combines two subscription filters
pub struct CombinedSubscriptionFilters<T: TopicSubscriptionFilter, S: TopicSubscriptionFilter> {
    pub filter1: T,
    pub filter2: S,
}
impl<T, S> TopicSubscriptionFilter for CombinedSubscriptionFilters<T, S>
where
    T: TopicSubscriptionFilter,
    S: TopicSubscriptionFilter,
{
    // A topic must be accepted by *both* filters.
    fn can_subscribe(&mut self, topic_hash: &TopicHash) -> bool {
        self.filter1.can_subscribe(topic_hash) && self.filter2.can_subscribe(topic_hash)
    }
    // Filters are applied in sequence: filter2 only sees what filter1 kept.
    fn filter_incoming_subscription_set<'a>(
        &mut self,
        subscriptions: HashSet<&'a Subscription>,
        currently_subscribed_topics: &BTreeSet<TopicHash>,
    ) -> Result<HashSet<&'a Subscription>, String> {
        let intermediate = self
            .filter1
            .filter_incoming_subscription_set(subscriptions, currently_subscribed_topics)?;
        self.filter2
            .filter_incoming_subscription_set(intermediate, currently_subscribed_topics)
    }
}
/// A filter that delegates the subscribe decision to a user-supplied callback.
pub struct CallbackSubscriptionFilter<T>(pub T)
where
    T: FnMut(&TopicHash) -> bool;
impl<T> TopicSubscriptionFilter for CallbackSubscriptionFilter<T>
where
    T: FnMut(&TopicHash) -> bool,
{
    fn can_subscribe(&mut self, topic_hash: &TopicHash) -> bool {
        (self.0)(topic_hash)
    }
}
/// A subscription filter that filters topics based on a regular expression.
pub struct RegexSubscriptionFilter(pub regex::Regex);
impl TopicSubscriptionFilter for RegexSubscriptionFilter {
    // NOTE: `is_match` searches anywhere in the topic string (not anchored).
    fn can_subscribe(&mut self, topic_hash: &TopicHash) -> bool {
        self.0.is_match(topic_hash.as_str())
    }
}
// Unit tests for the subscription filters above.
#[cfg(test)]
mod test {
    use super::*;
    use crate::gossipsub::types::SubscriptionAction::*;
    use std::iter::FromIterator;
    #[test]
    fn test_filter_incoming_allow_all_with_duplicates() {
        let mut filter = AllowAllSubscriptionFilter {};
        let t1 = TopicHash::from_raw("t1");
        let t2 = TopicHash::from_raw("t2");
        let old = BTreeSet::from_iter(vec![t1.clone()]);
        let subscriptions = vec![
            Subscription {
                action: Unsubscribe,
                topic_hash: t1.clone(),
            },
            Subscription {
                action: Unsubscribe,
                topic_hash: t2.clone(),
            },
            Subscription {
                action: Subscribe,
                topic_hash: t2,
            },
            Subscription {
                action: Subscribe,
                topic_hash: t1.clone(),
            },
            Subscription {
                action: Unsubscribe,
                topic_hash: t1,
            },
        ];
        let result = filter
            .filter_incoming_subscriptions(&subscriptions, &old)
            .unwrap();
        // Conflicting duplicates cancel pairwise during deduplication, so only
        // the final unsubscribe for t1 survives.
        assert_eq!(result, vec![&subscriptions[4]].into_iter().collect());
    }
    #[test]
    fn test_filter_incoming_whitelist() {
        let t1 = TopicHash::from_raw("t1");
        let t2 = TopicHash::from_raw("t2");
        let mut filter = WhitelistSubscriptionFilter(HashSet::from_iter(vec![t1.clone()]));
        let old = Default::default();
        let subscriptions = vec![
            Subscription {
                action: Subscribe,
                topic_hash: t1,
            },
            Subscription {
                action: Subscribe,
                topic_hash: t2,
            },
        ];
        let result = filter
            .filter_incoming_subscriptions(&subscriptions, &old)
            .unwrap();
        // Only the whitelisted topic t1 is kept.
        assert_eq!(result, vec![&subscriptions[0]].into_iter().collect());
    }
    #[test]
    fn test_filter_incoming_too_many_subscriptions_per_request() {
        let t1 = TopicHash::from_raw("t1");
        let mut filter = MaxCountSubscriptionFilter {
            filter: AllowAllSubscriptionFilter {},
            max_subscribed_topics: 100,
            max_subscriptions_per_request: 2,
        };
        let old = Default::default();
        // Three entries against a per-request limit of two.
        let subscriptions = vec![
            Subscription {
                action: Subscribe,
                topic_hash: t1.clone(),
            },
            Subscription {
                action: Unsubscribe,
                topic_hash: t1.clone(),
            },
            Subscription {
                action: Subscribe,
                topic_hash: t1,
            },
        ];
        let result = filter.filter_incoming_subscriptions(&subscriptions, &old);
        assert_eq!(result, Err("too many subscriptions per request".into()));
    }
    #[test]
    fn test_filter_incoming_too_many_subscriptions() {
        let t: Vec<_> = (0..4)
            .map(|i| TopicHash::from_raw(format!("t{i}")))
            .collect();
        let mut filter = MaxCountSubscriptionFilter {
            filter: AllowAllSubscriptionFilter {},
            max_subscribed_topics: 3,
            max_subscriptions_per_request: 2,
        };
        // Already subscribed to two topics; adding two more exceeds the cap.
        let old = t[0..2].iter().cloned().collect();
        let subscriptions = vec![
            Subscription {
                action: Subscribe,
                topic_hash: t[2].clone(),
            },
            Subscription {
                action: Subscribe,
                topic_hash: t[3].clone(),
            },
        ];
        let result = filter.filter_incoming_subscriptions(&subscriptions, &old);
        assert_eq!(result, Err("too many subscribed topics".into()));
    }
    #[test]
    fn test_filter_incoming_max_subscribed_valid() {
        let t: Vec<_> = (0..5)
            .map(|i| TopicHash::from_raw(format!("t{i}")))
            .collect();
        let mut filter = MaxCountSubscriptionFilter {
            filter: WhitelistSubscriptionFilter(t.iter().take(4).cloned().collect()),
            max_subscribed_topics: 2,
            max_subscriptions_per_request: 5,
        };
        let old = t[0..2].iter().cloned().collect();
        // t[4] is not whitelisted; the two unsubscribes offset the two new
        // subscribes, keeping the total within max_subscribed_topics.
        let subscriptions = vec![
            Subscription {
                action: Subscribe,
                topic_hash: t[4].clone(),
            },
            Subscription {
                action: Subscribe,
                topic_hash: t[2].clone(),
            },
            Subscription {
                action: Subscribe,
                topic_hash: t[3].clone(),
            },
            Subscription {
                action: Unsubscribe,
                topic_hash: t[0].clone(),
            },
            Subscription {
                action: Unsubscribe,
                topic_hash: t[1].clone(),
            },
        ];
        let result = filter
            .filter_incoming_subscriptions(&subscriptions, &old)
            .unwrap();
        assert_eq!(result, subscriptions[1..].iter().collect());
    }
    #[test]
    fn test_callback_filter() {
        let t1 = TopicHash::from_raw("t1");
        let t2 = TopicHash::from_raw("t2");
        let mut filter = CallbackSubscriptionFilter(|h| h.as_str() == "t1");
        let old = Default::default();
        let subscriptions = vec![
            Subscription {
                action: Subscribe,
                topic_hash: t1,
            },
            Subscription {
                action: Subscribe,
                topic_hash: t2,
            },
        ];
        let result = filter
            .filter_incoming_subscriptions(&subscriptions, &old)
            .unwrap();
        assert_eq!(result, vec![&subscriptions[0]].into_iter().collect());
    }
    #[test]
    fn test_regex_subscription_filter() {
        // "t.*t" matches anywhere in the string, so both "tt" and "et3t3te"
        // pass; the alphabet topic does not.
        let t1 = TopicHash::from_raw("tt");
        let t2 = TopicHash::from_raw("et3t3te");
        let t3 = TopicHash::from_raw("abcdefghijklmnopqrsuvwxyz");
        let mut filter = RegexSubscriptionFilter(regex::Regex::new("t.*t").unwrap());
        let old = Default::default();
        let subscriptions = vec![
            Subscription {
                action: Subscribe,
                topic_hash: t1,
            },
            Subscription {
                action: Subscribe,
                topic_hash: t2,
            },
            Subscription {
                action: Subscribe,
                topic_hash: t3,
            },
        ];
        let result = filter
            .filter_incoming_subscriptions(&subscriptions, &old)
            .unwrap();
        assert_eq!(result, subscriptions[..2].iter().collect());
    }
}

View File

@ -0,0 +1,219 @@
// Copyright 2020 Sigma Prime Pty Ltd.
//
// Permission is hereby granted, free of charge, to any person obtaining a
// copy of this software and associated documentation files (the "Software"),
// to deal in the Software without restriction, including without limitation
// the rights to use, copy, modify, merge, publish, distribute, sublicense,
// and/or sell copies of the Software, and to permit persons to whom the
// Software is furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in
// all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
// OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
// DEALINGS IN THE SOFTWARE.
//! This implements a time-based LRU cache for checking gossipsub message duplicates.
use fnv::FnvHashMap;
use instant::Instant;
use std::collections::hash_map::{
self,
Entry::{Occupied, Vacant},
};
use std::collections::VecDeque;
use std::time::Duration;
/// A value (or key) paired with the instant at which it expires.
struct ExpiringElement<Element> {
    /// The element that expires
    element: Element,
    /// The expire time.
    expires: Instant,
}
pub(crate) struct TimeCache<Key, Value> {
    /// Mapping a key to its value together with its latest expire time (can be updated through
    /// reinserts).
    map: FnvHashMap<Key, ExpiringElement<Value>>,
    /// An ordered list of keys by expires time.
    list: VecDeque<ExpiringElement<Key>>,
    /// The time elements remain in the cache.
    ttl: Duration,
}
/// A view of an existing entry, wrapping the underlying map entry.
pub(crate) struct OccupiedEntry<'a, K, V> {
    entry: hash_map::OccupiedEntry<'a, K, ExpiringElement<V>>,
}
impl<'a, K, V> OccupiedEntry<'a, K, V>
where
    K: Eq + std::hash::Hash + Clone,
{
    /// Converts the entry into a mutable reference to its value.
    pub(crate) fn into_mut(self) -> &'a mut V {
        &mut self.entry.into_mut().element
    }
}
/// A view of a missing entry; inserting through it records the expiry both in
/// the map and in the expiry-ordered list.
pub(crate) struct VacantEntry<'a, K, V> {
    expiration: Instant,
    entry: hash_map::VacantEntry<'a, K, ExpiringElement<V>>,
    list: &'a mut VecDeque<ExpiringElement<K>>,
}
impl<'a, K, V> VacantEntry<'a, K, V>
where
    K: Eq + std::hash::Hash + Clone,
{
    /// Inserts `value`, scheduling it to expire at this entry's expiration
    /// instant, and returns a mutable reference to it.
    pub(crate) fn insert(self, value: V) -> &'a mut V {
        // Record the key in the expiry-ordered list alongside the map insert.
        self.list.push_back(ExpiringElement {
            element: self.entry.key().clone(),
            expires: self.expiration,
        });
        &mut self
            .entry
            .insert(ExpiringElement {
                element: value,
                expires: self.expiration,
            })
            .element
    }
}
/// Entry API for [`TimeCache`], mirroring `std::collections::hash_map::Entry`.
pub(crate) enum Entry<'a, K: 'a, V: 'a> {
    Occupied(OccupiedEntry<'a, K, V>),
    Vacant(VacantEntry<'a, K, V>),
}
impl<'a, K: 'a, V: 'a> Entry<'a, K, V>
where
    K: Eq + std::hash::Hash + Clone,
{
    /// Returns a mutable reference to the entry's value, inserting
    /// `V::default()` first if the entry is vacant.
    pub(crate) fn or_default(self) -> &'a mut V
    where
        V: Default,
    {
        match self {
            Entry::Occupied(entry) => entry.into_mut(),
            Entry::Vacant(entry) => entry.insert(V::default()),
        }
    }
}
impl<Key, Value> TimeCache<Key, Value>
where
    Key: Eq + std::hash::Hash + Clone,
{
    /// Creates an empty cache whose entries live for `ttl` after insertion.
    pub(crate) fn new(ttl: Duration) -> Self {
        TimeCache {
            map: FnvHashMap::default(),
            list: VecDeque::new(),
            ttl,
        }
    }
    /// Evicts every entry that expired at or before `now`.
    ///
    /// `list` is ordered by expiry (the TTL is constant, so insertion order is
    /// expiry order), which lets the scan stop at the first still-live element.
    fn remove_expired_keys(&mut self, now: Instant) {
        while let Some(element) = self.list.pop_front() {
            if element.expires > now {
                // Not expired yet: put it back and stop scanning.
                self.list.push_front(element);
                break;
            }
            // `element` is owned here, so the key can be moved into the lookup
            // rather than cloned (the previous clone was redundant).
            if let Occupied(entry) = self.map.entry(element.element) {
                // A reinsert may have refreshed the expiry stored in the map;
                // only remove the entry if it is genuinely expired.
                if entry.get().expires <= now {
                    entry.remove();
                }
            }
        }
    }
    /// Returns the entry for `key`, first evicting all expired entries.
    pub(crate) fn entry(&mut self, key: Key) -> Entry<Key, Value> {
        let now = Instant::now();
        self.remove_expired_keys(now);
        match self.map.entry(key) {
            Occupied(entry) => Entry::Occupied(OccupiedEntry { entry }),
            Vacant(entry) => Entry::Vacant(VacantEntry {
                expiration: now + self.ttl,
                entry,
                list: &mut self.list,
            }),
        }
    }
    /// Empties the entire cache.
    #[cfg(test)]
    pub(crate) fn clear(&mut self) {
        self.map.clear();
        self.list.clear();
    }
    /// Returns true if `key` is present. Note that expired entries are only
    /// evicted inside [`Self::entry`], so a stale key may still report true.
    pub(crate) fn contains_key(&self, key: &Key) -> bool {
        self.map.contains_key(key)
    }
}
/// A time-based cache that tracks only key presence; used to detect duplicate
/// gossipsub messages within a TTL window.
pub(crate) struct DuplicateCache<Key>(TimeCache<Key, ()>);
impl<Key> DuplicateCache<Key>
where
    Key: Eq + std::hash::Hash + Clone,
{
    /// Creates an empty cache whose entries expire after `ttl`.
    pub(crate) fn new(ttl: Duration) -> Self {
        DuplicateCache(TimeCache::new(ttl))
    }
    /// Inserts `key`, evicting any expired entries first.
    ///
    /// Returns `true` if the key was not already present, `false` otherwise.
    pub(crate) fn insert(&mut self, key: Key) -> bool {
        match self.0.entry(key) {
            Entry::Vacant(entry) => {
                entry.insert(());
                true
            }
            Entry::Occupied(_) => false,
        }
    }
    /// Returns true if `key` is currently cached.
    pub(crate) fn contains(&self, key: &Key) -> bool {
        self.0.contains_key(key)
    }
}
#[cfg(test)]
mod test {
    use super::*;
    #[test]
    fn cache_added_entries_exist() {
        let mut cache = DuplicateCache::new(Duration::from_secs(10));
        cache.insert("t");
        cache.insert("e");
        // Should report that 't' and 'e' already exist
        assert!(!cache.insert("t"));
        assert!(!cache.insert("e"));
    }
    #[test]
    fn cache_entries_expire() {
        let mut cache = DuplicateCache::new(Duration::from_millis(100));
        cache.insert("t");
        assert!(!cache.insert("t"));
        cache.insert("e");
        assert!(!cache.insert("e"));
        // sleep until cache expiry
        std::thread::sleep(Duration::from_millis(101));
        // add another element to trigger eviction of the expired entries
        cache.insert("s");
        // "t" should have been evicted, so inserting it succeeds again
        assert!(cache.insert("t"));
    }
}

View File

@ -0,0 +1,123 @@
// Copyright 2020 Sigma Prime Pty Ltd.
//
// Permission is hereby granted, free of charge, to any person obtaining a
// copy of this software and associated documentation files (the "Software"),
// to deal in the Software without restriction, including without limitation
// the rights to use, copy, modify, merge, publish, distribute, sublicense,
// and/or sell copies of the Software, and to permit persons to whom the
// Software is furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in
// all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
// OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
// DEALINGS IN THE SOFTWARE.
use crate::gossipsub::rpc_proto::proto;
use base64::prelude::*;
use prometheus_client::encoding::EncodeLabelSet;
use quick_protobuf::Writer;
use sha2::{Digest, Sha256};
use std::fmt;
/// A generic trait that can be extended for various hashing types for a topic.
pub trait Hasher {
    /// The function that takes a topic string and creates a topic hash.
    fn hash(topic_string: String) -> TopicHash;
}
/// A type for representing topics who use the identity hash.
#[derive(Debug, Clone)]
pub struct IdentityHash {}
impl Hasher for IdentityHash {
    /// Creates a [`TopicHash`] as a raw string.
    /// No hashing is performed; the topic string is used verbatim.
    fn hash(topic_string: String) -> TopicHash {
        TopicHash { hash: topic_string }
    }
}
/// A type for representing topics hashed with SHA-256.
#[derive(Debug, Clone)]
pub struct Sha256Hash {}
impl Hasher for Sha256Hash {
    /// Creates a [`TopicHash`] by protobuf-encoding the topic as a
    /// `TopicDescriptor`, SHA256-hashing the encoding, then base64-encoding
    /// the digest.
    fn hash(topic_string: String) -> TopicHash {
        use quick_protobuf::MessageWrite;
        // Wrap the topic name in the protobuf descriptor used on the wire.
        let descriptor = proto::TopicDescriptor {
            name: Some(topic_string),
            auth: None,
            enc: None,
        };
        let mut encoded = Vec::with_capacity(descriptor.get_size());
        descriptor
            .write_message(&mut Writer::new(&mut encoded))
            .expect("Encoding to succeed");
        TopicHash {
            hash: BASE64_STANDARD.encode(Sha256::digest(&encoded)),
        }
    }
}
#[derive(Debug, Clone, PartialEq, Eq, Hash, PartialOrd, Ord, EncodeLabelSet)]
pub struct TopicHash {
    /// The topic hash. Stored as a string to align with the protobuf API.
    hash: String,
}
impl TopicHash {
    /// Builds a [`TopicHash`] directly from an already-hashed (or raw) string.
    pub fn from_raw(hash: impl Into<String>) -> TopicHash {
        TopicHash { hash: hash.into() }
    }
    /// Consumes the hash, returning the underlying string.
    pub fn into_string(self) -> String {
        self.hash
    }
    /// Borrows the underlying hash string.
    pub fn as_str(&self) -> &str {
        &self.hash
    }
}
/// A gossipsub topic.
#[derive(Debug, Clone, PartialEq, Eq, PartialOrd, Ord)]
pub struct Topic<H: Hasher> {
    topic: String,
    // Records which hashing scheme this topic uses without storing an `H`.
    phantom_data: std::marker::PhantomData<H>,
}
impl<H: Hasher> From<Topic<H>> for TopicHash {
    fn from(topic: Topic<H>) -> TopicHash {
        topic.hash()
    }
}
impl<H: Hasher> Topic<H> {
    /// Creates a new topic from the given string.
    pub fn new(topic: impl Into<String>) -> Self {
        Topic {
            topic: topic.into(),
            phantom_data: std::marker::PhantomData,
        }
    }
    /// Hashes the topic string using the hashing scheme `H`.
    pub fn hash(&self) -> TopicHash {
        H::hash(self.topic.clone())
    }
}
// A topic displays as its raw (un-hashed) string.
impl<H: Hasher> fmt::Display for Topic<H> {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        f.write_str(&self.topic)
    }
}
// A topic hash displays as its stored hash string.
impl fmt::Display for TopicHash {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        f.write_str(&self.hash)
    }
}

View File

@ -0,0 +1,72 @@
// Copyright 2020 Sigma Prime Pty Ltd.
//
// Permission is hereby granted, free of charge, to any person obtaining a
// copy of this software and associated documentation files (the "Software"),
// to deal in the Software without restriction, including without limitation
// the rights to use, copy, modify, merge, publish, distribute, sublicense,
// and/or sell copies of the Software, and to permit persons to whom the
// Software is furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in
// all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
// OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
// DEALINGS IN THE SOFTWARE.
//! This trait allows of extended user-level decoding that can apply to message-data before a
//! message-id is calculated.
//!
//! This is primarily designed to allow applications to implement their own custom compression
//! algorithms that can be topic-specific. Once the raw data is transformed the message-id is then
//! calculated, allowing for applications to employ message-id functions post compression.
use crate::gossipsub::{Message, RawMessage, TopicHash};
/// A general trait of transforming a [`RawMessage`] into a [`Message`]. The
/// [`RawMessage`] is obtained from the wire and the [`Message`] is used to
/// calculate the [`crate::gossipsub::MessageId`] of the message and is what is sent to the application.
///
/// The inbound/outbound transforms must be inverses. Applying the inbound transform and then the
/// outbound transform MUST leave the underlying data un-modified.
///
/// By default, this is the identity transform for all fields in [`Message`].
pub trait DataTransform {
    /// Takes a [`RawMessage`] received and converts it to a [`Message`].
    fn inbound_transform(&self, raw_message: RawMessage) -> Result<Message, std::io::Error>;
    /// Takes the data to be published (a topic and associated data) transforms the data. The
    /// transformed data will then be used to create a [`crate::gossipsub::RawMessage`] to be sent to peers.
    fn outbound_transform(
        &self,
        topic: &TopicHash,
        data: Vec<u8>,
    ) -> Result<Vec<u8>, std::io::Error>;
}
/// The default transform, the raw data is propagated as is to the application layer gossipsub.
#[derive(Default, Clone)]
pub struct IdentityTransform;
impl DataTransform for IdentityTransform {
    // Copies the fields through unchanged, dropping only the wire-level
    // signature/key/validated metadata that `Message` does not carry.
    fn inbound_transform(&self, raw_message: RawMessage) -> Result<Message, std::io::Error> {
        Ok(Message {
            source: raw_message.source,
            data: raw_message.data,
            sequence_number: raw_message.sequence_number,
            topic: raw_message.topic,
        })
    }
    // Outbound data passes through untouched.
    fn outbound_transform(
        &self,
        _topic: &TopicHash,
        data: Vec<u8>,
    ) -> Result<Vec<u8>, std::io::Error> {
        Ok(data)
    }
}

View File

@ -0,0 +1,768 @@
// Copyright 2020 Sigma Prime Pty Ltd.
//
// Permission is hereby granted, free of charge, to any person obtaining a
// copy of this software and associated documentation files (the "Software"),
// to deal in the Software without restriction, including without limitation
// the rights to use, copy, modify, merge, publish, distribute, sublicense,
// and/or sell copies of the Software, and to permit persons to whom the
// Software is furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in
// all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
// OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
// DEALINGS IN THE SOFTWARE.
//! A collection of types using the Gossipsub system.
use crate::gossipsub::metrics::Metrics;
use crate::gossipsub::TopicHash;
use async_channel::{Receiver, Sender};
use futures::Stream;
use futures_timer::Delay;
use instant::Duration;
use libp2p::identity::PeerId;
use libp2p::swarm::ConnectionId;
use prometheus_client::encoding::EncodeLabelValue;
use quick_protobuf::MessageWrite;
use std::collections::BTreeSet;
use std::fmt::Debug;
use std::sync::atomic::{AtomicUsize, Ordering};
use std::sync::Arc;
use std::task::Poll;
use std::{fmt, pin::Pin};
use crate::gossipsub::rpc_proto::proto;
#[cfg(feature = "serde")]
use serde::{Deserialize, Serialize};
/// Counters for messages that failed to be sent to a peer, bucketed by cause.
#[derive(Clone, Debug, Default)]
pub struct FailedMessages {
    /// The number of publish messages that failed to be published in a heartbeat.
    pub publish: usize,
    /// The number of forward messages that failed to be published in a heartbeat.
    pub forward: usize,
    /// The number of messages that were failed to be sent to the priority queue as it was full.
    pub priority: usize,
    /// The number of messages that were failed to be sent to the non-priority queue as it was full.
    pub non_priority: usize,
}
impl FailedMessages {
    /// Messages that expired due to a send timeout (publish + forward).
    pub fn total_timeout(&self) -> usize {
        self.publish + self.forward
    }
    /// Messages dropped because a queue was full (priority + non-priority).
    pub fn total_queue_full(&self) -> usize {
        self.priority + self.non_priority
    }
    /// All failed messages in a heartbeat, regardless of cause.
    pub fn total(&self) -> usize {
        self.publish + self.forward + self.priority + self.non_priority
    }
}
#[derive(Debug)]
/// Validation kinds from the application for received messages.
pub enum MessageAcceptance {
    /// The message is considered valid, and it should be delivered and forwarded to the network.
    Accept,
    /// The message is considered invalid, and it should be rejected and trigger the P₄ penalty.
    Reject,
    /// The message is neither delivered nor forwarded to the network, but the router does not
    /// trigger the P₄ penalty.
    Ignore,
}
/// An opaque identifier for a gossipsub message, stored as raw bytes.
#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))]
#[derive(Clone, PartialEq, Eq, Hash, PartialOrd, Ord)]
pub struct MessageId(pub Vec<u8>);
impl MessageId {
    /// Creates a [`MessageId`] by copying the given bytes.
    pub fn new(value: &[u8]) -> Self {
        Self(value.to_vec())
    }
}
impl<T: Into<Vec<u8>>> From<T> for MessageId {
    fn from(value: T) -> Self {
        Self(value.into())
    }
}
impl std::fmt::Display for MessageId {
    // Renders the id bytes as hex via `hex_fmt`.
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        write!(f, "{}", hex_fmt::HexFmt(&self.0))
    }
}
impl std::fmt::Debug for MessageId {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        write!(f, "MessageId({})", hex_fmt::HexFmt(&self.0))
    }
}
/// Bookkeeping for a connected peer: protocol kind, live connections,
/// outbound RPC channel and subscribed topics.
#[derive(Debug, Clone)]
pub(crate) struct PeerConnections {
    /// The kind of protocol the peer supports.
    pub(crate) kind: PeerKind,
    /// Its current connections.
    pub(crate) connections: Vec<ConnectionId>,
    /// The rpc sender to the peer.
    pub(crate) sender: RpcSender,
    /// Subscribed topics.
    pub(crate) topics: BTreeSet<TopicHash>,
}
/// Describes the types of peers that can exist in the gossipsub context.
#[derive(Debug, Clone, PartialEq, Hash, EncodeLabelValue, Eq)]
pub enum PeerKind {
    /// A gossipsub 1.1 peer.
    Gossipsubv1_1,
    /// A gossipsub 1.0 peer.
    Gossipsub,
    /// A floodsub peer.
    Floodsub,
    /// The peer doesn't support any of the protocols.
    NotSupported,
}
/// A message received by the gossipsub system and stored locally in caches.
#[derive(Clone, PartialEq, Eq, Hash, Debug)]
pub struct RawMessage {
    /// Id of the peer that published this message.
    pub source: Option<PeerId>,
    /// Content of the message. Its meaning is out of scope of this library.
    pub data: Vec<u8>,
    /// A random sequence number.
    pub sequence_number: Option<u64>,
    /// The topic this message belongs to
    pub topic: TopicHash,
    /// The signature of the message if it's signed.
    pub signature: Option<Vec<u8>>,
    /// The public key of the message if it is signed and the source [`PeerId`] cannot be inlined.
    pub key: Option<Vec<u8>>,
    /// Flag indicating if this message has been validated by the application or not.
    pub validated: bool,
}
impl RawMessage {
    /// Calculates the encoded length of this message (used for calculating metrics).
    pub fn raw_protobuf_len(&self) -> usize {
        // Builds a throwaway protobuf value purely to query its encoded size;
        // the payload fields are cloned for this.
        let message = proto::Message {
            from: self.source.map(|m| m.to_bytes()),
            data: Some(self.data.clone()),
            seqno: self.sequence_number.map(|s| s.to_be_bytes().to_vec()),
            topic: TopicHash::into_string(self.topic.clone()),
            signature: self.signature.clone(),
            key: self.key.clone(),
        };
        message.get_size()
    }
}
impl From<RawMessage> for proto::Message {
    /// Converts an owned [`RawMessage`] into its protobuf wire representation.
    /// Note the `validated` flag is local-only and not part of the wire format.
    fn from(raw: RawMessage) -> Self {
        proto::Message {
            from: raw.source.map(|m| m.to_bytes()),
            data: Some(raw.data),
            seqno: raw.sequence_number.map(|s| s.to_be_bytes().to_vec()),
            topic: TopicHash::into_string(raw.topic),
            signature: raw.signature,
            key: raw.key,
        }
    }
}
/// The message sent to the user after a [`RawMessage`] has been transformed by a
/// [`crate::gossipsub::DataTransform`].
#[derive(Clone, PartialEq, Eq, Hash)]
pub struct Message {
    /// Id of the peer that published this message.
    pub source: Option<PeerId>,
    /// Content of the message.
    pub data: Vec<u8>,
    /// A random sequence number.
    pub sequence_number: Option<u64>,
    /// The topic this message belongs to
    pub topic: TopicHash,
}
impl fmt::Debug for Message {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        f.debug_struct("Message")
            .field(
                "data",
                // Hex-encode the payload, left-aligned to a minimum width of 20.
                &format_args!("{:<20}", &hex_fmt::HexFmt(&self.data)),
            )
            .field("source", &self.source)
            .field("sequence_number", &self.sequence_number)
            .field("topic", &self.topic)
            .finish()
    }
}
/// A subscription received by the gossipsub system.
#[derive(Debug, Clone, PartialEq, Eq, Hash)]
pub struct Subscription {
    /// Action to perform.
    pub action: SubscriptionAction,
    /// The topic from which to subscribe or unsubscribe.
    pub topic_hash: TopicHash,
}
/// Action that a subscription wants to perform.
#[derive(Debug, Clone, PartialEq, Eq, Hash)]
pub enum SubscriptionAction {
    /// The remote wants to subscribe to the given topic.
    Subscribe,
    /// The remote wants to unsubscribe from the given topic.
    Unsubscribe,
}
/// Peer information carried in PRUNE peer-exchange.
#[derive(Debug, Clone, PartialEq, Eq, Hash)]
pub(crate) struct PeerInfo {
    pub(crate) peer_id: Option<PeerId>,
    //TODO add this when RFC: Signed Address Records got added to the spec (see pull request
    // https://github.com/libp2p/specs/pull/217)
    //pub signed_peer_record: ?,
}
/// A Control message received by the gossipsub system.
#[derive(Debug, Clone, PartialEq, Eq, Hash)]
pub enum ControlAction {
    /// Node broadcasts known messages per topic - IHave control message.
    IHave(IHave),
    /// The node requests specific message ids (peer_id + sequence _number) - IWant control message.
    IWant(IWant),
    /// The node has been added to the mesh - Graft control message.
    Graft(Graft),
    /// The node has been removed from the mesh - Prune control message.
    Prune(Prune),
}
/// Node broadcasts known messages per topic - IHave control message.
#[derive(Debug, Clone, PartialEq, Eq, Hash)]
pub struct IHave {
    /// The topic of the messages.
    pub(crate) topic_hash: TopicHash,
    /// A list of known message ids (peer_id + sequence _number) as a string.
    pub(crate) message_ids: Vec<MessageId>,
}
/// The node requests specific message ids (peer_id + sequence _number) - IWant control message.
#[derive(Debug, Clone, PartialEq, Eq, Hash)]
pub struct IWant {
    /// A list of known message ids (peer_id + sequence _number) as a string.
    pub(crate) message_ids: Vec<MessageId>,
}
/// The node has been added to the mesh - Graft control message.
#[derive(Debug, Clone, PartialEq, Eq, Hash)]
pub struct Graft {
    /// The mesh topic the peer should be added to.
    pub(crate) topic_hash: TopicHash,
}
/// The node has been removed from the mesh - Prune control message.
#[derive(Debug, Clone, PartialEq, Eq, Hash)]
pub struct Prune {
    /// The mesh topic the peer should be removed from.
    pub(crate) topic_hash: TopicHash,
    /// A list of peers to be proposed to the removed peer as peer exchange
    pub(crate) peers: Vec<PeerInfo>,
    /// The backoff time in seconds before we allow to reconnect
    pub(crate) backoff: Option<u64>,
}
/// A Gossipsub RPC message sent.
#[derive(Debug)]
pub enum RpcOut {
    /// Publish a Gossipsub message on network. The [`Delay`] tags the time we attempted to
    /// send it.
    Publish { message: RawMessage, timeout: Delay },
    /// Forward a Gossipsub message to the network. The [`Delay`] tags the time we attempted to
    /// send it.
    Forward { message: RawMessage, timeout: Delay },
    /// Subscribe a topic.
    Subscribe(TopicHash),
    /// Unsubscribe a topic.
    Unsubscribe(TopicHash),
    /// Send a GRAFT control message.
    Graft(Graft),
    /// Send a PRUNE control message.
    Prune(Prune),
    /// Send a IHave control message.
    IHave(IHave),
    /// Send a IWant control message.
    IWant(IWant),
}
impl RpcOut {
    /// Converts the GossipsubRPC into its protobuf format.
    // A convenience function to avoid explicitly specifying types.
    pub fn into_protobuf(self) -> proto::RPC {
        self.into()
    }
}
impl From<RpcOut> for proto::RPC {
/// Converts the RPC into protobuf format.
fn from(rpc: RpcOut) -> Self {
match rpc {
RpcOut::Publish {
message,
timeout: _,
} => proto::RPC {
subscriptions: Vec::new(),
publish: vec![message.into()],
control: None,
},
RpcOut::Forward {
message,
timeout: _,
} => proto::RPC {
publish: vec![message.into()],
subscriptions: Vec::new(),
control: None,
},
RpcOut::Subscribe(topic) => proto::RPC {
publish: Vec::new(),
subscriptions: vec![proto::SubOpts {
subscribe: Some(true),
topic_id: Some(topic.into_string()),
}],
control: None,
},
RpcOut::Unsubscribe(topic) => proto::RPC {
publish: Vec::new(),
subscriptions: vec![proto::SubOpts {
subscribe: Some(false),
topic_id: Some(topic.into_string()),
}],
control: None,
},
RpcOut::IHave(IHave {
topic_hash,
message_ids,
}) => proto::RPC {
publish: Vec::new(),
subscriptions: Vec::new(),
control: Some(proto::ControlMessage {
ihave: vec![proto::ControlIHave {
topic_id: Some(topic_hash.into_string()),
message_ids: message_ids.into_iter().map(|msg_id| msg_id.0).collect(),
}],
iwant: vec![],
graft: vec![],
prune: vec![],
}),
},
RpcOut::IWant(IWant { message_ids }) => proto::RPC {
publish: Vec::new(),
subscriptions: Vec::new(),
control: Some(proto::ControlMessage {
ihave: vec![],
iwant: vec![proto::ControlIWant {
message_ids: message_ids.into_iter().map(|msg_id| msg_id.0).collect(),
}],
graft: vec![],
prune: vec![],
}),
},
RpcOut::Graft(Graft { topic_hash }) => proto::RPC {
publish: Vec::new(),
subscriptions: vec![],
control: Some(proto::ControlMessage {
ihave: vec![],
iwant: vec![],
graft: vec![proto::ControlGraft {
topic_id: Some(topic_hash.into_string()),
}],
prune: vec![],
}),
},
RpcOut::Prune(Prune {
topic_hash,
peers,
backoff,
}) => {
proto::RPC {
publish: Vec::new(),
subscriptions: vec![],
control: Some(proto::ControlMessage {
ihave: vec![],
iwant: vec![],
graft: vec![],
prune: vec![proto::ControlPrune {
topic_id: Some(topic_hash.into_string()),
peers: peers
.into_iter()
.map(|info| proto::PeerInfo {
peer_id: info.peer_id.map(|id| id.to_bytes()),
// TODO, see https://github.com/libp2p/specs/pull/217
signed_peer_record: None,
})
.collect(),
backoff,
}],
}),
}
}
}
}
}
/// An RPC received/sent.
#[derive(Clone, PartialEq, Eq, Hash)]
pub struct Rpc {
    /// List of messages that were part of this RPC query.
    pub messages: Vec<RawMessage>,
    /// List of subscriptions.
    pub subscriptions: Vec<Subscription>,
    /// List of Gossipsub control messages.
    pub control_msgs: Vec<ControlAction>,
}
impl Rpc {
    /// Converts the GossipsubRPC into its protobuf format.
    // A convenience wrapper around the `From` impl that avoids explicitly
    // specifying the target type at call sites.
    pub fn into_protobuf(self) -> proto::RPC {
        proto::RPC::from(self)
    }
}
impl From<Rpc> for proto::RPC {
    /// Converts the RPC into protobuf format.
    fn from(rpc: Rpc) -> Self {
        // Raw messages become protobuf `Message`s, preserving order.
        let publish: Vec<proto::Message> = rpc
            .messages
            .into_iter()
            .map(|message| proto::Message {
                from: message.source.map(|m| m.to_bytes()),
                data: Some(message.data),
                seqno: message.sequence_number.map(|s| s.to_be_bytes().to_vec()),
                topic: TopicHash::into_string(message.topic),
                signature: message.signature,
                key: message.key,
            })
            .collect();

        // Subscriptions, encoding the subscribe/unsubscribe action as a bool.
        let subscriptions = rpc
            .subscriptions
            .into_iter()
            .map(|sub| proto::SubOpts {
                subscribe: Some(sub.action == SubscriptionAction::Subscribe),
                topic_id: Some(sub.topic_hash.into_string()),
            })
            .collect::<Vec<_>>();

        // Accumulate all control actions into a single control message;
        // remember whether there were any, so an all-empty control block is
        // encoded as `None` rather than an empty message.
        let had_control = !rpc.control_msgs.is_empty();
        let mut control = proto::ControlMessage {
            ihave: Vec::new(),
            iwant: Vec::new(),
            graft: Vec::new(),
            prune: Vec::new(),
        };
        for action in rpc.control_msgs {
            match action {
                ControlAction::IHave(IHave {
                    topic_hash,
                    message_ids,
                }) => control.ihave.push(proto::ControlIHave {
                    topic_id: Some(topic_hash.into_string()),
                    message_ids: message_ids.into_iter().map(|id| id.0).collect(),
                }),
                ControlAction::IWant(IWant { message_ids }) => {
                    control.iwant.push(proto::ControlIWant {
                        message_ids: message_ids.into_iter().map(|id| id.0).collect(),
                    })
                }
                ControlAction::Graft(Graft { topic_hash }) => {
                    control.graft.push(proto::ControlGraft {
                        topic_id: Some(topic_hash.into_string()),
                    })
                }
                ControlAction::Prune(Prune {
                    topic_hash,
                    peers,
                    backoff,
                }) => control.prune.push(proto::ControlPrune {
                    topic_id: Some(topic_hash.into_string()),
                    peers: peers
                        .into_iter()
                        .map(|info| proto::PeerInfo {
                            peer_id: info.peer_id.map(|id| id.to_bytes()),
                            // TODO, see https://github.com/libp2p/specs/pull/217
                            signed_peer_record: None,
                        })
                        .collect(),
                    backoff,
                }),
            }
        }

        proto::RPC {
            subscriptions,
            publish,
            control: had_control.then_some(control),
        }
    }
}
impl fmt::Debug for Rpc {
    /// Debug-formats the RPC, omitting empty collections for compactness.
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        let mut builder = f.debug_struct("GossipsubRpc");
        // Only include the fields that carry data.
        if !self.messages.is_empty() {
            builder.field("messages", &self.messages);
        }
        if !self.subscriptions.is_empty() {
            builder.field("subscriptions", &self.subscriptions);
        }
        if !self.control_msgs.is_empty() {
            builder.field("control_msgs", &self.control_msgs);
        }
        builder.finish()
    }
}
impl PeerKind {
pub fn as_static_ref(&self) -> &'static str {
match self {
Self::NotSupported => "Not Supported",
Self::Floodsub => "Floodsub",
Self::Gossipsub => "Gossipsub v1.0",
Self::Gossipsubv1_1 => "Gossipsub v1.1",
}
}
}
impl AsRef<str> for PeerKind {
    /// Borrows the static label for this peer kind (see [`PeerKind::as_static_ref`]).
    fn as_ref(&self) -> &str {
        self.as_static_ref()
    }
}
impl fmt::Display for PeerKind {
    /// Writes the static label of the peer kind.
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        write!(f, "{}", self.as_static_ref())
    }
}
/// `RpcOut` sender that is priority aware.
#[derive(Debug, Clone)]
pub(crate) struct RpcSender {
    /// Soft cap on the number of `Publish` messages allowed in the priority
    /// queue at once (half the capacity passed to `new`).
    cap: usize,
    /// Count of `Publish` messages currently queued; shared with the
    /// receiver, which decrements it as they are consumed.
    len: Arc<AtomicUsize>,
    /// Unbounded channel for high-priority RPCs (graft/prune/(un)subscribe/publish).
    priority: Sender<RpcOut>,
    /// Bounded channel for low-priority RPCs (ihave/iwant/forward).
    non_priority: Sender<RpcOut>,
    /// The matching receiver, kept here so clones can be handed out.
    receiver: RpcReceiver,
}
impl RpcSender {
    /// Create a RpcSender.
    ///
    /// `cap` is halved and used both as the bound of the non-priority channel
    /// and as the soft cap on queued `Publish` messages in the (otherwise
    /// unbounded) priority channel.
    pub(crate) fn new(cap: usize) -> RpcSender {
        let (priority_sender, priority_receiver) = async_channel::unbounded();
        let (non_priority_sender, non_priority_receiver) = async_channel::bounded(cap / 2);
        // Shared counter of queued `Publish` messages; the receiver
        // decrements it as it pops them.
        let len = Arc::new(AtomicUsize::new(0));
        let receiver = RpcReceiver {
            priority_len: len.clone(),
            priority: priority_receiver,
            non_priority: non_priority_receiver,
        };
        RpcSender {
            cap: cap / 2,
            len,
            priority: priority_sender,
            non_priority: non_priority_sender,
            receiver: receiver.clone(),
        }
    }

    /// Create a new Receiver to the sender.
    pub(crate) fn new_receiver(&self) -> RpcReceiver {
        self.receiver.clone()
    }

    /// Send a `RpcOut::Graft` message to the `RpcReceiver`
    /// this is high priority.
    pub(crate) fn graft(&mut self, graft: Graft) {
        self.priority
            .try_send(RpcOut::Graft(graft))
            .expect("Channel is unbounded and should always be open");
    }

    /// Send a `RpcOut::Prune` message to the `RpcReceiver`
    /// this is high priority.
    pub(crate) fn prune(&mut self, prune: Prune) {
        self.priority
            .try_send(RpcOut::Prune(prune))
            .expect("Channel is unbounded and should always be open");
    }

    /// Send a `RpcOut::IHave` message to the `RpcReceiver`
    /// this is low priority, if the queue is full an Err is returned.
    #[allow(clippy::result_large_err)]
    pub(crate) fn ihave(&mut self, ihave: IHave) -> Result<(), RpcOut> {
        self.non_priority
            .try_send(RpcOut::IHave(ihave))
            .map_err(|err| err.into_inner())
    }

    /// Send a `RpcOut::IWant` message to the `RpcReceiver`
    /// this is low priority, if the queue is full an Err is returned.
    #[allow(clippy::result_large_err)]
    pub(crate) fn iwant(&mut self, iwant: IWant) -> Result<(), RpcOut> {
        self.non_priority
            .try_send(RpcOut::IWant(iwant))
            .map_err(|err| err.into_inner())
    }

    /// Send a `RpcOut::Subscribe` message to the `RpcReceiver`
    /// this is high priority.
    pub(crate) fn subscribe(&mut self, topic: TopicHash) {
        self.priority
            .try_send(RpcOut::Subscribe(topic))
            .expect("Channel is unbounded and should always be open");
    }

    /// Send a `RpcOut::Unsubscribe` message to the `RpcReceiver`
    /// this is high priority.
    pub(crate) fn unsubscribe(&mut self, topic: TopicHash) {
        self.priority
            .try_send(RpcOut::Unsubscribe(topic))
            .expect("Channel is unbounded and should always be open");
    }

    /// Send a `RpcOut::Publish` message to the `RpcReceiver`
    /// this is high priority. Returns `Err(())` (without sending) when the
    /// number of queued publish messages has reached the cap.
    pub(crate) fn publish(
        &mut self,
        message: RawMessage,
        timeout: Duration,
        metrics: Option<&mut Metrics>,
    ) -> Result<(), ()> {
        // Back-pressure for publishes only: the priority channel itself is
        // unbounded, so the cap is enforced via the shared counter.
        if self.len.load(Ordering::Relaxed) >= self.cap {
            return Err(());
        }
        self.priority
            .try_send(RpcOut::Publish {
                message: message.clone(),
                timeout: Delay::new(timeout),
            })
            .expect("Channel is unbounded and should always be open");
        self.len.fetch_add(1, Ordering::Relaxed);

        if let Some(m) = metrics {
            m.msg_sent(&message.topic, message.raw_protobuf_len());
        }
        Ok(())
    }

    /// Send a `RpcOut::Forward` message to the `RpcReceiver`
    /// this is low priority. If the queue is full the message is discarded
    /// and `Err(())` is returned.
    pub(crate) fn forward(
        &mut self,
        message: RawMessage,
        timeout: Duration,
        metrics: Option<&mut Metrics>,
    ) -> Result<(), ()> {
        self.non_priority
            .try_send(RpcOut::Forward {
                message: message.clone(),
                timeout: Delay::new(timeout),
            })
            .map_err(|_| ())?;

        if let Some(m) = metrics {
            m.msg_sent(&message.topic, message.raw_protobuf_len());
        }
        Ok(())
    }

    /// Returns the current number of `Publish` messages queued in the
    /// priority queue (other priority messages are not counted).
    pub(crate) fn priority_len(&self) -> usize {
        self.len.load(Ordering::Relaxed)
    }

    /// Returns the current size of the non-priority queue.
    pub(crate) fn non_priority_len(&self) -> usize {
        self.non_priority.len()
    }
}
/// `RpcOut` receiver that is priority aware.
#[derive(Debug, Clone)]
pub struct RpcReceiver {
    /// Count of `Publish` messages currently in the priority queue; shared
    /// with the sender, which uses it to enforce its publish cap. Decremented
    /// here whenever a `Publish` is popped.
    priority_len: Arc<AtomicUsize>,
    /// The priority queue receiver.
    pub(crate) priority: Receiver<RpcOut>,
    /// The non priority queue receiver.
    pub(crate) non_priority: Receiver<RpcOut>,
}
impl RpcReceiver {
    /// Check if both queues are empty.
    ///
    /// This inspects only buffered items; it says nothing about whether the
    /// sending side is still alive.
    pub(crate) fn is_empty(&self) -> bool {
        self.priority.is_empty() && self.non_priority.is_empty()
    }
}
impl Stream for RpcReceiver {
    type Item = RpcOut;

    /// Yields the next queued RPC, always draining the priority queue before
    /// the non-priority one.
    fn poll_next(
        mut self: std::pin::Pin<&mut Self>,
        cx: &mut std::task::Context<'_>,
    ) -> std::task::Poll<Option<Self::Item>> {
        // The priority queue is first polled.
        if let Poll::Ready(rpc) = Pin::new(&mut self.priority).poll_next(cx) {
            // Popping a `Publish` frees one slot of the sender's publish cap,
            // so keep the shared counter in sync.
            if let Some(RpcOut::Publish { .. }) = rpc {
                self.priority_len.fetch_sub(1, Ordering::Relaxed);
            }
            // NOTE(review): a `Ready(None)` (priority channel closed) is
            // returned as end-of-stream even if the non-priority queue still
            // holds buffered items — presumably acceptable since both senders
            // live in the same `RpcSender`; confirm this is intended.
            return Poll::Ready(rpc);
        }
        // Then we poll the non priority.
        Pin::new(&mut self.non_priority).poll_next(cx)
    }
}

View File

@ -10,6 +10,7 @@ pub mod service;
#[allow(clippy::mutable_key_type)] // PeerId in hashmaps are no longer permitted by clippy #[allow(clippy::mutable_key_type)] // PeerId in hashmaps are no longer permitted by clippy
pub mod discovery; pub mod discovery;
pub mod gossipsub;
pub mod listen_addr; pub mod listen_addr;
pub mod metrics; pub mod metrics;
pub mod peer_manager; pub mod peer_manager;
@ -114,8 +115,8 @@ pub use prometheus_client;
pub use config::Config as NetworkConfig; pub use config::Config as NetworkConfig;
pub use discovery::{CombinedKeyExt, EnrExt, Eth2Enr}; pub use discovery::{CombinedKeyExt, EnrExt, Eth2Enr};
pub use discv5; pub use discv5;
pub use gossipsub::{IdentTopic, MessageAcceptance, MessageId, Topic, TopicHash};
pub use libp2p; pub use libp2p;
pub use libp2p::gossipsub::{IdentTopic, MessageAcceptance, MessageId, Topic, TopicHash};
pub use libp2p::{core::ConnectedPoint, PeerId, Swarm}; pub use libp2p::{core::ConnectedPoint, PeerId, Swarm};
pub use libp2p::{multiaddr, Multiaddr}; pub use libp2p::{multiaddr, Multiaddr};
pub use metrics::scrape_discovery_metrics; pub use metrics::scrape_discovery_metrics;

View File

@ -3,7 +3,7 @@ use crate::peer_manager::PeerManager;
use crate::rpc::{ReqId, RPC}; use crate::rpc::{ReqId, RPC};
use crate::types::SnappyTransform; use crate::types::SnappyTransform;
use libp2p::gossipsub; use crate::gossipsub;
use libp2p::identify; use libp2p::identify;
use libp2p::swarm::NetworkBehaviour; use libp2p::swarm::NetworkBehaviour;
use types::EthSpec; use types::EthSpec;
@ -22,8 +22,8 @@ where
{ {
/// Keep track of active and pending connections to enforce hard limits. /// Keep track of active and pending connections to enforce hard limits.
pub connection_limits: libp2p::connection_limits::Behaviour, pub connection_limits: libp2p::connection_limits::Behaviour,
/// The routing pub-sub mechanism for eth2. /// The peer manager that keeps track of peer's reputation and status.
pub gossipsub: Gossipsub, pub peer_manager: PeerManager<TSpec>,
/// The Eth2 RPC specified in the wire-0 protocol. /// The Eth2 RPC specified in the wire-0 protocol.
pub eth2_rpc: RPC<RequestId<AppReqId>, TSpec>, pub eth2_rpc: RPC<RequestId<AppReqId>, TSpec>,
/// Discv5 Discovery protocol. /// Discv5 Discovery protocol.
@ -32,6 +32,6 @@ where
// NOTE: The id protocol is used for initial interop. This will be removed by mainnet. // NOTE: The id protocol is used for initial interop. This will be removed by mainnet.
/// Provides IP addresses and peer information. /// Provides IP addresses and peer information.
pub identify: identify::Behaviour, pub identify: identify::Behaviour,
/// The peer manager that keeps track of peer's reputation and status. /// The routing pub-sub mechanism for eth2.
pub peer_manager: PeerManager<TSpec>, pub gossipsub: Gossipsub,
} }

View File

@ -1,9 +1,9 @@
use crate::types::{GossipEncoding, GossipKind, GossipTopic}; use crate::gossipsub::{
use crate::{error, TopicHash};
use libp2p::gossipsub::{
Config as GossipsubConfig, IdentTopic as Topic, PeerScoreParams, PeerScoreThresholds, Config as GossipsubConfig, IdentTopic as Topic, PeerScoreParams, PeerScoreThresholds,
TopicScoreParams, TopicScoreParams,
}; };
use crate::types::{GossipEncoding, GossipKind, GossipTopic};
use crate::{error, TopicHash};
use std::cmp::max; use std::cmp::max;
use std::collections::HashMap; use std::collections::HashMap;
use std::marker::PhantomData; use std::marker::PhantomData;

View File

@ -4,6 +4,10 @@ use crate::config::{gossipsub_config, GossipsubConfigParams, NetworkLoad};
use crate::discovery::{ use crate::discovery::{
subnet_predicate, DiscoveredPeers, Discovery, FIND_NODE_QUERY_CLOSEST_PEERS, subnet_predicate, DiscoveredPeers, Discovery, FIND_NODE_QUERY_CLOSEST_PEERS,
}; };
use crate::gossipsub::{
self, IdentTopic as Topic, MessageAcceptance, MessageAuthenticity, MessageId, PublishError,
TopicScoreParams,
};
use crate::peer_manager::{ use crate::peer_manager::{
config::Config as PeerManagerCfg, peerdb::score::PeerAction, peerdb::score::ReportSource, config::Config as PeerManagerCfg, peerdb::score::PeerAction, peerdb::score::ReportSource,
ConnectionDirection, PeerManager, PeerManagerEvent, ConnectionDirection, PeerManager, PeerManagerEvent,
@ -24,10 +28,6 @@ use crate::{error, metrics, Enr, NetworkGlobals, PubsubMessage, TopicHash};
use api_types::{PeerRequestId, Request, RequestId, Response}; use api_types::{PeerRequestId, Request, RequestId, Response};
use futures::stream::StreamExt; use futures::stream::StreamExt;
use gossipsub_scoring_parameters::{lighthouse_gossip_thresholds, PeerScoreSettings}; use gossipsub_scoring_parameters::{lighthouse_gossip_thresholds, PeerScoreSettings};
use libp2p::gossipsub::{
self, IdentTopic as Topic, MessageAcceptance, MessageAuthenticity, MessageId, PublishError,
TopicScoreParams,
};
use libp2p::multiaddr::{Multiaddr, Protocol as MProtocol}; use libp2p::multiaddr::{Multiaddr, Protocol as MProtocol};
use libp2p::swarm::{Swarm, SwarmEvent}; use libp2p::swarm::{Swarm, SwarmEvent};
use libp2p::PeerId; use libp2p::PeerId;

View File

@ -1,3 +1,4 @@
use crate::gossipsub;
use crate::multiaddr::Protocol; use crate::multiaddr::Protocol;
use crate::rpc::{MetaData, MetaDataV1, MetaDataV2}; use crate::rpc::{MetaData, MetaDataV1, MetaDataV2};
use crate::types::{ use crate::types::{
@ -6,7 +7,6 @@ use crate::types::{
use crate::{GossipTopic, NetworkConfig}; use crate::{GossipTopic, NetworkConfig};
use futures::future::Either; use futures::future::Either;
use libp2p::core::{multiaddr::Multiaddr, muxing::StreamMuxerBox, transport::Boxed}; use libp2p::core::{multiaddr::Multiaddr, muxing::StreamMuxerBox, transport::Boxed};
use libp2p::gossipsub;
use libp2p::identity::{secp256k1, Keypair}; use libp2p::identity::{secp256k1, Keypair};
use libp2p::quic; use libp2p::quic;
use libp2p::{core, noise, yamux, PeerId, Transport}; use libp2p::{core, noise, yamux, PeerId, Transport};

View File

@ -1,8 +1,8 @@
//! Handles the encoding and decoding of pubsub messages. //! Handles the encoding and decoding of pubsub messages.
use crate::gossipsub;
use crate::types::{GossipEncoding, GossipKind, GossipTopic}; use crate::types::{GossipEncoding, GossipKind, GossipTopic};
use crate::TopicHash; use crate::TopicHash;
use libp2p::gossipsub;
use snap::raw::{decompress_len, Decoder, Encoder}; use snap::raw::{decompress_len, Decoder, Encoder};
use ssz::{Decode, Encode}; use ssz::{Decode, Encode};
use std::boxed::Box; use std::boxed::Box;

View File

@ -1,4 +1,4 @@
use libp2p::gossipsub::{IdentTopic as Topic, TopicHash}; use crate::gossipsub::{IdentTopic as Topic, TopicHash};
use serde::{Deserialize, Serialize}; use serde::{Deserialize, Serialize};
use strum::AsRefStr; use strum::AsRefStr;
use types::{ChainSpec, EthSpec, ForkName, SubnetId, SyncSubnetId}; use types::{ChainSpec, EthSpec, ForkName, SubnetId, SyncSubnetId};

View File

@ -1,5 +1,5 @@
#![cfg(test)] #![cfg(test)]
use libp2p::gossipsub; use lighthouse_network::gossipsub;
use lighthouse_network::service::Network as LibP2PService; use lighthouse_network::service::Network as LibP2PService;
use lighthouse_network::Enr; use lighthouse_network::Enr;
use lighthouse_network::EnrExt; use lighthouse_network::EnrExt;

View File

@ -1056,7 +1056,7 @@ fn goodbye_test(log_level: Level, enable_logging: bool, protocol: Protocol) {
fn tcp_test_goodbye_rpc() { fn tcp_test_goodbye_rpc() {
// set up the logging. The level and enabled logging or not // set up the logging. The level and enabled logging or not
let log_level = Level::Debug; let log_level = Level::Debug;
let enable_logging = true; let enable_logging = false;
goodbye_test(log_level, enable_logging, Protocol::Tcp); goodbye_test(log_level, enable_logging, Protocol::Tcp);
} }
@ -1066,6 +1066,6 @@ fn tcp_test_goodbye_rpc() {
fn quic_test_goodbye_rpc() { fn quic_test_goodbye_rpc() {
// set up the logging. The level and enabled logging or not // set up the logging. The level and enabled logging or not
let log_level = Level::Debug; let log_level = Level::Debug;
let enable_logging = true; let enable_logging = false;
goodbye_test(log_level, enable_logging, Protocol::Quic); goodbye_test(log_level, enable_logging, Protocol::Quic);
} }

View File

@ -20,7 +20,7 @@ mod tests {
fn get_topic_params( fn get_topic_params(
&self, &self,
topic: GossipTopic, topic: GossipTopic,
) -> Option<&lighthouse_network::libp2p::gossipsub::TopicScoreParams> { ) -> Option<&lighthouse_network::gossipsub::TopicScoreParams> {
self.libp2p.get_topic_params(topic) self.libp2p.get_topic_params(topic)
} }
} }