upgrade to libp2p 0.52 (#4431)

## Issue Addressed

Upgrade libp2p to v0.52

## Proposed Changes
- **Workflows**: remove installation of `protoc`
- **Book**: remove installation of `protoc`
- **`Dockerfile`s and `cross`**: remove the custom base `Dockerfile` for `cross` since it's no longer needed. Remove `protoc` from the remaining `Dockerfile`s
- **Upgrade `discv5` to `v0.3.1`:** this release no longer needs `protoc` and has faster IP updates on cold start
- **Upgrade `prometheus-client` to `0.21.0`:** metrics are now encoded directly into a `String`, so the extra UTF-8 validity check is no longer needed
- **Things that look like refactors:** a bunch of API types were renamed and now need to be accessed in a different (clearer) way
- **Lighthouse network**
	- connection limits are now enforced by a dedicated behaviour (see the sketch after this list)
	- banned peers no longer live at the swarm level, but at the behaviour level
	- `connection_event_buffer_size` is now handled per connection, with a buffer size of 4
	- `mplex` is deprecated and has been removed
	- the RPC handler now logs the peer it belongs to
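
A minimal sketch of the behaviour-level limits and banning mentioned above (not Lighthouse's actual code; the real composition is the `Behaviour` struct further down in this diff). It assumes libp2p 0.52's `connection_limits` and `allow_block_list` modules; the limit values are made up for illustration:

```rust
use libp2p::allow_block_list::{self, BlockedPeers};
use libp2p::connection_limits::{self, ConnectionLimits};
use libp2p::swarm::NetworkBehaviour;
use libp2p::PeerId;

// Connection limits and banned peers are now ordinary `NetworkBehaviour`s
// composed into the top-level behaviour, rather than swarm-level settings.
#[derive(NetworkBehaviour)]
struct ExampleBehaviour {
    banned_peers: allow_block_list::Behaviour<BlockedPeers>,
    connection_limits: connection_limits::Behaviour,
}

fn example_behaviour() -> ExampleBehaviour {
    // Example numbers only.
    let limits = ConnectionLimits::default()
        .with_max_pending_incoming(Some(32))
        .with_max_established_incoming(Some(64));
    ExampleBehaviour {
        banned_peers: allow_block_list::Behaviour::default(),
        connection_limits: connection_limits::Behaviour::new(limits),
    }
}

fn ban_peer(behaviour: &mut ExampleBehaviour, peer_id: PeerId) {
    // Banning happens at the behaviour level: the blocked peer's existing
    // connections are closed and new ones are denied.
    behaviour.banned_peers.block_peer(peer_id);
}
```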

## Additional Info

Tried to keep as much behaviour unchanged as possible. However, there are a number of improvements we can make _after_ this upgrade:
- Smart connection limits: connection limits have so far been checked purely by numbers; we can now use information about the incoming peer to decide whether we want it
- More powerful peer management: dial attempts from other behaviours can be rejected early
- Incoming connections can be rejected early (see the sketch below)
- Banning can be returned exclusively to the peer manager: making use of this, we should no longer receive connections to banned peers at all
- TCP NAT updates: we might be able to take advantage of confirmed external addresses to verify our TCP ports/IPs
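
As a rough illustration of the early-rejection hooks mentioned above (a hypothetical sketch, not part of this PR): in libp2p 0.52 the connection callbacks on `NetworkBehaviour` are fallible, so a behaviour such as the peer manager could deny a connection before a handler is even created. The `Gatekeeper` type and its `unwanted` set below are invented for the example:

```rust
use std::collections::HashSet;
use std::convert::Infallible;
use std::task::{Context, Poll};

use libp2p::core::Endpoint;
use libp2p::swarm::{
    dummy, ConnectionDenied, ConnectionId, FromSwarm, NetworkBehaviour, PollParameters,
    THandler, THandlerInEvent, THandlerOutEvent, ToSwarm,
};
use libp2p::{Multiaddr, PeerId};

/// Hypothetical behaviour that refuses connections to/from peers it dislikes.
#[derive(Default)]
pub struct Gatekeeper {
    unwanted: HashSet<PeerId>,
}

impl NetworkBehaviour for Gatekeeper {
    type ConnectionHandler = dummy::ConnectionHandler;
    type ToSwarm = Infallible;

    fn handle_established_inbound_connection(
        &mut self,
        _id: ConnectionId,
        peer: PeerId,
        _local_addr: &Multiaddr,
        _remote_addr: &Multiaddr,
    ) -> Result<THandler<Self>, ConnectionDenied> {
        if self.unwanted.contains(&peer) {
            // Denying here tears the connection down before any other
            // behaviour sees it as established.
            return Err(ConnectionDenied::new(format!("peer {peer} is unwanted")));
        }
        Ok(dummy::ConnectionHandler)
    }

    fn handle_established_outbound_connection(
        &mut self,
        _id: ConnectionId,
        peer: PeerId,
        _addr: &Multiaddr,
        _role_override: Endpoint,
    ) -> Result<THandler<Self>, ConnectionDenied> {
        if self.unwanted.contains(&peer) {
            // Dial attempts issued by other behaviours can be rejected early.
            return Err(ConnectionDenied::new(format!("refusing to dial {peer}")));
        }
        Ok(dummy::ConnectionHandler)
    }

    fn on_swarm_event(&mut self, _event: FromSwarm<Self::ConnectionHandler>) {}

    fn on_connection_handler_event(
        &mut self,
        _peer: PeerId,
        _id: ConnectionId,
        _event: THandlerOutEvent<Self>,
    ) {
    }

    fn poll(
        &mut self,
        _cx: &mut Context<'_>,
        _params: &mut impl PollParameters,
    ) -> Poll<ToSwarm<Self::ToSwarm, THandlerInEvent<Self>>> {
        Poll::Pending
    }
}
```

The PR itself keeps these hooks permissive for now (the `handle_established_*` implementations in the diff return `Ok(ConnectionHandler)` with TODOs); the list above is where that could change.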


Co-authored-by: Age Manning <Age@AgeManning.com>
Co-authored-by: Akihito Nakano <sora.akatsuki@gmail.com>
Divma 2023-08-02 00:59:34 +00:00
commit ff9b09d964 (parent 73764d0dd2)
35 changed files with 1594 additions and 2408 deletions


@ -21,10 +21,6 @@ jobs:
- name: Get latest version of stable Rust
run: rustup update stable
- name: Install Protoc
uses: arduino/setup-protoc@e52d9eb8f7b63115df1ac544a1376fdbf5a39612
with:
repo-token: ${{ secrets.GITHUB_TOKEN }}
- name: Install geth (ubuntu)
if: matrix.os == 'ubuntu-22.04'
run: |


@ -79,15 +79,6 @@ jobs:
if: startsWith(matrix.arch, 'x86_64-windows')
run: echo "LIBCLANG_PATH=$((gcm clang).source -replace "clang.exe")" >> $env:GITHUB_ENV
# ==============================
# Windows & Mac dependencies
# ==============================
- name: Install Protoc
if: contains(matrix.arch, 'darwin') || contains(matrix.arch, 'windows')
uses: arduino/setup-protoc@e52d9eb8f7b63115df1ac544a1376fdbf5a39612
with:
repo-token: ${{ secrets.GITHUB_TOKEN }}
# ==============================
# Builds
# ==============================


@ -60,10 +60,6 @@ jobs:
- name: Get latest version of stable Rust
if: env.SELF_HOSTED_RUNNERS == false
run: rustup update stable
- name: Install Protoc
uses: arduino/setup-protoc@e52d9eb8f7b63115df1ac544a1376fdbf5a39612
with:
repo-token: ${{ secrets.GITHUB_TOKEN }}
- name: Install Foundry (anvil)
uses: foundry-rs/foundry-toolchain@v1
- name: Run tests in release
@ -83,7 +79,7 @@ jobs:
node-version: '14'
- name: Install windows build tools
run: |
choco install python protoc visualstudio2019-workload-vctools -y
choco install python visualstudio2019-workload-vctools -y
npm config set msvs_version 2019
- name: Install Foundry (anvil)
uses: foundry-rs/foundry-toolchain@v1
@ -108,10 +104,6 @@ jobs:
- name: Get latest version of stable Rust
if: env.SELF_HOSTED_RUNNERS == false
run: rustup update stable
- name: Install Protoc
uses: arduino/setup-protoc@e52d9eb8f7b63115df1ac544a1376fdbf5a39612
with:
repo-token: ${{ secrets.GITHUB_TOKEN }}
- name: Run beacon_chain tests for all known forks
run: make test-beacon-chain
op-pool-tests:
@ -122,10 +114,6 @@ jobs:
- uses: actions/checkout@v3
- name: Get latest version of stable Rust
run: rustup update stable
- name: Install Protoc
uses: arduino/setup-protoc@e52d9eb8f7b63115df1ac544a1376fdbf5a39612
with:
repo-token: ${{ secrets.GITHUB_TOKEN }}
- name: Run operation_pool tests for all known forks
run: make test-op-pool
slasher-tests:
@ -148,10 +136,6 @@ jobs:
- name: Get latest version of stable Rust
if: env.SELF_HOSTED_RUNNERS == false
run: rustup update stable
- name: Install Protoc
uses: arduino/setup-protoc@e52d9eb8f7b63115df1ac544a1376fdbf5a39612
with:
repo-token: ${{ secrets.GITHUB_TOKEN }}
- name: Install Foundry (anvil)
uses: foundry-rs/foundry-toolchain@v1
- name: Run tests in debug
@ -164,10 +148,6 @@ jobs:
- uses: actions/checkout@v3
- name: Get latest version of stable Rust
run: rustup update stable
- name: Install Protoc
uses: arduino/setup-protoc@e52d9eb8f7b63115df1ac544a1376fdbf5a39612
with:
repo-token: ${{ secrets.GITHUB_TOKEN }}
- name: Run state_transition_vectors in release.
run: make run-state-transition-tests
ef-tests-ubuntu:
@ -180,10 +160,6 @@ jobs:
- name: Get latest version of stable Rust
if: env.SELF_HOSTED_RUNNERS == false
run: rustup update stable
- name: Install Protoc
uses: arduino/setup-protoc@e52d9eb8f7b63115df1ac544a1376fdbf5a39612
with:
repo-token: ${{ secrets.GITHUB_TOKEN }}
- name: Run consensus-spec-tests with blst, milagro and fake_crypto
run: make test-ef
dockerfile-ubuntu:
@ -206,10 +182,6 @@ jobs:
- uses: actions/checkout@v3
- name: Get latest version of stable Rust
run: rustup update stable
- name: Install Protoc
uses: arduino/setup-protoc@e52d9eb8f7b63115df1ac544a1376fdbf5a39612
with:
repo-token: ${{ secrets.GITHUB_TOKEN }}
- name: Install Foundry (anvil)
uses: foundry-rs/foundry-toolchain@v1
- name: Run the beacon chain sim that starts from an eth1 contract
@ -222,10 +194,6 @@ jobs:
- uses: actions/checkout@v3
- name: Get latest version of stable Rust
run: rustup update stable
- name: Install Protoc
uses: arduino/setup-protoc@e52d9eb8f7b63115df1ac544a1376fdbf5a39612
with:
repo-token: ${{ secrets.GITHUB_TOKEN }}
- name: Install Foundry (anvil)
uses: foundry-rs/foundry-toolchain@v1
- name: Run the beacon chain sim and go through the merge transition
@ -238,10 +206,6 @@ jobs:
- uses: actions/checkout@v3
- name: Get latest version of stable Rust
run: rustup update stable
- name: Install Protoc
uses: arduino/setup-protoc@e52d9eb8f7b63115df1ac544a1376fdbf5a39612
with:
repo-token: ${{ secrets.GITHUB_TOKEN }}
- name: Run the beacon chain sim without an eth1 connection
run: cargo run --release --bin simulator no-eth1-sim
syncing-simulator-ubuntu:
@ -252,10 +216,6 @@ jobs:
- uses: actions/checkout@v3
- name: Get latest version of stable Rust
run: rustup update stable
- name: Install Protoc
uses: arduino/setup-protoc@e52d9eb8f7b63115df1ac544a1376fdbf5a39612
with:
repo-token: ${{ secrets.GITHUB_TOKEN }}
- name: Install Foundry (anvil)
uses: foundry-rs/foundry-toolchain@v1
- name: Run the syncing simulator
@ -268,10 +228,6 @@ jobs:
- uses: actions/checkout@v3
- name: Get latest version of stable Rust
run: rustup update stable
- name: Install Protoc
uses: arduino/setup-protoc@e52d9eb8f7b63115df1ac544a1376fdbf5a39612
with:
repo-token: ${{ secrets.GITHUB_TOKEN }}
- name: Install geth
run: |
sudo add-apt-repository -y ppa:ethereum/ethereum
@ -303,10 +259,6 @@ jobs:
dotnet-version: '6.0.201'
- name: Get latest version of stable Rust
run: rustup update stable
- name: Install Protoc
uses: arduino/setup-protoc@e52d9eb8f7b63115df1ac544a1376fdbf5a39612
with:
repo-token: ${{ secrets.GITHUB_TOKEN }}
- name: Run exec engine integration tests in release
run: make test-exec-engine
check-benchmarks:
@ -317,10 +269,6 @@ jobs:
- uses: actions/checkout@v3
- name: Get latest version of stable Rust
run: rustup update stable
- name: Install Protoc
uses: arduino/setup-protoc@e52d9eb8f7b63115df1ac544a1376fdbf5a39612
with:
repo-token: ${{ secrets.GITHUB_TOKEN }}
- name: Typecheck benchmark code without running it
run: make check-benches
clippy:
@ -331,10 +279,6 @@ jobs:
- uses: actions/checkout@v3
- name: Get latest version of stable Rust
run: rustup update stable
- name: Install Protoc
uses: arduino/setup-protoc@e52d9eb8f7b63115df1ac544a1376fdbf5a39612
with:
repo-token: ${{ secrets.GITHUB_TOKEN }}
- name: Lint code for quality and style with Clippy
run: make lint
- name: Certify Cargo.lock freshness
@ -347,10 +291,6 @@ jobs:
- uses: actions/checkout@v3
- name: Install Rust @ MSRV (${{ needs.extract-msrv.outputs.MSRV }})
run: rustup override set ${{ needs.extract-msrv.outputs.MSRV }}
- name: Install Protoc
uses: arduino/setup-protoc@e52d9eb8f7b63115df1ac544a1376fdbf5a39612
with:
repo-token: ${{ secrets.GITHUB_TOKEN }}
- name: Run cargo check
run: cargo check --workspace
arbitrary-check:
@ -389,10 +329,6 @@ jobs:
- uses: actions/checkout@v3
- name: Install Rust (${{ env.PINNED_NIGHTLY }})
run: rustup toolchain install $PINNED_NIGHTLY
- name: Install Protoc
uses: arduino/setup-protoc@e52d9eb8f7b63115df1ac544a1376fdbf5a39612
with:
repo-token: ${{ secrets.GITHUB_TOKEN }}
- name: Install cargo-udeps
run: cargo install cargo-udeps --locked --force
- name: Create Cargo config dir
@ -410,7 +346,7 @@ jobs:
steps:
- uses: actions/checkout@v3
- name: Install dependencies
run: sudo apt install -y git gcc g++ make cmake pkg-config llvm-dev libclang-dev clang protobuf-compiler
run: sudo apt install -y git gcc g++ make cmake pkg-config llvm-dev libclang-dev clang
- name: Use Rust beta
run: rustup override set beta
- name: Run make

Cargo.lock (generated, 2654 changed lines): file diff suppressed because it is too large


@ -1,5 +1,5 @@
[target.x86_64-unknown-linux-gnu]
dockerfile = './scripts/cross/Dockerfile'
pre-build = ["apt-get install -y cmake clang-3.9"]
[target.aarch64-unknown-linux-gnu]
dockerfile = './scripts/cross/Dockerfile'
pre-build = ["apt-get install -y cmake clang-3.9"]


@ -1,5 +1,5 @@
FROM rust:1.68.2-bullseye AS builder
RUN apt-get update && apt-get -y upgrade && apt-get install -y cmake libclang-dev protobuf-compiler
RUN apt-get update && apt-get -y upgrade && apt-get install -y cmake libclang-dev
COPY . lighthouse
ARG FEATURES
ARG PROFILE=release


@ -9,12 +9,9 @@ use directory::DEFAULT_ROOT_DIR;
use eth2::{BeaconNodeHttpClient, Timeouts};
use lighthouse_network::{
discv5::enr::{CombinedKey, EnrBuilder},
libp2p::{
core::connection::ConnectionId,
swarm::{
libp2p::swarm::{
behaviour::{ConnectionEstablished, FromSwarm},
NetworkBehaviour,
},
ConnectionId, NetworkBehaviour,
},
rpc::methods::{MetaData, MetaDataV2},
types::{EnrAttestationBitfield, EnrSyncCommitteeBitfield, SyncState},
@ -170,7 +167,7 @@ pub async fn create_api_server_on_port<T: BeaconChainTypes>(
local_addr: EXTERNAL_ADDR.parse().unwrap(),
send_back_addr: EXTERNAL_ADDR.parse().unwrap(),
};
let connection_id = ConnectionId::new(1);
let connection_id = ConnectionId::new_unchecked(1);
pm.on_swarm_event(FromSwarm::ConnectionEstablished(ConnectionEstablished {
peer_id,
connection_id,


@ -1,6 +1,6 @@
use crate::Context;
use beacon_chain::BeaconChainTypes;
use lighthouse_metrics::{Encoder, TextEncoder};
use lighthouse_metrics::TextEncoder;
use lighthouse_network::prometheus_client::encoding::text::encode;
use malloc_utils::scrape_allocator_metrics;
@ -9,7 +9,7 @@ pub use lighthouse_metrics::*;
pub fn gather_prometheus_metrics<T: BeaconChainTypes>(
ctx: &Context<T>,
) -> std::result::Result<String, String> {
let mut buffer = vec![];
let mut buffer = String::new();
let encoder = TextEncoder::new();
// There are two categories of metrics:
@ -50,7 +50,7 @@ pub fn gather_prometheus_metrics<T: BeaconChainTypes>(
}
encoder
.encode(&lighthouse_metrics::gather(), &mut buffer)
.encode_utf8(&lighthouse_metrics::gather(), &mut buffer)
.unwrap();
// encode gossipsub metrics also if they exist
if let Some(registry) = ctx.gossipsub_registry.as_ref() {
@ -59,5 +59,5 @@ pub fn gather_prometheus_metrics<T: BeaconChainTypes>(
}
}
String::from_utf8(buffer).map_err(|e| format!("Failed to encode prometheus info: {:?}", e))
Ok(buffer)
}


@ -5,7 +5,7 @@ authors = ["Sigma Prime <contact@sigmaprime.io>"]
edition = "2021"
[dependencies]
discv5 = { version = "0.3.0", features = ["libp2p"]}
discv5 = { version = "0.3.1", features = ["libp2p"] }
unsigned-varint = { version = "0.6.0", features = ["codec"] }
types = { path = "../../consensus/types" }
ssz_types = "0.5.3"
@ -40,15 +40,15 @@ directory = { path = "../../common/directory" }
regex = "1.5.5"
strum = { version = "0.24.0", features = ["derive"] }
superstruct = "0.5.0"
prometheus-client = "0.18.0"
prometheus-client = "0.21.0"
unused_port = { path = "../../common/unused_port" }
delay_map = "0.3.0"
void = "1"
[dependencies.libp2p]
version = "0.50.0"
version = "0.52"
default-features = false
features = ["websocket", "identify", "mplex", "yamux", "noise", "gossipsub", "dns", "tcp", "tokio", "plaintext", "secp256k1", "macros", "ecdsa"]
features = ["websocket", "identify", "yamux", "noise", "gossipsub", "dns", "tcp", "tokio", "plaintext", "secp256k1", "macros", "ecdsa"]
[dev-dependencies]
slog-term = "2.6.0"


@ -6,10 +6,7 @@ use directory::{
DEFAULT_BEACON_NODE_DIR, DEFAULT_HARDCODED_NETWORK, DEFAULT_NETWORK_DIR, DEFAULT_ROOT_DIR,
};
use discv5::{Discv5Config, Discv5ConfigBuilder};
use libp2p::gossipsub::{
FastMessageId, GossipsubConfig, GossipsubConfigBuilder, GossipsubMessage, MessageId,
RawGossipsubMessage, ValidationMode,
};
use libp2p::gossipsub;
use libp2p::Multiaddr;
use serde_derive::{Deserialize, Serialize};
use sha2::{Digest, Sha256};
@ -83,7 +80,7 @@ pub struct Config {
/// Gossipsub configuration parameters.
#[serde(skip)]
pub gs_config: GossipsubConfig,
pub gs_config: gossipsub::Config,
/// Discv5 configuration parameters.
#[serde(skip)]
@ -265,7 +262,7 @@ impl Default for Config {
// Note: Using the default config here. Use `gossipsub_config` function for getting
// Lighthouse specific configuration for gossipsub.
let gs_config = GossipsubConfigBuilder::default()
let gs_config = gossipsub::ConfigBuilder::default()
.build()
.expect("valid gossipsub configuration");
@ -416,16 +413,16 @@ impl From<u8> for NetworkLoad {
}
/// Return a Lighthouse specific `GossipsubConfig` where the `message_id_fn` depends on the current fork.
pub fn gossipsub_config(network_load: u8, fork_context: Arc<ForkContext>) -> GossipsubConfig {
pub fn gossipsub_config(network_load: u8, fork_context: Arc<ForkContext>) -> gossipsub::Config {
// The function used to generate a gossipsub message id
// We use the first 8 bytes of SHA256(topic, data) for content addressing
let fast_gossip_message_id = |message: &RawGossipsubMessage| {
let fast_gossip_message_id = |message: &gossipsub::RawMessage| {
let data = [message.topic.as_str().as_bytes(), &message.data].concat();
FastMessageId::from(&Sha256::digest(data)[..8])
gossipsub::FastMessageId::from(&Sha256::digest(data)[..8])
};
fn prefix(
prefix: [u8; 4],
message: &GossipsubMessage,
message: &gossipsub::Message,
fork_context: Arc<ForkContext>,
) -> Vec<u8> {
let topic_bytes = message.topic.as_str().as_bytes();
@ -451,8 +448,8 @@ pub fn gossipsub_config(network_load: u8, fork_context: Arc<ForkContext>) -> Gos
}
let is_merge_enabled = fork_context.fork_exists(ForkName::Merge);
let gossip_message_id = move |message: &GossipsubMessage| {
MessageId::from(
let gossip_message_id = move |message: &gossipsub::Message| {
gossipsub::MessageId::from(
&Sha256::digest(
prefix(MESSAGE_DOMAIN_VALID_SNAPPY, message, fork_context.clone()).as_slice(),
)[..20],
@ -461,7 +458,7 @@ pub fn gossipsub_config(network_load: u8, fork_context: Arc<ForkContext>) -> Gos
let load = NetworkLoad::from(network_load);
GossipsubConfigBuilder::default()
gossipsub::ConfigBuilder::default()
.max_transmit_size(gossip_max_size(is_merge_enabled))
.heartbeat_interval(load.heartbeat_interval)
.mesh_n(load.mesh_n)
@ -474,7 +471,7 @@ pub fn gossipsub_config(network_load: u8, fork_context: Arc<ForkContext>) -> Gos
.max_messages_per_rpc(Some(500)) // Responses to IWANT can be quite large
.history_gossip(load.history_gossip)
.validate_messages() // require validation before propagation
.validation_mode(ValidationMode::Anonymous)
.validation_mode(gossipsub::ValidationMode::Anonymous)
.duplicate_cache_time(DUPLICATE_CACHE_TIME)
.message_id_fn(gossip_message_id)
.fast_message_id_fn(fast_gossip_message_id)


@ -7,7 +7,7 @@ use super::ENR_FILENAME;
use crate::types::{Enr, EnrAttestationBitfield, EnrSyncCommitteeBitfield};
use crate::NetworkConfig;
use discv5::enr::EnrKey;
use libp2p::core::identity::Keypair;
use libp2p::identity::Keypair;
use slog::{debug, warn};
use ssz::{Decode, Encode};
use ssz_types::BitVector;
@ -133,7 +133,7 @@ pub fn build_or_load_enr<T: EthSpec>(
// Build the local ENR.
// Note: Discovery should update the ENR record's IP to the external IP as seen by the
// majority of our peers, if the CLI doesn't expressly forbid it.
let enr_key = CombinedKey::from_libp2p(&local_key)?;
let enr_key = CombinedKey::from_libp2p(local_key)?;
let mut local_enr = build_enr::<T>(&enr_key, config, enr_fork_id)?;
use_or_load_enr(&enr_key, &mut local_enr, config, log)?;


@ -1,10 +1,9 @@
//! ENR extension trait to support libp2p integration.
use crate::{Enr, Multiaddr, PeerId};
use discv5::enr::{CombinedKey, CombinedPublicKey};
use libp2p::{
core::{identity::Keypair, identity::PublicKey, multiaddr::Protocol},
identity::secp256k1,
};
use libp2p::core::multiaddr::Protocol;
use libp2p::identity::{ed25519, secp256k1, KeyType, Keypair, PublicKey};
use tiny_keccak::{Hasher, Keccak};
/// Extend ENR for libp2p types.
@ -38,7 +37,8 @@ pub trait CombinedKeyPublicExt {
/// Extend ENR CombinedKey for conversion to libp2p keys.
pub trait CombinedKeyExt {
/// Converts a libp2p key into an ENR combined key.
fn from_libp2p(key: &libp2p::core::identity::Keypair) -> Result<CombinedKey, &'static str>;
fn from_libp2p(key: Keypair) -> Result<CombinedKey, &'static str>;
/// Converts a [`secp256k1::Keypair`] into and Enr [`CombinedKey`].
fn from_secp256k1(key: &secp256k1::Keypair) -> CombinedKey;
}
@ -93,14 +93,14 @@ impl EnrExt for Enr {
if let Some(udp) = self.udp4() {
let mut multiaddr: Multiaddr = ip.into();
multiaddr.push(Protocol::Udp(udp));
multiaddr.push(Protocol::P2p(peer_id.into()));
multiaddr.push(Protocol::P2p(peer_id));
multiaddrs.push(multiaddr);
}
if let Some(tcp) = self.tcp4() {
let mut multiaddr: Multiaddr = ip.into();
multiaddr.push(Protocol::Tcp(tcp));
multiaddr.push(Protocol::P2p(peer_id.into()));
multiaddr.push(Protocol::P2p(peer_id));
multiaddrs.push(multiaddr);
}
}
@ -108,14 +108,14 @@ impl EnrExt for Enr {
if let Some(udp6) = self.udp6() {
let mut multiaddr: Multiaddr = ip6.into();
multiaddr.push(Protocol::Udp(udp6));
multiaddr.push(Protocol::P2p(peer_id.into()));
multiaddr.push(Protocol::P2p(peer_id));
multiaddrs.push(multiaddr);
}
if let Some(tcp6) = self.tcp6() {
let mut multiaddr: Multiaddr = ip6.into();
multiaddr.push(Protocol::Tcp(tcp6));
multiaddr.push(Protocol::P2p(peer_id.into()));
multiaddr.push(Protocol::P2p(peer_id));
multiaddrs.push(multiaddr);
}
}
@ -133,7 +133,7 @@ impl EnrExt for Enr {
if let Some(tcp) = self.tcp4() {
let mut multiaddr: Multiaddr = ip.into();
multiaddr.push(Protocol::Tcp(tcp));
multiaddr.push(Protocol::P2p(peer_id.into()));
multiaddr.push(Protocol::P2p(peer_id));
multiaddrs.push(multiaddr);
}
}
@ -141,7 +141,7 @@ impl EnrExt for Enr {
if let Some(tcp6) = self.tcp6() {
let mut multiaddr: Multiaddr = ip6.into();
multiaddr.push(Protocol::Tcp(tcp6));
multiaddr.push(Protocol::P2p(peer_id.into()));
multiaddr.push(Protocol::P2p(peer_id));
multiaddrs.push(multiaddr);
}
}
@ -159,7 +159,7 @@ impl EnrExt for Enr {
if let Some(udp) = self.udp4() {
let mut multiaddr: Multiaddr = ip.into();
multiaddr.push(Protocol::Udp(udp));
multiaddr.push(Protocol::P2p(peer_id.into()));
multiaddr.push(Protocol::P2p(peer_id));
multiaddrs.push(multiaddr);
}
}
@ -167,7 +167,7 @@ impl EnrExt for Enr {
if let Some(udp6) = self.udp6() {
let mut multiaddr: Multiaddr = ip6.into();
multiaddr.push(Protocol::Udp(udp6));
multiaddr.push(Protocol::P2p(peer_id.into()));
multiaddr.push(Protocol::P2p(peer_id));
multiaddrs.push(multiaddr);
}
}
@ -204,18 +204,16 @@ impl CombinedKeyPublicExt for CombinedPublicKey {
match self {
Self::Secp256k1(pk) => {
let pk_bytes = pk.to_sec1_bytes();
let libp2p_pk = libp2p::core::PublicKey::Secp256k1(
libp2p::core::identity::secp256k1::PublicKey::decode(&pk_bytes)
.expect("valid public key"),
);
let libp2p_pk: PublicKey = secp256k1::PublicKey::try_from_bytes(&pk_bytes)
.expect("valid public key")
.into();
PeerId::from_public_key(&libp2p_pk)
}
Self::Ed25519(pk) => {
let pk_bytes = pk.to_bytes();
let libp2p_pk = libp2p::core::PublicKey::Ed25519(
libp2p::core::identity::ed25519::PublicKey::decode(&pk_bytes)
.expect("valid public key"),
);
let libp2p_pk: PublicKey = ed25519::PublicKey::try_from_bytes(&pk_bytes)
.expect("valid public key")
.into();
PeerId::from_public_key(&libp2p_pk)
}
}
@ -223,18 +221,25 @@ impl CombinedKeyPublicExt for CombinedPublicKey {
}
impl CombinedKeyExt for CombinedKey {
fn from_libp2p(key: &libp2p::core::identity::Keypair) -> Result<CombinedKey, &'static str> {
match key {
Keypair::Secp256k1(key) => Ok(CombinedKey::from_secp256k1(key)),
Keypair::Ed25519(key) => {
fn from_libp2p(key: Keypair) -> Result<CombinedKey, &'static str> {
match key.key_type() {
KeyType::Secp256k1 => {
let key = key.try_into_secp256k1().expect("right key type");
let secret =
discv5::enr::k256::ecdsa::SigningKey::from_slice(&key.secret().to_bytes())
.expect("libp2p key must be valid");
Ok(CombinedKey::Secp256k1(secret))
}
KeyType::Ed25519 => {
let key = key.try_into_ed25519().expect("right key type");
let ed_keypair = discv5::enr::ed25519_dalek::SigningKey::from_bytes(
&(key.encode()[..32])
&(key.to_bytes()[..32])
.try_into()
.expect("libp2p key must be valid"),
);
Ok(CombinedKey::from(ed_keypair))
}
Keypair::Ecdsa(_) => Err("Ecdsa keypairs not supported"),
_ => Err("Unsupported keypair kind"),
}
}
fn from_secp256k1(key: &secp256k1::Keypair) -> Self {
@ -251,37 +256,46 @@ pub fn peer_id_to_node_id(peer_id: &PeerId) -> Result<discv5::enr::NodeId, Strin
// if generated from a PublicKey with Identity multihash.
let pk_bytes = &peer_id.to_bytes()[2..];
match PublicKey::from_protobuf_encoding(pk_bytes).map_err(|e| {
let public_key = PublicKey::try_decode_protobuf(pk_bytes).map_err(|e| {
format!(
" Cannot parse libp2p public key public key from peer id: {}",
e
)
})? {
PublicKey::Secp256k1(pk) => {
let uncompressed_key_bytes = &pk.encode_uncompressed()[1..];
})?;
match public_key.key_type() {
KeyType::Secp256k1 => {
let pk = public_key
.clone()
.try_into_secp256k1()
.expect("right key type");
let uncompressed_key_bytes = &pk.to_bytes_uncompressed()[1..];
let mut output = [0_u8; 32];
let mut hasher = Keccak::v256();
hasher.update(uncompressed_key_bytes);
hasher.finalize(&mut output);
Ok(discv5::enr::NodeId::parse(&output).expect("Must be correct length"))
}
PublicKey::Ed25519(pk) => {
let uncompressed_key_bytes = pk.encode();
KeyType::Ed25519 => {
let pk = public_key
.clone()
.try_into_ed25519()
.expect("right key type");
let uncompressed_key_bytes = pk.to_bytes();
let mut output = [0_u8; 32];
let mut hasher = Keccak::v256();
hasher.update(&uncompressed_key_bytes);
hasher.finalize(&mut output);
Ok(discv5::enr::NodeId::parse(&output).expect("Must be correct length"))
}
PublicKey::Ecdsa(_) => Err(format!(
"Unsupported public key (Ecdsa) from peer {}",
peer_id
)),
_ => Err(format!("Unsupported public key from peer {}", peer_id)),
}
}
#[cfg(test)]
mod tests {
use super::*;
#[test]
@ -290,9 +304,9 @@ mod tests {
let sk_bytes = hex::decode(sk_hex).unwrap();
let secret_key = discv5::enr::k256::ecdsa::SigningKey::from_slice(&sk_bytes).unwrap();
let libp2p_sk = libp2p::identity::secp256k1::SecretKey::from_bytes(sk_bytes).unwrap();
let secp256k1_kp: libp2p::identity::secp256k1::Keypair = libp2p_sk.into();
let libp2p_kp = Keypair::Secp256k1(secp256k1_kp);
let libp2p_sk = secp256k1::SecretKey::try_from_bytes(sk_bytes).unwrap();
let secp256k1_kp: secp256k1::Keypair = libp2p_sk.into();
let libp2p_kp: Keypair = secp256k1_kp.into();
let peer_id = libp2p_kp.public().to_peer_id();
let enr = discv5::enr::EnrBuilder::new("v4")
@ -311,9 +325,9 @@ mod tests {
&sk_bytes.clone().try_into().unwrap(),
);
let libp2p_sk = libp2p::identity::ed25519::SecretKey::from_bytes(sk_bytes).unwrap();
let secp256k1_kp: libp2p::identity::ed25519::Keypair = libp2p_sk.into();
let libp2p_kp = Keypair::Ed25519(secp256k1_kp);
let libp2p_sk = ed25519::SecretKey::try_from_bytes(sk_bytes).unwrap();
let secp256k1_kp: ed25519::Keypair = libp2p_sk.into();
let libp2p_kp: Keypair = secp256k1_kp.into();
let peer_id = libp2p_kp.public().to_peer_id();
let enr = discv5::enr::EnrBuilder::new("v4")


@ -16,19 +16,20 @@ pub use enr::{
Eth2Enr,
};
pub use enr_ext::{peer_id_to_node_id, CombinedKeyExt, EnrExt};
pub use libp2p::core::identity::{Keypair, PublicKey};
pub use libp2p::identity::{Keypair, PublicKey};
use enr::{ATTESTATION_BITFIELD_ENR_KEY, ETH2_ENR_KEY, SYNC_COMMITTEE_BITFIELD_ENR_KEY};
use futures::prelude::*;
use futures::stream::FuturesUnordered;
use libp2p::multiaddr::Protocol;
use libp2p::swarm::behaviour::{DialFailure, FromSwarm};
use libp2p::swarm::AddressScore;
use libp2p::swarm::THandlerInEvent;
pub use libp2p::{
core::{connection::ConnectionId, ConnectedPoint, Multiaddr, PeerId},
core::{ConnectedPoint, Multiaddr},
identity::PeerId,
swarm::{
dummy::ConnectionHandler, DialError, NetworkBehaviour, NetworkBehaviourAction as NBAction,
NotifyHandler, PollParameters, SubstreamProtocol,
dummy::ConnectionHandler, ConnectionId, DialError, NetworkBehaviour, NotifyHandler,
PollParameters, SubstreamProtocol, ToSwarm,
},
};
use lru::LruCache;
@ -191,7 +192,7 @@ pub struct Discovery<TSpec: EthSpec> {
impl<TSpec: EthSpec> Discovery<TSpec> {
/// NOTE: Creating discovery requires running within a tokio execution environment.
pub async fn new(
local_key: &Keypair,
local_key: Keypair,
config: &NetworkConfig,
network_globals: Arc<NetworkGlobals<TSpec>>,
log: &slog::Logger,
@ -925,22 +926,51 @@ impl<TSpec: EthSpec> Discovery<TSpec> {
impl<TSpec: EthSpec> NetworkBehaviour for Discovery<TSpec> {
// Discovery is not a real NetworkBehaviour...
type ConnectionHandler = ConnectionHandler;
type OutEvent = DiscoveredPeers;
type ToSwarm = DiscoveredPeers;
fn new_handler(&mut self) -> Self::ConnectionHandler {
ConnectionHandler
fn handle_established_inbound_connection(
&mut self,
_connection_id: ConnectionId,
_peer: PeerId,
_local_addr: &Multiaddr,
_remote_addr: &Multiaddr,
) -> Result<libp2p::swarm::THandler<Self>, libp2p::swarm::ConnectionDenied> {
// TODO: we might want to check discovery's banned ips here in the future.
Ok(ConnectionHandler)
}
// Handles the libp2p request to obtain multiaddrs for peer_id's in order to dial them.
fn addresses_of_peer(&mut self, peer_id: &PeerId) -> Vec<Multiaddr> {
if let Some(enr) = self.enr_of_peer(peer_id) {
fn handle_established_outbound_connection(
&mut self,
_connection_id: ConnectionId,
_peer: PeerId,
_addr: &Multiaddr,
_role_override: libp2p::core::Endpoint,
) -> Result<libp2p::swarm::THandler<Self>, libp2p::swarm::ConnectionDenied> {
Ok(ConnectionHandler)
}
fn on_connection_handler_event(
&mut self,
_peer_id: PeerId,
_connection_id: ConnectionId,
_event: void::Void,
) {
}
fn handle_pending_outbound_connection(
&mut self,
_connection_id: ConnectionId,
maybe_peer: Option<PeerId>,
_addresses: &[Multiaddr],
_effective_role: libp2p::core::Endpoint,
) -> Result<Vec<Multiaddr>, libp2p::swarm::ConnectionDenied> {
if let Some(enr) = maybe_peer.and_then(|peer_id| self.enr_of_peer(&peer_id)) {
// ENR's may have multiple Multiaddrs. The multi-addr associated with the UDP
// port is removed, which is assumed to be associated with the discv5 protocol (and
// therefore irrelevant for other libp2p components).
enr.multiaddr_tcp()
Ok(enr.multiaddr_tcp())
} else {
// PeerId is not known
Vec::new()
Ok(vec![])
}
}
@ -949,7 +979,7 @@ impl<TSpec: EthSpec> NetworkBehaviour for Discovery<TSpec> {
&mut self,
cx: &mut Context,
_: &mut impl PollParameters,
) -> Poll<NBAction<Self::OutEvent, Self::ConnectionHandler>> {
) -> Poll<ToSwarm<Self::ToSwarm, THandlerInEvent<Self>>> {
if !self.started {
return Poll::Pending;
}
@ -960,7 +990,7 @@ impl<TSpec: EthSpec> NetworkBehaviour for Discovery<TSpec> {
// Drive the queries and return any results from completed queries
if let Some(peers) = self.poll_queries(cx) {
// return the result to the peer manager
return Poll::Ready(NBAction::GenerateEvent(DiscoveredPeers { peers }));
return Poll::Ready(ToSwarm::GenerateEvent(DiscoveredPeers { peers }));
}
// Process the server event stream
@ -1034,10 +1064,7 @@ impl<TSpec: EthSpec> NetworkBehaviour for Discovery<TSpec> {
if let Some(address) = addr {
// NOTE: This doesn't actually track the external TCP port. More sophisticated NAT handling
// should handle this.
return Poll::Ready(NBAction::ReportObservedAddr {
address,
score: AddressScore::Finite(1),
});
return Poll::Ready(ToSwarm::NewExternalAddrCandidate(address));
}
}
Discv5Event::EnrAdded { .. }
@ -1065,8 +1092,9 @@ impl<TSpec: EthSpec> NetworkBehaviour for Discovery<TSpec> {
| FromSwarm::ExpiredListenAddr(_)
| FromSwarm::ListenerError(_)
| FromSwarm::ListenerClosed(_)
| FromSwarm::NewExternalAddr(_)
| FromSwarm::ExpiredExternalAddr(_) => {
| FromSwarm::NewExternalAddrCandidate(_)
| FromSwarm::ExternalAddrExpired(_)
| FromSwarm::ExternalAddrConfirmed(_) => {
// Ignore events not relevant to discovery
}
}
@ -1077,10 +1105,8 @@ impl<TSpec: EthSpec> Discovery<TSpec> {
fn on_dial_failure(&mut self, peer_id: Option<PeerId>, error: &DialError) {
if let Some(peer_id) = peer_id {
match error {
DialError::Banned
| DialError::LocalPeerId
| DialError::InvalidPeerId(_)
| DialError::ConnectionIo(_)
DialError::LocalPeerId { .. }
| DialError::Denied { .. }
| DialError::NoAddresses
| DialError::Transport(_)
| DialError::WrongPeerId { .. } => {
@ -1088,9 +1114,7 @@ impl<TSpec: EthSpec> Discovery<TSpec> {
debug!(self.log, "Marking peer disconnected in DHT"; "peer_id" => %peer_id);
self.disconnect_peer(&peer_id);
}
DialError::ConnectionLimit(_)
| DialError::DialPeerConditionFalse(_)
| DialError::Aborted => {}
DialError::DialPeerConditionFalse(_) | DialError::Aborted => {}
}
}
}
@ -1139,8 +1163,8 @@ mod tests {
false,
&log,
);
let keypair = Keypair::Secp256k1(keypair);
Discovery::new(&keypair, &config, Arc::new(globals), &log)
let keypair = keypair.into();
Discovery::new(keypair, &config, Arc::new(globals), &log)
.await
.unwrap()
}


@ -21,7 +21,8 @@ use std::{
use strum::IntoEnumIterator;
use types::{EthSpec, SyncSubnetId};
pub use libp2p::core::{identity::Keypair, Multiaddr};
pub use libp2p::core::Multiaddr;
pub use libp2p::identity::Keypair;
#[allow(clippy::mutable_key_type)] // PeerId in hashmaps are no longer permitted by clippy
pub mod peerdb;


@ -1,12 +1,14 @@
//! Implementation of [`NetworkBehaviour`] for the [`PeerManager`].
use std::task::{Context, Poll};
use futures::StreamExt;
use libp2p::core::ConnectedPoint;
use libp2p::identity::PeerId;
use libp2p::swarm::behaviour::{ConnectionClosed, ConnectionEstablished, DialFailure, FromSwarm};
use libp2p::swarm::dial_opts::{DialOpts, PeerCondition};
use libp2p::swarm::dummy::ConnectionHandler;
use libp2p::swarm::{NetworkBehaviour, NetworkBehaviourAction, PollParameters};
use libp2p::PeerId;
use libp2p::swarm::{ConnectionId, NetworkBehaviour, PollParameters, ToSwarm};
use slog::{debug, error};
use types::EthSpec;
@ -19,20 +21,24 @@ use super::{ConnectingType, PeerManager, PeerManagerEvent, ReportSource};
impl<TSpec: EthSpec> NetworkBehaviour for PeerManager<TSpec> {
type ConnectionHandler = ConnectionHandler;
type OutEvent = PeerManagerEvent;
type ToSwarm = PeerManagerEvent;
/* Required trait members */
fn new_handler(&mut self) -> Self::ConnectionHandler {
ConnectionHandler
fn on_connection_handler_event(
&mut self,
_peer_id: PeerId,
_connection_id: ConnectionId,
_event: libp2p::swarm::THandlerOutEvent<Self>,
) {
// no events from the dummy handler
}
fn poll(
&mut self,
cx: &mut Context<'_>,
_params: &mut impl PollParameters,
) -> Poll<NetworkBehaviourAction<Self::OutEvent, Self::ConnectionHandler>> {
) -> Poll<ToSwarm<Self::ToSwarm, void::Void>> {
// perform the heartbeat when necessary
while self.heartbeat.poll_tick(cx).is_ready() {
self.heartbeat();
@ -84,19 +90,17 @@ impl<TSpec: EthSpec> NetworkBehaviour for PeerManager<TSpec> {
}
if !self.events.is_empty() {
return Poll::Ready(NetworkBehaviourAction::GenerateEvent(self.events.remove(0)));
return Poll::Ready(ToSwarm::GenerateEvent(self.events.remove(0)));
} else {
self.events.shrink_to_fit();
}
if let Some((peer_id, maybe_enr)) = self.peers_to_dial.pop_first() {
self.inject_peer_connection(&peer_id, ConnectingType::Dialing, maybe_enr);
let handler = self.new_handler();
return Poll::Ready(NetworkBehaviourAction::Dial {
return Poll::Ready(ToSwarm::Dial {
opts: DialOpts::peer_id(peer_id)
.condition(PeerCondition::Disconnected)
.build(),
handler,
});
}
@ -110,13 +114,31 @@ impl<TSpec: EthSpec> NetworkBehaviour for PeerManager<TSpec> {
endpoint,
other_established,
..
}) => self.on_connection_established(peer_id, endpoint, other_established),
}) => {
// NOTE: We still need to handle the [`ConnectionEstablished`] because the
// [`NetworkBehaviour::handle_established_inbound_connection`] and
// [`NetworkBehaviour::handle_established_outbound_connection`] are fallible. This
// means another behaviour can kill the connection early, and we can't assume a
// peer as connected until this event is received.
self.on_connection_established(peer_id, endpoint, other_established)
}
FromSwarm::ConnectionClosed(ConnectionClosed {
peer_id,
remaining_established,
..
}) => self.on_connection_closed(peer_id, remaining_established),
FromSwarm::DialFailure(DialFailure { peer_id, .. }) => self.on_dial_failure(peer_id),
FromSwarm::DialFailure(DialFailure {
peer_id,
error,
connection_id: _,
}) => {
debug!(self.log, "Failed to dial peer"; "peer_id"=> ?peer_id, "error" => %error);
self.on_dial_failure(peer_id);
}
FromSwarm::ExternalAddrConfirmed(_) => {
// TODO: we likely want to check this against our assumed external tcp
// address
}
FromSwarm::AddressChange(_)
| FromSwarm::ListenFailure(_)
| FromSwarm::NewListener(_)
@ -124,13 +146,35 @@ impl<TSpec: EthSpec> NetworkBehaviour for PeerManager<TSpec> {
| FromSwarm::ExpiredListenAddr(_)
| FromSwarm::ListenerError(_)
| FromSwarm::ListenerClosed(_)
| FromSwarm::NewExternalAddr(_)
| FromSwarm::ExpiredExternalAddr(_) => {
| FromSwarm::NewExternalAddrCandidate(_)
| FromSwarm::ExternalAddrExpired(_) => {
// The rest of the events we ignore since they are handled in their associated
// `SwarmEvent`
}
}
}
fn handle_established_inbound_connection(
&mut self,
_connection_id: ConnectionId,
_peer: PeerId,
_local_addr: &libp2p::Multiaddr,
_remote_addr: &libp2p::Multiaddr,
) -> Result<libp2p::swarm::THandler<Self>, libp2p::swarm::ConnectionDenied> {
// TODO: we might want to check if we accept this peer or not in the future.
Ok(ConnectionHandler)
}
fn handle_established_outbound_connection(
&mut self,
_connection_id: ConnectionId,
_peer: PeerId,
_addr: &libp2p::Multiaddr,
_role_override: libp2p::core::Endpoint,
) -> Result<libp2p::swarm::THandler<Self>, libp2p::swarm::ConnectionDenied> {
// TODO: we might want to check if we accept this peer or not in the future.
Ok(ConnectionHandler)
}
}
impl<TSpec: EthSpec> PeerManager<TSpec> {


@ -3,21 +3,21 @@
use super::methods::{GoodbyeReason, RPCCodedResponse, RPCResponseErrorCode, ResponseTermination};
use super::outbound::OutboundRequestContainer;
use super::protocol::{max_rpc_size, InboundRequest, Protocol, RPCError, RPCProtocol};
use super::protocol::{
max_rpc_size, InboundOutput, InboundRequest, Protocol, RPCError, RPCProtocol,
};
use super::{RPCReceived, RPCSend, ReqId};
use crate::rpc::outbound::{OutboundFramed, OutboundRequest};
use crate::rpc::protocol::InboundFramed;
use fnv::FnvHashMap;
use futures::prelude::*;
use futures::{Sink, SinkExt};
use libp2p::core::upgrade::{
InboundUpgrade, NegotiationError, OutboundUpgrade, ProtocolError, UpgradeError,
};
use libp2p::swarm::handler::{
ConnectionHandler, ConnectionHandlerEvent, ConnectionHandlerUpgrErr, KeepAlive,
ConnectionEvent, ConnectionHandler, ConnectionHandlerEvent, DialUpgradeError,
FullyNegotiatedInbound, FullyNegotiatedOutbound, KeepAlive, StreamUpgradeError,
SubstreamProtocol,
};
use libp2p::swarm::NegotiatedSubstream;
use libp2p::swarm::Stream;
use slog::{crit, debug, trace, warn};
use smallvec::SmallVec;
use std::{
@ -47,7 +47,7 @@ const MAX_INBOUND_SUBSTREAMS: usize = 32;
#[derive(Debug, Clone, Copy, Hash, Eq, PartialEq)]
pub struct SubstreamId(usize);
type InboundSubstream<TSpec> = InboundFramed<NegotiatedSubstream, TSpec>;
type InboundSubstream<TSpec> = InboundFramed<Stream, TSpec>;
/// Events the handler emits to the behaviour.
pub type HandlerEvent<Id, T> = Result<RPCReceived<Id, T>, HandlerErr<Id>>;
@ -195,12 +195,12 @@ pub enum OutboundSubstreamState<TSpec: EthSpec> {
/// handler because GOODBYE requests can be handled and responses dropped instantly.
RequestPendingResponse {
/// The framed negotiated substream.
substream: Box<OutboundFramed<NegotiatedSubstream, TSpec>>,
substream: Box<OutboundFramed<Stream, TSpec>>,
/// Keeps track of the actual request sent.
request: OutboundRequest<TSpec>,
},
/// Closing an outbound substream>
Closing(Box<OutboundFramed<NegotiatedSubstream, TSpec>>),
Closing(Box<OutboundFramed<Stream, TSpec>>),
/// Temporary state during processing
Poisoned,
}
@ -212,7 +212,7 @@ where
pub fn new(
listen_protocol: SubstreamProtocol<RPCProtocol<TSpec>, ()>,
fork_context: Arc<ForkContext>,
log: &slog::Logger,
log: slog::Logger,
) -> Self {
RPCHandler {
listen_protocol,
@ -230,7 +230,7 @@ where
outbound_io_error_retries: 0,
fork_context,
waker: None,
log: log.clone(),
log,
}
}
@ -315,8 +315,8 @@ where
TSpec: EthSpec,
Id: ReqId,
{
type InEvent = RPCSend<Id, TSpec>;
type OutEvent = HandlerEvent<Id, TSpec>;
type FromBehaviour = RPCSend<Id, TSpec>;
type ToBehaviour = HandlerEvent<Id, TSpec>;
type Error = RPCError;
type InboundProtocol = RPCProtocol<TSpec>;
type OutboundProtocol = OutboundRequestContainer<TSpec>;
@ -327,121 +327,7 @@ where
self.listen_protocol.clone()
}
fn inject_fully_negotiated_outbound(
&mut self,
out: <Self::OutboundProtocol as OutboundUpgrade<NegotiatedSubstream>>::Output,
request_info: Self::OutboundOpenInfo,
) {
self.dial_negotiated -= 1;
let (id, request) = request_info;
let proto = request.versioned_protocol().protocol();
// accept outbound connections only if the handler is not deactivated
if matches!(self.state, HandlerState::Deactivated) {
self.events_out.push(Err(HandlerErr::Outbound {
error: RPCError::Disconnected,
proto,
id,
}));
}
// add the stream to substreams if we expect a response, otherwise drop the stream.
let expected_responses = request.expected_responses();
if expected_responses > 0 {
// new outbound request. Store the stream and tag the output.
let delay_key = self.outbound_substreams_delay.insert(
self.current_outbound_substream_id,
Duration::from_secs(RESPONSE_TIMEOUT),
);
let awaiting_stream = OutboundSubstreamState::RequestPendingResponse {
substream: Box::new(out),
request,
};
let expected_responses = if expected_responses > 1 {
// Currently enforced only for multiple responses
Some(expected_responses)
} else {
None
};
if self
.outbound_substreams
.insert(
self.current_outbound_substream_id,
OutboundInfo {
state: awaiting_stream,
delay_key,
proto,
remaining_chunks: expected_responses,
req_id: id,
},
)
.is_some()
{
crit!(self.log, "Duplicate outbound substream id"; "id" => self.current_outbound_substream_id);
}
self.current_outbound_substream_id.0 += 1;
}
}
fn inject_fully_negotiated_inbound(
&mut self,
substream: <Self::InboundProtocol as InboundUpgrade<NegotiatedSubstream>>::Output,
_info: Self::InboundOpenInfo,
) {
// only accept new peer requests when active
if !matches!(self.state, HandlerState::Active) {
return;
}
let (req, substream) = substream;
let expected_responses = req.expected_responses();
// store requests that expect responses
if expected_responses > 0 {
if self.inbound_substreams.len() < MAX_INBOUND_SUBSTREAMS {
// Store the stream and tag the output.
let delay_key = self.inbound_substreams_delay.insert(
self.current_inbound_substream_id,
Duration::from_secs(RESPONSE_TIMEOUT),
);
let awaiting_stream = InboundState::Idle(substream);
self.inbound_substreams.insert(
self.current_inbound_substream_id,
InboundInfo {
state: awaiting_stream,
pending_items: VecDeque::with_capacity(std::cmp::min(
expected_responses,
128,
) as usize),
delay_key: Some(delay_key),
protocol: req.versioned_protocol().protocol(),
request_start_time: Instant::now(),
remaining_chunks: expected_responses,
},
);
} else {
self.events_out.push(Err(HandlerErr::Inbound {
id: self.current_inbound_substream_id,
proto: req.versioned_protocol().protocol(),
error: RPCError::HandlerRejected,
}));
return self.shutdown(None);
}
}
// If we received a goodbye, shutdown the connection.
if let InboundRequest::Goodbye(_) = req {
self.shutdown(None);
}
self.events_out.push(Ok(RPCReceived::Request(
self.current_inbound_substream_id,
req,
)));
self.current_inbound_substream_id.0 += 1;
}
fn inject_event(&mut self, rpc_event: Self::InEvent) {
fn on_behaviour_event(&mut self, rpc_event: Self::FromBehaviour) {
match rpc_event {
RPCSend::Request(id, req) => self.send_request(id, req),
RPCSend::Response(inbound_id, response) => self.send_response(inbound_id, response),
@ -453,56 +339,6 @@ where
}
}
fn inject_dial_upgrade_error(
&mut self,
request_info: Self::OutboundOpenInfo,
error: ConnectionHandlerUpgrErr<
<Self::OutboundProtocol as OutboundUpgrade<NegotiatedSubstream>>::Error,
>,
) {
let (id, req) = request_info;
if let ConnectionHandlerUpgrErr::Upgrade(UpgradeError::Apply(RPCError::IoError(_))) = error
{
self.outbound_io_error_retries += 1;
if self.outbound_io_error_retries < IO_ERROR_RETRIES {
self.send_request(id, req);
return;
}
}
// This dialing is now considered failed
self.dial_negotiated -= 1;
self.outbound_io_error_retries = 0;
// map the error
let error = match error {
ConnectionHandlerUpgrErr::Timer => RPCError::InternalError("Timer failed"),
ConnectionHandlerUpgrErr::Timeout => RPCError::NegotiationTimeout,
ConnectionHandlerUpgrErr::Upgrade(UpgradeError::Apply(e)) => e,
ConnectionHandlerUpgrErr::Upgrade(UpgradeError::Select(NegotiationError::Failed)) => {
RPCError::UnsupportedProtocol
}
ConnectionHandlerUpgrErr::Upgrade(UpgradeError::Select(
NegotiationError::ProtocolError(e),
)) => match e {
ProtocolError::IoError(io_err) => RPCError::IoError(io_err.to_string()),
ProtocolError::InvalidProtocol => {
RPCError::InternalError("Protocol was deemed invalid")
}
ProtocolError::InvalidMessage | ProtocolError::TooManyProtocols => {
// Peer is sending invalid data during the negotiation phase, not
// participating in the protocol
RPCError::InvalidData("Invalid message during negotiation".to_string())
}
},
};
self.events_out.push(Err(HandlerErr::Outbound {
error,
proto: req.versioned_protocol().protocol(),
id,
}));
}
fn connection_keep_alive(&self) -> KeepAlive {
// Check that we don't have outbound items pending for dialing, nor dialing, nor
// established. Also check that there are no established inbound substreams.
@ -535,7 +371,7 @@ where
ConnectionHandlerEvent<
Self::OutboundProtocol,
Self::OutboundOpenInfo,
Self::OutEvent,
Self::ToBehaviour,
Self::Error,
>,
> {
@ -548,7 +384,9 @@ where
}
// return any events that need to be reported
if !self.events_out.is_empty() {
return Poll::Ready(ConnectionHandlerEvent::Custom(self.events_out.remove(0)));
return Poll::Ready(ConnectionHandlerEvent::NotifyBehaviour(
self.events_out.remove(0),
));
} else {
self.events_out.shrink_to_fit();
}
@ -612,7 +450,9 @@ where
error: RPCError::StreamTimeout,
};
// notify the user
return Poll::Ready(ConnectionHandlerEvent::Custom(Err(outbound_err)));
return Poll::Ready(ConnectionHandlerEvent::NotifyBehaviour(Err(
outbound_err,
)));
} else {
crit!(self.log, "timed out substream not in the books"; "stream_id" => outbound_id.get_ref());
}
@ -872,7 +712,7 @@ where
}),
};
return Poll::Ready(ConnectionHandlerEvent::Custom(received));
return Poll::Ready(ConnectionHandlerEvent::NotifyBehaviour(received));
}
Poll::Ready(None) => {
// stream closed
@ -887,7 +727,7 @@ where
// notify the application error
if request.expected_responses() > 1 {
// return an end of stream result
return Poll::Ready(ConnectionHandlerEvent::Custom(Ok(
return Poll::Ready(ConnectionHandlerEvent::NotifyBehaviour(Ok(
RPCReceived::EndOfStream(request_id, request.stream_termination()),
)));
}
@ -898,7 +738,9 @@ where
proto: request.versioned_protocol().protocol(),
error: RPCError::IncompleteStream,
};
return Poll::Ready(ConnectionHandlerEvent::Custom(Err(outbound_err)));
return Poll::Ready(ConnectionHandlerEvent::NotifyBehaviour(Err(
outbound_err,
)));
}
Poll::Pending => {
entry.get_mut().state =
@ -914,7 +756,9 @@ where
error: e,
};
entry.remove_entry();
return Poll::Ready(ConnectionHandlerEvent::Custom(Err(outbound_err)));
return Poll::Ready(ConnectionHandlerEvent::NotifyBehaviour(Err(
outbound_err,
)));
}
},
OutboundSubstreamState::Closing(mut substream) => {
@ -940,7 +784,7 @@ where
};
if let Some(termination) = termination {
return Poll::Ready(ConnectionHandlerEvent::Custom(Ok(
return Poll::Ready(ConnectionHandlerEvent::NotifyBehaviour(Ok(
RPCReceived::EndOfStream(request_id, termination),
)));
}
@ -989,6 +833,207 @@ where
Poll::Pending
}
fn on_connection_event(
&mut self,
event: ConnectionEvent<
Self::InboundProtocol,
Self::OutboundProtocol,
Self::InboundOpenInfo,
Self::OutboundOpenInfo,
>,
) {
match event {
ConnectionEvent::FullyNegotiatedInbound(FullyNegotiatedInbound {
protocol,
info: _,
}) => self.on_fully_negotiated_inbound(protocol),
ConnectionEvent::FullyNegotiatedOutbound(FullyNegotiatedOutbound {
protocol,
info,
}) => self.on_fully_negotiated_outbound(protocol, info),
ConnectionEvent::DialUpgradeError(DialUpgradeError { info, error }) => {
self.on_dial_upgrade_error(info, error)
}
ConnectionEvent::ListenUpgradeError(libp2p::swarm::handler::ListenUpgradeError {
info: _,
error: _, /* RPCError */
}) => {
// This is going to be removed in the next libp2p release. I think its fine to do
// nothing.
}
ConnectionEvent::LocalProtocolsChange(_) => {
// This shouldn't effect this handler, we will still negotiate streams if we support
// the protocol as usual.
}
ConnectionEvent::RemoteProtocolsChange(_) => {
// This shouldn't effect this handler, we will still negotiate streams if we support
// the protocol as usual.
}
ConnectionEvent::AddressChange(_) => {
// We dont care about these changes as they have no bearing on our RPC internal
// logic.
}
}
}
}
impl<Id, TSpec: EthSpec> RPCHandler<Id, TSpec>
where
Id: ReqId,
TSpec: EthSpec,
{
fn on_fully_negotiated_inbound(&mut self, substream: InboundOutput<Stream, TSpec>) {
// only accept new peer requests when active
if !matches!(self.state, HandlerState::Active) {
return;
}
let (req, substream) = substream;
let expected_responses = req.expected_responses();
// store requests that expect responses
if expected_responses > 0 {
if self.inbound_substreams.len() < MAX_INBOUND_SUBSTREAMS {
// Store the stream and tag the output.
let delay_key = self.inbound_substreams_delay.insert(
self.current_inbound_substream_id,
Duration::from_secs(RESPONSE_TIMEOUT),
);
let awaiting_stream = InboundState::Idle(substream);
self.inbound_substreams.insert(
self.current_inbound_substream_id,
InboundInfo {
state: awaiting_stream,
pending_items: VecDeque::with_capacity(std::cmp::min(
expected_responses,
128,
) as usize),
delay_key: Some(delay_key),
protocol: req.versioned_protocol().protocol(),
request_start_time: Instant::now(),
remaining_chunks: expected_responses,
},
);
} else {
self.events_out.push(Err(HandlerErr::Inbound {
id: self.current_inbound_substream_id,
proto: req.versioned_protocol().protocol(),
error: RPCError::HandlerRejected,
}));
return self.shutdown(None);
}
}
// If we received a goodbye, shutdown the connection.
if let InboundRequest::Goodbye(_) = req {
self.shutdown(None);
}
self.events_out.push(Ok(RPCReceived::Request(
self.current_inbound_substream_id,
req,
)));
self.current_inbound_substream_id.0 += 1;
}
fn on_fully_negotiated_outbound(
&mut self,
substream: OutboundFramed<Stream, TSpec>,
(id, request): (Id, OutboundRequest<TSpec>),
) {
self.dial_negotiated -= 1;
// Reset any io-retries counter.
self.outbound_io_error_retries = 0;
let proto = request.versioned_protocol().protocol();
// accept outbound connections only if the handler is not deactivated
if matches!(self.state, HandlerState::Deactivated) {
self.events_out.push(Err(HandlerErr::Outbound {
error: RPCError::Disconnected,
proto,
id,
}));
}
// add the stream to substreams if we expect a response, otherwise drop the stream.
let expected_responses = request.expected_responses();
if expected_responses > 0 {
// new outbound request. Store the stream and tag the output.
let delay_key = self.outbound_substreams_delay.insert(
self.current_outbound_substream_id,
Duration::from_secs(RESPONSE_TIMEOUT),
);
let awaiting_stream = OutboundSubstreamState::RequestPendingResponse {
substream: Box::new(substream),
request,
};
let expected_responses = if expected_responses > 1 {
// Currently enforced only for multiple responses
Some(expected_responses)
} else {
None
};
if self
.outbound_substreams
.insert(
self.current_outbound_substream_id,
OutboundInfo {
state: awaiting_stream,
delay_key,
proto,
remaining_chunks: expected_responses,
req_id: id,
},
)
.is_some()
{
crit!(self.log, "Duplicate outbound substream id"; "id" => self.current_outbound_substream_id);
}
self.current_outbound_substream_id.0 += 1;
}
}
fn on_dial_upgrade_error(
&mut self,
request_info: (Id, OutboundRequest<TSpec>),
error: StreamUpgradeError<RPCError>,
) {
let (id, req) = request_info;
// map the error
let error = match error {
StreamUpgradeError::Timeout => RPCError::NegotiationTimeout,
StreamUpgradeError::Apply(RPCError::IoError(e)) => {
self.outbound_io_error_retries += 1;
if self.outbound_io_error_retries < IO_ERROR_RETRIES {
self.send_request(id, req);
return;
}
RPCError::IoError(e)
}
StreamUpgradeError::NegotiationFailed => RPCError::UnsupportedProtocol,
StreamUpgradeError::Io(io_err) => {
self.outbound_io_error_retries += 1;
if self.outbound_io_error_retries < IO_ERROR_RETRIES {
self.send_request(id, req);
return;
}
RPCError::IoError(io_err.to_string())
}
StreamUpgradeError::Apply(other) => other,
};
// This dialing is now considered failed
self.dial_negotiated -= 1;
self.outbound_io_error_retries = 0;
self.events_out.push(Err(HandlerErr::Outbound {
error,
proto: req.versioned_protocol().protocol(),
id,
}));
}
}
impl slog::Value for SubstreamId {


@ -6,11 +6,11 @@
use futures::future::FutureExt;
use handler::{HandlerEvent, RPCHandler};
use libp2p::core::connection::ConnectionId;
use libp2p::swarm::{
handler::ConnectionHandler, NetworkBehaviour, NetworkBehaviourAction, NotifyHandler,
PollParameters, SubstreamProtocol,
handler::ConnectionHandler, ConnectionId, NetworkBehaviour, NotifyHandler, PollParameters,
ToSwarm,
};
use libp2p::swarm::{FromSwarm, SubstreamProtocol, THandlerInEvent};
use libp2p::PeerId;
use rate_limiter::{RPCRateLimiter as RateLimiter, RateLimitedErr};
use slog::{crit, debug, o};
@ -21,7 +21,7 @@ use types::{EthSpec, ForkContext};
pub(crate) use handler::HandlerErr;
pub(crate) use methods::{MetaData, MetaDataV1, MetaDataV2, Ping, RPCCodedResponse, RPCResponse};
pub(crate) use protocol::{InboundRequest, RPCProtocol};
pub(crate) use protocol::InboundRequest;
pub use handler::SubstreamId;
pub use methods::{
@ -32,6 +32,7 @@ pub(crate) use outbound::OutboundRequest;
pub use protocol::{max_rpc_size, Protocol, RPCError};
use self::config::{InboundRateLimiterConfig, OutboundRateLimiterConfig};
use self::protocol::RPCProtocol;
use self::self_limiter::SelfRateLimiter;
pub(crate) mod codec;
@ -104,8 +105,7 @@ pub struct RPCMessage<Id, TSpec: EthSpec> {
pub event: HandlerEvent<Id, TSpec>,
}
type BehaviourAction<Id, TSpec> =
NetworkBehaviourAction<RPCMessage<Id, TSpec>, RPCHandler<Id, TSpec>>;
type BehaviourAction<Id, TSpec> = ToSwarm<RPCMessage<Id, TSpec>, RPCSend<Id, TSpec>>;
/// Implements the libp2p `NetworkBehaviour` trait and therefore manages network-level
/// logic.
@ -161,7 +161,7 @@ impl<Id: ReqId, TSpec: EthSpec> RPC<Id, TSpec> {
id: (ConnectionId, SubstreamId),
event: RPCCodedResponse<TSpec>,
) {
self.events.push(NetworkBehaviourAction::NotifyHandler {
self.events.push(ToSwarm::NotifyHandler {
peer_id,
handler: NotifyHandler::One(id.0),
event: RPCSend::Response(id.1, event),
@ -181,7 +181,7 @@ impl<Id: ReqId, TSpec: EthSpec> RPC<Id, TSpec> {
}
}
} else {
NetworkBehaviourAction::NotifyHandler {
ToSwarm::NotifyHandler {
peer_id,
handler: NotifyHandler::Any,
event: RPCSend::Request(request_id, req),
@ -194,7 +194,7 @@ impl<Id: ReqId, TSpec: EthSpec> RPC<Id, TSpec> {
/// Lighthouse wishes to disconnect from this peer by sending a Goodbye message. This
/// gracefully terminates the RPC behaviour with a goodbye message.
pub fn shutdown(&mut self, peer_id: PeerId, id: Id, reason: GoodbyeReason) {
self.events.push(NetworkBehaviourAction::NotifyHandler {
self.events.push(ToSwarm::NotifyHandler {
peer_id,
handler: NotifyHandler::Any,
event: RPCSend::Shutdown(id, reason),
@ -208,11 +208,16 @@ where
Id: ReqId,
{
type ConnectionHandler = RPCHandler<Id, TSpec>;
type OutEvent = RPCMessage<Id, TSpec>;
type ToSwarm = RPCMessage<Id, TSpec>;
fn new_handler(&mut self) -> Self::ConnectionHandler {
RPCHandler::new(
SubstreamProtocol::new(
fn handle_established_inbound_connection(
&mut self,
_connection_id: ConnectionId,
peer_id: PeerId,
_local_addr: &libp2p::Multiaddr,
_remote_addr: &libp2p::Multiaddr,
) -> Result<libp2p::swarm::THandler<Self>, libp2p::swarm::ConnectionDenied> {
let protocol = SubstreamProtocol::new(
RPCProtocol {
fork_context: self.fork_context.clone(),
max_rpc_size: max_rpc_size(&self.fork_context),
@ -220,17 +225,66 @@ where
phantom: PhantomData,
},
(),
),
self.fork_context.clone(),
&self.log,
)
);
// NOTE: this is needed because PeerIds have interior mutability.
let peer_repr = peer_id.to_string();
let log = self.log.new(slog::o!("peer_id" => peer_repr));
let handler = RPCHandler::new(protocol, self.fork_context.clone(), log);
Ok(handler)
}
fn inject_event(
fn handle_established_outbound_connection(
&mut self,
_connection_id: ConnectionId,
peer_id: PeerId,
_addr: &libp2p::Multiaddr,
_role_override: libp2p::core::Endpoint,
) -> Result<libp2p::swarm::THandler<Self>, libp2p::swarm::ConnectionDenied> {
let protocol = SubstreamProtocol::new(
RPCProtocol {
fork_context: self.fork_context.clone(),
max_rpc_size: max_rpc_size(&self.fork_context),
enable_light_client_server: self.enable_light_client_server,
phantom: PhantomData,
},
(),
);
// NOTE: this is needed because PeerIds have interior mutability.
let peer_repr = peer_id.to_string();
let log = self.log.new(slog::o!("peer_id" => peer_repr));
let handler = RPCHandler::new(protocol, self.fork_context.clone(), log);
Ok(handler)
}
fn on_swarm_event(&mut self, event: FromSwarm<Self::ConnectionHandler>) {
match event {
FromSwarm::ConnectionClosed(_)
| FromSwarm::ConnectionEstablished(_)
| FromSwarm::AddressChange(_)
| FromSwarm::DialFailure(_)
| FromSwarm::ListenFailure(_)
| FromSwarm::NewListener(_)
| FromSwarm::NewListenAddr(_)
| FromSwarm::ExpiredListenAddr(_)
| FromSwarm::ListenerError(_)
| FromSwarm::ListenerClosed(_)
| FromSwarm::NewExternalAddrCandidate(_)
| FromSwarm::ExternalAddrExpired(_)
| FromSwarm::ExternalAddrConfirmed(_) => {
// Rpc Behaviour does not act on these swarm events. We use a comprehensive match
// statement to ensure future events are dealt with appropriately.
}
}
}
fn on_connection_handler_event(
&mut self,
peer_id: PeerId,
conn_id: ConnectionId,
event: <Self::ConnectionHandler as ConnectionHandler>::OutEvent,
event: <Self::ConnectionHandler as ConnectionHandler>::ToBehaviour,
) {
if let Ok(RPCReceived::Request(ref id, ref req)) = event {
if let Some(limiter) = self.limiter.as_mut() {
@ -238,8 +292,7 @@ where
match limiter.allows(&peer_id, req) {
Ok(()) => {
// send the event to the user
self.events
.push(NetworkBehaviourAction::GenerateEvent(RPCMessage {
self.events.push(ToSwarm::GenerateEvent(RPCMessage {
peer_id,
conn_id,
event,
@ -281,16 +334,14 @@ where
}
} else {
// No rate limiting, send the event to the user
self.events
.push(NetworkBehaviourAction::GenerateEvent(RPCMessage {
self.events.push(ToSwarm::GenerateEvent(RPCMessage {
peer_id,
conn_id,
event,
}))
}
} else {
self.events
.push(NetworkBehaviourAction::GenerateEvent(RPCMessage {
self.events.push(ToSwarm::GenerateEvent(RPCMessage {
peer_id,
conn_id,
event,
@ -302,7 +353,7 @@ where
&mut self,
cx: &mut Context,
_: &mut impl PollParameters,
) -> Poll<NetworkBehaviourAction<Self::OutEvent, Self::ConnectionHandler>> {
) -> Poll<ToSwarm<Self::ToSwarm, THandlerInEvent<Self>>> {
// let the rate limiter prune.
if let Some(limiter) = self.limiter.as_mut() {
let _ = limiter.poll_unpin(cx);

View File

@ -7,7 +7,7 @@ use crate::rpc::{
use futures::future::BoxFuture;
use futures::prelude::{AsyncRead, AsyncWrite};
use futures::{FutureExt, StreamExt};
use libp2p::core::{InboundUpgrade, ProtocolName, UpgradeInfo};
use libp2p::core::{InboundUpgrade, UpgradeInfo};
use ssz::Encode;
use ssz_types::VariableList;
use std::io;
@ -313,6 +313,12 @@ pub struct ProtocolId {
protocol_id: String,
}
impl AsRef<str> for ProtocolId {
fn as_ref(&self) -> &str {
self.protocol_id.as_ref()
}
}
impl ProtocolId {
/// Returns the min and max sizes of request messages for the given protocol id.
pub fn rpc_request_limits(&self) -> RpcLimits {
@ -407,12 +413,6 @@ impl ProtocolId {
}
}
impl ProtocolName for ProtocolId {
fn protocol_name(&self) -> &[u8] {
self.protocol_id.as_bytes()
}
}
/* Inbound upgrade */
// The inbound protocol reads the request, decodes it and returns the stream to the protocol

View File

@ -64,7 +64,7 @@ impl<Id: ReqId, TSpec: EthSpec> SelfRateLimiter<Id, TSpec> {
}
/// Checks if the rate limiter allows the request. If it's allowed, returns the
/// [`NetworkBehaviourAction`] that should be emitted. When not allowed, the request is delayed
/// [`ToSwarm`] that should be emitted. When not allowed, the request is delayed
/// until it can be sent.
pub fn allows(
&mut self,
@ -95,7 +95,7 @@ impl<Id: ReqId, TSpec: EthSpec> SelfRateLimiter<Id, TSpec> {
}
/// Auxiliary function to deal with self rate limiting outcomes. If the rate limiter allows the
/// request, the [`NetworkBehaviourAction`] that should be emitted is returned. If the request
/// request, the [`ToSwarm`] that should be emitted is returned. If the request
/// should be delayed, it's returned with the duration to wait.
fn try_send_request(
limiter: &mut RateLimiter,

View File

@ -1,6 +1,6 @@
use std::sync::Arc;
use libp2p::core::connection::ConnectionId;
use libp2p::swarm::ConnectionId;
use types::light_client_bootstrap::LightClientBootstrap;
use types::{EthSpec, SignedBeaconBlock};

View File

@ -3,21 +3,27 @@ use crate::peer_manager::PeerManager;
use crate::rpc::{ReqId, RPC};
use crate::types::SnappyTransform;
use libp2p::gossipsub::subscription_filter::{
MaxCountSubscriptionFilter, WhitelistSubscriptionFilter,
};
use libp2p::gossipsub::Gossipsub as BaseGossipsub;
use libp2p::identify::Behaviour as Identify;
use libp2p::gossipsub;
use libp2p::identify;
use libp2p::swarm::NetworkBehaviour;
use types::EthSpec;
use super::api_types::RequestId;
pub type SubscriptionFilter = MaxCountSubscriptionFilter<WhitelistSubscriptionFilter>;
pub type Gossipsub = BaseGossipsub<SnappyTransform, SubscriptionFilter>;
pub type SubscriptionFilter =
gossipsub::MaxCountSubscriptionFilter<gossipsub::WhitelistSubscriptionFilter>;
pub type Gossipsub = gossipsub::Behaviour<SnappyTransform, SubscriptionFilter>;
#[derive(NetworkBehaviour)]
pub(crate) struct Behaviour<AppReqId: ReqId, TSpec: EthSpec> {
pub(crate) struct Behaviour<AppReqId, TSpec>
where
AppReqId: ReqId,
TSpec: EthSpec,
{
/// Peers banned.
pub banned_peers: libp2p::allow_block_list::Behaviour<libp2p::allow_block_list::BlockedPeers>,
/// Keep track of active and pending connections to enforce hard limits.
pub connection_limits: libp2p::connection_limits::Behaviour,
/// The routing pub-sub mechanism for eth2.
pub gossipsub: Gossipsub,
/// The Eth2 RPC specified in the wire-0 protocol.
@ -27,7 +33,7 @@ pub(crate) struct Behaviour<AppReqId: ReqId, TSpec: EthSpec> {
/// Keep regular connections to peers and disconnect if absent.
// NOTE: The id protocol is used for initial interop. This will be removed by mainnet.
/// Provides IP addresses and peer information.
pub identify: Identify,
pub identify: identify::Behaviour,
/// The peer manager that keeps track of peers' reputation and status.
pub peer_manager: PeerManager<TSpec>,
}

View File

@ -1,7 +1,8 @@
use crate::types::{GossipEncoding, GossipKind, GossipTopic};
use crate::{error, TopicHash};
use libp2p::gossipsub::{
GossipsubConfig, IdentTopic as Topic, PeerScoreParams, PeerScoreThresholds, TopicScoreParams,
Config as GossipsubConfig, IdentTopic as Topic, PeerScoreParams, PeerScoreThresholds,
TopicScoreParams,
};
use std::cmp::max;
use std::collections::HashMap;

View File

@ -24,15 +24,12 @@ use api_types::{PeerRequestId, Request, RequestId, Response};
use futures::stream::StreamExt;
use gossipsub_scoring_parameters::{lighthouse_gossip_thresholds, PeerScoreSettings};
use libp2p::bandwidth::BandwidthSinks;
use libp2p::gossipsub::error::PublishError;
use libp2p::gossipsub::metrics::Config as GossipsubMetricsConfig;
use libp2p::gossipsub::subscription_filter::MaxCountSubscriptionFilter;
use libp2p::gossipsub::{
GossipsubEvent, IdentTopic as Topic, MessageAcceptance, MessageAuthenticity, MessageId,
self, IdentTopic as Topic, MessageAcceptance, MessageAuthenticity, MessageId, PublishError,
};
use libp2p::identify::{Behaviour as Identify, Config as IdentifyConfig, Event as IdentifyEvent};
use libp2p::identify;
use libp2p::multiaddr::{Multiaddr, Protocol as MProtocol};
use libp2p::swarm::{ConnectionLimits, Swarm, SwarmBuilder, SwarmEvent};
use libp2p::swarm::{Swarm, SwarmBuilder, SwarmEvent};
use libp2p::PeerId;
use slog::{crit, debug, info, o, trace, warn};
use std::path::PathBuf;
@ -66,10 +63,6 @@ pub enum NetworkEvent<AppReqId: ReqId, TSpec: EthSpec> {
PeerConnectedIncoming(PeerId),
/// A peer has disconnected.
PeerDisconnected(PeerId),
/// The peer needs to be banned.
PeerBanned(PeerId),
/// The peer has been unbanned.
PeerUnbanned(PeerId),
/// An RPC Request that was sent failed.
RPCFailed {
/// The id of the failed request.
@ -229,7 +222,7 @@ impl<AppReqId: ReqId, TSpec: EthSpec> Network<AppReqId, TSpec> {
let update_gossipsub_scores = tokio::time::interval(params.decay_interval);
let possible_fork_digests = ctx.fork_context.all_fork_digests();
let filter = MaxCountSubscriptionFilter {
let filter = gossipsub::MaxCountSubscriptionFilter {
filter: utils::create_whitelist_filter(
possible_fork_digests,
ctx.chain_spec.attestation_subnet_count,
@ -244,7 +237,7 @@ impl<AppReqId: ReqId, TSpec: EthSpec> Network<AppReqId, TSpec> {
// If metrics are enabled for gossipsub build the configuration
let gossipsub_metrics = ctx
.gossipsub_registry
.map(|registry| (registry, GossipsubMetricsConfig::default()));
.map(|registry| (registry, Default::default()));
let snappy_transform = SnappyTransform::new(config.gs_config.max_transmit_size());
let mut gossipsub = Gossipsub::new_with_subscription_filter_and_transform(
@ -273,26 +266,32 @@ impl<AppReqId: ReqId, TSpec: EthSpec> Network<AppReqId, TSpec> {
let discovery = {
// Build and start the discovery sub-behaviour
let mut discovery =
Discovery::new(&local_keypair, &config, network_globals.clone(), &log).await?;
let mut discovery = Discovery::new(
local_keypair.clone(),
&config,
network_globals.clone(),
&log,
)
.await?;
// start searching for peers
discovery.discover_peers(FIND_NODE_QUERY_CLOSEST_PEERS);
discovery
};
let identify = {
let local_public_key = local_keypair.public();
let identify_config = if config.private {
IdentifyConfig::new(
identify::Config::new(
"".into(),
local_keypair.public(), // Still send legitimate public key
local_public_key, // Still send legitimate public key
)
.with_cache_size(0)
} else {
IdentifyConfig::new("eth2/1.0.0".into(), local_keypair.public())
identify::Config::new("eth2/1.0.0".into(), local_public_key)
.with_agent_version(lighthouse_version::version_with_platform())
.with_cache_size(0)
};
Identify::new(identify_config)
identify::Behaviour::new(identify_config)
};
let peer_manager = {
@ -305,13 +304,38 @@ impl<AppReqId: ReqId, TSpec: EthSpec> Network<AppReqId, TSpec> {
PeerManager::new(peer_manager_cfg, network_globals.clone(), &log)?
};
let connection_limits = {
let limits = libp2p::connection_limits::ConnectionLimits::default()
.with_max_pending_incoming(Some(5))
.with_max_pending_outgoing(Some(16))
.with_max_established_incoming(Some(
(config.target_peers as f32
* (1.0 + PEER_EXCESS_FACTOR - MIN_OUTBOUND_ONLY_FACTOR))
.ceil() as u32,
))
.with_max_established_outgoing(Some(
(config.target_peers as f32 * (1.0 + PEER_EXCESS_FACTOR)).ceil() as u32,
))
.with_max_established(Some(
(config.target_peers as f32 * (1.0 + PEER_EXCESS_FACTOR + PRIORITY_PEER_EXCESS))
.ceil() as u32,
))
.with_max_established_per_peer(Some(MAX_CONNECTIONS_PER_PEER));
libp2p::connection_limits::Behaviour::new(limits)
};
let banned_peers = libp2p::allow_block_list::Behaviour::default();
let behaviour = {
Behaviour {
banned_peers,
gossipsub,
eth2_rpc,
discovery,
identify,
peer_manager,
connection_limits,
}
};
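The established-connection caps above are derived from the peer target rather than hard-coded. The following is a minimal sketch of that arithmetic only; the constant values and the `target_peers` figure are illustrative assumptions, not necessarily the ones Lighthouse defines:

```rust
// Assumed, illustrative values; the real constants live in the peer manager.
const PEER_EXCESS_FACTOR: f32 = 0.1;
const MIN_OUTBOUND_ONLY_FACTOR: f32 = 0.2;
const PRIORITY_PEER_EXCESS: f32 = 0.2;

fn main() {
    let target_peers: f32 = 80.0;

    // Incoming slots exclude the share reserved for outbound-only connections.
    let max_established_incoming =
        (target_peers * (1.0 + PEER_EXCESS_FACTOR - MIN_OUTBOUND_ONLY_FACTOR)).ceil() as u32;
    // Outgoing slots may use the full excess over the target.
    let max_established_outgoing = (target_peers * (1.0 + PEER_EXCESS_FACTOR)).ceil() as u32;
    // The overall cap adds further headroom for priority peers.
    let max_established =
        (target_peers * (1.0 + PEER_EXCESS_FACTOR + PRIORITY_PEER_EXCESS)).ceil() as u32;

    println!("incoming <= {max_established_incoming}, outgoing <= {max_established_outgoing}, total <= {max_established}");
}
```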
@ -329,22 +353,6 @@ impl<AppReqId: ReqId, TSpec: EthSpec> Network<AppReqId, TSpec> {
}
// sets up the libp2p connection limits
let limits = ConnectionLimits::default()
.with_max_pending_incoming(Some(5))
.with_max_pending_outgoing(Some(16))
.with_max_established_incoming(Some(
(config.target_peers as f32
* (1.0 + PEER_EXCESS_FACTOR - MIN_OUTBOUND_ONLY_FACTOR))
.ceil() as u32,
))
.with_max_established_outgoing(Some(
(config.target_peers as f32 * (1.0 + PEER_EXCESS_FACTOR)).ceil() as u32,
))
.with_max_established(Some(
(config.target_peers as f32 * (1.0 + PEER_EXCESS_FACTOR + PRIORITY_PEER_EXCESS))
.ceil() as u32,
))
.with_max_established_per_peer(Some(MAX_CONNECTIONS_PER_PEER));
(
SwarmBuilder::with_executor(
@ -354,8 +362,7 @@ impl<AppReqId: ReqId, TSpec: EthSpec> Network<AppReqId, TSpec> {
Executor(executor),
)
.notify_handler_buffer_size(std::num::NonZeroUsize::new(7).expect("Not zero"))
.connection_event_buffer_size(64)
.connection_limits(limits)
.per_connection_event_buffer_size(4)
.build(),
bandwidth,
)
@ -396,7 +403,7 @@ impl<AppReqId: ReqId, TSpec: EthSpec> Network<AppReqId, TSpec> {
match self.swarm.listen_on(listen_multiaddr.clone()) {
Ok(_) => {
let mut log_address = listen_multiaddr;
log_address.push(MProtocol::P2p(enr.peer_id().into()));
log_address.push(MProtocol::P2p(enr.peer_id()));
info!(self.log, "Listening established"; "address" => %log_address);
}
Err(err) => {
@ -493,7 +500,7 @@ impl<AppReqId: ReqId, TSpec: EthSpec> Network<AppReqId, TSpec> {
&mut self.swarm.behaviour_mut().discovery
}
/// Provides IP addresses and peer information.
pub fn identify_mut(&mut self) -> &mut Identify {
pub fn identify_mut(&mut self) -> &mut identify::Behaviour {
&mut self.swarm.behaviour_mut().identify
}
/// The peer manager that keeps track of peers' reputation and status.
@ -514,7 +521,7 @@ impl<AppReqId: ReqId, TSpec: EthSpec> Network<AppReqId, TSpec> {
&self.swarm.behaviour().discovery
}
/// Provides IP addresses and peer information.
pub fn identify(&self) -> &Identify {
pub fn identify(&self) -> &identify::Behaviour {
&self.swarm.behaviour().identify
}
/// The peer manager that keeps track of peers' reputation and status.
@ -1045,9 +1052,12 @@ impl<AppReqId: ReqId, TSpec: EthSpec> Network<AppReqId, TSpec> {
/* Sub-behaviour event handling functions */
/// Handle a gossipsub event.
fn inject_gs_event(&mut self, event: GossipsubEvent) -> Option<NetworkEvent<AppReqId, TSpec>> {
fn inject_gs_event(
&mut self,
event: gossipsub::Event,
) -> Option<NetworkEvent<AppReqId, TSpec>> {
match event {
GossipsubEvent::Message {
gossipsub::Event::Message {
propagation_source,
message_id: id,
message: gs_msg,
@ -1077,7 +1087,7 @@ impl<AppReqId: ReqId, TSpec: EthSpec> Network<AppReqId, TSpec> {
}
}
}
GossipsubEvent::Subscribed { peer_id, topic } => {
gossipsub::Event::Subscribed { peer_id, topic } => {
if let Ok(topic) = GossipTopic::decode(topic.as_str()) {
if let Some(subnet_id) = topic.subnet_id() {
self.network_globals
@ -1118,7 +1128,7 @@ impl<AppReqId: ReqId, TSpec: EthSpec> Network<AppReqId, TSpec> {
}
}
}
GossipsubEvent::Unsubscribed { peer_id, topic } => {
gossipsub::Event::Unsubscribed { peer_id, topic } => {
if let Some(subnet_id) = subnet_from_topic_hash(&topic) {
self.network_globals
.peers
@ -1126,7 +1136,7 @@ impl<AppReqId: ReqId, TSpec: EthSpec> Network<AppReqId, TSpec> {
.remove_subscription(&peer_id, &subnet_id);
}
}
GossipsubEvent::GossipsubNotSupported { peer_id } => {
gossipsub::Event::GossipsubNotSupported { peer_id } => {
debug!(self.log, "Peer does not support gossipsub"; "peer_id" => %peer_id);
self.peer_manager_mut().report_peer(
&peer_id,
@ -1340,10 +1350,10 @@ impl<AppReqId: ReqId, TSpec: EthSpec> Network<AppReqId, TSpec> {
/// Handle an identify event.
fn inject_identify_event(
&mut self,
event: IdentifyEvent,
event: identify::Event,
) -> Option<NetworkEvent<AppReqId, TSpec>> {
match event {
IdentifyEvent::Received { peer_id, mut info } => {
identify::Event::Received { peer_id, mut info } => {
if info.listen_addrs.len() > MAX_IDENTIFY_ADDRESSES {
debug!(
self.log,
@ -1354,9 +1364,9 @@ impl<AppReqId: ReqId, TSpec: EthSpec> Network<AppReqId, TSpec> {
// send peer info to the peer manager.
self.peer_manager_mut().identify(&peer_id, &info);
}
IdentifyEvent::Sent { .. } => {}
IdentifyEvent::Error { .. } => {}
IdentifyEvent::Pushed { .. } => {}
identify::Event::Sent { .. } => {}
identify::Event::Error { .. } => {}
identify::Event::Pushed { .. } => {}
}
None
}
@ -1377,14 +1387,17 @@ impl<AppReqId: ReqId, TSpec: EthSpec> Network<AppReqId, TSpec> {
Some(NetworkEvent::PeerDisconnected(peer_id))
}
PeerManagerEvent::Banned(peer_id, associated_ips) => {
self.swarm.ban_peer_id(peer_id);
self.swarm.behaviour_mut().banned_peers.block_peer(peer_id);
self.discovery_mut().ban_peer(&peer_id, associated_ips);
Some(NetworkEvent::PeerBanned(peer_id))
None
}
PeerManagerEvent::UnBanned(peer_id, associated_ips) => {
self.swarm.unban_peer_id(peer_id);
self.swarm
.behaviour_mut()
.banned_peers
.unblock_peer(peer_id);
self.discovery_mut().unban_peer(&peer_id, associated_ips);
Some(NetworkEvent::PeerUnbanned(peer_id))
None
}
PeerManagerEvent::Status(peer_id) => {
// it's time to status. We don't keep a beacon chain reference here, so we inform
@ -1431,17 +1444,20 @@ impl<AppReqId: ReqId, TSpec: EthSpec> Network<AppReqId, TSpec> {
let maybe_event = match swarm_event {
SwarmEvent::Behaviour(behaviour_event) => match behaviour_event {
// Handle sub-behaviour events.
BehaviourEvent::BannedPeers(void) => void::unreachable(void),
BehaviourEvent::Gossipsub(ge) => self.inject_gs_event(ge),
BehaviourEvent::Eth2Rpc(re) => self.inject_rpc_event(re),
BehaviourEvent::Discovery(de) => self.inject_discovery_event(de),
BehaviourEvent::Identify(ie) => self.inject_identify_event(ie),
BehaviourEvent::PeerManager(pe) => self.inject_pm_event(pe),
BehaviourEvent::ConnectionLimits(le) => void::unreachable(le),
},
SwarmEvent::ConnectionEstablished { .. } => None,
SwarmEvent::ConnectionClosed { .. } => None,
SwarmEvent::IncomingConnection {
local_addr,
send_back_addr,
connection_id: _,
} => {
trace!(self.log, "Incoming connection"; "our_addr" => %local_addr, "from" => %send_back_addr);
None
@ -1450,19 +1466,41 @@ impl<AppReqId: ReqId, TSpec: EthSpec> Network<AppReqId, TSpec> {
local_addr,
send_back_addr,
error,
connection_id: _,
} => {
debug!(self.log, "Failed incoming connection"; "our_addr" => %local_addr, "from" => %send_back_addr, "error" => %error);
let error_repr = match error {
libp2p::swarm::ListenError::Aborted => {
"Incoming connection aborted".to_string()
}
libp2p::swarm::ListenError::WrongPeerId { obtained, endpoint } => {
format!("Wrong peer id, obtained {obtained}, endpoint {endpoint:?}")
}
libp2p::swarm::ListenError::LocalPeerId { endpoint } => {
format!("Dialing local peer id {endpoint:?}")
}
libp2p::swarm::ListenError::Denied { cause } => {
format!("Connection was denied with cause {cause}")
}
libp2p::swarm::ListenError::Transport(t) => match t {
libp2p::TransportError::MultiaddrNotSupported(m) => {
format!("Transport error: Multiaddr not supported: {m}")
}
libp2p::TransportError::Other(e) => {
format!("Transport error: other: {e}")
}
},
};
debug!(self.log, "Failed incoming connection"; "our_addr" => %local_addr, "from" => %send_back_addr, "error" => error_repr);
None
}
SwarmEvent::OutgoingConnectionError { peer_id, error } => {
debug!(self.log, "Failed to dial address"; "peer_id" => ?peer_id, "error" => %error);
None
}
SwarmEvent::BannedPeer {
peer_id,
endpoint: _,
SwarmEvent::OutgoingConnectionError {
peer_id: _,
error: _,
connection_id: _,
} => {
debug!(self.log, "Banned peer connection rejected"; "peer_id" => %peer_id);
// The behaviour-level event is more general than this swarm event, since it
// also covers connection failures, so we rely on the log emitted in the peer
// manager behaviour implementation instead.
None
}
SwarmEvent::NewListenAddr { address, .. } => {
@ -1491,7 +1529,13 @@ impl<AppReqId: ReqId, TSpec: EthSpec> Network<AppReqId, TSpec> {
None
}
}
SwarmEvent::Dialing(_) => None,
SwarmEvent::Dialing {
peer_id,
connection_id: _,
} => {
debug!(self.log, "Swarm Dialing"; "peer_id" => ?peer_id);
None
}
};
if let Some(ev) = maybe_event {

View File

@ -4,13 +4,11 @@ use crate::types::{
error, EnrAttestationBitfield, EnrSyncCommitteeBitfield, GossipEncoding, GossipKind,
};
use crate::{GossipTopic, NetworkConfig};
use libp2p::bandwidth::{BandwidthLogging, BandwidthSinks};
use libp2p::core::{
identity::Keypair, multiaddr::Multiaddr, muxing::StreamMuxerBox, transport::Boxed,
};
use libp2p::gossipsub::subscription_filter::WhitelistSubscriptionFilter;
use libp2p::gossipsub::IdentTopic as Topic;
use libp2p::{core, noise, PeerId, Transport};
use libp2p::bandwidth::BandwidthSinks;
use libp2p::core::{multiaddr::Multiaddr, muxing::StreamMuxerBox, transport::Boxed};
use libp2p::gossipsub;
use libp2p::identity::{secp256k1, Keypair};
use libp2p::{core, noise, yamux, PeerId, Transport, TransportExt};
use prometheus_client::registry::Registry;
use slog::{debug, warn};
use ssz::Decode;
@ -52,30 +50,19 @@ pub fn build_transport(
transport.or_transport(libp2p::websocket::WsConfig::new(trans_clone))
};
let (transport, bandwidth) = BandwidthLogging::new(transport);
// mplex config
let mut mplex_config = libp2p::mplex::MplexConfig::new();
mplex_config.set_max_buffer_size(256);
mplex_config.set_max_buffer_behaviour(libp2p::mplex::MaxBufferBehaviour::Block);
// yamux config
let mut yamux_config = libp2p::yamux::YamuxConfig::default();
yamux_config.set_window_update_mode(libp2p::yamux::WindowUpdateMode::on_read());
// Authentication
Ok((
transport
let mut yamux_config = yamux::Config::default();
yamux_config.set_window_update_mode(yamux::WindowUpdateMode::on_read());
let (transport, bandwidth) = transport
.upgrade(core::upgrade::Version::V1)
.authenticate(generate_noise_config(&local_private_key))
.multiplex(core::upgrade::SelectUpgrade::new(
yamux_config,
mplex_config,
))
.multiplex(yamux_config)
.timeout(Duration::from_secs(10))
.boxed(),
bandwidth,
))
.boxed()
.with_bandwidth_logging();
// Authentication
Ok((transport, bandwidth))
}
// Useful helper functions for debugging. Currently not used in the client.
@ -94,10 +81,10 @@ fn keypair_from_hex(hex_bytes: &str) -> error::Result<Keypair> {
#[allow(dead_code)]
fn keypair_from_bytes(mut bytes: Vec<u8>) -> error::Result<Keypair> {
libp2p::core::identity::secp256k1::SecretKey::from_bytes(&mut bytes)
secp256k1::SecretKey::try_from_bytes(&mut bytes)
.map(|secret| {
let keypair: libp2p::core::identity::secp256k1::Keypair = secret.into();
Keypair::Secp256k1(keypair)
let keypair: secp256k1::Keypair = secret.into();
keypair.into()
})
.map_err(|e| format!("Unable to parse p2p secret key: {:?}", e).into())
}
@ -115,12 +102,10 @@ pub fn load_private_key(config: &NetworkConfig, log: &slog::Logger) -> Keypair {
Err(_) => debug!(log, "Could not read network key file"),
Ok(_) => {
// only accept secp256k1 keys for now
if let Ok(secret_key) =
libp2p::core::identity::secp256k1::SecretKey::from_bytes(&mut key_bytes)
{
let kp: libp2p::core::identity::secp256k1::Keypair = secret_key.into();
if let Ok(secret_key) = secp256k1::SecretKey::try_from_bytes(&mut key_bytes) {
let kp: secp256k1::Keypair = secret_key.into();
debug!(log, "Loaded network key from disk.");
return Keypair::Secp256k1(kp);
return kp.into();
} else {
debug!(log, "Network key file is not a valid secp256k1 key");
}
@ -129,11 +114,10 @@ pub fn load_private_key(config: &NetworkConfig, log: &slog::Logger) -> Keypair {
}
// if a key could not be loaded from disk, generate a new one and save it
let local_private_key = Keypair::generate_secp256k1();
if let Keypair::Secp256k1(key) = local_private_key.clone() {
let local_private_key = secp256k1::Keypair::generate();
let _ = std::fs::create_dir_all(&config.network_dir);
match File::create(network_key_f.clone())
.and_then(|mut f| f.write_all(&key.secret().to_bytes()))
.and_then(|mut f| f.write_all(&local_private_key.secret().to_bytes()))
{
Ok(_) => {
debug!(log, "New network key generated and written to disk");
@ -145,18 +129,12 @@ pub fn load_private_key(config: &NetworkConfig, log: &slog::Logger) -> Keypair {
);
}
}
}
local_private_key
local_private_key.into()
}
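For reference, a minimal, self-contained sketch of the key round-trip performed above, using only the `libp2p::identity` calls that already appear in this file (generate a secp256k1 keypair, serialise the secret, rebuild it from the raw bytes); error handling is reduced to `expect` for brevity:

```rust
use libp2p::identity::{secp256k1, Keypair};

fn main() {
    // Generate a fresh secp256k1 keypair, as done when no key file exists yet.
    let generated = secp256k1::Keypair::generate();

    // Serialise the secret; these are the bytes written to the network key file.
    let mut secret_bytes = generated.secret().to_bytes();

    // Rebuild the keypair from the raw secret bytes, as done when the key file
    // is read back, then convert it into the general-purpose `Keypair` type.
    let secret = secp256k1::SecretKey::try_from_bytes(&mut secret_bytes)
        .expect("bytes came from a valid secp256k1 secret");
    let restored: Keypair = secp256k1::Keypair::from(secret).into();

    // The restored identity maps to the same peer id as the generated one.
    assert_eq!(
        restored.public().to_peer_id(),
        Keypair::from(generated).public().to_peer_id()
    );
}
```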
/// Generate authenticated XX Noise config from identity keys
fn generate_noise_config(
identity_keypair: &Keypair,
) -> noise::NoiseAuthenticated<noise::XX, noise::X25519Spec, ()> {
let static_dh_keys = noise::Keypair::<noise::X25519Spec>::new()
.into_authentic(identity_keypair)
.expect("signing can fail only once during starting a node");
noise::NoiseConfig::xx(static_dh_keys).into_authenticated()
fn generate_noise_config(identity_keypair: &Keypair) -> noise::Config {
noise::Config::new(identity_keypair).expect("signing can fail only once during starting a node")
}
/// For a multiaddr that ends with a peer id, this strips this suffix. Rust-libp2p
@ -236,11 +214,11 @@ pub(crate) fn create_whitelist_filter(
possible_fork_digests: Vec<[u8; 4]>,
attestation_subnet_count: u64,
sync_committee_subnet_count: u64,
) -> WhitelistSubscriptionFilter {
) -> gossipsub::WhitelistSubscriptionFilter {
let mut possible_hashes = HashSet::new();
for fork_digest in possible_fork_digests {
let mut add = |kind| {
let topic: Topic =
let topic: gossipsub::IdentTopic =
GossipTopic::new(kind, GossipEncoding::SSZSnappy, fork_digest).into();
possible_hashes.insert(topic.hash());
};
@ -262,7 +240,7 @@ pub(crate) fn create_whitelist_filter(
add(SyncCommitteeMessage(SyncSubnetId::new(id)));
}
}
WhitelistSubscriptionFilter(possible_hashes)
gossipsub::WhitelistSubscriptionFilter(possible_hashes)
}
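As a small usage sketch of the construction above: hash an identity topic and whitelist it so that subscriptions to anything else are rejected by the filter. The topic string here is made up for illustration:

```rust
use libp2p::gossipsub;
use std::collections::HashSet;

fn main() {
    // Hash a gossipsub identity topic; the whitelist filter operates on hashes.
    let topic = gossipsub::IdentTopic::new("/eth2/00000000/beacon_block/ssz_snappy");

    let mut possible_hashes = HashSet::new();
    possible_hashes.insert(topic.hash());

    // Subscriptions to topics whose hash is not in the set will be filtered out.
    let _filter = gossipsub::WhitelistSubscriptionFilter(possible_hashes);
}
```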
/// Persist metadata to disk

View File

@ -2,7 +2,7 @@
use crate::types::{GossipEncoding, GossipKind, GossipTopic};
use crate::TopicHash;
use libp2p::gossipsub::{DataTransform, GossipsubMessage, RawGossipsubMessage};
use libp2p::gossipsub;
use snap::raw::{decompress_len, Decoder, Encoder};
use ssz::{Decode, Encode};
use std::boxed::Box;
@ -56,12 +56,12 @@ impl SnappyTransform {
}
}
impl DataTransform for SnappyTransform {
impl gossipsub::DataTransform for SnappyTransform {
// Provides the snappy decompression from RawGossipsubMessages
fn inbound_transform(
&self,
raw_message: RawGossipsubMessage,
) -> Result<GossipsubMessage, std::io::Error> {
raw_message: gossipsub::RawMessage,
) -> Result<gossipsub::Message, std::io::Error> {
// check the length of the raw bytes
let len = decompress_len(&raw_message.data)?;
if len > self.max_size_per_message {
@ -75,7 +75,7 @@ impl DataTransform for SnappyTransform {
let decompressed_data = decoder.decompress_vec(&raw_message.data)?;
// Build the GossipsubMessage struct
Ok(GossipsubMessage {
Ok(gossipsub::Message {
source: raw_message.source,
data: decompressed_data,
sequence_number: raw_message.sequence_number,

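A minimal, standalone sketch of the snappy round-trip this transform relies on (compress, check the advertised decompressed length, decompress); the payload is arbitrary, and the `?` conversions rely on the same `snap`-error-to-`io::Error` behaviour used by the code above:

```rust
use snap::raw::{decompress_len, Decoder, Encoder};

fn main() -> std::io::Result<()> {
    let payload = b"example gossip payload".to_vec();

    // Compress with raw (non-framed) snappy, as gossip payloads are on the wire.
    let compressed = Encoder::new().compress_vec(&payload)?;

    // The decompressed length can be read cheaply up front; the transform uses
    // this to enforce its maximum message size before decoding.
    assert_eq!(decompress_len(&compressed)?, payload.len());

    // Decompress and confirm the original bytes come back.
    let roundtrip = Decoder::new().decompress_vec(&compressed)?;
    assert_eq!(roundtrip, payload);
    Ok(())
}
```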
View File

@ -1,5 +1,5 @@
#![cfg(test)]
use libp2p::gossipsub::GossipsubConfigBuilder;
use libp2p::gossipsub;
use lighthouse_network::service::Network as LibP2PService;
use lighthouse_network::Enr;
use lighthouse_network::EnrExt;
@ -81,7 +81,7 @@ pub fn build_config(port: u16, mut boot_nodes: Vec<Enr>) -> NetworkConfig {
config.boot_nodes_enr.append(&mut boot_nodes);
config.network_dir = path.into_path();
// Reduce gossipsub heartbeat parameters
config.gs_config = GossipsubConfigBuilder::from(config.gs_config)
config.gs_config = gossipsub::ConfigBuilder::from(config.gs_config)
.heartbeat_initial_delay(Duration::from_millis(500))
.heartbeat_interval(Duration::from_millis(500))
.build()

View File

@ -493,10 +493,8 @@ impl<T: BeaconChainTypes> NetworkService<T> {
NetworkEvent::PeerConnectedOutgoing(peer_id) => {
self.send_to_router(RouterMessage::StatusPeer(peer_id));
}
NetworkEvent::PeerConnectedIncoming(_)
| NetworkEvent::PeerBanned(_)
| NetworkEvent::PeerUnbanned(_) => {
// No action required for these events.
NetworkEvent::PeerConnectedIncoming(_) => {
// No action required for this event.
}
NetworkEvent::PeerDisconnected(peer_id) => {
self.send_to_router(RouterMessage::PeerDisconnected(peer_id));

View File

@ -28,7 +28,7 @@ operating system.
Install the following packages:
```bash
sudo apt install -y git gcc g++ make cmake pkg-config llvm-dev libclang-dev clang protobuf-compiler
sudo apt install -y git gcc g++ make cmake pkg-config llvm-dev libclang-dev clang
```
> Tips:
@ -51,10 +51,6 @@ After this, you are ready to [build Lighthouse](#build-lighthouse).
brew install cmake
```
1. Install protoc using Homebrew:
```
brew install protobuf
```
[Homebrew]: https://brew.sh/
@ -71,7 +67,7 @@ After this, you are ready to [build Lighthouse](#build-lighthouse).
Set-ExecutionPolicy Bypass -Scope Process -Force; [System.Net.ServicePointManager]::SecurityProtocol = [System.Net.ServicePointManager]::SecurityProtocol -bor 3072; iex ((New-Object System.Net.WebClient).DownloadString('https://community.chocolatey.org/install.ps1'))
```
> - To verify that Chocolatey is ready, run `choco` and it should return the version.
1. Install Make, CMake, LLVM and protoc using Chocolatey:
1. Install Make, CMake and LLVM using Chocolatey:
```
choco install make
@ -85,10 +81,6 @@ choco install cmake --installargs 'ADD_CMAKE_TO_PATH=System'
choco install llvm
```
```
choco install protoc
```
These dependencies are for compiling Lighthouse natively on Windows. Lighthouse can also run
successfully under the [Windows Subsystem for Linux (WSL)][WSL]. If using Ubuntu under WSL, you
should follow the instructions for Ubuntu listed in the [Dependencies (Ubuntu)](#ubuntu) section.
@ -217,4 +209,3 @@ look into [cross compilation](./cross-compiling.md), or use a [pre-built
binary](https://github.com/sigp/lighthouse/releases).
If compilation fails with `error: linking with cc failed: exit code: 1`, try running `cargo clean`.

View File

@ -22,7 +22,7 @@ terminal and an Internet connection are necessary.
Install the Ubuntu dependencies:
```bash
sudo apt install -y git gcc g++ make cmake pkg-config llvm-dev libclang-dev clang protobuf-compiler
sudo apt install -y git gcc g++ make cmake pkg-config llvm-dev libclang-dev clang
```
> Tips:

View File

@ -14,8 +14,6 @@ The additional requirements for developers are:
don't have `anvil` available on your `PATH`.
- [`cmake`](https://cmake.org/cmake/help/latest/command/install.html). Used by
some dependencies. See [`Installation Guide`](./installation.md) for more info.
- [`protoc`](https://github.com/protocolbuffers/protobuf/releases) required for
the networking stack.
- [`java 11 runtime`](https://openjdk.java.net/projects/jdk/). 11 is the minimum,
used by web3signer_tests.
- [`libpq-dev`](https://www.postgresql.org/docs/devel/libpq.html). Also known as

View File

@ -80,7 +80,7 @@ impl<T: EthSpec> BootNodeConfig<T> {
}
let private_key = load_private_key(&network_config, &logger);
let local_key = CombinedKey::from_libp2p(&private_key)?;
let local_key = CombinedKey::from_libp2p(private_key)?;
let local_enr = if let Some(dir) = matches.value_of("network-dir") {
let network_dir: PathBuf = dir.into();

View File

@ -18,4 +18,4 @@ serde_yaml = "0.8.13"
types = { path = "../../consensus/types"}
ethereum_ssz = "0.5.0"
eth2_config = { path = "../eth2_config"}
discv5 = "0.3.0"
discv5 = "0.3.1"

View File

@ -2,7 +2,7 @@
# - from the `lighthouse` dir with the command: `docker build -f ./lcli/Dockerfile .`
# - from the current directory with the command: `docker build -f ./Dockerfile ../`
FROM rust:1.68.2-bullseye AS builder
RUN apt-get update && apt-get -y upgrade && apt-get install -y cmake libclang-dev protobuf-compiler
RUN apt-get update && apt-get -y upgrade && apt-get install -y cmake libclang-dev
COPY . lighthouse
ARG PORTABLE
ENV PORTABLE $PORTABLE

View File

@ -1,14 +0,0 @@
ARG CROSS_BASE_IMAGE
FROM $CROSS_BASE_IMAGE
RUN apt-get update -y && apt-get upgrade -y
RUN apt-get install -y unzip && \
PB_REL="https://github.com/protocolbuffers/protobuf/releases" && \
curl -L $PB_REL/download/v3.15.8/protoc-3.15.8-linux-x86_64.zip -o protoc.zip && \
unzip protoc.zip -d /usr && \
chmod +x /usr/bin/protoc
RUN apt-get install -y cmake clang-3.9
ENV PROTOC=/usr/bin/protoc

View File

@ -1,5 +1,5 @@
FROM rust:1.68.2-bullseye AS builder
RUN apt-get update && apt-get -y upgrade && apt-get install -y cmake libclang-dev protobuf-compiler
RUN apt-get update && apt-get -y upgrade && apt-get install -y cmake libclang-dev
COPY . lighthouse
# Build lighthouse directly with a cargo build command, bypassing the Makefile.