Merge pull request #4578 from jimmygchen/merge-unstable-to-deneb-20230808

Merge unstable to deneb 20230808
commit efbf906094, authored by realbigsean on 2023-08-08 10:42:14 -04:00, committed by GitHub
161 changed files with 8760 additions and 3376 deletions

[file diff]

@@ -21,10 +21,6 @@ jobs:
     - name: Get latest version of stable Rust
       run: rustup update stable
-    - name: Install Protoc
-      uses: arduino/setup-protoc@e52d9eb8f7b63115df1ac544a1376fdbf5a39612
-      with:
-        repo-token: ${{ secrets.GITHUB_TOKEN }}
     - name: Install geth (ubuntu)
      if: matrix.os == 'ubuntu-22.04'
      run: |

[file diff]

@@ -79,15 +79,6 @@ jobs:
       if: startsWith(matrix.arch, 'x86_64-windows')
       run: echo "LIBCLANG_PATH=$((gcm clang).source -replace "clang.exe")" >> $env:GITHUB_ENV
-    # ==============================
-    # Windows & Mac dependencies
-    # ==============================
-    - name: Install Protoc
-      if: contains(matrix.arch, 'darwin') || contains(matrix.arch, 'windows')
-      uses: arduino/setup-protoc@e52d9eb8f7b63115df1ac544a1376fdbf5a39612
-      with:
-        repo-token: ${{ secrets.GITHUB_TOKEN }}
     # ==============================
     # Builds
     # ==============================

[file diff]

@@ -60,10 +60,6 @@ jobs:
     - name: Get latest version of stable Rust
      if: env.SELF_HOSTED_RUNNERS == false
      run: rustup update stable
-    - name: Install Protoc
-      uses: arduino/setup-protoc@e52d9eb8f7b63115df1ac544a1376fdbf5a39612
-      with:
-        repo-token: ${{ secrets.GITHUB_TOKEN }}
     - name: Install Foundry (anvil)
       uses: foundry-rs/foundry-toolchain@v1
     - name: Run tests in release
@@ -83,7 +79,7 @@ jobs:
         node-version: '14'
     - name: Install windows build tools
       run: |
-        choco install python protoc visualstudio2019-workload-vctools -y
+        choco install python visualstudio2019-workload-vctools -y
         npm config set msvs_version 2019
     - name: Install Foundry (anvil)
       uses: foundry-rs/foundry-toolchain@v1
@@ -108,10 +104,6 @@ jobs:
     - name: Get latest version of stable Rust
       if: env.SELF_HOSTED_RUNNERS == false
       run: rustup update stable
-    - name: Install Protoc
-      uses: arduino/setup-protoc@e52d9eb8f7b63115df1ac544a1376fdbf5a39612
-      with:
-        repo-token: ${{ secrets.GITHUB_TOKEN }}
     - name: Run beacon_chain tests for all known forks
       run: make test-beacon-chain
   op-pool-tests:
@@ -122,10 +114,6 @@ jobs:
     - uses: actions/checkout@v3
     - name: Get latest version of stable Rust
       run: rustup update stable
-    - name: Install Protoc
-      uses: arduino/setup-protoc@e52d9eb8f7b63115df1ac544a1376fdbf5a39612
-      with:
-        repo-token: ${{ secrets.GITHUB_TOKEN }}
     - name: Run operation_pool tests for all known forks
       run: make test-op-pool
   network-tests:
@@ -162,10 +150,6 @@ jobs:
     - name: Get latest version of stable Rust
       if: env.SELF_HOSTED_RUNNERS == false
       run: rustup update stable
-    - name: Install Protoc
-      uses: arduino/setup-protoc@e52d9eb8f7b63115df1ac544a1376fdbf5a39612
-      with:
-        repo-token: ${{ secrets.GITHUB_TOKEN }}
     - name: Install Foundry (anvil)
       uses: foundry-rs/foundry-toolchain@v1
     - name: Run tests in debug
@@ -178,10 +162,6 @@ jobs:
     - uses: actions/checkout@v3
     - name: Get latest version of stable Rust
       run: rustup update stable
-    - name: Install Protoc
-      uses: arduino/setup-protoc@e52d9eb8f7b63115df1ac544a1376fdbf5a39612
-      with:
-        repo-token: ${{ secrets.GITHUB_TOKEN }}
     - name: Run state_transition_vectors in release.
       run: make run-state-transition-tests
   ef-tests-ubuntu:
@@ -194,10 +174,6 @@ jobs:
     - name: Get latest version of stable Rust
       if: env.SELF_HOSTED_RUNNERS == false
       run: rustup update stable
-    - name: Install Protoc
-      uses: arduino/setup-protoc@e52d9eb8f7b63115df1ac544a1376fdbf5a39612
-      with:
-        repo-token: ${{ secrets.GITHUB_TOKEN }}
     - name: Run consensus-spec-tests with blst, milagro and fake_crypto
       run: make test-ef
   dockerfile-ubuntu:
@@ -220,10 +196,6 @@ jobs:
     - uses: actions/checkout@v3
     - name: Get latest version of stable Rust
       run: rustup update stable
-    - name: Install Protoc
-      uses: arduino/setup-protoc@e52d9eb8f7b63115df1ac544a1376fdbf5a39612
-      with:
-        repo-token: ${{ secrets.GITHUB_TOKEN }}
     - name: Install Foundry (anvil)
       uses: foundry-rs/foundry-toolchain@v1
     - name: Run the beacon chain sim that starts from an eth1 contract
@@ -236,10 +208,6 @@ jobs:
     - uses: actions/checkout@v3
     - name: Get latest version of stable Rust
       run: rustup update stable
-    - name: Install Protoc
-      uses: arduino/setup-protoc@e52d9eb8f7b63115df1ac544a1376fdbf5a39612
-      with:
-        repo-token: ${{ secrets.GITHUB_TOKEN }}
     - name: Install Foundry (anvil)
       uses: foundry-rs/foundry-toolchain@v1
     - name: Run the beacon chain sim and go through the merge transition
@@ -252,10 +220,6 @@ jobs:
     - uses: actions/checkout@v3
     - name: Get latest version of stable Rust
       run: rustup update stable
-    - name: Install Protoc
-      uses: arduino/setup-protoc@e52d9eb8f7b63115df1ac544a1376fdbf5a39612
-      with:
-        repo-token: ${{ secrets.GITHUB_TOKEN }}
     - name: Run the beacon chain sim without an eth1 connection
       run: cargo run --release --bin simulator no-eth1-sim
   syncing-simulator-ubuntu:
@@ -266,10 +230,6 @@ jobs:
     - uses: actions/checkout@v3
     - name: Get latest version of stable Rust
       run: rustup update stable
-    - name: Install Protoc
-      uses: arduino/setup-protoc@e52d9eb8f7b63115df1ac544a1376fdbf5a39612
-      with:
-        repo-token: ${{ secrets.GITHUB_TOKEN }}
     - name: Install Foundry (anvil)
       uses: foundry-rs/foundry-toolchain@v1
     - name: Run the syncing simulator
@@ -282,10 +242,6 @@ jobs:
     - uses: actions/checkout@v3
     - name: Get latest version of stable Rust
       run: rustup update stable
-    - name: Install Protoc
-      uses: arduino/setup-protoc@e52d9eb8f7b63115df1ac544a1376fdbf5a39612
-      with:
-        repo-token: ${{ secrets.GITHUB_TOKEN }}
     - name: Install geth
       run: |
         sudo add-apt-repository -y ppa:ethereum/ethereum
@@ -317,10 +273,6 @@ jobs:
         dotnet-version: '6.0.201'
     - name: Get latest version of stable Rust
       run: rustup update stable
-    - name: Install Protoc
-      uses: arduino/setup-protoc@e52d9eb8f7b63115df1ac544a1376fdbf5a39612
-      with:
-        repo-token: ${{ secrets.GITHUB_TOKEN }}
     - name: Run exec engine integration tests in release
       run: make test-exec-engine
   check-benchmarks:
@@ -331,10 +283,6 @@ jobs:
     - uses: actions/checkout@v3
     - name: Get latest version of stable Rust
       run: rustup update stable
-    - name: Install Protoc
-      uses: arduino/setup-protoc@e52d9eb8f7b63115df1ac544a1376fdbf5a39612
-      with:
-        repo-token: ${{ secrets.GITHUB_TOKEN }}
     - name: Typecheck benchmark code without running it
       run: make check-benches
   clippy:
@@ -345,10 +293,6 @@ jobs:
     - uses: actions/checkout@v3
     - name: Get latest version of stable Rust
       run: rustup update stable
-    - name: Install Protoc
-      uses: arduino/setup-protoc@e52d9eb8f7b63115df1ac544a1376fdbf5a39612
-      with:
-        repo-token: ${{ secrets.GITHUB_TOKEN }}
     - name: Lint code for quality and style with Clippy
       run: make lint
     - name: Certify Cargo.lock freshness
@@ -361,10 +305,6 @@ jobs:
     - uses: actions/checkout@v3
     - name: Install Rust @ MSRV (${{ needs.extract-msrv.outputs.MSRV }})
       run: rustup override set ${{ needs.extract-msrv.outputs.MSRV }}
-    - name: Install Protoc
-      uses: arduino/setup-protoc@e52d9eb8f7b63115df1ac544a1376fdbf5a39612
-      with:
-        repo-token: ${{ secrets.GITHUB_TOKEN }}
     - name: Run cargo check
       run: cargo check --workspace
   arbitrary-check:
@@ -404,10 +344,6 @@ jobs:
     - uses: actions/checkout@v3
     - name: Install Rust (${{ env.PINNED_NIGHTLY }})
       run: rustup toolchain install $PINNED_NIGHTLY
-    - name: Install Protoc
-      uses: arduino/setup-protoc@e52d9eb8f7b63115df1ac544a1376fdbf5a39612
-      with:
-        repo-token: ${{ secrets.GITHUB_TOKEN }}
     - name: Install cargo-udeps
       run: cargo install cargo-udeps --locked --force
     - name: Create Cargo config dir
@@ -425,7 +361,7 @@ jobs:
     steps:
     - uses: actions/checkout@v3
     - name: Install dependencies
-      run: sudo apt install -y git gcc g++ make cmake pkg-config llvm-dev libclang-dev clang protobuf-compiler
+      run: sudo apt install -y git gcc g++ make cmake pkg-config llvm-dev libclang-dev clang
     - name: Use Rust beta
       run: rustup override set beta
     - name: Run make

[file diff: Cargo.lock (generated, 2240 changed lines; diff suppressed because it is too large)]

[file diff]

@@ -84,6 +84,8 @@ members = [
     "validator_client",
     "validator_client/slashing_protection",
+    "validator_manager",
     "watch",
 ]
 resolver = "2"
@@ -91,7 +93,6 @@ resolver = "2"
 [patch]
 [patch.crates-io]
 warp = { git = "https://github.com/macladson/warp", rev="7e75acc368229a46a236a8c991bf251fe7fe50ef" }
-arbitrary = { git = "https://github.com/michaelsproul/arbitrary", rev="f002b99989b561ddce62e4cf2887b0f8860ae991" }
 [profile.maxperf]
 inherits = "release"

[file diff]

@@ -1,5 +1,5 @@
 [target.x86_64-unknown-linux-gnu]
-dockerfile = './scripts/cross/Dockerfile'
+pre-build = ["apt-get install -y cmake clang-3.9"]
 [target.aarch64-unknown-linux-gnu]
-dockerfile = './scripts/cross/Dockerfile'
+pre-build = ["apt-get install -y cmake clang-3.9"]

[file diff]

@@ -1,5 +1,5 @@
 FROM rust:1.68.2-bullseye AS builder
-RUN apt-get update && apt-get -y upgrade && apt-get install -y cmake libclang-dev protobuf-compiler
+RUN apt-get update && apt-get -y upgrade && apt-get install -y cmake libclang-dev
 COPY . lighthouse
 ARG FEATURES
 ARG PROFILE=release

[file diff]

@@ -24,6 +24,8 @@ safe_arith = {path = "../consensus/safe_arith"}
 slot_clock = { path = "../common/slot_clock" }
 filesystem = { path = "../common/filesystem" }
 sensitive_url = { path = "../common/sensitive_url" }
+serde = { version = "1.0.116", features = ["derive"] }
+serde_json = "1.0.58"
 [dev-dependencies]
 tempfile = "3.1.0"

[file diff]

@@ -1,55 +1,7 @@
-use account_utils::PlainText;
-use account_utils::{read_input_from_user, strip_off_newlines};
-use eth2_wallet::bip39::{Language, Mnemonic};
-use std::fs;
-use std::path::PathBuf;
-use std::str::from_utf8;
-use std::thread::sleep;
-use std::time::Duration;
+use account_utils::read_input_from_user;
-pub const MNEMONIC_PROMPT: &str = "Enter the mnemonic phrase:";
 pub const WALLET_NAME_PROMPT: &str = "Enter wallet name:";
-pub fn read_mnemonic_from_cli(
-    mnemonic_path: Option<PathBuf>,
-    stdin_inputs: bool,
-) -> Result<Mnemonic, String> {
-    let mnemonic = match mnemonic_path {
-        Some(path) => fs::read(&path)
-            .map_err(|e| format!("Unable to read {:?}: {:?}", path, e))
-            .and_then(|bytes| {
-                let bytes_no_newlines: PlainText = strip_off_newlines(bytes).into();
-                let phrase = from_utf8(bytes_no_newlines.as_ref())
-                    .map_err(|e| format!("Unable to derive mnemonic: {:?}", e))?;
-                Mnemonic::from_phrase(phrase, Language::English).map_err(|e| {
-                    format!(
-                        "Unable to derive mnemonic from string {:?}: {:?}",
-                        phrase, e
-                    )
-                })
-            })?,
-        None => loop {
-            eprintln!();
-            eprintln!("{}", MNEMONIC_PROMPT);
-            let mnemonic = read_input_from_user(stdin_inputs)?;
-            match Mnemonic::from_phrase(mnemonic.as_str(), Language::English) {
-                Ok(mnemonic_m) => {
-                    eprintln!("Valid mnemonic provided.");
-                    eprintln!();
-                    sleep(Duration::from_secs(1));
-                    break mnemonic_m;
-                }
-                Err(_) => {
-                    eprintln!("Invalid mnemonic");
-                }
-            }
-        },
-    };
-    Ok(mnemonic)
-}
 /// Reads in a wallet name from the user. If the `--wallet-name` flag is provided, use it. Otherwise
 /// read from an interactive prompt using tty unless the `--stdin-inputs` flag is provided.
 pub fn read_wallet_name_from_cli(

[file diff]

@@ -4,8 +4,8 @@ use account_utils::{
     eth2_keystore::Keystore,
     read_password_from_user,
     validator_definitions::{
-        recursively_find_voting_keystores, ValidatorDefinition, ValidatorDefinitions,
-        CONFIG_FILENAME,
+        recursively_find_voting_keystores, PasswordStorage, ValidatorDefinition,
+        ValidatorDefinitions, CONFIG_FILENAME,
     },
     ZeroizeString,
 };
@@ -277,7 +277,9 @@ pub fn cli_run(matches: &ArgMatches, validator_dir: PathBuf) -> Result<(), String> {
     let suggested_fee_recipient = None;
     let validator_def = ValidatorDefinition::new_keystore_with_password(
         &dest_keystore,
-        password_opt,
+        password_opt
+            .map(PasswordStorage::ValidatorDefinitions)
+            .unwrap_or(PasswordStorage::None),
         graffiti,
         suggested_fee_recipient,
         None,

[file diff]

@@ -1,10 +1,9 @@
 use super::create::STORE_WITHDRAW_FLAG;
-use crate::common::read_mnemonic_from_cli;
 use crate::validator::create::COUNT_FLAG;
 use crate::wallet::create::STDIN_INPUTS_FLAG;
 use crate::SECRETS_DIR_FLAG;
 use account_utils::eth2_keystore::{keypair_from_secret, Keystore, KeystoreBuilder};
-use account_utils::random_password;
+use account_utils::{random_password, read_mnemonic_from_cli};
 use clap::{App, Arg, ArgMatches};
 use directory::ensure_dir_exists;
 use directory::{parse_path_or_default_with_flag, DEFAULT_SECRET_DIR};

[file diff]

@@ -1,6 +1,6 @@
-use crate::common::read_mnemonic_from_cli;
 use crate::wallet::create::{create_wallet_from_mnemonic, STDIN_INPUTS_FLAG};
 use crate::wallet::create::{HD_TYPE, NAME_FLAG, PASSWORD_FLAG, TYPE_FLAG};
+use account_utils::read_mnemonic_from_cli;
 use clap::{App, Arg, ArgMatches};
 use std::path::PathBuf;

[file diff]

@@ -331,7 +331,7 @@ impl<T: BeaconChainTypes> BeaconChain<T> {
         // compute ideal head rewards
         let head = get_attestation_component_delta(
             true,
-            total_balances.previous_epoch_attesters(),
+            total_balances.previous_epoch_head_attesters(),
             total_balances,
             base_reward,
             finality_delay,
@@ -353,7 +353,7 @@ impl<T: BeaconChainTypes> BeaconChain<T> {
         // compute ideal source rewards
         let source = get_attestation_component_delta(
             true,
-            total_balances.previous_epoch_head_attesters(),
+            total_balances.previous_epoch_attesters(),
             total_balances,
             base_reward,
             finality_delay,

[file diff]

@@ -35,10 +35,8 @@
 mod batch;
 use crate::{
-    beacon_chain::{MAXIMUM_GOSSIP_CLOCK_DISPARITY, VALIDATOR_PUBKEY_CACHE_LOCK_TIMEOUT},
-    metrics,
-    observed_aggregates::ObserveOutcome,
-    observed_attesters::Error as ObservedAttestersError,
+    beacon_chain::VALIDATOR_PUBKEY_CACHE_LOCK_TIMEOUT, metrics,
+    observed_aggregates::ObserveOutcome, observed_attesters::Error as ObservedAttestersError,
     BeaconChain, BeaconChainError, BeaconChainTypes,
 };
 use bls::verify_signature_sets;
@@ -57,8 +55,8 @@ use std::borrow::Cow;
 use strum::AsRefStr;
 use tree_hash::TreeHash;
 use types::{
-    Attestation, BeaconCommittee, CommitteeIndex, Epoch, EthSpec, Hash256, IndexedAttestation,
-    SelectionProof, SignedAggregateAndProof, Slot, SubnetId,
+    Attestation, BeaconCommittee, ChainSpec, CommitteeIndex, Epoch, EthSpec, Hash256,
+    IndexedAttestation, SelectionProof, SignedAggregateAndProof, Slot, SubnetId,
 };
 pub use batch::{batch_verify_aggregated_attestations, batch_verify_unaggregated_attestations};
@@ -454,7 +452,7 @@ impl<'a, T: BeaconChainTypes> IndexedAggregatedAttestation<'a, T> {
         // MAXIMUM_GOSSIP_CLOCK_DISPARITY allowance).
         //
         // We do not queue future attestations for later processing.
-        verify_propagation_slot_range(&chain.slot_clock, attestation)?;
+        verify_propagation_slot_range(&chain.slot_clock, attestation, &chain.spec)?;
         // Check the attestation's epoch matches its target.
         if attestation.data.slot.epoch(T::EthSpec::slots_per_epoch())
@@ -722,7 +720,7 @@ impl<'a, T: BeaconChainTypes> IndexedUnaggregatedAttestation<'a, T> {
         // MAXIMUM_GOSSIP_CLOCK_DISPARITY allowance).
         //
         // We do not queue future attestations for later processing.
-        verify_propagation_slot_range(&chain.slot_clock, attestation)?;
+        verify_propagation_slot_range(&chain.slot_clock, attestation, &chain.spec)?;
         // Check to ensure that the attestation is "unaggregated". I.e., it has exactly one
         // aggregation bit set.
@@ -1037,11 +1035,11 @@ fn verify_head_block_is_known<T: BeaconChainTypes>(
 pub fn verify_propagation_slot_range<S: SlotClock, E: EthSpec>(
     slot_clock: &S,
     attestation: &Attestation<E>,
+    spec: &ChainSpec,
 ) -> Result<(), Error> {
     let attestation_slot = attestation.data.slot;
     let latest_permissible_slot = slot_clock
-        .now_with_future_tolerance(MAXIMUM_GOSSIP_CLOCK_DISPARITY)
+        .now_with_future_tolerance(spec.maximum_gossip_clock_disparity())
         .ok_or(BeaconChainError::UnableToReadSlot)?;
     if attestation_slot > latest_permissible_slot {
         return Err(Error::FutureSlot {
@@ -1052,7 +1050,7 @@ pub fn verify_propagation_slot_range<S: SlotClock, E: EthSpec>(
     // Taking advantage of saturating subtraction on `Slot`.
     let earliest_permissible_slot = slot_clock
-        .now_with_past_tolerance(MAXIMUM_GOSSIP_CLOCK_DISPARITY)
+        .now_with_past_tolerance(spec.maximum_gossip_clock_disparity())
         .ok_or(BeaconChainError::UnableToReadSlot)?
         - E::slots_per_epoch();
     if attestation_slot < earliest_permissible_slot {

[file diff]

@@ -259,11 +259,6 @@ pub enum OverrideForkchoiceUpdate {
     AlreadyApplied,
 }
-/// The accepted clock drift for nodes gossiping blocks and attestations. See:
-///
-/// https://github.com/ethereum/eth2.0-specs/blob/v0.12.1/specs/phase0/p2p-interface.md#configuration
-pub const MAXIMUM_GOSSIP_CLOCK_DISPARITY: Duration = Duration::from_millis(500);
 #[derive(Debug, PartialEq)]
 pub enum AttestationProcessingOutcome {
     Processed,
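Editor's note: throughout this merge, the global `MAXIMUM_GOSSIP_CLOCK_DISPARITY` constant (500ms, deleted above) is replaced by a `ChainSpec::maximum_gossip_clock_disparity()` accessor, so the gossip clock-drift tolerance becomes a per-network spec value rather than a compile-time constant. A minimal sketch of the new shape, assuming the spec stores the value in milliseconds (the field name below is illustrative, not taken from this diff):

    use std::time::Duration;

    // Illustrative stand-in for the real `ChainSpec`: the former global
    // constant becomes a per-network field exposed through an accessor.
    pub struct ChainSpec {
        pub maximum_gossip_clock_disparity_millis: u64, // assumed storage format
    }

    impl ChainSpec {
        pub fn maximum_gossip_clock_disparity(&self) -> Duration {
            Duration::from_millis(self.maximum_gossip_clock_disparity_millis)
        }
    }

    fn main() {
        // A mainnet-style spec would keep the old 500ms default.
        let spec = ChainSpec { maximum_gossip_clock_disparity_millis: 500 };
        assert_eq!(spec.maximum_gossip_clock_disparity(), Duration::from_millis(500));
    }

Call sites such as `verify_propagation_slot_range` now take `&chain.spec`, as the hunks above and below show.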

[file diff]

@@ -5,7 +5,7 @@ use std::sync::Arc;
 use crate::beacon_chain::{
     BeaconChain, BeaconChainTypes, BLOCK_PROCESSING_CACHE_LOCK_TIMEOUT,
-    MAXIMUM_GOSSIP_CLOCK_DISPARITY, VALIDATOR_PUBKEY_CACHE_LOCK_TIMEOUT,
+    VALIDATOR_PUBKEY_CACHE_LOCK_TIMEOUT,
 };
 use crate::data_availability_checker::AvailabilityCheckError;
 use crate::kzg_utils::{validate_blob, validate_blobs};
@@ -196,7 +196,7 @@ pub fn validate_blob_sidecar_for_gossip<T: BeaconChainTypes>(
     // Verify that the sidecar is not from a future slot.
     let latest_permissible_slot = chain
         .slot_clock
-        .now_with_future_tolerance(MAXIMUM_GOSSIP_CLOCK_DISPARITY)
+        .now_with_future_tolerance(chain.spec.maximum_gossip_clock_disparity())
         .ok_or(BeaconChainError::UnableToReadSlot)?;
     if blob_slot > latest_permissible_slot {
         return Err(GossipBlobError::FutureSlot {

[file diff]

@@ -65,7 +65,7 @@ use crate::validator_pubkey_cache::ValidatorPubkeyCache;
 use crate::{
     beacon_chain::{
         BeaconForkChoice, ForkChoiceError, BLOCK_PROCESSING_CACHE_LOCK_TIMEOUT,
-        MAXIMUM_GOSSIP_CLOCK_DISPARITY, VALIDATOR_PUBKEY_CACHE_LOCK_TIMEOUT,
+        VALIDATOR_PUBKEY_CACHE_LOCK_TIMEOUT,
     },
     metrics, BeaconChain, BeaconChainError, BeaconChainTypes,
 };
@@ -786,7 +786,7 @@ impl<T: BeaconChainTypes> GossipVerifiedBlock<T> {
         // Do not gossip or process blocks from future slots.
         let present_slot_with_tolerance = chain
             .slot_clock
-            .now_with_future_tolerance(MAXIMUM_GOSSIP_CLOCK_DISPARITY)
+            .now_with_future_tolerance(chain.spec.maximum_gossip_clock_disparity())
             .ok_or(BeaconChainError::UnableToReadSlot)?;
         if block.slot() > present_slot_with_tolerance {
             return Err(BlockError::FutureSlot {

[file diff]

@@ -61,7 +61,7 @@ pub use self::beacon_chain::{
     BeaconStore, ChainSegmentResult, ForkChoiceError, OverrideForkchoiceUpdate,
     ProduceBlockVerification, StateSkipConfig, WhenSlotSkipped,
     INVALID_FINALIZED_MERGE_TRANSITION_BLOCK_SHUTDOWN_REASON,
-    INVALID_JUSTIFIED_PAYLOAD_SHUTDOWN_REASON, MAXIMUM_GOSSIP_CLOCK_DISPARITY,
+    INVALID_JUSTIFIED_PAYLOAD_SHUTDOWN_REASON,
 };
 pub use self::beacon_snapshot::BeaconSnapshot;
 pub use self::chain_config::ChainConfig;

[file diff]

@@ -1,6 +1,4 @@
-use crate::{
-    beacon_chain::MAXIMUM_GOSSIP_CLOCK_DISPARITY, BeaconChain, BeaconChainError, BeaconChainTypes,
-};
+use crate::{BeaconChain, BeaconChainError, BeaconChainTypes};
 use derivative::Derivative;
 use slot_clock::SlotClock;
 use std::time::Duration;
@@ -103,7 +101,8 @@ impl<T: BeaconChainTypes> VerifiedLightClientFinalityUpdate<T> {
         // verify that enough time has passed for the block to have been propagated
         match start_time {
             Some(time) => {
-                if seen_timestamp + MAXIMUM_GOSSIP_CLOCK_DISPARITY < time + one_third_slot_duration
+                if seen_timestamp + chain.spec.maximum_gossip_clock_disparity()
+                    < time + one_third_slot_duration
                 {
                     return Err(Error::TooEarly);
                 }

[file diff]

@@ -1,6 +1,4 @@
-use crate::{
-    beacon_chain::MAXIMUM_GOSSIP_CLOCK_DISPARITY, BeaconChain, BeaconChainError, BeaconChainTypes,
-};
+use crate::{BeaconChain, BeaconChainError, BeaconChainTypes};
 use derivative::Derivative;
 use eth2::types::Hash256;
 use slot_clock::SlotClock;
@@ -103,7 +101,8 @@ impl<T: BeaconChainTypes> VerifiedLightClientOptimisticUpdate<T> {
         // verify that enough time has passed for the block to have been propagated
         match start_time {
             Some(time) => {
-                if seen_timestamp + MAXIMUM_GOSSIP_CLOCK_DISPARITY < time + one_third_slot_duration
+                if seen_timestamp + chain.spec.maximum_gossip_clock_disparity()
+                    < time + one_third_slot_duration
                 {
                     return Err(Error::TooEarly);
                 }

[file diff]

@@ -86,9 +86,6 @@ pub enum MergeReadiness {
         #[serde(serialize_with = "serialize_uint256")]
         current_difficulty: Option<Uint256>,
     },
-    /// The transition configuration with the EL failed, there might be a problem with
-    /// connectivity, authentication or a difference in configuration.
-    ExchangeTransitionConfigurationFailed { error: String },
     /// The EL can be reached and has the correct configuration, however it's not yet synced.
     NotSynced,
     /// The user has not configured this node to use an execution endpoint.
@@ -109,12 +106,6 @@ impl fmt::Display for MergeReadiness {
                     params, current_difficulty
                 )
             }
-            MergeReadiness::ExchangeTransitionConfigurationFailed { error } => write!(
-                f,
-                "Could not confirm the transition configuration with the \
-                 execution endpoint: {:?}",
-                error
-            ),
             MergeReadiness::NotSynced => write!(
                 f,
                 "The execution endpoint is connected and configured, \
@@ -155,14 +146,6 @@ impl<T: BeaconChainTypes> BeaconChain<T> {
     /// Attempts to connect to the EL and confirm that it is ready for the merge.
     pub async fn check_merge_readiness(&self) -> MergeReadiness {
         if let Some(el) = self.execution_layer.as_ref() {
-            if let Err(e) = el.exchange_transition_configuration(&self.spec).await {
-                // The EL was either unreachable, responded with an error or has a different
-                // configuration.
-                return MergeReadiness::ExchangeTransitionConfigurationFailed {
-                    error: format!("{:?}", e),
-                };
-            }
             if !el.is_synced_for_notifier().await {
                 // The EL is not synced.
                 return MergeReadiness::NotSynced;

[file diff]

@@ -28,10 +28,8 @@
 use crate::observed_attesters::SlotSubcommitteeIndex;
 use crate::{
-    beacon_chain::{MAXIMUM_GOSSIP_CLOCK_DISPARITY, VALIDATOR_PUBKEY_CACHE_LOCK_TIMEOUT},
-    metrics,
-    observed_aggregates::ObserveOutcome,
-    BeaconChain, BeaconChainError, BeaconChainTypes,
+    beacon_chain::VALIDATOR_PUBKEY_CACHE_LOCK_TIMEOUT, metrics,
+    observed_aggregates::ObserveOutcome, BeaconChain, BeaconChainError, BeaconChainTypes,
 };
 use bls::{verify_signature_sets, PublicKeyBytes};
 use derivative::Derivative;
@@ -52,6 +50,7 @@ use tree_hash_derive::TreeHash;
 use types::consts::altair::SYNC_COMMITTEE_SUBNET_COUNT;
 use types::slot_data::SlotData;
 use types::sync_committee::Error as SyncCommitteeError;
+use types::ChainSpec;
 use types::{
     sync_committee_contribution::Error as ContributionError, AggregateSignature, BeaconStateError,
     EthSpec, Hash256, SignedContributionAndProof, Slot, SyncCommitteeContribution,
@@ -297,7 +296,7 @@ impl<T: BeaconChainTypes> VerifiedSyncContribution<T> {
         let subcommittee_index = contribution.subcommittee_index as usize;
         // Ensure sync committee contribution is within the MAXIMUM_GOSSIP_CLOCK_DISPARITY allowance.
-        verify_propagation_slot_range(&chain.slot_clock, contribution)?;
+        verify_propagation_slot_range(&chain.slot_clock, contribution, &chain.spec)?;
         // Validate subcommittee index.
         if contribution.subcommittee_index >= SYNC_COMMITTEE_SUBNET_COUNT {
@@ -460,7 +459,7 @@ impl VerifiedSyncCommitteeMessage {
         // MAXIMUM_GOSSIP_CLOCK_DISPARITY allowance).
         //
         // We do not queue future sync committee messages for later processing.
-        verify_propagation_slot_range(&chain.slot_clock, &sync_message)?;
+        verify_propagation_slot_range(&chain.slot_clock, &sync_message, &chain.spec)?;
         // Ensure the `subnet_id` is valid for the given validator.
         let pubkey = chain
@@ -576,11 +575,11 @@ impl VerifiedSyncCommitteeMessage {
 pub fn verify_propagation_slot_range<S: SlotClock, U: SlotData>(
     slot_clock: &S,
     sync_contribution: &U,
+    spec: &ChainSpec,
 ) -> Result<(), Error> {
     let message_slot = sync_contribution.get_slot();
     let latest_permissible_slot = slot_clock
-        .now_with_future_tolerance(MAXIMUM_GOSSIP_CLOCK_DISPARITY)
+        .now_with_future_tolerance(spec.maximum_gossip_clock_disparity())
         .ok_or(BeaconChainError::UnableToReadSlot)?;
     if message_slot > latest_permissible_slot {
         return Err(Error::FutureSlot {
@@ -590,7 +589,7 @@ pub fn verify_propagation_slot_range<S: SlotClock, U: SlotData>(
     }
     let earliest_permissible_slot = slot_clock
-        .now_with_past_tolerance(MAXIMUM_GOSSIP_CLOCK_DISPARITY)
+        .now_with_past_tolerance(spec.maximum_gossip_clock_disparity())
         .ok_or(BeaconChainError::UnableToReadSlot)?;
     if message_slot < earliest_permissible_slot {

[file diff]

@@ -5,7 +5,7 @@ use std::time::Duration;
 /// A simple wrapper around `parking_lot::RwLock` that only permits read/write access with a
 /// time-out (i.e., no indefinitely-blocking operations).
 ///
-/// Timeouts can be optionally be disabled at runtime for all instances of this type by calling
+/// Timeouts can be optionally disabled at runtime for all instances of this type by calling
 /// `TimeoutRwLock::disable_timeouts()`.
 pub struct TimeoutRwLock<T>(RwLock<T>);

[file diff]

@@ -687,7 +687,8 @@ impl<E: EthSpec> BeaconProcessor<E> {
         work_reprocessing_rx: mpsc::Receiver<ReprocessQueueMessage>,
         work_journal_tx: Option<mpsc::Sender<&'static str>>,
         slot_clock: S,
-    ) {
+        maximum_gossip_clock_disparity: Duration,
+    ) -> Result<(), String> {
         // Used by workers to communicate that they are finished a task.
         let (idle_tx, idle_rx) = mpsc::channel::<()>(MAX_IDLE_QUEUE_LEN);
@@ -747,13 +748,15 @@ impl<E: EthSpec> BeaconProcessor<E> {
         // receive them back once they are ready (`ready_work_rx`).
         let (ready_work_tx, ready_work_rx) =
             mpsc::channel::<ReadyWork>(MAX_SCHEDULED_WORK_QUEUE_LEN);
         spawn_reprocess_scheduler(
             ready_work_tx,
             work_reprocessing_rx,
             &self.executor,
             slot_clock,
             self.log.clone(),
-        );
+            maximum_gossip_clock_disparity,
+        )?;
         let executor = self.executor.clone();
@@ -1255,6 +1258,7 @@ impl<E: EthSpec> BeaconProcessor<E> {
         // Spawn on the core executor.
         executor.spawn(manager_future, MANAGER_TASK_NAME);
+        Ok(())
     }
     /// Spawns a blocking worker thread to process some `Work`.

[file diff]

@@ -361,7 +361,12 @@ pub fn spawn_reprocess_scheduler<S: SlotClock + 'static>(
     executor: &TaskExecutor,
     slot_clock: S,
     log: Logger,
-) {
+    maximum_gossip_clock_disparity: Duration,
+) -> Result<(), String> {
+    // Sanity check
+    if ADDITIONAL_QUEUED_BLOCK_DELAY >= maximum_gossip_clock_disparity {
+        return Err("The block delay and gossip disparity don't match.".to_string());
+    }
     let mut queue = ReprocessQueue {
         work_reprocessing_rx,
         ready_work_tx,
@@ -400,6 +405,7 @@ pub fn spawn_reprocess_scheduler<S: SlotClock + 'static>(
         },
         TASK_NAME,
     );
+    Ok(())
 }
 impl<S: SlotClock> ReprocessQueue<S> {

[file diff]

@@ -778,7 +778,8 @@ where
                 self.work_reprocessing_rx,
                 None,
                 beacon_chain.slot_clock.clone(),
-            );
+                beacon_chain.spec.maximum_gossip_clock_disparity(),
+            )?;
         }
         let state_advance_context = runtime_context.service_context("state_advance".into());
@@ -833,9 +834,6 @@ where
             execution_layer.spawn_clean_proposer_caches_routine::<TSlotClock>(
                 beacon_chain.slot_clock.clone(),
             );
-            // Spawns a routine that polls the `exchange_transition_configuration` endpoint.
-            execution_layer.spawn_transition_configuration_poll(beacon_chain.spec.clone());
         }
         // Spawn a service to publish BLS to execution changes at the Capella fork.

[file diff]

@@ -404,14 +404,6 @@ async fn merge_readiness_logging<T: BeaconChainTypes>(
                 "config" => ?other
             ),
         },
-        readiness @ MergeReadiness::ExchangeTransitionConfigurationFailed { error: _ } => {
-            error!(
-                log,
-                "Not ready for merge";
-                "info" => %readiness,
-                "hint" => "try updating Lighthouse and/or the execution layer",
-            )
-        }
        readiness @ MergeReadiness::NotSynced => warn!(
            log,
            "Not ready for merge";

[file diff]

@@ -1,9 +1,9 @@
 use crate::engines::ForkchoiceState;
 use crate::http::{
-    ENGINE_EXCHANGE_TRANSITION_CONFIGURATION_V1, ENGINE_FORKCHOICE_UPDATED_V1,
-    ENGINE_FORKCHOICE_UPDATED_V2, ENGINE_GET_PAYLOAD_BODIES_BY_HASH_V1,
-    ENGINE_GET_PAYLOAD_BODIES_BY_RANGE_V1, ENGINE_GET_PAYLOAD_V1, ENGINE_GET_PAYLOAD_V2,
-    ENGINE_GET_PAYLOAD_V3, ENGINE_NEW_PAYLOAD_V1, ENGINE_NEW_PAYLOAD_V2, ENGINE_NEW_PAYLOAD_V3,
+    ENGINE_FORKCHOICE_UPDATED_V1, ENGINE_FORKCHOICE_UPDATED_V2,
+    ENGINE_GET_PAYLOAD_BODIES_BY_HASH_V1, ENGINE_GET_PAYLOAD_BODIES_BY_RANGE_V1,
+    ENGINE_GET_PAYLOAD_V1, ENGINE_GET_PAYLOAD_V2, ENGINE_GET_PAYLOAD_V3, ENGINE_NEW_PAYLOAD_V1,
+    ENGINE_NEW_PAYLOAD_V2, ENGINE_NEW_PAYLOAD_V3,
 };
 use crate::BlobTxConversionError;
 use eth2::types::{SsePayloadAttributes, SsePayloadAttributesV1, SsePayloadAttributesV2};
@@ -547,7 +547,6 @@ pub struct EngineCapabilities {
     pub get_payload_v1: bool,
     pub get_payload_v2: bool,
     pub get_payload_v3: bool,
-    pub exchange_transition_configuration_v1: bool,
 }
 impl EngineCapabilities {
@@ -583,9 +582,6 @@ impl EngineCapabilities {
         if self.get_payload_v3 {
             response.push(ENGINE_GET_PAYLOAD_V3);
         }
-        if self.exchange_transition_configuration_v1 {
-            response.push(ENGINE_EXCHANGE_TRANSITION_CONFIGURATION_V1);
-        }
         response
     }

[file diff]

@@ -48,10 +48,6 @@ pub const ENGINE_GET_PAYLOAD_BODIES_BY_HASH_V1: &str = "engine_getPayloadBodiesByHashV1";
 pub const ENGINE_GET_PAYLOAD_BODIES_BY_RANGE_V1: &str = "engine_getPayloadBodiesByRangeV1";
 pub const ENGINE_GET_PAYLOAD_BODIES_TIMEOUT: Duration = Duration::from_secs(10);
-pub const ENGINE_EXCHANGE_TRANSITION_CONFIGURATION_V1: &str =
-    "engine_exchangeTransitionConfigurationV1";
-pub const ENGINE_EXCHANGE_TRANSITION_CONFIGURATION_V1_TIMEOUT: Duration = Duration::from_secs(1);
 pub const ENGINE_EXCHANGE_CAPABILITIES: &str = "engine_exchangeCapabilities";
 pub const ENGINE_EXCHANGE_CAPABILITIES_TIMEOUT: Duration = Duration::from_secs(1);
@@ -72,7 +68,6 @@ pub static LIGHTHOUSE_CAPABILITIES: &[&str] = &[
     ENGINE_FORKCHOICE_UPDATED_V2,
     ENGINE_GET_PAYLOAD_BODIES_BY_HASH_V1,
     ENGINE_GET_PAYLOAD_BODIES_BY_RANGE_V1,
-    ENGINE_EXCHANGE_TRANSITION_CONFIGURATION_V1,
 ];
 /// This is necessary because a user might run a capella-enabled version of
@@ -90,7 +85,6 @@ pub static PRE_CAPELLA_ENGINE_CAPABILITIES: EngineCapabilities = EngineCapabilities {
     get_payload_v1: true,
     get_payload_v2: false,
     get_payload_v3: false,
-    exchange_transition_configuration_v1: true,
 };
 /// Contains methods to convert arbitrary bytes to an ETH2 deposit contract object.
@@ -994,24 +988,6 @@ impl HttpJsonRpc {
             .collect())
     }
-    pub async fn exchange_transition_configuration_v1(
-        &self,
-        transition_configuration: TransitionConfigurationV1,
-    ) -> Result<TransitionConfigurationV1, Error> {
-        let params = json!([transition_configuration]);
-        let response = self
-            .rpc_request(
-                ENGINE_EXCHANGE_TRANSITION_CONFIGURATION_V1,
-                params,
-                ENGINE_EXCHANGE_TRANSITION_CONFIGURATION_V1_TIMEOUT
-                    * self.execution_timeout_multiplier,
-            )
-            .await?;
-        Ok(response)
-    }
     pub async fn exchange_capabilities(&self) -> Result<EngineCapabilities, Error> {
         let params = json!([LIGHTHOUSE_CAPABILITIES]);
@@ -1044,8 +1020,6 @@ impl HttpJsonRpc {
             get_payload_v1: capabilities.contains(ENGINE_GET_PAYLOAD_V1),
             get_payload_v2: capabilities.contains(ENGINE_GET_PAYLOAD_V2),
             get_payload_v3: capabilities.contains(ENGINE_GET_PAYLOAD_V3),
-            exchange_transition_configuration_v1: capabilities
-                .contains(ENGINE_EXCHANGE_TRANSITION_CONFIGURATION_V1),
         }),
     }
 }

[file diff]

@@ -80,8 +80,6 @@ const EXECUTION_BLOCKS_LRU_CACHE_SIZE: usize = 128;
 const DEFAULT_SUGGESTED_FEE_RECIPIENT: [u8; 20] =
     [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1];
-const CONFIG_POLL_INTERVAL: Duration = Duration::from_secs(60);
 /// A payload alongside some information about where it came from.
 pub enum ProvenancedPayload<P> {
     /// A good ol' fashioned farm-to-table payload from your local EE.
@@ -582,24 +580,6 @@ impl<T: EthSpec> ExecutionLayer<T> {
         self.spawn(preparation_cleaner, "exec_preparation_cleanup");
     }
-    /// Spawns a routine that polls the `exchange_transition_configuration` endpoint.
-    pub fn spawn_transition_configuration_poll(&self, spec: ChainSpec) {
-        let routine = |el: ExecutionLayer<T>| async move {
-            loop {
-                if let Err(e) = el.exchange_transition_configuration(&spec).await {
-                    error!(
-                        el.log(),
-                        "Failed to check transition config";
-                        "error" => ?e
-                    );
-                }
-                sleep(CONFIG_POLL_INTERVAL).await;
-            }
-        };
-        self.spawn(routine, "exec_config_poll");
-    }
     /// Returns `true` if the execution engine is synced and reachable.
     pub async fn is_synced(&self) -> bool {
         self.engine().is_synced().await
@@ -1394,53 +1374,6 @@ impl<T: EthSpec> ExecutionLayer<T> {
             .map_err(Error::EngineError)
     }
-    pub async fn exchange_transition_configuration(&self, spec: &ChainSpec) -> Result<(), Error> {
-        let local = TransitionConfigurationV1 {
-            terminal_total_difficulty: spec.terminal_total_difficulty,
-            terminal_block_hash: spec.terminal_block_hash,
-            terminal_block_number: 0,
-        };
-        let result = self
-            .engine()
-            .request(|engine| engine.api.exchange_transition_configuration_v1(local))
-            .await;
-        match result {
-            Ok(remote) => {
-                if local.terminal_total_difficulty != remote.terminal_total_difficulty
-                    || local.terminal_block_hash != remote.terminal_block_hash
-                {
-                    error!(
-                        self.log(),
-                        "Execution client config mismatch";
-                        "msg" => "ensure lighthouse and the execution client are up-to-date and \
-                                  configured consistently",
-                        "remote" => ?remote,
-                        "local" => ?local,
-                    );
-                    Err(Error::EngineError(Box::new(EngineError::Api {
-                        error: ApiError::TransitionConfigurationMismatch,
-                    })))
-                } else {
-                    debug!(
-                        self.log(),
-                        "Execution client config is OK";
-                    );
-                    Ok(())
-                }
-            }
-            Err(e) => {
-                error!(
-                    self.log(),
-                    "Unable to get transition config";
-                    "error" => ?e,
-                );
-                Err(Error::EngineError(Box::new(e)))
-            }
-        }
-    }
     /// Returns the execution engine capabilities resulting from a call to
     /// engine_exchangeCapabilities. If the capabilities cache is not populated,
     /// or if it is populated with a cached result of age >= `age_limit`, this

[file diff]

@@ -435,15 +435,6 @@ pub async fn handle_rpc<T: EthSpec>(
             Ok(serde_json::to_value(response).unwrap())
         }
-        ENGINE_EXCHANGE_TRANSITION_CONFIGURATION_V1 => {
-            let block_generator = ctx.execution_block_generator.read();
-            let transition_config: TransitionConfigurationV1 = TransitionConfigurationV1 {
-                terminal_total_difficulty: block_generator.terminal_total_difficulty,
-                terminal_block_hash: block_generator.terminal_block_hash,
-                terminal_block_number: block_generator.terminal_block_number,
-            };
-            Ok(serde_json::to_value(transition_config).unwrap())
-        }
         ENGINE_EXCHANGE_CAPABILITIES => {
             let engine_capabilities = ctx.engine_capabilities.read();
             Ok(serde_json::to_value(engine_capabilities.to_response()).unwrap())

[file diff]

@@ -49,7 +49,6 @@ pub const DEFAULT_ENGINE_CAPABILITIES: EngineCapabilities = EngineCapabilities {
     get_payload_v1: true,
     get_payload_v2: true,
     get_payload_v3: true,
-    exchange_transition_configuration_v1: true,
 };
 mod execution_block_generator;

[file diff]

@@ -42,6 +42,7 @@ operation_pool = { path = "../operation_pool" }
 sensitive_url = { path = "../../common/sensitive_url" }
 unused_port = {path = "../../common/unused_port"}
 store = { path = "../store" }
+bytes = "1.1.0"
 [dev-dependencies]
 environment = { path = "../../lighthouse/environment" }

[file diff]

@@ -1,9 +1,7 @@
 //! Contains the handler for the `GET validator/duties/attester/{epoch}` endpoint.
 use crate::state_id::StateId;
-use beacon_chain::{
-    BeaconChain, BeaconChainError, BeaconChainTypes, MAXIMUM_GOSSIP_CLOCK_DISPARITY,
-};
+use beacon_chain::{BeaconChain, BeaconChainError, BeaconChainTypes};
 use eth2::types::{self as api_types};
 use slot_clock::SlotClock;
 use state_processing::state_advance::partial_state_advance;
@@ -32,7 +30,7 @@ pub fn attester_duties<T: BeaconChainTypes>(
     // will equal `current_epoch + 1`
     let tolerant_current_epoch = chain
         .slot_clock
-        .now_with_future_tolerance(MAXIMUM_GOSSIP_CLOCK_DISPARITY)
+        .now_with_future_tolerance(chain.spec.maximum_gossip_clock_disparity())
         .ok_or_else(|| warp_utils::reject::custom_server_error("unable to read slot clock".into()))?
         .epoch(T::EthSpec::slots_per_epoch());

[file diff]

@@ -30,6 +30,7 @@ use beacon_chain::{
     BeaconChainTypes, ProduceBlockVerification, WhenSlotSkipped,
 };
 pub use block_id::BlockId;
+use bytes::Bytes;
 use directory::DEFAULT_ROOT_DIR;
 use eth2::types::{
     self as api_types, BroadcastValidation, EndpointVersion, ForkChoice, ForkChoiceNode,
@ -1236,6 +1237,41 @@ pub fn serve<T: BeaconChainTypes>(
}, },
); );
let post_beacon_blocks_ssz = eth_v1
.and(warp::path("beacon"))
.and(warp::path("blocks"))
.and(warp::path::end())
.and(warp::body::bytes())
.and(chain_filter.clone())
.and(network_tx_filter.clone())
.and(log_filter.clone())
.and_then(
|block_bytes: Bytes,
chain: Arc<BeaconChain<T>>,
network_tx: UnboundedSender<NetworkMessage<T::EthSpec>>,
log: Logger| async move {
let block_contents = match SignedBlockContents::<T::EthSpec>::from_ssz_bytes(
&block_bytes,
&chain.spec,
) {
Ok(data) => data,
Err(e) => {
return Err(warp_utils::reject::custom_bad_request(format!("{:?}", e)))
}
};
publish_blocks::publish_block(
None,
ProvenancedBlock::local(block_contents),
chain,
&network_tx,
log,
BroadcastValidation::default(),
)
.await
.map(|()| warp::reply().into_response())
},
);
let post_beacon_blocks_v2 = eth_v2 let post_beacon_blocks_v2 = eth_v2
.and(warp::path("beacon")) .and(warp::path("beacon"))
.and(warp::path("blocks")) .and(warp::path("blocks"))
@@ -1274,6 +1310,57 @@ pub fn serve<T: BeaconChainTypes>(
         },
     );
+    let post_beacon_blocks_v2_ssz = eth_v2
+        .and(warp::path("beacon"))
+        .and(warp::path("blocks"))
+        .and(warp::query::<api_types::BroadcastValidationQuery>())
+        .and(warp::path::end())
+        .and(warp::body::bytes())
+        .and(chain_filter.clone())
+        .and(network_tx_filter.clone())
+        .and(log_filter.clone())
+        .then(
+            |validation_level: api_types::BroadcastValidationQuery,
+             block_bytes: Bytes,
+             chain: Arc<BeaconChain<T>>,
+             network_tx: UnboundedSender<NetworkMessage<T::EthSpec>>,
+             log: Logger| async move {
+                let block_contents = match SignedBlockContents::<T::EthSpec>::from_ssz_bytes(
+                    &block_bytes,
+                    &chain.spec,
+                ) {
+                    Ok(data) => data,
+                    Err(_) => {
+                        return warp::reply::with_status(
+                            StatusCode::BAD_REQUEST,
+                            eth2::StatusCode::BAD_REQUEST,
+                        )
+                        .into_response();
+                    }
+                };
+                match publish_blocks::publish_block(
+                    None,
+                    ProvenancedBlock::local(block_contents),
+                    chain,
+                    &network_tx,
+                    log,
+                    validation_level.broadcast_validation,
+                )
+                .await
+                {
+                    Ok(()) => warp::reply().into_response(),
+                    Err(e) => match warp_utils::reject::handle_rejection(e).await {
+                        Ok(reply) => reply.into_response(),
+                        Err(_) => warp::reply::with_status(
+                            StatusCode::INTERNAL_SERVER_ERROR,
+                            eth2::StatusCode::INTERNAL_SERVER_ERROR,
+                        )
+                        .into_response(),
+                    },
+                }
+            },
+        );
     /*
      * beacon/blocks
      */
@@ -1304,6 +1391,42 @@ pub fn serve<T: BeaconChainTypes>(
         },
     );
+    // POST beacon/blinded_blocks
+    let post_beacon_blinded_blocks_ssz = eth_v1
+        .and(warp::path("beacon"))
+        .and(warp::path("blinded_blocks"))
+        .and(warp::path::end())
+        .and(warp::body::bytes())
+        .and(chain_filter.clone())
+        .and(network_tx_filter.clone())
+        .and(log_filter.clone())
+        .and_then(
+            |block_bytes: Bytes,
+             chain: Arc<BeaconChain<T>>,
+             network_tx: UnboundedSender<NetworkMessage<T::EthSpec>>,
+             log: Logger| async move {
+                let block =
+                    match SignedBlockContents::<T::EthSpec, BlindedPayload<_>>::from_ssz_bytes(
+                        &block_bytes,
+                        &chain.spec,
+                    ) {
+                        Ok(data) => data,
+                        Err(e) => {
+                            return Err(warp_utils::reject::custom_bad_request(format!("{:?}", e)))
+                        }
+                    };
+                publish_blocks::publish_blinded_block(
+                    block,
+                    chain,
+                    &network_tx,
+                    log,
+                    BroadcastValidation::default(),
+                )
+                .await
+                .map(|()| warp::reply().into_response())
+            },
+        );
     let post_beacon_blinded_blocks_v2 = eth_v2
         .and(warp::path("beacon"))
         .and(warp::path("blinded_blocks"))
@@ -1341,6 +1464,57 @@ pub fn serve<T: BeaconChainTypes>(
         },
     );
+    let post_beacon_blinded_blocks_v2_ssz = eth_v2
+        .and(warp::path("beacon"))
+        .and(warp::path("blinded_blocks"))
+        .and(warp::query::<api_types::BroadcastValidationQuery>())
+        .and(warp::path::end())
+        .and(warp::body::bytes())
+        .and(chain_filter.clone())
+        .and(network_tx_filter.clone())
+        .and(log_filter.clone())
+        .then(
+            |validation_level: api_types::BroadcastValidationQuery,
+             block_bytes: Bytes,
+             chain: Arc<BeaconChain<T>>,
+             network_tx: UnboundedSender<NetworkMessage<T::EthSpec>>,
+             log: Logger| async move {
+                let block =
+                    match SignedBlockContents::<T::EthSpec, BlindedPayload<_>>::from_ssz_bytes(
+                        &block_bytes,
+                        &chain.spec,
+                    ) {
+                        Ok(data) => data,
+                        Err(_) => {
+                            return warp::reply::with_status(
+                                StatusCode::BAD_REQUEST,
+                                eth2::StatusCode::BAD_REQUEST,
+                            )
+                            .into_response();
+                        }
+                    };
+                match publish_blocks::publish_blinded_block(
+                    block,
+                    chain,
+                    &network_tx,
+                    log,
+                    validation_level.broadcast_validation,
+                )
+                .await
+                {
+                    Ok(()) => warp::reply().into_response(),
+                    Err(e) => match warp_utils::reject::handle_rejection(e).await {
+                        Ok(reply) => reply.into_response(),
+                        Err(_) => warp::reply::with_status(
+                            StatusCode::INTERNAL_SERVER_ERROR,
+                            eth2::StatusCode::INTERNAL_SERVER_ERROR,
+                        )
+                        .into_response(),
+                    },
+                }
+            },
+        );
     let block_id_or_err = warp::path::param::<BlockId>().or_else(|_| async {
         Err(warp_utils::reject::custom_bad_request(
             "Invalid block ID".to_string(),
@@ -2741,6 +2915,7 @@ pub fn serve<T: BeaconChainTypes>(
                 fork_versioned_response(endpoint_version, fork_name, block_contents?)
                     .map(|response| warp::reply::json(&response).into_response())
+                    .map(|res| add_consensus_version_header(res, fork_name))
             },
         );
@@ -2798,6 +2973,7 @@ pub fn serve<T: BeaconChainTypes>(
                 // Pose as a V2 endpoint so we return the fork `version`.
                 fork_versioned_response(V2, fork_name, block)
                     .map(|response| warp::reply::json(&response).into_response())
+                    .map(|res| add_consensus_version_header(res, fork_name))
             },
         );
@ -3345,6 +3521,45 @@ pub fn serve<T: BeaconChainTypes>(
}, },
); );
// POST validator/liveness/{epoch}
let post_validator_liveness_epoch = eth_v1
.and(warp::path("validator"))
.and(warp::path("liveness"))
.and(warp::path::param::<Epoch>())
.and(warp::path::end())
.and(warp::body::json())
.and(chain_filter.clone())
.and_then(
|epoch: Epoch, indices: Vec<u64>, chain: Arc<BeaconChain<T>>| {
blocking_json_task(move || {
// Ensure the request is for either the current, previous or next epoch.
let current_epoch = chain
.epoch()
.map_err(warp_utils::reject::beacon_chain_error)?;
let prev_epoch = current_epoch.saturating_sub(Epoch::new(1));
let next_epoch = current_epoch.saturating_add(Epoch::new(1));
if epoch < prev_epoch || epoch > next_epoch {
return Err(warp_utils::reject::custom_bad_request(format!(
"request epoch {} is more than one epoch from the current epoch {}",
epoch, current_epoch
)));
}
let liveness: Vec<api_types::StandardLivenessResponseData> = indices
.iter()
.cloned()
.map(|index| {
let is_live = chain.validator_seen_at_epoch(index as usize, epoch);
api_types::StandardLivenessResponseData { index, is_live }
})
.collect();
Ok(api_types::GenericResponse::from(liveness))
})
},
);
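The epoch guard above only accepts requests within one epoch of the node's current epoch, saturating at genesis. A standalone sketch of that window check, using plain u64 arithmetic in place of Lighthouse's Epoch type:

fn epoch_in_liveness_window(request_epoch: u64, current_epoch: u64) -> bool {
    let prev_epoch = current_epoch.saturating_sub(1);
    let next_epoch = current_epoch.saturating_add(1);
    // Mirrors the `epoch < prev_epoch || epoch > next_epoch` rejection above.
    (prev_epoch..=next_epoch).contains(&request_epoch)
}

#[cfg(test)]
mod liveness_window_tests {
    use super::*;

    #[test]
    fn window_is_one_epoch_each_side() {
        assert!(epoch_in_liveness_window(4, 5));
        assert!(epoch_in_liveness_window(6, 5));
        assert!(!epoch_in_liveness_window(3, 5));
        // At genesis the lower bound saturates to 0 instead of underflowing.
        assert!(epoch_in_liveness_window(1, 0));
    }
}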
// POST lighthouse/liveness // POST lighthouse/liveness
let post_lighthouse_liveness = warp::path("lighthouse") let post_lighthouse_liveness = warp::path("lighthouse")
.and(warp::path("liveness")) .and(warp::path("liveness"))
@ -3967,7 +4182,15 @@ pub fn serve<T: BeaconChainTypes>(
.boxed() .boxed()
.uor( .uor(
warp::post().and( warp::post().and(
post_beacon_blocks warp::header::exact("Content-Type", "application/octet-stream")
// Routes which expect `application/octet-stream` go within this `and`.
.and(
post_beacon_blocks_ssz
.uor(post_beacon_blocks_v2_ssz)
.uor(post_beacon_blinded_blocks_ssz)
.uor(post_beacon_blinded_blocks_v2_ssz),
)
.uor(post_beacon_blocks)
.uor(post_beacon_blinded_blocks) .uor(post_beacon_blinded_blocks)
.uor(post_beacon_blocks_v2) .uor(post_beacon_blocks_v2)
.uor(post_beacon_blinded_blocks_v2) .uor(post_beacon_blinded_blocks_v2)
@ -3987,6 +4210,7 @@ pub fn serve<T: BeaconChainTypes>(
.uor(post_validator_sync_committee_subscriptions) .uor(post_validator_sync_committee_subscriptions)
.uor(post_validator_prepare_beacon_proposer) .uor(post_validator_prepare_beacon_proposer)
.uor(post_validator_register_validator) .uor(post_validator_register_validator)
.uor(post_validator_liveness_epoch)
.uor(post_lighthouse_liveness) .uor(post_lighthouse_liveness)
.uor(post_lighthouse_database_reconstruct) .uor(post_lighthouse_database_reconstruct)
.uor(post_lighthouse_block_rewards) .uor(post_lighthouse_block_rewards)
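The routing change above gates all SSZ handlers behind an exact Content-Type match, with the JSON handlers as the fallback. A minimal warp sketch of that dispatch pattern (not Lighthouse code; `uor` is Lighthouse's unifying variant of warp's `or`):

use warp::Filter;

#[tokio::main]
async fn main() {
    // Tried first: only matches when the header value is exact.
    let ssz = warp::post()
        .and(warp::header::exact("Content-Type", "application/octet-stream"))
        .map(|| "handled as SSZ");
    // Fallback for everything else, e.g. application/json bodies.
    let json = warp::post().map(|| "handled as JSON");
    warp::serve(ssz.or(json)).run(([127, 0, 0, 1], 3030)).await;
}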

View File

@ -3,7 +3,7 @@
use crate::state_id::StateId; use crate::state_id::StateId;
use beacon_chain::{ use beacon_chain::{
beacon_proposer_cache::{compute_proposer_duties_from_head, ensure_state_is_in_epoch}, beacon_proposer_cache::{compute_proposer_duties_from_head, ensure_state_is_in_epoch},
BeaconChain, BeaconChainError, BeaconChainTypes, MAXIMUM_GOSSIP_CLOCK_DISPARITY, BeaconChain, BeaconChainError, BeaconChainTypes,
}; };
use eth2::types::{self as api_types}; use eth2::types::{self as api_types};
use safe_arith::SafeArith; use safe_arith::SafeArith;
@ -33,7 +33,7 @@ pub fn proposer_duties<T: BeaconChainTypes>(
// will equal `current_epoch + 1` // will equal `current_epoch + 1`
let tolerant_current_epoch = chain let tolerant_current_epoch = chain
.slot_clock .slot_clock
.now_with_future_tolerance(MAXIMUM_GOSSIP_CLOCK_DISPARITY) .now_with_future_tolerance(chain.spec.maximum_gossip_clock_disparity())
.ok_or_else(|| warp_utils::reject::custom_server_error("unable to read slot clock".into()))? .ok_or_else(|| warp_utils::reject::custom_server_error("unable to read slot clock".into()))?
.epoch(T::EthSpec::slots_per_epoch()); .epoch(T::EthSpec::slots_per_epoch());

View File

@ -6,7 +6,7 @@ use beacon_chain::sync_committee_verification::{
}; };
use beacon_chain::{ use beacon_chain::{
validator_monitor::timestamp_now, BeaconChain, BeaconChainError, BeaconChainTypes, validator_monitor::timestamp_now, BeaconChain, BeaconChainError, BeaconChainTypes,
StateSkipConfig, MAXIMUM_GOSSIP_CLOCK_DISPARITY, StateSkipConfig,
}; };
use eth2::types::{self as api_types}; use eth2::types::{self as api_types};
use lighthouse_network::PubsubMessage; use lighthouse_network::PubsubMessage;
@ -85,7 +85,7 @@ fn duties_from_state_load<T: BeaconChainTypes>(
let current_epoch = chain.epoch()?; let current_epoch = chain.epoch()?;
let tolerant_current_epoch = chain let tolerant_current_epoch = chain
.slot_clock .slot_clock
.now_with_future_tolerance(MAXIMUM_GOSSIP_CLOCK_DISPARITY) .now_with_future_tolerance(chain.spec.maximum_gossip_clock_disparity())
.ok_or(BeaconChainError::UnableToReadSlot)? .ok_or(BeaconChainError::UnableToReadSlot)?
.epoch(T::EthSpec::slots_per_epoch()); .epoch(T::EthSpec::slots_per_epoch());
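Both hunks above replace the global MAXIMUM_GOSSIP_CLOCK_DISPARITY constant with a value read from the runtime ChainSpec, so the tolerance can differ per network. A standalone sketch of the shape; the struct and the 500 ms default here are illustrative stand-ins for the real spec field:

use std::time::Duration;

struct ChainSpec {
    maximum_gossip_clock_disparity_millis: u64,
}

impl ChainSpec {
    fn maximum_gossip_clock_disparity(&self) -> Duration {
        Duration::from_millis(self.maximum_gossip_clock_disparity_millis)
    }
}

fn main() {
    let spec = ChainSpec { maximum_gossip_clock_disparity_millis: 500 };
    // Callers take the tolerance from the spec instead of a compile-time constant.
    assert_eq!(spec.maximum_gossip_clock_disparity(), Duration::from_millis(500));
}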

View File

@ -7,12 +7,9 @@ use directory::DEFAULT_ROOT_DIR;
use eth2::{BeaconNodeHttpClient, Timeouts}; use eth2::{BeaconNodeHttpClient, Timeouts};
use lighthouse_network::{ use lighthouse_network::{
discv5::enr::{CombinedKey, EnrBuilder}, discv5::enr::{CombinedKey, EnrBuilder},
libp2p::{ libp2p::swarm::{
core::connection::ConnectionId, behaviour::{ConnectionEstablished, FromSwarm},
swarm::{ ConnectionId, NetworkBehaviour,
behaviour::{ConnectionEstablished, FromSwarm},
NetworkBehaviour,
},
}, },
rpc::methods::{MetaData, MetaDataV2}, rpc::methods::{MetaData, MetaDataV2},
types::{EnrAttestationBitfield, EnrSyncCommitteeBitfield, SyncState}, types::{EnrAttestationBitfield, EnrSyncCommitteeBitfield, SyncState},
@ -167,7 +164,7 @@ pub async fn create_api_server_on_port<T: BeaconChainTypes>(
local_addr: EXTERNAL_ADDR.parse().unwrap(), local_addr: EXTERNAL_ADDR.parse().unwrap(),
send_back_addr: EXTERNAL_ADDR.parse().unwrap(), send_back_addr: EXTERNAL_ADDR.parse().unwrap(),
}; };
let connection_id = ConnectionId::new(1); let connection_id = ConnectionId::new_unchecked(1);
pm.on_swarm_event(FromSwarm::ConnectionEstablished(ConnectionEstablished { pm.on_swarm_event(FromSwarm::ConnectionEstablished(ConnectionEstablished {
peer_id, peer_id,
connection_id, connection_id,

View File

@ -182,6 +182,49 @@ pub async fn gossip_full_pass() {
.block_is_known_to_fork_choice(&block.canonical_root())); .block_is_known_to_fork_choice(&block.canonical_root()));
} }
/// This test checks that a block that is valid from both a gossip and consensus perspective is accepted when using `broadcast_validation=gossip`.
#[tokio::test(flavor = "multi_thread", worker_threads = 2)]
pub async fn gossip_full_pass_ssz() {
/* this test targets gossip-level validation */
let validation_level: Option<BroadcastValidation> = Some(BroadcastValidation::Gossip);
// Validator count needs to be at least 32 or proposer boost gets set to 0 when computing
// `validator_count // 32`.
let validator_count = 64;
let num_initial: u64 = 31;
let tester = InteractiveTester::<E>::new(None, validator_count).await;
// Create some chain depth.
tester.harness.advance_slot();
tester
.harness
.extend_chain(
num_initial as usize,
BlockStrategy::OnCanonicalHead,
AttestationStrategy::AllValidators,
)
.await;
tester.harness.advance_slot();
let slot_a = Slot::new(num_initial);
let slot_b = slot_a + 1;
let state_a = tester.harness.get_current_state();
let ((block, _), _): ((SignedBeaconBlock<E>, _), _) =
tester.harness.make_block(state_a, slot_b).await;
let response: Result<(), eth2::Error> = tester
.client
.post_beacon_blocks_v2_ssz(&block, validation_level)
.await;
assert!(response.is_ok());
assert!(tester
.harness
.chain
.block_is_known_to_fork_choice(&block.canonical_root()));
}
/// This test checks that a block that is **invalid** from a gossip perspective gets rejected when using `broadcast_validation=consensus`. /// This test checks that a block that is **invalid** from a gossip perspective gets rejected when using `broadcast_validation=consensus`.
#[tokio::test(flavor = "multi_thread", worker_threads = 2)] #[tokio::test(flavor = "multi_thread", worker_threads = 2)]
pub async fn consensus_invalid() { pub async fn consensus_invalid() {
@ -836,6 +879,49 @@ pub async fn blinded_gossip_full_pass() {
.block_is_known_to_fork_choice(&block.canonical_root())); .block_is_known_to_fork_choice(&block.canonical_root()));
} }
/// This test checks that a block that is valid from both a gossip and consensus perspective is accepted when using `broadcast_validation=gossip`.
#[tokio::test(flavor = "multi_thread", worker_threads = 2)]
pub async fn blinded_gossip_full_pass_ssz() {
/* this test targets gossip-level validation */
let validation_level: Option<BroadcastValidation> = Some(BroadcastValidation::Gossip);
// Validator count needs to be at least 32 or proposer boost gets set to 0 when computing
// `validator_count // 32`.
let validator_count = 64;
let num_initial: u64 = 31;
let tester = InteractiveTester::<E>::new(None, validator_count).await;
// Create some chain depth.
tester.harness.advance_slot();
tester
.harness
.extend_chain(
num_initial as usize,
BlockStrategy::OnCanonicalHead,
AttestationStrategy::AllValidators,
)
.await;
tester.harness.advance_slot();
let slot_a = Slot::new(num_initial);
let slot_b = slot_a + 1;
let state_a = tester.harness.get_current_state();
let ((block, _), _): ((SignedBlindedBeaconBlock<E>, _), _) =
tester.harness.make_blinded_block(state_a, slot_b).await;
let response: Result<(), eth2::Error> = tester
.client
.post_beacon_blinded_blocks_v2_ssz(&block, validation_level)
.await;
assert!(response.is_ok());
assert!(tester
.harness
.chain
.block_is_known_to_fork_choice(&block.canonical_root()));
}
/// This test checks that a block that is **invalid** from a gossip perspective gets rejected when using `broadcast_validation=consensus`. /// This test checks that a block that is **invalid** from a gossip perspective gets rejected when using `broadcast_validation=consensus`.
#[tokio::test(flavor = "multi_thread", worker_threads = 2)] #[tokio::test(flavor = "multi_thread", worker_threads = 2)]
pub async fn blinded_consensus_invalid() { pub async fn blinded_consensus_invalid() {

View File

@ -1,7 +1,7 @@
use beacon_chain::test_utils::RelativeSyncCommittee; use beacon_chain::test_utils::RelativeSyncCommittee;
use beacon_chain::{ use beacon_chain::{
test_utils::{AttestationStrategy, BeaconChainHarness, BlockStrategy, EphemeralHarnessType}, test_utils::{AttestationStrategy, BeaconChainHarness, BlockStrategy, EphemeralHarnessType},
BeaconChain, StateSkipConfig, WhenSlotSkipped, MAXIMUM_GOSSIP_CLOCK_DISPARITY, BeaconChain, StateSkipConfig, WhenSlotSkipped,
}; };
use environment::null_logger; use environment::null_logger;
use eth2::{ use eth2::{
@ -1251,6 +1251,22 @@ impl ApiTester {
self self
} }
pub async fn test_post_beacon_blocks_ssz_valid(mut self) -> Self {
let next_block = &self.next_block;
self.client
.post_beacon_blocks_ssz(next_block)
.await
.unwrap();
assert!(
self.network_rx.network_recv.recv().await.is_some(),
"valid blocks should be sent to network"
);
self
}
pub async fn test_post_beacon_blocks_invalid(mut self) -> Self { pub async fn test_post_beacon_blocks_invalid(mut self) -> Self {
let block = self let block = self
.harness .harness
@ -1278,6 +1294,33 @@ impl ApiTester {
self self
} }
pub async fn test_post_beacon_blocks_ssz_invalid(mut self) -> Self {
let block = self
.harness
.make_block_with_modifier(
self.harness.get_current_state(),
self.harness.get_current_slot(),
|b| {
*b.state_root_mut() = Hash256::zero();
},
)
.await
.0;
assert!(self
.client
.post_beacon_blocks_ssz(&SignedBlockContents::from(block))
.await
.is_err());
assert!(
self.network_rx.network_recv.recv().await.is_some(),
"gossip valid blocks should be sent to network"
);
self
}
pub async fn test_beacon_blocks(self) -> Self { pub async fn test_beacon_blocks(self) -> Self {
for block_id in self.interesting_block_ids() { for block_id in self.interesting_block_ids() {
let expected = block_id let expected = block_id
@ -2282,7 +2325,9 @@ impl ApiTester {
.unwrap(); .unwrap();
self.chain.slot_clock.set_current_time( self.chain.slot_clock.set_current_time(
current_epoch_start - MAXIMUM_GOSSIP_CLOCK_DISPARITY - Duration::from_millis(1), current_epoch_start
- self.chain.spec.maximum_gossip_clock_disparity()
- Duration::from_millis(1),
); );
let dependent_root = self let dependent_root = self
@ -2319,9 +2364,9 @@ impl ApiTester {
"should not get attester duties outside of tolerance" "should not get attester duties outside of tolerance"
); );
self.chain self.chain.slot_clock.set_current_time(
.slot_clock current_epoch_start - self.chain.spec.maximum_gossip_clock_disparity(),
.set_current_time(current_epoch_start - MAXIMUM_GOSSIP_CLOCK_DISPARITY); );
self.client self.client
.get_validator_duties_proposer(current_epoch) .get_validator_duties_proposer(current_epoch)
@ -2554,6 +2599,66 @@ impl ApiTester {
} }
} }
pub async fn test_blinded_block_production_ssz<Payload: AbstractExecPayload<E>>(&self) {
let fork = self.chain.canonical_head.cached_head().head_fork();
let genesis_validators_root = self.chain.genesis_validators_root;
for _ in 0..E::slots_per_epoch() * 3 {
let slot = self.chain.slot().unwrap();
let epoch = self.chain.epoch().unwrap();
let proposer_pubkey_bytes = self
.client
.get_validator_duties_proposer(epoch)
.await
.unwrap()
.data
.into_iter()
.find(|duty| duty.slot == slot)
.map(|duty| duty.pubkey)
.unwrap();
let proposer_pubkey = (&proposer_pubkey_bytes).try_into().unwrap();
let sk = self
.validator_keypairs()
.iter()
.find(|kp| kp.pk == proposer_pubkey)
.map(|kp| kp.sk.clone())
.unwrap();
let randao_reveal = {
let domain = self.chain.spec.get_domain(
epoch,
Domain::Randao,
&fork,
genesis_validators_root,
);
let message = epoch.signing_root(domain);
sk.sign(message).into()
};
let block = self
.client
.get_validator_blinded_blocks::<E, Payload>(slot, &randao_reveal, None)
.await
.unwrap()
.data;
let signed_block = block.sign(&sk, &fork, genesis_validators_root, &self.chain.spec);
self.client
.post_beacon_blinded_blocks_ssz(&signed_block)
.await
.unwrap();
// This converts the generic `Payload` to a concrete type for comparison.
let head_block = SignedBeaconBlock::from(signed_block.clone());
assert_eq!(head_block, signed_block);
self.chain.slot_clock.set_slot(slot.as_u64() + 1);
}
}
pub async fn test_blinded_block_production_no_verify_randao<Payload: AbstractExecPayload<E>>( pub async fn test_blinded_block_production_no_verify_randao<Payload: AbstractExecPayload<E>>(
self, self,
) -> Self { ) -> Self {
@ -2997,6 +3102,69 @@ impl ApiTester {
self self
} }
pub async fn test_post_validator_liveness_epoch(self) -> Self {
let epoch = self.chain.epoch().unwrap();
let head_state = self.chain.head_beacon_state_cloned();
let indices = (0..head_state.validators().len())
.map(|i| i as u64)
.collect::<Vec<_>>();
// Construct the expected response
let expected: Vec<StandardLivenessResponseData> = head_state
.validators()
.iter()
.enumerate()
.map(|(index, _)| StandardLivenessResponseData {
index: index as u64,
is_live: false,
})
.collect();
let result = self
.client
.post_validator_liveness_epoch(epoch, indices.clone())
.await
.unwrap()
.data;
assert_eq!(result, expected);
// Attest to the current slot
self.client
.post_beacon_pool_attestations(self.attestations.as_slice())
.await
.unwrap();
let result = self
.client
.post_validator_liveness_epoch(epoch, indices.clone())
.await
.unwrap()
.data;
let committees = head_state
.get_beacon_committees_at_slot(self.chain.slot().unwrap())
.unwrap();
let attesting_validators: Vec<usize> = committees
.into_iter()
.flat_map(|committee| committee.committee.iter().cloned())
.collect();
// All attesters should now be considered live
let expected = expected
.into_iter()
.map(|mut a| {
if attesting_validators.contains(&(a.index as usize)) {
a.is_live = true;
}
a
})
.collect::<Vec<_>>();
assert_eq!(result, expected);
self
}
// Helper function for tests that require a valid RANDAO signature. // Helper function for tests that require a valid RANDAO signature.
async fn get_test_randao(&self, slot: Slot, epoch: Epoch) -> (u64, SignatureBytes) { async fn get_test_randao(&self, slot: Slot, epoch: Epoch) -> (u64, SignatureBytes) {
let fork = self.chain.canonical_head.cached_head().head_fork(); let fork = self.chain.canonical_head.cached_head().head_fork();
@ -4409,6 +4577,22 @@ async fn post_beacon_blocks_valid() {
ApiTester::new().await.test_post_beacon_blocks_valid().await; ApiTester::new().await.test_post_beacon_blocks_valid().await;
} }
#[tokio::test(flavor = "multi_thread", worker_threads = 2)]
async fn post_beacon_blocks_ssz_valid() {
ApiTester::new()
.await
.test_post_beacon_blocks_ssz_valid()
.await;
}
#[tokio::test(flavor = "multi_thread", worker_threads = 2)]
async fn test_post_beacon_blocks_ssz_invalid() {
ApiTester::new()
.await
.test_post_beacon_blocks_ssz_invalid()
.await;
}
#[tokio::test(flavor = "multi_thread", worker_threads = 2)] #[tokio::test(flavor = "multi_thread", worker_threads = 2)]
async fn post_beacon_blocks_invalid() { async fn post_beacon_blocks_invalid() {
ApiTester::new() ApiTester::new()
@ -4605,6 +4789,14 @@ async fn blinded_block_production_full_payload_premerge() {
.await; .await;
} }
#[tokio::test(flavor = "multi_thread", worker_threads = 2)]
async fn blinded_block_production_ssz_full_payload_premerge() {
ApiTester::new()
.await
.test_blinded_block_production_ssz::<FullPayload<_>>()
.await;
}
#[tokio::test(flavor = "multi_thread", worker_threads = 2)] #[tokio::test(flavor = "multi_thread", worker_threads = 2)]
async fn blinded_block_production_with_skip_slots_full_payload_premerge() { async fn blinded_block_production_with_skip_slots_full_payload_premerge() {
ApiTester::new() ApiTester::new()
@ -4614,6 +4806,15 @@ async fn blinded_block_production_with_skip_slots_full_payload_premerge() {
.await; .await;
} }
#[tokio::test(flavor = "multi_thread", worker_threads = 2)]
async fn blinded_block_production_ssz_with_skip_slots_full_payload_premerge() {
ApiTester::new()
.await
.skip_slots(E::slots_per_epoch() * 2)
.test_blinded_block_production_ssz::<FullPayload<_>>()
.await;
}
#[tokio::test(flavor = "multi_thread", worker_threads = 2)] #[tokio::test(flavor = "multi_thread", worker_threads = 2)]
async fn blinded_block_production_no_verify_randao_full_payload_premerge() { async fn blinded_block_production_no_verify_randao_full_payload_premerge() {
ApiTester::new() ApiTester::new()
@ -4891,6 +5092,14 @@ async fn builder_works_post_capella() {
.await; .await;
} }
#[tokio::test(flavor = "multi_thread", worker_threads = 2)]
async fn post_validator_liveness_epoch() {
ApiTester::new()
.await
.test_post_validator_liveness_epoch()
.await;
}
#[tokio::test(flavor = "multi_thread", worker_threads = 2)] #[tokio::test(flavor = "multi_thread", worker_threads = 2)]
async fn lighthouse_endpoints() { async fn lighthouse_endpoints() {
ApiTester::new() ApiTester::new()

View File

@ -1,6 +1,6 @@
use crate::Context; use crate::Context;
use beacon_chain::BeaconChainTypes; use beacon_chain::BeaconChainTypes;
use lighthouse_metrics::{Encoder, TextEncoder}; use lighthouse_metrics::TextEncoder;
use lighthouse_network::prometheus_client::encoding::text::encode; use lighthouse_network::prometheus_client::encoding::text::encode;
use malloc_utils::scrape_allocator_metrics; use malloc_utils::scrape_allocator_metrics;
@ -9,7 +9,7 @@ pub use lighthouse_metrics::*;
pub fn gather_prometheus_metrics<T: BeaconChainTypes>( pub fn gather_prometheus_metrics<T: BeaconChainTypes>(
ctx: &Context<T>, ctx: &Context<T>,
) -> std::result::Result<String, String> { ) -> std::result::Result<String, String> {
let mut buffer = vec![]; let mut buffer = String::new();
let encoder = TextEncoder::new(); let encoder = TextEncoder::new();
// There are two categories of metrics: // There are two categories of metrics:
@ -50,7 +50,7 @@ pub fn gather_prometheus_metrics<T: BeaconChainTypes>(
} }
encoder encoder
.encode(&lighthouse_metrics::gather(), &mut buffer) .encode_utf8(&lighthouse_metrics::gather(), &mut buffer)
.unwrap(); .unwrap();
// encode gossipsub metrics also if they exist // encode gossipsub metrics also if they exist
if let Some(registry) = ctx.gossipsub_registry.as_ref() { if let Some(registry) = ctx.gossipsub_registry.as_ref() {
@ -59,5 +59,5 @@ pub fn gather_prometheus_metrics<T: BeaconChainTypes>(
} }
} }
String::from_utf8(buffer).map_err(|e| format!("Failed to encode prometheus info: {:?}", e)) Ok(buffer)
} }
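This hunk tracks the prometheus-client 0.21 API, whose text encoder writes into a String directly, removing the final from_utf8 step. A self-contained sketch of that encoding style using prometheus-client itself; the metric name and helper function are invented for illustration:

use prometheus_client::encoding::text::encode;
use prometheus_client::metrics::counter::Counter;
use prometheus_client::registry::Registry;

fn render_metrics() -> Result<String, std::fmt::Error> {
    let mut registry = Registry::default();
    let requests: Counter = Counter::default();
    registry.register("requests", "Number of requests served", requests.clone());
    requests.inc();

    // 0.21 encodes straight into a `String` (any `fmt::Write` target), so the
    // output is valid UTF-8 by construction.
    let mut buffer = String::new();
    encode(&mut buffer, &registry)?;
    Ok(buffer)
}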

View File

@ -5,7 +5,7 @@ authors = ["Sigma Prime <contact@sigmaprime.io>"]
edition = "2021" edition = "2021"
[dependencies] [dependencies]
discv5 = { version = "0.3.0", features = ["libp2p"]} discv5 = { version = "0.3.1", features = ["libp2p"] }
unsigned-varint = { version = "0.6.0", features = ["codec"] } unsigned-varint = { version = "0.6.0", features = ["codec"] }
types = { path = "../../consensus/types" } types = { path = "../../consensus/types" }
ssz_types = "0.5.4" ssz_types = "0.5.4"
@ -40,15 +40,15 @@ directory = { path = "../../common/directory" }
regex = "1.5.5" regex = "1.5.5"
strum = { version = "0.24.0", features = ["derive"] } strum = { version = "0.24.0", features = ["derive"] }
superstruct = "0.5.0" superstruct = "0.5.0"
prometheus-client = "0.18.0" prometheus-client = "0.21.0"
unused_port = { path = "../../common/unused_port" } unused_port = { path = "../../common/unused_port" }
delay_map = "0.3.0" delay_map = "0.3.0"
void = "1" void = "1"
[dependencies.libp2p] [dependencies.libp2p]
version = "0.50.0" version = "0.52"
default-features = false default-features = false
features = ["websocket", "identify", "mplex", "yamux", "noise", "gossipsub", "dns", "tcp", "tokio", "plaintext", "secp256k1", "macros", "ecdsa"] features = ["websocket", "identify", "yamux", "noise", "gossipsub", "dns", "tcp", "tokio", "plaintext", "secp256k1", "macros", "ecdsa"]
[dev-dependencies] [dev-dependencies]
slog-term = "2.6.0" slog-term = "2.6.0"

View File

@ -6,10 +6,7 @@ use directory::{
DEFAULT_BEACON_NODE_DIR, DEFAULT_HARDCODED_NETWORK, DEFAULT_NETWORK_DIR, DEFAULT_ROOT_DIR, DEFAULT_BEACON_NODE_DIR, DEFAULT_HARDCODED_NETWORK, DEFAULT_NETWORK_DIR, DEFAULT_ROOT_DIR,
}; };
use discv5::{Discv5Config, Discv5ConfigBuilder}; use discv5::{Discv5Config, Discv5ConfigBuilder};
use libp2p::gossipsub::{ use libp2p::gossipsub;
FastMessageId, GossipsubConfig, GossipsubConfigBuilder, GossipsubMessage, MessageId,
RawGossipsubMessage, ValidationMode,
};
use libp2p::Multiaddr; use libp2p::Multiaddr;
use serde_derive::{Deserialize, Serialize}; use serde_derive::{Deserialize, Serialize};
use sha2::{Digest, Sha256}; use sha2::{Digest, Sha256};
@ -19,11 +16,6 @@ use std::sync::Arc;
use std::time::Duration; use std::time::Duration;
use types::{ForkContext, ForkName}; use types::{ForkContext, ForkName};
/// The maximum transmit size of gossip messages in bytes pre-merge.
const GOSSIP_MAX_SIZE: usize = 1_048_576; // 1M
/// The maximum transmit size of gossip messages in bytes post-merge.
const GOSSIP_MAX_SIZE_POST_MERGE: usize = 10 * 1_048_576; // 10M
/// The cache time is set to accommodate the circulation time of an attestation. /// The cache time is set to accommodate the circulation time of an attestation.
/// ///
/// The p2p spec declares that we accept attestations within the following range: /// The p2p spec declares that we accept attestations within the following range:
@ -38,20 +30,20 @@ const GOSSIP_MAX_SIZE_POST_MERGE: usize = 10 * 1_048_576; // 10M
/// another 500ms for "fudge factor". /// another 500ms for "fudge factor".
pub const DUPLICATE_CACHE_TIME: Duration = Duration::from_secs(33 * 12 + 1); pub const DUPLICATE_CACHE_TIME: Duration = Duration::from_secs(33 * 12 + 1);
// We treat uncompressed messages as invalid and never use the INVALID_SNAPPY_DOMAIN as in the
// specification. We leave it here for posterity.
// const MESSAGE_DOMAIN_INVALID_SNAPPY: [u8; 4] = [0, 0, 0, 0];
const MESSAGE_DOMAIN_VALID_SNAPPY: [u8; 4] = [1, 0, 0, 0];
/// The maximum size of gossip messages. /// The maximum size of gossip messages.
pub fn gossip_max_size(is_merge_enabled: bool) -> usize { pub fn gossip_max_size(is_merge_enabled: bool, gossip_max_size: usize) -> usize {
if is_merge_enabled { if is_merge_enabled {
GOSSIP_MAX_SIZE_POST_MERGE gossip_max_size
} else { } else {
GOSSIP_MAX_SIZE gossip_max_size / 10
} }
} }
pub struct GossipsubConfigParams {
pub message_domain_valid_snappy: [u8; 4],
pub gossip_max_size: usize,
}
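The two hard-coded gossip constants become caller-supplied parameters, with the pre-merge limit derived as a tenth of the post-merge maximum. A self-contained sketch of the new sizing logic with the old defaults plugged back in:

pub struct GossipsubConfigParams {
    pub message_domain_valid_snappy: [u8; 4],
    pub gossip_max_size: usize,
}

pub fn gossip_max_size(is_merge_enabled: bool, gossip_max_size: usize) -> usize {
    if is_merge_enabled {
        gossip_max_size
    } else {
        gossip_max_size / 10
    }
}

fn main() {
    let params = GossipsubConfigParams {
        message_domain_valid_snappy: [1, 0, 0, 0], // the spec's "valid snappy" domain
        gossip_max_size: 10 * 1_048_576,           // 10 MiB post-merge maximum
    };
    // Pre-merge networks fall back to the old 1 MiB cap.
    assert_eq!(gossip_max_size(false, params.gossip_max_size), 1_048_576);
    assert_eq!(gossip_max_size(true, params.gossip_max_size), 10 * 1_048_576);
}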
#[derive(Clone, Debug, Serialize, Deserialize)] #[derive(Clone, Debug, Serialize, Deserialize)]
#[serde(default)] #[serde(default)]
/// Network configuration for lighthouse. /// Network configuration for lighthouse.
@ -83,7 +75,7 @@ pub struct Config {
/// Gossipsub configuration parameters. /// Gossipsub configuration parameters.
#[serde(skip)] #[serde(skip)]
pub gs_config: GossipsubConfig, pub gs_config: gossipsub::Config,
/// Discv5 configuration parameters. /// Discv5 configuration parameters.
#[serde(skip)] #[serde(skip)]
@ -265,7 +257,7 @@ impl Default for Config {
// Note: Using the default config here. Use `gossipsub_config` function for getting // Note: Using the default config here. Use `gossipsub_config` function for getting
// Lighthouse specific configuration for gossipsub. // Lighthouse specific configuration for gossipsub.
let gs_config = GossipsubConfigBuilder::default() let gs_config = gossipsub::ConfigBuilder::default()
.build() .build()
.expect("valid gossipsub configuration"); .expect("valid gossipsub configuration");
@ -416,16 +408,20 @@ impl From<u8> for NetworkLoad {
} }
/// Return a Lighthouse specific `GossipsubConfig` where the `message_id_fn` depends on the current fork. /// Return a Lighthouse specific `GossipsubConfig` where the `message_id_fn` depends on the current fork.
pub fn gossipsub_config(network_load: u8, fork_context: Arc<ForkContext>) -> GossipsubConfig { pub fn gossipsub_config(
network_load: u8,
fork_context: Arc<ForkContext>,
gossipsub_config_params: GossipsubConfigParams,
) -> gossipsub::Config {
// The function used to generate a gossipsub message id // The function used to generate a gossipsub message id
// We use the first 8 bytes of SHA256(topic, data) for content addressing // We use the first 8 bytes of SHA256(topic, data) for content addressing
let fast_gossip_message_id = |message: &RawGossipsubMessage| { let fast_gossip_message_id = |message: &gossipsub::RawMessage| {
let data = [message.topic.as_str().as_bytes(), &message.data].concat(); let data = [message.topic.as_str().as_bytes(), &message.data].concat();
FastMessageId::from(&Sha256::digest(data)[..8]) gossipsub::FastMessageId::from(&Sha256::digest(data)[..8])
}; };
fn prefix( fn prefix(
prefix: [u8; 4], prefix: [u8; 4],
message: &GossipsubMessage, message: &gossipsub::Message,
fork_context: Arc<ForkContext>, fork_context: Arc<ForkContext>,
) -> Vec<u8> { ) -> Vec<u8> {
let topic_bytes = message.topic.as_str().as_bytes(); let topic_bytes = message.topic.as_str().as_bytes();
@ -451,20 +447,23 @@ pub fn gossipsub_config(network_load: u8, fork_context: Arc<ForkContext>) -> Gos
} }
} }
} }
let message_domain_valid_snappy = gossipsub_config_params.message_domain_valid_snappy;
let is_merge_enabled = fork_context.fork_exists(ForkName::Merge); let is_merge_enabled = fork_context.fork_exists(ForkName::Merge);
let gossip_message_id = move |message: &GossipsubMessage| { let gossip_message_id = move |message: &gossipsub::Message| {
MessageId::from( gossipsub::MessageId::from(
&Sha256::digest( &Sha256::digest(
prefix(MESSAGE_DOMAIN_VALID_SNAPPY, message, fork_context.clone()).as_slice(), prefix(message_domain_valid_snappy, message, fork_context.clone()).as_slice(),
)[..20], )[..20],
) )
}; };
let load = NetworkLoad::from(network_load); let load = NetworkLoad::from(network_load);
GossipsubConfigBuilder::default() gossipsub::ConfigBuilder::default()
.max_transmit_size(gossip_max_size(is_merge_enabled)) .max_transmit_size(gossip_max_size(
is_merge_enabled,
gossipsub_config_params.gossip_max_size,
))
.heartbeat_interval(load.heartbeat_interval) .heartbeat_interval(load.heartbeat_interval)
.mesh_n(load.mesh_n) .mesh_n(load.mesh_n)
.mesh_n_low(load.mesh_n_low) .mesh_n_low(load.mesh_n_low)
@ -476,7 +475,7 @@ pub fn gossipsub_config(network_load: u8, fork_context: Arc<ForkContext>) -> Gos
.max_messages_per_rpc(Some(500)) // Responses to IWANT can be quite large .max_messages_per_rpc(Some(500)) // Responses to IWANT can be quite large
.history_gossip(load.history_gossip) .history_gossip(load.history_gossip)
.validate_messages() // require validation before propagation .validate_messages() // require validation before propagation
.validation_mode(ValidationMode::Anonymous) .validation_mode(gossipsub::ValidationMode::Anonymous)
.duplicate_cache_time(DUPLICATE_CACHE_TIME) .duplicate_cache_time(DUPLICATE_CACHE_TIME)
.message_id_fn(gossip_message_id) .message_id_fn(gossip_message_id)
.fast_message_id_fn(fast_gossip_message_id) .fast_message_id_fn(fast_gossip_message_id)
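Both message-id functions hash with SHA256: the fast id keeps the first 8 bytes of SHA256(topic || data), and the canonical id prefixes the now-configurable message domain and keeps 20 bytes. A simplified standalone sketch, assuming the sha2 crate; the real prefix function additionally length-prefixes the topic and varies by fork:

use sha2::{Digest, Sha256};

// First 8 bytes of SHA256(topic || data), as in `fast_gossip_message_id`.
fn fast_id(topic: &str, data: &[u8]) -> [u8; 8] {
    let concat = [topic.as_bytes(), data].concat();
    Sha256::digest(&concat)[..8].try_into().expect("8 bytes")
}

// 20 bytes of SHA256(domain || payload); the payload construction is
// collapsed here for brevity.
fn canonical_id(domain: [u8; 4], payload: &[u8]) -> [u8; 20] {
    let input = [domain.as_slice(), payload].concat();
    Sha256::digest(&input)[..20].try_into().expect("20 bytes")
}

fn main() {
    assert_eq!(fast_id("/eth2/beacon_block/ssz_snappy", b"payload").len(), 8);
    assert_eq!(canonical_id([1, 0, 0, 0], b"topic-and-data").len(), 20);
}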

View File

@ -7,7 +7,7 @@ use super::ENR_FILENAME;
use crate::types::{Enr, EnrAttestationBitfield, EnrSyncCommitteeBitfield}; use crate::types::{Enr, EnrAttestationBitfield, EnrSyncCommitteeBitfield};
use crate::NetworkConfig; use crate::NetworkConfig;
use discv5::enr::EnrKey; use discv5::enr::EnrKey;
use libp2p::core::identity::Keypair; use libp2p::identity::Keypair;
use slog::{debug, warn}; use slog::{debug, warn};
use ssz::{Decode, Encode}; use ssz::{Decode, Encode};
use ssz_types::BitVector; use ssz_types::BitVector;
@ -133,7 +133,7 @@ pub fn build_or_load_enr<T: EthSpec>(
// Build the local ENR. // Build the local ENR.
// Note: Discovery should update the ENR record's IP to the external IP as seen by the // Note: Discovery should update the ENR record's IP to the external IP as seen by the
// majority of our peers, if the CLI doesn't expressly forbid it. // majority of our peers, if the CLI doesn't expressly forbid it.
let enr_key = CombinedKey::from_libp2p(&local_key)?; let enr_key = CombinedKey::from_libp2p(local_key)?;
let mut local_enr = build_enr::<T>(&enr_key, config, enr_fork_id)?; let mut local_enr = build_enr::<T>(&enr_key, config, enr_fork_id)?;
use_or_load_enr(&enr_key, &mut local_enr, config, log)?; use_or_load_enr(&enr_key, &mut local_enr, config, log)?;

View File

@ -1,10 +1,9 @@
//! ENR extension trait to support libp2p integration. //! ENR extension trait to support libp2p integration.
use crate::{Enr, Multiaddr, PeerId}; use crate::{Enr, Multiaddr, PeerId};
use discv5::enr::{CombinedKey, CombinedPublicKey}; use discv5::enr::{CombinedKey, CombinedPublicKey};
use libp2p::{ use libp2p::core::multiaddr::Protocol;
core::{identity::Keypair, identity::PublicKey, multiaddr::Protocol}, use libp2p::identity::{ed25519, secp256k1, KeyType, Keypair, PublicKey};
identity::secp256k1,
};
use tiny_keccak::{Hasher, Keccak}; use tiny_keccak::{Hasher, Keccak};
/// Extend ENR for libp2p types. /// Extend ENR for libp2p types.
@ -38,7 +37,8 @@ pub trait CombinedKeyPublicExt {
/// Extend ENR CombinedKey for conversion to libp2p keys. /// Extend ENR CombinedKey for conversion to libp2p keys.
pub trait CombinedKeyExt { pub trait CombinedKeyExt {
/// Converts a libp2p key into an ENR combined key. /// Converts a libp2p key into an ENR combined key.
fn from_libp2p(key: &libp2p::core::identity::Keypair) -> Result<CombinedKey, &'static str>; fn from_libp2p(key: Keypair) -> Result<CombinedKey, &'static str>;
/// Converts a [`secp256k1::Keypair`] into an ENR [`CombinedKey`]. /// Converts a [`secp256k1::Keypair`] into an ENR [`CombinedKey`].
fn from_secp256k1(key: &secp256k1::Keypair) -> CombinedKey; fn from_secp256k1(key: &secp256k1::Keypair) -> CombinedKey;
} }
@ -93,14 +93,14 @@ impl EnrExt for Enr {
if let Some(udp) = self.udp4() { if let Some(udp) = self.udp4() {
let mut multiaddr: Multiaddr = ip.into(); let mut multiaddr: Multiaddr = ip.into();
multiaddr.push(Protocol::Udp(udp)); multiaddr.push(Protocol::Udp(udp));
multiaddr.push(Protocol::P2p(peer_id.into())); multiaddr.push(Protocol::P2p(peer_id));
multiaddrs.push(multiaddr); multiaddrs.push(multiaddr);
} }
if let Some(tcp) = self.tcp4() { if let Some(tcp) = self.tcp4() {
let mut multiaddr: Multiaddr = ip.into(); let mut multiaddr: Multiaddr = ip.into();
multiaddr.push(Protocol::Tcp(tcp)); multiaddr.push(Protocol::Tcp(tcp));
multiaddr.push(Protocol::P2p(peer_id.into())); multiaddr.push(Protocol::P2p(peer_id));
multiaddrs.push(multiaddr); multiaddrs.push(multiaddr);
} }
} }
@ -108,14 +108,14 @@ impl EnrExt for Enr {
if let Some(udp6) = self.udp6() { if let Some(udp6) = self.udp6() {
let mut multiaddr: Multiaddr = ip6.into(); let mut multiaddr: Multiaddr = ip6.into();
multiaddr.push(Protocol::Udp(udp6)); multiaddr.push(Protocol::Udp(udp6));
multiaddr.push(Protocol::P2p(peer_id.into())); multiaddr.push(Protocol::P2p(peer_id));
multiaddrs.push(multiaddr); multiaddrs.push(multiaddr);
} }
if let Some(tcp6) = self.tcp6() { if let Some(tcp6) = self.tcp6() {
let mut multiaddr: Multiaddr = ip6.into(); let mut multiaddr: Multiaddr = ip6.into();
multiaddr.push(Protocol::Tcp(tcp6)); multiaddr.push(Protocol::Tcp(tcp6));
multiaddr.push(Protocol::P2p(peer_id.into())); multiaddr.push(Protocol::P2p(peer_id));
multiaddrs.push(multiaddr); multiaddrs.push(multiaddr);
} }
} }
@ -133,7 +133,7 @@ impl EnrExt for Enr {
if let Some(tcp) = self.tcp4() { if let Some(tcp) = self.tcp4() {
let mut multiaddr: Multiaddr = ip.into(); let mut multiaddr: Multiaddr = ip.into();
multiaddr.push(Protocol::Tcp(tcp)); multiaddr.push(Protocol::Tcp(tcp));
multiaddr.push(Protocol::P2p(peer_id.into())); multiaddr.push(Protocol::P2p(peer_id));
multiaddrs.push(multiaddr); multiaddrs.push(multiaddr);
} }
} }
@ -141,7 +141,7 @@ impl EnrExt for Enr {
if let Some(tcp6) = self.tcp6() { if let Some(tcp6) = self.tcp6() {
let mut multiaddr: Multiaddr = ip6.into(); let mut multiaddr: Multiaddr = ip6.into();
multiaddr.push(Protocol::Tcp(tcp6)); multiaddr.push(Protocol::Tcp(tcp6));
multiaddr.push(Protocol::P2p(peer_id.into())); multiaddr.push(Protocol::P2p(peer_id));
multiaddrs.push(multiaddr); multiaddrs.push(multiaddr);
} }
} }
@ -159,7 +159,7 @@ impl EnrExt for Enr {
if let Some(udp) = self.udp4() { if let Some(udp) = self.udp4() {
let mut multiaddr: Multiaddr = ip.into(); let mut multiaddr: Multiaddr = ip.into();
multiaddr.push(Protocol::Udp(udp)); multiaddr.push(Protocol::Udp(udp));
multiaddr.push(Protocol::P2p(peer_id.into())); multiaddr.push(Protocol::P2p(peer_id));
multiaddrs.push(multiaddr); multiaddrs.push(multiaddr);
} }
} }
@ -167,7 +167,7 @@ impl EnrExt for Enr {
if let Some(udp6) = self.udp6() { if let Some(udp6) = self.udp6() {
let mut multiaddr: Multiaddr = ip6.into(); let mut multiaddr: Multiaddr = ip6.into();
multiaddr.push(Protocol::Udp(udp6)); multiaddr.push(Protocol::Udp(udp6));
multiaddr.push(Protocol::P2p(peer_id.into())); multiaddr.push(Protocol::P2p(peer_id));
multiaddrs.push(multiaddr); multiaddrs.push(multiaddr);
} }
} }
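All of the edits in this hunk come from libp2p 0.52's Protocol::P2p taking a PeerId directly instead of a Multihash, which drops the .into() calls. A small sketch of building such a multiaddr under that API, assuming the secp256k1 feature; the values are illustrative:

use libp2p::identity::Keypair;
use libp2p::multiaddr::{Multiaddr, Protocol};
use libp2p::PeerId;

fn tcp_multiaddr(ip: std::net::Ipv4Addr, port: u16, peer_id: PeerId) -> Multiaddr {
    let mut addr: Multiaddr = ip.into();
    addr.push(Protocol::Tcp(port));
    // libp2p 0.52: `Protocol::P2p` accepts the `PeerId` itself.
    addr.push(Protocol::P2p(peer_id));
    addr
}

fn main() {
    let peer_id = Keypair::generate_secp256k1().public().to_peer_id();
    println!("{}", tcp_multiaddr([127, 0, 0, 1].into(), 9000, peer_id));
}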
@ -204,18 +204,16 @@ impl CombinedKeyPublicExt for CombinedPublicKey {
match self { match self {
Self::Secp256k1(pk) => { Self::Secp256k1(pk) => {
let pk_bytes = pk.to_sec1_bytes(); let pk_bytes = pk.to_sec1_bytes();
let libp2p_pk = libp2p::core::PublicKey::Secp256k1( let libp2p_pk: PublicKey = secp256k1::PublicKey::try_from_bytes(&pk_bytes)
libp2p::core::identity::secp256k1::PublicKey::decode(&pk_bytes) .expect("valid public key")
.expect("valid public key"), .into();
);
PeerId::from_public_key(&libp2p_pk) PeerId::from_public_key(&libp2p_pk)
} }
Self::Ed25519(pk) => { Self::Ed25519(pk) => {
let pk_bytes = pk.to_bytes(); let pk_bytes = pk.to_bytes();
let libp2p_pk = libp2p::core::PublicKey::Ed25519( let libp2p_pk: PublicKey = ed25519::PublicKey::try_from_bytes(&pk_bytes)
libp2p::core::identity::ed25519::PublicKey::decode(&pk_bytes) .expect("valid public key")
.expect("valid public key"), .into();
);
PeerId::from_public_key(&libp2p_pk) PeerId::from_public_key(&libp2p_pk)
} }
} }
@ -223,18 +221,25 @@ impl CombinedKeyPublicExt for CombinedPublicKey {
} }
impl CombinedKeyExt for CombinedKey { impl CombinedKeyExt for CombinedKey {
fn from_libp2p(key: &libp2p::core::identity::Keypair) -> Result<CombinedKey, &'static str> { fn from_libp2p(key: Keypair) -> Result<CombinedKey, &'static str> {
match key { match key.key_type() {
Keypair::Secp256k1(key) => Ok(CombinedKey::from_secp256k1(key)), KeyType::Secp256k1 => {
Keypair::Ed25519(key) => { let key = key.try_into_secp256k1().expect("right key type");
let secret =
discv5::enr::k256::ecdsa::SigningKey::from_slice(&key.secret().to_bytes())
.expect("libp2p key must be valid");
Ok(CombinedKey::Secp256k1(secret))
}
KeyType::Ed25519 => {
let key = key.try_into_ed25519().expect("right key type");
let ed_keypair = discv5::enr::ed25519_dalek::SigningKey::from_bytes( let ed_keypair = discv5::enr::ed25519_dalek::SigningKey::from_bytes(
&(key.encode()[..32]) &(key.to_bytes()[..32])
.try_into() .try_into()
.expect("libp2p key must be valid"), .expect("libp2p key must be valid"),
); );
Ok(CombinedKey::from(ed_keypair)) Ok(CombinedKey::from(ed_keypair))
} }
Keypair::Ecdsa(_) => Err("Ecdsa keypairs not supported"), _ => Err("Unsupported keypair kind"),
} }
} }
fn from_secp256k1(key: &secp256k1::Keypair) -> Self { fn from_secp256k1(key: &secp256k1::Keypair) -> Self {
@ -251,37 +256,46 @@ pub fn peer_id_to_node_id(peer_id: &PeerId) -> Result<discv5::enr::NodeId, Strin
// if generated from a PublicKey with Identity multihash. // if generated from a PublicKey with Identity multihash.
let pk_bytes = &peer_id.to_bytes()[2..]; let pk_bytes = &peer_id.to_bytes()[2..];
match PublicKey::from_protobuf_encoding(pk_bytes).map_err(|e| { let public_key = PublicKey::try_decode_protobuf(pk_bytes).map_err(|e| {
format!( format!(
" Cannot parse libp2p public key public key from peer id: {}", " Cannot parse libp2p public key public key from peer id: {}",
e e
) )
})? { })?;
PublicKey::Secp256k1(pk) => {
let uncompressed_key_bytes = &pk.encode_uncompressed()[1..]; match public_key.key_type() {
KeyType::Secp256k1 => {
let pk = public_key
.clone()
.try_into_secp256k1()
.expect("right key type");
let uncompressed_key_bytes = &pk.to_bytes_uncompressed()[1..];
let mut output = [0_u8; 32]; let mut output = [0_u8; 32];
let mut hasher = Keccak::v256(); let mut hasher = Keccak::v256();
hasher.update(uncompressed_key_bytes); hasher.update(uncompressed_key_bytes);
hasher.finalize(&mut output); hasher.finalize(&mut output);
Ok(discv5::enr::NodeId::parse(&output).expect("Must be correct length")) Ok(discv5::enr::NodeId::parse(&output).expect("Must be correct length"))
} }
PublicKey::Ed25519(pk) => { KeyType::Ed25519 => {
let uncompressed_key_bytes = pk.encode(); let pk = public_key
.clone()
.try_into_ed25519()
.expect("right key type");
let uncompressed_key_bytes = pk.to_bytes();
let mut output = [0_u8; 32]; let mut output = [0_u8; 32];
let mut hasher = Keccak::v256(); let mut hasher = Keccak::v256();
hasher.update(&uncompressed_key_bytes); hasher.update(&uncompressed_key_bytes);
hasher.finalize(&mut output); hasher.finalize(&mut output);
Ok(discv5::enr::NodeId::parse(&output).expect("Must be correct length")) Ok(discv5::enr::NodeId::parse(&output).expect("Must be correct length"))
} }
PublicKey::Ecdsa(_) => Err(format!(
"Unsupported public key (Ecdsa) from peer {}", _ => Err(format!("Unsupported public key from peer {}", peer_id)),
peer_id
)),
} }
} }
#[cfg(test)] #[cfg(test)]
mod tests { mod tests {
use super::*; use super::*;
#[test] #[test]
@ -290,9 +304,9 @@ mod tests {
let sk_bytes = hex::decode(sk_hex).unwrap(); let sk_bytes = hex::decode(sk_hex).unwrap();
let secret_key = discv5::enr::k256::ecdsa::SigningKey::from_slice(&sk_bytes).unwrap(); let secret_key = discv5::enr::k256::ecdsa::SigningKey::from_slice(&sk_bytes).unwrap();
let libp2p_sk = libp2p::identity::secp256k1::SecretKey::from_bytes(sk_bytes).unwrap(); let libp2p_sk = secp256k1::SecretKey::try_from_bytes(sk_bytes).unwrap();
let secp256k1_kp: libp2p::identity::secp256k1::Keypair = libp2p_sk.into(); let secp256k1_kp: secp256k1::Keypair = libp2p_sk.into();
let libp2p_kp = Keypair::Secp256k1(secp256k1_kp); let libp2p_kp: Keypair = secp256k1_kp.into();
let peer_id = libp2p_kp.public().to_peer_id(); let peer_id = libp2p_kp.public().to_peer_id();
let enr = discv5::enr::EnrBuilder::new("v4") let enr = discv5::enr::EnrBuilder::new("v4")
@ -311,9 +325,9 @@ mod tests {
&sk_bytes.clone().try_into().unwrap(), &sk_bytes.clone().try_into().unwrap(),
); );
let libp2p_sk = libp2p::identity::ed25519::SecretKey::from_bytes(sk_bytes).unwrap(); let libp2p_sk = ed25519::SecretKey::try_from_bytes(sk_bytes).unwrap();
let secp256k1_kp: libp2p::identity::ed25519::Keypair = libp2p_sk.into(); let secp256k1_kp: ed25519::Keypair = libp2p_sk.into();
let libp2p_kp = Keypair::Ed25519(secp256k1_kp); let libp2p_kp: Keypair = secp256k1_kp.into();
let peer_id = libp2p_kp.public().to_peer_id(); let peer_id = libp2p_kp.public().to_peer_id();
let enr = discv5::enr::EnrBuilder::new("v4") let enr = discv5::enr::EnrBuilder::new("v4")

View File

@ -16,19 +16,20 @@ pub use enr::{
Eth2Enr, Eth2Enr,
}; };
pub use enr_ext::{peer_id_to_node_id, CombinedKeyExt, EnrExt}; pub use enr_ext::{peer_id_to_node_id, CombinedKeyExt, EnrExt};
pub use libp2p::core::identity::{Keypair, PublicKey}; pub use libp2p::identity::{Keypair, PublicKey};
use enr::{ATTESTATION_BITFIELD_ENR_KEY, ETH2_ENR_KEY, SYNC_COMMITTEE_BITFIELD_ENR_KEY}; use enr::{ATTESTATION_BITFIELD_ENR_KEY, ETH2_ENR_KEY, SYNC_COMMITTEE_BITFIELD_ENR_KEY};
use futures::prelude::*; use futures::prelude::*;
use futures::stream::FuturesUnordered; use futures::stream::FuturesUnordered;
use libp2p::multiaddr::Protocol; use libp2p::multiaddr::Protocol;
use libp2p::swarm::behaviour::{DialFailure, FromSwarm}; use libp2p::swarm::behaviour::{DialFailure, FromSwarm};
use libp2p::swarm::AddressScore; use libp2p::swarm::THandlerInEvent;
pub use libp2p::{ pub use libp2p::{
core::{connection::ConnectionId, ConnectedPoint, Multiaddr, PeerId}, core::{ConnectedPoint, Multiaddr},
identity::PeerId,
swarm::{ swarm::{
dummy::ConnectionHandler, DialError, NetworkBehaviour, NetworkBehaviourAction as NBAction, dummy::ConnectionHandler, ConnectionId, DialError, NetworkBehaviour, NotifyHandler,
NotifyHandler, PollParameters, SubstreamProtocol, PollParameters, SubstreamProtocol, ToSwarm,
}, },
}; };
use lru::LruCache; use lru::LruCache;
@ -191,7 +192,7 @@ pub struct Discovery<TSpec: EthSpec> {
impl<TSpec: EthSpec> Discovery<TSpec> { impl<TSpec: EthSpec> Discovery<TSpec> {
/// NOTE: Creating discovery requires running within a tokio execution environment. /// NOTE: Creating discovery requires running within a tokio execution environment.
pub async fn new( pub async fn new(
local_key: &Keypair, local_key: Keypair,
config: &NetworkConfig, config: &NetworkConfig,
network_globals: Arc<NetworkGlobals<TSpec>>, network_globals: Arc<NetworkGlobals<TSpec>>,
log: &slog::Logger, log: &slog::Logger,
@ -925,22 +926,51 @@ impl<TSpec: EthSpec> Discovery<TSpec> {
impl<TSpec: EthSpec> NetworkBehaviour for Discovery<TSpec> { impl<TSpec: EthSpec> NetworkBehaviour for Discovery<TSpec> {
// Discovery is not a real NetworkBehaviour... // Discovery is not a real NetworkBehaviour...
type ConnectionHandler = ConnectionHandler; type ConnectionHandler = ConnectionHandler;
type OutEvent = DiscoveredPeers; type ToSwarm = DiscoveredPeers;
fn new_handler(&mut self) -> Self::ConnectionHandler { fn handle_established_inbound_connection(
ConnectionHandler &mut self,
_connection_id: ConnectionId,
_peer: PeerId,
_local_addr: &Multiaddr,
_remote_addr: &Multiaddr,
) -> Result<libp2p::swarm::THandler<Self>, libp2p::swarm::ConnectionDenied> {
// TODO: we might want to check discovery's banned IPs here in the future.
Ok(ConnectionHandler)
} }
// Handles the libp2p request to obtain multiaddrs for peer_id's in order to dial them. fn handle_established_outbound_connection(
fn addresses_of_peer(&mut self, peer_id: &PeerId) -> Vec<Multiaddr> { &mut self,
if let Some(enr) = self.enr_of_peer(peer_id) { _connection_id: ConnectionId,
_peer: PeerId,
_addr: &Multiaddr,
_role_override: libp2p::core::Endpoint,
) -> Result<libp2p::swarm::THandler<Self>, libp2p::swarm::ConnectionDenied> {
Ok(ConnectionHandler)
}
fn on_connection_handler_event(
&mut self,
_peer_id: PeerId,
_connection_id: ConnectionId,
_event: void::Void,
) {
}
fn handle_pending_outbound_connection(
&mut self,
_connection_id: ConnectionId,
maybe_peer: Option<PeerId>,
_addresses: &[Multiaddr],
_effective_role: libp2p::core::Endpoint,
) -> Result<Vec<Multiaddr>, libp2p::swarm::ConnectionDenied> {
if let Some(enr) = maybe_peer.and_then(|peer_id| self.enr_of_peer(&peer_id)) {
// ENR's may have multiple Multiaddrs. The multi-addr associated with the UDP // ENR's may have multiple Multiaddrs. The multi-addr associated with the UDP
// port is removed, which is assumed to be associated with the discv5 protocol (and // port is removed, which is assumed to be associated with the discv5 protocol (and
// therefore irrelevant for other libp2p components). // therefore irrelevant for other libp2p components).
enr.multiaddr_tcp() Ok(enr.multiaddr_tcp())
} else { } else {
// PeerId is not known Ok(vec![])
Vec::new()
} }
} }
@ -949,7 +979,7 @@ impl<TSpec: EthSpec> NetworkBehaviour for Discovery<TSpec> {
&mut self, &mut self,
cx: &mut Context, cx: &mut Context,
_: &mut impl PollParameters, _: &mut impl PollParameters,
) -> Poll<NBAction<Self::OutEvent, Self::ConnectionHandler>> { ) -> Poll<ToSwarm<Self::ToSwarm, THandlerInEvent<Self>>> {
if !self.started { if !self.started {
return Poll::Pending; return Poll::Pending;
} }
@ -960,7 +990,7 @@ impl<TSpec: EthSpec> NetworkBehaviour for Discovery<TSpec> {
// Drive the queries and return any results from completed queries // Drive the queries and return any results from completed queries
if let Some(peers) = self.poll_queries(cx) { if let Some(peers) = self.poll_queries(cx) {
// return the result to the peer manager // return the result to the peer manager
return Poll::Ready(NBAction::GenerateEvent(DiscoveredPeers { peers })); return Poll::Ready(ToSwarm::GenerateEvent(DiscoveredPeers { peers }));
} }
// Process the server event stream // Process the server event stream
@ -1034,10 +1064,7 @@ impl<TSpec: EthSpec> NetworkBehaviour for Discovery<TSpec> {
if let Some(address) = addr { if let Some(address) = addr {
// NOTE: This doesn't actually track the external TCP port. More sophisticated NAT handling // NOTE: This doesn't actually track the external TCP port. More sophisticated NAT handling
// should handle this. // should handle this.
return Poll::Ready(NBAction::ReportObservedAddr { return Poll::Ready(ToSwarm::NewExternalAddrCandidate(address));
address,
score: AddressScore::Finite(1),
});
} }
} }
Discv5Event::EnrAdded { .. } Discv5Event::EnrAdded { .. }
@ -1065,8 +1092,9 @@ impl<TSpec: EthSpec> NetworkBehaviour for Discovery<TSpec> {
| FromSwarm::ExpiredListenAddr(_) | FromSwarm::ExpiredListenAddr(_)
| FromSwarm::ListenerError(_) | FromSwarm::ListenerError(_)
| FromSwarm::ListenerClosed(_) | FromSwarm::ListenerClosed(_)
| FromSwarm::NewExternalAddr(_) | FromSwarm::NewExternalAddrCandidate(_)
| FromSwarm::ExpiredExternalAddr(_) => { | FromSwarm::ExternalAddrExpired(_)
| FromSwarm::ExternalAddrConfirmed(_) => {
// Ignore events not relevant to discovery // Ignore events not relevant to discovery
} }
} }
@ -1077,10 +1105,8 @@ impl<TSpec: EthSpec> Discovery<TSpec> {
fn on_dial_failure(&mut self, peer_id: Option<PeerId>, error: &DialError) { fn on_dial_failure(&mut self, peer_id: Option<PeerId>, error: &DialError) {
if let Some(peer_id) = peer_id { if let Some(peer_id) = peer_id {
match error { match error {
DialError::Banned DialError::LocalPeerId { .. }
| DialError::LocalPeerId | DialError::Denied { .. }
| DialError::InvalidPeerId(_)
| DialError::ConnectionIo(_)
| DialError::NoAddresses | DialError::NoAddresses
| DialError::Transport(_) | DialError::Transport(_)
| DialError::WrongPeerId { .. } => { | DialError::WrongPeerId { .. } => {
@ -1088,9 +1114,7 @@ impl<TSpec: EthSpec> Discovery<TSpec> {
debug!(self.log, "Marking peer disconnected in DHT"; "peer_id" => %peer_id); debug!(self.log, "Marking peer disconnected in DHT"; "peer_id" => %peer_id);
self.disconnect_peer(&peer_id); self.disconnect_peer(&peer_id);
} }
DialError::ConnectionLimit(_) DialError::DialPeerConditionFalse(_) | DialError::Aborted => {}
| DialError::DialPeerConditionFalse(_)
| DialError::Aborted => {}
} }
} }
} }
@ -1139,8 +1163,8 @@ mod tests {
false, false,
&log, &log,
); );
let keypair = Keypair::Secp256k1(keypair); let keypair = keypair.into();
Discovery::new(&keypair, &config, Arc::new(globals), &log) Discovery::new(keypair, &config, Arc::new(globals), &log)
.await .await
.unwrap() .unwrap()
} }

View File

@ -21,7 +21,8 @@ use std::{
use strum::IntoEnumIterator; use strum::IntoEnumIterator;
use types::{EthSpec, SyncSubnetId}; use types::{EthSpec, SyncSubnetId};
pub use libp2p::core::{identity::Keypair, Multiaddr}; pub use libp2p::core::Multiaddr;
pub use libp2p::identity::Keypair;
#[allow(clippy::mutable_key_type)] // PeerId in hashmaps are no longer permitted by clippy #[allow(clippy::mutable_key_type)] // PeerId in hashmaps are no longer permitted by clippy
pub mod peerdb; pub mod peerdb;

View File

@ -1,12 +1,14 @@
//! Implementation of [`NetworkBehaviour`] for the [`PeerManager`].
use std::task::{Context, Poll}; use std::task::{Context, Poll};
use futures::StreamExt; use futures::StreamExt;
use libp2p::core::ConnectedPoint; use libp2p::core::ConnectedPoint;
use libp2p::identity::PeerId;
use libp2p::swarm::behaviour::{ConnectionClosed, ConnectionEstablished, DialFailure, FromSwarm}; use libp2p::swarm::behaviour::{ConnectionClosed, ConnectionEstablished, DialFailure, FromSwarm};
use libp2p::swarm::dial_opts::{DialOpts, PeerCondition}; use libp2p::swarm::dial_opts::{DialOpts, PeerCondition};
use libp2p::swarm::dummy::ConnectionHandler; use libp2p::swarm::dummy::ConnectionHandler;
use libp2p::swarm::{NetworkBehaviour, NetworkBehaviourAction, PollParameters}; use libp2p::swarm::{ConnectionId, NetworkBehaviour, PollParameters, ToSwarm};
use libp2p::PeerId;
use slog::{debug, error}; use slog::{debug, error};
use types::EthSpec; use types::EthSpec;
@ -19,20 +21,24 @@ use super::{ConnectingType, PeerManager, PeerManagerEvent, ReportSource};
impl<TSpec: EthSpec> NetworkBehaviour for PeerManager<TSpec> { impl<TSpec: EthSpec> NetworkBehaviour for PeerManager<TSpec> {
type ConnectionHandler = ConnectionHandler; type ConnectionHandler = ConnectionHandler;
type ToSwarm = PeerManagerEvent;
type OutEvent = PeerManagerEvent;
/* Required trait members */ /* Required trait members */
fn new_handler(&mut self) -> Self::ConnectionHandler { fn on_connection_handler_event(
ConnectionHandler &mut self,
_peer_id: PeerId,
_connection_id: ConnectionId,
_event: libp2p::swarm::THandlerOutEvent<Self>,
) {
// no events from the dummy handler
} }
fn poll( fn poll(
&mut self, &mut self,
cx: &mut Context<'_>, cx: &mut Context<'_>,
_params: &mut impl PollParameters, _params: &mut impl PollParameters,
) -> Poll<NetworkBehaviourAction<Self::OutEvent, Self::ConnectionHandler>> { ) -> Poll<ToSwarm<Self::ToSwarm, void::Void>> {
// perform the heartbeat when necessary // perform the heartbeat when necessary
while self.heartbeat.poll_tick(cx).is_ready() { while self.heartbeat.poll_tick(cx).is_ready() {
self.heartbeat(); self.heartbeat();
@ -84,19 +90,17 @@ impl<TSpec: EthSpec> NetworkBehaviour for PeerManager<TSpec> {
} }
if !self.events.is_empty() { if !self.events.is_empty() {
return Poll::Ready(NetworkBehaviourAction::GenerateEvent(self.events.remove(0))); return Poll::Ready(ToSwarm::GenerateEvent(self.events.remove(0)));
} else { } else {
self.events.shrink_to_fit(); self.events.shrink_to_fit();
} }
if let Some((peer_id, maybe_enr)) = self.peers_to_dial.pop_first() { if let Some((peer_id, maybe_enr)) = self.peers_to_dial.pop_first() {
self.inject_peer_connection(&peer_id, ConnectingType::Dialing, maybe_enr); self.inject_peer_connection(&peer_id, ConnectingType::Dialing, maybe_enr);
let handler = self.new_handler(); return Poll::Ready(ToSwarm::Dial {
return Poll::Ready(NetworkBehaviourAction::Dial {
opts: DialOpts::peer_id(peer_id) opts: DialOpts::peer_id(peer_id)
.condition(PeerCondition::Disconnected) .condition(PeerCondition::Disconnected)
.build(), .build(),
handler,
}); });
} }
@ -110,13 +114,31 @@ impl<TSpec: EthSpec> NetworkBehaviour for PeerManager<TSpec> {
endpoint, endpoint,
other_established, other_established,
.. ..
}) => self.on_connection_established(peer_id, endpoint, other_established), }) => {
// NOTE: We still need to handle the [`ConnectionEstablished`] because the
// [`NetworkBehaviour::handle_established_inbound_connection`] and
// [`NetworkBehaviour::handle_established_outbound_connection`] are fallible. This
// means another behaviour can kill the connection early, so we can't consider a
// peer connected until this event is received.
self.on_connection_established(peer_id, endpoint, other_established)
}
FromSwarm::ConnectionClosed(ConnectionClosed { FromSwarm::ConnectionClosed(ConnectionClosed {
peer_id, peer_id,
remaining_established, remaining_established,
.. ..
}) => self.on_connection_closed(peer_id, remaining_established), }) => self.on_connection_closed(peer_id, remaining_established),
FromSwarm::DialFailure(DialFailure { peer_id, .. }) => self.on_dial_failure(peer_id), FromSwarm::DialFailure(DialFailure {
peer_id,
error,
connection_id: _,
}) => {
debug!(self.log, "Failed to dial peer"; "peer_id"=> ?peer_id, "error" => %error);
self.on_dial_failure(peer_id);
}
FromSwarm::ExternalAddrConfirmed(_) => {
// TODO: we likely want to check this against our assumed external tcp
// address
}
FromSwarm::AddressChange(_) FromSwarm::AddressChange(_)
| FromSwarm::ListenFailure(_) | FromSwarm::ListenFailure(_)
| FromSwarm::NewListener(_) | FromSwarm::NewListener(_)
@ -124,13 +146,35 @@ impl<TSpec: EthSpec> NetworkBehaviour for PeerManager<TSpec> {
| FromSwarm::ExpiredListenAddr(_) | FromSwarm::ExpiredListenAddr(_)
| FromSwarm::ListenerError(_) | FromSwarm::ListenerError(_)
| FromSwarm::ListenerClosed(_) | FromSwarm::ListenerClosed(_)
| FromSwarm::NewExternalAddr(_) | FromSwarm::NewExternalAddrCandidate(_)
| FromSwarm::ExpiredExternalAddr(_) => { | FromSwarm::ExternalAddrExpired(_) => {
// The rest of the events we ignore since they are handled in their associated // The rest of the events we ignore since they are handled in their associated
// `SwarmEvent` // `SwarmEvent`
} }
} }
} }
fn handle_established_inbound_connection(
&mut self,
_connection_id: ConnectionId,
_peer: PeerId,
_local_addr: &libp2p::Multiaddr,
_remote_addr: &libp2p::Multiaddr,
) -> Result<libp2p::swarm::THandler<Self>, libp2p::swarm::ConnectionDenied> {
// TODO: we might want to check if we accept this peer or not in the future.
Ok(ConnectionHandler)
}
fn handle_established_outbound_connection(
&mut self,
_connection_id: ConnectionId,
_peer: PeerId,
_addr: &libp2p::Multiaddr,
_role_override: libp2p::core::Endpoint,
) -> Result<libp2p::swarm::THandler<Self>, libp2p::swarm::ConnectionDenied> {
// TODO: we might want to check if we accept this peer or not in the future.
Ok(ConnectionHandler)
}
} }
impl<TSpec: EthSpec> PeerManager<TSpec> { impl<TSpec: EthSpec> PeerManager<TSpec> {

View File

@ -220,9 +220,12 @@ mod tests {
let snappy_protocol_id = ProtocolId::new(SupportedProtocol::StatusV1, Encoding::SSZSnappy); let snappy_protocol_id = ProtocolId::new(SupportedProtocol::StatusV1, Encoding::SSZSnappy);
let fork_context = Arc::new(fork_context(ForkName::Base)); let fork_context = Arc::new(fork_context(ForkName::Base));
let chain_spec = Spec::default_spec();
let mut snappy_outbound_codec = SSZSnappyOutboundCodec::<Spec>::new( let mut snappy_outbound_codec = SSZSnappyOutboundCodec::<Spec>::new(
snappy_protocol_id, snappy_protocol_id,
max_rpc_size(&fork_context), max_rpc_size(&fork_context, chain_spec.max_chunk_size as usize),
fork_context, fork_context,
); );
@ -254,9 +257,12 @@ mod tests {
let snappy_protocol_id = ProtocolId::new(SupportedProtocol::StatusV1, Encoding::SSZSnappy); let snappy_protocol_id = ProtocolId::new(SupportedProtocol::StatusV1, Encoding::SSZSnappy);
let fork_context = Arc::new(fork_context(ForkName::Base)); let fork_context = Arc::new(fork_context(ForkName::Base));
let chain_spec = Spec::default_spec();
let mut snappy_outbound_codec = SSZSnappyOutboundCodec::<Spec>::new( let mut snappy_outbound_codec = SSZSnappyOutboundCodec::<Spec>::new(
snappy_protocol_id, snappy_protocol_id,
max_rpc_size(&fork_context), max_rpc_size(&fork_context, chain_spec.max_chunk_size as usize),
fork_context, fork_context,
); );
@@ -282,7 +288,10 @@ mod tests {
// Response limits
let fork_context = Arc::new(fork_context(ForkName::Base));
let max_rpc_size = max_rpc_size(&fork_context);
let chain_spec = Spec::default_spec();
let max_rpc_size = max_rpc_size(&fork_context, chain_spec.max_chunk_size as usize);
let limit = protocol_id.rpc_response_limits::<Spec>(&fork_context);
let mut max = encode_len(limit.max + 1);
let mut codec = SSZSnappyOutboundCodec::<Spec>::new(

View File

@@ -678,8 +678,8 @@ mod tests {
use std::sync::Arc;
use types::{
blob_sidecar::BlobIdentifier, BeaconBlock, BeaconBlockAltair, BeaconBlockBase,
BeaconBlockMerge, ChainSpec, EmptyBlock, Epoch, ForkContext, FullPayload, Hash256,
Signature, SignedBeaconBlock, Slot,
};
use snap::write::FrameEncoder;
@@ -728,7 +728,7 @@ mod tests {
}
/// Merge block with length < max_rpc_size.
fn merge_block_small(fork_context: &ForkContext, spec: &ChainSpec) -> SignedBeaconBlock<Spec> {
let mut block: BeaconBlockMerge<_, FullPayload<Spec>> =
BeaconBlockMerge::empty(&Spec::default_spec());
let tx = VariableList::from(vec![0; 1024]);
@@ -737,14 +737,14 @@ mod tests {
block.body.execution_payload.execution_payload.transactions = txs;
let block = BeaconBlock::Merge(block);
assert!(block.ssz_bytes_len() <= max_rpc_size(fork_context, spec.max_chunk_size as usize));
SignedBeaconBlock::from_block(block, Signature::empty())
}
/// Merge block with length > MAX_RPC_SIZE.
/// The max limit for a merge block is on the order of ~16 GiB, which wouldn't fit in memory.
/// Hence, we generate a merge block just greater than `MAX_RPC_SIZE` to test rejection on the RPC layer.
fn merge_block_large(fork_context: &ForkContext, spec: &ChainSpec) -> SignedBeaconBlock<Spec> {
let mut block: BeaconBlockMerge<_, FullPayload<Spec>> =
BeaconBlockMerge::empty(&Spec::default_spec());
let tx = VariableList::from(vec![0; 1024]);
@@ -753,7 +753,7 @@ mod tests {
block.body.execution_payload.execution_payload.transactions = txs;
let block = BeaconBlock::Merge(block);
assert!(block.ssz_bytes_len() > max_rpc_size(fork_context, spec.max_chunk_size as usize));
SignedBeaconBlock::from_block(block, Signature::empty())
}
@@ -823,10 +823,11 @@ mod tests {
protocol: SupportedProtocol,
message: RPCCodedResponse<Spec>,
fork_name: ForkName,
spec: &ChainSpec,
) -> Result<BytesMut, RPCError> {
let snappy_protocol_id = ProtocolId::new(protocol, Encoding::SSZSnappy);
let fork_context = Arc::new(fork_context(fork_name));
let max_packet_size = max_rpc_size(&fork_context, spec.max_chunk_size as usize);
let mut buf = BytesMut::new();
let mut snappy_inbound_codec =
@@ -869,10 +870,11 @@ mod tests {
protocol: SupportedProtocol,
message: &mut BytesMut,
fork_name: ForkName,
spec: &ChainSpec,
) -> Result<Option<RPCResponse<Spec>>, RPCError> {
let snappy_protocol_id = ProtocolId::new(protocol, Encoding::SSZSnappy);
let fork_context = Arc::new(fork_context(fork_name));
let max_packet_size = max_rpc_size(&fork_context, spec.max_chunk_size as usize);
let mut snappy_outbound_codec =
SSZSnappyOutboundCodec::<Spec>::new(snappy_protocol_id, max_packet_size, fork_context);
// decode message just as snappy message
@@ -884,15 +886,20 @@ mod tests {
protocol: SupportedProtocol,
message: RPCCodedResponse<Spec>,
fork_name: ForkName,
spec: &ChainSpec,
) -> Result<Option<RPCResponse<Spec>>, RPCError> {
let mut encoded = encode_response(protocol, message, fork_name, spec)?;
decode_response(protocol, &mut encoded, fork_name, spec)
}
/// Verifies that requests we send are encoded in a way that we would correctly decode too.
fn encode_then_decode_request(
req: OutboundRequest<Spec>,
fork_name: ForkName,
spec: &ChainSpec,
) {
let fork_context = Arc::new(fork_context(fork_name));
let max_packet_size = max_rpc_size(&fork_context, spec.max_chunk_size as usize);
let protocol = ProtocolId::new(req.versioned_protocol(), Encoding::SSZSnappy);
// Encode a request we send
let mut buf = BytesMut::new();
@@ -943,11 +950,14 @@ mod tests {
// Test RPCResponse encoding/decoding for V1 messages
#[test]
fn test_encode_then_decode_v1() {
let chain_spec = Spec::default_spec();
assert_eq!(
encode_then_decode_response(
SupportedProtocol::StatusV1,
RPCCodedResponse::Success(RPCResponse::Status(status_message())),
ForkName::Base,
&chain_spec,
),
Ok(Some(RPCResponse::Status(status_message())))
);
@@ -957,6 +967,7 @@ mod tests {
SupportedProtocol::PingV1,
RPCCodedResponse::Success(RPCResponse::Pong(ping_message())),
ForkName::Base,
&chain_spec,
),
Ok(Some(RPCResponse::Pong(ping_message())))
);
@@ -966,6 +977,7 @@ mod tests {
SupportedProtocol::BlocksByRangeV1,
RPCCodedResponse::Success(RPCResponse::BlocksByRange(Arc::new(empty_base_block()))),
ForkName::Base,
&chain_spec,
),
Ok(Some(RPCResponse::BlocksByRange(Arc::new(
empty_base_block()
@@ -978,6 +990,7 @@ mod tests {
SupportedProtocol::BlocksByRangeV1,
RPCCodedResponse::Success(RPCResponse::BlocksByRange(Arc::new(altair_block()))),
ForkName::Altair,
&chain_spec,
)
.unwrap_err(),
RPCError::SSZDecodeError(_)
@@ -990,6 +1003,7 @@ mod tests {
SupportedProtocol::BlocksByRootV1,
RPCCodedResponse::Success(RPCResponse::BlocksByRoot(Arc::new(empty_base_block()))),
ForkName::Base,
&chain_spec,
),
Ok(Some(RPCResponse::BlocksByRoot(
Arc::new(empty_base_block())
@@ -1002,6 +1016,7 @@ mod tests {
SupportedProtocol::BlocksByRootV1,
RPCCodedResponse::Success(RPCResponse::BlocksByRoot(Arc::new(altair_block()))),
ForkName::Altair,
&chain_spec,
)
.unwrap_err(),
RPCError::SSZDecodeError(_)
@@ -1014,6 +1029,7 @@ mod tests {
SupportedProtocol::MetaDataV1,
RPCCodedResponse::Success(RPCResponse::MetaData(metadata())),
ForkName::Base,
&chain_spec,
),
Ok(Some(RPCResponse::MetaData(metadata()))),
);
@@ -1024,6 +1040,7 @@ mod tests {
SupportedProtocol::MetaDataV1,
RPCCodedResponse::Success(RPCResponse::MetaData(metadata_v2())),
ForkName::Base,
&chain_spec,
),
Ok(Some(RPCResponse::MetaData(metadata()))),
);
@@ -1033,6 +1050,7 @@ mod tests {
SupportedProtocol::BlobsByRangeV1,
RPCCodedResponse::Success(RPCResponse::BlobsByRange(default_blob_sidecar())),
ForkName::Deneb,
&chain_spec
),
Ok(Some(RPCResponse::BlobsByRange(default_blob_sidecar()))),
);
@@ -1042,6 +1060,7 @@ mod tests {
SupportedProtocol::BlobsByRootV1,
RPCCodedResponse::Success(RPCResponse::SidecarByRoot(default_blob_sidecar())),
ForkName::Deneb,
&chain_spec
),
Ok(Some(RPCResponse::SidecarByRoot(default_blob_sidecar()))),
);
@@ -1050,11 +1069,14 @@ mod tests {
// Test RPCResponse encoding/decoding for V2 messages
#[test]
fn test_encode_then_decode_v2() {
let chain_spec = Spec::default_spec();
assert_eq!(
encode_then_decode_response(
SupportedProtocol::BlocksByRangeV2,
RPCCodedResponse::Success(RPCResponse::BlocksByRange(Arc::new(empty_base_block()))),
ForkName::Base,
&chain_spec,
),
Ok(Some(RPCResponse::BlocksByRange(Arc::new(
empty_base_block()
@@ -1069,6 +1091,7 @@ mod tests {
SupportedProtocol::BlocksByRangeV2,
RPCCodedResponse::Success(RPCResponse::BlocksByRange(Arc::new(empty_base_block()))),
ForkName::Altair,
&chain_spec,
),
Ok(Some(RPCResponse::BlocksByRange(Arc::new(
empty_base_block()
@@ -1080,12 +1103,13 @@ mod tests {
SupportedProtocol::BlocksByRangeV2,
RPCCodedResponse::Success(RPCResponse::BlocksByRange(Arc::new(altair_block()))),
ForkName::Altair,
&chain_spec,
),
Ok(Some(RPCResponse::BlocksByRange(Arc::new(altair_block()))))
);
let merge_block_small = merge_block_small(&fork_context(ForkName::Merge), &chain_spec);
let merge_block_large = merge_block_large(&fork_context(ForkName::Merge), &chain_spec);
assert_eq!(
encode_then_decode_response(
@@ -1094,6 +1118,7 @@ mod tests {
merge_block_small.clone()
))),
ForkName::Merge,
&chain_spec,
),
Ok(Some(RPCResponse::BlocksByRange(Arc::new(
merge_block_small.clone()
@@ -1110,6 +1135,7 @@ mod tests {
SupportedProtocol::BlocksByRangeV2,
&mut encoded,
ForkName::Merge,
&chain_spec,
)
.unwrap_err(),
RPCError::InvalidData(_)
@@ -1122,6 +1148,7 @@ mod tests {
SupportedProtocol::BlocksByRootV2,
RPCCodedResponse::Success(RPCResponse::BlocksByRoot(Arc::new(empty_base_block()))),
ForkName::Base,
&chain_spec,
),
Ok(Some(RPCResponse::BlocksByRoot(
Arc::new(empty_base_block())
@@ -1136,6 +1163,7 @@ mod tests {
SupportedProtocol::BlocksByRootV2,
RPCCodedResponse::Success(RPCResponse::BlocksByRoot(Arc::new(empty_base_block()))),
ForkName::Altair,
&chain_spec,
),
Ok(Some(RPCResponse::BlocksByRoot(
Arc::new(empty_base_block())
@@ -1147,6 +1175,7 @@ mod tests {
SupportedProtocol::BlocksByRootV2,
RPCCodedResponse::Success(RPCResponse::BlocksByRoot(Arc::new(altair_block()))),
ForkName::Altair,
&chain_spec,
),
Ok(Some(RPCResponse::BlocksByRoot(Arc::new(altair_block()))))
);
@@ -1158,6 +1187,7 @@ mod tests {
merge_block_small.clone()
))),
ForkName::Merge,
&chain_spec,
),
Ok(Some(RPCResponse::BlocksByRoot(Arc::new(merge_block_small))))
);
@@ -1172,6 +1202,7 @@ mod tests {
SupportedProtocol::BlocksByRootV2,
&mut encoded,
ForkName::Merge,
&chain_spec,
)
.unwrap_err(),
RPCError::InvalidData(_)
@@ -1185,6 +1216,7 @@ mod tests {
SupportedProtocol::MetaDataV2,
RPCCodedResponse::Success(RPCResponse::MetaData(metadata())),
ForkName::Base,
&chain_spec,
),
Ok(Some(RPCResponse::MetaData(metadata_v2())))
);
@@ -1194,6 +1226,7 @@ mod tests {
SupportedProtocol::MetaDataV2,
RPCCodedResponse::Success(RPCResponse::MetaData(metadata_v2())),
ForkName::Altair,
&chain_spec,
),
Ok(Some(RPCResponse::MetaData(metadata_v2())))
);
@@ -1204,11 +1237,14 @@ mod tests {
fn test_context_bytes_v2() {
let fork_context = fork_context(ForkName::Altair);
let chain_spec = Spec::default_spec();
// Removing context bytes for v2 messages should error
let mut encoded_bytes = encode_response(
SupportedProtocol::BlocksByRangeV2,
RPCCodedResponse::Success(RPCResponse::BlocksByRange(Arc::new(empty_base_block()))),
ForkName::Base,
&chain_spec,
)
.unwrap();
@@ -1218,7 +1254,8 @@ mod tests {
decode_response(
SupportedProtocol::BlocksByRangeV2,
&mut encoded_bytes,
ForkName::Base,
&chain_spec,
)
.unwrap_err(),
RPCError::ErrorResponse(RPCResponseErrorCode::InvalidRequest, _),
@@ -1228,6 +1265,7 @@ mod tests {
SupportedProtocol::BlocksByRootV2,
RPCCodedResponse::Success(RPCResponse::BlocksByRoot(Arc::new(empty_base_block()))),
ForkName::Base,
&chain_spec,
)
.unwrap();
@@ -1237,7 +1275,8 @@ mod tests {
decode_response(
SupportedProtocol::BlocksByRangeV2,
&mut encoded_bytes,
ForkName::Base,
&chain_spec,
)
.unwrap_err(),
RPCError::ErrorResponse(RPCResponseErrorCode::InvalidRequest, _),
@@ -1248,6 +1287,7 @@ mod tests {
SupportedProtocol::BlocksByRangeV2,
RPCCodedResponse::Success(RPCResponse::BlocksByRange(Arc::new(empty_base_block()))),
ForkName::Altair,
&chain_spec,
)
.unwrap();
@@ -1260,7 +1300,8 @@ mod tests {
decode_response(
SupportedProtocol::BlocksByRangeV2,
&mut wrong_fork_bytes,
ForkName::Altair,
&chain_spec,
)
.unwrap_err(),
RPCError::SSZDecodeError(_),
@@ -1271,6 +1312,7 @@ mod tests {
SupportedProtocol::BlocksByRootV2,
RPCCodedResponse::Success(RPCResponse::BlocksByRoot(Arc::new(altair_block()))),
ForkName::Altair,
&chain_spec,
)
.unwrap();
@@ -1282,7 +1324,8 @@ mod tests {
decode_response(
SupportedProtocol::BlocksByRangeV2,
&mut wrong_fork_bytes,
ForkName::Altair,
&chain_spec,
)
.unwrap_err(),
RPCError::SSZDecodeError(_),
@@ -1296,6 +1339,7 @@ mod tests {
SupportedProtocol::MetaDataV2,
RPCCodedResponse::Success(RPCResponse::MetaData(metadata())),
ForkName::Altair,
&chain_spec,
)
.unwrap(),
);
@@ -1303,7 +1347,8 @@ mod tests {
assert!(decode_response(
SupportedProtocol::MetaDataV2,
&mut encoded_bytes,
ForkName::Altair,
&chain_spec,
)
.is_err());
@@ -1312,6 +1357,7 @@ mod tests {
SupportedProtocol::BlocksByRootV2,
RPCCodedResponse::Success(RPCResponse::BlocksByRoot(Arc::new(empty_base_block()))),
ForkName::Altair,
&chain_spec,
)
.unwrap();
@@ -1323,7 +1369,8 @@ mod tests {
decode_response(
SupportedProtocol::BlocksByRangeV2,
&mut wrong_fork_bytes,
ForkName::Altair,
&chain_spec,
)
.unwrap_err(),
RPCError::ErrorResponse(RPCResponseErrorCode::InvalidRequest, _),
@@ -1334,6 +1381,7 @@ mod tests {
SupportedProtocol::BlocksByRootV2,
RPCCodedResponse::Success(RPCResponse::BlocksByRoot(Arc::new(empty_base_block()))),
ForkName::Altair,
&chain_spec,
)
.unwrap();
@@ -1343,7 +1391,8 @@ mod tests {
decode_response(
SupportedProtocol::BlocksByRangeV2,
&mut part,
ForkName::Altair,
&chain_spec,
),
Ok(None)
)
@@ -1364,9 +1413,12 @@ mod tests {
OutboundRequest::BlobsByRoot(blbroot_request()),
OutboundRequest::MetaData(MetadataRequest::new_v2()),
];
let chain_spec = Spec::default_spec();
for req in requests.iter() {
for fork_name in ForkName::list_all() {
encode_then_decode_request(req.clone(), fork_name, &chain_spec);
}
}
}
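For orientation, a minimal sketch of the round-trip these tests perform, with the chain spec now threaded through everywhere (helper and type names are the ones defined in this test module):

```rust
let spec = Spec::default_spec();
let sent = RPCCodedResponse::Success(RPCResponse::Status(status_message()));
// Encode with the spec-derived size limit, then decode the same bytes back.
let received =
    encode_then_decode_response(SupportedProtocol::StatusV1, sent, ForkName::Base, &spec);
assert_eq!(received, Ok(Some(RPCResponse::Status(status_message()))));
```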
@@ -1420,9 +1472,16 @@ mod tests {
assert_eq!(writer.get_ref().len(), 42);
dst.extend_from_slice(writer.get_ref());
let chain_spec = Spec::default_spec();
// 10 (for stream identifier) + 80 + 42 = 132 > `max_compressed_len`. Hence, decoding should fail with `InvalidData`.
assert!(matches!(
decode_response(
SupportedProtocol::StatusV1,
&mut dst,
ForkName::Base,
&chain_spec
)
.unwrap_err(),
RPCError::InvalidData(_)
));
}
@@ -1477,12 +1536,15 @@ mod tests {
assert_eq!(writer.get_ref().len(), 8103);
dst.extend_from_slice(writer.get_ref());
let chain_spec = Spec::default_spec();
// 10 (for stream identifier) + 176156 + 8103 = 184269 > `max_compressed_len`. Hence, decoding should fail with `InvalidData`.
assert!(matches!(
decode_response(
SupportedProtocol::BlocksByRangeV2,
&mut dst,
ForkName::Altair,
&chain_spec,
)
.unwrap_err(),
RPCError::InvalidData(_)
@@ -1510,8 +1572,12 @@ mod tests {
let mut uvi_codec: Uvi<usize> = Uvi::default();
let mut dst = BytesMut::with_capacity(1024);
let chain_spec = Spec::default_spec();
// Insert length-prefix
uvi_codec
.encode(chain_spec.max_chunk_size as usize + 1, &mut dst)
.unwrap();
// Insert snappy stream identifier
dst.extend_from_slice(stream_identifier);
@@ -1523,7 +1589,13 @@ mod tests {
dst.extend_from_slice(writer.get_ref());
assert!(matches!(
decode_response(
SupportedProtocol::StatusV1,
&mut dst,
ForkName::Base,
&chain_spec
)
.unwrap_err(),
RPCError::InvalidData(_)
));
}

View File

@@ -3,21 +3,19 @@
use super::methods::{GoodbyeReason, RPCCodedResponse, RPCResponseErrorCode};
use super::outbound::OutboundRequestContainer;
use super::protocol::{InboundOutput, InboundRequest, Protocol, RPCError, RPCProtocol};
use super::{RPCReceived, RPCSend, ReqId};
use crate::rpc::outbound::{OutboundFramed, OutboundRequest};
use crate::rpc::protocol::InboundFramed;
use fnv::FnvHashMap;
use futures::prelude::*;
use futures::{Sink, SinkExt};
use libp2p::core::upgrade::{
InboundUpgrade, NegotiationError, OutboundUpgrade, ProtocolError, UpgradeError,
};
use libp2p::swarm::handler::{
ConnectionEvent, ConnectionHandler, ConnectionHandlerEvent, DialUpgradeError,
FullyNegotiatedInbound, FullyNegotiatedOutbound, KeepAlive, StreamUpgradeError,
SubstreamProtocol,
};
use libp2p::swarm::Stream;
use slog::{crit, debug, trace, warn};
use smallvec::SmallVec;
use std::{
@@ -31,9 +29,6 @@ use tokio::time::{sleep_until, Instant as TInstant, Sleep};
use tokio_util::time::{delay_queue, DelayQueue};
use types::{EthSpec, ForkContext};
/// The time (in seconds) before a substream that is awaiting a response from the user times out.
pub const RESPONSE_TIMEOUT: u64 = 10;
/// The number of times to retry an outbound upgrade in the case of IO errors.
const IO_ERROR_RETRIES: u8 = 3;
@@ -53,7 +48,7 @@ impl SubstreamId {
}
}
type InboundSubstream<TSpec> = InboundFramed<Stream, TSpec>;
/// Events the handler emits to the behaviour.
pub type HandlerEvent<Id, T> = Result<RPCReceived<Id, T>, HandlerErr<Id>>;
@@ -137,6 +132,9 @@ where
/// Logger for handling RPC streams
log: slog::Logger,
/// Timeout that will be used for inbound and outbound responses.
resp_timeout: Duration,
}
enum HandlerState {
@@ -201,12 +199,12 @@ pub enum OutboundSubstreamState<TSpec: EthSpec> {
/// handler because GOODBYE requests can be handled and responses dropped instantly.
RequestPendingResponse {
/// The framed negotiated substream.
substream: Box<OutboundFramed<Stream, TSpec>>,
/// Keeps track of the actual request sent.
request: OutboundRequest<TSpec>,
},
/// Closing an outbound substream.
Closing(Box<OutboundFramed<Stream, TSpec>>),
/// Temporary state during processing
Poisoned,
}
@@ -219,6 +217,7 @@ where
listen_protocol: SubstreamProtocol<RPCProtocol<TSpec>, ()>,
fork_context: Arc<ForkContext>,
log: &slog::Logger,
resp_timeout: Duration,
) -> Self {
RPCHandler {
@@ -237,6 +236,7 @@ where
fork_context,
waker: None,
log: log.clone(),
resp_timeout,
}
}
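A hedged sketch of a call site for this constructor (mirroring the behaviour code later in this diff; `listen_protocol`, `fork_context`, `log` and `network_params` are assumed to be in scope):

```rust
let handler = RPCHandler::new(
    listen_protocol,
    fork_context.clone(),
    &log,
    network_params.resp_timeout, // replaces the hard-coded RESPONSE_TIMEOUT of 10s
);
```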
} }
@@ -321,8 +321,8 @@ where
TSpec: EthSpec,
Id: ReqId,
{
type FromBehaviour = RPCSend<Id, TSpec>;
type ToBehaviour = HandlerEvent<Id, TSpec>;
type Error = RPCError;
type InboundProtocol = RPCProtocol<TSpec>;
type OutboundProtocol = OutboundRequestContainer<TSpec>;
@@ -333,121 +333,7 @@ where
self.listen_protocol.clone()
}
fn inject_fully_negotiated_outbound(
&mut self,
out: <Self::OutboundProtocol as OutboundUpgrade<NegotiatedSubstream>>::Output,
request_info: Self::OutboundOpenInfo,
) {
self.dial_negotiated -= 1;
let (id, request) = request_info;
let proto = request.versioned_protocol().protocol();
// accept outbound connections only if the handler is not deactivated
if matches!(self.state, HandlerState::Deactivated) {
self.events_out.push(Err(HandlerErr::Outbound {
error: RPCError::Disconnected,
proto,
id,
}));
}
// add the stream to substreams if we expect a response, otherwise drop the stream.
let expected_responses = request.expected_responses();
if expected_responses > 0 {
// new outbound request. Store the stream and tag the output.
let delay_key = self.outbound_substreams_delay.insert(
self.current_outbound_substream_id,
Duration::from_secs(RESPONSE_TIMEOUT),
);
let awaiting_stream = OutboundSubstreamState::RequestPendingResponse {
substream: Box::new(out),
request,
};
let expected_responses = if expected_responses > 1 {
// Currently enforced only for multiple responses
Some(expected_responses)
} else {
None
};
if self
.outbound_substreams
.insert(
self.current_outbound_substream_id,
OutboundInfo {
state: awaiting_stream,
delay_key,
proto,
remaining_chunks: expected_responses,
req_id: id,
},
)
.is_some()
{
crit!(self.log, "Duplicate outbound substream id"; "id" => self.current_outbound_substream_id);
}
self.current_outbound_substream_id.0 += 1;
}
}
fn inject_fully_negotiated_inbound(
&mut self,
substream: <Self::InboundProtocol as InboundUpgrade<NegotiatedSubstream>>::Output,
_info: Self::InboundOpenInfo,
) {
// only accept new peer requests when active
if !matches!(self.state, HandlerState::Active) {
return;
}
let (req, substream) = substream;
let expected_responses = req.expected_responses();
// store requests that expect responses
if expected_responses > 0 {
if self.inbound_substreams.len() < MAX_INBOUND_SUBSTREAMS {
// Store the stream and tag the output.
let delay_key = self.inbound_substreams_delay.insert(
self.current_inbound_substream_id,
Duration::from_secs(RESPONSE_TIMEOUT),
);
let awaiting_stream = InboundState::Idle(substream);
self.inbound_substreams.insert(
self.current_inbound_substream_id,
InboundInfo {
state: awaiting_stream,
pending_items: VecDeque::with_capacity(std::cmp::min(
expected_responses,
128,
) as usize),
delay_key: Some(delay_key),
protocol: req.versioned_protocol().protocol(),
request_start_time: Instant::now(),
remaining_chunks: expected_responses,
},
);
} else {
self.events_out.push(Err(HandlerErr::Inbound {
id: self.current_inbound_substream_id,
proto: req.versioned_protocol().protocol(),
error: RPCError::HandlerRejected,
}));
return self.shutdown(None);
}
}
// If we received a goodbye, shutdown the connection.
if let InboundRequest::Goodbye(_) = req {
self.shutdown(None);
}
self.events_out.push(Ok(RPCReceived::Request(
self.current_inbound_substream_id,
req,
)));
self.current_inbound_substream_id.0 += 1;
}
fn inject_event(&mut self, rpc_event: Self::InEvent) {
fn on_behaviour_event(&mut self, rpc_event: Self::FromBehaviour) {
match rpc_event {
RPCSend::Request(id, req) => self.send_request(id, req),
RPCSend::Response(inbound_id, response) => self.send_response(inbound_id, response),
@@ -459,56 +345,6 @@ where
}
}
fn inject_dial_upgrade_error(
&mut self,
request_info: Self::OutboundOpenInfo,
error: ConnectionHandlerUpgrErr<
<Self::OutboundProtocol as OutboundUpgrade<NegotiatedSubstream>>::Error,
>,
) {
let (id, req) = request_info;
if let ConnectionHandlerUpgrErr::Upgrade(UpgradeError::Apply(RPCError::IoError(_))) = error
{
self.outbound_io_error_retries += 1;
if self.outbound_io_error_retries < IO_ERROR_RETRIES {
self.send_request(id, req);
return;
}
}
// This dialing is now considered failed
self.dial_negotiated -= 1;
self.outbound_io_error_retries = 0;
// map the error
let error = match error {
ConnectionHandlerUpgrErr::Timer => RPCError::InternalError("Timer failed"),
ConnectionHandlerUpgrErr::Timeout => RPCError::NegotiationTimeout,
ConnectionHandlerUpgrErr::Upgrade(UpgradeError::Apply(e)) => e,
ConnectionHandlerUpgrErr::Upgrade(UpgradeError::Select(NegotiationError::Failed)) => {
RPCError::UnsupportedProtocol
}
ConnectionHandlerUpgrErr::Upgrade(UpgradeError::Select(
NegotiationError::ProtocolError(e),
)) => match e {
ProtocolError::IoError(io_err) => RPCError::IoError(io_err.to_string()),
ProtocolError::InvalidProtocol => {
RPCError::InternalError("Protocol was deemed invalid")
}
ProtocolError::InvalidMessage | ProtocolError::TooManyProtocols => {
// Peer is sending invalid data during the negotiation phase, not
// participating in the protocol
RPCError::InvalidData("Invalid message during negotiation".to_string())
}
},
};
self.events_out.push(Err(HandlerErr::Outbound {
error,
proto: req.versioned_protocol().protocol(),
id,
}));
}
fn connection_keep_alive(&self) -> KeepAlive {
// Check that we don't have outbound items pending for dialing, nor dialing, nor
// established. Also check that there are no established inbound substreams.
@@ -541,7 +377,7 @@ where
ConnectionHandlerEvent<
Self::OutboundProtocol,
Self::OutboundOpenInfo,
Self::ToBehaviour,
Self::Error,
>,
> {
@@ -554,7 +390,9 @@ where
}
// return any events that need to be reported
if !self.events_out.is_empty() {
return Poll::Ready(ConnectionHandlerEvent::NotifyBehaviour(
self.events_out.remove(0),
));
} else {
self.events_out.shrink_to_fit();
}
@@ -618,7 +456,9 @@ where
error: RPCError::StreamTimeout,
};
// notify the user
return Poll::Ready(ConnectionHandlerEvent::NotifyBehaviour(Err(
outbound_err,
)));
} else {
crit!(self.log, "timed out substream not in the books"; "stream_id" => outbound_id.get_ref());
}
@@ -720,7 +560,7 @@ where
// Each chunk is allowed RESPONSE_TIMEOUT to be sent.
if let Some(ref delay_key) = info.delay_key {
self.inbound_substreams_delay
.reset(delay_key, self.resp_timeout);
}
// The stream may be currently idle. Attempt to process more
@@ -860,7 +700,7 @@ where
};
substream_entry.remaining_chunks = Some(remaining_chunks);
self.outbound_substreams_delay
.reset(delay_key, self.resp_timeout);
}
} else {
// either this is a single response request or this response closes the
@@ -884,7 +724,7 @@ where
}),
};
return Poll::Ready(ConnectionHandlerEvent::NotifyBehaviour(received));
}
Poll::Ready(None) => {
// stream closed
@@ -899,7 +739,7 @@ where
// notify the application error
if request.expected_responses() > 1 {
// return an end of stream result
return Poll::Ready(ConnectionHandlerEvent::NotifyBehaviour(Ok(
RPCReceived::EndOfStream(request_id, request.stream_termination()),
)));
}
@@ -910,7 +750,9 @@ where
proto: request.versioned_protocol().protocol(),
error: RPCError::IncompleteStream,
};
return Poll::Ready(ConnectionHandlerEvent::NotifyBehaviour(Err(
outbound_err,
)));
}
Poll::Pending => {
entry.get_mut().state =
@@ -926,7 +768,9 @@ where
error: e,
};
entry.remove_entry();
return Poll::Ready(ConnectionHandlerEvent::NotifyBehaviour(Err(
outbound_err,
)));
}
},
OutboundSubstreamState::Closing(mut substream) => {
@@ -947,7 +791,7 @@ where
// termination to the application
if let Some(termination) = protocol.terminator() {
return Poll::Ready(ConnectionHandlerEvent::NotifyBehaviour(Ok(
RPCReceived::EndOfStream(request_id, termination),
)));
}
@@ -974,7 +818,7 @@ where
OutboundRequestContainer {
req: req.clone(),
fork_context: self.fork_context.clone(),
max_rpc_size: self.listen_protocol().upgrade().max_rpc_size,
},
(),
)
@@ -996,6 +840,205 @@ where
Poll::Pending
}
fn on_connection_event(
&mut self,
event: ConnectionEvent<
Self::InboundProtocol,
Self::OutboundProtocol,
Self::InboundOpenInfo,
Self::OutboundOpenInfo,
>,
) {
match event {
ConnectionEvent::FullyNegotiatedInbound(FullyNegotiatedInbound {
protocol,
info: _,
}) => self.on_fully_negotiated_inbound(protocol),
ConnectionEvent::FullyNegotiatedOutbound(FullyNegotiatedOutbound {
protocol,
info,
}) => self.on_fully_negotiated_outbound(protocol, info),
ConnectionEvent::DialUpgradeError(DialUpgradeError { info, error }) => {
self.on_dial_upgrade_error(info, error)
}
ConnectionEvent::ListenUpgradeError(libp2p::swarm::handler::ListenUpgradeError {
info: _,
error: _, /* RPCError */
}) => {
// This is going to be removed in the next libp2p release. I think it's fine to do
// nothing.
}
ConnectionEvent::LocalProtocolsChange(_) => {
// This shouldn't affect this handler, we will still negotiate streams if we support
// the protocol as usual.
}
ConnectionEvent::RemoteProtocolsChange(_) => {
// This shouldn't affect this handler, we will still negotiate streams if we support
// the protocol as usual.
}
ConnectionEvent::AddressChange(_) => {
// We don't care about these changes as they have no bearing on our RPC internal
// logic.
}
}
}
}
impl<Id, TSpec: EthSpec> RPCHandler<Id, TSpec>
where
Id: ReqId,
TSpec: EthSpec,
{
fn on_fully_negotiated_inbound(&mut self, substream: InboundOutput<Stream, TSpec>) {
// only accept new peer requests when active
if !matches!(self.state, HandlerState::Active) {
return;
}
let (req, substream) = substream;
let expected_responses = req.expected_responses();
// store requests that expect responses
if expected_responses > 0 {
if self.inbound_substreams.len() < MAX_INBOUND_SUBSTREAMS {
// Store the stream and tag the output.
let delay_key = self
.inbound_substreams_delay
.insert(self.current_inbound_substream_id, self.resp_timeout);
let awaiting_stream = InboundState::Idle(substream);
self.inbound_substreams.insert(
self.current_inbound_substream_id,
InboundInfo {
state: awaiting_stream,
pending_items: VecDeque::with_capacity(std::cmp::min(
expected_responses,
128,
) as usize),
delay_key: Some(delay_key),
protocol: req.versioned_protocol().protocol(),
request_start_time: Instant::now(),
remaining_chunks: expected_responses,
},
);
} else {
self.events_out.push(Err(HandlerErr::Inbound {
id: self.current_inbound_substream_id,
proto: req.versioned_protocol().protocol(),
error: RPCError::HandlerRejected,
}));
return self.shutdown(None);
}
}
// If we received a goodbye, shutdown the connection.
if let InboundRequest::Goodbye(_) = req {
self.shutdown(None);
}
self.events_out.push(Ok(RPCReceived::Request(
self.current_inbound_substream_id,
req,
)));
self.current_inbound_substream_id.0 += 1;
}
fn on_fully_negotiated_outbound(
&mut self,
substream: OutboundFramed<Stream, TSpec>,
(id, request): (Id, OutboundRequest<TSpec>),
) {
self.dial_negotiated -= 1;
// Reset any io-retries counter.
self.outbound_io_error_retries = 0;
let proto = request.versioned_protocol().protocol();
// accept outbound connections only if the handler is not deactivated
if matches!(self.state, HandlerState::Deactivated) {
self.events_out.push(Err(HandlerErr::Outbound {
error: RPCError::Disconnected,
proto,
id,
}));
}
// add the stream to substreams if we expect a response, otherwise drop the stream.
let expected_responses = request.expected_responses();
if expected_responses > 0 {
// new outbound request. Store the stream and tag the output.
let delay_key = self
.outbound_substreams_delay
.insert(self.current_outbound_substream_id, self.resp_timeout);
let awaiting_stream = OutboundSubstreamState::RequestPendingResponse {
substream: Box::new(substream),
request,
};
let expected_responses = if expected_responses > 1 {
// Currently enforced only for multiple responses
Some(expected_responses)
} else {
None
};
if self
.outbound_substreams
.insert(
self.current_outbound_substream_id,
OutboundInfo {
state: awaiting_stream,
delay_key,
proto,
remaining_chunks: expected_responses,
req_id: id,
},
)
.is_some()
{
crit!(self.log, "Duplicate outbound substream id"; "id" => self.current_outbound_substream_id);
}
self.current_outbound_substream_id.0 += 1;
}
}
fn on_dial_upgrade_error(
&mut self,
request_info: (Id, OutboundRequest<TSpec>),
error: StreamUpgradeError<RPCError>,
) {
let (id, req) = request_info;
// map the error
let error = match error {
StreamUpgradeError::Timeout => RPCError::NegotiationTimeout,
StreamUpgradeError::Apply(RPCError::IoError(e)) => {
self.outbound_io_error_retries += 1;
if self.outbound_io_error_retries < IO_ERROR_RETRIES {
self.send_request(id, req);
return;
}
RPCError::IoError(e)
}
StreamUpgradeError::NegotiationFailed => RPCError::UnsupportedProtocol,
StreamUpgradeError::Io(io_err) => {
self.outbound_io_error_retries += 1;
if self.outbound_io_error_retries < IO_ERROR_RETRIES {
self.send_request(id, req);
return;
}
RPCError::IoError(io_err.to_string())
}
StreamUpgradeError::Apply(other) => other,
};
// This dialing is now considered failed
self.dial_negotiated -= 1;
self.outbound_io_error_retries = 0;
self.events_out.push(Err(HandlerErr::Outbound {
error,
proto: req.versioned_protocol().protocol(),
id,
}));
}
}
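Taken together, the handler-side refactor in this file replaces the old `inject_*` callbacks and `ConnectionHandlerEvent::Custom` with `on_connection_event`/`on_behaviour_event` and `NotifyBehaviour`. A compressed, comment-only sketch of the resulting event path (the behaviour side appears in the next file):

```rust
// Handler (this file): emit an event to the behaviour.
//   Poll::Ready(ConnectionHandlerEvent::NotifyBehaviour(event))
//
// Behaviour (rpc/mod.rs, below): receive it and surface it to the application.
//   fn on_connection_handler_event(&mut self, peer_id, conn_id, event) {
//       self.events.push(ToSwarm::GenerateEvent(RPCMessage { peer_id, conn_id, event }));
//   }
```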
impl slog::Value for SubstreamId {

View File

@@ -6,22 +6,23 @@
use futures::future::FutureExt;
use handler::{HandlerEvent, RPCHandler};
use libp2p::core::connection::ConnectionId;
use libp2p::swarm::{
handler::ConnectionHandler, ConnectionId, NetworkBehaviour, NotifyHandler, PollParameters,
ToSwarm,
};
use libp2p::swarm::{FromSwarm, SubstreamProtocol, THandlerInEvent};
use libp2p::PeerId;
use rate_limiter::{RPCRateLimiter as RateLimiter, RateLimitedErr};
use slog::{crit, debug, o};
use std::marker::PhantomData;
use std::sync::Arc;
use std::task::{Context, Poll};
use std::time::Duration;
use types::{EthSpec, ForkContext};
pub(crate) use handler::HandlerErr;
pub(crate) use methods::{MetaData, MetaDataV1, MetaDataV2, Ping, RPCCodedResponse, RPCResponse};
pub(crate) use protocol::InboundRequest;
pub use handler::SubstreamId;
pub use methods::{
@@ -32,6 +33,7 @@ pub(crate) use outbound::OutboundRequest;
pub use protocol::{max_rpc_size, Protocol, RPCError};
use self::config::{InboundRateLimiterConfig, OutboundRateLimiterConfig};
use self::protocol::RPCProtocol;
use self::self_limiter::SelfRateLimiter;
pub(crate) mod codec;
@@ -104,8 +106,13 @@ pub struct RPCMessage<Id, TSpec: EthSpec> {
pub event: HandlerEvent<Id, TSpec>,
}
type BehaviourAction<Id, TSpec> = ToSwarm<RPCMessage<Id, TSpec>, RPCSend<Id, TSpec>>;
pub struct NetworkParams {
pub max_chunk_size: usize,
pub ttfb_timeout: Duration,
pub resp_timeout: Duration,
}
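A hedged sketch of constructing `NetworkParams`; the values shown simply reproduce the constants this diff removes (`MAX_RPC_SIZE_POST_MERGE`, `TTFB_TIMEOUT`, `RESPONSE_TIMEOUT`) and are illustrative rather than the crate's actual defaults:

```rust
use std::time::Duration;

let network_params = NetworkParams {
    max_chunk_size: 10 * 1_048_576,        // 10 MiB, the old MAX_RPC_SIZE_POST_MERGE
    ttfb_timeout: Duration::from_secs(5),  // the old TTFB_TIMEOUT
    resp_timeout: Duration::from_secs(10), // the old RESPONSE_TIMEOUT
};
```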
/// Implements the libp2p `NetworkBehaviour` trait and therefore manages network-level
/// logic.
@@ -120,6 +127,8 @@ pub struct RPC<Id: ReqId, TSpec: EthSpec> {
enable_light_client_server: bool,
/// Slog logger for RPC behaviour.
log: slog::Logger,
/// Networking constant values
network_params: NetworkParams,
}
impl<Id: ReqId, TSpec: EthSpec> RPC<Id, TSpec> {
@@ -129,6 +138,7 @@ impl<Id: ReqId, TSpec: EthSpec> RPC<Id, TSpec> {
inbound_rate_limiter_config: Option<InboundRateLimiterConfig>,
outbound_rate_limiter_config: Option<OutboundRateLimiterConfig>,
log: slog::Logger,
network_params: NetworkParams,
) -> Self {
let log = log.new(o!("service" => "libp2p_rpc"));
@@ -149,6 +159,7 @@ impl<Id: ReqId, TSpec: EthSpec> RPC<Id, TSpec> {
fork_context,
enable_light_client_server,
log,
network_params,
}
}
@@ -161,7 +172,7 @@ impl<Id: ReqId, TSpec: EthSpec> RPC<Id, TSpec> {
id: (ConnectionId, SubstreamId),
event: RPCCodedResponse<TSpec>,
) {
self.events.push(ToSwarm::NotifyHandler {
peer_id,
handler: NotifyHandler::One(id.0),
event: RPCSend::Response(id.1, event),
@@ -181,7 +192,7 @@ impl<Id: ReqId, TSpec: EthSpec> RPC<Id, TSpec> {
}
}
} else {
ToSwarm::NotifyHandler {
peer_id,
handler: NotifyHandler::Any,
event: RPCSend::Request(request_id, req),
@@ -194,7 +205,7 @@ impl<Id: ReqId, TSpec: EthSpec> RPC<Id, TSpec> {
/// Lighthouse wishes to disconnect from this peer by sending a Goodbye message. This
/// gracefully terminates the RPC behaviour with a goodbye message.
pub fn shutdown(&mut self, peer_id: PeerId, id: Id, reason: GoodbyeReason) {
self.events.push(ToSwarm::NotifyHandler {
peer_id,
handler: NotifyHandler::Any,
event: RPCSend::Shutdown(id, reason),
@@ -208,29 +219,95 @@ where
Id: ReqId,
{
type ConnectionHandler = RPCHandler<Id, TSpec>;
type ToSwarm = RPCMessage<Id, TSpec>;
fn handle_established_inbound_connection(
&mut self,
_connection_id: ConnectionId,
peer_id: PeerId,
_local_addr: &libp2p::Multiaddr,
_remote_addr: &libp2p::Multiaddr,
) -> Result<libp2p::swarm::THandler<Self>, libp2p::swarm::ConnectionDenied> {
let protocol = SubstreamProtocol::new(
RPCProtocol {
fork_context: self.fork_context.clone(),
max_rpc_size: max_rpc_size(&self.fork_context, self.network_params.max_chunk_size),
enable_light_client_server: self.enable_light_client_server,
phantom: PhantomData,
ttfb_timeout: self.network_params.ttfb_timeout,
},
(),
);
// NOTE: this is needed because PeerIds have interior mutability.
let peer_repr = peer_id.to_string();
let log = self.log.new(slog::o!("peer_id" => peer_repr));
let handler = RPCHandler::new(
protocol,
self.fork_context.clone(),
&log,
self.network_params.resp_timeout,
);
Ok(handler)
}
fn handle_established_outbound_connection(
&mut self,
_connection_id: ConnectionId,
peer_id: PeerId,
_addr: &libp2p::Multiaddr,
_role_override: libp2p::core::Endpoint,
) -> Result<libp2p::swarm::THandler<Self>, libp2p::swarm::ConnectionDenied> {
let protocol = SubstreamProtocol::new(
RPCProtocol {
fork_context: self.fork_context.clone(),
max_rpc_size: max_rpc_size(&self.fork_context, self.network_params.max_chunk_size),
enable_light_client_server: self.enable_light_client_server,
phantom: PhantomData,
ttfb_timeout: self.network_params.ttfb_timeout,
},
(),
);
// NOTE: this is needed because PeerIds have interior mutability.
let peer_repr = peer_id.to_string();
let log = self.log.new(slog::o!("peer_id" => peer_repr));
let handler = RPCHandler::new(
protocol,
self.fork_context.clone(),
&log,
self.network_params.resp_timeout,
);
Ok(handler)
}
fn on_swarm_event(&mut self, event: FromSwarm<Self::ConnectionHandler>) {
match event {
FromSwarm::ConnectionClosed(_)
| FromSwarm::ConnectionEstablished(_)
| FromSwarm::AddressChange(_)
| FromSwarm::DialFailure(_)
| FromSwarm::ListenFailure(_)
| FromSwarm::NewListener(_)
| FromSwarm::NewListenAddr(_)
| FromSwarm::ExpiredListenAddr(_)
| FromSwarm::ListenerError(_)
| FromSwarm::ListenerClosed(_)
| FromSwarm::NewExternalAddrCandidate(_)
| FromSwarm::ExternalAddrExpired(_)
| FromSwarm::ExternalAddrConfirmed(_) => {
// Rpc Behaviour does not act on these swarm events. We use a comprehensive match
// statement to ensure future events are dealt with appropriately.
}
}
}
fn on_connection_handler_event(
&mut self,
peer_id: PeerId,
conn_id: ConnectionId,
event: <Self::ConnectionHandler as ConnectionHandler>::ToBehaviour,
) {
if let Ok(RPCReceived::Request(ref id, ref req)) = event {
if let Some(limiter) = self.limiter.as_mut() {
@@ -238,12 +315,11 @@ where
match limiter.allows(&peer_id, req) {
Ok(()) => {
// send the event to the user
self.events.push(ToSwarm::GenerateEvent(RPCMessage {
peer_id,
conn_id,
event,
}))
}
Err(RateLimitedErr::TooLarge) => {
// we set the batch sizes, so this is a coding/config err for most protocols
@@ -283,20 +359,18 @@ where
}
} else {
// No rate limiting, send the event to the user
self.events.push(ToSwarm::GenerateEvent(RPCMessage {
peer_id,
conn_id,
event,
}))
}
} else {
self.events.push(ToSwarm::GenerateEvent(RPCMessage {
peer_id,
conn_id,
event,
}));
}
}
@@ -304,7 +378,7 @@
&mut self,
cx: &mut Context,
_: &mut impl PollParameters,
) -> Poll<ToSwarm<Self::ToSwarm, THandlerInEvent<Self>>> {
// let the rate limiter prune.
if let Some(limiter) = self.limiter.as_mut() {
let _ = limiter.poll_unpin(cx);

View File

@@ -7,7 +7,7 @@ use crate::rpc::{
use futures::future::BoxFuture;
use futures::prelude::{AsyncRead, AsyncWrite};
use futures::{FutureExt, StreamExt};
use libp2p::core::{InboundUpgrade, UpgradeInfo};
use ssz::Encode;
use ssz_types::VariableList;
use std::io;
@@ -72,7 +72,7 @@ lazy_static! {
/// The `BeaconBlockMerge` block has an `ExecutionPayload` field which has a max size ~16 GiB for future proofing.
/// We calculate the value from its fields instead of constructing the block and checking the length.
/// Note: This is only the theoretical upper bound. We further bound the max size we receive over the network
/// with `max_chunk_size`.
pub static ref SIGNED_BEACON_BLOCK_MERGE_MAX: usize =
// Size of a full altair block
*SIGNED_BEACON_BLOCK_ALTAIR_MAX
@@ -129,27 +129,19 @@ lazy_static! {
.len();
}
/// The maximum bytes that can be sent across the RPC pre-merge.
pub(crate) const MAX_RPC_SIZE: usize = 1_048_576; // 1M
/// The maximum bytes that can be sent across the RPC post-merge.
pub(crate) const MAX_RPC_SIZE_POST_MERGE: usize = 10 * 1_048_576; // 10M
pub(crate) const MAX_RPC_SIZE_POST_CAPELLA: usize = 10 * 1_048_576; // 10M
pub(crate) const MAX_RPC_SIZE_POST_DENEB: usize = 10 * 1_048_576; // 10M
/// The protocol prefix the RPC protocol id. /// The protocol prefix the RPC protocol id.
const PROTOCOL_PREFIX: &str = "/eth2/beacon_chain/req"; const PROTOCOL_PREFIX: &str = "/eth2/beacon_chain/req";
/// Time allowed for the first byte of a request to arrive before we time out (Time To First Byte).
const TTFB_TIMEOUT: u64 = 5;
/// The number of seconds to wait for the first bytes of a request once a protocol has been /// The number of seconds to wait for the first bytes of a request once a protocol has been
/// established before the stream is terminated. /// established before the stream is terminated.
const REQUEST_TIMEOUT: u64 = 15; const REQUEST_TIMEOUT: u64 = 15;
/// Returns the maximum bytes that can be sent across the RPC. /// Returns the maximum bytes that can be sent across the RPC.
pub fn max_rpc_size(fork_context: &ForkContext) -> usize { pub fn max_rpc_size(fork_context: &ForkContext, max_chunk_size: usize) -> usize {
match fork_context.current_fork() { match fork_context.current_fork() {
ForkName::Altair | ForkName::Base => MAX_RPC_SIZE, ForkName::Altair | ForkName::Base => max_chunk_size / 10,
ForkName::Merge => MAX_RPC_SIZE_POST_MERGE, ForkName::Merge => max_chunk_size,
ForkName::Capella => MAX_RPC_SIZE_POST_CAPELLA, ForkName::Capella => max_chunk_size,
ForkName::Deneb => MAX_RPC_SIZE_POST_DENEB, ForkName::Deneb => max_chunk_size,
} }
} }
@ -318,6 +310,7 @@ pub struct RPCProtocol<TSpec: EthSpec> {
pub max_rpc_size: usize, pub max_rpc_size: usize,
pub enable_light_client_server: bool, pub enable_light_client_server: bool,
pub phantom: PhantomData<TSpec>, pub phantom: PhantomData<TSpec>,
pub ttfb_timeout: Duration,
} }
impl<TSpec: EthSpec> UpgradeInfo for RPCProtocol<TSpec> { impl<TSpec: EthSpec> UpgradeInfo for RPCProtocol<TSpec> {
@ -376,6 +369,12 @@ pub struct ProtocolId {
protocol_id: String, protocol_id: String,
} }
impl AsRef<str> for ProtocolId {
fn as_ref(&self) -> &str {
self.protocol_id.as_ref()
}
}
impl ProtocolId { impl ProtocolId {
/// Returns min and max size for messages of given protocol id requests. /// Returns min and max size for messages of given protocol id requests.
pub fn rpc_request_limits(&self) -> RpcLimits { pub fn rpc_request_limits(&self) -> RpcLimits {
@ -488,12 +487,6 @@ pub fn rpc_blob_limits<T: EthSpec>() -> RpcLimits {
) )
} }
impl ProtocolName for ProtocolId {
fn protocol_name(&self) -> &[u8] {
self.protocol_id.as_bytes()
}
}
/* Inbound upgrade */ /* Inbound upgrade */
// The inbound protocol reads the request, decodes it and returns the stream to the protocol // The inbound protocol reads the request, decodes it and returns the stream to the protocol
@ -528,7 +521,7 @@ where
} }
}; };
let mut timed_socket = TimeoutStream::new(socket); let mut timed_socket = TimeoutStream::new(socket);
timed_socket.set_read_timeout(Some(Duration::from_secs(TTFB_TIMEOUT))); timed_socket.set_read_timeout(Some(self.ttfb_timeout));
let socket = Framed::new(Box::pin(timed_socket), codec); let socket = Framed::new(Box::pin(timed_socket), codec);
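
Two API shifts land in this file: protocol ids now advertise themselves through a plain `AsRef<str>` impl (libp2p removed the `ProtocolName` trait), and the per-fork RPC size caps are derived from the chain spec's `max_chunk_size` instead of hard-coded constants. A compilable stand-in of the new sizing rule; the local `ForkName` enum mirrors the real one, and the 10 MiB chunk size is the assumed spec default, which reproduces the old 1M/10M constants:

#[allow(dead_code)]
enum ForkName {
    Base,
    Altair,
    Merge,
    Capella,
    Deneb,
}

// Pre-merge forks keep the old 1 MiB cap by dividing the spec value by 10.
fn max_rpc_size(current_fork: ForkName, max_chunk_size: usize) -> usize {
    match current_fork {
        ForkName::Base | ForkName::Altair => max_chunk_size / 10,
        ForkName::Merge | ForkName::Capella | ForkName::Deneb => max_chunk_size,
    }
}

fn main() {
    let max_chunk_size = 10 * 1_048_576; // assumed spec default: 10 MiB
    assert_eq!(max_rpc_size(ForkName::Base, max_chunk_size), 1_048_576);
    assert_eq!(max_rpc_size(ForkName::Deneb, max_chunk_size), 10 * 1_048_576);
}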
View File
@@ -64,7 +64,7 @@ impl<Id: ReqId, TSpec: EthSpec> SelfRateLimiter<Id, TSpec> {
     }
     /// Checks if the rate limiter allows the request. If it's allowed, returns the
-    /// [`NetworkBehaviourAction`] that should be emitted. When not allowed, the request is delayed
+    /// [`ToSwarm`] that should be emitted. When not allowed, the request is delayed
     /// until it can be sent.
     pub fn allows(
         &mut self,
@@ -95,7 +95,7 @@ impl<Id: ReqId, TSpec: EthSpec> SelfRateLimiter<Id, TSpec> {
     }
     /// Auxiliary function to deal with self rate limiting outcomes. If the rate limiter allows the
-    /// request, the [`NetworkBehaviourAction`] that should be emitted is returned. If the request
+    /// request, the [`ToSwarm`] that should be emitted is returned. If the request
     /// should be delayed, it's returned with the duration to wait.
     fn try_send_request(
         limiter: &mut RateLimiter,
View File
@@ -1,6 +1,6 @@
 use std::sync::Arc;
-use libp2p::core::connection::ConnectionId;
+use libp2p::swarm::ConnectionId;
 use types::light_client_bootstrap::LightClientBootstrap;
 use types::{BlobSidecar, EthSpec, SignedBeaconBlock};
View File
@@ -3,21 +3,27 @@ use crate::peer_manager::PeerManager;
 use crate::rpc::{ReqId, RPC};
 use crate::types::SnappyTransform;
-use libp2p::gossipsub::subscription_filter::{
-    MaxCountSubscriptionFilter, WhitelistSubscriptionFilter,
-};
-use libp2p::gossipsub::Gossipsub as BaseGossipsub;
-use libp2p::identify::Behaviour as Identify;
+use libp2p::gossipsub;
+use libp2p::identify;
 use libp2p::swarm::NetworkBehaviour;
 use types::EthSpec;
 use super::api_types::RequestId;
-pub type SubscriptionFilter = MaxCountSubscriptionFilter<WhitelistSubscriptionFilter>;
-pub type Gossipsub = BaseGossipsub<SnappyTransform, SubscriptionFilter>;
+pub type SubscriptionFilter =
+    gossipsub::MaxCountSubscriptionFilter<gossipsub::WhitelistSubscriptionFilter>;
+pub type Gossipsub = gossipsub::Behaviour<SnappyTransform, SubscriptionFilter>;
 #[derive(NetworkBehaviour)]
-pub(crate) struct Behaviour<AppReqId: ReqId, TSpec: EthSpec> {
+pub(crate) struct Behaviour<AppReqId, TSpec>
+where
+    AppReqId: ReqId,
+    TSpec: EthSpec,
+{
+    /// Peers banned.
+    pub banned_peers: libp2p::allow_block_list::Behaviour<libp2p::allow_block_list::BlockedPeers>,
+    /// Keep track of active and pending connections to enforce hard limits.
+    pub connection_limits: libp2p::connection_limits::Behaviour,
     /// The routing pub-sub mechanism for eth2.
     pub gossipsub: Gossipsub,
     /// The Eth2 RPC specified in the wire-0 protocol.
@@ -27,7 +33,7 @@ pub(crate) struct Behaviour<AppReqId: ReqId, TSpec: EthSpec> {
     /// Keep regular connection to peers and disconnect if absent.
     // NOTE: The id protocol is used for initial interop. This will be removed by mainnet.
     /// Provides IP addresses and peer information.
-    pub identify: Identify,
+    pub identify: identify::Behaviour,
     /// The peer manager that keeps track of peer's reputation and status.
     pub peer_manager: PeerManager<TSpec>,
 }
View File
@@ -1,7 +1,8 @@
 use crate::types::{GossipEncoding, GossipKind, GossipTopic};
 use crate::{error, TopicHash};
 use libp2p::gossipsub::{
-    GossipsubConfig, IdentTopic as Topic, PeerScoreParams, PeerScoreThresholds, TopicScoreParams,
+    Config as GossipsubConfig, IdentTopic as Topic, PeerScoreParams, PeerScoreThresholds,
+    TopicScoreParams,
 };
 use std::cmp::max;
 use std::collections::HashMap;
View File
@@ -1,6 +1,6 @@
 use self::behaviour::Behaviour;
 use self::gossip_cache::GossipCache;
-use crate::config::{gossipsub_config, NetworkLoad};
+use crate::config::{gossipsub_config, GossipsubConfigParams, NetworkLoad};
 use crate::discovery::{
     subnet_predicate, DiscoveredPeers, Discovery, FIND_NODE_QUERY_CLOSEST_PEERS,
 };
@@ -24,15 +24,12 @@ use api_types::{PeerRequestId, Request, RequestId, Response};
 use futures::stream::StreamExt;
 use gossipsub_scoring_parameters::{lighthouse_gossip_thresholds, PeerScoreSettings};
 use libp2p::bandwidth::BandwidthSinks;
-use libp2p::gossipsub::error::PublishError;
-use libp2p::gossipsub::metrics::Config as GossipsubMetricsConfig;
-use libp2p::gossipsub::subscription_filter::MaxCountSubscriptionFilter;
 use libp2p::gossipsub::{
-    GossipsubEvent, IdentTopic as Topic, MessageAcceptance, MessageAuthenticity, MessageId,
+    self, IdentTopic as Topic, MessageAcceptance, MessageAuthenticity, MessageId, PublishError,
 };
-use libp2p::identify::{Behaviour as Identify, Config as IdentifyConfig, Event as IdentifyEvent};
+use libp2p::identify;
 use libp2p::multiaddr::{Multiaddr, Protocol as MProtocol};
-use libp2p::swarm::{ConnectionLimits, Swarm, SwarmBuilder, SwarmEvent};
+use libp2p::swarm::{Swarm, SwarmBuilder, SwarmEvent};
 use libp2p::PeerId;
 use slog::{crit, debug, info, o, trace, warn};
 use std::path::PathBuf;
@@ -67,10 +64,6 @@ pub enum NetworkEvent<AppReqId: ReqId, TSpec: EthSpec> {
     PeerConnectedIncoming(PeerId),
     /// A peer has disconnected.
     PeerDisconnected(PeerId),
-    /// The peer needs to be banned.
-    PeerBanned(PeerId),
-    /// The peer has been unbanned.
-    PeerUnbanned(PeerId),
     /// An RPC Request that was sent failed.
     RPCFailed {
         /// The id of the failed request.
@@ -232,7 +225,7 @@ impl<AppReqId: ReqId, TSpec: EthSpec> Network<AppReqId, TSpec> {
         let update_gossipsub_scores = tokio::time::interval(params.decay_interval);
         let possible_fork_digests = ctx.fork_context.all_fork_digests();
-        let filter = MaxCountSubscriptionFilter {
+        let filter = gossipsub::MaxCountSubscriptionFilter {
             filter: utils::create_whitelist_filter(
                 possible_fork_digests,
                 ctx.chain_spec.attestation_subnet_count,
@@ -244,12 +237,20 @@ impl<AppReqId: ReqId, TSpec: EthSpec> Network<AppReqId, TSpec> {
             max_subscriptions_per_request: 160,
         };
-        config.gs_config = gossipsub_config(config.network_load, ctx.fork_context.clone());
+        let gossipsub_config_params = GossipsubConfigParams {
+            message_domain_valid_snappy: ctx.chain_spec.message_domain_valid_snappy,
+            gossip_max_size: ctx.chain_spec.gossip_max_size as usize,
+        };
+        config.gs_config = gossipsub_config(
+            config.network_load,
+            ctx.fork_context.clone(),
+            gossipsub_config_params,
+        );
         // If metrics are enabled for gossipsub build the configuration
         let gossipsub_metrics = ctx
             .gossipsub_registry
-            .map(|registry| (registry, GossipsubMetricsConfig::default()));
+            .map(|registry| (registry, Default::default()));
         let snappy_transform = SnappyTransform::new(config.gs_config.max_transmit_size());
         let mut gossipsub = Gossipsub::new_with_subscription_filter_and_transform(
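
`gossipsub_config` now takes spec-owned values through a params struct instead of reading crate-level constants. A stand-in sketch of that flow; the field names follow the diff, but the domain bytes and size are assumed defaults, and the stub simply echoes what the real gossipsub `ConfigBuilder` would be fed:

// Stand-in mirroring `GossipsubConfigParams` from the diff above.
struct GossipsubConfigParams {
    message_domain_valid_snappy: [u8; 4],
    gossip_max_size: usize,
}

// The real function builds a gossipsub config (max transmit size, message-id
// function salted with the domain, etc.); this stub returns the raw inputs.
fn gossipsub_config(params: GossipsubConfigParams) -> (usize, [u8; 4]) {
    (params.gossip_max_size, params.message_domain_valid_snappy)
}

fn main() {
    let params = GossipsubConfigParams {
        message_domain_valid_snappy: [1, 0, 0, 0], // assumed domain prefix
        gossip_max_size: 10 * 1_048_576,           // assumed: 10 MiB
    };
    let (max_transmit_size, _domain) = gossipsub_config(params);
    assert_eq!(max_transmit_size, 10 * 1_048_576);
}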
@@ -268,36 +269,48 @@ impl<AppReqId: ReqId, TSpec: EthSpec> Network<AppReqId, TSpec> {
             (gossipsub, update_gossipsub_scores)
         };
+        let network_params = NetworkParams {
+            max_chunk_size: ctx.chain_spec.max_chunk_size as usize,
+            ttfb_timeout: ctx.chain_spec.ttfb_timeout(),
+            resp_timeout: ctx.chain_spec.resp_timeout(),
+        };
         let eth2_rpc = RPC::new(
             ctx.fork_context.clone(),
             config.enable_light_client_server,
             config.inbound_rate_limiter_config.clone(),
             config.outbound_rate_limiter_config.clone(),
             log.clone(),
+            network_params,
         );
         let discovery = {
             // Build and start the discovery sub-behaviour
-            let mut discovery =
-                Discovery::new(&local_keypair, &config, network_globals.clone(), &log).await?;
+            let mut discovery = Discovery::new(
+                local_keypair.clone(),
+                &config,
+                network_globals.clone(),
+                &log,
+            )
+            .await?;
             // start searching for peers
             discovery.discover_peers(FIND_NODE_QUERY_CLOSEST_PEERS);
             discovery
         };
         let identify = {
+            let local_public_key = local_keypair.public();
             let identify_config = if config.private {
-                IdentifyConfig::new(
+                identify::Config::new(
                     "".into(),
-                    local_keypair.public(), // Still send legitimate public key
+                    local_public_key, // Still send legitimate public key
                 )
                 .with_cache_size(0)
             } else {
-                IdentifyConfig::new("eth2/1.0.0".into(), local_keypair.public())
+                identify::Config::new("eth2/1.0.0".into(), local_public_key)
                     .with_agent_version(lighthouse_version::version_with_platform())
                     .with_cache_size(0)
             };
-            Identify::new(identify_config)
+            identify::Behaviour::new(identify_config)
         };
         let peer_manager = {
@@ -310,13 +323,38 @@ impl<AppReqId: ReqId, TSpec: EthSpec> Network<AppReqId, TSpec> {
             PeerManager::new(peer_manager_cfg, network_globals.clone(), &log)?
         };
+        let connection_limits = {
+            let limits = libp2p::connection_limits::ConnectionLimits::default()
+                .with_max_pending_incoming(Some(5))
+                .with_max_pending_outgoing(Some(16))
+                .with_max_established_incoming(Some(
+                    (config.target_peers as f32
+                        * (1.0 + PEER_EXCESS_FACTOR - MIN_OUTBOUND_ONLY_FACTOR))
+                        .ceil() as u32,
+                ))
+                .with_max_established_outgoing(Some(
+                    (config.target_peers as f32 * (1.0 + PEER_EXCESS_FACTOR)).ceil() as u32,
+                ))
+                .with_max_established(Some(
+                    (config.target_peers as f32 * (1.0 + PEER_EXCESS_FACTOR + PRIORITY_PEER_EXCESS))
+                        .ceil() as u32,
+                ))
+                .with_max_established_per_peer(Some(MAX_CONNECTIONS_PER_PEER));
+            libp2p::connection_limits::Behaviour::new(limits)
+        };
+        let banned_peers = libp2p::allow_block_list::Behaviour::default();
         let behaviour = {
             Behaviour {
+                banned_peers,
                 gossipsub,
                 eth2_rpc,
                 discovery,
                 identify,
                 peer_manager,
+                connection_limits,
             }
         };
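
The connection-limit arithmetic moves verbatim from the swarm builder into the dedicated `libp2p::connection_limits` sub-behaviour. A worked example of the formulas, with factor values that are assumptions for illustration rather than Lighthouse's actual constants:

fn main() {
    let target_peers: f32 = 100.0;
    let peer_excess_factor: f32 = 0.1; // assumed
    let min_outbound_only_factor: f32 = 0.2; // assumed
    let priority_peer_excess: f32 = 0.2; // assumed

    // Fewer inbound than outbound slots, reserving room for outbound-only
    // peers, plus extra total headroom for priority peers.
    let max_in = (target_peers * (1.0 + peer_excess_factor - min_outbound_only_factor)).ceil();
    let max_out = (target_peers * (1.0 + peer_excess_factor)).ceil();
    let max_total = (target_peers * (1.0 + peer_excess_factor + priority_peer_excess)).ceil();

    // Roughly 90 inbound, 110 outbound and 130 total established connections
    // (f32 rounding can nudge a ceil() result up by one).
    println!("in={max_in} out={max_out} total={max_total}");
}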
@@ -334,22 +372,6 @@ impl<AppReqId: ReqId, TSpec: EthSpec> Network<AppReqId, TSpec> {
         }
-        // sets up the libp2p connection limits
-        let limits = ConnectionLimits::default()
-            .with_max_pending_incoming(Some(5))
-            .with_max_pending_outgoing(Some(16))
-            .with_max_established_incoming(Some(
-                (config.target_peers as f32
-                    * (1.0 + PEER_EXCESS_FACTOR - MIN_OUTBOUND_ONLY_FACTOR))
-                    .ceil() as u32,
-            ))
-            .with_max_established_outgoing(Some(
-                (config.target_peers as f32 * (1.0 + PEER_EXCESS_FACTOR)).ceil() as u32,
-            ))
-            .with_max_established(Some(
-                (config.target_peers as f32 * (1.0 + PEER_EXCESS_FACTOR + PRIORITY_PEER_EXCESS))
-                    .ceil() as u32,
-            ))
-            .with_max_established_per_peer(Some(MAX_CONNECTIONS_PER_PEER));
         (
             SwarmBuilder::with_executor(
@@ -359,8 +381,7 @@ impl<AppReqId: ReqId, TSpec: EthSpec> Network<AppReqId, TSpec> {
                 Executor(executor),
             )
             .notify_handler_buffer_size(std::num::NonZeroUsize::new(7).expect("Not zero"))
-            .connection_event_buffer_size(64)
-            .connection_limits(limits)
+            .per_connection_event_buffer_size(4)
             .build(),
             bandwidth,
         )
@@ -401,7 +422,7 @@ impl<AppReqId: ReqId, TSpec: EthSpec> Network<AppReqId, TSpec> {
         match self.swarm.listen_on(listen_multiaddr.clone()) {
             Ok(_) => {
                 let mut log_address = listen_multiaddr;
-                log_address.push(MProtocol::P2p(enr.peer_id().into()));
+                log_address.push(MProtocol::P2p(enr.peer_id()));
                 info!(self.log, "Listening established"; "address" => %log_address);
             }
             Err(err) => {
@@ -498,7 +519,7 @@ impl<AppReqId: ReqId, TSpec: EthSpec> Network<AppReqId, TSpec> {
         &mut self.swarm.behaviour_mut().discovery
     }
     /// Provides IP addresses and peer information.
-    pub fn identify_mut(&mut self) -> &mut Identify {
+    pub fn identify_mut(&mut self) -> &mut identify::Behaviour {
         &mut self.swarm.behaviour_mut().identify
     }
     /// The peer manager that keeps track of peer's reputation and status.
@@ -519,7 +540,7 @@ impl<AppReqId: ReqId, TSpec: EthSpec> Network<AppReqId, TSpec> {
         &self.swarm.behaviour().discovery
     }
     /// Provides IP addresses and peer information.
-    pub fn identify(&self) -> &Identify {
+    pub fn identify(&self) -> &identify::Behaviour {
         &self.swarm.behaviour().identify
     }
     /// The peer manager that keeps track of peer's reputation and status.
@@ -1056,9 +1077,12 @@ impl<AppReqId: ReqId, TSpec: EthSpec> Network<AppReqId, TSpec> {
     /* Sub-behaviour event handling functions */
     /// Handle a gossipsub event.
-    fn inject_gs_event(&mut self, event: GossipsubEvent) -> Option<NetworkEvent<AppReqId, TSpec>> {
+    fn inject_gs_event(
+        &mut self,
+        event: gossipsub::Event,
+    ) -> Option<NetworkEvent<AppReqId, TSpec>> {
         match event {
-            GossipsubEvent::Message {
+            gossipsub::Event::Message {
                 propagation_source,
                 message_id: id,
                 message: gs_msg,
@@ -1088,7 +1112,7 @@ impl<AppReqId: ReqId, TSpec: EthSpec> Network<AppReqId, TSpec> {
                     }
                 }
             }
-            GossipsubEvent::Subscribed { peer_id, topic } => {
+            gossipsub::Event::Subscribed { peer_id, topic } => {
                 if let Ok(topic) = GossipTopic::decode(topic.as_str()) {
                     if let Some(subnet_id) = topic.subnet_id() {
                         self.network_globals
@@ -1129,7 +1153,7 @@ impl<AppReqId: ReqId, TSpec: EthSpec> Network<AppReqId, TSpec> {
                     }
                 }
             }
-            GossipsubEvent::Unsubscribed { peer_id, topic } => {
+            gossipsub::Event::Unsubscribed { peer_id, topic } => {
                 if let Some(subnet_id) = subnet_from_topic_hash(&topic) {
                     self.network_globals
                         .peers
@@ -1137,7 +1161,7 @@ impl<AppReqId: ReqId, TSpec: EthSpec> Network<AppReqId, TSpec> {
                         .remove_subscription(&peer_id, &subnet_id);
                 }
             }
-            GossipsubEvent::GossipsubNotSupported { peer_id } => {
+            gossipsub::Event::GossipsubNotSupported { peer_id } => {
                 debug!(self.log, "Peer does not support gossipsub"; "peer_id" => %peer_id);
                 self.peer_manager_mut().report_peer(
                     &peer_id,
@@ -1372,10 +1396,10 @@ impl<AppReqId: ReqId, TSpec: EthSpec> Network<AppReqId, TSpec> {
     /// Handle an identify event.
     fn inject_identify_event(
         &mut self,
-        event: IdentifyEvent,
+        event: identify::Event,
     ) -> Option<NetworkEvent<AppReqId, TSpec>> {
         match event {
-            IdentifyEvent::Received { peer_id, mut info } => {
+            identify::Event::Received { peer_id, mut info } => {
                 if info.listen_addrs.len() > MAX_IDENTIFY_ADDRESSES {
                     debug!(
                         self.log,
@@ -1386,9 +1410,9 @@ impl<AppReqId: ReqId, TSpec: EthSpec> Network<AppReqId, TSpec> {
                 // send peer info to the peer manager.
                 self.peer_manager_mut().identify(&peer_id, &info);
             }
-            IdentifyEvent::Sent { .. } => {}
-            IdentifyEvent::Error { .. } => {}
-            IdentifyEvent::Pushed { .. } => {}
+            identify::Event::Sent { .. } => {}
+            identify::Event::Error { .. } => {}
+            identify::Event::Pushed { .. } => {}
         }
         None
     }
@@ -1409,14 +1433,17 @@ impl<AppReqId: ReqId, TSpec: EthSpec> Network<AppReqId, TSpec> {
             Some(NetworkEvent::PeerDisconnected(peer_id))
         }
         PeerManagerEvent::Banned(peer_id, associated_ips) => {
-            self.swarm.ban_peer_id(peer_id);
+            self.swarm.behaviour_mut().banned_peers.block_peer(peer_id);
             self.discovery_mut().ban_peer(&peer_id, associated_ips);
-            Some(NetworkEvent::PeerBanned(peer_id))
+            None
         }
         PeerManagerEvent::UnBanned(peer_id, associated_ips) => {
-            self.swarm.unban_peer_id(peer_id);
+            self.swarm
+                .behaviour_mut()
+                .banned_peers
+                .unblock_peer(peer_id);
             self.discovery_mut().unban_peer(&peer_id, associated_ips);
-            Some(NetworkEvent::PeerUnbanned(peer_id))
+            None
        }
        PeerManagerEvent::Status(peer_id) => {
            // it's time to status. We don't keep a beacon chain reference here, so we inform
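
Banning likewise becomes an ordinary sub-behaviour: the removed swarm-level `ban_peer_id`/`unban_peer_id` calls are replaced by `block_peer`/`unblock_peer` on the `allow_block_list` behaviour, which then denies connections from blocked peers. A std-only stand-in of those semantics (string ids in place of real `PeerId`s):

use std::collections::HashSet;

// Simplified stand-in for `allow_block_list::Behaviour<BlockedPeers>`.
struct BlockedPeers {
    blocked: HashSet<String>,
}

impl BlockedPeers {
    fn block_peer(&mut self, peer: &str) {
        self.blocked.insert(peer.to_string());
    }
    fn unblock_peer(&mut self, peer: &str) {
        self.blocked.remove(peer);
    }
    // The real behaviour denies new connections from blocked peers.
    fn allows(&self, peer: &str) -> bool {
        !self.blocked.contains(peer)
    }
}

fn main() {
    let mut banned_peers = BlockedPeers { blocked: HashSet::new() };
    banned_peers.block_peer("peer-a");
    assert!(!banned_peers.allows("peer-a"));
    banned_peers.unblock_peer("peer-a");
    assert!(banned_peers.allows("peer-a"));
}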
@@ -1463,17 +1490,20 @@ impl<AppReqId: ReqId, TSpec: EthSpec> Network<AppReqId, TSpec> {
         let maybe_event = match swarm_event {
             SwarmEvent::Behaviour(behaviour_event) => match behaviour_event {
                 // Handle sub-behaviour events.
+                BehaviourEvent::BannedPeers(void) => void::unreachable(void),
                 BehaviourEvent::Gossipsub(ge) => self.inject_gs_event(ge),
                 BehaviourEvent::Eth2Rpc(re) => self.inject_rpc_event(re),
                 BehaviourEvent::Discovery(de) => self.inject_discovery_event(de),
                 BehaviourEvent::Identify(ie) => self.inject_identify_event(ie),
                 BehaviourEvent::PeerManager(pe) => self.inject_pm_event(pe),
+                BehaviourEvent::ConnectionLimits(le) => void::unreachable(le),
             },
             SwarmEvent::ConnectionEstablished { .. } => None,
             SwarmEvent::ConnectionClosed { .. } => None,
             SwarmEvent::IncomingConnection {
                 local_addr,
                 send_back_addr,
+                connection_id: _,
             } => {
                 trace!(self.log, "Incoming connection"; "our_addr" => %local_addr, "from" => %send_back_addr);
                 None
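
The two new sub-behaviours never emit events, so their variants in the derived `BehaviourEvent` enum wrap uninhabited types and are discharged with `void::unreachable`. A self-contained sketch of the same pattern without the `void` crate:

// An enum with no variants cannot be constructed.
enum Never {}

// Matching on an uninhabited value needs no arms; the compiler accepts the
// empty match and knows this function can never actually run.
#[allow(dead_code)]
fn on_banned_peers_event(ev: Never) -> ! {
    match ev {}
}

fn main() {}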
@@ -1482,19 +1512,41 @@ impl<AppReqId: ReqId, TSpec: EthSpec> Network<AppReqId, TSpec> {
                 local_addr,
                 send_back_addr,
                 error,
+                connection_id: _,
             } => {
-                debug!(self.log, "Failed incoming connection"; "our_addr" => %local_addr, "from" => %send_back_addr, "error" => %error);
+                let error_repr = match error {
+                    libp2p::swarm::ListenError::Aborted => {
+                        "Incoming connection aborted".to_string()
+                    }
+                    libp2p::swarm::ListenError::WrongPeerId { obtained, endpoint } => {
+                        format!("Wrong peer id, obtained {obtained}, endpoint {endpoint:?}")
+                    }
+                    libp2p::swarm::ListenError::LocalPeerId { endpoint } => {
+                        format!("Dialing local peer id {endpoint:?}")
+                    }
+                    libp2p::swarm::ListenError::Denied { cause } => {
+                        format!("Connection was denied with cause {cause}")
+                    }
+                    libp2p::swarm::ListenError::Transport(t) => match t {
+                        libp2p::TransportError::MultiaddrNotSupported(m) => {
+                            format!("Transport error: Multiaddr not supported: {m}")
+                        }
+                        libp2p::TransportError::Other(e) => {
+                            format!("Transport error: other: {e}")
+                        }
+                    },
+                };
+                debug!(self.log, "Failed incoming connection"; "our_addr" => %local_addr, "from" => %send_back_addr, "error" => error_repr);
                 None
             }
-            SwarmEvent::OutgoingConnectionError { peer_id, error } => {
-                debug!(self.log, "Failed to dial address"; "peer_id" => ?peer_id, "error" => %error);
-                None
-            }
-            SwarmEvent::BannedPeer {
-                peer_id,
-                endpoint: _,
+            SwarmEvent::OutgoingConnectionError {
+                peer_id: _,
+                error: _,
+                connection_id: _,
             } => {
-                debug!(self.log, "Banned peer connection rejected"; "peer_id" => %peer_id);
+                // The Behaviour event is more general than the swarm event here. It includes
+                // connection failures. So we use that log for now, in the peer manager
+                // behaviour implementation.
                 None
             }
             SwarmEvent::NewListenAddr { address, .. } => {
@@ -1523,7 +1575,13 @@ impl<AppReqId: ReqId, TSpec: EthSpec> Network<AppReqId, TSpec> {
                 None
             }
         }
-        SwarmEvent::Dialing(_) => None,
+        SwarmEvent::Dialing {
+            peer_id,
+            connection_id: _,
+        } => {
+            debug!(self.log, "Swarm Dialing"; "peer_id" => ?peer_id);
+            None
+        }
     };
     if let Some(ev) = maybe_event {
View File
@@ -4,13 +4,11 @@ use crate::types::{
     error, EnrAttestationBitfield, EnrSyncCommitteeBitfield, GossipEncoding, GossipKind,
 };
 use crate::{GossipTopic, NetworkConfig};
-use libp2p::bandwidth::{BandwidthLogging, BandwidthSinks};
-use libp2p::core::{
-    identity::Keypair, multiaddr::Multiaddr, muxing::StreamMuxerBox, transport::Boxed,
-};
-use libp2p::gossipsub::subscription_filter::WhitelistSubscriptionFilter;
-use libp2p::gossipsub::IdentTopic as Topic;
-use libp2p::{core, noise, PeerId, Transport};
+use libp2p::bandwidth::BandwidthSinks;
+use libp2p::core::{multiaddr::Multiaddr, muxing::StreamMuxerBox, transport::Boxed};
+use libp2p::gossipsub;
+use libp2p::identity::{secp256k1, Keypair};
+use libp2p::{core, noise, yamux, PeerId, Transport, TransportExt};
 use prometheus_client::registry::Registry;
 use slog::{debug, warn};
 use ssz::Decode;
@@ -52,30 +50,19 @@ pub fn build_transport(
         transport.or_transport(libp2p::websocket::WsConfig::new(trans_clone))
     };
-    let (transport, bandwidth) = BandwidthLogging::new(transport);
-    // mplex config
-    let mut mplex_config = libp2p::mplex::MplexConfig::new();
-    mplex_config.set_max_buffer_size(256);
-    mplex_config.set_max_buffer_behaviour(libp2p::mplex::MaxBufferBehaviour::Block);
     // yamux config
-    let mut yamux_config = libp2p::yamux::YamuxConfig::default();
-    yamux_config.set_window_update_mode(libp2p::yamux::WindowUpdateMode::on_read());
+    let mut yamux_config = yamux::Config::default();
+    yamux_config.set_window_update_mode(yamux::WindowUpdateMode::on_read());
+    let (transport, bandwidth) = transport
+        .upgrade(core::upgrade::Version::V1)
+        .authenticate(generate_noise_config(&local_private_key))
+        .multiplex(yamux_config)
+        .timeout(Duration::from_secs(10))
+        .boxed()
+        .with_bandwidth_logging();
     // Authentication
-    Ok((
-        transport
-            .upgrade(core::upgrade::Version::V1)
-            .authenticate(generate_noise_config(&local_private_key))
-            .multiplex(core::upgrade::SelectUpgrade::new(
-                yamux_config,
-                mplex_config,
-            ))
-            .timeout(Duration::from_secs(10))
-            .boxed(),
-        bandwidth,
-    ))
+    Ok((transport, bandwidth))
 }
 // Useful helper functions for debugging. Currently not used in the client.
@@ -94,10 +81,10 @@ fn keypair_from_hex(hex_bytes: &str) -> error::Result<Keypair> {
 #[allow(dead_code)]
 fn keypair_from_bytes(mut bytes: Vec<u8>) -> error::Result<Keypair> {
-    libp2p::core::identity::secp256k1::SecretKey::from_bytes(&mut bytes)
+    secp256k1::SecretKey::try_from_bytes(&mut bytes)
         .map(|secret| {
-            let keypair: libp2p::core::identity::secp256k1::Keypair = secret.into();
-            Keypair::Secp256k1(keypair)
+            let keypair: secp256k1::Keypair = secret.into();
+            keypair.into()
        })
        .map_err(|e| format!("Unable to parse p2p secret key: {:?}", e).into())
 }
@@ -115,12 +102,10 @@ pub fn load_private_key(config: &NetworkConfig, log: &slog::Logger) -> Keypair {
             Err(_) => debug!(log, "Could not read network key file"),
             Ok(_) => {
                 // only accept secp256k1 keys for now
-                if let Ok(secret_key) =
-                    libp2p::core::identity::secp256k1::SecretKey::from_bytes(&mut key_bytes)
-                {
-                    let kp: libp2p::core::identity::secp256k1::Keypair = secret_key.into();
+                if let Ok(secret_key) = secp256k1::SecretKey::try_from_bytes(&mut key_bytes) {
+                    let kp: secp256k1::Keypair = secret_key.into();
                     debug!(log, "Loaded network key from disk.");
-                    return Keypair::Secp256k1(kp);
+                    return kp.into();
                 } else {
                     debug!(log, "Network key file is not a valid secp256k1 key");
                 }
@@ -129,34 +114,27 @@ pub fn load_private_key(config: &NetworkConfig, log: &slog::Logger) -> Keypair {
     }
     // if a key could not be loaded from disk, generate a new one and save it
-    let local_private_key = Keypair::generate_secp256k1();
-    if let Keypair::Secp256k1(key) = local_private_key.clone() {
-        let _ = std::fs::create_dir_all(&config.network_dir);
-        match File::create(network_key_f.clone())
-            .and_then(|mut f| f.write_all(&key.secret().to_bytes()))
-        {
-            Ok(_) => {
-                debug!(log, "New network key generated and written to disk");
-            }
-            Err(e) => {
-                warn!(
-                    log,
-                    "Could not write node key to file: {:?}. error: {}", network_key_f, e
-                );
-            }
-        }
-    }
-    local_private_key
+    let local_private_key = secp256k1::Keypair::generate();
+    let _ = std::fs::create_dir_all(&config.network_dir);
+    match File::create(network_key_f.clone())
+        .and_then(|mut f| f.write_all(&local_private_key.secret().to_bytes()))
+    {
+        Ok(_) => {
+            debug!(log, "New network key generated and written to disk");
+        }
+        Err(e) => {
+            warn!(
+                log,
+                "Could not write node key to file: {:?}. error: {}", network_key_f, e
+            );
+        }
+    }
+    local_private_key.into()
 }
 /// Generate authenticated XX Noise config from identity keys
-fn generate_noise_config(
-    identity_keypair: &Keypair,
-) -> noise::NoiseAuthenticated<noise::XX, noise::X25519Spec, ()> {
-    let static_dh_keys = noise::Keypair::<noise::X25519Spec>::new()
-        .into_authentic(identity_keypair)
-        .expect("signing can fail only once during starting a node");
-    noise::NoiseConfig::xx(static_dh_keys).into_authenticated()
+fn generate_noise_config(identity_keypair: &Keypair) -> noise::Config {
+    noise::Config::new(identity_keypair).expect("signing can fail only once during starting a node")
 }
 /// For a multiaddr that ends with a peer id, this strips this suffix. Rust-libp2p
@@ -237,11 +215,11 @@ pub(crate) fn create_whitelist_filter(
     attestation_subnet_count: u64,
     sync_committee_subnet_count: u64,
     blob_sidecar_subnet_count: u64,
-) -> WhitelistSubscriptionFilter {
+) -> gossipsub::WhitelistSubscriptionFilter {
     let mut possible_hashes = HashSet::new();
     for fork_digest in possible_fork_digests {
         let mut add = |kind| {
-            let topic: Topic =
+            let topic: gossipsub::IdentTopic =
                 GossipTopic::new(kind, GossipEncoding::SSZSnappy, fork_digest).into();
             possible_hashes.insert(topic.hash());
         };
@@ -266,7 +244,7 @@ pub(crate) fn create_whitelist_filter(
             add(BlobSidecar(id));
         }
     }
-    WhitelistSubscriptionFilter(possible_hashes)
+    gossipsub::WhitelistSubscriptionFilter(possible_hashes)
 }
 /// Persist metadata to disk
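
The identity API also moved: `libp2p::core::identity` is now `libp2p::identity`, `from_bytes` became `try_from_bytes`, and the `Keypair::Secp256k1` enum variant gave way to `From` conversions. The load-or-create flow itself stays simple; a std-only sketch with the crypto stubbed out (the file name and the 32-byte secret length are illustrative assumptions):

use std::fs::{self, File};
use std::io::{Read, Write};
use std::path::Path;

// Read raw secret bytes from disk if present, otherwise generate and persist.
// The real code parses them with secp256k1::SecretKey::try_from_bytes.
fn load_or_create_key(dir: &Path) -> std::io::Result<Vec<u8>> {
    let key_path = dir.join("key");
    if let Ok(mut f) = File::open(&key_path) {
        let mut bytes = Vec::new();
        f.read_to_end(&mut bytes)?;
        if bytes.len() == 32 {
            return Ok(bytes);
        }
    }
    let fresh = vec![7u8; 32]; // stand-in for a freshly generated secret
    fs::create_dir_all(dir)?;
    File::create(&key_path)?.write_all(&fresh)?;
    Ok(fresh)
}

fn main() -> std::io::Result<()> {
    let dir = std::env::temp_dir().join("network_dir_example");
    let key = load_or_create_key(&dir)?;
    assert_eq!(key.len(), 32);
    Ok(())
}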
View File
@@ -2,7 +2,7 @@
 use crate::types::{GossipEncoding, GossipKind, GossipTopic};
 use crate::TopicHash;
-use libp2p::gossipsub::{DataTransform, GossipsubMessage, RawGossipsubMessage};
+use libp2p::gossipsub;
 use snap::raw::{decompress_len, Decoder, Encoder};
 use ssz::{Decode, Encode};
 use std::boxed::Box;
@@ -58,12 +58,12 @@ impl SnappyTransform {
     }
 }
-impl DataTransform for SnappyTransform {
+impl gossipsub::DataTransform for SnappyTransform {
     // Provides the snappy decompression from RawGossipsubMessages
     fn inbound_transform(
         &self,
-        raw_message: RawGossipsubMessage,
-    ) -> Result<GossipsubMessage, std::io::Error> {
+        raw_message: gossipsub::RawMessage,
+    ) -> Result<gossipsub::Message, std::io::Error> {
         // check the length of the raw bytes
         let len = decompress_len(&raw_message.data)?;
         if len > self.max_size_per_message {
@@ -77,7 +77,7 @@ impl DataTransform for SnappyTransform {
         let decompressed_data = decoder.decompress_vec(&raw_message.data)?;
         // Build the GossipsubMessage struct
-        Ok(GossipsubMessage {
+        Ok(gossipsub::Message {
             source: raw_message.source,
             data: decompressed_data,
             sequence_number: raw_message.sequence_number,
View File
@ -1,5 +1,5 @@
#![cfg(test)] #![cfg(test)]
use libp2p::gossipsub::GossipsubConfigBuilder; use libp2p::gossipsub;
use lighthouse_network::service::Network as LibP2PService; use lighthouse_network::service::Network as LibP2PService;
use lighthouse_network::Enr; use lighthouse_network::Enr;
use lighthouse_network::EnrExt; use lighthouse_network::EnrExt;
@ -84,7 +84,7 @@ pub fn build_config(port: u16, mut boot_nodes: Vec<Enr>) -> NetworkConfig {
config.boot_nodes_enr.append(&mut boot_nodes); config.boot_nodes_enr.append(&mut boot_nodes);
config.network_dir = path.into_path(); config.network_dir = path.into_path();
// Reduce gossipsub heartbeat parameters // Reduce gossipsub heartbeat parameters
config.gs_config = GossipsubConfigBuilder::from(config.gs_config) config.gs_config = gossipsub::ConfigBuilder::from(config.gs_config)
.heartbeat_initial_delay(Duration::from_millis(500)) .heartbeat_initial_delay(Duration::from_millis(500))
.heartbeat_interval(Duration::from_millis(500)) .heartbeat_interval(Duration::from_millis(500))
.build() .build()
@ -97,6 +97,7 @@ pub async fn build_libp2p_instance(
boot_nodes: Vec<Enr>, boot_nodes: Vec<Enr>,
log: slog::Logger, log: slog::Logger,
fork_name: ForkName, fork_name: ForkName,
spec: &ChainSpec,
) -> Libp2pInstance { ) -> Libp2pInstance {
let port = unused_tcp4_port().unwrap(); let port = unused_tcp4_port().unwrap();
let config = build_config(port, boot_nodes); let config = build_config(port, boot_nodes);
@ -109,7 +110,7 @@ pub async fn build_libp2p_instance(
config: &config, config: &config,
enr_fork_id: EnrForkId::default(), enr_fork_id: EnrForkId::default(),
fork_context: Arc::new(fork_context(fork_name)), fork_context: Arc::new(fork_context(fork_name)),
chain_spec: &ChainSpec::minimal(), chain_spec: spec,
gossipsub_registry: None, gossipsub_registry: None,
}; };
Libp2pInstance( Libp2pInstance(
@ -133,12 +134,13 @@ pub async fn build_node_pair(
rt: Weak<Runtime>, rt: Weak<Runtime>,
log: &slog::Logger, log: &slog::Logger,
fork_name: ForkName, fork_name: ForkName,
spec: &ChainSpec,
) -> (Libp2pInstance, Libp2pInstance) { ) -> (Libp2pInstance, Libp2pInstance) {
let sender_log = log.new(o!("who" => "sender")); let sender_log = log.new(o!("who" => "sender"));
let receiver_log = log.new(o!("who" => "receiver")); let receiver_log = log.new(o!("who" => "receiver"));
let mut sender = build_libp2p_instance(rt.clone(), vec![], sender_log, fork_name).await; let mut sender = build_libp2p_instance(rt.clone(), vec![], sender_log, fork_name, spec).await;
let mut receiver = build_libp2p_instance(rt, vec![], receiver_log, fork_name).await; let mut receiver = build_libp2p_instance(rt, vec![], receiver_log, fork_name, spec).await;
let receiver_multiaddr = receiver.local_enr().multiaddr()[1].clone(); let receiver_multiaddr = receiver.local_enr().multiaddr()[1].clone();
@ -183,10 +185,11 @@ pub async fn build_linear(
log: slog::Logger, log: slog::Logger,
n: usize, n: usize,
fork_name: ForkName, fork_name: ForkName,
spec: &ChainSpec,
) -> Vec<Libp2pInstance> { ) -> Vec<Libp2pInstance> {
let mut nodes = Vec::with_capacity(n); let mut nodes = Vec::with_capacity(n);
for _ in 0..n { for _ in 0..n {
nodes.push(build_libp2p_instance(rt.clone(), vec![], log.clone(), fork_name).await); nodes.push(build_libp2p_instance(rt.clone(), vec![], log.clone(), fork_name, spec).await);
} }
let multiaddrs: Vec<Multiaddr> = nodes let multiaddrs: Vec<Multiaddr> = nodes
View File
@@ -9,9 +9,9 @@ use std::time::Duration;
 use tokio::runtime::Runtime;
 use tokio::time::sleep;
 use types::{
-    BeaconBlock, BeaconBlockAltair, BeaconBlockBase, BeaconBlockMerge, BlobSidecar, EmptyBlock,
-    Epoch, EthSpec, ForkContext, ForkName, Hash256, MinimalEthSpec, Signature, SignedBeaconBlock,
-    Slot,
+    BeaconBlock, BeaconBlockAltair, BeaconBlockBase, BeaconBlockMerge, BlobSidecar, ChainSpec,
+    EmptyBlock, Epoch, EthSpec, ForkContext, ForkName, Hash256, MinimalEthSpec, Signature,
+    SignedBeaconBlock, Slot,
 };
 mod common;
@@ -19,30 +19,30 @@ mod common;
 type E = MinimalEthSpec;
 /// Merge block with length < max_rpc_size.
-fn merge_block_small(fork_context: &ForkContext) -> BeaconBlock<E> {
-    let mut block = BeaconBlockMerge::<E>::empty(&E::default_spec());
+fn merge_block_small(fork_context: &ForkContext, spec: &ChainSpec) -> BeaconBlock<E> {
+    let mut block = BeaconBlockMerge::<E>::empty(spec);
     let tx = VariableList::from(vec![0; 1024]);
     let txs = VariableList::from(std::iter::repeat(tx).take(5000).collect::<Vec<_>>());
     block.body.execution_payload.execution_payload.transactions = txs;
     let block = BeaconBlock::Merge(block);
-    assert!(block.ssz_bytes_len() <= max_rpc_size(fork_context));
+    assert!(block.ssz_bytes_len() <= max_rpc_size(fork_context, spec.max_chunk_size as usize));
     block
 }
 /// Merge block with length > MAX_RPC_SIZE.
 /// The max limit for a merge block is in the order of ~16GiB which wouldn't fit in memory.
 /// Hence, we generate a merge block just greater than `MAX_RPC_SIZE` to test rejection on the rpc layer.
-fn merge_block_large(fork_context: &ForkContext) -> BeaconBlock<E> {
-    let mut block = BeaconBlockMerge::<E>::empty(&E::default_spec());
+fn merge_block_large(fork_context: &ForkContext, spec: &ChainSpec) -> BeaconBlock<E> {
+    let mut block = BeaconBlockMerge::<E>::empty(spec);
     let tx = VariableList::from(vec![0; 1024]);
     let txs = VariableList::from(std::iter::repeat(tx).take(100000).collect::<Vec<_>>());
     block.body.execution_payload.execution_payload.transactions = txs;
     let block = BeaconBlock::Merge(block);
-    assert!(block.ssz_bytes_len() > max_rpc_size(fork_context));
+    assert!(block.ssz_bytes_len() > max_rpc_size(fork_context, spec.max_chunk_size as usize));
     block
 }
@@ -58,10 +58,12 @@ fn test_status_rpc() {
     let log = common::build_log(log_level, enable_logging);
+    let spec = E::default_spec();
     rt.block_on(async {
         // get sender/receiver
         let (mut sender, mut receiver) =
-            common::build_node_pair(Arc::downgrade(&rt), &log, ForkName::Base).await;
+            common::build_node_pair(Arc::downgrade(&rt), &log, ForkName::Base, &spec).await;
         // Dummy STATUS RPC message
         let rpc_request = Request::Status(StatusMessage {
@@ -150,10 +152,12 @@ fn test_blocks_by_range_chunked_rpc() {
     let rt = Arc::new(Runtime::new().unwrap());
+    let spec = E::default_spec();
     rt.block_on(async {
         // get sender/receiver
         let (mut sender, mut receiver) =
-            common::build_node_pair(Arc::downgrade(&rt), &log, ForkName::Merge).await;
+            common::build_node_pair(Arc::downgrade(&rt), &log, ForkName::Merge, &spec).await;
         // BlocksByRange Request
         let rpc_request = Request::BlocksByRange(BlocksByRangeRequest::new(0, messages_to_send));
@@ -169,7 +173,7 @@ fn test_blocks_by_range_chunked_rpc() {
         let signed_full_block = SignedBeaconBlock::from_block(full_block, Signature::empty());
         let rpc_response_altair = Response::BlocksByRange(Some(Arc::new(signed_full_block)));
-        let full_block = merge_block_small(&common::fork_context(ForkName::Merge));
+        let full_block = merge_block_small(&common::fork_context(ForkName::Merge), &spec);
         let signed_full_block = SignedBeaconBlock::from_block(full_block, Signature::empty());
         let rpc_response_merge_small = Response::BlocksByRange(Some(Arc::new(signed_full_block)));
@@ -277,8 +281,9 @@ fn test_blobs_by_range_chunked_rpc() {
     rt.block_on(async {
         // get sender/receiver
+        let spec = E::default_spec();
         let (mut sender, mut receiver) =
-            common::build_node_pair(Arc::downgrade(&rt), &log, ForkName::Deneb).await;
+            common::build_node_pair(Arc::downgrade(&rt), &log, ForkName::Deneb, &spec).await;
         // BlobsByRange Request
         let rpc_request = Request::BlobsByRange(BlobsByRangeRequest {
@@ -379,16 +384,18 @@ fn test_blocks_by_range_over_limit() {
     let rt = Arc::new(Runtime::new().unwrap());
+    let spec = E::default_spec();
     rt.block_on(async {
         // get sender/receiver
         let (mut sender, mut receiver) =
-            common::build_node_pair(Arc::downgrade(&rt), &log, ForkName::Merge).await;
+            common::build_node_pair(Arc::downgrade(&rt), &log, ForkName::Merge, &spec).await;
         // BlocksByRange Request
         let rpc_request = Request::BlocksByRange(BlocksByRangeRequest::new(0, messages_to_send));
         // BlocksByRange Response
-        let full_block = merge_block_large(&common::fork_context(ForkName::Merge));
+        let full_block = merge_block_large(&common::fork_context(ForkName::Merge), &spec);
         let signed_full_block = SignedBeaconBlock::from_block(full_block, Signature::empty());
         let rpc_response_merge_large = Response::BlocksByRange(Some(Arc::new(signed_full_block)));
@@ -461,10 +468,12 @@ fn test_blocks_by_range_chunked_rpc_terminates_correctly() {
     let rt = Arc::new(Runtime::new().unwrap());
+    let spec = E::default_spec();
     rt.block_on(async {
         // get sender/receiver
         let (mut sender, mut receiver) =
-            common::build_node_pair(Arc::downgrade(&rt), &log, ForkName::Base).await;
+            common::build_node_pair(Arc::downgrade(&rt), &log, ForkName::Base, &spec).await;
         // BlocksByRange Request
         let rpc_request = Request::BlocksByRange(BlocksByRangeRequest::new(0, messages_to_send));
@@ -581,10 +590,12 @@ fn test_blocks_by_range_single_empty_rpc() {
     let log = common::build_log(log_level, enable_logging);
     let rt = Arc::new(Runtime::new().unwrap());
+    let spec = E::default_spec();
     rt.block_on(async {
         // get sender/receiver
         let (mut sender, mut receiver) =
-            common::build_node_pair(Arc::downgrade(&rt), &log, ForkName::Base).await;
+            common::build_node_pair(Arc::downgrade(&rt), &log, ForkName::Base, &spec).await;
         // BlocksByRange Request
         let rpc_request = Request::BlocksByRange(BlocksByRangeRequest::new(0, 10));
@@ -685,7 +696,7 @@ fn test_blocks_by_root_chunked_rpc() {
     // get sender/receiver
     rt.block_on(async {
         let (mut sender, mut receiver) =
-            common::build_node_pair(Arc::downgrade(&rt), &log, ForkName::Merge).await;
+            common::build_node_pair(Arc::downgrade(&rt), &log, ForkName::Merge, &spec).await;
         // BlocksByRoot Request
         let rpc_request =
@@ -707,7 +718,7 @@ fn test_blocks_by_root_chunked_rpc() {
         let signed_full_block = SignedBeaconBlock::from_block(full_block, Signature::empty());
         let rpc_response_altair = Response::BlocksByRoot(Some(Arc::new(signed_full_block)));
-        let full_block = merge_block_small(&common::fork_context(ForkName::Merge));
+        let full_block = merge_block_small(&common::fork_context(ForkName::Merge), &spec);
         let signed_full_block = SignedBeaconBlock::from_block(full_block, Signature::empty());
         let rpc_response_merge_small = Response::BlocksByRoot(Some(Arc::new(signed_full_block)));
@@ -812,7 +823,7 @@ fn test_blocks_by_root_chunked_rpc_terminates_correctly() {
     // get sender/receiver
     rt.block_on(async {
         let (mut sender, mut receiver) =
-            common::build_node_pair(Arc::downgrade(&rt), &log, ForkName::Base).await;
+            common::build_node_pair(Arc::downgrade(&rt), &log, ForkName::Base, &spec).await;
         // BlocksByRoot Request
         let rpc_request =
@@ -939,10 +950,13 @@ fn test_goodbye_rpc() {
     let log = common::build_log(log_level, enable_logging);
     let rt = Arc::new(Runtime::new().unwrap());
+    let spec = E::default_spec();
     // get sender/receiver
     rt.block_on(async {
         let (mut sender, mut receiver) =
-            common::build_node_pair(Arc::downgrade(&rt), &log, ForkName::Base).await;
+            common::build_node_pair(Arc::downgrade(&rt), &log, ForkName::Base, &spec).await;
         // build the sender future
         let sender_future = async {
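
The size assertions in `merge_block_small`/`merge_block_large` are easy to sanity-check by hand: each transaction is 1024 zero bytes, so the transaction list dominates the SSZ length. Rough arithmetic under the assumed 10 MiB post-merge cap:

fn main() {
    let max_rpc_size = 10 * 1_048_576; // assumed post-merge cap: 10 MiB

    let small = 5_000 * 1024; // ~5 MiB of transactions: accepted
    let large = 100_000 * 1024; // ~98 MiB of transactions: rejected

    assert!(small < max_rpc_size);
    assert!(large > max_rpc_size);
}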
View File
@@ -51,4 +51,6 @@ parking_lot = "0.12.0"
 environment = { path = "../../lighthouse/environment" }
 [features]
+# NOTE: This can be run via cargo build --bin lighthouse --features network/disable-backfill
+disable-backfill = []
 fork_from_env = ["beacon_chain/fork_from_env"]
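
One thing worth flagging when reading this together with the sync-manager hunks further down: the feature is declared here as `disable-backfill`, while the new `cfg` attributes in the sync manager test `feature = "disable_backfill"`. Cargo does not translate hyphens to underscores in `cfg(feature = ...)` checks, so as written those attributes appear never to match and the guarded backfill code stays compiled in. A spelling that would match the feature name:

// Hypothetical corrected spelling, matching the Cargo.toml feature:
#[cfg(not(feature = "disable-backfill"))]
fn start_backfill_sync() { /* ... */ }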
View File
@@ -1845,6 +1845,7 @@ impl<T: BeaconChainTypes> NetworkBeaconProcessor<T> {
             attestation_verification::verify_propagation_slot_range(
                 seen_clock,
                 failed_att.attestation(),
+                &self.chain.spec,
             );
             // Only penalize the peer if it would have been invalid at the moment we received
@@ -2396,6 +2397,7 @@ impl<T: BeaconChainTypes> NetworkBeaconProcessor<T> {
             sync_committee_verification::verify_propagation_slot_range(
                 seen_clock,
                 &sync_committee_message_slot,
+                &self.chain.spec,
             );
             hindsight_verification.is_err()
         };
@@ -2708,6 +2710,7 @@ impl<T: BeaconChainTypes> NetworkBeaconProcessor<T> {
         let is_timely = attestation_verification::verify_propagation_slot_range(
             &self.chain.slot_clock,
             attestation,
+            &self.chain.spec,
         )
         .is_ok();
View File
@@ -671,14 +671,3 @@ impl<E: EthSpec> NetworkBeaconProcessor<TestBeaconChainType<E>> {
         (network_beacon_processor, beacon_processor_receive)
     }
 }
-#[cfg(test)]
-mod test {
-    #[test]
-    fn queued_block_delay_is_sane() {
-        assert!(
-            beacon_processor::work_reprocessing_queue::ADDITIONAL_QUEUED_BLOCK_DELAY
-                < beacon_chain::MAXIMUM_GOSSIP_CLOCK_DISPARITY
-        );
-    }
-}
View File
@@ -1,3 +1,4 @@
+#![cfg(not(debug_assertions))] // Tests are too slow in debug.
 #![cfg(test)]
 use crate::{
@@ -10,7 +11,7 @@ use crate::{
 use beacon_chain::test_utils::{
     test_spec, AttestationStrategy, BeaconChainHarness, BlockStrategy, EphemeralHarnessType,
 };
-use beacon_chain::{BeaconChain, ChainConfig, WhenSlotSkipped, MAXIMUM_GOSSIP_CLOCK_DISPARITY};
+use beacon_chain::{BeaconChain, ChainConfig, WhenSlotSkipped};
 use beacon_processor::{work_reprocessing_queue::*, *};
 use lighthouse_network::discovery::ConnectionId;
 use lighthouse_network::rpc::methods::BlobsByRangeRequest;
@@ -221,7 +222,7 @@ impl TestRig {
         };
         let network_beacon_processor = Arc::new(network_beacon_processor);
-        BeaconProcessor {
+        let beacon_processor = BeaconProcessor {
             network_globals,
             executor,
             max_workers: cmp::max(1, num_cpus::get()),
@@ -235,8 +236,11 @@ impl TestRig {
             work_reprocessing_rx,
             Some(work_journal_tx),
             harness.chain.slot_clock.clone(),
+            chain.spec.maximum_gossip_clock_disparity(),
         );
+        assert!(!beacon_processor.is_err());
         Self {
             chain,
             next_block: Arc::new(next_block_tuple.0),
@@ -339,7 +343,7 @@ impl TestRig {
         self.network_beacon_processor
             .send_blobs_by_range_request(
                 PeerId::random(),
-                (ConnectionId::new(42), SubstreamId::new(24)),
+                (ConnectionId::new_unchecked(42), SubstreamId::new(24)),
                 BlobsByRangeRequest {
                     start_slot: 0,
                     count,
@@ -559,7 +563,7 @@ async fn import_gossip_block_acceptably_early() {
     rig.chain
         .slot_clock
-        .set_current_time(slot_start - MAXIMUM_GOSSIP_CLOCK_DISPARITY);
+        .set_current_time(slot_start - rig.chain.spec.maximum_gossip_clock_disparity());
     assert_eq!(
         rig.chain.slot().unwrap(),
@@ -614,9 +618,9 @@ async fn import_gossip_block_unacceptably_early() {
         .start_of(rig.next_block.slot())
         .unwrap();
-    rig.chain
-        .slot_clock
-        .set_current_time(slot_start - MAXIMUM_GOSSIP_CLOCK_DISPARITY - Duration::from_millis(1));
+    rig.chain.slot_clock.set_current_time(
+        slot_start - rig.chain.spec.maximum_gossip_clock_disparity() - Duration::from_millis(1),
+    );
     assert_eq!(
         rig.chain.slot().unwrap(),
View File
@@ -232,6 +232,12 @@ impl<T: BeaconChainTypes> NetworkService<T> {
         // build the channels for external comms
         let (network_senders, network_recievers) = NetworkSenders::new();
+        #[cfg(feature = "disable-backfill")]
+        warn!(
+            network_log,
+            "Backfill is disabled. DO NOT RUN IN PRODUCTION"
+        );
         // try and construct UPnP port mappings if required.
         if let Some(upnp_config) = crate::nat::UPnPConfig::from_config(config) {
             let upnp_log = network_log.new(o!("service" => "UPnP"));
@@ -487,10 +493,8 @@ impl<T: BeaconChainTypes> NetworkService<T> {
             NetworkEvent::PeerConnectedOutgoing(peer_id) => {
                 self.send_to_router(RouterMessage::StatusPeer(peer_id));
             }
-            NetworkEvent::PeerConnectedIncoming(_)
-            | NetworkEvent::PeerBanned(_)
-            | NetworkEvent::PeerUnbanned(_) => {
-                // No action required for these events.
+            NetworkEvent::PeerConnectedIncoming(_) => {
+                // No action required for this event.
             }
             NetworkEvent::PeerDisconnected(peer_id) => {
                 self.send_to_router(RouterMessage::PeerDisconnected(peer_id));
View File
@ -50,7 +50,6 @@ use beacon_chain::block_verification_types::AsBlock;
use beacon_chain::block_verification_types::RpcBlock; use beacon_chain::block_verification_types::RpcBlock;
use beacon_chain::{ use beacon_chain::{
AvailabilityProcessingStatus, BeaconChain, BeaconChainTypes, BlockError, EngineState, AvailabilityProcessingStatus, BeaconChain, BeaconChainTypes, BlockError, EngineState,
MAXIMUM_GOSSIP_CLOCK_DISPARITY,
}; };
use futures::StreamExt; use futures::StreamExt;
use lighthouse_network::rpc::methods::MAX_REQUEST_BLOCKS; use lighthouse_network::rpc::methods::MAX_REQUEST_BLOCKS;
@ -537,6 +536,7 @@ impl<T: BeaconChainTypes> SyncManager<T> {
// If we would otherwise be synced, first check if we need to perform or // If we would otherwise be synced, first check if we need to perform or
// complete a backfill sync. // complete a backfill sync.
#[cfg(not(feature = "disable_backfill"))]
if matches!(sync_state, SyncState::Synced) { if matches!(sync_state, SyncState::Synced) {
// Determine if we need to start/resume/restart a backfill sync. // Determine if we need to start/resume/restart a backfill sync.
match self.backfill_sync.start(&mut self.network) { match self.backfill_sync.start(&mut self.network) {
@ -561,6 +561,7 @@ impl<T: BeaconChainTypes> SyncManager<T> {
} }
Some((RangeSyncType::Finalized, start_slot, target_slot)) => { Some((RangeSyncType::Finalized, start_slot, target_slot)) => {
// If there is a backfill sync in progress pause it. // If there is a backfill sync in progress pause it.
#[cfg(not(feature = "disable_backfill"))]
self.backfill_sync.pause(); self.backfill_sync.pause();
SyncState::SyncingFinalized { SyncState::SyncingFinalized {
@ -570,6 +571,7 @@ impl<T: BeaconChainTypes> SyncManager<T> {
} }
Some((RangeSyncType::Head, start_slot, target_slot)) => { Some((RangeSyncType::Head, start_slot, target_slot)) => {
// If there is a backfill sync in progress pause it. // If there is a backfill sync in progress pause it.
#[cfg(not(feature = "disable_backfill"))]
self.backfill_sync.pause(); self.backfill_sync.pause();
SyncState::SyncingHead { SyncState::SyncingHead {
@ -815,14 +817,15 @@ impl<T: BeaconChainTypes> SyncManager<T> {
} }
fn should_delay_lookup(&mut self, slot: Slot) -> bool { fn should_delay_lookup(&mut self, slot: Slot) -> bool {
let maximum_gossip_clock_disparity = self.chain.spec.maximum_gossip_clock_disparity();
let earliest_slot = self let earliest_slot = self
.chain .chain
.slot_clock .slot_clock
.now_with_past_tolerance(MAXIMUM_GOSSIP_CLOCK_DISPARITY); .now_with_past_tolerance(maximum_gossip_clock_disparity);
let latest_slot = self let latest_slot = self
.chain .chain
.slot_clock .slot_clock
.now_with_future_tolerance(MAXIMUM_GOSSIP_CLOCK_DISPARITY); .now_with_future_tolerance(maximum_gossip_clock_disparity);
if let (Some(earliest_slot), Some(latest_slot)) = (earliest_slot, latest_slot) { if let (Some(earliest_slot), Some(latest_slot)) = (earliest_slot, latest_slot) {
let msg_for_current_slot = slot >= earliest_slot && slot <= latest_slot; let msg_for_current_slot = slot >= earliest_slot && slot <= latest_slot;
let delay_threshold_unmet = self let delay_threshold_unmet = self
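For orientation, a minimal self-contained sketch (plain integers instead of the real `Slot`/`SlotClock` types, so it is illustrative only) of the window check that these tolerances feed into:
```rust
/// Sketch: a message is "for the current slot" if its slot lies within the
/// window [earliest_slot, latest_slot] derived from the clock reading minus
/// and plus the per-spec maximum gossip clock disparity.
fn msg_for_current_slot(msg_slot: u64, earliest_slot: u64, latest_slot: u64) -> bool {
    msg_slot >= earliest_slot && msg_slot <= latest_slot
}

fn main() {
    // A disparity too small to straddle a slot boundary leaves a one-slot window.
    assert!(msg_for_current_slot(100, 100, 100));
    // A disparity straddling a boundary widens the window to two slots.
    assert!(msg_for_current_slot(101, 100, 101));
    // Anything outside the window is not for the current slot.
    assert!(!msg_for_current_slot(99, 100, 101));
}
```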

View File

@ -618,8 +618,10 @@ pub fn get_config<E: EthSpec>(
}; };
} }
client_config.chain.max_network_size = client_config.chain.max_network_size = lighthouse_network::gossip_max_size(
lighthouse_network::gossip_max_size(spec.bellatrix_fork_epoch.is_some()); spec.bellatrix_fork_epoch.is_some(),
spec.gossip_max_size as usize,
);
if cli_args.is_present("slasher") { if cli_args.is_present("slasher") {
let slasher_dir = if let Some(slasher_dir) = cli_args.value_of("slasher-dir") { let slasher_dir = if let Some(slasher_dir) = cli_args.value_of("slasher-dir") {

View File

@ -12,6 +12,9 @@
* [Run a Node](./run_a_node.md) * [Run a Node](./run_a_node.md)
* [Become a Validator](./mainnet-validator.md) * [Become a Validator](./mainnet-validator.md)
* [Validator Management](./validator-management.md) * [Validator Management](./validator-management.md)
* [The `validator-manager` Command](./validator-manager.md)
* [Creating validators](./validator-manager-create.md)
* [Moving validators](./validator-manager-move.md)
* [Slashing Protection](./slashing-protection.md) * [Slashing Protection](./slashing-protection.md)
* [Voluntary Exits](./voluntary-exit.md) * [Voluntary Exits](./voluntary-exit.md)
* [Partial Withdrawals](./partial-withdrawal.md) * [Partial Withdrawals](./partial-withdrawal.md)
@ -41,7 +44,7 @@
* [Remote Signing with Web3Signer](./validator-web3signer.md) * [Remote Signing with Web3Signer](./validator-web3signer.md)
* [Database Configuration](./advanced_database.md) * [Database Configuration](./advanced_database.md)
* [Database Migrations](./database-migrations.md) * [Database Migrations](./database-migrations.md)
* [Key Management](./key-management.md) * [Key Management (Deprecated)](./key-management.md)
* [Key Recovery](./key-recovery.md) * [Key Recovery](./key-recovery.md)
* [Advanced Networking](./advanced_networking.md) * [Advanced Networking](./advanced_networking.md)
* [Running a Slasher](./slasher.md) * [Running a Slasher](./slasher.md)

View File

@ -28,8 +28,8 @@ some example values.
| Research | 32 | 3.4 TB | 155 ms | | Research | 32 | 3.4 TB | 155 ms |
| Block explorer/analysis | 128 | 851 GB | 620 ms | | Block explorer/analysis | 128 | 851 GB | 620 ms |
| Enthusiast (prev. default) | 2048 | 53.6 GB | 10.2 s | | Enthusiast (prev. default) | 2048 | 53.6 GB | 10.2 s |
| Hobbyist | 4096 | 26.8 GB | 20.5 s | | Hobbyist | 4096 | 26.8 GB | 20.5 s |
| Validator only (default) | 8192 | 8.1 GB | 41 s | | Validator only (default) | 8192 | 12.7 GB | 41 s |
*Last update: May 2023. *Last update: May 2023.
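As a back-of-envelope check (an editorial sketch, not Lighthouse code), freezer-database usage scales roughly inversely with `slots-per-restore-point`, since doubling the spacing halves the number of full states stored:
```rust
fn main() {
    // Extrapolate from the "Enthusiast" row above (2048 -> 53.6 GB).
    let (base_spr, base_gb) = (2048u64, 53.6f64);
    for spr in [4096u64, 8192] {
        let estimate = base_gb * base_spr as f64 / spr as f64;
        // Prints ~26.8 GB for 4096 (matching the table) and ~13.4 GB for
        // 8192; the measured figure in the table is 12.7 GB, so the
        // inverse rule is only approximate.
        println!("slots-per-restore-point {spr}: ~{estimate:.1} GB");
    }
}
```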

View File

@ -172,7 +172,7 @@ In order to do so, lighthouse provides the following CLI options/parameters.
advertises some address, must be advertises some address, must be
reachable both over UDP and TCP. reachable both over UDP and TCP.
In the general case, an user will not require to set these explicitly. Update In the general case, a user will not need to set these explicitly. Update
these options only if you can guarantee your node is reachable with these these options only if you can guarantee your node is reachable with these
values. values.

View File

@ -201,6 +201,8 @@ else:
use local payload use local payload
``` ```
If you would like to always use the builder payload, you can add the flag `--always-prefer-builder-payload` to the beacon node.
## Checking your builder config ## Checking your builder config
You can check that your builder is configured correctly by looking for these log messages. You can check that your builder is configured correctly by looking for these log messages.

View File

@ -10,6 +10,8 @@
- [My beacon node logs `WARN BlockProcessingFailure outcome: MissingBeaconBlock`, what should I do?](#bn-missing-beacon) - [My beacon node logs `WARN BlockProcessingFailure outcome: MissingBeaconBlock`, what should I do?](#bn-missing-beacon)
- [After checkpoint sync, the progress of `downloading historical blocks` is slow. Why?](#bn-download-slow) - [After checkpoint sync, the progress of `downloading historical blocks` is slow. Why?](#bn-download-slow)
- [My beacon node logs `WARN Error processing HTTP API request`, what should I do?](#bn-http) - [My beacon node logs `WARN Error processing HTTP API request`, what should I do?](#bn-http)
- [My beacon node logs `WARN Error signalling fork choice waiter`, what should I do?](#bn-fork-choice)
- [My beacon node logs `ERRO Aggregate attestation queue full`, what should I do?](#bn-queue-full)
## [Validator](#validator-1) ## [Validator](#validator-1)
- [Why does it take so long for a validator to be activated?](#vc-activation) - [Why does it take so long for a validator to be activated?](#vc-activation)
@ -30,7 +32,7 @@
- [My beacon node and validator client are on different servers. How can I point the validator client to the beacon node?](#net-bn-vc) - [My beacon node and validator client are on different servers. How can I point the validator client to the beacon node?](#net-bn-vc)
- [Should I do anything to the beacon node or validator client settings if I have a relocation of the node / change of IP address?](#net-ip) - [Should I do anything to the beacon node or validator client settings if I have a relocation of the node / change of IP address?](#net-ip)
- [How to change the TCP/UDP port 9000 that Lighthouse listens on?](#net-port) - [How to change the TCP/UDP port 9000 that Lighthouse listens on?](#net-port)
- [Lighthouse `v4.3.0` introduces a change where a node will subscribe to only 2 subnets in total. I am worried that this will impact my validators' return.](#net-subnet)
## [Miscellaneous](#miscellaneous-1) ## [Miscellaneous](#miscellaneous-1)
- [What should I do if I lose my slashing protection database?](#misc-slashing) - [What should I do if I lose my slashing protection database?](#misc-slashing)
@ -74,7 +76,7 @@ The `WARN Execution engine called failed` log is shown when the beacon node cann
`error: Reqwest(reqwest::Error { kind: Request, url: Url { scheme: "http", cannot_be_a_base: false, username: "", password: None, host: Some(Ipv4(127.0.0.1)), port: Some(8551), path: "/", query: None, fragment: None }, source: TimedOut }), service: exec` `error: Reqwest(reqwest::Error { kind: Request, url: Url { scheme: "http", cannot_be_a_base: false, username: "", password: None, host: Some(Ipv4(127.0.0.1)), port: Some(8551), path: "/", query: None, fragment: None }, source: TimedOut }), service: exec`
which says `TimedOut` at the end of the message. This means that the execution engine has not responded in time to the beacon node. One option is to add the flag `--execution-timeout-multiplier 3` to the beacon node. However, if the error persists, it is worth digging further to find out the cause. There are a few reasons why this can occur: which says `TimedOut` at the end of the message. This means that the execution engine has not responded in time to the beacon node. One option is to add the flags `--execution-timeout-multiplier 3` and `--disable-lock-timeouts` to the beacon node. However, if the error persists, it is worth digging further to find out the cause. There are a few reasons why this can occur:
1. The execution engine is not synced. Check the log of the execution engine to make sure that it is synced. If it is syncing, wait until it is synced and the error will disappear. You will see the beacon node logs `INFO Execution engine online` when it is synced. 1. The execution engine is not synced. Check the log of the execution engine to make sure that it is synced. If it is syncing, wait until it is synced and the error will disappear. You will see the beacon node logs `INFO Execution engine online` when it is synced.
1. The computer is overloaded. Check the CPU and RAM usage to see if it has overloaded. You can use `htop` to check for CPU and RAM usage. 1. The computer is overloaded. Check the CPU and RAM usage to see if it has overloaded. You can use `htop` to check for CPU and RAM usage.
1. Your SSD is slow. Check if your SSD is in "The Bad" list [here](https://gist.github.com/yorickdowne/f3a3e79a573bf35767cd002cc977b038). If your SSD is in "The Bad" list, it means it cannot keep in sync to the network and you may want to consider upgrading to a better SSD. 1. Your SSD is slow. Check if your SSD is in "The Bad" list [here](https://gist.github.com/yorickdowne/f3a3e79a573bf35767cd002cc977b038). If your SSD is in "The Bad" list, it means it cannot keep in sync to the network and you may want to consider upgrading to a better SSD.
@ -170,6 +172,27 @@ ERRO Failed to download attester duties err: FailedToDownloadAttesters("Som
This means that the validator client is sending requests to the beacon node. However, as the beacon node is still syncing, it is therefore unable to fulfil the request. The error will disappear once the beacon node is synced. This means that the validator client is sending requests to the beacon node. However, as the beacon node is still syncing, it is therefore unable to fulfil the request. The error will disappear once the beacon node is synced.
### <a name="bn-fork-choice"></a> My beacon node logs `WARN Error signalling fork choice waiter`, what should I do?
An example of the full log is shown below:
```
WARN Error signalling fork choice waiter slot: 6763073, error: ForkChoiceSignalOutOfOrder { current: Slot(6763074), latest: Slot(6763073) }, service: state_advance
```
This suggests that the computer resources are being overwhelmed. It could be due to high CPU usage or high disk I/O usage. This can happen, e.g., when the beacon node is downloading historical blocks, or when the execution client is syncing. The error will disappear when the resources used return to normal or when the node is synced.
### <a name="bn-queue-full"></a> My beacon node logs `ERRO Aggregate attestation queue full`, what should I do?
An example of the full log is shown below:
```
ERRO Aggregate attestation queue full, queue_len: 4096, msg: the system has insufficient resources for load, module: network::beacon_processor:1542
```
This suggests that the computer resources are being overwhelmed. It could be due to high CPU usage or high disk I/O usage. This can happen, e.g., when the beacon node is downloading historical blocks, or when the execution client is syncing. The error will disappear when the resources used return to normal or when the node is synced.
## Validator ## Validator
### <a name="vc-activation"></a> Why does it take so long for a validator to be activated? ### <a name="vc-activation"></a> Why does it take so long for a validator to be activated?
@ -279,12 +302,26 @@ The first thing is to ensure both consensus and execution clients are synced wit
- the internet is working well - the internet is working well
- you have sufficient peers - you have sufficient peers
You can see more information on the [Ethstaker KB](https://ethstaker.gitbook.io/ethstaker-knowledge-base/help/missed-attestations). Once the above points are good, missing attestation should be a rare occurrence. You can see more information on the [Ethstaker KB](https://ethstaker.gitbook.io/ethstaker-knowledge-base/help/missed-attestations).
Another cause for missing attestations is delays during block processing. When this happens, the debug logs will show (debug logs can be found under `$datadir/beacon/logs`):
```
DEBG Delayed head block set_as_head_delay: Some(93.579425ms), imported_delay: Some(1.460405278s), observed_delay: Some(2.540811921s), block_delay: 4.094796624s, slot: 6837344, proposer_index: 211108, block_root: 0x2c52231c0a5a117401f5231585de8aa5dd963bc7cbc00c544e681342eedd1700, service: beacon
```
The fields to look for are `imported_delay > 1s` and `observed_delay < 3s`. The `imported_delay` is how long the node took to process the block; a value above 1 second suggests slowness in processing, which could be due to high CPU usage, high disk I/O, or background maintenance processes in the clients. The `observed_delay` is determined mostly by the proposer and partly by your networking setup (e.g., how long it took for the node to receive the block); a value below 3 seconds means the block did not arrive late from the proposer. Taken together, this implies that the validator should have been able to attest to the block, but failed due to slowness in the node processing the block.
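In this example the three component delays sum exactly to the reported `block_delay`, which the following sketch (illustrative only, using the values from the log line above) verifies:
```rust
use std::time::Duration;

fn main() {
    // Component delays taken from the example log line above.
    let set_as_head_delay = Duration::from_nanos(93_579_425);
    let imported_delay = Duration::from_nanos(1_460_405_278);
    let observed_delay = Duration::from_nanos(2_540_811_921);

    // Their sum is the total block_delay reported in the log.
    let block_delay = set_as_head_delay + imported_delay + observed_delay;
    assert_eq!(block_delay, Duration::from_nanos(4_094_796_624));

    // The heuristic from the text: the import was slow (> 1s) even though
    // the block itself arrived on time (< 3s).
    assert!(imported_delay > Duration::from_secs(1));
    assert!(observed_delay < Duration::from_secs(3));
}
```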
### <a name="vc-head-vote"></a> Sometimes I miss the attestation head vote, resulting in penalty. Is this normal? ### <a name="vc-head-vote"></a> Sometimes I miss the attestation head vote, resulting in penalty. Is this normal?
In general, it is unavoidable to have some penalties occasionally. This is particularly the case when you are assigned to attest on the first slot of an epoch: if the proposer of that slot releases the block late, you will be penalised for missing the target and head votes. Your attestation performance depends not only on your own setup, but also on everyone else's performance. In general, it is unavoidable to have some penalties occasionally. This is particularly the case when you are assigned to attest on the first slot of an epoch: if the proposer of that slot releases the block late, you will be penalised for missing the target and head votes. Your attestation performance depends not only on your own setup, but also on everyone else's performance.
You could also check for the sync aggregate participation percentage on block explorers such as [beaconcha.in](https://beaconcha.in/). A low sync aggregate participation percentage (e.g., 60-70%) indicates that the block that you are assigned to attest to may be published late. As a result, your validator fails to correctly attest to the block.
Another possible reason for missing the head vote is due to a chain "reorg". A reorg can happen if the proposer publishes block `n` late, and the proposer of block `n+1` builds upon block `n-1` instead of `n`. This is called a "reorg". Due to the reorg, block `n` was never included in the chain. If you are assigned to attest at slot `n`, it is possible you may still attest to block `n` despite most of the network recognizing the block as being late. In this case you will miss the head reward.
### <a name="vc-exit"></a> Can I submit a voluntary exit message without running a beacon node? ### <a name="vc-exit"></a> Can I submit a voluntary exit message without running a beacon node?
Yes. Beaconcha.in provides the tool to broadcast the message. You can create the voluntary exit message file with [ethdo](https://github.com/wealdtech/ethdo/releases/tag/v1.30.0) and submit the message via the [beaconcha.in](https://beaconcha.in/tools/broadcast) website. A guide on how to use `ethdo` to perform voluntary exit can be found [here](https://github.com/eth-educators/ethstaker-guides/blob/main/voluntary-exit.md). Yes. Beaconcha.in provides the tool to broadcast the message. You can create the voluntary exit message file with [ethdo](https://github.com/wealdtech/ethdo/releases/tag/v1.30.0) and submit the message via the [beaconcha.in](https://beaconcha.in/tools/broadcast) website. A guide on how to use `ethdo` to perform voluntary exit can be found [here](https://github.com/eth-educators/ethstaker-guides/blob/main/voluntary-exit.md).
@ -425,6 +462,14 @@ No. Lighthouse will auto-detect the change and update your Ethereum Node Record
### <a name="net-port"></a> How to change the TCP/UDP port 9000 that Lighthouse listens on? ### <a name="net-port"></a> How to change the TCP/UDP port 9000 that Lighthouse listens on?
Use the flag ```--port <PORT>``` in the beacon node. This flag can be useful when you are running two beacon nodes at the same time. You can leave one beacon node as the default port 9000, and configure the second beacon node to listen on, e.g., ```--port 9001```. Use the flag ```--port <PORT>``` in the beacon node. This flag can be useful when you are running two beacon nodes at the same time. You can leave one beacon node as the default port 9000, and configure the second beacon node to listen on, e.g., ```--port 9001```.
### <a name="net-subnet"></a> Lighthouse `v4.3.0` introduces a change where a node will subscribe to only 2 subnets in total. I am worried that this will impact my validators' return.
Previously, having more validators meant subscribing to more subnets. Since the change, a node will subscribe to only 2 subnets in total. This brings a significant reduction in bandwidth for nodes with multiple validators.
While subscribing to more subnets can ensure you have peers on a wider range of subnets, these subscriptions consume resources and bandwidth. They do not significantly improve the node's own performance; however, they do benefit other nodes on the network.
If you would still like to subscribe to all subnets, you can use the flag `--subscribe-all-subnets`. This may improve the block rewards by 1-5%, though it comes at the cost of a much higher bandwidth requirement.
## Miscellaneous ## Miscellaneous
### <a name="misc-slashing"></a> What should I do if I lose my slashing protection database? ### <a name="misc-slashing"></a> What should I do if I lose my slashing protection database?

View File

@ -28,7 +28,7 @@ operating system.
Install the following packages: Install the following packages:
```bash ```bash
sudo apt install -y git gcc g++ make cmake pkg-config llvm-dev libclang-dev clang protobuf-compiler sudo apt install -y git gcc g++ make cmake pkg-config llvm-dev libclang-dev clang
``` ```
> Tips: > Tips:
@ -51,10 +51,6 @@ After this, you are ready to [build Lighthouse](#build-lighthouse).
brew install cmake brew install cmake
``` ```
1. Install protoc using Homebrew:
```
brew install protobuf
```
[Homebrew]: https://brew.sh/ [Homebrew]: https://brew.sh/
@ -71,7 +67,7 @@ After this, you are ready to [build Lighthouse](#build-lighthouse).
Set-ExecutionPolicy Bypass -Scope Process -Force; [System.Net.ServicePointManager]::SecurityProtocol = [System.Net.ServicePointManager]::SecurityProtocol -bor 3072; iex ((New-Object System.Net.WebClient).DownloadString('https://community.chocolatey.org/install.ps1')) Set-ExecutionPolicy Bypass -Scope Process -Force; [System.Net.ServicePointManager]::SecurityProtocol = [System.Net.ServicePointManager]::SecurityProtocol -bor 3072; iex ((New-Object System.Net.WebClient).DownloadString('https://community.chocolatey.org/install.ps1'))
``` ```
> - To verify that Chocolatey is ready, run `choco` and it should return the version. > - To verify that Chocolatey is ready, run `choco` and it should return the version.
1. Install Make, CMake, LLVM and protoc using Chocolatey: 1. Install Make, CMake and LLVM using Chocolatey:
``` ```
choco install make choco install make
@ -85,10 +81,6 @@ choco install cmake --installargs 'ADD_CMAKE_TO_PATH=System'
choco install llvm choco install llvm
``` ```
```
choco install protoc
```
These dependencies are for compiling Lighthouse natively on Windows. Lighthouse can also run These dependencies are for compiling Lighthouse natively on Windows. Lighthouse can also run
successfully under the [Windows Subsystem for Linux (WSL)][WSL]. If using Ubuntu under WSL, you successfully under the [Windows Subsystem for Linux (WSL)][WSL]. If using Ubuntu under WSL, you
should follow the instructions for Ubuntu listed in the [Dependencies (Ubuntu)](#ubuntu) section. should follow the instructions for Ubuntu listed in the [Dependencies (Ubuntu)](#ubuntu) section.
@ -217,4 +209,3 @@ look into [cross compilation](./cross-compiling.md), or use a [pre-built
binary](https://github.com/sigp/lighthouse/releases). binary](https://github.com/sigp/lighthouse/releases).
If compilation fails with `error: linking with cc failed: exit code: 1`, try running `cargo clean`. If compilation fails with `error: linking with cc failed: exit code: 1`, try running `cargo clean`.

View File

@ -1,9 +1,30 @@
# Key Management # Key Management (Deprecated)
[launchpad]: https://launchpad.ethereum.org/ [launchpad]: https://launchpad.ethereum.org/
> **⚠️ The information on this page refers to tooling and process that have been deprecated. Please read the "Deprecation Notice". ⚠️**
> **Note: While Lighthouse is able to generate the validator keys and the deposit data file to submit to the deposit contract, we strongly recommend using the [staking-deposit-cli](https://github.com/ethereum/staking-deposit-cli) to create validators keys and the deposit data file. This is because the [staking-deposit-cli](https://github.com/ethereum/staking-deposit-cli) has the option to assign a withdrawal address during the key generation process, while Lighthouse wallet will always generate keys with withdrawal credentials of type 0x00. This means that users who created keys using Lighthouse will have to update their withdrawal credentials in the future to enable withdrawals. In addition, Lighthouse generates the deposit data file in the form of `*.rlp`, which cannot be uploaded to the [Staking launchpad][launchpad] that accepts only `*.json` file. This means that users have to directly interact with the deposit contract to be able to submit the deposit if they were to generate the files using Lighthouse.**
## Deprecation Notice
This page recommends the use of the `lighthouse account-manager` tool to create
validators. This tool will always generate keys with the withdrawal credentials
of type `0x00`. This means that users who created keys using `lighthouse
account-manager` will have to update their withdrawal credentials in a
separate step to receive staking rewards.
In addition, Lighthouse generates the deposit data file in the form of `*.rlp`,
which cannot be uploaded to the [Staking launchpad][launchpad] that accepts only
`*.json` file. This means that users have to directly interact with the deposit
contract to be able to submit the deposit if they were to generate the files
using Lighthouse.
Rather than continuing to read this page, we recommend users visit one of the following:
- The [Staking Launchpad][launchpad] for detailed, beginner-friendly instructions.
- The [staking-deposit-cli](https://github.com/ethereum/staking-deposit-cli) for a CLI tool used by the [Staking Launchpad][launchpad].
- The [validator-manager documentation](./validator-manager.md) for a Lighthouse-specific tool that streamlines validator management.
## The `lighthouse account-manager`
Lighthouse uses a _hierarchical_ key management system for producing validator Lighthouse uses a _hierarchical_ key management system for producing validator
keys. It is hierarchical because each validator key can be _derived_ from a keys. It is hierarchical because each validator key can be _derived_ from a

View File

@ -22,7 +22,7 @@ terminal and an Internet connection are necessary.
Install the Ubuntu dependencies: Install the Ubuntu dependencies:
```bash ```bash
sudo apt install -y git gcc g++ make cmake pkg-config llvm-dev libclang-dev clang protobuf-compiler sudo apt install -y git gcc g++ make cmake pkg-config llvm-dev libclang-dev clang
``` ```
> Tips: > Tips:

View File

@ -14,8 +14,6 @@ The additional requirements for developers are:
don't have `anvil` available on your `PATH`. don't have `anvil` available on your `PATH`.
- [`cmake`](https://cmake.org/cmake/help/latest/command/install.html). Used by - [`cmake`](https://cmake.org/cmake/help/latest/command/install.html). Used by
some dependencies. See [`Installation Guide`](./installation.md) for more info. some dependencies. See [`Installation Guide`](./installation.md) for more info.
- [`protoc`](https://github.com/protocolbuffers/protobuf/releases) required for
the networking stack.
- [`java 11 runtime`](https://openjdk.java.net/projects/jdk/). 11 is the minimum, - [`java 11 runtime`](https://openjdk.java.net/projects/jdk/). 11 is the minimum,
used by web3signer_tests. used by web3signer_tests.
- [`libpq-dev`](https://www.postgresql.org/docs/devel/libpq.html). Also known as - [`libpq-dev`](https://www.postgresql.org/docs/devel/libpq.html). Also known as

View File

@ -46,6 +46,8 @@ Staying silent and refusing to sign messages will cause the following:
- Potentially missed rewards by missing a block proposal (if the validator is an elected block - Potentially missed rewards by missing a block proposal (if the validator is an elected block
proposer, which is unlikely). proposer, which is unlikely).
Notably, sync committee contributions are not slashable and will continue to be produced even when DP is suppressing other messages.
The loss of rewards and penalties incurred due to the missed duties will be very small in The loss of rewards and penalties incurred due to the missed duties will be very small in
dollar values. Setting aside block proposals, the losses will generally equate to around 0.00002 ETH (equivalent to USD 0.04 assuming ETH is trading at USD 2000), or less than dollar values. Setting aside block proposals, the losses will generally equate to around 0.00002 ETH (equivalent to USD 0.04 assuming ETH is trading at USD 2000), or less than
1% of the reward for one validator for one day. Since DP costs so little but can protect a user from 1% of the reward for one validator for one day. Since DP costs so little but can protect a user from

View File

@ -13,6 +13,10 @@ standard directories and do not start their `lighthouse vc` with the
this document. However, users with more complex needs may find this document this document. However, users with more complex needs may find this document
useful. useful.
The [lighthouse validator-manager](./validator-manager.md) command can be used
to create and import validators to a Lighthouse VC. It can also be used to move
validators between two Lighthouse VCs.
## Introducing the `validator_definitions.yml` file ## Introducing the `validator_definitions.yml` file
The `validator_definitions.yml` file is located in the `validator-dir`, which The `validator_definitions.yml` file is located in the `validator-dir`, which

View File

@ -0,0 +1,206 @@
# Creating and Importing Validators
[Ethereum Staking Launchpad]: https://launchpad.ethereum.org/en/
The `lighthouse validator-manager create` command derives validators from a
mnemonic and produces two files:
- `validators.json`: the keystores and passwords for the newly generated
validators, in JSON format.
- `deposits.json`: a JSON file of the same format as
[staking-deposit-cli](https://github.com/ethereum/staking-deposit-cli) which can
be used for deposit submission via the [Ethereum Staking
Launchpad][].
The `lighthouse validator-manager import` command accepts a `validators.json`
file (from the `create` command) and submits those validators to a running
Lighthouse Validator Client via the HTTP API.
These two commands enable a workflow of:
1. Creating the validators via the `create` command.
1. Importing the validators via the `import` command.
1. Depositing validators via the [Ethereum Staking
Launchpad][].
The separation of the `create` and `import` commands allows for running the
`create` command on an air-gapped host whilst performing the `import` command on
an internet-connected host.
The `create` and `import` commands are recommended for advanced users who are
familiar with command line tools and the practicalities of managing sensitive
cryptographic material. **We recommend that novice users follow the workflow on
[Ethereum Staking Launchpad][] rather than using the `create` and `import`
commands.**
## Simple Example
Create validators from a mnemonic with:
```bash
lighthouse \
validator-manager \
create \
--network mainnet \
--first-index 0 \
--count 2 \
--eth1-withdrawal-address <ADDRESS> \
--suggested-fee-recipient <ADDRESS> \
--output-path ./
```
> If the flag `--first-index` is not provided, it will default to using index 0.
> The `--suggested-fee-recipient` flag may be omitted to use whatever default
> value the VC uses. It does not necessarily need to be identical to
> `--eth1-withdrawal-address`.
> The command will create the `deposits.json` and `validators.json` in the present working directory. If you would like these files to be created in a different directory, change the value of `output-path`, for example `--output-path /desired/directory`. The directory will be created if the path does not exist.
Then, import the validators to a running VC with:
```bash
lighthouse \
validator-manager \
import \
--validators-file validators.json \
--vc-token <API-TOKEN-PATH>
```
> This is assuming that `validators.json` is in the present working directory. If it is not, provide the path to the file.
> Be sure to remove `./validators.json` after the import is successful since it
> contains unencrypted validator keystores.
## Detailed Guide
This guide will create two validators and import them to a VC. For simplicity,
the same host will be used to generate the keys and run the VC. In reality,
users may want to perform the `create` command on an air-gapped machine and then
move the `validators.json` and `deposits.json` files to an Internet-connected
host. This would help protect the mnemonic from being exposed to the Internet.
### 1. Create the Validators
Run the `create` command, substituting `<ADDRESS>` for an execution address that
you control. This is where all the staked ETH and rewards will ultimately
reside, so it's very important that this address is secure, accessible and
backed-up. The `create` command:
```bash
lighthouse \
validator-manager \
create \
--first-index 0 \
--count 2 \
--eth1-withdrawal-address <ADDRESS> \
--output-path ./
```
If successful, the command output will appear like below:
```bash
Running validator manager for mainnet network
Enter the mnemonic phrase:
<REDACTED>
Valid mnemonic provided.
Starting derivation of 2 keystores. Each keystore may take several seconds.
Completed 1/2: 0x8885c29b8f88ee9b9a37b480fd4384fed74bda33d85bc8171a904847e65688b6c9bb4362d6597fd30109fb2def6c3ae4
Completed 2/2: 0xa262dae3dcd2b2e280af534effa16bedb27c06f2959e114d53bd2a248ca324a018dc73179899a066149471a94a1bc92f
Keystore generation complete
Writing "./validators.json"
Writing "./deposits.json"
```
This command will create validators at indices `0, 1`. The exact indices created
can be influenced with the `--first-index` and `--count` flags. Use these flags
with caution: creating the same validator twice may result in a
slashing!
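To make the index arithmetic concrete, a small sketch (illustrative only, not Lighthouse code) of which indices a given `--first-index`/`--count` pair derives:
```rust
/// Sketch: the validator indices derived for a given --first-index and --count.
fn derived_indices(first_index: u32, count: u32) -> std::ops::Range<u32> {
    first_index..first_index + count
}

fn main() {
    // --first-index 0 --count 2 derives validators at indices 0 and 1.
    assert_eq!(derived_indices(0, 2).collect::<Vec<_>>(), vec![0, 1]);
    // An overlapping range such as --first-index 1 --count 2 re-derives
    // index 1; running the same validator in two places risks slashing.
    assert_eq!(derived_indices(1, 2).collect::<Vec<_>>(), vec![1, 2]);
}
```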
The command will create two files:
- `./deposits.json`: this file does *not* contain sensitive information and may be uploaded to the [Ethereum Staking Launchpad].
- `./validators.json`: this file contains **sensitive unencrypted validator keys, do not share it with anyone or upload it to any website**.
### 2. Import the validators
The VC which will receive the validators needs to have the following flags at a minimum:
- `--http`
- `--http-port 5062`
- `--enable-doppelganger-protection`
Therefore, the VC command might look like:
```bash
lighthouse \
vc \
--http \
--http-port 5062 \
--enable-doppelganger-protection
```
In order to import the validators, the location of the VC `api-token.txt` file
must be known. The location of the file varies, but it is located in the
"validator directory" of your data directory. For example:
`~/.lighthouse/mainnet/validators/api-token.txt`. We will use `<API-TOKEN-PATH>`
to stand in for this path. If you are unsure of the `api-token.txt` path, you can run `curl http://localhost:5062/lighthouse/auth` which will show the path.
Once the VC is running, use the `import` command to import the validators to the VC:
```bash
lighthouse \
validator-manager \
import \
--validators-file validators.json \
--vc-token <API-TOKEN-PATH>
```
If successful, the command output will appear like below:
```bash
Running validator manager for mainnet network
Validator client is reachable at http://localhost:5062/ and reports 0 validators
Starting to submit 2 validators to VC, each validator may take several seconds
Uploaded keystore 1 of 2 to the VC
Uploaded keystore 2 of 2 to the VC
```
The user should now *securely* delete the `validators.json` file (e.g., `shred -u validators.json`).
The `validators.json` contains the unencrypted validator keys and must not be
shared with anyone.
At the same time, `lighthouse vc` will log:
```bash
INFO Importing keystores via standard HTTP API, count: 1
WARN No slashing protection data provided with keystores
INFO Enabled validator voting_pubkey: 0xab6e29f1b98fedfca878edce2b471f1b5ee58ee4c3bd216201f98254ef6f6eac40a53d74c8b7da54f51d3e85cacae92f, signing_method: local_keystore
INFO Modified key_cache saved successfully
```
The WARN message means that the `validators.json` file does not contain the slashing protection data. This is normal if you are starting a new validator. The flag `--enable-doppelganger-protection` will also protect users from potential slashing risk.
The validators will now go through 2-3 epochs of [doppelganger
protection](./validator-doppelganger.md) and will automatically start performing
their duties when they are deposited and activated.
If the host VC contains the same public key as the `validators.json` file, an error will be shown and the `import` process will stop:
```bash
Duplicate validator 0xab6e29f1b98fedfca878edce2b471f1b5ee58ee4c3bd216201f98254ef6f6eac40a53d74c8b7da54f51d3e85cacae92f already exists on the destination validator client. This may indicate that some validators are running in two places at once, which can lead to slashing. If you are certain that there is no risk, add the --ignore-duplicates flag.
Err(DuplicateValidator(0xab6e29f1b98fedfca878edce2b471f1b5ee58ee4c3bd216201f98254ef6f6eac40a53d74c8b7da54f51d3e85cacae92f))
```
If you are certain that it is safe, you can add the flag `--ignore-duplicates` in the `import` command. The command becomes:
```bash
lighthouse \
validator-manager \
import \
--validators-file validators.json \
--vc-token <API-TOKEN-PATH> \
--ignore-duplicates
```
and the output will be as follows:
```bash
Duplicate validators are ignored, ignoring 0xab6e29f1b98fedfca878edce2b471f1b5ee58ee4c3bd216201f98254ef6f6eac40a53d74c8b7da54f51d3e85cacae92f which exists on the destination validator client
Re-uploaded keystore 1 of 6 to the VC
```
The guide is complete.

View File

@ -0,0 +1,188 @@
# Moving Validators
The `lighthouse validator-manager move` command uses the VC HTTP API to move
validators from one VC (the "src" VC) to another VC (the "dest" VC). The move
operation is *comprehensive*; it will:
- Disable the validators on the src VC.
- Remove the validator keystores from the src VC file system.
- Export the slashing database records for the appropriate validators from the src VC to the dest VC.
- Enable the validators on the dest VC.
- Generally result in very little or no validator downtime.
It is capable of moving all validators on the src VC, a count of validators, or
a list of pubkeys.
The `move` command is only guaranteed to work between two Lighthouse VCs (i.e.,
there is no guarantee that the commands will work between Lighthouse and Teku, for instance).
The `move` command only supports moving validators using a keystore on the local
file system; it does not support `Web3Signer` validators.
Although all efforts are taken to avoid it, it's possible for the `move` command
to fail in a way that removes the validator from the src VC without adding it to the
dest VC. Therefore, it is recommended to **never use the `move` command without
having a backup of all validator keystores (e.g. the mnemonic).**
## Simple Example
The following command will move all validators from the VC running at
`http://localhost:6062` to the VC running at `http://localhost:5062`.
```bash
lighthouse \
validator-manager \
move \
--src-vc-url http://localhost:6062 \
--src-vc-token ~/src-token.txt \
--dest-vc-url http://localhost:5062 \
--dest-vc-token ~/.lighthouse/mainnet/validators/api-token.txt \
--validators all
```
## Detailed Guide
This guide describes the steps to move validators between two validator clients (VCs) which are
able to SSH between each other. This guide assumes experience with the Linux command line and SSH
connections.
There will be two VCs in this example:
- The *source* VC which contains the validators/keystores to be moved.
- The *destination* VC which is to take the validators/keystores from the source.
There will be two hosts in this example:
- Host 1 (*"source host"*): Is running the `src-vc`.
- Host 2 (*"destination host"*): Is running the `dest-vc`.
The example assumes
that Host 1 is able to SSH to Host 2.
In reality, many host configurations are possible. For example:
- Both VCs on the same host.
- Both VCs on different hosts and the `validator-manager` being used on a third host.
### 1. Configure the Source VC
The source VC needs to have the following flags at a minimum:
- `--http`
- `--http-port 5062`
- `--http-allow-keystore-export`
Therefore, the source VC command might look like:
```bash
lighthouse \
vc \
--http \
--http-port 5062 \
--http-allow-keystore-export
```
### 2. Configure the Destination VC
The destination VC needs to have the following flags at a minimum:
- `--http`
- `--http-port 5062`
- `--enable-doppelganger-protection`
Therefore, the destination VC command might look like:
```bash
lighthouse \
vc \
--http \
--http-port 5062 \
--enable-doppelganger-protection
```
> The `--enable-doppelganger-protection` flag is not *strictly* required, however
> it is recommended for an additional layer of safety. It will result in 2-3
> epochs of downtime for the validator after it is moved, which is generally an
> inconsequential cost in lost rewards or penalties.
>
> Optionally, users can add the `--http-store-passwords-in-secrets-dir` flag if they'd like to have
> the import validator keystore passwords stored in separate files rather than in the
> `validator_definitions.yml` file. If you don't know what this means, you can safely omit the flag.
### 3. Obtain the Source API Token
The VC API is protected by an *API token*. This is stored in a file on each of the hosts. Since
we'll be running our command on the destination host, it will need to have the API token for the
source host on its file-system.
On the **source host**, find the location of the `api-token.txt` file and copy the contents. The
location of the file varies, but it is located in the "validator directory" of your data directory,
alongside validator keystores. For example: `~/.lighthouse/mainnet/validators/api-token.txt`. If you are unsure of the `api-token.txt` path, you can run `curl http://localhost:5062/lighthouse/auth` which will show the path.
Copy the contents of that file into a new file on the **destination host** at `~/src-token.txt`. The
API token should be similar to `api-token-0x03eace4c98e8f77477bb99efb74f9af10d800bd3318f92c33b719a4644254d4123`.
### 4. Create an SSH Tunnel
On the **source host**, open a terminal window, SSH to the **destination host**, and establish a reverse-SSH connection
between the **destination host** and the **source host**.
```bash
ssh dest-host
ssh -L 6062:localhost:5062 src-host
```
It's important that you leave this session open throughout the rest of this tutorial. If you close
this terminal window then the connection between the destination and source host will be lost.
### 5. Move
With the SSH tunnel established between the `dest-host` and `src-host`, from the **destination
host** run the command to move the validators:
```bash
lighthouse \
validator-manager \
move \
--src-vc-url http://localhost:6062 \
--src-vc-token ~/src-token.txt \
--dest-vc-url http://localhost:5062 \
--dest-vc-token ~/.lighthouse/mainnet/validators/api-token.txt \
--validators all
```
The command will provide information about the progress of the operation and
emit `Done.` when the operation has completed successfully. For example:
```bash
Running validator manager for mainnet network
Validator client is reachable at http://localhost:5062/ and reports 2 validators
Validator client is reachable at http://localhost:6062/ and reports 0 validators
Moved keystore 1 of 2
Moved keystore 2 of 2
Done.
```
At the same time, `lighthouse vc` will log:
```bash
INFO Importing keystores via standard HTTP API, count: 1
INFO Enabled validator voting_pubkey: 0xab6e29f1b98fedfca878edce2b471f1b5ee58ee4c3bd216201f98254ef6f6eac40a53d74c8b7da54f51d3e85cacae92f, signing_method: local_keystore
INFO Modified key_cache saved successfully
```
Once the operation completes successfully, there is nothing else to be done. The
validators have been removed from the `src-host` and enabled at the `dest-host`.
If the `--enable-doppelganger-protection` flag was used it may take 2-3 epochs
for the validators to start attesting and producing blocks on the `dest-host`.
If you would only like to move some validators, you can replace the flag `--validators all` with one or more validator public keys. For example:
```bash
lighthouse \
validator-manager \
move \
--src-vc-url http://localhost:6062 \
--src-vc-token ~/src-token.txt \
--dest-vc-url http://localhost:5062 \
--dest-vc-token ~/.lighthouse/mainnet/validators/api-token.txt \
--validators 0x9096aab771e44da149bd7c9926d6f7bb96ef465c0eeb4918be5178cd23a1deb4aec232c61d85ff329b54ed4a3bdfff3a,0x90fc4f72d898a8f01ab71242e36f4545aaf87e3887be81632bb8ba4b2ae8fb70753a62f866344d7905e9a07f5a9cdda1
```
Any errors encountered during the operation should include information on how to
proceed. Assistance is also available on our
[Discord](https://discord.gg/cyAszAh).

View File

@ -0,0 +1,35 @@
# Validator Manager
[Ethereum Staking Launchpad]: https://launchpad.ethereum.org/en/
[Import Validators]: #import-validators
## Introduction
The `lighthouse validator-manager` tool provides utilities for managing validators on a *running*
Lighthouse Validator Client. The validator manager performs operations via the HTTP API of the
validator client (VC). Due to limitations of the
[keymanager-APIs](https://ethereum.github.io/keymanager-APIs/), only Lighthouse VCs are fully
supported by this command.
The validator manager tool is similar to the `lighthouse account-manager` tool,
except the latter creates files that will be read by the VC next time it starts
whilst the former makes instant changes to a live VC.
The `account-manager` is ideal for importing keys created with the
[staking-deposit-cli](https://github.com/ethereum/staking-deposit-cli). On the
other hand, the `validator-manager` is ideal for moving existing validators
between two VCs or for advanced users to create validators at scale with less
downtime.
The `validator-manager` boasts the following features:
- One-line command to arbitrarily move validators between two VCs, maintaining the slashing protection database.
- Generates deposit files compatible with the [Ethereum Staking Launchpad][].
- Generally involves zero or very little downtime.
- The "key cache" is preserved whenever a validator is added with the validator
manager, preventing long waits at start up when a new validator is added.
## Guides
- [Creating and importing validators using the `create` and `import` commands.](./validator-manager-create.md)
- [Moving validators between two VCs using the `move` command.](./validator-manager-move.md)

View File

@ -80,7 +80,7 @@ impl<T: EthSpec> BootNodeConfig<T> {
} }
let private_key = load_private_key(&network_config, &logger); let private_key = load_private_key(&network_config, &logger);
let local_key = CombinedKey::from_libp2p(&private_key)?; let local_key = CombinedKey::from_libp2p(private_key)?;
let local_enr = if let Some(dir) = matches.value_of("network-dir") { let local_enr = if let Some(dir) = matches.value_of("network-dir") {
let network_dir: PathBuf = dir.into(); let network_dir: PathBuf = dir.into();

View File

@ -13,6 +13,9 @@ use std::fs::{self, File};
use std::io; use std::io;
use std::io::prelude::*; use std::io::prelude::*;
use std::path::{Path, PathBuf}; use std::path::{Path, PathBuf};
use std::str::from_utf8;
use std::thread::sleep;
use std::time::Duration;
use zeroize::Zeroize; use zeroize::Zeroize;
pub mod validator_definitions; pub mod validator_definitions;
@ -30,6 +33,8 @@ pub const MINIMUM_PASSWORD_LEN: usize = 12;
/// array of length 32. /// array of length 32.
const DEFAULT_PASSWORD_LEN: usize = 48; const DEFAULT_PASSWORD_LEN: usize = 48;
pub const MNEMONIC_PROMPT: &str = "Enter the mnemonic phrase:";
/// Returns the "default" path where a wallet should store its password file. /// Returns the "default" path where a wallet should store its password file.
pub fn default_wallet_password_path<P: AsRef<Path>>(wallet_name: &str, secrets_dir: P) -> PathBuf { pub fn default_wallet_password_path<P: AsRef<Path>>(wallet_name: &str, secrets_dir: P) -> PathBuf {
secrets_dir.as_ref().join(format!("{}.pass", wallet_name)) secrets_dir.as_ref().join(format!("{}.pass", wallet_name))
@ -59,6 +64,18 @@ pub fn read_password<P: AsRef<Path>>(path: P) -> Result<PlainText, io::Error> {
fs::read(path).map(strip_off_newlines).map(Into::into) fs::read(path).map(strip_off_newlines).map(Into::into)
} }
/// Reads a password file into a `ZeroizeString` struct, with new-lines removed.
pub fn read_password_string<P: AsRef<Path>>(path: P) -> Result<ZeroizeString, String> {
fs::read(path)
.map_err(|e| format!("Error opening file: {:?}", e))
.map(strip_off_newlines)
.and_then(|bytes| {
String::from_utf8(bytes)
.map_err(|e| format!("Error decoding utf8: {:?}", e))
.map(Into::into)
})
}
/// Write a file atomically by using a temporary file as an intermediate. /// Write a file atomically by using a temporary file as an intermediate.
/// ///
/// Care is taken to preserve the permissions of the file at `file_path` being written. /// Care is taken to preserve the permissions of the file at `file_path` being written.
@ -220,6 +237,46 @@ impl AsRef<[u8]> for ZeroizeString {
} }
} }
pub fn read_mnemonic_from_cli(
mnemonic_path: Option<PathBuf>,
stdin_inputs: bool,
) -> Result<Mnemonic, String> {
let mnemonic = match mnemonic_path {
Some(path) => fs::read(&path)
.map_err(|e| format!("Unable to read {:?}: {:?}", path, e))
.and_then(|bytes| {
let bytes_no_newlines: PlainText = strip_off_newlines(bytes).into();
let phrase = from_utf8(bytes_no_newlines.as_ref())
.map_err(|e| format!("Unable to derive mnemonic: {:?}", e))?;
Mnemonic::from_phrase(phrase, Language::English).map_err(|e| {
format!(
"Unable to derive mnemonic from string {:?}: {:?}",
phrase, e
)
})
})?,
None => loop {
eprintln!();
eprintln!("{}", MNEMONIC_PROMPT);
let mnemonic = read_input_from_user(stdin_inputs)?;
match Mnemonic::from_phrase(mnemonic.as_str(), Language::English) {
Ok(mnemonic_m) => {
eprintln!("Valid mnemonic provided.");
eprintln!();
sleep(Duration::from_secs(1));
break mnemonic_m;
}
Err(_) => {
eprintln!("Invalid mnemonic");
}
}
},
};
Ok(mnemonic)
}
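A hedged usage sketch of the helper above (the file path is hypothetical, and `phrase()` is the accessor on the `bip39` `Mnemonic` type used here; `read_mnemonic_from_cli` is assumed in scope):
```rust
use std::path::PathBuf;

fn main() -> Result<(), String> {
    // With Some(path) the mnemonic is read from the file; with None the
    // helper prompts interactively using MNEMONIC_PROMPT.
    let mnemonic = read_mnemonic_from_cli(Some(PathBuf::from("./mnemonic.txt")), false)?;
    eprintln!(
        "mnemonic accepted ({} words)",
        mnemonic.phrase().split_whitespace().count()
    );
    Ok(())
}
```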
#[cfg(test)] #[cfg(test)]
mod test { mod test {
use super::*; use super::*;

View File

@ -3,7 +3,9 @@
//! Serves as the source-of-truth of which validators this validator client should attempt (or not //! Serves as the source-of-truth of which validators this validator client should attempt (or not
//! attempt) to load into the `crate::intialized_validators::InitializedValidators` struct. //! attempt) to load into the `crate::intialized_validators::InitializedValidators` struct.
use crate::{default_keystore_password_path, write_file_via_temporary, ZeroizeString}; use crate::{
default_keystore_password_path, read_password_string, write_file_via_temporary, ZeroizeString,
};
use directory::ensure_dir_exists; use directory::ensure_dir_exists;
use eth2_keystore::Keystore; use eth2_keystore::Keystore;
use regex::Regex; use regex::Regex;
@ -43,6 +45,18 @@ pub enum Error {
UnableToOpenKeystore(eth2_keystore::Error), UnableToOpenKeystore(eth2_keystore::Error),
/// The validator directory could not be created. /// The validator directory could not be created.
UnableToCreateValidatorDir(PathBuf), UnableToCreateValidatorDir(PathBuf),
UnableToReadKeystorePassword(String),
KeystoreWithoutPassword,
}
/// Defines how a password for a validator keystore will be persisted.
pub enum PasswordStorage {
/// Store the password in the `validator_definitions.yml` file.
ValidatorDefinitions(ZeroizeString),
/// Store the password in a separate, dedicated file (likely in the "secrets" directory).
File(PathBuf),
/// Don't store the password at all.
None,
} }
#[derive(Clone, PartialEq, Serialize, Deserialize, Hash, Eq)] #[derive(Clone, PartialEq, Serialize, Deserialize, Hash, Eq)]
@ -92,6 +106,34 @@ impl SigningDefinition {
pub fn is_local_keystore(&self) -> bool { pub fn is_local_keystore(&self) -> bool {
matches!(self, SigningDefinition::LocalKeystore { .. }) matches!(self, SigningDefinition::LocalKeystore { .. })
} }
pub fn voting_keystore_password(&self) -> Result<Option<ZeroizeString>, Error> {
match self {
SigningDefinition::LocalKeystore {
voting_keystore_password: Some(password),
..
} => Ok(Some(password.clone())),
SigningDefinition::LocalKeystore {
voting_keystore_password_path: Some(path),
..
} => read_password_string(path)
.map(Into::into)
.map(Option::Some)
.map_err(Error::UnableToReadKeystorePassword),
SigningDefinition::LocalKeystore { .. } => Err(Error::KeystoreWithoutPassword),
SigningDefinition::Web3Signer(_) => Ok(None),
}
}
pub fn voting_keystore_password_path(&self) -> Option<&PathBuf> {
match self {
SigningDefinition::LocalKeystore {
voting_keystore_password_path: Some(path),
..
} => Some(path),
_ => None,
}
}
} }
/// A validator that may be initialized by this validator client. /// A validator that may be initialized by this validator client.
@ -129,7 +171,7 @@ impl ValidatorDefinition {
/// This function does not check the password against the keystore. /// This function does not check the password against the keystore.
pub fn new_keystore_with_password<P: AsRef<Path>>( pub fn new_keystore_with_password<P: AsRef<Path>>(
voting_keystore_path: P, voting_keystore_path: P,
voting_keystore_password: Option<ZeroizeString>, voting_keystore_password_storage: PasswordStorage,
graffiti: Option<GraffitiString>, graffiti: Option<GraffitiString>,
suggested_fee_recipient: Option<Address>, suggested_fee_recipient: Option<Address>,
gas_limit: Option<u64>, gas_limit: Option<u64>,
@ -139,6 +181,12 @@ impl ValidatorDefinition {
let keystore = let keystore =
Keystore::from_json_file(&voting_keystore_path).map_err(Error::UnableToOpenKeystore)?; Keystore::from_json_file(&voting_keystore_path).map_err(Error::UnableToOpenKeystore)?;
let voting_public_key = keystore.public_key().ok_or(Error::InvalidKeystorePubkey)?; let voting_public_key = keystore.public_key().ok_or(Error::InvalidKeystorePubkey)?;
let (voting_keystore_password_path, voting_keystore_password) =
match voting_keystore_password_storage {
PasswordStorage::ValidatorDefinitions(password) => (None, Some(password)),
PasswordStorage::File(path) => (Some(path), None),
PasswordStorage::None => (None, None),
};
Ok(ValidatorDefinition { Ok(ValidatorDefinition {
enabled: true, enabled: true,
@ -150,7 +198,7 @@ impl ValidatorDefinition {
builder_proposals, builder_proposals,
signing_definition: SigningDefinition::LocalKeystore { signing_definition: SigningDefinition::LocalKeystore {
voting_keystore_path, voting_keystore_path,
voting_keystore_password_path: None, voting_keystore_password_path,
voting_keystore_password, voting_keystore_password,
}, },
}) })
@ -346,6 +394,13 @@ impl ValidatorDefinitions {
pub fn as_mut_slice(&mut self) -> &mut [ValidatorDefinition] { pub fn as_mut_slice(&mut self) -> &mut [ValidatorDefinition] {
self.0.as_mut_slice() self.0.as_mut_slice()
} }
// Returns an iterator over all the `voting_keystore_password_paths` in self.
pub fn iter_voting_keystore_password_paths(&self) -> impl Iterator<Item = &PathBuf> {
self.0
.iter()
.filter_map(|def| def.signing_definition.voting_keystore_password_path())
}
} }
/// Perform an exhaustive tree search of `dir`, adding any discovered voting keystore paths to /// Perform an exhaustive tree search of `dir`, adding any discovered voting keystore paths to

View File

@ -21,10 +21,14 @@ use futures_util::StreamExt;
use lighthouse_network::PeerId; use lighthouse_network::PeerId;
use pretty_reqwest_error::PrettyReqwestError; use pretty_reqwest_error::PrettyReqwestError;
pub use reqwest; pub use reqwest;
use reqwest::{IntoUrl, RequestBuilder, Response}; use reqwest::{
header::{HeaderMap, HeaderValue},
Body, IntoUrl, RequestBuilder, Response,
};
pub use reqwest::{StatusCode, Url}; pub use reqwest::{StatusCode, Url};
pub use sensitive_url::{SensitiveError, SensitiveUrl}; pub use sensitive_url::{SensitiveError, SensitiveUrl};
use serde::{de::DeserializeOwned, Serialize}; use serde::{de::DeserializeOwned, Serialize};
use ssz::Encode;
use std::convert::TryFrom; use std::convert::TryFrom;
use std::fmt; use std::fmt;
use std::iter::Iterator; use std::iter::Iterator;
@ -322,6 +326,25 @@ impl BeaconNodeHttpClient {
ok_or_error(response).await ok_or_error(response).await
} }
/// Generic POST function supporting arbitrary responses and timeouts.
async fn post_generic_with_ssz_body<T: Into<Body>, U: IntoUrl>(
&self,
url: U,
body: T,
timeout: Option<Duration>,
) -> Result<Response, Error> {
let mut builder = self.client.post(url);
if let Some(timeout) = timeout {
builder = builder.timeout(timeout);
}
let response = builder
.header("Content-Type", "application/octet-stream")
.body(body)
.send()
.await?;
ok_or_error(response).await
}
/// Generic POST function supporting arbitrary responses and timeouts. /// Generic POST function supporting arbitrary responses and timeouts.
async fn post_generic_with_consensus_version<T: Serialize, U: IntoUrl>( async fn post_generic_with_consensus_version<T: Serialize, U: IntoUrl>(
&self, &self,
@ -342,6 +365,31 @@ impl BeaconNodeHttpClient {
ok_or_error(response).await ok_or_error(response).await
} }
/// Generic POST function supporting arbitrary responses and timeouts.
async fn post_generic_with_consensus_version_and_ssz_body<T: Into<Body>, U: IntoUrl>(
&self,
url: U,
body: T,
timeout: Option<Duration>,
fork: ForkName,
) -> Result<Response, Error> {
let mut builder = self.client.post(url);
if let Some(timeout) = timeout {
builder = builder.timeout(timeout);
}
let mut headers = HeaderMap::new();
headers.insert(
CONSENSUS_VERSION_HEADER,
HeaderValue::from_str(&fork.to_string()).expect("Failed to create header value"),
);
headers.insert(
"Content-Type",
HeaderValue::from_static("application/octet-stream"),
);
let response = builder.headers(headers).body(body).send().await?;
ok_or_error(response).await
}
/// `GET beacon/genesis` /// `GET beacon/genesis`
/// ///
/// ## Errors /// ## Errors
@ -654,6 +702,26 @@ impl BeaconNodeHttpClient {
Ok(())
}
/// `POST beacon/blocks`
///
/// Returns `Ok(None)` on a 404 error.
pub async fn post_beacon_blocks_ssz<T: EthSpec, Payload: AbstractExecPayload<T>>(
&self,
block: &SignedBlockContents<T, Payload>,
) -> Result<(), Error> {
let mut path = self.eth_path(V1)?;
path.path_segments_mut()
.map_err(|()| Error::InvalidUrl(self.server.clone()))?
.push("beacon")
.push("blocks");
self.post_generic_with_ssz_body(path, block.as_ssz_bytes(), Some(self.timeouts.proposal))
.await?;
Ok(())
}
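
A hypothetical call site (client and block construction are assumed to exist elsewhere in the caller):

// Serialize the block contents to SSZ and publish via /eth/v1/beacon/blocks.
async fn publish_ssz<T: EthSpec>(
    client: &BeaconNodeHttpClient,
    block: &SignedBlockContents<T>,
) -> Result<(), Error> {
    client.post_beacon_blocks_ssz(block).await
}
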
/// `POST beacon/blinded_blocks`
///
/// Returns `Ok(None)` on a 404 error.
@ -674,6 +742,26 @@ impl BeaconNodeHttpClient {
Ok(())
}
/// `POST beacon/blinded_blocks`
///
/// Returns `Ok(None)` on a 404 error.
pub async fn post_beacon_blinded_blocks_ssz<T: EthSpec, Payload: AbstractExecPayload<T>>(
&self,
block: &SignedBeaconBlock<T, Payload>,
) -> Result<(), Error> {
let mut path = self.eth_path(V1)?;
path.path_segments_mut()
.map_err(|()| Error::InvalidUrl(self.server.clone()))?
.push("beacon")
.push("blinded_blocks");
self.post_generic_with_ssz_body(path, block.as_ssz_bytes(), Some(self.timeouts.proposal))
.await?;
Ok(())
}
pub fn post_beacon_blocks_v2_path(
&self,
validation_level: Option<BroadcastValidation>,
@ -727,6 +815,23 @@ impl BeaconNodeHttpClient {
Ok(())
}
/// `POST v2/beacon/blocks`
pub async fn post_beacon_blocks_v2_ssz<T: EthSpec, Payload: AbstractExecPayload<T>>(
&self,
block: &SignedBeaconBlock<T, Payload>,
validation_level: Option<BroadcastValidation>,
) -> Result<(), Error> {
self.post_generic_with_consensus_version_and_ssz_body(
self.post_beacon_blocks_v2_path(validation_level)?,
block.as_ssz_bytes(),
Some(self.timeouts.proposal),
block.message().body().fork_name(),
)
.await?;
Ok(())
}
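
As a usage sketch (values assumed), the caller chooses how strictly the node must validate before broadcasting:

// Publish over the v2 endpoint, requiring gossip-rule validation first.
async fn publish_v2_ssz<T: EthSpec>(
    client: &BeaconNodeHttpClient,
    block: &SignedBeaconBlock<T>,
) -> Result<(), Error> {
    client
        .post_beacon_blocks_v2_ssz(block, Some(BroadcastValidation::Gossip))
        .await
}
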
/// `POST v2/beacon/blinded_blocks`
//TODO(sean) update this along with builder updates
pub async fn post_beacon_blinded_blocks_v2<T: EthSpec>(
@ -745,6 +850,23 @@ impl BeaconNodeHttpClient {
Ok(())
}
/// `POST v2/beacon/blinded_blocks`
pub async fn post_beacon_blinded_blocks_v2_ssz<T: EthSpec>(
&self,
block: &SignedBlindedBeaconBlock<T>,
validation_level: Option<BroadcastValidation>,
) -> Result<(), Error> {
self.post_generic_with_consensus_version_and_ssz_body(
self.post_beacon_blinded_blocks_v2_path(validation_level)?,
block.as_ssz_bytes(),
Some(self.timeouts.proposal),
block.message().body().fork_name(),
)
.await?;
Ok(())
}
/// Path for `v2/beacon/blocks`
pub fn get_beacon_blocks_path(&self, block_id: BlockId) -> Result<Url, Error> {
let mut path = self.eth_path(V2)?;
@ -1665,6 +1787,24 @@ impl BeaconNodeHttpClient {
.await
}
/// `POST validator/liveness/{epoch}`
pub async fn post_validator_liveness_epoch(
&self,
epoch: Epoch,
indices: Vec<u64>,
) -> Result<GenericResponse<Vec<StandardLivenessResponseData>>, Error> {
let mut path = self.eth_path(V1)?;
path.path_segments_mut()
.map_err(|()| Error::InvalidUrl(self.server.clone()))?
.push("validator")
.push("liveness")
.push(&epoch.to_string());
self.post_with_timeout_and_response(path, &indices, self.timeouts.liveness)
.await
}
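
A sketch of calling the new endpoint (epoch and indices are illustrative; client construction is assumed):

// Query whether validators 0 and 1 attested (were "live") in epoch 100.
async fn check_liveness(client: &BeaconNodeHttpClient) -> Result<(), Error> {
    let response = client
        .post_validator_liveness_epoch(Epoch::new(100), vec![0, 1])
        .await?;
    for entry in response.data {
        println!("validator {} live: {}", entry.index, entry.is_live);
    }
    Ok(())
}
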
/// `POST validator/duties/attester/{epoch}`
pub async fn post_validator_duties_attester(
&self,

View File

@ -490,6 +490,21 @@ impl ValidatorClientHttpClient {
.await
}
/// `DELETE eth/v1/keystores`
pub async fn delete_lighthouse_keystores(
&self,
req: &DeleteKeystoresRequest,
) -> Result<ExportKeystoresResponse, Error> {
let mut path = self.server.full.clone();
path.path_segments_mut()
.map_err(|()| Error::InvalidUrl(self.server.clone()))?
.push("lighthouse")
.push("keystores");
self.delete_with_unsigned_response(path, req).await
}
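
A hedged usage sketch (request construction is assumed; `DeleteKeystoresRequest` comes from the standard keymanager types):

// Delete keystores via the Lighthouse-specific endpoint, which also returns
// the keystores themselves plus their slashing-protection history.
async fn export_and_delete(
    client: &ValidatorClientHttpClient,
    req: &DeleteKeystoresRequest,
) -> Result<(), Error> {
    let resp = client.delete_lighthouse_keystores(req).await?;
    println!("exported {} keystore(s)", resp.data.len());
    Ok(())
}
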
fn make_keystores_url(&self) -> Result<Url, Error> {
let mut url = self.server.full.clone();
url.path_segments_mut()

View File

@ -1,9 +1,10 @@
use account_utils::ZeroizeString;
use eth2_keystore::Keystore;
use serde::{Deserialize, Serialize};
-use slashing_protection::interchange::Interchange;
use types::{Address, PublicKeyBytes};

+pub use slashing_protection::interchange::Interchange;

#[derive(Debug, Deserialize, Serialize, PartialEq)]
pub struct GetFeeRecipientResponse {
pub pubkey: PublicKeyBytes,
@ -27,7 +28,7 @@ pub struct ListKeystoresResponse {
pub data: Vec<SingleKeystoreResponse>,
}

-#[derive(Debug, Deserialize, Serialize, PartialEq)]
+#[derive(Debug, Deserialize, Serialize, PartialEq, Eq, Hash)]
pub struct SingleKeystoreResponse {
pub validating_pubkey: PublicKeyBytes,
pub derivation_path: Option<String>,

View File

@ -152,3 +152,19 @@ pub struct UpdateGasLimitRequest {
pub struct VoluntaryExitQuery {
pub epoch: Option<Epoch>,
}
#[derive(Deserialize, Serialize)]
pub struct ExportKeystoresResponse {
pub data: Vec<SingleExportKeystoresResponse>,
#[serde(with = "serde_utils::json_str")]
pub slashing_protection: Interchange,
}
#[derive(Deserialize, Serialize)]
pub struct SingleExportKeystoresResponse {
pub status: Status<DeleteKeystoreStatus>,
#[serde(skip_serializing_if = "Option::is_none")]
pub validating_keystore: Option<KeystoreJsonStr>,
#[serde(skip_serializing_if = "Option::is_none")]
pub validating_keystore_password: Option<ZeroizeString>,
}
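
Note the string embedding here: `serde_utils::json_str` (and, for the keystore field, the `KeystoreJsonStr` wrapper) serialize nested JSON documents as strings, mirroring the standard keymanager API. An illustrative wire shape, values made up:

// {
//   "data": [{
//     "status": { "status": "deleted" },
//     "validating_keystore": "{\"version\":4,\"crypto\":{...},\"pubkey\":\"...\"}",
//     "validating_keystore_password": "<zeroized-on-drop string>"
//   }],
//   "slashing_protection": "{\"metadata\":{...},\"data\":[...]}"
// }
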

View File

@ -1233,6 +1233,13 @@ impl FromStr for Accept {
}
}
#[derive(PartialEq, Debug, Serialize, Deserialize)]
pub struct StandardLivenessResponseData {
#[serde(with = "serde_utils::quoted_u64")]
pub index: u64,
pub is_live: bool,
}
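
`quoted_u64` follows the beacon API convention of encoding integers as decimal strings, so the struct round-trips JSON like `{"index":"5","is_live":true}`. A minimal check (assumes `serde_json` is available):

fn liveness_roundtrip() -> Result<(), serde_json::Error> {
    let json = r#"{"index":"5","is_live":true}"#;
    let parsed: StandardLivenessResponseData = serde_json::from_str(json)?;
    assert_eq!(parsed.index, 5);
    assert!(parsed.is_live);
    Ok(())
}
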
#[derive(Debug, Serialize, Deserialize)]
pub struct LivenessRequestData {
pub epoch: Epoch,
@ -1417,9 +1424,10 @@ pub type BlockContentsTuple<T, Payload> = (
);

/// A wrapper over a [`SignedBeaconBlock`] or a [`SignedBeaconBlockAndBlobSidecars`].
-#[derive(Clone, Debug, Serialize, Deserialize)]
+#[derive(Clone, Debug, Encode, Serialize, Deserialize)]
#[serde(untagged)]
#[serde(bound = "T: EthSpec")]
#[ssz(enum_behaviour = "transparent")]
pub enum SignedBlockContents<T: EthSpec, Payload: AbstractExecPayload<T> = FullPayload<T>> {
BlockAndBlobSidecars(SignedBeaconBlockAndBlobSidecars<T, Payload>),
Block(SignedBeaconBlock<T, Payload>),
@ -1440,6 +1448,13 @@ impl<T: EthSpec, Payload: AbstractExecPayload<T>> SignedBlockContents<T, Payload
}
}
/// SSZ decode with fork variant determined by slot.
pub fn from_ssz_bytes(bytes: &[u8], spec: &ChainSpec) -> Result<Self, ssz::DecodeError> {
// FIXME(jimmy): SSZ decode not implemented for `SignedBeaconBlockAndBlobSidecars`
SignedBeaconBlock::from_ssz_bytes(bytes, spec)
.map(|block| SignedBlockContents::Block(block))
}
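
Decoding takes a `ChainSpec` because SSZ block bytes are not self-describing: the slot inside the block selects the fork variant. A round-trip sketch (assumes `ssz::Encode` is in scope for `as_ssz_bytes`):

fn ssz_roundtrip<T: EthSpec>(
    contents: &SignedBlockContents<T>,
    spec: &ChainSpec,
) -> Result<SignedBlockContents<T>, ssz::DecodeError> {
    // `as_ssz_bytes` comes from the derived `Encode` impl above.
    let bytes = contents.as_ssz_bytes();
    SignedBlockContents::from_ssz_bytes(&bytes, spec)
}
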
pub fn signed_block(&self) -> &SignedBeaconBlock<T, Payload> {
match self {
SignedBlockContents::BlockAndBlobSidecars(block_and_sidecars) => {

View File

@ -20,4 +20,4 @@ types = { path = "../../consensus/types"}
kzg = { path = "../../crypto/kzg" }
ethereum_ssz = "0.5.0"
eth2_config = { path = "../eth2_config"}
-discv5 = "0.3.0"
+discv5 = "0.3.1"

View File

@ -93,3 +93,13 @@ DEPOSIT_CONTRACT_ADDRESS: 0x0B98057eA310F4d31F2a452B414647007d1645d9
# Network
# ---------------------------------------------------------------
SUBNETS_PER_NODE: 4
GOSSIP_MAX_SIZE: 10485760
MIN_EPOCHS_FOR_BLOCK_REQUESTS: 33024
MAX_CHUNK_SIZE: 10485760
TTFB_TIMEOUT: 5
RESP_TIMEOUT: 10
MESSAGE_DOMAIN_INVALID_SNAPPY: 0x00000000
MESSAGE_DOMAIN_VALID_SNAPPY: 0x01000000
ATTESTATION_SUBNET_COUNT: 64
ATTESTATION_SUBNET_EXTRA_BITS: 0
ATTESTATION_SUBNET_PREFIX_BITS: 6

View File

@ -93,3 +93,13 @@ DEPOSIT_CONTRACT_ADDRESS: 0x00000000219ab540356cBB839Cbe05303d7705Fa
# Network
# ---------------------------------------------------------------
SUBNETS_PER_NODE: 2
GOSSIP_MAX_SIZE: 10485760
MIN_EPOCHS_FOR_BLOCK_REQUESTS: 33024
MAX_CHUNK_SIZE: 10485760
TTFB_TIMEOUT: 5
RESP_TIMEOUT: 10
MESSAGE_DOMAIN_INVALID_SNAPPY: 0x00000000
MESSAGE_DOMAIN_VALID_SNAPPY: 0x01000000
ATTESTATION_SUBNET_COUNT: 64
ATTESTATION_SUBNET_EXTRA_BITS: 0
ATTESTATION_SUBNET_PREFIX_BITS: 6
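
These are the spec's default networking values, now written out explicitly per network. As a sanity check of the two derived-looking numbers (using mainnet preset values; formula from the consensus p2p spec):

// 10 MiB cap for gossip messages and req/resp chunks.
const GOSSIP_MAX_SIZE: u64 = 10 * 1024 * 1024; // = 10485760

// MIN_EPOCHS_FOR_BLOCK_REQUESTS
//   = MIN_VALIDATOR_WITHDRAWABILITY_DELAY + CHURN_LIMIT_QUOTIENT / 2
//   = 256 + 65536 / 2 = 33024
const MIN_EPOCHS_FOR_BLOCK_REQUESTS: u64 = 256 + 65536 / 2;
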

View File

@ -90,3 +90,13 @@ DEPOSIT_CONTRACT_ADDRESS: 0xff50ed3d0ec03aC01D4C79aAd74928BFF48a7b2b
# Network
# ---------------------------------------------------------------
SUBNETS_PER_NODE: 2
GOSSIP_MAX_SIZE: 10485760
MIN_EPOCHS_FOR_BLOCK_REQUESTS: 33024
MAX_CHUNK_SIZE: 10485760
TTFB_TIMEOUT: 5
RESP_TIMEOUT: 10
MESSAGE_DOMAIN_INVALID_SNAPPY: 0x00000000
MESSAGE_DOMAIN_VALID_SNAPPY: 0x01000000
ATTESTATION_SUBNET_COUNT: 64
ATTESTATION_SUBNET_EXTRA_BITS: 0
ATTESTATION_SUBNET_PREFIX_BITS: 6

View File

@ -82,3 +82,13 @@ DEPOSIT_CONTRACT_ADDRESS: 0x7f02C3E3c98b133055B8B348B2Ac625669Ed295D
# Network
# ---------------------------------------------------------------
SUBNETS_PER_NODE: 2
GOSSIP_MAX_SIZE: 10485760
MIN_EPOCHS_FOR_BLOCK_REQUESTS: 33024
MAX_CHUNK_SIZE: 10485760
TTFB_TIMEOUT: 5
RESP_TIMEOUT: 10
MESSAGE_DOMAIN_INVALID_SNAPPY: 0x00000000
MESSAGE_DOMAIN_VALID_SNAPPY: 0x01000000
ATTESTATION_SUBNET_COUNT: 64
ATTESTATION_SUBNET_EXTRA_BITS: 0
ATTESTATION_SUBNET_PREFIX_BITS: 6

View File

@ -20,6 +20,7 @@ tree_hash = "0.5.2"
hex = "0.4.2"
derivative = "2.1.1"
lockfile = { path = "../lockfile" }
directory = { path = "../directory" }

[dev-dependencies]
tempfile = "3.1.0"

View File

@ -1,6 +1,7 @@
use crate::{Error as DirError, ValidatorDir};
use bls::get_withdrawal_credentials;
use deposit_contract::{encode_eth1_tx_data, Error as DepositError};
use directory::ensure_dir_exists;
use eth2_keystore::{Error as KeystoreError, Keystore, KeystoreBuilder, PlainText};
use filesystem::create_with_600_perms;
use rand::{distributions::Alphanumeric, Rng};
@ -41,6 +42,7 @@ pub enum Error {
#[cfg(feature = "insecure_keys")]
InsecureKeysError(String),
MissingPasswordDir,
UnableToCreatePasswordDir(String),
}

impl From<KeystoreError> for Error {
@ -78,6 +80,13 @@ impl<'a> Builder<'a> {
self
}
/// Optionally supply a directory in which to store the passwords for the validator keystores.
/// If `None` is provided, do not store the password.
pub fn password_dir_opt(mut self, password_dir_opt: Option<PathBuf>) -> Self {
self.password_dir = password_dir_opt;
self
}
/// Build the `ValidatorDir` using the given `keystore` which can be unlocked with `password`.
///
/// The builder will not necessarily check that `password` can unlock `keystore`.
@ -153,6 +162,10 @@ impl<'a> Builder<'a> {
create_dir_all(&dir).map_err(Error::UnableToCreateDir)?;
}
if let Some(password_dir) = &self.password_dir {
ensure_dir_exists(password_dir).map_err(Error::UnableToCreatePasswordDir)?;
}
// The withdrawal keystore must be initialized in order to store it or create an eth1
// deposit.
if (self.store_withdrawal_keystore || self.deposit_info.is_some())
@ -234,7 +247,7 @@ impl<'a> Builder<'a> {
if self.store_withdrawal_keystore {
// Write the withdrawal password to file.
write_password_to_file(
-password_dir.join(withdrawal_keypair.pk.as_hex_string()),
+keystore_password_path(password_dir, &withdrawal_keystore),
withdrawal_password.as_bytes(),
)?;
@ -250,7 +263,7 @@ impl<'a> Builder<'a> {
if let Some(password_dir) = self.password_dir.as_ref() {
// Write the voting password to file.
write_password_to_file(
-password_dir.join(format!("0x{}", voting_keystore.pubkey())),
+keystore_password_path(password_dir, &voting_keystore),
voting_password.as_bytes(),
)?;
}
@ -262,6 +275,12 @@ impl<'a> Builder<'a> {
}
}
pub fn keystore_password_path<P: AsRef<Path>>(password_dir: P, keystore: &Keystore) -> PathBuf {
password_dir
.as_ref()
.join(format!("0x{}", keystore.pubkey()))
}
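
The helper gives voting and withdrawal keystores a single source of truth for password-file naming. A usage sketch (the directory path is illustrative):

use std::path::Path;

// Where would the password for this keystore live?
fn password_file_for(keystore: &Keystore) -> std::path::PathBuf {
    // Produces e.g. "/var/lib/lighthouse/secrets/0x8f3a...".
    keystore_password_path(Path::new("/var/lib/lighthouse/secrets"), keystore)
}
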
/// Writes a JSON keystore to file.
fn write_keystore_to_file(path: PathBuf, keystore: &Keystore) -> Result<(), Error> {
if path.exists() {

View File

@ -15,6 +15,6 @@ pub use crate::validator_dir::{
ETH1_DEPOSIT_TX_HASH_FILE,
};
pub use builder::{
-Builder, Error as BuilderError, ETH1_DEPOSIT_DATA_FILE, VOTING_KEYSTORE_FILE,
-WITHDRAWAL_KEYSTORE_FILE,
+keystore_password_path, Builder, Error as BuilderError, ETH1_DEPOSIT_DATA_FILE,
+VOTING_KEYSTORE_FILE, WITHDRAWAL_KEYSTORE_FILE,
};

View File

@ -1,5 +1,5 @@
use crate::builder::{
-ETH1_DEPOSIT_AMOUNT_FILE, ETH1_DEPOSIT_DATA_FILE, VOTING_KEYSTORE_FILE,
+keystore_password_path, ETH1_DEPOSIT_AMOUNT_FILE, ETH1_DEPOSIT_DATA_FILE, VOTING_KEYSTORE_FILE,
WITHDRAWAL_KEYSTORE_FILE,
};
use deposit_contract::decode_eth1_tx_data;
@ -219,9 +219,7 @@ pub fn unlock_keypair<P: AsRef<Path>>(
)
.map_err(Error::UnableToReadKeystore)?;

-let password_path = password_dir
-.as_ref()
-.join(format!("0x{}", keystore.pubkey()));
+let password_path = keystore_password_path(password_dir, &keystore);
let password: PlainText = read(&password_path)
.map_err(|_| Error::UnableToReadPassword(password_path))?
.into();

Some files were not shown because too many files have changed in this diff.