Merge upstream, remove local portable build mods
This commit is contained in:
commit
61bb68f390
2
.github/workflows/book.yml
vendored
2
.github/workflows/book.yml
vendored
@ -13,7 +13,7 @@ jobs:
|
||||
build-and-upload-to-s3:
|
||||
runs-on: ubuntu-20.04
|
||||
steps:
|
||||
- uses: actions/checkout@master
|
||||
- uses: actions/checkout@v4
|
||||
|
||||
- name: Setup mdBook
|
||||
uses: peaceiris/actions-mdbook@v1
|
||||
|
||||
26
.github/workflows/docker.yml
vendored
26
.github/workflows/docker.yml
vendored
@ -71,7 +71,7 @@ jobs:
|
||||
VERSION_SUFFIX: ${{ needs.extract-version.outputs.VERSION_SUFFIX }}
|
||||
FEATURE_SUFFIX: ${{ matrix.features.version_suffix }}
|
||||
steps:
|
||||
- uses: actions/checkout@v3
|
||||
- uses: actions/checkout@v4
|
||||
- name: Update Rust
|
||||
if: env.SELF_HOSTED_RUNNERS == 'false'
|
||||
run: rustup update stable
|
||||
@ -106,10 +106,10 @@ jobs:
|
||||
|
||||
- name: Set up Docker Buildx
|
||||
if: env.SELF_HOSTED_RUNNERS == 'false'
|
||||
uses: docker/setup-buildx-action@v2
|
||||
uses: docker/setup-buildx-action@v3
|
||||
|
||||
- name: Build and push
|
||||
uses: docker/build-push-action@v4
|
||||
uses: docker/build-push-action@v5
|
||||
with:
|
||||
file: ./Dockerfile.cross
|
||||
context: .
|
||||
@ -129,7 +129,7 @@ jobs:
|
||||
VERSION_SUFFIX: ${{ needs.extract-version.outputs.VERSION_SUFFIX }}
|
||||
steps:
|
||||
- name: Set up Docker Buildx
|
||||
uses: docker/setup-buildx-action@v2
|
||||
uses: docker/setup-buildx-action@v3
|
||||
|
||||
- name: Dockerhub login
|
||||
run: |
|
||||
@ -148,14 +148,16 @@ jobs:
|
||||
VERSION: ${{ needs.extract-version.outputs.VERSION }}
|
||||
VERSION_SUFFIX: ${{ needs.extract-version.outputs.VERSION_SUFFIX }}
|
||||
steps:
|
||||
- uses: actions/checkout@v3
|
||||
- uses: actions/checkout@v4
|
||||
- name: Dockerhub login
|
||||
run: |
|
||||
echo "${DOCKER_PASSWORD}" | docker login --username ${DOCKER_USERNAME} --password-stdin
|
||||
- name: Build lcli dockerfile (with push)
|
||||
run: |
|
||||
docker build \
|
||||
--build-arg PORTABLE=true \
|
||||
--tag ${LCLI_IMAGE_NAME}:${VERSION}${VERSION_SUFFIX} \
|
||||
--file ./lcli/Dockerfile .
|
||||
docker push ${LCLI_IMAGE_NAME}:${VERSION}${VERSION_SUFFIX}
|
||||
- name: Build lcli and push
|
||||
uses: docker/build-push-action@v5
|
||||
with:
|
||||
build-args: |
|
||||
FEATURES=portable
|
||||
context: .
|
||||
push: true
|
||||
file: ./lcli/Dockerfile
|
||||
tags: ${{ env.LCLI_IMAGE_NAME }}:${{ env.VERSION }}${{ env.VERSION_SUFFIX }}
|
||||
|
||||
2
.github/workflows/linkcheck.yml
vendored
2
.github/workflows/linkcheck.yml
vendored
@ -20,7 +20,7 @@ jobs:
|
||||
|
||||
steps:
|
||||
- name: Checkout code
|
||||
uses: actions/checkout@v3
|
||||
uses: actions/checkout@v4
|
||||
|
||||
- name: Run mdbook server
|
||||
run: |
|
||||
|
||||
6
.github/workflows/local-testnet.yml
vendored
6
.github/workflows/local-testnet.yml
vendored
@ -24,7 +24,7 @@ jobs:
|
||||
# Enable portable to prevent issues with caching `blst` for the wrong CPU type
|
||||
FEATURES: portable,jemalloc
|
||||
steps:
|
||||
- uses: actions/checkout@v3
|
||||
- uses: actions/checkout@v4
|
||||
|
||||
- name: Get latest version of stable Rust
|
||||
run: rustup update stable
|
||||
@ -46,7 +46,7 @@ jobs:
|
||||
echo "$(brew --prefix)/opt/gnu-sed/libexec/gnubin" >> $GITHUB_PATH
|
||||
echo "$(brew --prefix)/opt/grep/libexec/gnubin" >> $GITHUB_PATH
|
||||
# https://github.com/actions/cache/blob/main/examples.md#rust---cargo
|
||||
- uses: actions/cache@v3
|
||||
- uses: actions/cache@v4
|
||||
id: cache-cargo
|
||||
with:
|
||||
path: |
|
||||
@ -95,6 +95,6 @@ jobs:
|
||||
runs-on: ubuntu-latest
|
||||
needs: ["run-local-testnet"]
|
||||
steps:
|
||||
- uses: actions/checkout@v3
|
||||
- uses: actions/checkout@v4
|
||||
- name: Check that success job is dependent on all others
|
||||
run: ./scripts/ci/check-success-job.sh ./.github/workflows/local-testnet.yml local-testnet-success
|
||||
|
||||
16
.github/workflows/release.yml
vendored
16
.github/workflows/release.yml
vendored
@ -68,7 +68,7 @@ jobs:
|
||||
needs: extract-version
|
||||
steps:
|
||||
- name: Checkout sources
|
||||
uses: actions/checkout@v3
|
||||
uses: actions/checkout@v4
|
||||
- name: Get latest version of stable Rust
|
||||
if: env.SELF_HOSTED_RUNNERS == 'false'
|
||||
run: rustup update stable
|
||||
@ -80,7 +80,7 @@ jobs:
|
||||
- uses: KyleMayes/install-llvm-action@v1
|
||||
if: env.SELF_HOSTED_RUNNERS == 'false' && startsWith(matrix.arch, 'x86_64-windows')
|
||||
with:
|
||||
version: "15.0"
|
||||
version: "16.0"
|
||||
directory: ${{ runner.temp }}/llvm
|
||||
- name: Set LIBCLANG_PATH
|
||||
if: startsWith(matrix.arch, 'x86_64-windows')
|
||||
@ -172,17 +172,19 @@ jobs:
|
||||
# This is required to share artifacts between different jobs
|
||||
# =======================================================================
|
||||
|
||||
- name: Upload artifact
|
||||
uses: actions/upload-artifact@v3
|
||||
- name: Upload artifact
|
||||
uses: actions/upload-artifact@v4
|
||||
with:
|
||||
name: lighthouse-${{ needs.extract-version.outputs.VERSION }}-${{ matrix.arch }}.tar.gz
|
||||
path: lighthouse-${{ needs.extract-version.outputs.VERSION }}-${{ matrix.arch }}.tar.gz
|
||||
compression-level: 0
|
||||
|
||||
- name: Upload signature
|
||||
uses: actions/upload-artifact@v3
|
||||
uses: actions/upload-artifact@v4
|
||||
with:
|
||||
name: lighthouse-${{ needs.extract-version.outputs.VERSION }}-${{ matrix.arch }}.tar.gz.asc
|
||||
path: lighthouse-${{ needs.extract-version.outputs.VERSION }}-${{ matrix.arch }}.tar.gz.asc
|
||||
compression-level: 0
|
||||
|
||||
draft-release:
|
||||
name: Draft Release
|
||||
@ -193,7 +195,7 @@ jobs:
|
||||
steps:
|
||||
# This is necessary for generating the changelog. It has to come before "Download Artifacts" or else it deletes the artifacts.
|
||||
- name: Checkout sources
|
||||
uses: actions/checkout@v3
|
||||
uses: actions/checkout@v4
|
||||
with:
|
||||
fetch-depth: 0
|
||||
|
||||
@ -202,7 +204,7 @@ jobs:
|
||||
# ==============================
|
||||
|
||||
- name: Download artifacts
|
||||
uses: actions/download-artifact@v3
|
||||
uses: actions/download-artifact@v4
|
||||
|
||||
# ==============================
|
||||
# Create release draft
|
||||
|
||||
54
.github/workflows/test-suite.yml
vendored
54
.github/workflows/test-suite.yml
vendored
@ -41,7 +41,7 @@ jobs:
|
||||
# Use self-hosted runners only on the sigp repo.
|
||||
runs-on: ${{ github.repository == 'sigp/lighthouse' && fromJson('["self-hosted", "linux", "CI", "large"]') || 'ubuntu-latest' }}
|
||||
steps:
|
||||
- uses: actions/checkout@v3
|
||||
- uses: actions/checkout@v4
|
||||
- name: Get latest version of stable Rust
|
||||
if: env.SELF_HOSTED_RUNNERS == 'false'
|
||||
uses: moonrepo/setup-rust@v1
|
||||
@ -65,7 +65,7 @@ jobs:
|
||||
name: release-tests-windows
|
||||
runs-on: ${{ github.repository == 'sigp/lighthouse' && fromJson('["self-hosted", "windows", "CI"]') || 'windows-2019' }}
|
||||
steps:
|
||||
- uses: actions/checkout@v3
|
||||
- uses: actions/checkout@v4
|
||||
- name: Get latest version of stable Rust
|
||||
if: env.SELF_HOSTED_RUNNERS == 'false'
|
||||
uses: moonrepo/setup-rust@v1
|
||||
@ -86,7 +86,7 @@ jobs:
|
||||
# - uses: KyleMayes/install-llvm-action@v1
|
||||
# if: env.SELF_HOSTED_RUNNERS == 'false'
|
||||
# with:
|
||||
# version: "15.0"
|
||||
# version: "16.0"
|
||||
# directory: ${{ runner.temp }}/llvm
|
||||
- name: Set LIBCLANG_PATH
|
||||
run: echo "LIBCLANG_PATH=$((gcm clang).source -replace "clang.exe")" >> $env:GITHUB_ENV
|
||||
@ -102,7 +102,7 @@ jobs:
|
||||
env:
|
||||
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
|
||||
steps:
|
||||
- uses: actions/checkout@v3
|
||||
- uses: actions/checkout@v4
|
||||
- name: Get latest version of stable Rust
|
||||
if: env.SELF_HOSTED_RUNNERS == 'false'
|
||||
uses: moonrepo/setup-rust@v1
|
||||
@ -121,7 +121,7 @@ jobs:
|
||||
env:
|
||||
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
|
||||
steps:
|
||||
- uses: actions/checkout@v3
|
||||
- uses: actions/checkout@v4
|
||||
- name: Get latest version of stable Rust
|
||||
uses: moonrepo/setup-rust@v1
|
||||
with:
|
||||
@ -136,7 +136,7 @@ jobs:
|
||||
env:
|
||||
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
|
||||
steps:
|
||||
- uses: actions/checkout@v3
|
||||
- uses: actions/checkout@v4
|
||||
- name: Get latest version of stable Rust
|
||||
uses: moonrepo/setup-rust@v1
|
||||
with:
|
||||
@ -151,7 +151,7 @@ jobs:
|
||||
env:
|
||||
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
|
||||
steps:
|
||||
- uses: actions/checkout@v3
|
||||
- uses: actions/checkout@v4
|
||||
- name: Get latest version of stable Rust
|
||||
uses: moonrepo/setup-rust@v1
|
||||
with:
|
||||
@ -167,7 +167,7 @@ jobs:
|
||||
env:
|
||||
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
|
||||
steps:
|
||||
- uses: actions/checkout@v3
|
||||
- uses: actions/checkout@v4
|
||||
- name: Get latest version of stable Rust
|
||||
if: env.SELF_HOSTED_RUNNERS == 'false'
|
||||
uses: moonrepo/setup-rust@v1
|
||||
@ -188,7 +188,7 @@ jobs:
|
||||
name: state-transition-vectors-ubuntu
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- uses: actions/checkout@v3
|
||||
- uses: actions/checkout@v4
|
||||
- name: Get latest version of stable Rust
|
||||
uses: moonrepo/setup-rust@v1
|
||||
with:
|
||||
@ -203,7 +203,7 @@ jobs:
|
||||
env:
|
||||
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
|
||||
steps:
|
||||
- uses: actions/checkout@v3
|
||||
- uses: actions/checkout@v4
|
||||
- name: Get latest version of stable Rust
|
||||
if: env.SELF_HOSTED_RUNNERS == 'false'
|
||||
uses: moonrepo/setup-rust@v1
|
||||
@ -211,7 +211,7 @@ jobs:
|
||||
channel: stable
|
||||
cache-target: release
|
||||
bins: cargo-nextest
|
||||
- name: Run consensus-spec-tests with blst, milagro and fake_crypto
|
||||
- name: Run consensus-spec-tests with blst and fake_crypto
|
||||
run: make nextest-ef
|
||||
- name: Show cache stats
|
||||
if: env.SELF_HOSTED_RUNNERS == 'true'
|
||||
@ -220,7 +220,7 @@ jobs:
|
||||
name: dockerfile-ubuntu
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- uses: actions/checkout@v3
|
||||
- uses: actions/checkout@v4
|
||||
- name: Build the root Dockerfile
|
||||
run: docker build --build-arg FEATURES=portable -t lighthouse:local .
|
||||
- name: Test the built image
|
||||
@ -229,7 +229,7 @@ jobs:
|
||||
name: eth1-simulator-ubuntu
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- uses: actions/checkout@v3
|
||||
- uses: actions/checkout@v4
|
||||
- name: Get latest version of stable Rust
|
||||
uses: moonrepo/setup-rust@v1
|
||||
with:
|
||||
@ -245,7 +245,7 @@ jobs:
|
||||
name: merge-transition-ubuntu
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- uses: actions/checkout@v3
|
||||
- uses: actions/checkout@v4
|
||||
- name: Get latest version of stable Rust
|
||||
uses: moonrepo/setup-rust@v1
|
||||
with:
|
||||
@ -261,7 +261,7 @@ jobs:
|
||||
name: no-eth1-simulator-ubuntu
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- uses: actions/checkout@v3
|
||||
- uses: actions/checkout@v4
|
||||
- name: Get latest version of stable Rust
|
||||
uses: moonrepo/setup-rust@v1
|
||||
with:
|
||||
@ -273,7 +273,7 @@ jobs:
|
||||
name: syncing-simulator-ubuntu
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- uses: actions/checkout@v3
|
||||
- uses: actions/checkout@v4
|
||||
- name: Get latest version of stable Rust
|
||||
uses: moonrepo/setup-rust@v1
|
||||
with:
|
||||
@ -292,7 +292,7 @@ jobs:
|
||||
# Enable portable to prevent issues with caching `blst` for the wrong CPU type
|
||||
FEATURES: jemalloc,portable
|
||||
steps:
|
||||
- uses: actions/checkout@v3
|
||||
- uses: actions/checkout@v4
|
||||
- name: Get latest version of stable Rust
|
||||
if: env.SELF_HOSTED_RUNNERS == 'false'
|
||||
uses: moonrepo/setup-rust@v1
|
||||
@ -309,7 +309,9 @@ jobs:
|
||||
run: |
|
||||
make
|
||||
- name: Install lcli
|
||||
if: env.SELF_HOSTED_RUNNERS == 'false'
|
||||
# TODO: uncomment after the version of lcli in https://github.com/sigp/lighthouse/pull/5137
|
||||
# is installed on the runners
|
||||
# if: env.SELF_HOSTED_RUNNERS == 'false'
|
||||
run: make install-lcli
|
||||
- name: Run the doppelganger protection failure test script
|
||||
run: |
|
||||
@ -323,7 +325,7 @@ jobs:
|
||||
name: execution-engine-integration-ubuntu
|
||||
runs-on: ${{ github.repository == 'sigp/lighthouse' && fromJson('["self-hosted", "linux", "CI", "small"]') || 'ubuntu-latest' }}
|
||||
steps:
|
||||
- uses: actions/checkout@v3
|
||||
- uses: actions/checkout@v4
|
||||
- name: Get latest version of stable Rust
|
||||
if: env.SELF_HOSTED_RUNNERS == 'false'
|
||||
uses: moonrepo/setup-rust@v1
|
||||
@ -344,7 +346,7 @@ jobs:
|
||||
env:
|
||||
CARGO_INCREMENTAL: 1
|
||||
steps:
|
||||
- uses: actions/checkout@v3
|
||||
- uses: actions/checkout@v4
|
||||
- name: Get latest version of stable Rust
|
||||
uses: moonrepo/setup-rust@v1
|
||||
with:
|
||||
@ -370,7 +372,7 @@ jobs:
|
||||
name: check-msrv
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- uses: actions/checkout@v3
|
||||
- uses: actions/checkout@v4
|
||||
- name: Install Rust at Minimum Supported Rust Version (MSRV)
|
||||
run: |
|
||||
metadata=$(cargo metadata --no-deps --format-version 1)
|
||||
@ -382,7 +384,7 @@ jobs:
|
||||
name: cargo-udeps
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- uses: actions/checkout@v3
|
||||
- uses: actions/checkout@v4
|
||||
- name: Get latest version of nightly Rust
|
||||
uses: moonrepo/setup-rust@v1
|
||||
with:
|
||||
@ -404,9 +406,9 @@ jobs:
|
||||
name: compile-with-beta-compiler
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- uses: actions/checkout@v3
|
||||
- uses: actions/checkout@v4
|
||||
- name: Install dependencies
|
||||
run: sudo apt install -y git gcc g++ make cmake pkg-config llvm-dev libclang-dev clang
|
||||
run: sudo apt update && sudo apt install -y git gcc g++ make cmake pkg-config llvm-dev libclang-dev clang
|
||||
- name: Use Rust beta
|
||||
run: rustup override set beta
|
||||
- name: Run make
|
||||
@ -415,7 +417,7 @@ jobs:
|
||||
name: cli-check
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- uses: actions/checkout@v3
|
||||
- uses: actions/checkout@v4
|
||||
- name: Get latest version of stable Rust
|
||||
uses: moonrepo/setup-rust@v1
|
||||
with:
|
||||
@ -453,6 +455,6 @@ jobs:
|
||||
'cli-check',
|
||||
]
|
||||
steps:
|
||||
- uses: actions/checkout@v3
|
||||
- uses: actions/checkout@v4
|
||||
- name: Check that success job is dependent on all others
|
||||
run: ./scripts/ci/check-success-job.sh ./.github/workflows/test-suite.yml test-suite-success
|
||||
|
||||
1761
Cargo.lock
generated
1761
Cargo.lock
generated
File diff suppressed because it is too large
Load Diff
14
Cargo.toml
14
Cargo.toml
@ -94,7 +94,9 @@ resolver = "2"
|
||||
edition = "2021"
|
||||
|
||||
[workspace.dependencies]
|
||||
anyhow = "1"
|
||||
arbitrary = { version = "1", features = ["derive"] }
|
||||
async-channel = "1.9.0"
|
||||
bincode = "1"
|
||||
bitvec = "1"
|
||||
byteorder = "1"
|
||||
@ -105,12 +107,13 @@ criterion = "0.3"
|
||||
delay_map = "0.3"
|
||||
derivative = "2"
|
||||
dirs = "3"
|
||||
discv5 = { git="https://github.com/sigp/discv5", rev="e30a2c31b7ac0c57876458b971164654dfa4513b", features = ["libp2p"] }
|
||||
either = "1.9"
|
||||
discv5 = { version = "0.4.1", features = ["libp2p"] }
|
||||
env_logger = "0.9"
|
||||
error-chain = "0.12"
|
||||
ethereum-types = "0.14"
|
||||
ethereum_hashing = "1.0.0-beta.2"
|
||||
ethereum_serde_utils = "0.5"
|
||||
ethereum_serde_utils = "0.5.2"
|
||||
ethereum_ssz = "0.5"
|
||||
ethereum_ssz_derive = "0.5"
|
||||
ethers-core = "1"
|
||||
@ -160,6 +163,7 @@ tempfile = "3"
|
||||
tokio = { version = "1", features = ["rt-multi-thread", "sync", "signal"] }
|
||||
tokio-stream = { version = "0.1", features = ["sync"] }
|
||||
tokio-util = { version = "0.6", features = ["codec", "compat", "time"] }
|
||||
tracing = "0.1.40"
|
||||
tracing-appender = "0.2"
|
||||
tracing-core = "0.1"
|
||||
tracing-log = "0.2"
|
||||
@ -168,8 +172,7 @@ tree_hash = "0.5"
|
||||
tree_hash_derive = "0.5"
|
||||
url = "2"
|
||||
uuid = { version = "0.8", features = ["serde", "v4"] }
|
||||
# TODO update to warp 0.3.6 after released.
|
||||
warp = { git = "https://github.com/seanmonstar/warp.git", default-features = false, features = ["tls"] }
|
||||
warp = { version = "0.3.6", default-features = false, features = ["tls"] }
|
||||
zeroize = { version = "1", features = ["zeroize_derive"] }
|
||||
zip = "0.6"
|
||||
|
||||
@ -228,6 +231,9 @@ validator_client = { path = "validator_client" }
|
||||
validator_dir = { path = "common/validator_dir" }
|
||||
warp_utils = { path = "common/warp_utils" }
|
||||
|
||||
[patch.crates-io]
|
||||
yamux = { git = "https://github.com/sigp/rust-yamux.git" }
|
||||
|
||||
[profile.maxperf]
|
||||
inherits = "release"
|
||||
lto = "fat"
|
||||
|
||||
@ -1,4 +1,4 @@
|
||||
FROM rust:1.73.0-bullseye AS builder
|
||||
FROM rust:1.75.0-bullseye AS builder
|
||||
RUN apt-get update && apt-get -y upgrade && apt-get install -y cmake libclang-dev
|
||||
COPY . lighthouse
|
||||
ARG FEATURES
|
||||
|
||||
2
Makefile
2
Makefile
@ -143,7 +143,6 @@ run-ef-tests:
|
||||
rm -rf $(EF_TESTS)/.accessed_file_log.txt
|
||||
cargo test --release -p ef_tests --features "ef_tests,$(EF_TEST_FEATURES)"
|
||||
cargo test --release -p ef_tests --features "ef_tests,$(EF_TEST_FEATURES),fake_crypto"
|
||||
cargo test --release -p ef_tests --features "ef_tests,$(EF_TEST_FEATURES),milagro"
|
||||
./$(EF_TESTS)/check_all_files_accessed.py $(EF_TESTS)/.accessed_file_log.txt $(EF_TESTS)/consensus-spec-tests
|
||||
|
||||
# Runs EF test vectors with nextest
|
||||
@ -151,7 +150,6 @@ nextest-run-ef-tests:
|
||||
rm -rf $(EF_TESTS)/.accessed_file_log.txt
|
||||
cargo nextest run --release -p ef_tests --features "ef_tests,$(EF_TEST_FEATURES)"
|
||||
cargo nextest run --release -p ef_tests --features "ef_tests,$(EF_TEST_FEATURES),fake_crypto"
|
||||
cargo nextest run --release -p ef_tests --features "ef_tests,$(EF_TEST_FEATURES),milagro"
|
||||
./$(EF_TESTS)/check_all_files_accessed.py $(EF_TESTS)/.accessed_file_log.txt $(EF_TESTS)/consensus-spec-tests
|
||||
|
||||
# Run the tests in the `beacon_chain` crate for all known forks.
|
||||
|
||||
@ -1,6 +1,6 @@
|
||||
[package]
|
||||
name = "beacon_node"
|
||||
version = "4.6.0"
|
||||
version = "5.1.1"
|
||||
authors = [
|
||||
"Paul Hauner <paul@paulhauner.com>",
|
||||
"Age Manning <Age@AgeManning.com",
|
||||
|
||||
@ -11,6 +11,7 @@ write_ssz_files = [] # Writes debugging .ssz files to /tmp during block process
|
||||
participation_metrics = [] # Exposes validator participation metrics to Prometheus.
|
||||
fork_from_env = [] # Initialise the harness chain spec from the FORK_NAME env variable
|
||||
portable = ["bls/supranational-portable"]
|
||||
test_backfill = []
|
||||
|
||||
[dev-dependencies]
|
||||
maplit = { workspace = true }
|
||||
@ -67,7 +68,6 @@ execution_layer = { workspace = true }
|
||||
sensitive_url = { workspace = true }
|
||||
superstruct = { workspace = true }
|
||||
hex = { workspace = true }
|
||||
exit-future = { workspace = true }
|
||||
oneshot_broadcast = { path = "../../common/oneshot_broadcast/" }
|
||||
slog-term = { workspace = true }
|
||||
slog-async = { workspace = true }
|
||||
|
||||
@ -539,8 +539,8 @@ impl<'a, T: BeaconChainTypes> IndexedAggregatedAttestation<'a, T> {
|
||||
Err(e) => return Err(SignatureNotChecked(&signed_aggregate.message.aggregate, e)),
|
||||
};
|
||||
|
||||
let indexed_attestation =
|
||||
match map_attestation_committee(chain, attestation, |(committee, _)| {
|
||||
let get_indexed_attestation_with_committee =
|
||||
|(committee, _): (BeaconCommittee, CommitteesPerSlot)| {
|
||||
// Note: this clones the signature which is known to be a relatively slow operation.
|
||||
//
|
||||
// Future optimizations should remove this clone.
|
||||
@ -561,11 +561,17 @@ impl<'a, T: BeaconChainTypes> IndexedAggregatedAttestation<'a, T> {
|
||||
|
||||
get_indexed_attestation(committee.committee, attestation)
|
||||
.map_err(|e| BeaconChainError::from(e).into())
|
||||
}) {
|
||||
Ok(indexed_attestation) => indexed_attestation,
|
||||
Err(e) => return Err(SignatureNotChecked(&signed_aggregate.message.aggregate, e)),
|
||||
};
|
||||
|
||||
let indexed_attestation = match map_attestation_committee(
|
||||
chain,
|
||||
attestation,
|
||||
get_indexed_attestation_with_committee,
|
||||
) {
|
||||
Ok(indexed_attestation) => indexed_attestation,
|
||||
Err(e) => return Err(SignatureNotChecked(&signed_aggregate.message.aggregate, e)),
|
||||
};
|
||||
|
||||
Ok(IndexedAggregatedAttestation {
|
||||
signed_aggregate,
|
||||
indexed_attestation,
|
||||
|
||||
@ -19,7 +19,7 @@ use types::{
|
||||
};
|
||||
|
||||
#[derive(PartialEq)]
|
||||
pub enum CheckEarlyAttesterCache {
|
||||
pub enum CheckCaches {
|
||||
Yes,
|
||||
No,
|
||||
}
|
||||
@ -385,14 +385,14 @@ impl<E: EthSpec> EngineRequest<E> {
|
||||
|
||||
pub struct BeaconBlockStreamer<T: BeaconChainTypes> {
|
||||
execution_layer: ExecutionLayer<T::EthSpec>,
|
||||
check_early_attester_cache: CheckEarlyAttesterCache,
|
||||
check_caches: CheckCaches,
|
||||
beacon_chain: Arc<BeaconChain<T>>,
|
||||
}
|
||||
|
||||
impl<T: BeaconChainTypes> BeaconBlockStreamer<T> {
|
||||
pub fn new(
|
||||
beacon_chain: &Arc<BeaconChain<T>>,
|
||||
check_early_attester_cache: CheckEarlyAttesterCache,
|
||||
check_caches: CheckCaches,
|
||||
) -> Result<Self, BeaconChainError> {
|
||||
let execution_layer = beacon_chain
|
||||
.execution_layer
|
||||
@ -402,17 +402,17 @@ impl<T: BeaconChainTypes> BeaconBlockStreamer<T> {
|
||||
|
||||
Ok(Self {
|
||||
execution_layer,
|
||||
check_early_attester_cache,
|
||||
check_caches,
|
||||
beacon_chain: beacon_chain.clone(),
|
||||
})
|
||||
}
|
||||
|
||||
fn check_early_attester_cache(
|
||||
&self,
|
||||
root: Hash256,
|
||||
) -> Option<Arc<SignedBeaconBlock<T::EthSpec>>> {
|
||||
if self.check_early_attester_cache == CheckEarlyAttesterCache::Yes {
|
||||
self.beacon_chain.early_attester_cache.get_block(root)
|
||||
fn check_caches(&self, root: Hash256) -> Option<Arc<SignedBeaconBlock<T::EthSpec>>> {
|
||||
if self.check_caches == CheckCaches::Yes {
|
||||
self.beacon_chain
|
||||
.data_availability_checker
|
||||
.get_block(&root)
|
||||
.or(self.beacon_chain.early_attester_cache.get_block(root))
|
||||
} else {
|
||||
None
|
||||
}
|
||||
@ -422,10 +422,7 @@ impl<T: BeaconChainTypes> BeaconBlockStreamer<T> {
|
||||
let mut db_blocks = Vec::new();
|
||||
|
||||
for root in block_roots {
|
||||
if let Some(cached_block) = self
|
||||
.check_early_attester_cache(root)
|
||||
.map(LoadedBeaconBlock::Full)
|
||||
{
|
||||
if let Some(cached_block) = self.check_caches(root).map(LoadedBeaconBlock::Full) {
|
||||
db_blocks.push((root, Ok(Some(cached_block))));
|
||||
continue;
|
||||
}
|
||||
@ -554,7 +551,7 @@ impl<T: BeaconChainTypes> BeaconBlockStreamer<T> {
|
||||
"Using slower fallback method of eth_getBlockByHash()"
|
||||
);
|
||||
for root in block_roots {
|
||||
let cached_block = self.check_early_attester_cache(root);
|
||||
let cached_block = self.check_caches(root);
|
||||
let block_result = if cached_block.is_some() {
|
||||
Ok(cached_block)
|
||||
} else {
|
||||
@ -682,7 +679,7 @@ impl From<Error> for BeaconChainError {
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use crate::beacon_block_streamer::{BeaconBlockStreamer, CheckEarlyAttesterCache};
|
||||
use crate::beacon_block_streamer::{BeaconBlockStreamer, CheckCaches};
|
||||
use crate::test_utils::{test_spec, BeaconChainHarness, EphemeralHarnessType};
|
||||
use execution_layer::test_utils::{Block, DEFAULT_ENGINE_CAPABILITIES};
|
||||
use execution_layer::EngineCapabilities;
|
||||
@ -804,7 +801,7 @@ mod tests {
|
||||
let start = epoch * slots_per_epoch;
|
||||
let mut epoch_roots = vec![Hash256::zero(); slots_per_epoch];
|
||||
epoch_roots[..].clone_from_slice(&block_roots[start..(start + slots_per_epoch)]);
|
||||
let streamer = BeaconBlockStreamer::new(&harness.chain, CheckEarlyAttesterCache::No)
|
||||
let streamer = BeaconBlockStreamer::new(&harness.chain, CheckCaches::No)
|
||||
.expect("should create streamer");
|
||||
let (block_tx, mut block_rx) = mpsc::unbounded_channel();
|
||||
streamer.stream(epoch_roots.clone(), block_tx).await;
|
||||
@ -945,7 +942,7 @@ mod tests {
|
||||
let start = epoch * slots_per_epoch;
|
||||
let mut epoch_roots = vec![Hash256::zero(); slots_per_epoch];
|
||||
epoch_roots[..].clone_from_slice(&block_roots[start..(start + slots_per_epoch)]);
|
||||
let streamer = BeaconBlockStreamer::new(&harness.chain, CheckEarlyAttesterCache::No)
|
||||
let streamer = BeaconBlockStreamer::new(&harness.chain, CheckCaches::No)
|
||||
.expect("should create streamer");
|
||||
let (block_tx, mut block_rx) = mpsc::unbounded_channel();
|
||||
streamer.stream(epoch_roots.clone(), block_tx).await;
|
||||
|
||||
@ -4,7 +4,7 @@ use crate::attestation_verification::{
|
||||
VerifiedUnaggregatedAttestation,
|
||||
};
|
||||
use crate::attester_cache::{AttesterCache, AttesterCacheKey};
|
||||
use crate::beacon_block_streamer::{BeaconBlockStreamer, CheckEarlyAttesterCache};
|
||||
use crate::beacon_block_streamer::{BeaconBlockStreamer, CheckCaches};
|
||||
use crate::beacon_proposer_cache::compute_proposer_duties_from_head;
|
||||
use crate::beacon_proposer_cache::BeaconProposerCache;
|
||||
use crate::blob_verification::{GossipBlobError, GossipVerifiedBlob};
|
||||
@ -38,6 +38,7 @@ use crate::light_client_finality_update_verification::{
|
||||
use crate::light_client_optimistic_update_verification::{
|
||||
Error as LightClientOptimisticUpdateError, VerifiedLightClientOptimisticUpdate,
|
||||
};
|
||||
use crate::light_client_server_cache::LightClientServerCache;
|
||||
use crate::migrate::BackgroundMigrator;
|
||||
use crate::naive_aggregation_pool::{
|
||||
AggregatedAttestationMap, Error as NaiveAggregationError, NaiveAggregationPool,
|
||||
@ -339,6 +340,8 @@ struct PartialBeaconBlock<E: EthSpec> {
|
||||
bls_to_execution_changes: Vec<SignedBlsToExecutionChange>,
|
||||
}
|
||||
|
||||
pub type LightClientProducerEvent<T> = (Hash256, Slot, SyncAggregate<T>);
|
||||
|
||||
pub type BeaconForkChoice<T> = ForkChoice<
|
||||
BeaconForkChoiceStore<
|
||||
<T as BeaconChainTypes>::EthSpec,
|
||||
@ -420,10 +423,6 @@ pub struct BeaconChain<T: BeaconChainTypes> {
|
||||
/// Maintains a record of which validators we've seen BLS to execution changes for.
|
||||
pub(crate) observed_bls_to_execution_changes:
|
||||
Mutex<ObservedOperations<SignedBlsToExecutionChange, T::EthSpec>>,
|
||||
/// The most recently validated light client finality update received on gossip.
|
||||
pub latest_seen_finality_update: Mutex<Option<LightClientFinalityUpdate<T::EthSpec>>>,
|
||||
/// The most recently validated light client optimistic update received on gossip.
|
||||
pub latest_seen_optimistic_update: Mutex<Option<LightClientOptimisticUpdate<T::EthSpec>>>,
|
||||
/// Provides information from the Ethereum 1 (PoW) chain.
|
||||
pub eth1_chain: Option<Eth1Chain<T::Eth1Chain, T::EthSpec>>,
|
||||
/// Interfaces with the execution client.
|
||||
@ -466,6 +465,10 @@ pub struct BeaconChain<T: BeaconChainTypes> {
|
||||
pub block_times_cache: Arc<RwLock<BlockTimesCache>>,
|
||||
/// A cache used to track pre-finalization block roots for quick rejection.
|
||||
pub pre_finalization_block_cache: PreFinalizationBlockCache,
|
||||
/// A cache used to produce light_client server messages
|
||||
pub light_client_server_cache: LightClientServerCache<T>,
|
||||
/// Sender to signal the light_client server to produce new updates
|
||||
pub light_client_server_tx: Option<Sender<LightClientProducerEvent<T::EthSpec>>>,
|
||||
/// Sender given to tasks, so that if they encounter a state in which execution cannot
|
||||
/// continue they can request that everything shuts down.
|
||||
pub shutdown_sender: Sender<ShutdownReason>,
|
||||
@ -1128,7 +1131,7 @@ impl<T: BeaconChainTypes> BeaconChain<T> {
|
||||
/// ## Errors
|
||||
///
|
||||
/// May return a database error.
|
||||
pub fn get_blocks_checking_early_attester_cache(
|
||||
pub fn get_blocks_checking_caches(
|
||||
self: &Arc<Self>,
|
||||
block_roots: Vec<Hash256>,
|
||||
executor: &TaskExecutor,
|
||||
@ -1141,10 +1144,8 @@ impl<T: BeaconChainTypes> BeaconChain<T> {
|
||||
>,
|
||||
Error,
|
||||
> {
|
||||
Ok(
|
||||
BeaconBlockStreamer::<T>::new(self, CheckEarlyAttesterCache::Yes)?
|
||||
.launch_stream(block_roots, executor),
|
||||
)
|
||||
Ok(BeaconBlockStreamer::<T>::new(self, CheckCaches::Yes)?
|
||||
.launch_stream(block_roots, executor))
|
||||
}
|
||||
|
||||
pub fn get_blocks(
|
||||
@ -1160,10 +1161,8 @@ impl<T: BeaconChainTypes> BeaconChain<T> {
|
||||
>,
|
||||
Error,
|
||||
> {
|
||||
Ok(
|
||||
BeaconBlockStreamer::<T>::new(self, CheckEarlyAttesterCache::No)?
|
||||
.launch_stream(block_roots, executor),
|
||||
)
|
||||
Ok(BeaconBlockStreamer::<T>::new(self, CheckCaches::No)?
|
||||
.launch_stream(block_roots, executor))
|
||||
}
|
||||
|
||||
pub fn get_blobs_checking_early_attester_cache(
|
||||
@ -1344,6 +1343,19 @@ impl<T: BeaconChainTypes> BeaconChain<T> {
|
||||
self.state_at_slot(load_slot, StateSkipConfig::WithoutStateRoots)
|
||||
}
|
||||
|
||||
pub fn recompute_and_cache_light_client_updates(
|
||||
&self,
|
||||
(parent_root, slot, sync_aggregate): LightClientProducerEvent<T::EthSpec>,
|
||||
) -> Result<(), Error> {
|
||||
self.light_client_server_cache.recompute_and_cache_updates(
|
||||
&self.log,
|
||||
self.store.clone(),
|
||||
&parent_root,
|
||||
slot,
|
||||
&sync_aggregate,
|
||||
)
|
||||
}
|
||||
|
||||
/// Returns the current heads of the `BeaconChain`. For the canonical head, see `Self::head`.
|
||||
///
|
||||
/// Returns `(block_root, block_slot)`.
|
||||
@ -2944,18 +2956,8 @@ impl<T: BeaconChainTypes> BeaconChain<T> {
|
||||
unverified_block: B,
|
||||
notify_execution_layer: NotifyExecutionLayer,
|
||||
) -> Result<AvailabilityProcessingStatus, BlockError<T::EthSpec>> {
|
||||
if let Ok(commitments) = unverified_block
|
||||
.block()
|
||||
.message()
|
||||
.body()
|
||||
.blob_kzg_commitments()
|
||||
{
|
||||
self.data_availability_checker.notify_block_commitments(
|
||||
unverified_block.block().slot(),
|
||||
block_root,
|
||||
commitments.clone(),
|
||||
);
|
||||
};
|
||||
self.data_availability_checker
|
||||
.notify_block(block_root, unverified_block.block_cloned());
|
||||
let r = self
|
||||
.process_block(block_root, unverified_block, notify_execution_layer, || {
|
||||
Ok(())
|
||||
@ -3521,6 +3523,20 @@ impl<T: BeaconChainTypes> BeaconChain<T> {
|
||||
};
|
||||
let current_finalized_checkpoint = state.finalized_checkpoint();
|
||||
|
||||
// compute state proofs for light client updates before inserting the state into the
|
||||
// snapshot cache.
|
||||
if self.config.enable_light_client_server {
|
||||
self.light_client_server_cache
|
||||
.cache_state_data(
|
||||
&self.spec, block, block_root,
|
||||
// mutable reference on the state is needed to compute merkle proofs
|
||||
&mut state,
|
||||
)
|
||||
.unwrap_or_else(|e| {
|
||||
error!(self.log, "error caching light_client data {:?}", e);
|
||||
});
|
||||
}
|
||||
|
||||
self.snapshot_cache
|
||||
.try_write_for(BLOCK_PROCESSING_CACHE_LOCK_TIMEOUT)
|
||||
.ok_or(Error::SnapshotCacheLockTimeout)
|
||||
@ -3893,6 +3909,28 @@ impl<T: BeaconChainTypes> BeaconChain<T> {
|
||||
}));
|
||||
}
|
||||
}
|
||||
|
||||
// Do not trigger light_client server update producer for old blocks, to extra work
|
||||
// during sync.
|
||||
if self.config.enable_light_client_server
|
||||
&& block_delay_total < self.slot_clock.slot_duration() * 32
|
||||
{
|
||||
if let Some(mut light_client_server_tx) = self.light_client_server_tx.clone() {
|
||||
if let Ok(sync_aggregate) = block.body().sync_aggregate() {
|
||||
if let Err(e) = light_client_server_tx.try_send((
|
||||
block.parent_root(),
|
||||
block.slot(),
|
||||
sync_aggregate.clone(),
|
||||
)) {
|
||||
warn!(
|
||||
self.log,
|
||||
"Failed to send light_client server event";
|
||||
"error" => ?e
|
||||
);
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// For the current and next epoch of this state, ensure we have the shuffling from this
|
||||
@ -4070,7 +4108,7 @@ impl<T: BeaconChainTypes> BeaconChain<T> {
|
||||
.task_executor
|
||||
.spawn_blocking_handle(
|
||||
move || chain.load_state_for_block_production(slot),
|
||||
"produce_partial_beacon_block",
|
||||
"load_state_for_block_production",
|
||||
)
|
||||
.ok_or(BlockProductionError::ShuttingDown)?
|
||||
.await
|
||||
@ -4128,21 +4166,14 @@ impl<T: BeaconChainTypes> BeaconChain<T> {
|
||||
(re_org_state.pre_state, re_org_state.state_root)
|
||||
}
|
||||
// Normal case: proposing a block atop the current head using the cache.
|
||||
else if let Some((_, cached_state)) = self
|
||||
.block_production_state
|
||||
.lock()
|
||||
.take()
|
||||
.filter(|(cached_block_root, _)| *cached_block_root == head_block_root)
|
||||
else if let Some((_, cached_state)) =
|
||||
self.get_state_from_block_production_cache(head_block_root)
|
||||
{
|
||||
(cached_state.pre_state, cached_state.state_root)
|
||||
}
|
||||
// Fall back to a direct read of the snapshot cache.
|
||||
else if let Some(pre_state) = self
|
||||
.snapshot_cache
|
||||
.try_read_for(BLOCK_PROCESSING_CACHE_LOCK_TIMEOUT)
|
||||
.and_then(|snapshot_cache| {
|
||||
snapshot_cache.get_state_for_block_production(head_block_root)
|
||||
})
|
||||
else if let Some(pre_state) =
|
||||
self.get_state_from_snapshot_cache_for_block_production(head_block_root)
|
||||
{
|
||||
warn!(
|
||||
self.log,
|
||||
@ -4183,6 +4214,40 @@ impl<T: BeaconChainTypes> BeaconChain<T> {
|
||||
Ok((state, state_root_opt))
|
||||
}
|
||||
|
||||
/// Get the state cached for block production *if* it matches `head_block_root`.
|
||||
///
|
||||
/// This will clear the cache regardless of whether the block root matches, so only call this if
|
||||
/// you think the `head_block_root` is likely to match!
|
||||
fn get_state_from_block_production_cache(
|
||||
&self,
|
||||
head_block_root: Hash256,
|
||||
) -> Option<(Hash256, BlockProductionPreState<T::EthSpec>)> {
|
||||
// Take care to drop the lock as quickly as possible.
|
||||
let mut lock = self.block_production_state.lock();
|
||||
let result = lock
|
||||
.take()
|
||||
.filter(|(cached_block_root, _)| *cached_block_root == head_block_root);
|
||||
drop(lock);
|
||||
result
|
||||
}
|
||||
|
||||
/// Get a state for block production from the snapshot cache.
|
||||
fn get_state_from_snapshot_cache_for_block_production(
|
||||
&self,
|
||||
head_block_root: Hash256,
|
||||
) -> Option<BlockProductionPreState<T::EthSpec>> {
|
||||
if let Some(lock) = self
|
||||
.snapshot_cache
|
||||
.try_read_for(BLOCK_PROCESSING_CACHE_LOCK_TIMEOUT)
|
||||
{
|
||||
let result = lock.get_state_for_block_production(head_block_root);
|
||||
drop(lock);
|
||||
result
|
||||
} else {
|
||||
None
|
||||
}
|
||||
}
|
||||
|
||||
/// Fetch the beacon state to use for producing a block if a 1-slot proposer re-org is viable.
|
||||
///
|
||||
/// This function will return `None` if proposer re-orgs are disabled.
|
||||
@ -4275,12 +4340,8 @@ impl<T: BeaconChainTypes> BeaconChain<T> {
|
||||
|
||||
// Only attempt a re-org if we hit the block production cache or snapshot cache.
|
||||
let pre_state = self
|
||||
.block_production_state
|
||||
.lock()
|
||||
.take()
|
||||
.and_then(|(cached_block_root, state)| {
|
||||
(cached_block_root == re_org_parent_block).then_some(state)
|
||||
})
|
||||
.get_state_from_block_production_cache(re_org_parent_block)
|
||||
.map(|(_, state)| state)
|
||||
.or_else(|| {
|
||||
warn!(
|
||||
self.log,
|
||||
@ -4289,11 +4350,7 @@ impl<T: BeaconChainTypes> BeaconChain<T> {
|
||||
"slot" => slot,
|
||||
"block_root" => ?re_org_parent_block
|
||||
);
|
||||
self.snapshot_cache
|
||||
.try_read_for(BLOCK_PROCESSING_CACHE_LOCK_TIMEOUT)
|
||||
.and_then(|snapshot_cache| {
|
||||
snapshot_cache.get_state_for_block_production(re_org_parent_block)
|
||||
})
|
||||
self.get_state_from_snapshot_cache_for_block_production(re_org_parent_block)
|
||||
})
|
||||
.or_else(|| {
|
||||
debug!(
|
||||
|
||||
@ -764,6 +764,7 @@ pub trait IntoExecutionPendingBlock<T: BeaconChainTypes>: Sized {
|
||||
) -> Result<ExecutionPendingBlock<T>, BlockSlashInfo<BlockError<T::EthSpec>>>;
|
||||
|
||||
fn block(&self) -> &SignedBeaconBlock<T::EthSpec>;
|
||||
fn block_cloned(&self) -> Arc<SignedBeaconBlock<T::EthSpec>>;
|
||||
}
|
||||
|
||||
impl<T: BeaconChainTypes> GossipVerifiedBlock<T> {
|
||||
@ -1017,6 +1018,10 @@ impl<T: BeaconChainTypes> IntoExecutionPendingBlock<T> for GossipVerifiedBlock<T
|
||||
fn block(&self) -> &SignedBeaconBlock<T::EthSpec> {
|
||||
self.block.as_block()
|
||||
}
|
||||
|
||||
fn block_cloned(&self) -> Arc<SignedBeaconBlock<T::EthSpec>> {
|
||||
self.block.clone()
|
||||
}
|
||||
}
|
||||
|
||||
impl<T: BeaconChainTypes> SignatureVerifiedBlock<T> {
|
||||
@ -1168,6 +1173,10 @@ impl<T: BeaconChainTypes> IntoExecutionPendingBlock<T> for SignatureVerifiedBloc
|
||||
fn block(&self) -> &SignedBeaconBlock<T::EthSpec> {
|
||||
self.block.as_block()
|
||||
}
|
||||
|
||||
fn block_cloned(&self) -> Arc<SignedBeaconBlock<T::EthSpec>> {
|
||||
self.block.block_cloned()
|
||||
}
|
||||
}
|
||||
|
||||
impl<T: BeaconChainTypes> IntoExecutionPendingBlock<T> for Arc<SignedBeaconBlock<T::EthSpec>> {
|
||||
@ -1198,6 +1207,10 @@ impl<T: BeaconChainTypes> IntoExecutionPendingBlock<T> for Arc<SignedBeaconBlock
|
||||
fn block(&self) -> &SignedBeaconBlock<T::EthSpec> {
|
||||
self
|
||||
}
|
||||
|
||||
fn block_cloned(&self) -> Arc<SignedBeaconBlock<T::EthSpec>> {
|
||||
self.clone()
|
||||
}
|
||||
}
|
||||
|
||||
impl<T: BeaconChainTypes> IntoExecutionPendingBlock<T> for RpcBlock<T::EthSpec> {
|
||||
@ -1228,6 +1241,10 @@ impl<T: BeaconChainTypes> IntoExecutionPendingBlock<T> for RpcBlock<T::EthSpec>
|
||||
fn block(&self) -> &SignedBeaconBlock<T::EthSpec> {
|
||||
self.as_block()
|
||||
}
|
||||
|
||||
fn block_cloned(&self) -> Arc<SignedBeaconBlock<T::EthSpec>> {
|
||||
self.block_cloned()
|
||||
}
|
||||
}
|
||||
|
||||
impl<T: BeaconChainTypes> ExecutionPendingBlock<T> {
|
||||
|
||||
@ -46,6 +46,13 @@ impl<E: EthSpec> RpcBlock<E> {
|
||||
}
|
||||
}
|
||||
|
||||
pub fn block_cloned(&self) -> Arc<SignedBeaconBlock<E>> {
|
||||
match &self.block {
|
||||
RpcBlockInner::Block(block) => block.clone(),
|
||||
RpcBlockInner::BlockAndBlobs(block, _) => block.clone(),
|
||||
}
|
||||
}
|
||||
|
||||
pub fn blobs(&self) -> Option<&BlobSidecarList<E>> {
|
||||
match &self.block {
|
||||
RpcBlockInner::Block(_) => None,
|
||||
|
||||
@ -1,4 +1,6 @@
|
||||
use crate::beacon_chain::{CanonicalHead, BEACON_CHAIN_DB_KEY, ETH1_CACHE_DB_KEY, OP_POOL_DB_KEY};
|
||||
use crate::beacon_chain::{
|
||||
CanonicalHead, LightClientProducerEvent, BEACON_CHAIN_DB_KEY, ETH1_CACHE_DB_KEY, OP_POOL_DB_KEY,
|
||||
};
|
||||
use crate::beacon_proposer_cache::BeaconProposerCache;
|
||||
use crate::data_availability_checker::DataAvailabilityChecker;
|
||||
use crate::eth1_chain::{CachingEth1Backend, SszEth1};
|
||||
@ -6,10 +8,11 @@ use crate::eth1_finalization_cache::Eth1FinalizationCache;
|
||||
use crate::fork_choice_signal::ForkChoiceSignalTx;
|
||||
use crate::fork_revert::{reset_fork_choice_to_finalization, revert_to_fork_boundary};
|
||||
use crate::head_tracker::HeadTracker;
|
||||
use crate::light_client_server_cache::LightClientServerCache;
|
||||
use crate::migrate::{BackgroundMigrator, MigratorConfig};
|
||||
use crate::persisted_beacon_chain::PersistedBeaconChain;
|
||||
use crate::shuffling_cache::{BlockShufflingIds, ShufflingCache};
|
||||
use crate::snapshot_cache::{SnapshotCache, DEFAULT_SNAPSHOT_CACHE_SIZE};
|
||||
use crate::snapshot_cache::SnapshotCache;
|
||||
use crate::timeout_rw_lock::TimeoutRwLock;
|
||||
use crate::validator_monitor::{ValidatorMonitor, ValidatorMonitorConfig};
|
||||
use crate::validator_pubkey_cache::ValidatorPubkeyCache;
|
||||
@ -36,8 +39,8 @@ use std::time::Duration;
|
||||
use store::{Error as StoreError, HotColdDB, ItemStore, KeyValueStoreOp};
|
||||
use task_executor::{ShutdownReason, TaskExecutor};
|
||||
use types::{
|
||||
BeaconBlock, BeaconState, ChainSpec, Checkpoint, Epoch, EthSpec, Graffiti, Hash256, Signature,
|
||||
SignedBeaconBlock, Slot,
|
||||
BeaconBlock, BeaconState, BlobSidecarList, ChainSpec, Checkpoint, Epoch, EthSpec, Graffiti,
|
||||
Hash256, Signature, SignedBeaconBlock, Slot,
|
||||
};
|
||||
|
||||
/// An empty struct used to "witness" all the `BeaconChainTypes` traits. It has no user-facing
|
||||
@ -87,6 +90,7 @@ pub struct BeaconChainBuilder<T: BeaconChainTypes> {
|
||||
event_handler: Option<ServerSentEventHandler<T::EthSpec>>,
|
||||
slot_clock: Option<T::SlotClock>,
|
||||
shutdown_sender: Option<Sender<ShutdownReason>>,
|
||||
light_client_server_tx: Option<Sender<LightClientProducerEvent<T::EthSpec>>>,
|
||||
head_tracker: Option<HeadTracker>,
|
||||
validator_pubkey_cache: Option<ValidatorPubkeyCache<T>>,
|
||||
spec: ChainSpec,
|
||||
@ -129,6 +133,7 @@ where
|
||||
event_handler: None,
|
||||
slot_clock: None,
|
||||
shutdown_sender: None,
|
||||
light_client_server_tx: None,
|
||||
head_tracker: None,
|
||||
validator_pubkey_cache: None,
|
||||
spec: TEthSpec::default_spec(),
|
||||
@ -427,6 +432,7 @@ where
|
||||
mut self,
|
||||
mut weak_subj_state: BeaconState<TEthSpec>,
|
||||
weak_subj_block: SignedBeaconBlock<TEthSpec>,
|
||||
weak_subj_blobs: Option<BlobSidecarList<TEthSpec>>,
|
||||
genesis_state: BeaconState<TEthSpec>,
|
||||
) -> Result<Self, String> {
|
||||
let store = self
|
||||
@ -485,6 +491,29 @@ where
|
||||
));
|
||||
}
|
||||
|
||||
// Verify that blobs (if provided) match the block.
|
||||
if let Some(blobs) = &weak_subj_blobs {
|
||||
let commitments = weak_subj_block
|
||||
.message()
|
||||
.body()
|
||||
.blob_kzg_commitments()
|
||||
.map_err(|e| format!("Blobs provided but block does not reference them: {e:?}"))?;
|
||||
if blobs.len() != commitments.len() {
|
||||
return Err(format!(
|
||||
"Wrong number of blobs, expected: {}, got: {}",
|
||||
commitments.len(),
|
||||
blobs.len()
|
||||
));
|
||||
}
|
||||
if commitments
|
||||
.iter()
|
||||
.zip(blobs.iter())
|
||||
.any(|(commitment, blob)| *commitment != blob.kzg_commitment)
|
||||
{
|
||||
return Err("Checkpoint blob does not match block commitment".into());
|
||||
}
|
||||
}
|
||||
|
||||
// Set the store's split point *before* storing genesis so that genesis is stored
|
||||
// immediately in the freezer DB.
|
||||
store.set_split(weak_subj_slot, weak_subj_state_root, weak_subj_block_root);
|
||||
@ -506,14 +535,19 @@ where
|
||||
.do_atomically(block_root_batch)
|
||||
.map_err(|e| format!("Error writing frozen block roots: {e:?}"))?;
|
||||
|
||||
// Write the state and block non-atomically, it doesn't matter if they're forgotten
|
||||
// Write the state, block and blobs non-atomically, it doesn't matter if they're forgotten
|
||||
// about on a crash restart.
|
||||
store
|
||||
.put_state(&weak_subj_state_root, &weak_subj_state)
|
||||
.map_err(|e| format!("Failed to store weak subjectivity state: {:?}", e))?;
|
||||
.map_err(|e| format!("Failed to store weak subjectivity state: {e:?}"))?;
|
||||
store
|
||||
.put_block(&weak_subj_block_root, weak_subj_block.clone())
|
||||
.map_err(|e| format!("Failed to store weak subjectivity block: {:?}", e))?;
|
||||
.map_err(|e| format!("Failed to store weak subjectivity block: {e:?}"))?;
|
||||
if let Some(blobs) = weak_subj_blobs {
|
||||
store
|
||||
.put_blobs(&weak_subj_block_root, blobs)
|
||||
.map_err(|e| format!("Failed to store weak subjectivity blobs: {e:?}"))?;
|
||||
}
|
||||
|
||||
// Stage the database's metadata fields for atomic storage when `build` is called.
|
||||
// This prevents the database from restarting in an inconsistent state if the anchor
|
||||
@ -603,6 +637,15 @@ where
|
||||
self
|
||||
}
|
||||
|
||||
/// Sets a `Sender` to allow the beacon chain to trigger light_client update production.
|
||||
pub fn light_client_server_tx(
|
||||
mut self,
|
||||
sender: Sender<LightClientProducerEvent<TEthSpec>>,
|
||||
) -> Self {
|
||||
self.light_client_server_tx = Some(sender);
|
||||
self
|
||||
}
|
||||
|
||||
/// Creates a new, empty operation pool.
|
||||
fn empty_op_pool(mut self) -> Self {
|
||||
self.op_pool = Some(OperationPool::new());
|
||||
@ -827,15 +870,20 @@ where
|
||||
let head_for_snapshot_cache = head_snapshot.clone();
|
||||
let canonical_head = CanonicalHead::new(fork_choice, Arc::new(head_snapshot));
|
||||
let shuffling_cache_size = self.chain_config.shuffling_cache_size;
|
||||
let snapshot_cache_size = self.chain_config.snapshot_cache_size;
|
||||
|
||||
// Calculate the weak subjectivity point in which to backfill blocks to.
|
||||
let genesis_backfill_slot = if self.chain_config.genesis_backfill {
|
||||
Slot::new(0)
|
||||
} else {
|
||||
let backfill_epoch_range = (self.spec.min_validator_withdrawability_delay
|
||||
+ self.spec.churn_limit_quotient)
|
||||
.as_u64()
|
||||
/ 2;
|
||||
let backfill_epoch_range = if cfg!(feature = "test_backfill") {
|
||||
3
|
||||
} else {
|
||||
(self.spec.min_validator_withdrawability_delay + self.spec.churn_limit_quotient)
|
||||
.as_u64()
|
||||
/ 2
|
||||
};
|
||||
|
||||
match slot_clock.now() {
|
||||
Some(current_slot) => {
|
||||
let genesis_backfill_epoch = current_slot
|
||||
@ -887,8 +935,6 @@ where
|
||||
observed_proposer_slashings: <_>::default(),
|
||||
observed_attester_slashings: <_>::default(),
|
||||
observed_bls_to_execution_changes: <_>::default(),
|
||||
latest_seen_finality_update: <_>::default(),
|
||||
latest_seen_optimistic_update: <_>::default(),
|
||||
eth1_chain: self.eth1_chain,
|
||||
execution_layer: self.execution_layer,
|
||||
genesis_validators_root,
|
||||
@ -901,7 +947,7 @@ where
|
||||
event_handler: self.event_handler,
|
||||
head_tracker,
|
||||
snapshot_cache: TimeoutRwLock::new(SnapshotCache::new(
|
||||
DEFAULT_SNAPSHOT_CACHE_SIZE,
|
||||
snapshot_cache_size,
|
||||
head_for_snapshot_cache,
|
||||
)),
|
||||
shuffling_cache: TimeoutRwLock::new(ShufflingCache::new(
|
||||
@ -916,6 +962,8 @@ where
|
||||
validator_pubkey_cache: TimeoutRwLock::new(validator_pubkey_cache),
|
||||
attester_cache: <_>::default(),
|
||||
early_attester_cache: <_>::default(),
|
||||
light_client_server_cache: LightClientServerCache::new(),
|
||||
light_client_server_tx: self.light_client_server_tx,
|
||||
shutdown_sender: self
|
||||
.shutdown_sender
|
||||
.ok_or("Cannot build without a shutdown sender.")?,
|
||||
|
||||
@ -72,6 +72,8 @@ pub struct ChainConfig {
|
||||
pub optimistic_finalized_sync: bool,
|
||||
/// The size of the shuffling cache,
|
||||
pub shuffling_cache_size: usize,
|
||||
/// The size of the snapshot cache.
|
||||
pub snapshot_cache_size: usize,
|
||||
/// If using a weak-subjectivity sync, whether we should download blocks all the way back to
|
||||
/// genesis.
|
||||
pub genesis_backfill: bool,
|
||||
@ -83,6 +85,8 @@ pub struct ChainConfig {
|
||||
pub progressive_balances_mode: ProgressiveBalancesMode,
|
||||
/// Number of epochs between each migration of data from the hot database to the freezer.
|
||||
pub epochs_per_migration: u64,
|
||||
/// When set to true Light client server computes and caches state proofs for serving updates
|
||||
pub enable_light_client_server: bool,
|
||||
}
|
||||
|
||||
impl Default for ChainConfig {
|
||||
@ -110,10 +114,12 @@ impl Default for ChainConfig {
|
||||
// This value isn't actually read except in tests.
|
||||
optimistic_finalized_sync: true,
|
||||
shuffling_cache_size: crate::shuffling_cache::DEFAULT_CACHE_SIZE,
|
||||
snapshot_cache_size: crate::snapshot_cache::DEFAULT_SNAPSHOT_CACHE_SIZE,
|
||||
genesis_backfill: false,
|
||||
always_prepare_payload: false,
|
||||
progressive_balances_mode: ProgressiveBalancesMode::Fast,
|
||||
epochs_per_migration: crate::migrate::DEFAULT_EPOCHS_PER_MIGRATION,
|
||||
enable_light_client_server: false,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@ -20,7 +20,7 @@ use std::fmt::Debug;
|
||||
use std::num::NonZeroUsize;
|
||||
use std::sync::Arc;
|
||||
use task_executor::TaskExecutor;
|
||||
use types::beacon_block_body::{KzgCommitmentOpts, KzgCommitments};
|
||||
use types::beacon_block_body::KzgCommitmentOpts;
|
||||
use types::blob_sidecar::{BlobIdentifier, BlobSidecar, FixedBlobSidecarList};
|
||||
use types::{BlobSidecarList, ChainSpec, Epoch, EthSpec, Hash256, SignedBeaconBlock, Slot};
|
||||
|
||||
@ -192,6 +192,14 @@ impl<T: BeaconChainTypes> DataAvailabilityChecker<T> {
|
||||
self.availability_cache.peek_blob(blob_id)
|
||||
}
|
||||
|
||||
/// Get a block from the availability cache. Includes any blocks we are currently processing.
|
||||
pub fn get_block(&self, block_root: &Hash256) -> Option<Arc<SignedBeaconBlock<T::EthSpec>>> {
|
||||
self.processing_cache
|
||||
.read()
|
||||
.get(block_root)
|
||||
.and_then(|cached| cached.block.clone())
|
||||
}
|
||||
|
||||
/// Put a list of blobs received via RPC into the availability cache. This performs KZG
|
||||
/// verification on the blobs in the list.
|
||||
pub fn put_rpc_blobs(
|
||||
@ -344,20 +352,16 @@ impl<T: BeaconChainTypes> DataAvailabilityChecker<T> {
|
||||
block.num_expected_blobs() > 0 && self.da_check_required_for_epoch(block.epoch())
|
||||
}
|
||||
|
||||
/// Adds block commitments to the processing cache. These commitments are unverified but caching
|
||||
/// Adds a block to the processing cache. This block's commitments are unverified but caching
|
||||
/// them here is useful to avoid duplicate downloads of blocks, as well as understanding
|
||||
/// our blob download requirements.
|
||||
pub fn notify_block_commitments(
|
||||
&self,
|
||||
slot: Slot,
|
||||
block_root: Hash256,
|
||||
commitments: KzgCommitments<T::EthSpec>,
|
||||
) {
|
||||
/// our blob download requirements. We will also serve this over RPC.
|
||||
pub fn notify_block(&self, block_root: Hash256, block: Arc<SignedBeaconBlock<T::EthSpec>>) {
|
||||
let slot = block.slot();
|
||||
self.processing_cache
|
||||
.write()
|
||||
.entry(block_root)
|
||||
.or_insert_with(|| ProcessingComponents::new(slot))
|
||||
.merge_block(commitments);
|
||||
.merge_block(block);
|
||||
}
|
||||
|
||||
/// Add a single blob commitment to the processing cache. This commitment is unverified but caching
|
||||
@ -450,6 +454,24 @@ impl<T: BeaconChainTypes> DataAvailabilityChecker<T> {
|
||||
pub fn persist_all(&self) -> Result<(), AvailabilityCheckError> {
|
||||
self.availability_cache.write_all_to_disk()
|
||||
}
|
||||
|
||||
/// Collects metrics from the data availability checker.
|
||||
pub fn metrics(&self) -> DataAvailabilityCheckerMetrics {
|
||||
DataAvailabilityCheckerMetrics {
|
||||
processing_cache_size: self.processing_cache.read().len(),
|
||||
num_store_entries: self.availability_cache.num_store_entries(),
|
||||
state_cache_size: self.availability_cache.state_cache_size(),
|
||||
block_cache_size: self.availability_cache.block_cache_size(),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// Helper struct to group data availability checker metrics.
|
||||
pub struct DataAvailabilityCheckerMetrics {
|
||||
pub processing_cache_size: usize,
|
||||
pub num_store_entries: usize,
|
||||
pub state_cache_size: usize,
|
||||
pub block_cache_size: usize,
|
||||
}
|
||||
|
||||
pub fn start_availability_cache_maintenance_service<T: BeaconChainTypes>(
|
||||
@ -545,6 +567,18 @@ pub struct AvailableBlock<E: EthSpec> {
|
||||
}
|
||||
|
||||
impl<E: EthSpec> AvailableBlock<E> {
|
||||
pub fn __new_for_testing(
|
||||
block_root: Hash256,
|
||||
block: Arc<SignedBeaconBlock<E>>,
|
||||
blobs: Option<BlobSidecarList<E>>,
|
||||
) -> Self {
|
||||
Self {
|
||||
block_root,
|
||||
block,
|
||||
blobs,
|
||||
}
|
||||
}
|
||||
|
||||
pub fn block(&self) -> &SignedBeaconBlock<E> {
|
||||
&self.block
|
||||
}
|
||||
@ -585,6 +619,15 @@ pub enum MaybeAvailableBlock<E: EthSpec> {
|
||||
},
|
||||
}
|
||||
|
||||
impl<E: EthSpec> MaybeAvailableBlock<E> {
|
||||
pub fn block_cloned(&self) -> Arc<SignedBeaconBlock<E>> {
|
||||
match self {
|
||||
Self::Available(block) => block.block_cloned(),
|
||||
Self::AvailabilityPending { block, .. } => block.clone(),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Debug, Clone)]
|
||||
pub enum MissingBlobs {
|
||||
/// We know for certain these blobs are missing.
|
||||
|
||||
@ -182,9 +182,9 @@ macro_rules! impl_availability_view {
|
||||
|
||||
impl_availability_view!(
|
||||
ProcessingComponents,
|
||||
KzgCommitments<E>,
|
||||
Arc<SignedBeaconBlock<E>>,
|
||||
KzgCommitment,
|
||||
block_commitments,
|
||||
block,
|
||||
blob_commitments
|
||||
);
|
||||
|
||||
@ -212,12 +212,6 @@ pub trait GetCommitment<E: EthSpec> {
|
||||
fn get_commitment(&self) -> &KzgCommitment;
|
||||
}
|
||||
|
||||
// These implementations are required to implement `AvailabilityView` for `ProcessingView`.
|
||||
impl<E: EthSpec> GetCommitments<E> for KzgCommitments<E> {
|
||||
fn get_commitments(&self) -> KzgCommitments<E> {
|
||||
self.clone()
|
||||
}
|
||||
}
|
||||
impl<E: EthSpec> GetCommitment<E> for KzgCommitment {
|
||||
fn get_commitment(&self) -> &KzgCommitment {
|
||||
self
|
||||
@ -310,7 +304,7 @@ pub mod tests {
|
||||
}
|
||||
|
||||
type ProcessingViewSetup<E> = (
|
||||
KzgCommitments<E>,
|
||||
Arc<SignedBeaconBlock<E>>,
|
||||
FixedVector<Option<KzgCommitment>, <E as EthSpec>::MaxBlobsPerBlock>,
|
||||
FixedVector<Option<KzgCommitment>, <E as EthSpec>::MaxBlobsPerBlock>,
|
||||
);
|
||||
@ -320,12 +314,6 @@ pub mod tests {
|
||||
valid_blobs: FixedVector<Option<Arc<BlobSidecar<E>>>, <E as EthSpec>::MaxBlobsPerBlock>,
|
||||
invalid_blobs: FixedVector<Option<Arc<BlobSidecar<E>>>, <E as EthSpec>::MaxBlobsPerBlock>,
|
||||
) -> ProcessingViewSetup<E> {
|
||||
let commitments = block
|
||||
.message()
|
||||
.body()
|
||||
.blob_kzg_commitments()
|
||||
.unwrap()
|
||||
.clone();
|
||||
let blobs = FixedVector::from(
|
||||
valid_blobs
|
||||
.iter()
|
||||
@ -338,7 +326,7 @@ pub mod tests {
|
||||
.map(|blob_opt| blob_opt.as_ref().map(|blob| blob.kzg_commitment))
|
||||
.collect::<Vec<_>>(),
|
||||
);
|
||||
(commitments, blobs, invalid_blobs)
|
||||
(Arc::new(block), blobs, invalid_blobs)
|
||||
}
|
||||
|
||||
type PendingComponentsSetup<E> = (
|
||||
|
||||
@ -363,6 +363,16 @@ impl<T: BeaconChainTypes> Critical<T> {
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// Returns the number of pending component entries in memory.
|
||||
pub fn num_blocks(&self) -> usize {
|
||||
self.in_memory.len()
|
||||
}
|
||||
|
||||
/// Returns the number of entries that have overflowed to disk.
|
||||
pub fn num_store_entries(&self) -> usize {
|
||||
self.store_keys.len()
|
||||
}
|
||||
}
|
||||
|
||||
/// This is the main struct for this module. Outside methods should
|
||||
@ -671,6 +681,21 @@ impl<T: BeaconChainTypes> OverflowLRUCache<T> {
|
||||
pub fn state_lru_cache(&self) -> &StateLRUCache<T> {
|
||||
&self.state_cache
|
||||
}
|
||||
|
||||
/// Number of states stored in memory in the cache.
|
||||
pub fn state_cache_size(&self) -> usize {
|
||||
self.state_cache.lru_cache().read().len()
|
||||
}
|
||||
|
||||
/// Number of pending component entries in memory in the cache.
|
||||
pub fn block_cache_size(&self) -> usize {
|
||||
self.critical.read().num_blocks()
|
||||
}
|
||||
|
||||
/// Returns the number of entries in the cache that have overflowed to disk.
|
||||
pub fn num_store_entries(&self) -> usize {
|
||||
self.critical.read().num_store_entries()
|
||||
}
|
||||
}
|
||||
|
||||
impl ssz::Encode for OverflowKey {
|
||||
|
||||
@ -1,8 +1,9 @@
|
||||
use crate::data_availability_checker::AvailabilityView;
|
||||
use std::collections::hash_map::Entry;
|
||||
use std::collections::HashMap;
|
||||
use types::beacon_block_body::{KzgCommitmentOpts, KzgCommitments};
|
||||
use types::{EthSpec, Hash256, Slot};
|
||||
use std::sync::Arc;
|
||||
use types::beacon_block_body::KzgCommitmentOpts;
|
||||
use types::{EthSpec, Hash256, SignedBeaconBlock, Slot};
|
||||
|
||||
/// This cache is used only for gossip blocks/blobs and single block/blob lookups, to give req/resp
|
||||
/// a view of what we have and what we require. This cache serves a slightly different purpose than
|
||||
@ -37,6 +38,9 @@ impl<E: EthSpec> ProcessingCache<E> {
|
||||
}
|
||||
roots_missing_components
|
||||
}
|
||||
pub fn len(&self) -> usize {
|
||||
self.processing_cache.len()
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Debug, Clone)]
|
||||
@ -45,7 +49,7 @@ pub struct ProcessingComponents<E: EthSpec> {
|
||||
/// Blobs required for a block can only be known if we have seen the block. So `Some` here
|
||||
/// means we've seen it, a `None` means we haven't. The `kzg_commitments` value helps us figure
|
||||
/// out whether incoming blobs actually match the block.
|
||||
pub block_commitments: Option<KzgCommitments<E>>,
|
||||
pub block: Option<Arc<SignedBeaconBlock<E>>>,
|
||||
/// `KzgCommitments` for blobs are always known, even if we haven't seen the block. See
|
||||
/// `AvailabilityView`'s trait definition for more details.
|
||||
pub blob_commitments: KzgCommitmentOpts<E>,
|
||||
@ -55,7 +59,7 @@ impl<E: EthSpec> ProcessingComponents<E> {
|
||||
pub fn new(slot: Slot) -> Self {
|
||||
Self {
|
||||
slot,
|
||||
block_commitments: None,
|
||||
block: None,
|
||||
blob_commitments: KzgCommitmentOpts::<E>::default(),
|
||||
}
|
||||
}
|
||||
@ -67,7 +71,7 @@ impl<E: EthSpec> ProcessingComponents<E> {
|
||||
pub fn empty(_block_root: Hash256) -> Self {
|
||||
Self {
|
||||
slot: Slot::new(0),
|
||||
block_commitments: None,
|
||||
block: None,
|
||||
blob_commitments: KzgCommitmentOpts::<E>::default(),
|
||||
}
|
||||
}
|
||||
|
||||
@ -190,8 +190,7 @@ impl<T: BeaconChainTypes> StateLRUCache<T> {
|
||||
})
|
||||
}
|
||||
|
||||
/// returns the state cache for inspection in tests
|
||||
#[cfg(test)]
|
||||
/// returns the state cache for inspection
|
||||
pub fn lru_cache(&self) -> &RwLock<LruCache<Hash256, BeaconState<T::EthSpec>>> {
|
||||
&self.states
|
||||
}
|
||||
|
||||
@ -967,7 +967,7 @@ mod test {
|
||||
let spec = &E::default_spec();
|
||||
let state: BeaconState<E> = BeaconState::new(0, get_eth1_data(0), spec);
|
||||
|
||||
let blocks = vec![];
|
||||
let blocks = [];
|
||||
|
||||
assert_eq!(
|
||||
get_votes_to_consider(
|
||||
|
||||
@ -81,14 +81,10 @@ impl<T: BeaconChainTypes> PayloadNotifier<T> {
|
||||
|
||||
match notify_execution_layer {
|
||||
NotifyExecutionLayer::No if chain.config.optimistic_finalized_sync => {
|
||||
// Verify the block hash here in Lighthouse and immediately mark the block as
|
||||
// optimistically imported. This saves a lot of roundtrips to the EL.
|
||||
let execution_layer = chain
|
||||
.execution_layer
|
||||
.as_ref()
|
||||
.ok_or(ExecutionPayloadError::NoExecutionConnection)?;
|
||||
|
||||
if let Err(e) = execution_layer.verify_payload_block_hash(block_message) {
|
||||
// Create a NewPayloadRequest (no clones required) and check optimistic sync verifications
|
||||
let new_payload_request: NewPayloadRequest<T::EthSpec> =
|
||||
block_message.try_into()?;
|
||||
if let Err(e) = new_payload_request.perform_optimistic_sync_verifications() {
|
||||
warn!(
|
||||
chain.log,
|
||||
"Falling back to slow block hash verification";
|
||||
@ -143,11 +139,8 @@ async fn notify_new_payload<'a, T: BeaconChainTypes>(
|
||||
.as_ref()
|
||||
.ok_or(ExecutionPayloadError::NoExecutionConnection)?;
|
||||
|
||||
let new_payload_request: NewPayloadRequest<T::EthSpec> = block.try_into()?;
|
||||
let execution_block_hash = new_payload_request.block_hash();
|
||||
let new_payload_response = execution_layer
|
||||
.notify_new_payload(new_payload_request)
|
||||
.await;
|
||||
let execution_block_hash = block.execution_payload()?.block_hash();
|
||||
let new_payload_response = execution_layer.notify_new_payload(block.try_into()?).await;
|
||||
|
||||
match new_payload_response {
|
||||
Ok(status) => match status {
|
||||
|
||||
@ -135,20 +135,20 @@ impl<T: BeaconChainTypes> BeaconChain<T> {
|
||||
|
||||
prev_block_slot = block.slot();
|
||||
expected_block_root = block.message().parent_root();
|
||||
signed_blocks.push(block);
|
||||
|
||||
// If we've reached genesis, add the genesis block root to the batch for all slots
|
||||
// between 0 and the first block slot, and set the anchor slot to 0 to indicate
|
||||
// completion.
|
||||
if expected_block_root == self.genesis_block_root {
|
||||
let genesis_slot = self.spec.genesis_slot;
|
||||
for slot in genesis_slot.as_usize()..block.slot().as_usize() {
|
||||
for slot in genesis_slot.as_usize()..prev_block_slot.as_usize() {
|
||||
chunk_writer.set(slot, self.genesis_block_root, &mut cold_batch)?;
|
||||
}
|
||||
prev_block_slot = genesis_slot;
|
||||
expected_block_root = Hash256::zero();
|
||||
break;
|
||||
}
|
||||
signed_blocks.push(block);
|
||||
}
|
||||
chunk_writer.write(&mut cold_batch)?;
|
||||
// these were pushed in reverse order so we reverse again
|
||||
|
||||
@ -32,6 +32,7 @@ pub mod historical_blocks;
|
||||
pub mod kzg_utils;
|
||||
pub mod light_client_finality_update_verification;
|
||||
pub mod light_client_optimistic_update_verification;
|
||||
mod light_client_server_cache;
|
||||
pub mod merge_readiness;
|
||||
pub mod metrics;
|
||||
pub mod migrate;
|
||||
@ -49,7 +50,7 @@ mod pre_finalization_cache;
|
||||
pub mod proposer_prep_service;
|
||||
pub mod schema_change;
|
||||
pub mod shuffling_cache;
|
||||
mod snapshot_cache;
|
||||
pub mod snapshot_cache;
|
||||
pub mod state_advance_timer;
|
||||
pub mod sync_committee_rewards;
|
||||
pub mod sync_committee_verification;
|
||||
@ -61,8 +62,8 @@ pub mod validator_pubkey_cache;
|
||||
pub use self::beacon_chain::{
|
||||
AttestationProcessingOutcome, AvailabilityProcessingStatus, BeaconBlockResponse,
|
||||
BeaconBlockResponseWrapper, BeaconChain, BeaconChainTypes, BeaconStore, ChainSegmentResult,
|
||||
ForkChoiceError, OverrideForkchoiceUpdate, ProduceBlockVerification, StateSkipConfig,
|
||||
WhenSlotSkipped, INVALID_FINALIZED_MERGE_TRANSITION_BLOCK_SHUTDOWN_REASON,
|
||||
ForkChoiceError, LightClientProducerEvent, OverrideForkchoiceUpdate, ProduceBlockVerification,
|
||||
StateSkipConfig, WhenSlotSkipped, INVALID_FINALIZED_MERGE_TRANSITION_BLOCK_SHUTDOWN_REASON,
|
||||
INVALID_JUSTIFIED_PAYLOAD_SHUTDOWN_REASON,
|
||||
};
|
||||
pub use self::beacon_snapshot::BeaconSnapshot;
|
||||
|
||||
@ -1,11 +1,9 @@
|
||||
use crate::{BeaconChain, BeaconChainError, BeaconChainTypes};
|
||||
use crate::{BeaconChain, BeaconChainTypes};
|
||||
use derivative::Derivative;
|
||||
use slot_clock::SlotClock;
|
||||
use std::time::Duration;
|
||||
use strum::AsRefStr;
|
||||
use types::{
|
||||
light_client_update::Error as LightClientUpdateError, LightClientFinalityUpdate, Slot,
|
||||
};
|
||||
use types::LightClientFinalityUpdate;
|
||||
|
||||
/// Returned when a light client finality update was not successfully verified. It might not have been verified for
|
||||
/// two reasons:
|
||||
@ -16,8 +14,6 @@ use types::{
|
||||
/// (the `BeaconChainError` variant)
|
||||
#[derive(Debug, AsRefStr)]
|
||||
pub enum Error {
|
||||
/// Light client finality update message with a lower or equal finalized_header slot already forwarded.
|
||||
FinalityUpdateAlreadySeen,
|
||||
/// The light client finality message was received is prior to one-third of slot duration passage. (with
|
||||
/// respect to the gossip clock disparity and slot clock duration).
|
||||
///
|
||||
@ -26,29 +22,11 @@ pub enum Error {
|
||||
/// Assuming the local clock is correct, the peer has sent an invalid message.
|
||||
TooEarly,
|
||||
/// Light client finality update message does not match the locally constructed one.
|
||||
///
|
||||
/// ## Peer Scoring
|
||||
///
|
||||
InvalidLightClientFinalityUpdate,
|
||||
/// Signature slot start time is none.
|
||||
SigSlotStartIsNone,
|
||||
/// Failed to construct a LightClientFinalityUpdate from state.
|
||||
FailedConstructingUpdate,
|
||||
/// Beacon chain error occurred.
|
||||
BeaconChainError(BeaconChainError),
|
||||
LightClientUpdateError(LightClientUpdateError),
|
||||
}
|
||||
|
||||
impl From<BeaconChainError> for Error {
|
||||
fn from(e: BeaconChainError) -> Self {
|
||||
Error::BeaconChainError(e)
|
||||
}
|
||||
}
|
||||
|
||||
impl From<LightClientUpdateError> for Error {
|
||||
fn from(e: LightClientUpdateError) -> Self {
|
||||
Error::LightClientUpdateError(e)
|
||||
}
|
||||
}
|
||||
|
||||
/// Wraps a `LightClientFinalityUpdate` that has been verified for propagation on the gossip network.
|
||||
@ -63,71 +41,34 @@ impl<T: BeaconChainTypes> VerifiedLightClientFinalityUpdate<T> {
|
||||
/// Returns `Ok(Self)` if the `light_client_finality_update` is valid to be (re)published on the gossip
|
||||
/// network.
|
||||
pub fn verify(
|
||||
light_client_finality_update: LightClientFinalityUpdate<T::EthSpec>,
|
||||
rcv_finality_update: LightClientFinalityUpdate<T::EthSpec>,
|
||||
chain: &BeaconChain<T>,
|
||||
seen_timestamp: Duration,
|
||||
) -> Result<Self, Error> {
|
||||
let gossiped_finality_slot = light_client_finality_update.finalized_header.beacon.slot;
|
||||
let one_third_slot_duration = Duration::new(chain.spec.seconds_per_slot / 3, 0);
|
||||
let signature_slot = light_client_finality_update.signature_slot;
|
||||
let start_time = chain.slot_clock.start_of(signature_slot);
|
||||
let mut latest_seen_finality_update = chain.latest_seen_finality_update.lock();
|
||||
|
||||
let head = chain.canonical_head.cached_head();
|
||||
let head_block = &head.snapshot.beacon_block;
|
||||
let attested_block_root = head_block.message().parent_root();
|
||||
let attested_block = chain
|
||||
.get_blinded_block(&attested_block_root)?
|
||||
.ok_or(Error::FailedConstructingUpdate)?;
|
||||
let mut attested_state = chain
|
||||
.get_state(&attested_block.state_root(), Some(attested_block.slot()))?
|
||||
.ok_or(Error::FailedConstructingUpdate)?;
|
||||
|
||||
let finalized_block_root = attested_state.finalized_checkpoint().root;
|
||||
let finalized_block = chain
|
||||
.get_blinded_block(&finalized_block_root)?
|
||||
.ok_or(Error::FailedConstructingUpdate)?;
|
||||
let latest_seen_finality_update_slot = match latest_seen_finality_update.as_ref() {
|
||||
Some(update) => update.finalized_header.beacon.slot,
|
||||
None => Slot::new(0),
|
||||
};
|
||||
|
||||
// verify that no other finality_update with a lower or equal
|
||||
// finalized_header.slot was already forwarded on the network
|
||||
if gossiped_finality_slot <= latest_seen_finality_update_slot {
|
||||
return Err(Error::FinalityUpdateAlreadySeen);
|
||||
}
|
||||
|
||||
// verify that enough time has passed for the block to have been propagated
|
||||
match start_time {
|
||||
Some(time) => {
|
||||
if seen_timestamp + chain.spec.maximum_gossip_clock_disparity()
|
||||
< time + one_third_slot_duration
|
||||
{
|
||||
return Err(Error::TooEarly);
|
||||
}
|
||||
}
|
||||
None => return Err(Error::SigSlotStartIsNone),
|
||||
let start_time = chain
|
||||
.slot_clock
|
||||
.start_of(rcv_finality_update.signature_slot)
|
||||
.ok_or(Error::SigSlotStartIsNone)?;
|
||||
let one_third_slot_duration = Duration::new(chain.spec.seconds_per_slot / 3, 0);
|
||||
if seen_timestamp + chain.spec.maximum_gossip_clock_disparity()
|
||||
< start_time + one_third_slot_duration
|
||||
{
|
||||
return Err(Error::TooEarly);
|
||||
}
|
||||
|
||||
let head_state = &head.snapshot.beacon_state;
|
||||
let finality_update = LightClientFinalityUpdate::new(
|
||||
&chain.spec,
|
||||
head_state,
|
||||
head_block,
|
||||
&mut attested_state,
|
||||
&finalized_block,
|
||||
)?;
|
||||
let latest_finality_update = chain
|
||||
.light_client_server_cache
|
||||
.get_latest_finality_update()
|
||||
.ok_or(Error::FailedConstructingUpdate)?;
|
||||
|
||||
// verify that the gossiped finality update is the same as the locally constructed one.
|
||||
if finality_update != light_client_finality_update {
|
||||
if latest_finality_update != rcv_finality_update {
|
||||
return Err(Error::InvalidLightClientFinalityUpdate);
|
||||
}
|
||||
|
||||
*latest_seen_finality_update = Some(light_client_finality_update.clone());
|
||||
|
||||
Ok(Self {
|
||||
light_client_finality_update,
|
||||
light_client_finality_update: rcv_finality_update,
|
||||
seen_timestamp,
|
||||
})
|
||||
}
|
||||
|
||||
@ -1,12 +1,10 @@
|
||||
use crate::{BeaconChain, BeaconChainError, BeaconChainTypes};
|
||||
use crate::{BeaconChain, BeaconChainTypes};
|
||||
use derivative::Derivative;
|
||||
use eth2::types::Hash256;
|
||||
use slot_clock::SlotClock;
|
||||
use std::time::Duration;
|
||||
use strum::AsRefStr;
|
||||
use types::{
|
||||
light_client_update::Error as LightClientUpdateError, LightClientOptimisticUpdate, Slot,
|
||||
};
|
||||
use types::LightClientOptimisticUpdate;
|
||||
|
||||
/// Returned when a light client optimistic update was not successfully verified. It might not have been verified for
|
||||
/// two reasons:
|
||||
@ -17,8 +15,6 @@ use types::{
|
||||
/// (the `BeaconChainError` variant)
|
||||
#[derive(Debug, AsRefStr)]
|
||||
pub enum Error {
|
||||
/// Light client optimistic update message with a lower or equal optimistic_header slot already forwarded.
|
||||
OptimisticUpdateAlreadySeen,
|
||||
/// The light client optimistic message was received is prior to one-third of slot duration passage. (with
|
||||
/// respect to the gossip clock disparity and slot clock duration).
|
||||
///
|
||||
@ -27,9 +23,6 @@ pub enum Error {
|
||||
/// Assuming the local clock is correct, the peer has sent an invalid message.
|
||||
TooEarly,
|
||||
/// Light client optimistic update message does not match the locally constructed one.
|
||||
///
|
||||
/// ## Peer Scoring
|
||||
///
|
||||
InvalidLightClientOptimisticUpdate,
|
||||
/// Signature slot start time is none.
|
||||
SigSlotStartIsNone,
|
||||
@ -37,21 +30,6 @@ pub enum Error {
|
||||
FailedConstructingUpdate,
|
||||
/// Unknown block with parent root.
|
||||
UnknownBlockParentRoot(Hash256),
|
||||
/// Beacon chain error occurred.
|
||||
BeaconChainError(BeaconChainError),
|
||||
LightClientUpdateError(LightClientUpdateError),
|
||||
}
|
||||
|
||||
impl From<BeaconChainError> for Error {
|
||||
fn from(e: BeaconChainError) -> Self {
|
||||
Error::BeaconChainError(e)
|
||||
}
|
||||
}
|
||||
|
||||
impl From<LightClientUpdateError> for Error {
|
||||
fn from(e: LightClientUpdateError) -> Self {
|
||||
Error::LightClientUpdateError(e)
|
||||
}
|
||||
}
|
||||
|
||||
/// Wraps a `LightClientOptimisticUpdate` that has been verified for propagation on the gossip network.
|
||||
@ -67,52 +45,27 @@ impl<T: BeaconChainTypes> VerifiedLightClientOptimisticUpdate<T> {
|
||||
/// Returns `Ok(Self)` if the `light_client_optimistic_update` is valid to be (re)published on the gossip
|
||||
/// network.
|
||||
pub fn verify(
|
||||
light_client_optimistic_update: LightClientOptimisticUpdate<T::EthSpec>,
|
||||
rcv_optimistic_update: LightClientOptimisticUpdate<T::EthSpec>,
|
||||
chain: &BeaconChain<T>,
|
||||
seen_timestamp: Duration,
|
||||
) -> Result<Self, Error> {
|
||||
let gossiped_optimistic_slot = light_client_optimistic_update.attested_header.beacon.slot;
|
||||
// verify that enough time has passed for the block to have been propagated
|
||||
let start_time = chain
|
||||
.slot_clock
|
||||
.start_of(rcv_optimistic_update.signature_slot)
|
||||
.ok_or(Error::SigSlotStartIsNone)?;
|
||||
let one_third_slot_duration = Duration::new(chain.spec.seconds_per_slot / 3, 0);
|
||||
let signature_slot = light_client_optimistic_update.signature_slot;
|
||||
let start_time = chain.slot_clock.start_of(signature_slot);
|
||||
let mut latest_seen_optimistic_update = chain.latest_seen_optimistic_update.lock();
|
||||
if seen_timestamp + chain.spec.maximum_gossip_clock_disparity()
|
||||
< start_time + one_third_slot_duration
|
||||
{
|
||||
return Err(Error::TooEarly);
|
||||
}
|
||||
|
||||
let head = chain.canonical_head.cached_head();
|
||||
let head_block = &head.snapshot.beacon_block;
|
||||
let attested_block_root = head_block.message().parent_root();
|
||||
let attested_block = chain
|
||||
.get_blinded_block(&attested_block_root)?
|
||||
.ok_or(Error::FailedConstructingUpdate)?;
|
||||
|
||||
let attested_state = chain
|
||||
.get_state(&attested_block.state_root(), Some(attested_block.slot()))?
|
||||
.ok_or(Error::FailedConstructingUpdate)?;
|
||||
let latest_seen_optimistic_update_slot = match latest_seen_optimistic_update.as_ref() {
|
||||
Some(update) => update.attested_header.beacon.slot,
|
||||
None => Slot::new(0),
|
||||
};
|
||||
|
||||
// verify that no other optimistic_update with a lower or equal
|
||||
// optimistic_header.slot was already forwarded on the network
|
||||
if gossiped_optimistic_slot <= latest_seen_optimistic_update_slot {
|
||||
return Err(Error::OptimisticUpdateAlreadySeen);
|
||||
}
|
||||
|
||||
// verify that enough time has passed for the block to have been propagated
|
||||
match start_time {
|
||||
Some(time) => {
|
||||
if seen_timestamp + chain.spec.maximum_gossip_clock_disparity()
|
||||
< time + one_third_slot_duration
|
||||
{
|
||||
return Err(Error::TooEarly);
|
||||
}
|
||||
}
|
||||
None => return Err(Error::SigSlotStartIsNone),
|
||||
}
|
||||
|
||||
// check if we can process the optimistic update immediately
|
||||
// otherwise queue
|
||||
let canonical_root = light_client_optimistic_update
|
||||
let canonical_root = rcv_optimistic_update
|
||||
.attested_header
|
||||
.beacon
|
||||
.canonical_root();
|
||||
@ -121,19 +74,20 @@ impl<T: BeaconChainTypes> VerifiedLightClientOptimisticUpdate<T> {
|
||||
return Err(Error::UnknownBlockParentRoot(canonical_root));
|
||||
}
|
||||
|
||||
let optimistic_update =
|
||||
LightClientOptimisticUpdate::new(&chain.spec, head_block, &attested_state)?;
|
||||
let latest_optimistic_update = chain
|
||||
.light_client_server_cache
|
||||
.get_latest_optimistic_update()
|
||||
.ok_or(Error::FailedConstructingUpdate)?;
|
||||
|
||||
// verify that the gossiped optimistic update is the same as the locally constructed one.
|
||||
if optimistic_update != light_client_optimistic_update {
|
||||
if latest_optimistic_update != rcv_optimistic_update {
|
||||
return Err(Error::InvalidLightClientOptimisticUpdate);
|
||||
}
|
||||
|
||||
*latest_seen_optimistic_update = Some(light_client_optimistic_update.clone());
|
||||
|
||||
let parent_root = rcv_optimistic_update.attested_header.beacon.parent_root;
|
||||
Ok(Self {
|
||||
light_client_optimistic_update,
|
||||
parent_root: canonical_root,
|
||||
light_client_optimistic_update: rcv_optimistic_update,
|
||||
parent_root,
|
||||
seen_timestamp,
|
||||
})
|
||||
}
|
||||
|
||||
256
beacon_node/beacon_chain/src/light_client_server_cache.rs
Normal file
256
beacon_node/beacon_chain/src/light_client_server_cache.rs
Normal file
@ -0,0 +1,256 @@
|
||||
use crate::errors::BeaconChainError;
|
||||
use crate::{metrics, BeaconChainTypes, BeaconStore};
|
||||
use parking_lot::{Mutex, RwLock};
|
||||
use slog::{debug, Logger};
|
||||
use ssz_types::FixedVector;
|
||||
use std::num::NonZeroUsize;
|
||||
use types::light_client_update::{FinalizedRootProofLen, FINALIZED_ROOT_INDEX};
|
||||
use types::non_zero_usize::new_non_zero_usize;
|
||||
use types::{
|
||||
BeaconBlockRef, BeaconState, ChainSpec, EthSpec, ForkName, Hash256, LightClientFinalityUpdate,
|
||||
LightClientHeader, LightClientOptimisticUpdate, Slot, SyncAggregate,
|
||||
};
|
||||
|
||||
/// A prev block cache miss requires to re-generate the state of the post-parent block. Items in the
|
||||
/// prev block cache are very small 32 * (6 + 1) = 224 bytes. 32 is an arbitrary number that
|
||||
/// represents unlikely re-orgs, while keeping the cache very small.
|
||||
const PREV_BLOCK_CACHE_SIZE: NonZeroUsize = new_non_zero_usize(32);
|
||||
|
||||
/// This cache computes light client messages ahead of time, required to satisfy p2p and API
|
||||
/// requests. These messages include proofs on historical states, so on-demand computation is
|
||||
/// expensive.
|
||||
///
|
||||
pub struct LightClientServerCache<T: BeaconChainTypes> {
|
||||
/// Tracks a single global latest finality update out of all imported blocks.
|
||||
///
|
||||
/// TODO: Active discussion with @etan-status if this cache should be fork aware to return
|
||||
/// latest canonical (update with highest signature slot, where its attested header is part of
|
||||
/// the head chain) instead of global latest (update with highest signature slot, out of all
|
||||
/// branches).
|
||||
latest_finality_update: RwLock<Option<LightClientFinalityUpdate<T::EthSpec>>>,
|
||||
/// Tracks a single global latest optimistic update out of all imported blocks.
|
||||
latest_optimistic_update: RwLock<Option<LightClientOptimisticUpdate<T::EthSpec>>>,
|
||||
/// Caches state proofs by block root
|
||||
prev_block_cache: Mutex<lru::LruCache<Hash256, LightClientCachedData>>,
|
||||
}
|
||||
|
||||
impl<T: BeaconChainTypes> LightClientServerCache<T> {
|
||||
pub fn new() -> Self {
|
||||
Self {
|
||||
latest_finality_update: None.into(),
|
||||
latest_optimistic_update: None.into(),
|
||||
prev_block_cache: lru::LruCache::new(PREV_BLOCK_CACHE_SIZE).into(),
|
||||
}
|
||||
}
|
||||
|
||||
/// Compute and cache state proofs for latter production of light-client messages. Does not
|
||||
/// trigger block replay.
|
||||
pub fn cache_state_data(
|
||||
&self,
|
||||
spec: &ChainSpec,
|
||||
block: BeaconBlockRef<T::EthSpec>,
|
||||
block_root: Hash256,
|
||||
block_post_state: &mut BeaconState<T::EthSpec>,
|
||||
) -> Result<(), BeaconChainError> {
|
||||
let _timer = metrics::start_timer(&metrics::LIGHT_CLIENT_SERVER_CACHE_STATE_DATA_TIMES);
|
||||
|
||||
// Only post-altair
|
||||
if spec.fork_name_at_slot::<T::EthSpec>(block.slot()) == ForkName::Base {
|
||||
return Ok(());
|
||||
}
|
||||
|
||||
// Persist in memory cache for a descendent block
|
||||
|
||||
let cached_data = LightClientCachedData::from_state(block_post_state)?;
|
||||
self.prev_block_cache.lock().put(block_root, cached_data);
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
/// Given a block with a SyncAggregte computes better or more recent light client updates. The
|
||||
/// results are cached either on disk or memory to be served via p2p and rest API
|
||||
pub fn recompute_and_cache_updates(
|
||||
&self,
|
||||
log: &Logger,
|
||||
store: BeaconStore<T>,
|
||||
block_parent_root: &Hash256,
|
||||
block_slot: Slot,
|
||||
sync_aggregate: &SyncAggregate<T::EthSpec>,
|
||||
) -> Result<(), BeaconChainError> {
|
||||
let _timer =
|
||||
metrics::start_timer(&metrics::LIGHT_CLIENT_SERVER_CACHE_RECOMPUTE_UPDATES_TIMES);
|
||||
|
||||
let signature_slot = block_slot;
|
||||
let attested_block_root = block_parent_root;
|
||||
|
||||
let attested_block = store.get_blinded_block(attested_block_root)?.ok_or(
|
||||
BeaconChainError::DBInconsistent(format!(
|
||||
"Block not available {:?}",
|
||||
attested_block_root
|
||||
)),
|
||||
)?;
|
||||
|
||||
let cached_parts = self.get_or_compute_prev_block_cache(
|
||||
store.clone(),
|
||||
attested_block_root,
|
||||
&attested_block.state_root(),
|
||||
attested_block.slot(),
|
||||
)?;
|
||||
|
||||
let attested_slot = attested_block.slot();
|
||||
|
||||
// Spec: Full nodes SHOULD provide the LightClientOptimisticUpdate with the highest
|
||||
// attested_header.beacon.slot (if multiple, highest signature_slot) as selected by fork choice
|
||||
let is_latest_optimistic = match &self.latest_optimistic_update.read().clone() {
|
||||
Some(latest_optimistic_update) => {
|
||||
is_latest_optimistic_update(latest_optimistic_update, attested_slot, signature_slot)
|
||||
}
|
||||
None => true,
|
||||
};
|
||||
if is_latest_optimistic {
|
||||
// can create an optimistic update, that is more recent
|
||||
*self.latest_optimistic_update.write() = Some(LightClientOptimisticUpdate {
|
||||
attested_header: block_to_light_client_header(attested_block.message()),
|
||||
sync_aggregate: sync_aggregate.clone(),
|
||||
signature_slot,
|
||||
});
|
||||
};
|
||||
|
||||
// Spec: Full nodes SHOULD provide the LightClientFinalityUpdate with the highest
|
||||
// attested_header.beacon.slot (if multiple, highest signature_slot) as selected by fork choice
|
||||
let is_latest_finality = match &self.latest_finality_update.read().clone() {
|
||||
Some(latest_finality_update) => {
|
||||
is_latest_finality_update(latest_finality_update, attested_slot, signature_slot)
|
||||
}
|
||||
None => true,
|
||||
};
|
||||
if is_latest_finality & !cached_parts.finalized_block_root.is_zero() {
|
||||
// Immediately after checkpoint sync the finalized block may not be available yet.
|
||||
if let Some(finalized_block) =
|
||||
store.get_blinded_block(&cached_parts.finalized_block_root)?
|
||||
{
|
||||
*self.latest_finality_update.write() = Some(LightClientFinalityUpdate {
|
||||
// TODO: may want to cache this result from latest_optimistic_update if producing a
|
||||
// light_client header becomes expensive
|
||||
attested_header: block_to_light_client_header(attested_block.message()),
|
||||
finalized_header: block_to_light_client_header(finalized_block.message()),
|
||||
finality_branch: cached_parts.finality_branch.clone(),
|
||||
sync_aggregate: sync_aggregate.clone(),
|
||||
signature_slot,
|
||||
});
|
||||
} else {
|
||||
debug!(
|
||||
log,
|
||||
"Finalized block not available in store for light_client server";
|
||||
"finalized_block_root" => format!("{}", cached_parts.finalized_block_root),
|
||||
);
|
||||
}
|
||||
}
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
/// Retrieves prev block cached data from cache. If not present re-computes by retrieving the
|
||||
/// parent state, and inserts an entry to the cache.
|
||||
///
|
||||
/// In separate function since FnOnce of get_or_insert can not be fallible.
|
||||
fn get_or_compute_prev_block_cache(
|
||||
&self,
|
||||
store: BeaconStore<T>,
|
||||
block_root: &Hash256,
|
||||
block_state_root: &Hash256,
|
||||
block_slot: Slot,
|
||||
) -> Result<LightClientCachedData, BeaconChainError> {
|
||||
// Attempt to get the value from the cache first.
|
||||
if let Some(cached_parts) = self.prev_block_cache.lock().get(block_root) {
|
||||
return Ok(cached_parts.clone());
|
||||
}
|
||||
metrics::inc_counter(&metrics::LIGHT_CLIENT_SERVER_CACHE_PREV_BLOCK_CACHE_MISS);
|
||||
|
||||
// Compute the value, handling potential errors.
|
||||
let mut state = store
|
||||
.get_state(block_state_root, Some(block_slot))?
|
||||
.ok_or_else(|| {
|
||||
BeaconChainError::DBInconsistent(format!("Missing state {:?}", block_state_root))
|
||||
})?;
|
||||
let new_value = LightClientCachedData::from_state(&mut state)?;
|
||||
|
||||
// Insert value and return owned
|
||||
self.prev_block_cache
|
||||
.lock()
|
||||
.put(*block_root, new_value.clone());
|
||||
Ok(new_value)
|
||||
}
|
||||
|
||||
pub fn get_latest_finality_update(&self) -> Option<LightClientFinalityUpdate<T::EthSpec>> {
|
||||
self.latest_finality_update.read().clone()
|
||||
}
|
||||
|
||||
pub fn get_latest_optimistic_update(&self) -> Option<LightClientOptimisticUpdate<T::EthSpec>> {
|
||||
self.latest_optimistic_update.read().clone()
|
||||
}
|
||||
}
|
||||
|
||||
impl<T: BeaconChainTypes> Default for LightClientServerCache<T> {
|
||||
fn default() -> Self {
|
||||
Self::new()
|
||||
}
|
||||
}
|
||||
|
||||
type FinalityBranch = FixedVector<Hash256, FinalizedRootProofLen>;
|
||||
|
||||
#[derive(Clone)]
|
||||
struct LightClientCachedData {
|
||||
finality_branch: FinalityBranch,
|
||||
finalized_block_root: Hash256,
|
||||
}
|
||||
|
||||
impl LightClientCachedData {
|
||||
fn from_state<T: EthSpec>(state: &mut BeaconState<T>) -> Result<Self, BeaconChainError> {
|
||||
Ok(Self {
|
||||
finality_branch: state.compute_merkle_proof(FINALIZED_ROOT_INDEX)?.into(),
|
||||
finalized_block_root: state.finalized_checkpoint().root,
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
// Implements spec priorization rules:
|
||||
// > Full nodes SHOULD provide the LightClientFinalityUpdate with the highest attested_header.beacon.slot (if multiple, highest signature_slot)
|
||||
//
|
||||
// ref: https://github.com/ethereum/consensus-specs/blob/113c58f9bf9c08867f6f5f633c4d98e0364d612a/specs/altair/light-client/full-node.md#create_light_client_finality_update
|
||||
fn is_latest_finality_update<T: EthSpec>(
|
||||
prev: &LightClientFinalityUpdate<T>,
|
||||
attested_slot: Slot,
|
||||
signature_slot: Slot,
|
||||
) -> bool {
|
||||
if attested_slot > prev.attested_header.beacon.slot {
|
||||
true
|
||||
} else {
|
||||
attested_slot == prev.attested_header.beacon.slot && signature_slot > prev.signature_slot
|
||||
}
|
||||
}
|
||||
|
||||
// Implements spec priorization rules:
|
||||
// > Full nodes SHOULD provide the LightClientOptimisticUpdate with the highest attested_header.beacon.slot (if multiple, highest signature_slot)
|
||||
//
|
||||
// ref: https://github.com/ethereum/consensus-specs/blob/113c58f9bf9c08867f6f5f633c4d98e0364d612a/specs/altair/light-client/full-node.md#create_light_client_optimistic_update
|
||||
fn is_latest_optimistic_update<T: EthSpec>(
|
||||
prev: &LightClientOptimisticUpdate<T>,
|
||||
attested_slot: Slot,
|
||||
signature_slot: Slot,
|
||||
) -> bool {
|
||||
if attested_slot > prev.attested_header.beacon.slot {
|
||||
true
|
||||
} else {
|
||||
attested_slot == prev.attested_header.beacon.slot && signature_slot > prev.signature_slot
|
||||
}
|
||||
}
|
||||
|
||||
fn block_to_light_client_header<T: EthSpec>(
|
||||
block: BeaconBlockRef<T, types::BlindedPayload<T>>,
|
||||
) -> LightClientHeader {
|
||||
// TODO: make fork aware
|
||||
LightClientHeader {
|
||||
beacon: block.block_header(),
|
||||
}
|
||||
}
|
||||
@ -1128,6 +1128,47 @@ lazy_static! {
|
||||
// Create a custom bucket list for greater granularity in block delay
|
||||
Ok(vec![0.1, 0.2, 0.3,0.4,0.5,0.75,1.0,1.25,1.5,1.75,2.0,2.5,3.0,3.5,4.0,5.0,6.0,7.0,8.0,9.0,10.0,15.0,20.0])
|
||||
);
|
||||
|
||||
|
||||
/*
|
||||
* Data Availability cache metrics
|
||||
*/
|
||||
pub static ref DATA_AVAILABILITY_PROCESSING_CACHE_SIZE: Result<IntGauge> =
|
||||
try_create_int_gauge(
|
||||
"data_availability_processing_cache_size",
|
||||
"Number of entries in the data availability processing cache."
|
||||
);
|
||||
pub static ref DATA_AVAILABILITY_OVERFLOW_MEMORY_BLOCK_CACHE_SIZE: Result<IntGauge> =
|
||||
try_create_int_gauge(
|
||||
"data_availability_overflow_memory_block_cache_size",
|
||||
"Number of entries in the data availability overflow block memory cache."
|
||||
);
|
||||
pub static ref DATA_AVAILABILITY_OVERFLOW_MEMORY_STATE_CACHE_SIZE: Result<IntGauge> =
|
||||
try_create_int_gauge(
|
||||
"data_availability_overflow_memory_state_cache_size",
|
||||
"Number of entries in the data availability overflow state memory cache."
|
||||
);
|
||||
pub static ref DATA_AVAILABILITY_OVERFLOW_STORE_CACHE_SIZE: Result<IntGauge> =
|
||||
try_create_int_gauge(
|
||||
"data_availability_overflow_store_cache_size",
|
||||
"Number of entries in the data availability overflow store cache."
|
||||
);
|
||||
|
||||
/*
|
||||
* light_client server metrics
|
||||
*/
|
||||
pub static ref LIGHT_CLIENT_SERVER_CACHE_STATE_DATA_TIMES: Result<Histogram> = try_create_histogram(
|
||||
"beacon_light_client_server_cache_state_data_seconds",
|
||||
"Time taken to produce and cache state data",
|
||||
);
|
||||
pub static ref LIGHT_CLIENT_SERVER_CACHE_RECOMPUTE_UPDATES_TIMES: Result<Histogram> = try_create_histogram(
|
||||
"beacon_light_client_server_cache_recompute_updates_seconds",
|
||||
"Time taken to recompute and cache updates",
|
||||
);
|
||||
pub static ref LIGHT_CLIENT_SERVER_CACHE_PREV_BLOCK_CACHE_MISS: Result<IntCounter> = try_create_int_counter(
|
||||
"beacon_light_client_server_cache_prev_block_cache_miss",
|
||||
"Count of prev block cache misses",
|
||||
);
|
||||
}
|
||||
|
||||
/// Scrape the `beacon_chain` for metrics that are not constantly updated (e.g., the present slot,
|
||||
@ -1155,6 +1196,24 @@ pub fn scrape_for_metrics<T: BeaconChainTypes>(beacon_chain: &BeaconChain<T>) {
|
||||
)
|
||||
}
|
||||
|
||||
let da_checker_metrics = beacon_chain.data_availability_checker.metrics();
|
||||
set_gauge_by_usize(
|
||||
&DATA_AVAILABILITY_PROCESSING_CACHE_SIZE,
|
||||
da_checker_metrics.processing_cache_size,
|
||||
);
|
||||
set_gauge_by_usize(
|
||||
&DATA_AVAILABILITY_OVERFLOW_MEMORY_BLOCK_CACHE_SIZE,
|
||||
da_checker_metrics.block_cache_size,
|
||||
);
|
||||
set_gauge_by_usize(
|
||||
&DATA_AVAILABILITY_OVERFLOW_MEMORY_STATE_CACHE_SIZE,
|
||||
da_checker_metrics.state_cache_size,
|
||||
);
|
||||
set_gauge_by_usize(
|
||||
&DATA_AVAILABILITY_OVERFLOW_STORE_CACHE_SIZE,
|
||||
da_checker_metrics.num_store_entries,
|
||||
);
|
||||
|
||||
if let Some((size, num_lookups)) = beacon_chain.pre_finalization_block_cache.metrics() {
|
||||
set_gauge_by_usize(&PRE_FINALIZATION_BLOCK_CACHE_SIZE, size);
|
||||
set_gauge_by_usize(&PRE_FINALIZATION_BLOCK_LOOKUP_COUNT, num_lookups);
|
||||
|
||||
@ -43,7 +43,7 @@ impl<T: EthSpec> Consts for Attestation<T> {
|
||||
|
||||
/// We need to keep attestations for each slot of the current epoch.
|
||||
fn max_slot_capacity() -> usize {
|
||||
T::slots_per_epoch() as usize
|
||||
2 * T::slots_per_epoch() as usize
|
||||
}
|
||||
|
||||
/// As a DoS protection measure, the maximum number of distinct `Attestations` or
|
||||
|
||||
@ -24,18 +24,16 @@ use types::{Epoch, EthSpec, Hash256, Slot, Unsigned};
|
||||
|
||||
/// The maximum capacity of the `AutoPruningEpochContainer`.
|
||||
///
|
||||
/// Fits the next, current and previous epochs. We require the next epoch due to the
|
||||
/// `MAXIMUM_GOSSIP_CLOCK_DISPARITY`. We require the previous epoch since the specification
|
||||
/// declares:
|
||||
/// If the current epoch is N, this fits epoch N + 1, N, N - 1, and N - 2. We require the next epoch due
|
||||
/// to the `MAXIMUM_GOSSIP_CLOCK_DISPARITY`. We require the N - 2 epoch since the specification declares:
|
||||
///
|
||||
/// ```ignore
|
||||
/// aggregate.data.slot + ATTESTATION_PROPAGATION_SLOT_RANGE
|
||||
/// >= current_slot >= aggregate.data.slot
|
||||
/// the epoch of `aggregate.data.slot` is either the current or previous epoch
|
||||
/// ```
|
||||
///
|
||||
/// This means that during the current epoch we will always accept an attestation
|
||||
/// from at least one slot in the previous epoch.
|
||||
pub const MAX_CACHED_EPOCHS: u64 = 3;
|
||||
/// This means that during the current epoch we will always accept an attestation from
|
||||
/// at least one slot in the epoch prior to the previous epoch.
|
||||
pub const MAX_CACHED_EPOCHS: u64 = 4;
|
||||
|
||||
pub type ObservedAttesters<E> = AutoPruningEpochContainer<EpochBitfield, E>;
|
||||
pub type ObservedSyncContributors<E> =
|
||||
|
||||
@ -9,7 +9,7 @@ use types::{
|
||||
};
|
||||
|
||||
/// The default size of the cache.
|
||||
pub const DEFAULT_SNAPSHOT_CACHE_SIZE: usize = 4;
|
||||
pub const DEFAULT_SNAPSHOT_CACHE_SIZE: usize = 3;
|
||||
|
||||
/// The minimum block delay to clone the state in the cache instead of removing it.
|
||||
/// This helps keep block processing fast during re-orgs from late blocks.
|
||||
@ -174,6 +174,7 @@ impl<T: EthSpec> SnapshotCache<T> {
|
||||
self.snapshots.iter().map(|s| s.beacon_block_root).collect()
|
||||
}
|
||||
|
||||
#[allow(clippy::len_without_is_empty)]
|
||||
/// The number of snapshots contained in `self`.
|
||||
pub fn len(&self) -> usize {
|
||||
self.snapshots.len()
|
||||
|
||||
@ -51,7 +51,8 @@ const MAX_BLOCK_PRODUCTION_CACHE_DISTANCE: u64 = 4;
|
||||
#[derive(Debug)]
|
||||
enum Error {
|
||||
BeaconChain(BeaconChainError),
|
||||
HeadMissingFromSnapshotCache(Hash256),
|
||||
// We don't use the inner value directly, but it's used in the Debug impl.
|
||||
HeadMissingFromSnapshotCache(#[allow(dead_code)] Hash256),
|
||||
MaxDistanceExceeded {
|
||||
current_slot: Slot,
|
||||
head_slot: Slot,
|
||||
|
||||
@ -1,4 +1,4 @@
|
||||
// #![cfg(not(debug_assertions))]
|
||||
#![cfg(not(debug_assertions))]
|
||||
|
||||
use beacon_chain::block_verification_types::{AsBlock, ExecutedBlock, RpcBlock};
|
||||
use beacon_chain::{
|
||||
|
||||
@ -1820,81 +1820,94 @@ struct InvalidHeadSetup {
|
||||
}
|
||||
|
||||
impl InvalidHeadSetup {
|
||||
/// This function aims to produce two things:
|
||||
///
|
||||
/// 1. A chain where the only viable head block has an invalid execution payload.
|
||||
/// 2. A block (`fork_block`) which will become the head of the chain when
|
||||
/// it is imported.
|
||||
async fn new() -> InvalidHeadSetup {
|
||||
let slots_per_epoch = E::slots_per_epoch();
|
||||
let mut rig = InvalidPayloadRig::new().enable_attestations();
|
||||
rig.move_to_terminal_block();
|
||||
rig.import_block(Payload::Valid).await; // Import a valid transition block.
|
||||
|
||||
// Import blocks until the first time the chain finalizes.
|
||||
// Import blocks until the first time the chain finalizes. This avoids
|
||||
// some edge-cases around genesis.
|
||||
while rig.cached_head().finalized_checkpoint().epoch == 0 {
|
||||
rig.import_block(Payload::Syncing).await;
|
||||
}
|
||||
|
||||
let slots_per_epoch = E::slots_per_epoch();
|
||||
let start_slot = rig.cached_head().head_slot() + 1;
|
||||
let mut opt_fork_block = None;
|
||||
// Define a helper function.
|
||||
let chain = rig.harness.chain.clone();
|
||||
let get_unrealized_justified_epoch = move || {
|
||||
chain
|
||||
.canonical_head
|
||||
.fork_choice_read_lock()
|
||||
.unrealized_justified_checkpoint()
|
||||
.epoch
|
||||
};
|
||||
|
||||
assert_eq!(start_slot % slots_per_epoch, 1);
|
||||
for i in 0..slots_per_epoch - 1 {
|
||||
let slot = start_slot + i;
|
||||
let slot_offset = slot.as_u64() % slots_per_epoch;
|
||||
|
||||
rig.harness.set_current_slot(slot);
|
||||
|
||||
if slot_offset == slots_per_epoch - 1 {
|
||||
// Optimistic head block right before epoch boundary.
|
||||
let is_valid = Payload::Syncing;
|
||||
rig.import_block_parametric(is_valid, is_valid, Some(slot), |error| {
|
||||
matches!(
|
||||
error,
|
||||
BlockError::ExecutionPayloadError(
|
||||
ExecutionPayloadError::RejectedByExecutionEngine { .. }
|
||||
)
|
||||
)
|
||||
})
|
||||
.await;
|
||||
} else if 3 * slot_offset < 2 * slots_per_epoch {
|
||||
// Valid block in previous epoch.
|
||||
rig.import_block(Payload::Valid).await;
|
||||
} else if slot_offset == slots_per_epoch - 2 {
|
||||
// Fork block one slot prior to invalid head, not applied immediately.
|
||||
let parent_state = rig
|
||||
.harness
|
||||
.chain
|
||||
.state_at_slot(slot - 1, StateSkipConfig::WithStateRoots)
|
||||
.unwrap();
|
||||
let (fork_block_tuple, _) = rig.harness.make_block(parent_state, slot).await;
|
||||
opt_fork_block = Some(fork_block_tuple.0);
|
||||
} else {
|
||||
// Skipped slot.
|
||||
};
|
||||
// Import more blocks until there is a new and higher unrealized
|
||||
// justified checkpoint.
|
||||
//
|
||||
// The result will be a single chain where the head block has a higher
|
||||
// unrealized justified checkpoint than all other blocks in the chain.
|
||||
let initial_unrealized_justified = get_unrealized_justified_epoch();
|
||||
while get_unrealized_justified_epoch() == initial_unrealized_justified {
|
||||
rig.import_block(Payload::Syncing).await;
|
||||
}
|
||||
|
||||
let invalid_head = rig.cached_head();
|
||||
assert_eq!(
|
||||
invalid_head.head_slot() % slots_per_epoch,
|
||||
slots_per_epoch - 1
|
||||
);
|
||||
// Create a forked block that competes with the head block. Both the
|
||||
// head block and this fork block will share the same parent.
|
||||
//
|
||||
// The fork block and head block will both have an unrealized justified
|
||||
// checkpoint at epoch `N` whilst their parent is at `N - 1`.
|
||||
let head_slot = rig.cached_head().head_slot();
|
||||
let parent_slot = head_slot - 1;
|
||||
let fork_block_slot = head_slot + 1;
|
||||
let parent_state = rig
|
||||
.harness
|
||||
.chain
|
||||
.state_at_slot(parent_slot, StateSkipConfig::WithStateRoots)
|
||||
.unwrap();
|
||||
let (fork_block_tuple, _) = rig.harness.make_block(parent_state, fork_block_slot).await;
|
||||
let fork_block = fork_block_tuple.0;
|
||||
|
||||
// Advance clock to new epoch to realize the justification of soon-to-be-invalid head block.
|
||||
rig.harness.set_current_slot(invalid_head.head_slot() + 1);
|
||||
let invalid_head = rig.cached_head();
|
||||
|
||||
// Advance the chain forward two epochs past the current head block.
|
||||
//
|
||||
// This ensures that `voting_source.epoch + 2 >= current_epoch` is
|
||||
// `false` in the `node_is_viable_for_head` function. In effect, this
|
||||
// ensures that no other block but the current head block is viable as a
|
||||
// head block.
|
||||
let invalid_head_epoch = invalid_head.head_slot().epoch(slots_per_epoch);
|
||||
let new_wall_clock_epoch = invalid_head_epoch + 2;
|
||||
rig.harness
|
||||
.set_current_slot(new_wall_clock_epoch.start_slot(slots_per_epoch));
|
||||
|
||||
// Invalidate the head block.
|
||||
rig.invalidate_manually(invalid_head.head_block_root())
|
||||
.await;
|
||||
|
||||
// Since our setup ensures that there is only a single, invalid block
|
||||
// that's viable for head (according to FFG filtering), setting the
|
||||
// head block as invalid should not result in another head being chosen.
|
||||
// Rather, it should fail to run fork choice and leave the invalid block as
|
||||
// the head.
|
||||
assert!(rig
|
||||
.canonical_head()
|
||||
.head_execution_status()
|
||||
.unwrap()
|
||||
.is_invalid());
|
||||
|
||||
// Finding a new head should fail since the only possible head is not valid.
|
||||
// Ensure that we're getting the correct error when trying to find a new
|
||||
// head.
|
||||
rig.assert_get_head_error_contains("InvalidBestNode");
|
||||
|
||||
Self {
|
||||
rig,
|
||||
fork_block: opt_fork_block.unwrap(),
|
||||
fork_block,
|
||||
invalid_head,
|
||||
}
|
||||
}
|
||||
|
||||
@ -3,6 +3,7 @@
|
||||
use beacon_chain::attestation_verification::Error as AttnError;
|
||||
use beacon_chain::block_verification_types::RpcBlock;
|
||||
use beacon_chain::builder::BeaconChainBuilder;
|
||||
use beacon_chain::data_availability_checker::AvailableBlock;
|
||||
use beacon_chain::schema_change::migrate_schema;
|
||||
use beacon_chain::test_utils::{
|
||||
mock_execution_layer_from_parts, test_spec, AttestationStrategy, BeaconChainHarness,
|
||||
@ -2395,6 +2396,7 @@ async fn weak_subjectivity_sync_test(slots: Vec<Slot>, checkpoint_slot: Slot) {
|
||||
.get_full_block(&wss_block_root)
|
||||
.unwrap()
|
||||
.unwrap();
|
||||
let wss_blobs_opt = harness.chain.store.get_blobs(&wss_block_root).unwrap();
|
||||
let wss_state = full_store
|
||||
.get_state(&wss_state_root, Some(checkpoint_slot))
|
||||
.unwrap()
|
||||
@ -2437,7 +2439,12 @@ async fn weak_subjectivity_sync_test(slots: Vec<Slot>, checkpoint_slot: Slot) {
|
||||
.custom_spec(test_spec::<E>())
|
||||
.task_executor(harness.chain.task_executor.clone())
|
||||
.logger(log.clone())
|
||||
.weak_subjectivity_state(wss_state, wss_block.clone(), genesis_state)
|
||||
.weak_subjectivity_state(
|
||||
wss_state,
|
||||
wss_block.clone(),
|
||||
wss_blobs_opt.clone(),
|
||||
genesis_state,
|
||||
)
|
||||
.unwrap()
|
||||
.store_migrator_config(MigratorConfig::default().blocking())
|
||||
.dummy_eth1_backend()
|
||||
@ -2455,6 +2462,17 @@ async fn weak_subjectivity_sync_test(slots: Vec<Slot>, checkpoint_slot: Slot) {
|
||||
.expect("should build");
|
||||
|
||||
let beacon_chain = Arc::new(beacon_chain);
|
||||
let wss_block_root = wss_block.canonical_root();
|
||||
let store_wss_block = harness
|
||||
.chain
|
||||
.get_block(&wss_block_root)
|
||||
.await
|
||||
.unwrap()
|
||||
.unwrap();
|
||||
let store_wss_blobs_opt = beacon_chain.store.get_blobs(&wss_block_root).unwrap();
|
||||
|
||||
assert_eq!(store_wss_block, wss_block);
|
||||
assert_eq!(store_wss_blobs_opt, wss_blobs_opt);
|
||||
|
||||
// Apply blocks forward to reach head.
|
||||
let chain_dump = harness.chain.chain_dump().unwrap();
|
||||
@ -2547,6 +2565,25 @@ async fn weak_subjectivity_sync_test(slots: Vec<Slot>, checkpoint_slot: Slot) {
|
||||
}
|
||||
}
|
||||
|
||||
// Corrupt the signature on the 1st block to ensure that the backfill processor is checking
|
||||
// signatures correctly. Regression test for https://github.com/sigp/lighthouse/pull/5120.
|
||||
let mut batch_with_invalid_first_block = available_blocks.clone();
|
||||
batch_with_invalid_first_block[0] = {
|
||||
let (block_root, block, blobs) = available_blocks[0].clone().deconstruct();
|
||||
let mut corrupt_block = (*block).clone();
|
||||
*corrupt_block.signature_mut() = Signature::empty();
|
||||
AvailableBlock::__new_for_testing(block_root, Arc::new(corrupt_block), blobs)
|
||||
};
|
||||
|
||||
// Importing the invalid batch should error.
|
||||
assert!(matches!(
|
||||
beacon_chain
|
||||
.import_historical_block_batch(batch_with_invalid_first_block)
|
||||
.unwrap_err(),
|
||||
BeaconChainError::HistoricalBlockError(HistoricalBlockError::InvalidSignature)
|
||||
));
|
||||
|
||||
// Importing the batch with valid signatures should succeed.
|
||||
beacon_chain
|
||||
.import_historical_block_batch(available_blocks.clone())
|
||||
.unwrap();
|
||||
|
||||
@ -82,12 +82,15 @@ pub enum ReprocessQueueMessage {
|
||||
/// A gossip block for hash `X` is being imported, we should queue the rpc block for the same
|
||||
/// hash until the gossip block is imported.
|
||||
RpcBlock(QueuedRpcBlock),
|
||||
/// A block that was successfully processed. We use this to handle attestations and light client updates
|
||||
/// A block that was successfully processed. We use this to handle attestations updates
|
||||
/// for unknown blocks.
|
||||
BlockImported {
|
||||
block_root: Hash256,
|
||||
parent_root: Hash256,
|
||||
},
|
||||
/// A new `LightClientOptimisticUpdate` has been produced. We use this to handle light client
|
||||
/// updates for unknown parent blocks.
|
||||
NewLightClientOptimisticUpdate { parent_root: Hash256 },
|
||||
/// An unaggregated attestation that references an unknown block.
|
||||
UnknownBlockUnaggregate(QueuedUnaggregate),
|
||||
/// An aggregated attestation that references an unknown block.
|
||||
@ -688,6 +691,8 @@ impl<S: SlotClock> ReprocessQueue<S> {
|
||||
);
|
||||
}
|
||||
}
|
||||
}
|
||||
InboundEvent::Msg(NewLightClientOptimisticUpdate { parent_root }) => {
|
||||
// Unqueue the light client optimistic updates we have for this root, if any.
|
||||
if let Some(queued_lc_id) = self
|
||||
.awaiting_lc_updates_per_parent_root
|
||||
|
||||
@ -25,6 +25,7 @@ serde = { workspace = true }
|
||||
error-chain = { workspace = true }
|
||||
slog = { workspace = true }
|
||||
tokio = { workspace = true }
|
||||
futures = { workspace = true }
|
||||
dirs = { workspace = true }
|
||||
eth1 = { workspace = true }
|
||||
eth2 = { workspace = true }
|
||||
@ -44,3 +45,4 @@ monitoring_api = { workspace = true }
|
||||
execution_layer = { workspace = true }
|
||||
beacon_processor = { workspace = true }
|
||||
num_cpus = { workspace = true }
|
||||
ethereum_ssz = { workspace = true }
|
||||
|
||||
@ -1,322 +0,0 @@
|
||||
use crate::*;
|
||||
use lighthouse_network::PubsubMessage;
|
||||
use network::NetworkMessage;
|
||||
use slog::{debug, info, warn, Logger};
|
||||
use slot_clock::SlotClock;
|
||||
use std::cmp;
|
||||
use std::collections::HashSet;
|
||||
use std::mem;
|
||||
use std::time::Duration;
|
||||
use tokio::sync::mpsc::UnboundedSender;
|
||||
use tokio::time::sleep;
|
||||
use types::EthSpec;
|
||||
|
||||
/// The size of each chunk of addresses changes to be broadcast at the Capella
|
||||
/// fork.
|
||||
const BROADCAST_CHUNK_SIZE: usize = 128;
|
||||
/// The delay between broadcasting each chunk.
|
||||
const BROADCAST_CHUNK_DELAY: Duration = Duration::from_millis(500);
|
||||
|
||||
/// If the Capella fork has already been reached, `broadcast_address_changes` is
|
||||
/// called immediately.
|
||||
///
|
||||
/// If the Capella fork has not been reached, waits until the start of the fork
|
||||
/// epoch and then calls `broadcast_address_changes`.
|
||||
pub async fn broadcast_address_changes_at_capella<T: BeaconChainTypes>(
|
||||
chain: &BeaconChain<T>,
|
||||
network_send: UnboundedSender<NetworkMessage<T::EthSpec>>,
|
||||
log: &Logger,
|
||||
) {
|
||||
let spec = &chain.spec;
|
||||
let slot_clock = &chain.slot_clock;
|
||||
|
||||
let capella_fork_slot = if let Some(epoch) = spec.capella_fork_epoch {
|
||||
epoch.start_slot(T::EthSpec::slots_per_epoch())
|
||||
} else {
|
||||
// Exit now if Capella is not defined.
|
||||
return;
|
||||
};
|
||||
|
||||
// Wait until the Capella fork epoch.
|
||||
while chain.slot().map_or(true, |slot| slot < capella_fork_slot) {
|
||||
match slot_clock.duration_to_slot(capella_fork_slot) {
|
||||
Some(duration) => {
|
||||
// Sleep until the Capella fork.
|
||||
sleep(duration).await;
|
||||
break;
|
||||
}
|
||||
None => {
|
||||
// We were unable to read the slot clock wait another slot
|
||||
// and then try again.
|
||||
sleep(slot_clock.slot_duration()).await;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// The following function will be called in two scenarios:
|
||||
//
|
||||
// 1. The node has been running for some time and the Capella fork has just
|
||||
// been reached.
|
||||
// 2. The node has just started and it is *after* the Capella fork.
|
||||
broadcast_address_changes(chain, network_send, log).await
|
||||
}
|
||||
|
||||
/// Broadcasts any address changes that are flagged for broadcasting at the
|
||||
/// Capella fork epoch.
|
||||
///
|
||||
/// Address changes are published in chunks, with a delay between each chunk.
|
||||
/// This helps reduce the load on the P2P network and also helps prevent us from
|
||||
/// clogging our `network_send` channel and being late to publish
|
||||
/// blocks, attestations, etc.
|
||||
pub async fn broadcast_address_changes<T: BeaconChainTypes>(
|
||||
chain: &BeaconChain<T>,
|
||||
network_send: UnboundedSender<NetworkMessage<T::EthSpec>>,
|
||||
log: &Logger,
|
||||
) {
|
||||
let head = chain.head_snapshot();
|
||||
let mut changes = chain
|
||||
.op_pool
|
||||
.get_bls_to_execution_changes_received_pre_capella(&head.beacon_state, &chain.spec);
|
||||
|
||||
while !changes.is_empty() {
|
||||
// This `split_off` approach is to allow us to have owned chunks of the
|
||||
// `changes` vec. The `std::slice::Chunks` method uses references and
|
||||
// the `itertools` iterator that achives this isn't `Send` so it doesn't
|
||||
// work well with the `sleep` at the end of the loop.
|
||||
let tail = changes.split_off(cmp::min(BROADCAST_CHUNK_SIZE, changes.len()));
|
||||
let chunk = mem::replace(&mut changes, tail);
|
||||
|
||||
let mut published_indices = HashSet::with_capacity(BROADCAST_CHUNK_SIZE);
|
||||
let mut num_ok = 0;
|
||||
let mut num_err = 0;
|
||||
|
||||
// Publish each individual address change.
|
||||
for address_change in chunk {
|
||||
let validator_index = address_change.message.validator_index;
|
||||
|
||||
let pubsub_message = PubsubMessage::BlsToExecutionChange(Box::new(address_change));
|
||||
let message = NetworkMessage::Publish {
|
||||
messages: vec![pubsub_message],
|
||||
};
|
||||
// It seems highly unlikely that this unbounded send will fail, but
|
||||
// we handle the result nonetheless.
|
||||
if let Err(e) = network_send.send(message) {
|
||||
debug!(
|
||||
log,
|
||||
"Failed to publish change message";
|
||||
"error" => ?e,
|
||||
"validator_index" => validator_index
|
||||
);
|
||||
num_err += 1;
|
||||
} else {
|
||||
debug!(
|
||||
log,
|
||||
"Published address change message";
|
||||
"validator_index" => validator_index
|
||||
);
|
||||
num_ok += 1;
|
||||
published_indices.insert(validator_index);
|
||||
}
|
||||
}
|
||||
|
||||
// Remove any published indices from the list of indices that need to be
|
||||
// published.
|
||||
chain
|
||||
.op_pool
|
||||
.register_indices_broadcasted_at_capella(&published_indices);
|
||||
|
||||
info!(
|
||||
log,
|
||||
"Published address change messages";
|
||||
"num_published" => num_ok,
|
||||
);
|
||||
|
||||
if num_err > 0 {
|
||||
warn!(
|
||||
log,
|
||||
"Failed to publish address changes";
|
||||
"info" => "failed messages will be retried",
|
||||
"num_unable_to_publish" => num_err,
|
||||
);
|
||||
}
|
||||
|
||||
sleep(BROADCAST_CHUNK_DELAY).await;
|
||||
}
|
||||
|
||||
debug!(
|
||||
log,
|
||||
"Address change routine complete";
|
||||
);
|
||||
}
|
||||
|
||||
#[cfg(not(debug_assertions))] // Tests run too slow in debug.
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use super::*;
|
||||
use beacon_chain::test_utils::{BeaconChainHarness, EphemeralHarnessType};
|
||||
use operation_pool::ReceivedPreCapella;
|
||||
use state_processing::{SigVerifiedOp, VerifyOperation};
|
||||
use std::collections::HashSet;
|
||||
use tokio::sync::mpsc;
|
||||
use types::*;
|
||||
|
||||
type E = MainnetEthSpec;
|
||||
|
||||
pub const VALIDATOR_COUNT: usize = BROADCAST_CHUNK_SIZE * 3;
|
||||
pub const EXECUTION_ADDRESS: Address = Address::repeat_byte(42);
|
||||
|
||||
struct Tester {
|
||||
harness: BeaconChainHarness<EphemeralHarnessType<E>>,
|
||||
/// Changes which should be broadcast at the Capella fork.
|
||||
received_pre_capella_changes: Vec<SigVerifiedOp<SignedBlsToExecutionChange, E>>,
|
||||
/// Changes which should *not* be broadcast at the Capella fork.
|
||||
not_received_pre_capella_changes: Vec<SigVerifiedOp<SignedBlsToExecutionChange, E>>,
|
||||
}
|
||||
|
||||
impl Tester {
|
||||
fn new() -> Self {
|
||||
let altair_fork_epoch = Epoch::new(0);
|
||||
let bellatrix_fork_epoch = Epoch::new(0);
|
||||
let capella_fork_epoch = Epoch::new(2);
|
||||
|
||||
let mut spec = E::default_spec();
|
||||
spec.altair_fork_epoch = Some(altair_fork_epoch);
|
||||
spec.bellatrix_fork_epoch = Some(bellatrix_fork_epoch);
|
||||
spec.capella_fork_epoch = Some(capella_fork_epoch);
|
||||
|
||||
let harness = BeaconChainHarness::builder(E::default())
|
||||
.spec(spec)
|
||||
.logger(logging::test_logger())
|
||||
.deterministic_keypairs(VALIDATOR_COUNT)
|
||||
.deterministic_withdrawal_keypairs(VALIDATOR_COUNT)
|
||||
.fresh_ephemeral_store()
|
||||
.mock_execution_layer()
|
||||
.build();
|
||||
|
||||
Self {
|
||||
harness,
|
||||
received_pre_capella_changes: <_>::default(),
|
||||
not_received_pre_capella_changes: <_>::default(),
|
||||
}
|
||||
}
|
||||
|
||||
fn produce_verified_address_change(
|
||||
&self,
|
||||
validator_index: u64,
|
||||
) -> SigVerifiedOp<SignedBlsToExecutionChange, E> {
|
||||
let change = self
|
||||
.harness
|
||||
.make_bls_to_execution_change(validator_index, EXECUTION_ADDRESS);
|
||||
let head = self.harness.chain.head_snapshot();
|
||||
|
||||
change
|
||||
.validate(&head.beacon_state, &self.harness.spec)
|
||||
.unwrap()
|
||||
}
|
||||
|
||||
fn produce_received_pre_capella_changes(mut self, indices: Vec<u64>) -> Self {
|
||||
for validator_index in indices {
|
||||
self.received_pre_capella_changes
|
||||
.push(self.produce_verified_address_change(validator_index));
|
||||
}
|
||||
self
|
||||
}
|
||||
|
||||
fn produce_not_received_pre_capella_changes(mut self, indices: Vec<u64>) -> Self {
|
||||
for validator_index in indices {
|
||||
self.not_received_pre_capella_changes
|
||||
.push(self.produce_verified_address_change(validator_index));
|
||||
}
|
||||
self
|
||||
}
|
||||
|
||||
async fn run(self) {
|
||||
let harness = self.harness;
|
||||
let chain = harness.chain.clone();
|
||||
|
||||
let mut broadcast_indices = HashSet::new();
|
||||
for change in self.received_pre_capella_changes {
|
||||
broadcast_indices.insert(change.as_inner().message.validator_index);
|
||||
chain
|
||||
.op_pool
|
||||
.insert_bls_to_execution_change(change, ReceivedPreCapella::Yes);
|
||||
}
|
||||
|
||||
let mut non_broadcast_indices = HashSet::new();
|
||||
for change in self.not_received_pre_capella_changes {
|
||||
non_broadcast_indices.insert(change.as_inner().message.validator_index);
|
||||
chain
|
||||
.op_pool
|
||||
.insert_bls_to_execution_change(change, ReceivedPreCapella::No);
|
||||
}
|
||||
|
||||
harness.set_current_slot(
|
||||
chain
|
||||
.spec
|
||||
.capella_fork_epoch
|
||||
.unwrap()
|
||||
.start_slot(E::slots_per_epoch()),
|
||||
);
|
||||
|
||||
let (sender, mut receiver) = mpsc::unbounded_channel();
|
||||
|
||||
broadcast_address_changes_at_capella(&chain, sender, &logging::test_logger()).await;
|
||||
|
||||
let mut broadcasted_changes = vec![];
|
||||
while let Some(NetworkMessage::Publish { mut messages }) = receiver.recv().await {
|
||||
match messages.pop().unwrap() {
|
||||
PubsubMessage::BlsToExecutionChange(change) => broadcasted_changes.push(change),
|
||||
_ => panic!("unexpected message"),
|
||||
}
|
||||
}
|
||||
|
||||
assert_eq!(
|
||||
broadcasted_changes.len(),
|
||||
broadcast_indices.len(),
|
||||
"all expected changes should have been broadcast"
|
||||
);
|
||||
|
||||
for broadcasted in &broadcasted_changes {
|
||||
assert!(
|
||||
!non_broadcast_indices.contains(&broadcasted.message.validator_index),
|
||||
"messages not flagged for broadcast should not have been broadcast"
|
||||
);
|
||||
}
|
||||
|
||||
let head = chain.head_snapshot();
|
||||
assert!(
|
||||
chain
|
||||
.op_pool
|
||||
.get_bls_to_execution_changes_received_pre_capella(
|
||||
&head.beacon_state,
|
||||
&chain.spec,
|
||||
)
|
||||
.is_empty(),
|
||||
"there shouldn't be any capella broadcast changes left in the op pool"
|
||||
);
|
||||
}
|
||||
}
|
||||
|
||||
// Useful for generating even-numbered indices. Required since only even
|
||||
// numbered genesis validators have BLS credentials.
|
||||
fn even_indices(start: u64, count: usize) -> Vec<u64> {
|
||||
(start..).filter(|i| i % 2 == 0).take(count).collect()
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
async fn one_chunk() {
|
||||
Tester::new()
|
||||
.produce_received_pre_capella_changes(even_indices(0, 4))
|
||||
.produce_not_received_pre_capella_changes(even_indices(10, 4))
|
||||
.run()
|
||||
.await;
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
async fn multiple_chunks() {
|
||||
Tester::new()
|
||||
.produce_received_pre_capella_changes(even_indices(0, BROADCAST_CHUNK_SIZE * 3 / 2))
|
||||
.run()
|
||||
.await;
|
||||
}
|
||||
}
|
||||
@ -1,4 +1,6 @@
|
||||
use crate::address_change_broadcast::broadcast_address_changes_at_capella;
|
||||
use crate::compute_light_client_updates::{
|
||||
compute_light_client_updates, LIGHT_CLIENT_SERVER_CHANNEL_CAPACITY,
|
||||
};
|
||||
use crate::config::{ClientGenesis, Config as ClientConfig};
|
||||
use crate::notifier::spawn_notifier;
|
||||
use crate::Client;
|
||||
@ -7,6 +9,7 @@ use beacon_chain::data_availability_checker::start_availability_cache_maintenanc
|
||||
use beacon_chain::otb_verification_service::start_otb_verification_service;
|
||||
use beacon_chain::proposer_prep_service::start_proposer_prep_service;
|
||||
use beacon_chain::schema_change::migrate_schema;
|
||||
use beacon_chain::LightClientProducerEvent;
|
||||
use beacon_chain::{
|
||||
builder::{BeaconChainBuilder, Witness},
|
||||
eth1_chain::{CachingEth1Backend, Eth1Chain},
|
||||
@ -24,6 +27,7 @@ use eth2::{
|
||||
BeaconNodeHttpClient, Error as ApiError, Timeouts,
|
||||
};
|
||||
use execution_layer::ExecutionLayer;
|
||||
use futures::channel::mpsc::Receiver;
|
||||
use genesis::{interop_genesis_state, Eth1GenesisService, DEFAULT_ETH1_BLOCK_HASH};
|
||||
use lighthouse_network::{prometheus_client::registry::Registry, NetworkGlobals};
|
||||
use monitoring_api::{MonitoringHttpClient, ProcessType};
|
||||
@ -31,6 +35,7 @@ use network::{NetworkConfig, NetworkSenders, NetworkService};
|
||||
use slasher::Slasher;
|
||||
use slasher_service::SlasherService;
|
||||
use slog::{debug, info, warn, Logger};
|
||||
use ssz::Decode;
|
||||
use std::net::TcpListener;
|
||||
use std::path::{Path, PathBuf};
|
||||
use std::sync::Arc;
|
||||
@ -39,7 +44,7 @@ use std::time::{SystemTime, UNIX_EPOCH};
|
||||
use timer::spawn_timer;
|
||||
use tokio::sync::oneshot;
|
||||
use types::{
|
||||
test_utils::generate_deterministic_keypairs, BeaconState, ChainSpec, EthSpec,
|
||||
test_utils::generate_deterministic_keypairs, BeaconState, BlobSidecarList, ChainSpec, EthSpec,
|
||||
ExecutionBlockHash, Hash256, SignedBeaconBlock,
|
||||
};
|
||||
|
||||
@ -83,6 +88,7 @@ pub struct ClientBuilder<T: BeaconChainTypes> {
|
||||
slasher: Option<Arc<Slasher<T::EthSpec>>>,
|
||||
beacon_processor_config: Option<BeaconProcessorConfig>,
|
||||
beacon_processor_channels: Option<BeaconProcessorChannels<T::EthSpec>>,
|
||||
light_client_server_rv: Option<Receiver<LightClientProducerEvent<T::EthSpec>>>,
|
||||
eth_spec_instance: T::EthSpec,
|
||||
}
|
||||
|
||||
@ -118,6 +124,7 @@ where
|
||||
eth_spec_instance,
|
||||
beacon_processor_config: None,
|
||||
beacon_processor_channels: None,
|
||||
light_client_server_rv: None,
|
||||
}
|
||||
}
|
||||
|
||||
@ -206,6 +213,16 @@ where
|
||||
builder
|
||||
};
|
||||
|
||||
let builder = if config.network.enable_light_client_server {
|
||||
let (tx, rv) = futures::channel::mpsc::channel::<LightClientProducerEvent<TEthSpec>>(
|
||||
LIGHT_CLIENT_SERVER_CHANNEL_CAPACITY,
|
||||
);
|
||||
self.light_client_server_rv = Some(rv);
|
||||
builder.light_client_server_tx(tx)
|
||||
} else {
|
||||
builder
|
||||
};
|
||||
|
||||
let chain_exists = builder.store_contains_beacon_chain().unwrap_or(false);
|
||||
|
||||
// If the client is expect to resume but there's no beacon chain in the database,
|
||||
@ -302,6 +319,7 @@ where
|
||||
ClientGenesis::WeakSubjSszBytes {
|
||||
anchor_state_bytes,
|
||||
anchor_block_bytes,
|
||||
anchor_blobs_bytes,
|
||||
} => {
|
||||
info!(context.log(), "Starting checkpoint sync");
|
||||
if config.chain.genesis_backfill {
|
||||
@ -315,10 +333,25 @@ where
|
||||
.map_err(|e| format!("Unable to parse weak subj state SSZ: {:?}", e))?;
|
||||
let anchor_block = SignedBeaconBlock::from_ssz_bytes(&anchor_block_bytes, &spec)
|
||||
.map_err(|e| format!("Unable to parse weak subj block SSZ: {:?}", e))?;
|
||||
let anchor_blobs = if anchor_block.message().body().has_blobs() {
|
||||
let anchor_blobs_bytes = anchor_blobs_bytes
|
||||
.ok_or("Blobs for checkpoint must be provided using --checkpoint-blobs")?;
|
||||
Some(
|
||||
BlobSidecarList::from_ssz_bytes(&anchor_blobs_bytes)
|
||||
.map_err(|e| format!("Unable to parse weak subj blobs SSZ: {e:?}"))?,
|
||||
)
|
||||
} else {
|
||||
None
|
||||
};
|
||||
let genesis_state = genesis_state(&runtime_context, &config, log).await?;
|
||||
|
||||
builder
|
||||
.weak_subjectivity_state(anchor_state, anchor_block, genesis_state)
|
||||
.weak_subjectivity_state(
|
||||
anchor_state,
|
||||
anchor_block,
|
||||
anchor_blobs,
|
||||
genesis_state,
|
||||
)
|
||||
.map(|v| (v, None))?
|
||||
}
|
||||
ClientGenesis::CheckpointSyncUrl { url } => {
|
||||
@ -413,9 +446,33 @@ where
|
||||
e => format!("Error fetching finalized block from remote: {:?}", e),
|
||||
})?
|
||||
.ok_or("Finalized block missing from remote, it returned 404")?;
|
||||
let block_root = block.canonical_root();
|
||||
|
||||
debug!(context.log(), "Downloaded finalized block");
|
||||
|
||||
let blobs = if block.message().body().has_blobs() {
|
||||
debug!(context.log(), "Downloading finalized blobs");
|
||||
if let Some(response) = remote
|
||||
.get_blobs::<TEthSpec>(BlockId::Root(block_root), None)
|
||||
.await
|
||||
.map_err(|e| format!("Error fetching finalized blobs from remote: {e:?}"))?
|
||||
{
|
||||
debug!(context.log(), "Downloaded finalized blobs");
|
||||
Some(response.data)
|
||||
} else {
|
||||
warn!(
|
||||
context.log(),
|
||||
"Checkpoint server is missing blobs";
|
||||
"block_root" => %block_root,
|
||||
"hint" => "use a different URL or ask the provider to update",
|
||||
"impact" => "db will be slightly corrupt until these blobs are pruned",
|
||||
);
|
||||
None
|
||||
}
|
||||
} else {
|
||||
None
|
||||
};
|
||||
|
||||
let genesis_state = genesis_state(&runtime_context, &config, log).await?;
|
||||
|
||||
info!(
|
||||
@ -423,7 +480,7 @@ where
|
||||
"Loaded checkpoint block and state";
|
||||
"block_slot" => block.slot(),
|
||||
"state_slot" => state.slot(),
|
||||
"block_root" => ?block.canonical_root(),
|
||||
"block_root" => ?block_root,
|
||||
);
|
||||
|
||||
let service =
|
||||
@ -451,7 +508,7 @@ where
|
||||
});
|
||||
|
||||
builder
|
||||
.weak_subjectivity_state(state, block, genesis_state)
|
||||
.weak_subjectivity_state(state, block, blobs, genesis_state)
|
||||
.map(|v| (v, service))?
|
||||
}
|
||||
ClientGenesis::DepositContract => {
|
||||
@ -488,6 +545,7 @@ where
|
||||
network_senders: None,
|
||||
network_globals: None,
|
||||
beacon_processor_send: None,
|
||||
beacon_processor_reprocess_send: None,
|
||||
eth1_service: Some(genesis_service.eth1_service.clone()),
|
||||
log: context.log().clone(),
|
||||
sse_logging_components: runtime_context.sse_logging_components.clone(),
|
||||
@ -730,6 +788,9 @@ where
|
||||
network_globals: self.network_globals.clone(),
|
||||
eth1_service: self.eth1_service.clone(),
|
||||
beacon_processor_send: Some(beacon_processor_channels.beacon_processor_tx.clone()),
|
||||
beacon_processor_reprocess_send: Some(
|
||||
beacon_processor_channels.work_reprocessing_tx.clone(),
|
||||
),
|
||||
sse_logging_components: runtime_context.sse_logging_components.clone(),
|
||||
log: log.clone(),
|
||||
});
|
||||
@ -797,7 +858,7 @@ where
|
||||
}
|
||||
.spawn_manager(
|
||||
beacon_processor_channels.beacon_processor_rx,
|
||||
beacon_processor_channels.work_reprocessing_tx,
|
||||
beacon_processor_channels.work_reprocessing_tx.clone(),
|
||||
beacon_processor_channels.work_reprocessing_rx,
|
||||
None,
|
||||
beacon_chain.slot_clock.clone(),
|
||||
@ -858,25 +919,26 @@ where
|
||||
beacon_chain.slot_clock.clone(),
|
||||
);
|
||||
}
|
||||
}
|
||||
|
||||
// Spawn a service to publish BLS to execution changes at the Capella fork.
|
||||
if let Some(network_senders) = self.network_senders {
|
||||
let inner_chain = beacon_chain.clone();
|
||||
let broadcast_context =
|
||||
runtime_context.service_context("addr_bcast".to_string());
|
||||
let log = broadcast_context.log().clone();
|
||||
broadcast_context.executor.spawn(
|
||||
async move {
|
||||
broadcast_address_changes_at_capella(
|
||||
&inner_chain,
|
||||
network_senders.network_send(),
|
||||
&log,
|
||||
)
|
||||
.await
|
||||
},
|
||||
"addr_broadcast",
|
||||
);
|
||||
}
|
||||
// Spawn service to publish light_client updates at some interval into the slot.
|
||||
if let Some(light_client_server_rv) = self.light_client_server_rv {
|
||||
let inner_chain = beacon_chain.clone();
|
||||
let light_client_update_context =
|
||||
runtime_context.service_context("lc_update".to_string());
|
||||
let log = light_client_update_context.log().clone();
|
||||
light_client_update_context.executor.spawn(
|
||||
async move {
|
||||
compute_light_client_updates(
|
||||
&inner_chain,
|
||||
light_client_server_rv,
|
||||
beacon_processor_channels.work_reprocessing_tx,
|
||||
&log,
|
||||
)
|
||||
.await
|
||||
},
|
||||
"lc_update",
|
||||
);
|
||||
}
|
||||
|
||||
start_proposer_prep_service(runtime_context.executor.clone(), beacon_chain.clone());
|
||||
|
||||
39
beacon_node/client/src/compute_light_client_updates.rs
Normal file
39
beacon_node/client/src/compute_light_client_updates.rs
Normal file
@ -0,0 +1,39 @@
|
||||
use beacon_chain::{BeaconChain, BeaconChainTypes, LightClientProducerEvent};
|
||||
use beacon_processor::work_reprocessing_queue::ReprocessQueueMessage;
|
||||
use futures::channel::mpsc::Receiver;
|
||||
use futures::StreamExt;
|
||||
use slog::{error, Logger};
|
||||
use tokio::sync::mpsc::Sender;
|
||||
|
||||
// Each `LightClientProducerEvent` is ~200 bytes. With the light_client server producing only recent
|
||||
// updates it is okay to drop some events in case of overloading. In normal network conditions
|
||||
// there's one event emitted per block at most every 12 seconds, while consuming the event should
|
||||
// take a few milliseconds. 32 is a small enough arbitrary number.
|
||||
pub(crate) const LIGHT_CLIENT_SERVER_CHANNEL_CAPACITY: usize = 32;
|
||||
|
||||
pub async fn compute_light_client_updates<T: BeaconChainTypes>(
|
||||
chain: &BeaconChain<T>,
|
||||
mut light_client_server_rv: Receiver<LightClientProducerEvent<T::EthSpec>>,
|
||||
reprocess_tx: Sender<ReprocessQueueMessage>,
|
||||
log: &Logger,
|
||||
) {
|
||||
// Should only receive events for recent blocks, import_block filters by blocks close to clock.
|
||||
//
|
||||
// Intents to process SyncAggregates of all recent blocks sequentially, without skipping.
|
||||
// Uses a bounded receiver, so may drop some SyncAggregates if very overloaded. This is okay
|
||||
// since only the most recent updates have value.
|
||||
while let Some(event) = light_client_server_rv.next().await {
|
||||
let parent_root = event.0;
|
||||
|
||||
chain
|
||||
.recompute_and_cache_light_client_updates(event)
|
||||
.unwrap_or_else(|e| {
|
||||
error!(log, "error computing light_client updates {:?}", e);
|
||||
});
|
||||
|
||||
let msg = ReprocessQueueMessage::NewLightClientOptimisticUpdate { parent_root };
|
||||
if reprocess_tx.try_send(msg).is_err() {
|
||||
error!(log, "Failed to inform light client update"; "parent_root" => %parent_root)
|
||||
};
|
||||
}
|
||||
}
|
||||
@ -35,6 +35,7 @@ pub enum ClientGenesis {
|
||||
WeakSubjSszBytes {
|
||||
anchor_state_bytes: Vec<u8>,
|
||||
anchor_block_bytes: Vec<u8>,
|
||||
anchor_blobs_bytes: Option<Vec<u8>>,
|
||||
},
|
||||
CheckpointSyncUrl {
|
||||
url: SensitiveUrl,
|
||||
|
||||
@ -1,6 +1,6 @@
|
||||
extern crate slog;
|
||||
|
||||
mod address_change_broadcast;
|
||||
mod compute_light_client_updates;
|
||||
pub mod config;
|
||||
mod metrics;
|
||||
mod notifier;
|
||||
|
||||
@ -29,7 +29,6 @@ kzg = { workspace = true }
|
||||
state_processing = { workspace = true }
|
||||
superstruct = { workspace = true }
|
||||
lru = { workspace = true }
|
||||
exit-future = { workspace = true }
|
||||
tree_hash = { workspace = true }
|
||||
tree_hash_derive = { workspace = true }
|
||||
parking_lot = { workspace = true }
|
||||
@ -51,3 +50,5 @@ hash-db = "0.15.2"
|
||||
pretty_reqwest_error = { workspace = true }
|
||||
arc-swap = "1.6.0"
|
||||
eth2_network_config = { workspace = true }
|
||||
alloy-rlp = "0.3"
|
||||
alloy-consensus = { git = "https://github.com/alloy-rs/alloy.git", rev = "974d488bab5e21e9f17452a39a4bfa56677367b2" }
|
||||
|
||||
@ -1,92 +1,61 @@
|
||||
use crate::{
|
||||
json_structures::JsonWithdrawal,
|
||||
keccak::{keccak256, KeccakHasher},
|
||||
metrics, Error, ExecutionLayer,
|
||||
};
|
||||
use ethers_core::utils::rlp::RlpStream;
|
||||
use keccak_hash::KECCAK_EMPTY_LIST_RLP;
|
||||
use triehash::ordered_trie_root;
|
||||
use types::{
|
||||
map_execution_block_header_fields_base, Address, BeaconBlockRef, EthSpec, ExecutionBlockHash,
|
||||
map_execution_block_header_fields_base, Address, EthSpec, ExecutionBlockHash,
|
||||
ExecutionBlockHeader, ExecutionPayloadRef, Hash256, Hash64, Uint256,
|
||||
};
|
||||
|
||||
impl<T: EthSpec> ExecutionLayer<T> {
|
||||
/// Calculate the block hash of an execution block.
|
||||
///
|
||||
/// Return `(block_hash, transactions_root)`, where `transactions_root` is the root of the RLP
|
||||
/// transactions.
|
||||
pub fn calculate_execution_block_hash(
|
||||
payload: ExecutionPayloadRef<T>,
|
||||
parent_beacon_block_root: Hash256,
|
||||
) -> (ExecutionBlockHash, Hash256) {
|
||||
// Calculate the transactions root.
|
||||
// We're currently using a deprecated Parity library for this. We should move to a
|
||||
// better alternative when one appears, possibly following Reth.
|
||||
let rlp_transactions_root = ordered_trie_root::<KeccakHasher, _>(
|
||||
payload.transactions().iter().map(|txn_bytes| &**txn_bytes),
|
||||
);
|
||||
/// Calculate the block hash of an execution block.
|
||||
///
|
||||
/// Return `(block_hash, transactions_root)`, where `transactions_root` is the root of the RLP
|
||||
/// transactions.
|
||||
pub fn calculate_execution_block_hash<T: EthSpec>(
|
||||
payload: ExecutionPayloadRef<T>,
|
||||
parent_beacon_block_root: Option<Hash256>,
|
||||
) -> (ExecutionBlockHash, Hash256) {
|
||||
// Calculate the transactions root.
|
||||
// We're currently using a deprecated Parity library for this. We should move to a
|
||||
// better alternative when one appears, possibly following Reth.
|
||||
let rlp_transactions_root = ordered_trie_root::<KeccakHasher, _>(
|
||||
payload.transactions().iter().map(|txn_bytes| &**txn_bytes),
|
||||
);
|
||||
|
||||
// Calculate withdrawals root (post-Capella).
|
||||
let rlp_withdrawals_root = if let Ok(withdrawals) = payload.withdrawals() {
|
||||
Some(ordered_trie_root::<KeccakHasher, _>(
|
||||
withdrawals.iter().map(|withdrawal| {
|
||||
rlp_encode_withdrawal(&JsonWithdrawal::from(withdrawal.clone()))
|
||||
}),
|
||||
))
|
||||
} else {
|
||||
None
|
||||
};
|
||||
// Calculate withdrawals root (post-Capella).
|
||||
let rlp_withdrawals_root = if let Ok(withdrawals) = payload.withdrawals() {
|
||||
Some(ordered_trie_root::<KeccakHasher, _>(
|
||||
withdrawals
|
||||
.iter()
|
||||
.map(|withdrawal| rlp_encode_withdrawal(&JsonWithdrawal::from(withdrawal.clone()))),
|
||||
))
|
||||
} else {
|
||||
None
|
||||
};
|
||||
|
||||
let rlp_blob_gas_used = payload.blob_gas_used().ok();
|
||||
let rlp_excess_blob_gas = payload.excess_blob_gas().ok();
|
||||
let rlp_blob_gas_used = payload.blob_gas_used().ok();
|
||||
let rlp_excess_blob_gas = payload.excess_blob_gas().ok();
|
||||
|
||||
// Calculate parent beacon block root (post-Deneb).
|
||||
let rlp_parent_beacon_block_root = rlp_excess_blob_gas
|
||||
.as_ref()
|
||||
.map(|_| parent_beacon_block_root);
|
||||
// Construct the block header.
|
||||
let exec_block_header = ExecutionBlockHeader::from_payload(
|
||||
payload,
|
||||
KECCAK_EMPTY_LIST_RLP.as_fixed_bytes().into(),
|
||||
rlp_transactions_root,
|
||||
rlp_withdrawals_root,
|
||||
rlp_blob_gas_used,
|
||||
rlp_excess_blob_gas,
|
||||
parent_beacon_block_root,
|
||||
);
|
||||
|
||||
// Construct the block header.
|
||||
let exec_block_header = ExecutionBlockHeader::from_payload(
|
||||
payload,
|
||||
KECCAK_EMPTY_LIST_RLP.as_fixed_bytes().into(),
|
||||
rlp_transactions_root,
|
||||
rlp_withdrawals_root,
|
||||
rlp_blob_gas_used,
|
||||
rlp_excess_blob_gas,
|
||||
rlp_parent_beacon_block_root,
|
||||
);
|
||||
|
||||
// Hash the RLP encoding of the block header.
|
||||
let rlp_block_header = rlp_encode_block_header(&exec_block_header);
|
||||
(
|
||||
ExecutionBlockHash::from_root(keccak256(&rlp_block_header)),
|
||||
rlp_transactions_root,
|
||||
)
|
||||
}
|
||||
|
||||
/// Verify `payload.block_hash` locally within Lighthouse.
|
||||
///
|
||||
/// No remote calls to the execution client will be made, so this is quite a cheap check.
|
||||
pub fn verify_payload_block_hash(&self, block: BeaconBlockRef<T>) -> Result<(), Error> {
|
||||
let payload = block.execution_payload()?.execution_payload_ref();
|
||||
let parent_beacon_block_root = block.parent_root();
|
||||
|
||||
let _timer = metrics::start_timer(&metrics::EXECUTION_LAYER_VERIFY_BLOCK_HASH);
|
||||
|
||||
let (header_hash, rlp_transactions_root) =
|
||||
Self::calculate_execution_block_hash(payload, parent_beacon_block_root);
|
||||
|
||||
if header_hash != payload.block_hash() {
|
||||
return Err(Error::BlockHashMismatch {
|
||||
computed: header_hash,
|
||||
payload: payload.block_hash(),
|
||||
transactions_root: rlp_transactions_root,
|
||||
});
|
||||
}
|
||||
|
||||
Ok(())
|
||||
}
|
||||
// Hash the RLP encoding of the block header.
|
||||
let rlp_block_header = rlp_encode_block_header(&exec_block_header);
|
||||
(
|
||||
ExecutionBlockHash::from_root(keccak256(&rlp_block_header)),
|
||||
rlp_transactions_root,
|
||||
)
|
||||
}
|
||||
|
||||
/// RLP encode a withdrawal.
|
||||
|
||||
@ -17,7 +17,6 @@ pub use json_structures::{JsonWithdrawal, TransitionConfigurationV1};
|
||||
use pretty_reqwest_error::PrettyReqwestError;
|
||||
use reqwest::StatusCode;
|
||||
use serde::{Deserialize, Serialize};
|
||||
use state_processing::per_block_processing::deneb::kzg_commitment_to_versioned_hash;
|
||||
use std::convert::TryFrom;
|
||||
use strum::IntoStaticStr;
|
||||
use superstruct::superstruct;
|
||||
@ -26,14 +25,16 @@ pub use types::{
|
||||
ExecutionPayloadRef, FixedVector, ForkName, Hash256, Transactions, Uint256, VariableList,
|
||||
Withdrawal, Withdrawals,
|
||||
};
|
||||
use types::{
|
||||
BeaconStateError, ExecutionPayloadCapella, ExecutionPayloadDeneb, ExecutionPayloadMerge,
|
||||
KzgProofs, VersionedHash,
|
||||
};
|
||||
use types::{ExecutionPayloadCapella, ExecutionPayloadDeneb, ExecutionPayloadMerge, KzgProofs};
|
||||
|
||||
pub mod auth;
|
||||
pub mod http;
|
||||
pub mod json_structures;
|
||||
mod new_payload_request;
|
||||
|
||||
pub use new_payload_request::{
|
||||
NewPayloadRequest, NewPayloadRequestCapella, NewPayloadRequestDeneb, NewPayloadRequestMerge,
|
||||
};
|
||||
|
||||
pub const LATEST_TAG: &str = "latest";
|
||||
|
||||
@ -571,110 +572,6 @@ impl<E: EthSpec> ExecutionPayloadBodyV1<E> {
|
||||
}
|
||||
}
|
||||
|
||||
#[superstruct(
|
||||
variants(Merge, Capella, Deneb),
|
||||
variant_attributes(derive(Clone, Debug, PartialEq),),
|
||||
map_into(ExecutionPayload),
|
||||
map_ref_into(ExecutionPayloadRef),
|
||||
cast_error(
|
||||
ty = "BeaconStateError",
|
||||
expr = "BeaconStateError::IncorrectStateVariant"
|
||||
),
|
||||
partial_getter_error(
|
||||
ty = "BeaconStateError",
|
||||
expr = "BeaconStateError::IncorrectStateVariant"
|
||||
)
|
||||
)]
|
||||
#[derive(Clone, Debug, PartialEq)]
|
||||
pub struct NewPayloadRequest<E: EthSpec> {
|
||||
#[superstruct(only(Merge), partial_getter(rename = "execution_payload_merge"))]
|
||||
pub execution_payload: ExecutionPayloadMerge<E>,
|
||||
#[superstruct(only(Capella), partial_getter(rename = "execution_payload_capella"))]
|
||||
pub execution_payload: ExecutionPayloadCapella<E>,
|
||||
#[superstruct(only(Deneb), partial_getter(rename = "execution_payload_deneb"))]
|
||||
pub execution_payload: ExecutionPayloadDeneb<E>,
|
||||
#[superstruct(only(Deneb))]
|
||||
pub versioned_hashes: Vec<VersionedHash>,
|
||||
#[superstruct(only(Deneb))]
|
||||
pub parent_beacon_block_root: Hash256,
|
||||
}
|
||||
|
||||
impl<E: EthSpec> NewPayloadRequest<E> {
|
||||
pub fn parent_hash(&self) -> ExecutionBlockHash {
|
||||
match self {
|
||||
Self::Merge(payload) => payload.execution_payload.parent_hash,
|
||||
Self::Capella(payload) => payload.execution_payload.parent_hash,
|
||||
Self::Deneb(payload) => payload.execution_payload.parent_hash,
|
||||
}
|
||||
}
|
||||
|
||||
pub fn block_hash(&self) -> ExecutionBlockHash {
|
||||
match self {
|
||||
Self::Merge(payload) => payload.execution_payload.block_hash,
|
||||
Self::Capella(payload) => payload.execution_payload.block_hash,
|
||||
Self::Deneb(payload) => payload.execution_payload.block_hash,
|
||||
}
|
||||
}
|
||||
|
||||
pub fn block_number(&self) -> u64 {
|
||||
match self {
|
||||
Self::Merge(payload) => payload.execution_payload.block_number,
|
||||
Self::Capella(payload) => payload.execution_payload.block_number,
|
||||
Self::Deneb(payload) => payload.execution_payload.block_number,
|
||||
}
|
||||
}
|
||||
|
||||
pub fn into_execution_payload(self) -> ExecutionPayload<E> {
|
||||
map_new_payload_request_into_execution_payload!(self, |request, cons| {
|
||||
cons(request.execution_payload)
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
impl<'a, E: EthSpec> TryFrom<BeaconBlockRef<'a, E>> for NewPayloadRequest<E> {
|
||||
type Error = BeaconStateError;
|
||||
|
||||
fn try_from(block: BeaconBlockRef<'a, E>) -> Result<Self, Self::Error> {
|
||||
match block {
|
||||
BeaconBlockRef::Base(_) | BeaconBlockRef::Altair(_) => {
|
||||
Err(Self::Error::IncorrectStateVariant)
|
||||
}
|
||||
BeaconBlockRef::Merge(block_ref) => Ok(Self::Merge(NewPayloadRequestMerge {
|
||||
execution_payload: block_ref.body.execution_payload.execution_payload.clone(),
|
||||
})),
|
||||
BeaconBlockRef::Capella(block_ref) => Ok(Self::Capella(NewPayloadRequestCapella {
|
||||
execution_payload: block_ref.body.execution_payload.execution_payload.clone(),
|
||||
})),
|
||||
BeaconBlockRef::Deneb(block_ref) => Ok(Self::Deneb(NewPayloadRequestDeneb {
|
||||
execution_payload: block_ref.body.execution_payload.execution_payload.clone(),
|
||||
versioned_hashes: block_ref
|
||||
.body
|
||||
.blob_kzg_commitments
|
||||
.iter()
|
||||
.map(kzg_commitment_to_versioned_hash)
|
||||
.collect(),
|
||||
parent_beacon_block_root: block_ref.parent_root,
|
||||
})),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl<E: EthSpec> TryFrom<ExecutionPayload<E>> for NewPayloadRequest<E> {
|
||||
type Error = BeaconStateError;
|
||||
|
||||
fn try_from(payload: ExecutionPayload<E>) -> Result<Self, Self::Error> {
|
||||
match payload {
|
||||
ExecutionPayload::Merge(payload) => Ok(Self::Merge(NewPayloadRequestMerge {
|
||||
execution_payload: payload,
|
||||
})),
|
||||
ExecutionPayload::Capella(payload) => Ok(Self::Capella(NewPayloadRequestCapella {
|
||||
execution_payload: payload,
|
||||
})),
|
||||
ExecutionPayload::Deneb(_) => Err(Self::Error::IncorrectStateVariant),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Clone, Copy, Debug)]
|
||||
pub struct EngineCapabilities {
|
||||
pub new_payload_v1: bool,
|
||||
|
||||
@ -803,10 +803,10 @@ impl HttpJsonRpc {
|
||||
|
||||
pub async fn new_payload_v3<T: EthSpec>(
|
||||
&self,
|
||||
new_payload_request_deneb: NewPayloadRequestDeneb<T>,
|
||||
new_payload_request_deneb: NewPayloadRequestDeneb<'_, T>,
|
||||
) -> Result<PayloadStatusV1, Error> {
|
||||
let params = json!([
|
||||
JsonExecutionPayload::V3(new_payload_request_deneb.execution_payload.into()),
|
||||
JsonExecutionPayload::V3(new_payload_request_deneb.execution_payload.clone().into()),
|
||||
new_payload_request_deneb.versioned_hashes,
|
||||
new_payload_request_deneb.parent_beacon_block_root,
|
||||
]);
|
||||
@ -1079,7 +1079,7 @@ impl HttpJsonRpc {
|
||||
// new_payload that the execution engine supports
|
||||
pub async fn new_payload<T: EthSpec>(
|
||||
&self,
|
||||
new_payload_request: NewPayloadRequest<T>,
|
||||
new_payload_request: NewPayloadRequest<'_, T>,
|
||||
) -> Result<PayloadStatusV1, Error> {
|
||||
let engine_capabilities = self.get_engine_capabilities(None).await?;
|
||||
match new_payload_request {
|
||||
|
||||
@ -0,0 +1,332 @@
|
||||
use crate::{block_hash::calculate_execution_block_hash, metrics, Error};
|
||||
|
||||
use crate::versioned_hashes::verify_versioned_hashes;
|
||||
use state_processing::per_block_processing::deneb::kzg_commitment_to_versioned_hash;
|
||||
use superstruct::superstruct;
|
||||
use types::{
|
||||
BeaconBlockRef, BeaconStateError, EthSpec, ExecutionBlockHash, ExecutionPayload,
|
||||
ExecutionPayloadRef, Hash256, VersionedHash,
|
||||
};
|
||||
use types::{ExecutionPayloadCapella, ExecutionPayloadDeneb, ExecutionPayloadMerge};
|
||||
|
||||
#[superstruct(
|
||||
variants(Merge, Capella, Deneb),
|
||||
variant_attributes(derive(Clone, Debug, PartialEq),),
|
||||
map_into(ExecutionPayload),
|
||||
map_ref_into(ExecutionPayloadRef),
|
||||
cast_error(
|
||||
ty = "BeaconStateError",
|
||||
expr = "BeaconStateError::IncorrectStateVariant"
|
||||
),
|
||||
partial_getter_error(
|
||||
ty = "BeaconStateError",
|
||||
expr = "BeaconStateError::IncorrectStateVariant"
|
||||
)
|
||||
)]
|
||||
#[derive(Clone, Debug, PartialEq)]
|
||||
pub struct NewPayloadRequest<'block, E: EthSpec> {
|
||||
#[superstruct(only(Merge), partial_getter(rename = "execution_payload_merge"))]
|
||||
pub execution_payload: &'block ExecutionPayloadMerge<E>,
|
||||
#[superstruct(only(Capella), partial_getter(rename = "execution_payload_capella"))]
|
||||
pub execution_payload: &'block ExecutionPayloadCapella<E>,
|
||||
#[superstruct(only(Deneb), partial_getter(rename = "execution_payload_deneb"))]
|
||||
pub execution_payload: &'block ExecutionPayloadDeneb<E>,
|
||||
#[superstruct(only(Deneb))]
|
||||
pub versioned_hashes: Vec<VersionedHash>,
|
||||
#[superstruct(only(Deneb))]
|
||||
pub parent_beacon_block_root: Hash256,
|
||||
}
|
||||
|
||||
impl<'block, E: EthSpec> NewPayloadRequest<'block, E> {
|
||||
pub fn parent_hash(&self) -> ExecutionBlockHash {
|
||||
match self {
|
||||
Self::Merge(payload) => payload.execution_payload.parent_hash,
|
||||
Self::Capella(payload) => payload.execution_payload.parent_hash,
|
||||
Self::Deneb(payload) => payload.execution_payload.parent_hash,
|
||||
}
|
||||
}
|
||||
|
||||
pub fn block_hash(&self) -> ExecutionBlockHash {
|
||||
match self {
|
||||
Self::Merge(payload) => payload.execution_payload.block_hash,
|
||||
Self::Capella(payload) => payload.execution_payload.block_hash,
|
||||
Self::Deneb(payload) => payload.execution_payload.block_hash,
|
||||
}
|
||||
}
|
||||
|
||||
pub fn block_number(&self) -> u64 {
|
||||
match self {
|
||||
Self::Merge(payload) => payload.execution_payload.block_number,
|
||||
Self::Capella(payload) => payload.execution_payload.block_number,
|
||||
Self::Deneb(payload) => payload.execution_payload.block_number,
|
||||
}
|
||||
}
|
||||
|
||||
pub fn execution_payload_ref(&self) -> ExecutionPayloadRef<'block, E> {
|
||||
match self {
|
||||
Self::Merge(request) => ExecutionPayloadRef::Merge(request.execution_payload),
|
||||
Self::Capella(request) => ExecutionPayloadRef::Capella(request.execution_payload),
|
||||
Self::Deneb(request) => ExecutionPayloadRef::Deneb(request.execution_payload),
|
||||
}
|
||||
}
|
||||
|
||||
pub fn into_execution_payload(self) -> ExecutionPayload<E> {
|
||||
match self {
|
||||
Self::Merge(request) => ExecutionPayload::Merge(request.execution_payload.clone()),
|
||||
Self::Capella(request) => ExecutionPayload::Capella(request.execution_payload.clone()),
|
||||
Self::Deneb(request) => ExecutionPayload::Deneb(request.execution_payload.clone()),
|
||||
}
|
||||
}
|
||||
|
||||
/// Performs the required verifications of the payload when the chain is optimistically syncing.
|
||||
///
|
||||
/// ## Specification
|
||||
///
|
||||
/// Performs the verifications in the `verify_and_notify_new_payload` function:
|
||||
///
|
||||
/// https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.2/specs/deneb/beacon-chain.md#modified-verify_and_notify_new_payload
|
||||
pub fn perform_optimistic_sync_verifications(&self) -> Result<(), Error> {
|
||||
self.verify_payload_block_hash()?;
|
||||
self.verify_versioned_hashes()?;
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
/// Verify the block hash is consistent locally within Lighthouse.
|
||||
///
|
||||
/// ## Specification
|
||||
///
|
||||
/// Equivalent to `is_valid_block_hash` in the spec:
|
||||
/// https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.2/specs/deneb/beacon-chain.md#is_valid_block_hash
|
||||
pub fn verify_payload_block_hash(&self) -> Result<(), Error> {
|
||||
let payload = self.execution_payload_ref();
|
||||
let parent_beacon_block_root = self.parent_beacon_block_root().ok().cloned();
|
||||
|
||||
let _timer = metrics::start_timer(&metrics::EXECUTION_LAYER_VERIFY_BLOCK_HASH);
|
||||
|
||||
let (header_hash, rlp_transactions_root) =
|
||||
calculate_execution_block_hash(payload, parent_beacon_block_root);
|
||||
|
||||
if header_hash != self.block_hash() {
|
||||
return Err(Error::BlockHashMismatch {
|
||||
computed: header_hash,
|
||||
payload: payload.block_hash(),
|
||||
transactions_root: rlp_transactions_root,
|
||||
});
|
||||
}
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
/// Verify the versioned hashes computed by the blob transactions match the versioned hashes computed from the commitments.
|
||||
///
|
||||
/// ## Specification
|
||||
///
|
||||
/// Equivalent to `is_valid_versioned_hashes` in the spec:
|
||||
/// https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.2/specs/deneb/beacon-chain.md#is_valid_versioned_hashes
|
||||
pub fn verify_versioned_hashes(&self) -> Result<(), Error> {
|
||||
if let Ok(versioned_hashes) = self.versioned_hashes() {
|
||||
verify_versioned_hashes(self.execution_payload_ref(), versioned_hashes)
|
||||
.map_err(Error::VerifyingVersionedHashes)?;
|
||||
}
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
|
||||
impl<'a, E: EthSpec> TryFrom<BeaconBlockRef<'a, E>> for NewPayloadRequest<'a, E> {
|
||||
type Error = BeaconStateError;
|
||||
|
||||
fn try_from(block: BeaconBlockRef<'a, E>) -> Result<Self, Self::Error> {
|
||||
match block {
|
||||
BeaconBlockRef::Base(_) | BeaconBlockRef::Altair(_) => {
|
||||
Err(Self::Error::IncorrectStateVariant)
|
||||
}
|
||||
BeaconBlockRef::Merge(block_ref) => Ok(Self::Merge(NewPayloadRequestMerge {
|
||||
execution_payload: &block_ref.body.execution_payload.execution_payload,
|
||||
})),
|
||||
BeaconBlockRef::Capella(block_ref) => Ok(Self::Capella(NewPayloadRequestCapella {
|
||||
execution_payload: &block_ref.body.execution_payload.execution_payload,
|
||||
})),
|
||||
BeaconBlockRef::Deneb(block_ref) => Ok(Self::Deneb(NewPayloadRequestDeneb {
|
||||
execution_payload: &block_ref.body.execution_payload.execution_payload,
|
||||
versioned_hashes: block_ref
|
||||
.body
|
||||
.blob_kzg_commitments
|
||||
.iter()
|
||||
.map(kzg_commitment_to_versioned_hash)
|
||||
.collect(),
|
||||
parent_beacon_block_root: block_ref.parent_root,
|
||||
})),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl<'a, E: EthSpec> TryFrom<ExecutionPayloadRef<'a, E>> for NewPayloadRequest<'a, E> {
|
||||
type Error = BeaconStateError;
|
||||
|
||||
fn try_from(payload: ExecutionPayloadRef<'a, E>) -> Result<Self, Self::Error> {
|
||||
match payload {
|
||||
ExecutionPayloadRef::Merge(payload) => Ok(Self::Merge(NewPayloadRequestMerge {
|
||||
execution_payload: payload,
|
||||
})),
|
||||
ExecutionPayloadRef::Capella(payload) => Ok(Self::Capella(NewPayloadRequestCapella {
|
||||
execution_payload: payload,
|
||||
})),
|
||||
ExecutionPayloadRef::Deneb(_) => Err(Self::Error::IncorrectStateVariant),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod test {
|
||||
use crate::versioned_hashes::Error as VersionedHashError;
|
||||
use crate::{Error, NewPayloadRequest};
|
||||
use state_processing::per_block_processing::deneb::kzg_commitment_to_versioned_hash;
|
||||
use types::{BeaconBlock, ExecPayload, ExecutionBlockHash, Hash256, MainnetEthSpec};
|
||||
|
||||
#[test]
|
||||
fn test_optimistic_sync_verifications_valid_block() {
|
||||
let beacon_block = get_valid_beacon_block();
|
||||
let new_payload_request = NewPayloadRequest::try_from(beacon_block.to_ref())
|
||||
.expect("should create new payload request");
|
||||
|
||||
assert!(
|
||||
new_payload_request
|
||||
.perform_optimistic_sync_verifications()
|
||||
.is_ok(),
|
||||
"validations should pass"
|
||||
);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_optimistic_sync_verifications_bad_block_hash() {
|
||||
let mut beacon_block = get_valid_beacon_block();
|
||||
let correct_block_hash = beacon_block
|
||||
.body()
|
||||
.execution_payload()
|
||||
.expect("should get payload")
|
||||
.block_hash();
|
||||
let invalid_block_hash = ExecutionBlockHash(Hash256::repeat_byte(0x42));
|
||||
|
||||
// now mutate the block hash
|
||||
beacon_block
|
||||
.body_mut()
|
||||
.execution_payload_deneb_mut()
|
||||
.expect("should get payload")
|
||||
.execution_payload
|
||||
.block_hash = invalid_block_hash;
|
||||
|
||||
let new_payload_request = NewPayloadRequest::try_from(beacon_block.to_ref())
|
||||
.expect("should create new payload request");
|
||||
let verification_result = new_payload_request.perform_optimistic_sync_verifications();
|
||||
println!("verification_result: {:?}", verification_result);
|
||||
let got_expected_result = match verification_result {
|
||||
Err(Error::BlockHashMismatch {
|
||||
computed, payload, ..
|
||||
}) => computed == correct_block_hash && payload == invalid_block_hash,
|
||||
_ => false,
|
||||
};
|
||||
assert!(got_expected_result, "should return expected error");
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_optimistic_sync_verifications_bad_versioned_hashes() {
|
||||
let mut beacon_block = get_valid_beacon_block();
|
||||
|
||||
let mut commitments: Vec<_> = beacon_block
|
||||
.body()
|
||||
.blob_kzg_commitments()
|
||||
.expect("should get commitments")
|
||||
.clone()
|
||||
.into();
|
||||
|
||||
let correct_versioned_hash = kzg_commitment_to_versioned_hash(
|
||||
commitments.last().expect("should get last commitment"),
|
||||
);
|
||||
|
||||
// mutate the last commitment
|
||||
commitments
|
||||
.last_mut()
|
||||
.expect("should get last commitment")
|
||||
.0[0] = 0x42;
|
||||
|
||||
// calculate versioned hash from mutated commitment
|
||||
let bad_versioned_hash = kzg_commitment_to_versioned_hash(
|
||||
commitments.last().expect("should get last commitment"),
|
||||
);
|
||||
|
||||
*beacon_block
|
||||
.body_mut()
|
||||
.blob_kzg_commitments_mut()
|
||||
.expect("should get commitments") = commitments.into();
|
||||
|
||||
let new_payload_request = NewPayloadRequest::try_from(beacon_block.to_ref())
|
||||
.expect("should create new payload request");
|
||||
let verification_result = new_payload_request.perform_optimistic_sync_verifications();
|
||||
println!("verification_result: {:?}", verification_result);
|
||||
|
||||
let got_expected_result = match verification_result {
|
||||
Err(Error::VerifyingVersionedHashes(VersionedHashError::VersionHashMismatch {
|
||||
expected,
|
||||
found,
|
||||
})) => expected == bad_versioned_hash && found == correct_versioned_hash,
|
||||
_ => false,
|
||||
};
|
||||
assert!(got_expected_result, "should return expected error");
|
||||
}
|
||||
|
||||
fn get_valid_beacon_block() -> BeaconBlock<MainnetEthSpec> {
|
||||
BeaconBlock::Deneb(serde_json::from_str(r#"{
|
||||
"slot": "88160",
|
||||
"proposer_index": "583",
|
||||
"parent_root": "0x60770cd86a497ca3aa2e91f1687aa3ebafac87af52c30a920b5f40bd9e930eb6",
|
||||
"state_root": "0x4a0e0abbcbcf576f2cb7387c4289ab13b8a128e32127642f056143d6164941a6",
|
||||
"body": {
|
||||
"randao_reveal": "0xb5253d5739496abc4f67c7c92e39e46cca452c2fdfc5275e3e0426a012aa62df82f47f7dece348e28db4bb212f0e793d187120bbd47b8031ed79344116eb4128f0ce0b05ba18cd615bb13966c1bd7d89e23cc769c8e4d8e4a63755f623ac3bed",
|
||||
"eth1_data": {
|
||||
"deposit_root": "0xe4785ac914d8673797f886e3151ce2647f81ae070c7ddb6845e65fd1c47d1222",
|
||||
"deposit_count": "1181",
|
||||
"block_hash": "0x010671bdfbfce6b0071984a06a7ded6deef13b4f8fdbae402c606a7a0c8780d1"
|
||||
},
|
||||
"graffiti": "0x6c6f6465737461722f6765746800000000000000000000000000000000000000",
|
||||
"proposer_slashings": [],
|
||||
"attester_slashings": [],
|
||||
"attestations": [],
|
||||
"deposits": [],
|
||||
"voluntary_exits": [],
|
||||
"sync_aggregate": {
|
||||
"sync_committee_bits": "0xfebffffffebfff7fff7f7fffbbefffff6affffffffbfffffefffebfffdbf77fff7fd77ffffefffdff7ffffeffffffe7e5ffffffdefffff7ffbffff7fffffffff",
|
||||
"sync_committee_signature": "0x91939b5baf2a6f52d405b6dd396f5346ec435eca7d25912c91cc6a2f7030d870d68bebe4f2b21872a06929ff4cf3e5e9191053cb43eb24ebe34b9a75fb88a3acd06baf329c87f68bd664b49891260c698d7bca0f5365870b5b2b3a76f582156c"
|
||||
},
|
||||
"execution_payload": {
|
||||
"parent_hash": "0xa6f3ed782a992f79ad38da2af91b3e8923c71b801c50bc9033bb35a2e1da885f",
|
||||
"fee_recipient": "0xf97e180c050e5ab072211ad2c213eb5aee4df134",
|
||||
"state_root": "0x3bfd1a7f309ed35048c349a8daf01815bdc09a6d5df86ea77d1056f248ba2017",
|
||||
"receipts_root": "0xcb5b8ffea57cd0fa87194d49bc8bb7fad08c93c9934b886489503c328d15fd36",
|
||||
"logs_bloom": "0x002000000000000000000000800000000000000000001040000000000000000000000001000000000000000000000000000000000000100000000020000c0800000000000000008000000008000000200000800000000000000000000000000000000000000000008000000000008000000000000000000002000010000000000000000000000000000000000000000000000000000000080000004000000000800000000000000000000100000000000000000000000000000000000800000000000102000000000000000000000000000000080000001000000000000000000000000000000000000000000000000000000000000000000000000000000000",
|
||||
"prev_randao": "0xb2693020177d99ffbd4c267023be172d759e7306ff51b0e7d677d3148fbd7f1d",
|
||||
"block_number": "74807",
|
||||
"gas_limit": "30000000",
|
||||
"gas_used": "128393",
|
||||
"timestamp": "1697039520",
|
||||
"extra_data": "0xd883010d03846765746888676f312e32312e31856c696e7578",
|
||||
"base_fee_per_gas": "7",
|
||||
"block_hash": "0xc64f3a43c64aeb98518a237f6279fa03095b9f95ca673c860ad7f16fb9340062",
|
||||
"transactions": [
|
||||
"0x02f9017a8501a1f0ff4382317585012a05f2008512a05f2000830249f094c1b0bc605e2c808aa0867bfc98e51a1fe3e9867f80b901040cc7326300000000000000000000000000000000000000000000000000000000000000a0000000000000000000000000000000000000000000000000036e534e16b8920d000000000000000000000000fb3e9c7cb92443931ee6b5b9728598d4eb9618c1000000000000000000000000fc7360b3b28cf4204268a8354dbec60720d155d2000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000020000000000000000000000009a054a063f0fe7b9c68de8df91aaa5e96c15ab540000000000000000000000000c8d41b8fcc066cdabaf074d78e5153e8ce018a9c080a07dd9be0d014ffcd5b6883d0917c66b74ba51f0d976c8fc5674af192af6fa9450a02dad2c660974c125f5f22b1e6e8862a292e08cc2b4cafda35af650ee62868a43",
|
||||
"0x03f8db8501a1f0ff430d84773594008504a817c8008252089454e594b6de0aa4b0188cd1549dd7ba715a455d078080c08504a817c800f863a001253ce00f525e3495cffa0b865eadb90a4c5ee812185cc796af74b6ec0a5dd7a0010720372a4d7dcab84413ed0cfc164fb91fb6ef1562ec2f7a82e912a1d9e129a0015a73e97950397896ed2c47dcab7c0360220bcfb413a8f210a7b6e6264e698880a04402cb0f13c17ef41dca106b1e1520c7aadcbe62984d81171e29914f587d67c1a02db62a8edb581917958e4a3884e7eececbaec114c5ee496e238033e896f997ac"
|
||||
],
|
||||
"withdrawals": [],
|
||||
"blob_gas_used": "393216",
|
||||
"excess_blob_gas": "58720256"
|
||||
},
|
||||
"bls_to_execution_changes": [],
|
||||
"blob_kzg_commitments": [
|
||||
"0xa7accb7a25224a8c2e0cee9cd569fc1798665bfbfe780e08945fa9098ec61da4061f5b04e750a88d3340a801850a54fa",
|
||||
"0xac7b47f99836510ae9076dc5f5da1f370679dea1d47073307a14cbb125cdc7822ae619637135777cb40e13d897fd00a7",
|
||||
"0x997794110b9655833a88ad5a4ec40a3dc7964877bfbeb04ca1abe1d51bdc43e20e4c5757028896d298d7da954a6f14a1"
|
||||
]
|
||||
}
|
||||
}"#).expect("should decode"))
|
||||
}
|
||||
}
|
||||
@ -7,6 +7,7 @@
|
||||
use crate::payload_cache::PayloadCache;
|
||||
use arc_swap::ArcSwapOption;
|
||||
use auth::{strip_prefix, Auth, JwtKey};
|
||||
pub use block_hash::calculate_execution_block_hash;
|
||||
use builder_client::BuilderHttpClient;
|
||||
pub use engine_api::EngineCapabilities;
|
||||
use engine_api::Error as ApiError;
|
||||
@ -61,6 +62,7 @@ mod metrics;
|
||||
pub mod payload_cache;
|
||||
mod payload_status;
|
||||
pub mod test_utils;
|
||||
mod versioned_hashes;
|
||||
|
||||
/// Indicates the default jwt authenticated execution endpoint.
|
||||
pub const DEFAULT_EXECUTION_ENDPOINT: &str = "http://localhost:8551/";
|
||||
@ -141,6 +143,7 @@ pub enum Error {
|
||||
InvalidBlobConversion(String),
|
||||
BeaconStateError(BeaconStateError),
|
||||
PayloadTypeMismatch,
|
||||
VerifyingVersionedHashes(versioned_hashes::Error),
|
||||
}
|
||||
|
||||
impl From<BeaconStateError> for Error {
|
||||
@ -1321,7 +1324,7 @@ impl<T: EthSpec> ExecutionLayer<T> {
|
||||
/// Maps to the `engine_newPayload` JSON-RPC call.
|
||||
pub async fn notify_new_payload(
|
||||
&self,
|
||||
new_payload_request: NewPayloadRequest<T>,
|
||||
new_payload_request: NewPayloadRequest<'_, T>,
|
||||
) -> Result<PayloadStatus, Error> {
|
||||
let _timer = metrics::start_timer_vec(
|
||||
&metrics::EXECUTION_LAYER_REQUEST_TIMES,
|
||||
|
||||
@ -699,7 +699,7 @@ pub fn generate_blobs<E: EthSpec>(
|
||||
Ok((bundle, transactions.into()))
|
||||
}
|
||||
|
||||
fn static_valid_tx<T: EthSpec>() -> Result<Transaction<T::MaxBytesPerTransaction>, String> {
|
||||
pub fn static_valid_tx<T: EthSpec>() -> Result<Transaction<T::MaxBytesPerTransaction>, String> {
|
||||
// This is a real transaction hex encoded, but we don't care about the contents of the transaction.
|
||||
let transaction: EthersTransaction = serde_json::from_str(
|
||||
r#"{
|
||||
|
||||
@ -54,7 +54,8 @@ impl Operation {
|
||||
}
|
||||
|
||||
#[derive(Debug)]
|
||||
struct Custom(String);
|
||||
// We don't use the string value directly, but it's used in the Debug impl which is required by `warp::reject::Reject`.
|
||||
struct Custom(#[allow(dead_code)] String);
|
||||
|
||||
impl warp::reject::Reject for Custom {}
|
||||
|
||||
|
||||
@ -244,7 +244,7 @@ impl<T: EthSpec> MockExecutionLayer<T> {
|
||||
// TODO: again consider forks
|
||||
let status = self
|
||||
.el
|
||||
.notify_new_payload(payload.try_into().unwrap())
|
||||
.notify_new_payload(payload.to_ref().try_into().unwrap())
|
||||
.await
|
||||
.unwrap();
|
||||
assert_eq!(status, PayloadStatus::Valid);
|
||||
|
||||
@ -25,8 +25,8 @@ use warp::{http::StatusCode, Filter, Rejection};
|
||||
|
||||
use crate::EngineCapabilities;
|
||||
pub use execution_block_generator::{
|
||||
generate_blobs, generate_genesis_block, generate_genesis_header, generate_pow_block, Block,
|
||||
ExecutionBlockGenerator,
|
||||
generate_blobs, generate_genesis_block, generate_genesis_header, generate_pow_block,
|
||||
static_valid_tx, Block, ExecutionBlockGenerator,
|
||||
};
|
||||
pub use hook::Hook;
|
||||
pub use mock_builder::{MockBuilder, Operation};
|
||||
@ -599,8 +599,8 @@ async fn handle_rejection(err: Rejection) -> Result<impl warp::Reply, Infallible
|
||||
let code;
|
||||
let message;
|
||||
|
||||
if let Some(e) = err.find::<AuthError>() {
|
||||
message = format!("Authorization error: {:?}", e);
|
||||
if let Some(AuthError(e)) = err.find::<AuthError>() {
|
||||
message = format!("Authorization error: {}", e);
|
||||
code = StatusCode::UNAUTHORIZED;
|
||||
} else {
|
||||
message = "BAD_REQUEST".to_string();
|
||||
|
||||
135
beacon_node/execution_layer/src/versioned_hashes.rs
Normal file
135
beacon_node/execution_layer/src/versioned_hashes.rs
Normal file
@ -0,0 +1,135 @@
|
||||
extern crate alloy_consensus;
|
||||
extern crate alloy_rlp;
|
||||
use alloy_consensus::TxEnvelope;
|
||||
use alloy_rlp::Decodable;
|
||||
use types::{EthSpec, ExecutionPayloadRef, Hash256, Unsigned, VersionedHash};
|
||||
|
||||
#[derive(Debug)]
|
||||
pub enum Error {
|
||||
DecodingTransaction(String),
|
||||
LengthMismatch { expected: usize, found: usize },
|
||||
VersionHashMismatch { expected: Hash256, found: Hash256 },
|
||||
}
|
||||
|
||||
pub fn verify_versioned_hashes<E: EthSpec>(
|
||||
execution_payload: ExecutionPayloadRef<E>,
|
||||
expected_versioned_hashes: &[VersionedHash],
|
||||
) -> Result<(), Error> {
|
||||
let versioned_hashes =
|
||||
extract_versioned_hashes_from_transactions::<E>(execution_payload.transactions())?;
|
||||
if versioned_hashes.len() != expected_versioned_hashes.len() {
|
||||
return Err(Error::LengthMismatch {
|
||||
expected: expected_versioned_hashes.len(),
|
||||
found: versioned_hashes.len(),
|
||||
});
|
||||
}
|
||||
for (found, expected) in versioned_hashes
|
||||
.iter()
|
||||
.zip(expected_versioned_hashes.iter())
|
||||
{
|
||||
if found != expected {
|
||||
return Err(Error::VersionHashMismatch {
|
||||
expected: *expected,
|
||||
found: *found,
|
||||
});
|
||||
}
|
||||
}
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
pub fn extract_versioned_hashes_from_transactions<E: EthSpec>(
|
||||
transactions: &types::Transactions<E>,
|
||||
) -> Result<Vec<VersionedHash>, Error> {
|
||||
let mut versioned_hashes = Vec::new();
|
||||
|
||||
for tx in transactions {
|
||||
match beacon_tx_to_tx_envelope(tx)? {
|
||||
TxEnvelope::Eip4844(signed_tx_eip4844) => {
|
||||
versioned_hashes.extend(
|
||||
signed_tx_eip4844
|
||||
.tx()
|
||||
.blob_versioned_hashes
|
||||
.iter()
|
||||
.map(|fb| Hash256::from(fb.0)),
|
||||
);
|
||||
}
|
||||
// enumerating all variants explicitly to make pattern irrefutable
|
||||
// in case new types are added in the future which also have blobs
|
||||
TxEnvelope::Legacy(_)
|
||||
| TxEnvelope::TaggedLegacy(_)
|
||||
| TxEnvelope::Eip2930(_)
|
||||
| TxEnvelope::Eip1559(_) => {}
|
||||
}
|
||||
}
|
||||
|
||||
Ok(versioned_hashes)
|
||||
}
|
||||
|
||||
pub fn beacon_tx_to_tx_envelope<N: Unsigned>(
|
||||
tx: &types::Transaction<N>,
|
||||
) -> Result<TxEnvelope, Error> {
|
||||
let tx_bytes = Vec::from(tx.clone());
|
||||
TxEnvelope::decode(&mut tx_bytes.as_slice())
|
||||
.map_err(|e| Error::DecodingTransaction(e.to_string()))
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod test {
|
||||
use super::*;
|
||||
use crate::test_utils::static_valid_tx;
|
||||
use alloy_consensus::{TxKind, TxLegacy};
|
||||
|
||||
type E = types::MainnetEthSpec;
|
||||
|
||||
#[test]
|
||||
fn test_decode_static_transaction() {
|
||||
let valid_tx = static_valid_tx::<E>().expect("should give me known valid transaction");
|
||||
let tx_envelope = beacon_tx_to_tx_envelope(&valid_tx).expect("should decode tx");
|
||||
let TxEnvelope::Legacy(signed_tx) = tx_envelope else {
|
||||
panic!("should decode to legacy transaction");
|
||||
};
|
||||
|
||||
assert!(matches!(
|
||||
signed_tx.tx(),
|
||||
TxLegacy {
|
||||
chain_id: Some(0x01),
|
||||
nonce: 0x15,
|
||||
gas_price: 0x4a817c800,
|
||||
to: TxKind::Call(..),
|
||||
..
|
||||
}
|
||||
));
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_extract_versioned_hashes() {
|
||||
use serde::Deserialize;
|
||||
|
||||
#[derive(Deserialize)]
|
||||
#[serde(transparent)]
|
||||
struct TestTransactions<E: EthSpec>(
|
||||
#[serde(with = "ssz_types::serde_utils::list_of_hex_var_list")] types::Transactions<E>,
|
||||
);
|
||||
|
||||
let TestTransactions(raw_transactions): TestTransactions<E> = serde_json::from_str(r#"[
|
||||
"0x03f901388501a1f0ff430f843b9aca00843b9aca0082520894e7249813d8ccf6fa95a2203f46a64166073d58878080c002f8c6a0012e98362c814f1724262c0d211a1463418a5f6382a8d457b37a2698afbe7b5ea00100ef985761395dfa8ed5ce91f3f2180b612401909e4cb8f33b90c8a454d9baa0013d45411623b90d90f916e4025ada74b453dd4ca093c017c838367c9de0f801a001753e2af0b1e70e7ef80541355b2a035cc9b2c177418bb2a4402a9b346cf84da0011789b520a8068094a92aa0b04db8d8ef1c6c9818947c5210821732b8744049a0011c4c4f95597305daa5f62bf5f690e37fa11f5de05a95d05cac4e2119e394db80a0ccd86a742af0e042d08cbb35d910ddc24bbc6538f9e53be6620d4b6e1bb77662a01a8bacbc614940ac2f5c23ffc00a122c9f085046883de65c88ab0edb859acb99",
|
||||
"0x02f9017a8501a1f0ff4382363485012a05f2008512a05f2000830249f094c1b0bc605e2c808aa0867bfc98e51a1fe3e9867f80b901040cc7326300000000000000000000000000000000000000000000000000000000000000a000000000000000000000000000000000000000000000000009445a285baa43e00000000000000000000000000c500931f24edb821cef6e28f7adb33b38578c82000000000000000000000000fc7360b3b28cf4204268a8354dbec60720d155d2000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000020000000000000000000000009a054a063f0fe7b9c68de8df91aaa5e96c15ab540000000000000000000000000c8d41b8fcc066cdabaf074d78e5153e8ce018a9c080a008e14475c1173cd9f5740c24c08b793f9e16c36c08fa73769db95050e31e3396a019767dcdda26c4a774ca28c9df15d0c20e43bd07bd33ee0f84d6096cb5a1ebed"
|
||||
]"#).expect("should get raw transactions");
|
||||
let expected_versioned_hashes = vec![
|
||||
"0x012e98362c814f1724262c0d211a1463418a5f6382a8d457b37a2698afbe7b5e",
|
||||
"0x0100ef985761395dfa8ed5ce91f3f2180b612401909e4cb8f33b90c8a454d9ba",
|
||||
"0x013d45411623b90d90f916e4025ada74b453dd4ca093c017c838367c9de0f801",
|
||||
"0x01753e2af0b1e70e7ef80541355b2a035cc9b2c177418bb2a4402a9b346cf84d",
|
||||
"0x011789b520a8068094a92aa0b04db8d8ef1c6c9818947c5210821732b8744049",
|
||||
"0x011c4c4f95597305daa5f62bf5f690e37fa11f5de05a95d05cac4e2119e394db",
|
||||
]
|
||||
.into_iter()
|
||||
.map(|tx| Hash256::from_slice(&hex::decode(&tx[2..]).expect("should decode hex")))
|
||||
.collect::<Vec<_>>();
|
||||
|
||||
let versioned_hashes = extract_versioned_hashes_from_transactions::<E>(&raw_transactions)
|
||||
.expect("should get versioned hashes");
|
||||
assert_eq!(versioned_hashes, expected_versioned_hashes);
|
||||
}
|
||||
}
|
||||
@ -14,11 +14,12 @@ const MAX_REQUEST_RANGE_EPOCHS: usize = 100;
|
||||
const BLOCK_ROOT_CHUNK_SIZE: usize = 100;
|
||||
|
||||
#[derive(Debug)]
|
||||
// We don't use the inner values directly, but they're used in the Debug impl.
|
||||
enum AttestationPerformanceError {
|
||||
BlockReplay(BlockReplayError),
|
||||
BeaconState(BeaconStateError),
|
||||
ParticipationCache(ParticipationCacheError),
|
||||
UnableToFindValidator(usize),
|
||||
BlockReplay(#[allow(dead_code)] BlockReplayError),
|
||||
BeaconState(#[allow(dead_code)] BeaconStateError),
|
||||
ParticipationCache(#[allow(dead_code)] ParticipationCacheError),
|
||||
UnableToFindValidator(#[allow(dead_code)] usize),
|
||||
}
|
||||
|
||||
impl From<BlockReplayError> for AttestationPerformanceError {
|
||||
|
||||
@ -19,10 +19,11 @@ use warp_utils::reject::{beacon_chain_error, custom_bad_request, custom_server_e
|
||||
const BLOCK_ROOT_CHUNK_SIZE: usize = 100;
|
||||
|
||||
#[derive(Debug)]
|
||||
// We don't use the inner values directly, but they're used in the Debug impl.
|
||||
enum PackingEfficiencyError {
|
||||
BlockReplay(BlockReplayError),
|
||||
BeaconState(BeaconStateError),
|
||||
CommitteeStoreError(Slot),
|
||||
BlockReplay(#[allow(dead_code)] BlockReplayError),
|
||||
BeaconState(#[allow(dead_code)] BeaconStateError),
|
||||
CommitteeStoreError(#[allow(dead_code)] Slot),
|
||||
InvalidAttestationError,
|
||||
}
|
||||
|
||||
|
||||
@ -16,6 +16,7 @@ mod database;
|
||||
mod metrics;
|
||||
mod produce_block;
|
||||
mod proposer_duties;
|
||||
mod publish_attestations;
|
||||
mod publish_blocks;
|
||||
mod standard_block_rewards;
|
||||
mod state_id;
|
||||
@ -35,7 +36,7 @@ use beacon_chain::{
|
||||
validator_monitor::timestamp_now, AttestationError as AttnError, BeaconChain, BeaconChainError,
|
||||
BeaconChainTypes, WhenSlotSkipped,
|
||||
};
|
||||
use beacon_processor::BeaconProcessorSend;
|
||||
use beacon_processor::{work_reprocessing_queue::ReprocessQueueMessage, BeaconProcessorSend};
|
||||
pub use block_id::BlockId;
|
||||
use builder_states::get_next_withdrawals;
|
||||
use bytes::Bytes;
|
||||
@ -67,7 +68,7 @@ use std::path::PathBuf;
|
||||
use std::pin::Pin;
|
||||
use std::sync::Arc;
|
||||
use sysinfo::{System, SystemExt};
|
||||
use system_health::observe_system_health_bn;
|
||||
use system_health::{observe_nat, observe_system_health_bn};
|
||||
use task_spawner::{Priority, TaskSpawner};
|
||||
use tokio::sync::{
|
||||
mpsc::{Sender, UnboundedSender},
|
||||
@ -129,6 +130,7 @@ pub struct Context<T: BeaconChainTypes> {
|
||||
pub network_senders: Option<NetworkSenders<T::EthSpec>>,
|
||||
pub network_globals: Option<Arc<NetworkGlobals<T::EthSpec>>>,
|
||||
pub beacon_processor_send: Option<BeaconProcessorSend<T::EthSpec>>,
|
||||
pub beacon_processor_reprocess_send: Option<Sender<ReprocessQueueMessage>>,
|
||||
pub eth1_service: Option<eth1::Service>,
|
||||
pub sse_logging_components: Option<SSELoggingComponents>,
|
||||
pub log: Logger,
|
||||
@ -534,6 +536,11 @@ pub fn serve<T: BeaconChainTypes>(
|
||||
.filter(|_| config.enable_beacon_processor);
|
||||
let task_spawner_filter =
|
||||
warp::any().map(move || TaskSpawner::new(beacon_processor_send.clone()));
|
||||
let beacon_processor_reprocess_send = ctx
|
||||
.beacon_processor_reprocess_send
|
||||
.clone()
|
||||
.filter(|_| config.enable_beacon_processor);
|
||||
let reprocess_send_filter = warp::any().map(move || beacon_processor_reprocess_send.clone());
|
||||
|
||||
let duplicate_block_status_code = ctx.config.duplicate_block_status_code;
|
||||
|
||||
@ -682,7 +689,7 @@ pub fn serve<T: BeaconChainTypes>(
|
||||
.clone()
|
||||
.and(warp::path("validator_balances"))
|
||||
.and(warp::path::end())
|
||||
.and(warp::body::json())
|
||||
.and(warp_utils::json::json())
|
||||
.then(
|
||||
|state_id: StateId,
|
||||
task_spawner: TaskSpawner<T::EthSpec>,
|
||||
@ -726,7 +733,7 @@ pub fn serve<T: BeaconChainTypes>(
|
||||
.clone()
|
||||
.and(warp::path("validators"))
|
||||
.and(warp::path::end())
|
||||
.and(warp::body::json())
|
||||
.and(warp_utils::json::json())
|
||||
.then(
|
||||
|state_id: StateId,
|
||||
task_spawner: TaskSpawner<T::EthSpec>,
|
||||
@ -1019,7 +1026,7 @@ pub fn serve<T: BeaconChainTypes>(
|
||||
Ok((
|
||||
state
|
||||
.get_built_sync_committee(epoch, &chain.spec)
|
||||
.map(|committee| committee.clone())
|
||||
.cloned()
|
||||
.map_err(|e| match e {
|
||||
BeaconStateError::SyncCommitteeNotKnown { .. } => {
|
||||
warp_utils::reject::custom_bad_request(format!(
|
||||
@ -1257,7 +1264,7 @@ pub fn serve<T: BeaconChainTypes>(
|
||||
.and(warp::path("beacon"))
|
||||
.and(warp::path("blocks"))
|
||||
.and(warp::path::end())
|
||||
.and(warp::body::json())
|
||||
.and(warp_utils::json::json())
|
||||
.and(task_spawner_filter.clone())
|
||||
.and(chain_filter.clone())
|
||||
.and(network_tx_filter.clone())
|
||||
@ -1327,7 +1334,7 @@ pub fn serve<T: BeaconChainTypes>(
|
||||
.and(warp::path("blocks"))
|
||||
.and(warp::query::<api_types::BroadcastValidationQuery>())
|
||||
.and(warp::path::end())
|
||||
.and(warp::body::json())
|
||||
.and(warp_utils::json::json())
|
||||
.and(task_spawner_filter.clone())
|
||||
.and(chain_filter.clone())
|
||||
.and(network_tx_filter.clone())
|
||||
@ -1404,7 +1411,7 @@ pub fn serve<T: BeaconChainTypes>(
|
||||
.and(warp::path("beacon"))
|
||||
.and(warp::path("blinded_blocks"))
|
||||
.and(warp::path::end())
|
||||
.and(warp::body::json())
|
||||
.and(warp_utils::json::json())
|
||||
.and(task_spawner_filter.clone())
|
||||
.and(chain_filter.clone())
|
||||
.and(network_tx_filter.clone())
|
||||
@ -1472,7 +1479,7 @@ pub fn serve<T: BeaconChainTypes>(
|
||||
.and(warp::path("blinded_blocks"))
|
||||
.and(warp::query::<api_types::BroadcastValidationQuery>())
|
||||
.and(warp::path::end())
|
||||
.and(warp::body::json())
|
||||
.and(warp_utils::json::json())
|
||||
.and(task_spawner_filter.clone())
|
||||
.and(chain_filter.clone())
|
||||
.and(network_tx_filter.clone())
|
||||
@ -1754,142 +1761,28 @@ pub fn serve<T: BeaconChainTypes>(
|
||||
.clone()
|
||||
.and(warp::path("attestations"))
|
||||
.and(warp::path::end())
|
||||
.and(warp::body::json())
|
||||
.and(warp_utils::json::json())
|
||||
.and(network_tx_filter.clone())
|
||||
.and(reprocess_send_filter)
|
||||
.and(log_filter.clone())
|
||||
.then(
|
||||
|task_spawner: TaskSpawner<T::EthSpec>,
|
||||
chain: Arc<BeaconChain<T>>,
|
||||
attestations: Vec<Attestation<T::EthSpec>>,
|
||||
network_tx: UnboundedSender<NetworkMessage<T::EthSpec>>,
|
||||
log: Logger| {
|
||||
task_spawner.blocking_json_task(Priority::P0, move || {
|
||||
let seen_timestamp = timestamp_now();
|
||||
let mut failures = Vec::new();
|
||||
let mut num_already_known = 0;
|
||||
|
||||
for (index, attestation) in attestations.as_slice().iter().enumerate() {
|
||||
let attestation = match chain
|
||||
.verify_unaggregated_attestation_for_gossip(attestation, None)
|
||||
{
|
||||
Ok(attestation) => attestation,
|
||||
Err(AttnError::PriorAttestationKnown { .. }) => {
|
||||
num_already_known += 1;
|
||||
|
||||
// Skip to the next attestation since an attestation for this
|
||||
// validator is already known in this epoch.
|
||||
//
|
||||
// There's little value for the network in validating a second
|
||||
// attestation for another validator since it is either:
|
||||
//
|
||||
// 1. A duplicate.
|
||||
// 2. Slashable.
|
||||
// 3. Invalid.
|
||||
//
|
||||
// We are likely to get duplicates in the case where a VC is using
|
||||
// fallback BNs. If the first BN actually publishes some/all of a
|
||||
// batch of attestations but fails to respond in a timely fashion,
|
||||
// the VC is likely to try publishing the attestations on another
|
||||
// BN. That second BN may have already seen the attestations from
|
||||
// the first BN and therefore indicate that the attestations are
|
||||
// "already seen". An attestation that has already been seen has
|
||||
// been published on the network so there's no actual error from
|
||||
// the perspective of the user.
|
||||
//
|
||||
// It's better to prevent slashable attestations from ever
|
||||
// appearing on the network than trying to slash validators,
|
||||
// especially those validators connected to the local API.
|
||||
//
|
||||
// There might be *some* value in determining that this attestation
|
||||
// is invalid, but since a valid attestation already it exists it
|
||||
// appears that this validator is capable of producing valid
|
||||
// attestations and there's no immediate cause for concern.
|
||||
continue;
|
||||
}
|
||||
Err(e) => {
|
||||
error!(log,
|
||||
"Failure verifying attestation for gossip";
|
||||
"error" => ?e,
|
||||
"request_index" => index,
|
||||
"committee_index" => attestation.data.index,
|
||||
"attestation_slot" => attestation.data.slot,
|
||||
);
|
||||
failures.push(api_types::Failure::new(
|
||||
index,
|
||||
format!("Verification: {:?}", e),
|
||||
));
|
||||
// skip to the next attestation so we do not publish this one to gossip
|
||||
continue;
|
||||
}
|
||||
};
|
||||
|
||||
// Notify the validator monitor.
|
||||
chain
|
||||
.validator_monitor
|
||||
.read()
|
||||
.register_api_unaggregated_attestation(
|
||||
seen_timestamp,
|
||||
attestation.indexed_attestation(),
|
||||
&chain.slot_clock,
|
||||
);
|
||||
|
||||
publish_pubsub_message(
|
||||
&network_tx,
|
||||
PubsubMessage::Attestation(Box::new((
|
||||
attestation.subnet_id(),
|
||||
attestation.attestation().clone(),
|
||||
))),
|
||||
)?;
|
||||
|
||||
let committee_index = attestation.attestation().data.index;
|
||||
let slot = attestation.attestation().data.slot;
|
||||
|
||||
if let Err(e) = chain.apply_attestation_to_fork_choice(&attestation) {
|
||||
error!(log,
|
||||
"Failure applying verified attestation to fork choice";
|
||||
"error" => ?e,
|
||||
"request_index" => index,
|
||||
"committee_index" => committee_index,
|
||||
"slot" => slot,
|
||||
);
|
||||
failures.push(api_types::Failure::new(
|
||||
index,
|
||||
format!("Fork choice: {:?}", e),
|
||||
));
|
||||
};
|
||||
|
||||
if let Err(e) = chain.add_to_naive_aggregation_pool(&attestation) {
|
||||
error!(log,
|
||||
"Failure adding verified attestation to the naive aggregation pool";
|
||||
"error" => ?e,
|
||||
"request_index" => index,
|
||||
"committee_index" => committee_index,
|
||||
"slot" => slot,
|
||||
);
|
||||
failures.push(api_types::Failure::new(
|
||||
index,
|
||||
format!("Naive aggregation pool: {:?}", e),
|
||||
));
|
||||
}
|
||||
}
|
||||
|
||||
if num_already_known > 0 {
|
||||
debug!(
|
||||
log,
|
||||
"Some unagg attestations already known";
|
||||
"count" => num_already_known
|
||||
);
|
||||
}
|
||||
|
||||
if failures.is_empty() {
|
||||
Ok(())
|
||||
} else {
|
||||
Err(warp_utils::reject::indexed_bad_request(
|
||||
"error processing attestations".to_string(),
|
||||
failures,
|
||||
))
|
||||
}
|
||||
})
|
||||
reprocess_tx: Option<Sender<ReprocessQueueMessage>>,
|
||||
log: Logger| async move {
|
||||
let result = crate::publish_attestations::publish_attestations(
|
||||
task_spawner,
|
||||
chain,
|
||||
attestations,
|
||||
network_tx,
|
||||
reprocess_tx,
|
||||
log,
|
||||
)
|
||||
.await
|
||||
.map(|()| warp::reply::json(&()));
|
||||
task_spawner::convert_rejection(result).await
|
||||
},
|
||||
);
|
||||
|
||||
@ -1930,7 +1823,7 @@ pub fn serve<T: BeaconChainTypes>(
|
||||
.clone()
|
||||
.and(warp::path("attester_slashings"))
|
||||
.and(warp::path::end())
|
||||
.and(warp::body::json())
|
||||
.and(warp_utils::json::json())
|
||||
.and(network_tx_filter.clone())
|
||||
.then(
|
||||
|task_spawner: TaskSpawner<T::EthSpec>,
|
||||
@ -1988,7 +1881,7 @@ pub fn serve<T: BeaconChainTypes>(
|
||||
.clone()
|
||||
.and(warp::path("proposer_slashings"))
|
||||
.and(warp::path::end())
|
||||
.and(warp::body::json())
|
||||
.and(warp_utils::json::json())
|
||||
.and(network_tx_filter.clone())
|
||||
.then(
|
||||
|task_spawner: TaskSpawner<T::EthSpec>,
|
||||
@ -2046,7 +1939,7 @@ pub fn serve<T: BeaconChainTypes>(
|
||||
.clone()
|
||||
.and(warp::path("voluntary_exits"))
|
||||
.and(warp::path::end())
|
||||
.and(warp::body::json())
|
||||
.and(warp_utils::json::json())
|
||||
.and(network_tx_filter.clone())
|
||||
.then(
|
||||
|task_spawner: TaskSpawner<T::EthSpec>,
|
||||
@ -2102,7 +1995,7 @@ pub fn serve<T: BeaconChainTypes>(
|
||||
.clone()
|
||||
.and(warp::path("sync_committees"))
|
||||
.and(warp::path::end())
|
||||
.and(warp::body::json())
|
||||
.and(warp_utils::json::json())
|
||||
.and(network_tx_filter.clone())
|
||||
.and(log_filter.clone())
|
||||
.then(
|
||||
@ -2139,7 +2032,7 @@ pub fn serve<T: BeaconChainTypes>(
|
||||
.clone()
|
||||
.and(warp::path("bls_to_execution_changes"))
|
||||
.and(warp::path::end())
|
||||
.and(warp::body::json())
|
||||
.and(warp_utils::json::json())
|
||||
.and(network_tx_filter.clone())
|
||||
.and(log_filter.clone())
|
||||
.then(
|
||||
@ -2434,9 +2327,8 @@ pub fn serve<T: BeaconChainTypes>(
|
||||
accept_header: Option<api_types::Accept>| {
|
||||
task_spawner.blocking_response_task(Priority::P1, move || {
|
||||
let update = chain
|
||||
.latest_seen_optimistic_update
|
||||
.lock()
|
||||
.clone()
|
||||
.light_client_server_cache
|
||||
.get_latest_optimistic_update()
|
||||
.ok_or_else(|| {
|
||||
warp_utils::reject::custom_not_found(
|
||||
"No LightClientOptimisticUpdate is available".to_string(),
|
||||
@ -2482,9 +2374,8 @@ pub fn serve<T: BeaconChainTypes>(
|
||||
accept_header: Option<api_types::Accept>| {
|
||||
task_spawner.blocking_response_task(Priority::P1, move || {
|
||||
let update = chain
|
||||
.latest_seen_finality_update
|
||||
.lock()
|
||||
.clone()
|
||||
.light_client_server_cache
|
||||
.get_latest_finality_update()
|
||||
.ok_or_else(|| {
|
||||
warp_utils::reject::custom_not_found(
|
||||
"No LightClientFinalityUpdate is available".to_string(),
|
||||
@ -2533,7 +2424,7 @@ pub fn serve<T: BeaconChainTypes>(
|
||||
.and(warp::path("attestations"))
|
||||
.and(warp::path::param::<Epoch>())
|
||||
.and(warp::path::end())
|
||||
.and(warp::body::json())
|
||||
.and(warp_utils::json::json())
|
||||
.then(
|
||||
|task_spawner: TaskSpawner<T::EthSpec>,
|
||||
chain: Arc<BeaconChain<T>>,
|
||||
@ -2583,7 +2474,7 @@ pub fn serve<T: BeaconChainTypes>(
|
||||
.and(warp::path("sync_committee"))
|
||||
.and(block_id_or_err)
|
||||
.and(warp::path::end())
|
||||
.and(warp::body::json())
|
||||
.and(warp_utils::json::json())
|
||||
.and(log_filter.clone())
|
||||
.then(
|
||||
|task_spawner: TaskSpawner<T::EthSpec>,
|
||||
@ -2860,7 +2751,7 @@ pub fn serve<T: BeaconChainTypes>(
|
||||
hex::encode(
|
||||
meta_data
|
||||
.syncnets()
|
||||
.map(|x| x.clone())
|
||||
.cloned()
|
||||
.unwrap_or_default()
|
||||
.into_bytes()
|
||||
)
|
||||
@ -3326,7 +3217,7 @@ pub fn serve<T: BeaconChainTypes>(
|
||||
}))
|
||||
.and(warp::path::end())
|
||||
.and(not_while_syncing_filter.clone())
|
||||
.and(warp::body::json())
|
||||
.and(warp_utils::json::json())
|
||||
.and(task_spawner_filter.clone())
|
||||
.and(chain_filter.clone())
|
||||
.then(
|
||||
@ -3352,7 +3243,7 @@ pub fn serve<T: BeaconChainTypes>(
|
||||
}))
|
||||
.and(warp::path::end())
|
||||
.and(not_while_syncing_filter.clone())
|
||||
.and(warp::body::json())
|
||||
.and(warp_utils::json::json())
|
||||
.and(task_spawner_filter.clone())
|
||||
.and(chain_filter.clone())
|
||||
.then(
|
||||
@ -3406,7 +3297,7 @@ pub fn serve<T: BeaconChainTypes>(
|
||||
.and(not_while_syncing_filter.clone())
|
||||
.and(task_spawner_filter.clone())
|
||||
.and(chain_filter.clone())
|
||||
.and(warp::body::json())
|
||||
.and(warp_utils::json::json())
|
||||
.and(network_tx_filter.clone())
|
||||
.and(log_filter.clone())
|
||||
.then(
|
||||
@ -3519,7 +3410,7 @@ pub fn serve<T: BeaconChainTypes>(
|
||||
.and(not_while_syncing_filter.clone())
|
||||
.and(task_spawner_filter.clone())
|
||||
.and(chain_filter.clone())
|
||||
.and(warp::body::json())
|
||||
.and(warp_utils::json::json())
|
||||
.and(network_tx_filter)
|
||||
.and(log_filter.clone())
|
||||
.then(
|
||||
@ -3545,7 +3436,7 @@ pub fn serve<T: BeaconChainTypes>(
|
||||
.and(warp::path("validator"))
|
||||
.and(warp::path("beacon_committee_subscriptions"))
|
||||
.and(warp::path::end())
|
||||
.and(warp::body::json())
|
||||
.and(warp_utils::json::json())
|
||||
.and(validator_subscription_tx_filter.clone())
|
||||
.and(task_spawner_filter.clone())
|
||||
.and(chain_filter.clone())
|
||||
@ -3557,34 +3448,34 @@ pub fn serve<T: BeaconChainTypes>(
|
||||
chain: Arc<BeaconChain<T>>,
|
||||
log: Logger| {
|
||||
task_spawner.blocking_json_task(Priority::P0, move || {
|
||||
for subscription in &subscriptions {
|
||||
chain
|
||||
.validator_monitor
|
||||
.write()
|
||||
.auto_register_local_validator(subscription.validator_index);
|
||||
|
||||
let validator_subscription = api_types::ValidatorSubscription {
|
||||
validator_index: subscription.validator_index,
|
||||
attestation_committee_index: subscription.committee_index,
|
||||
slot: subscription.slot,
|
||||
committee_count_at_slot: subscription.committees_at_slot,
|
||||
is_aggregator: subscription.is_aggregator,
|
||||
};
|
||||
|
||||
let message = ValidatorSubscriptionMessage::AttestationSubscribe {
|
||||
subscriptions: vec![validator_subscription],
|
||||
};
|
||||
if let Err(e) = validator_subscription_tx.try_send(message) {
|
||||
warn!(
|
||||
log,
|
||||
"Unable to process committee subscriptions";
|
||||
"info" => "the host may be overloaded or resource-constrained",
|
||||
"error" => ?e,
|
||||
);
|
||||
return Err(warp_utils::reject::custom_server_error(
|
||||
"unable to queue subscription, host may be overloaded or shutting down".to_string(),
|
||||
));
|
||||
}
|
||||
let subscriptions: std::collections::BTreeSet<_> = subscriptions
|
||||
.iter()
|
||||
.map(|subscription| {
|
||||
chain
|
||||
.validator_monitor
|
||||
.write()
|
||||
.auto_register_local_validator(subscription.validator_index);
|
||||
api_types::ValidatorSubscription {
|
||||
attestation_committee_index: subscription.committee_index,
|
||||
slot: subscription.slot,
|
||||
committee_count_at_slot: subscription.committees_at_slot,
|
||||
is_aggregator: subscription.is_aggregator,
|
||||
}
|
||||
})
|
||||
.collect();
|
||||
let message =
|
||||
ValidatorSubscriptionMessage::AttestationSubscribe { subscriptions };
|
||||
if let Err(e) = validator_subscription_tx.try_send(message) {
|
||||
warn!(
|
||||
log,
|
||||
"Unable to process committee subscriptions";
|
||||
"info" => "the host may be overloaded or resource-constrained",
|
||||
"error" => ?e,
|
||||
);
|
||||
return Err(warp_utils::reject::custom_server_error(
|
||||
"unable to queue subscription, host may be overloaded or shutting down"
|
||||
.to_string(),
|
||||
));
|
||||
}
|
||||
|
||||
Ok(())
|
||||
@ -3601,7 +3492,7 @@ pub fn serve<T: BeaconChainTypes>(
|
||||
.and(task_spawner_filter.clone())
|
||||
.and(chain_filter.clone())
|
||||
.and(log_filter.clone())
|
||||
.and(warp::body::json())
|
||||
.and(warp_utils::json::json())
|
||||
.then(
|
||||
|task_spawner: TaskSpawner<T::EthSpec>,
|
||||
chain: Arc<BeaconChain<T>>,
|
||||
@ -3652,7 +3543,7 @@ pub fn serve<T: BeaconChainTypes>(
|
||||
.and(task_spawner_filter.clone())
|
||||
.and(chain_filter.clone())
|
||||
.and(log_filter.clone())
|
||||
.and(warp::body::json())
|
||||
.and(warp_utils::json::json())
|
||||
.then(
|
||||
|task_spawner: TaskSpawner<T::EthSpec>,
|
||||
chain: Arc<BeaconChain<T>>,
|
||||
@ -3826,7 +3717,7 @@ pub fn serve<T: BeaconChainTypes>(
|
||||
.and(warp::path("validator"))
|
||||
.and(warp::path("sync_committee_subscriptions"))
|
||||
.and(warp::path::end())
|
||||
.and(warp::body::json())
|
||||
.and(warp_utils::json::json())
|
||||
.and(validator_subscription_tx_filter)
|
||||
.and(task_spawner_filter.clone())
|
||||
.and(chain_filter.clone())
|
||||
@ -3866,18 +3757,18 @@ pub fn serve<T: BeaconChainTypes>(
|
||||
},
|
||||
);
|
||||
|
||||
// POST vaidator/liveness/{epoch}
|
||||
// POST validator/liveness/{epoch}
|
||||
let post_validator_liveness_epoch = eth_v1
|
||||
.and(warp::path("validator"))
|
||||
.and(warp::path("liveness"))
|
||||
.and(warp::path::param::<Epoch>())
|
||||
.and(warp::path::end())
|
||||
.and(warp::body::json())
|
||||
.and(warp_utils::json::json())
|
||||
.and(task_spawner_filter.clone())
|
||||
.and(chain_filter.clone())
|
||||
.then(
|
||||
|epoch: Epoch,
|
||||
indices: Vec<u64>,
|
||||
indices: api_types::ValidatorIndexData,
|
||||
task_spawner: TaskSpawner<T::EthSpec>,
|
||||
chain: Arc<BeaconChain<T>>| {
|
||||
task_spawner.blocking_json_task(Priority::P0, move || {
|
||||
@ -3896,6 +3787,7 @@ pub fn serve<T: BeaconChainTypes>(
|
||||
}
|
||||
|
||||
let liveness: Vec<api_types::StandardLivenessResponseData> = indices
|
||||
.0
|
||||
.iter()
|
||||
.cloned()
|
||||
.map(|index| {
|
||||
@ -3913,7 +3805,7 @@ pub fn serve<T: BeaconChainTypes>(
|
||||
let post_lighthouse_liveness = warp::path("lighthouse")
|
||||
.and(warp::path("liveness"))
|
||||
.and(warp::path::end())
|
||||
.and(warp::body::json())
|
||||
.and(warp_utils::json::json())
|
||||
.and(task_spawner_filter.clone())
|
||||
.and(chain_filter.clone())
|
||||
.then(
|
||||
@ -4016,7 +3908,7 @@ pub fn serve<T: BeaconChainTypes>(
|
||||
.and(warp::path("ui"))
|
||||
.and(warp::path("validator_metrics"))
|
||||
.and(warp::path::end())
|
||||
.and(warp::body::json())
|
||||
.and(warp_utils::json::json())
|
||||
.and(task_spawner_filter.clone())
|
||||
.and(chain_filter.clone())
|
||||
.then(
|
||||
@ -4035,7 +3927,7 @@ pub fn serve<T: BeaconChainTypes>(
|
||||
.and(warp::path("ui"))
|
||||
.and(warp::path("validator_info"))
|
||||
.and(warp::path::end())
|
||||
.and(warp::body::json())
|
||||
.and(warp_utils::json::json())
|
||||
.and(task_spawner_filter.clone())
|
||||
.and(chain_filter.clone())
|
||||
.then(
|
||||
@ -4073,13 +3965,7 @@ pub fn serve<T: BeaconChainTypes>(
|
||||
.and(warp::path::end())
|
||||
.then(|task_spawner: TaskSpawner<T::EthSpec>| {
|
||||
task_spawner.blocking_json_task(Priority::P1, move || {
|
||||
Ok(api_types::GenericResponse::from(
|
||||
lighthouse_network::metrics::NAT_OPEN
|
||||
.as_ref()
|
||||
.map(|v| v.get())
|
||||
.unwrap_or(0)
|
||||
!= 0,
|
||||
))
|
||||
Ok(api_types::GenericResponse::from(observe_nat()))
|
||||
})
|
||||
});
|
||||
|
||||
@ -4267,36 +4153,6 @@ pub fn serve<T: BeaconChainTypes>(
|
||||
},
|
||||
);
|
||||
|
||||
// GET lighthouse/beacon/states/{state_id}/ssz
|
||||
let get_lighthouse_beacon_states_ssz = warp::path("lighthouse")
|
||||
.and(warp::path("beacon"))
|
||||
.and(warp::path("states"))
|
||||
.and(warp::path::param::<StateId>())
|
||||
.and(warp::path("ssz"))
|
||||
.and(warp::path::end())
|
||||
.and(task_spawner_filter.clone())
|
||||
.and(chain_filter.clone())
|
||||
.then(
|
||||
|state_id: StateId,
|
||||
task_spawner: TaskSpawner<T::EthSpec>,
|
||||
chain: Arc<BeaconChain<T>>| {
|
||||
task_spawner.blocking_response_task(Priority::P1, move || {
|
||||
// This debug endpoint provides no indication of optimistic status.
|
||||
let (state, _execution_optimistic, _finalized) = state_id.state(&chain)?;
|
||||
Response::builder()
|
||||
.status(200)
|
||||
.body(state.as_ssz_bytes().into())
|
||||
.map(|res: Response<Body>| add_ssz_content_type_header(res))
|
||||
.map_err(|e| {
|
||||
warp_utils::reject::custom_server_error(format!(
|
||||
"failed to create response: {}",
|
||||
e
|
||||
))
|
||||
})
|
||||
})
|
||||
},
|
||||
);
|
||||
|
||||
// GET lighthouse/staking
|
||||
let get_lighthouse_staking = warp::path("lighthouse")
|
||||
.and(warp::path("staking"))
|
||||
@ -4368,7 +4224,7 @@ pub fn serve<T: BeaconChainTypes>(
|
||||
let post_lighthouse_block_rewards = warp::path("lighthouse")
|
||||
.and(warp::path("analysis"))
|
||||
.and(warp::path("block_rewards"))
|
||||
.and(warp::body::json())
|
||||
.and(warp_utils::json::json())
|
||||
.and(warp::path::end())
|
||||
.and(task_spawner_filter.clone())
|
||||
.and(chain_filter.clone())
|
||||
@ -4631,7 +4487,6 @@ pub fn serve<T: BeaconChainTypes>(
|
||||
.uor(get_lighthouse_eth1_syncing)
|
||||
.uor(get_lighthouse_eth1_block_cache)
|
||||
.uor(get_lighthouse_eth1_deposit_cache)
|
||||
.uor(get_lighthouse_beacon_states_ssz)
|
||||
.uor(get_lighthouse_staking)
|
||||
.uor(get_lighthouse_database_info)
|
||||
.uor(get_lighthouse_block_rewards)
|
||||
|
||||
319
beacon_node/http_api/src/publish_attestations.rs
Normal file
319
beacon_node/http_api/src/publish_attestations.rs
Normal file
@ -0,0 +1,319 @@
|
||||
//! Import attestations and publish them to the network.
|
||||
//!
|
||||
//! This module gracefully handles attestations to unknown blocks by requeuing them and then
|
||||
//! efficiently waiting for them to finish reprocessing (using an async yield).
|
||||
//!
|
||||
//! The following comments relate to the handling of duplicate attestations (relocated here during
|
||||
//! refactoring):
|
||||
//!
|
||||
//! Skip to the next attestation since an attestation for this
|
||||
//! validator is already known in this epoch.
|
||||
//!
|
||||
//! There's little value for the network in validating a second
|
||||
//! attestation for another validator since it is either:
|
||||
//!
|
||||
//! 1. A duplicate.
|
||||
//! 2. Slashable.
|
||||
//! 3. Invalid.
|
||||
//!
|
||||
//! We are likely to get duplicates in the case where a VC is using
|
||||
//! fallback BNs. If the first BN actually publishes some/all of a
|
||||
//! batch of attestations but fails to respond in a timely fashion,
|
||||
//! the VC is likely to try publishing the attestations on another
|
||||
//! BN. That second BN may have already seen the attestations from
|
||||
//! the first BN and therefore indicate that the attestations are
|
||||
//! "already seen". An attestation that has already been seen has
|
||||
//! been published on the network so there's no actual error from
|
||||
//! the perspective of the user.
|
||||
//!
|
||||
//! It's better to prevent slashable attestations from ever
|
||||
//! appearing on the network than trying to slash validators,
|
||||
//! especially those validators connected to the local API.
|
||||
//!
|
||||
//! There might be *some* value in determining that this attestation
|
||||
//! is invalid, but since a valid attestation already it exists it
|
||||
//! appears that this validator is capable of producing valid
|
||||
//! attestations and there's no immediate cause for concern.
|
||||
use crate::task_spawner::{Priority, TaskSpawner};
|
||||
use beacon_chain::{
|
||||
validator_monitor::timestamp_now, AttestationError, BeaconChain, BeaconChainError,
|
||||
BeaconChainTypes,
|
||||
};
|
||||
use beacon_processor::work_reprocessing_queue::{QueuedUnaggregate, ReprocessQueueMessage};
|
||||
use eth2::types::Failure;
|
||||
use lighthouse_network::PubsubMessage;
|
||||
use network::NetworkMessage;
|
||||
use slog::{debug, error, warn, Logger};
|
||||
use std::sync::Arc;
|
||||
use std::time::Duration;
|
||||
use tokio::sync::{
|
||||
mpsc::{Sender, UnboundedSender},
|
||||
oneshot,
|
||||
};
|
||||
use types::Attestation;
|
||||
|
||||
// Error variants are only used in `Debug` and considered `dead_code` by the compiler.
|
||||
#[derive(Debug)]
|
||||
enum Error {
|
||||
Validation(AttestationError),
|
||||
Publication,
|
||||
ForkChoice(#[allow(dead_code)] BeaconChainError),
|
||||
AggregationPool(#[allow(dead_code)] AttestationError),
|
||||
ReprocessDisabled,
|
||||
ReprocessFull,
|
||||
ReprocessTimeout,
|
||||
}
|
||||
|
||||
enum PublishAttestationResult {
|
||||
Success,
|
||||
AlreadyKnown,
|
||||
Reprocessing(oneshot::Receiver<Result<(), Error>>),
|
||||
Failure(Error),
|
||||
}
|
||||
|
||||
fn verify_and_publish_attestation<T: BeaconChainTypes>(
|
||||
chain: &Arc<BeaconChain<T>>,
|
||||
attestation: &Attestation<T::EthSpec>,
|
||||
seen_timestamp: Duration,
|
||||
network_tx: &UnboundedSender<NetworkMessage<T::EthSpec>>,
|
||||
log: &Logger,
|
||||
) -> Result<(), Error> {
|
||||
let attestation = chain
|
||||
.verify_unaggregated_attestation_for_gossip(attestation, None)
|
||||
.map_err(Error::Validation)?;
|
||||
|
||||
// Publish.
|
||||
network_tx
|
||||
.send(NetworkMessage::Publish {
|
||||
messages: vec![PubsubMessage::Attestation(Box::new((
|
||||
attestation.subnet_id(),
|
||||
attestation.attestation().clone(),
|
||||
)))],
|
||||
})
|
||||
.map_err(|_| Error::Publication)?;
|
||||
|
||||
// Notify the validator monitor.
|
||||
chain
|
||||
.validator_monitor
|
||||
.read()
|
||||
.register_api_unaggregated_attestation(
|
||||
seen_timestamp,
|
||||
attestation.indexed_attestation(),
|
||||
&chain.slot_clock,
|
||||
);
|
||||
|
||||
let fc_result = chain.apply_attestation_to_fork_choice(&attestation);
|
||||
let naive_aggregation_result = chain.add_to_naive_aggregation_pool(&attestation);
|
||||
|
||||
if let Err(e) = &fc_result {
|
||||
warn!(
|
||||
log,
|
||||
"Attestation invalid for fork choice";
|
||||
"err" => ?e,
|
||||
);
|
||||
}
|
||||
if let Err(e) = &naive_aggregation_result {
|
||||
warn!(
|
||||
log,
|
||||
"Attestation invalid for aggregation";
|
||||
"err" => ?e
|
||||
);
|
||||
}
|
||||
|
||||
if let Err(e) = fc_result {
|
||||
Err(Error::ForkChoice(e))
|
||||
} else if let Err(e) = naive_aggregation_result {
|
||||
Err(Error::AggregationPool(e))
|
||||
} else {
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
|
||||
pub async fn publish_attestations<T: BeaconChainTypes>(
|
||||
task_spawner: TaskSpawner<T::EthSpec>,
|
||||
chain: Arc<BeaconChain<T>>,
|
||||
attestations: Vec<Attestation<T::EthSpec>>,
|
||||
network_tx: UnboundedSender<NetworkMessage<T::EthSpec>>,
|
||||
reprocess_send: Option<Sender<ReprocessQueueMessage>>,
|
||||
log: Logger,
|
||||
) -> Result<(), warp::Rejection> {
|
||||
// Collect metadata about attestations which we'll use to report failures. We need to
|
||||
// move the `attestations` vec into the blocking task, so this small overhead is unavoidable.
|
||||
let attestation_metadata = attestations
|
||||
.iter()
|
||||
.map(|att| (att.data.slot, att.data.index))
|
||||
.collect::<Vec<_>>();
|
||||
|
||||
// Gossip validate and publish attestations that can be immediately processed.
|
||||
let seen_timestamp = timestamp_now();
|
||||
let inner_log = log.clone();
|
||||
let mut prelim_results = task_spawner
|
||||
.blocking_task(Priority::P0, move || {
|
||||
Ok(attestations
|
||||
.into_iter()
|
||||
.map(|attestation| {
|
||||
match verify_and_publish_attestation(
|
||||
&chain,
|
||||
&attestation,
|
||||
seen_timestamp,
|
||||
&network_tx,
|
||||
&inner_log,
|
||||
) {
|
||||
Ok(()) => PublishAttestationResult::Success,
|
||||
Err(Error::Validation(AttestationError::UnknownHeadBlock {
|
||||
beacon_block_root,
|
||||
})) => {
|
||||
let Some(reprocess_tx) = &reprocess_send else {
|
||||
return PublishAttestationResult::Failure(Error::ReprocessDisabled);
|
||||
};
|
||||
// Re-process.
|
||||
let (tx, rx) = oneshot::channel();
|
||||
let reprocess_chain = chain.clone();
|
||||
let reprocess_network_tx = network_tx.clone();
|
||||
let reprocess_log = inner_log.clone();
|
||||
let reprocess_fn = move || {
|
||||
let result = verify_and_publish_attestation(
|
||||
&reprocess_chain,
|
||||
&attestation,
|
||||
seen_timestamp,
|
||||
&reprocess_network_tx,
|
||||
&reprocess_log,
|
||||
);
|
||||
// Ignore failure on the oneshot that reports the result. This
|
||||
// shouldn't happen unless some catastrophe befalls the waiting
|
||||
// thread which causes it to drop.
|
||||
let _ = tx.send(result);
|
||||
};
|
||||
let reprocess_msg =
|
||||
ReprocessQueueMessage::UnknownBlockUnaggregate(QueuedUnaggregate {
|
||||
beacon_block_root,
|
||||
process_fn: Box::new(reprocess_fn),
|
||||
});
|
||||
if reprocess_tx.try_send(reprocess_msg).is_err() {
|
||||
PublishAttestationResult::Failure(Error::ReprocessFull)
|
||||
} else {
|
||||
PublishAttestationResult::Reprocessing(rx)
|
||||
}
|
||||
}
|
||||
Err(Error::Validation(AttestationError::PriorAttestationKnown {
|
||||
..
|
||||
})) => PublishAttestationResult::AlreadyKnown,
|
||||
Err(e) => PublishAttestationResult::Failure(e),
|
||||
}
|
||||
})
|
||||
.map(Some)
|
||||
.collect::<Vec<_>>())
|
||||
})
|
||||
.await?;
|
||||
|
||||
// Asynchronously wait for re-processing of attestations to unknown blocks. This avoids blocking
|
||||
// any of the beacon processor workers while we wait for reprocessing.
|
||||
let (reprocess_indices, reprocess_futures): (Vec<_>, Vec<_>) = prelim_results
|
||||
.iter_mut()
|
||||
.enumerate()
|
||||
.filter_map(|(i, opt_result)| {
|
||||
if let Some(PublishAttestationResult::Reprocessing(..)) = &opt_result {
|
||||
let PublishAttestationResult::Reprocessing(rx) = opt_result.take()? else {
|
||||
// Unreachable.
|
||||
return None;
|
||||
};
|
||||
Some((i, rx))
|
||||
} else {
|
||||
None
|
||||
}
|
||||
})
|
||||
.unzip();
|
||||
let reprocess_results = futures::future::join_all(reprocess_futures).await;
|
||||
|
||||
// Join everything back together and construct a response.
|
||||
// This part should be quick so we just stay in the Tokio executor's async task.
|
||||
for (i, reprocess_result) in reprocess_indices.into_iter().zip(reprocess_results) {
|
||||
let Some(result_entry) = prelim_results.get_mut(i) else {
|
||||
error!(
|
||||
log,
|
||||
"Unreachable case in attestation publishing";
|
||||
"case" => "prelim out of bounds",
|
||||
"request_index" => i,
|
||||
);
|
||||
continue;
|
||||
};
|
||||
*result_entry = Some(match reprocess_result {
|
||||
Ok(Ok(())) => PublishAttestationResult::Success,
|
||||
// Attestation failed processing on re-process.
|
||||
Ok(Err(Error::Validation(AttestationError::PriorAttestationKnown { .. }))) => {
|
||||
PublishAttestationResult::AlreadyKnown
|
||||
}
|
||||
Ok(Err(e)) => PublishAttestationResult::Failure(e),
|
||||
// Oneshot was dropped, indicating that the attestation either timed out in the
|
||||
// reprocess queue or was dropped due to some error.
|
||||
Err(_) => PublishAttestationResult::Failure(Error::ReprocessTimeout),
|
||||
});
|
||||
}
|
||||
|
||||
// Construct the response.
|
||||
let mut failures = vec![];
|
||||
let mut num_already_known = 0;
|
||||
|
||||
for (index, result) in prelim_results.iter().enumerate() {
|
||||
match result {
|
||||
Some(PublishAttestationResult::Success) => {}
|
||||
Some(PublishAttestationResult::AlreadyKnown) => num_already_known += 1,
|
||||
Some(PublishAttestationResult::Failure(e)) => {
|
||||
if let Some((slot, committee_index)) = attestation_metadata.get(index) {
|
||||
error!(
|
||||
log,
|
||||
"Failure verifying attestation for gossip";
|
||||
"error" => ?e,
|
||||
"request_index" => index,
|
||||
"committee_index" => committee_index,
|
||||
"attestation_slot" => slot,
|
||||
);
|
||||
failures.push(Failure::new(index, format!("{e:?}")));
|
||||
} else {
|
||||
error!(
|
||||
log,
|
||||
"Unreachable case in attestation publishing";
|
||||
"case" => "out of bounds",
|
||||
"request_index" => index
|
||||
);
|
||||
failures.push(Failure::new(index, "metadata logic error".into()));
|
||||
}
|
||||
}
|
||||
Some(PublishAttestationResult::Reprocessing(_)) => {
|
||||
error!(
|
||||
log,
|
||||
"Unreachable case in attestation publishing";
|
||||
"case" => "reprocessing",
|
||||
"request_index" => index
|
||||
);
|
||||
failures.push(Failure::new(index, "reprocess logic error".into()));
|
||||
}
|
||||
None => {
|
||||
error!(
|
||||
log,
|
||||
"Unreachable case in attestation publishing";
|
||||
"case" => "result is None",
|
||||
"request_index" => index
|
||||
);
|
||||
failures.push(Failure::new(index, "result logic error".into()));
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if num_already_known > 0 {
|
||||
debug!(
|
||||
log,
|
||||
"Some unagg attestations already known";
|
||||
"count" => num_already_known
|
||||
);
|
||||
}
|
||||
|
||||
if failures.is_empty() {
|
||||
Ok(())
|
||||
} else {
|
||||
Err(warp_utils::reject::indexed_bad_request(
|
||||
"error processing attestations".to_string(),
|
||||
failures,
|
||||
))
|
||||
}
|
||||
}
|
||||
@ -60,11 +60,15 @@ impl<E: EthSpec> TaskSpawner<E> {
|
||||
}
|
||||
}
|
||||
|
||||
/// Executes a "blocking" (non-async) task which returns a `Response`.
|
||||
pub async fn blocking_response_task<F, T>(self, priority: Priority, func: F) -> Response
|
||||
/// Executes a "blocking" (non-async) task which returns an arbitrary value.
|
||||
pub async fn blocking_task<F, T>(
|
||||
self,
|
||||
priority: Priority,
|
||||
func: F,
|
||||
) -> Result<T, warp::Rejection>
|
||||
where
|
||||
F: FnOnce() -> Result<T, warp::Rejection> + Send + Sync + 'static,
|
||||
T: Reply + Send + 'static,
|
||||
T: Send + 'static,
|
||||
{
|
||||
if let Some(beacon_processor_send) = &self.beacon_processor_send {
|
||||
// Create a closure that will execute `func` and send the result to
|
||||
@ -79,22 +83,31 @@ impl<E: EthSpec> TaskSpawner<E> {
|
||||
};
|
||||
|
||||
// Send the function to the beacon processor for execution at some arbitrary time.
|
||||
let result = send_to_beacon_processor(
|
||||
send_to_beacon_processor(
|
||||
beacon_processor_send,
|
||||
priority,
|
||||
BlockingOrAsync::Blocking(Box::new(process_fn)),
|
||||
rx,
|
||||
)
|
||||
.await
|
||||
.and_then(|x| x);
|
||||
convert_rejection(result).await
|
||||
.and_then(|x| x)
|
||||
} else {
|
||||
// There is no beacon processor so spawn a task directly on the
|
||||
// tokio executor.
|
||||
convert_rejection(warp_utils::task::blocking_response_task(func).await).await
|
||||
warp_utils::task::blocking_task(func).await
|
||||
}
|
||||
}
|
||||
|
||||
/// Executes a "blocking" (non-async) task which returns a `Response`.
|
||||
pub async fn blocking_response_task<F, T>(self, priority: Priority, func: F) -> Response
|
||||
where
|
||||
F: FnOnce() -> Result<T, warp::Rejection> + Send + Sync + 'static,
|
||||
T: Reply + Send + 'static,
|
||||
{
|
||||
let result = self.blocking_task(priority, func).await;
|
||||
convert_rejection(result).await
|
||||
}
|
||||
|
||||
/// Executes a "blocking" (non-async) task which returns a JSON-serializable
|
||||
/// object.
|
||||
pub async fn blocking_json_task<F, T>(self, priority: Priority, func: F) -> Response
|
||||
|
||||
@ -35,6 +35,7 @@ pub const EXTERNAL_ADDR: &str = "/ip4/0.0.0.0/tcp/9000";
|
||||
|
||||
/// HTTP API tester that allows interaction with the underlying beacon chain harness.
|
||||
pub struct InteractiveTester<E: EthSpec> {
|
||||
pub ctx: Arc<Context<EphemeralHarnessType<E>>>,
|
||||
pub harness: BeaconChainHarness<EphemeralHarnessType<E>>,
|
||||
pub client: BeaconNodeHttpClient,
|
||||
pub network_rx: NetworkReceivers<E>,
|
||||
@ -43,10 +44,11 @@ pub struct InteractiveTester<E: EthSpec> {
|
||||
/// The result of calling `create_api_server`.
|
||||
///
|
||||
/// Glue-type between `tests::ApiTester` and `InteractiveTester`.
|
||||
pub struct ApiServer<E: EthSpec, SFut: Future<Output = ()>> {
|
||||
pub struct ApiServer<T: BeaconChainTypes, SFut: Future<Output = ()>> {
|
||||
pub ctx: Arc<Context<T>>,
|
||||
pub server: SFut,
|
||||
pub listening_socket: SocketAddr,
|
||||
pub network_rx: NetworkReceivers<E>,
|
||||
pub network_rx: NetworkReceivers<T::EthSpec>,
|
||||
pub local_enr: Enr,
|
||||
pub external_peer_id: PeerId,
|
||||
}
|
||||
@ -90,6 +92,7 @@ impl<E: EthSpec> InteractiveTester<E> {
|
||||
let harness = harness_builder.build();
|
||||
|
||||
let ApiServer {
|
||||
ctx,
|
||||
server,
|
||||
listening_socket,
|
||||
network_rx,
|
||||
@ -114,6 +117,7 @@ impl<E: EthSpec> InteractiveTester<E> {
|
||||
);
|
||||
|
||||
Self {
|
||||
ctx,
|
||||
harness,
|
||||
client,
|
||||
network_rx,
|
||||
@ -125,7 +129,7 @@ pub async fn create_api_server<T: BeaconChainTypes>(
|
||||
chain: Arc<BeaconChain<T>>,
|
||||
test_runtime: &TestRuntime,
|
||||
log: Logger,
|
||||
) -> ApiServer<T::EthSpec, impl Future<Output = ()>> {
|
||||
) -> ApiServer<T, impl Future<Output = ()>> {
|
||||
// Use port 0 to allocate a new unused port.
|
||||
let port = 0;
|
||||
|
||||
@ -187,6 +191,7 @@ pub async fn create_api_server<T: BeaconChainTypes>(
|
||||
} = BeaconProcessorChannels::new(&beacon_processor_config);
|
||||
|
||||
let beacon_processor_send = beacon_processor_tx;
|
||||
let reprocess_send = work_reprocessing_tx.clone();
|
||||
BeaconProcessor {
|
||||
network_globals: network_globals.clone(),
|
||||
executor: test_runtime.task_executor.clone(),
|
||||
@ -216,14 +221,17 @@ pub async fn create_api_server<T: BeaconChainTypes>(
|
||||
network_senders: Some(network_senders),
|
||||
network_globals: Some(network_globals),
|
||||
beacon_processor_send: Some(beacon_processor_send),
|
||||
beacon_processor_reprocess_send: Some(reprocess_send),
|
||||
eth1_service: Some(eth1_service),
|
||||
sse_logging_components: None,
|
||||
log,
|
||||
});
|
||||
|
||||
let (listening_socket, server) = crate::serve(ctx, test_runtime.task_executor.exit()).unwrap();
|
||||
let (listening_socket, server) =
|
||||
crate::serve(ctx.clone(), test_runtime.task_executor.exit()).unwrap();
|
||||
|
||||
ApiServer {
|
||||
ctx,
|
||||
server,
|
||||
listening_socket,
|
||||
network_rx: network_receivers,
|
||||
|
||||
@ -4,6 +4,7 @@ use beacon_chain::{
|
||||
test_utils::{AttestationStrategy, BlockStrategy, SyncCommitteeStrategy},
|
||||
ChainConfig,
|
||||
};
|
||||
use beacon_processor::work_reprocessing_queue::ReprocessQueueMessage;
|
||||
use eth2::types::ProduceBlockV3Response;
|
||||
use eth2::types::{DepositContractData, StateId};
|
||||
use execution_layer::{ForkchoiceState, PayloadAttributes};
|
||||
@ -840,3 +841,78 @@ pub async fn fork_choice_before_proposal() {
|
||||
// D's parent is B.
|
||||
assert_eq!(block_d.parent_root(), block_root_b.into());
|
||||
}
|
||||
|
||||
// Test that attestations to unknown blocks are requeued and processed when their block arrives.
|
||||
#[tokio::test(flavor = "multi_thread", worker_threads = 4)]
|
||||
async fn queue_attestations_from_http() {
|
||||
let validator_count = 128;
|
||||
let all_validators = (0..validator_count).collect::<Vec<_>>();
|
||||
|
||||
let tester = InteractiveTester::<E>::new(None, validator_count).await;
|
||||
let harness = &tester.harness;
|
||||
let client = tester.client.clone();
|
||||
|
||||
let num_initial = 5;
|
||||
|
||||
// Slot of the block attested to.
|
||||
let attestation_slot = Slot::new(num_initial) + 1;
|
||||
|
||||
// Make some initial blocks.
|
||||
harness.advance_slot();
|
||||
harness
|
||||
.extend_chain(
|
||||
num_initial as usize,
|
||||
BlockStrategy::OnCanonicalHead,
|
||||
AttestationStrategy::AllValidators,
|
||||
)
|
||||
.await;
|
||||
|
||||
harness.advance_slot();
|
||||
assert_eq!(harness.get_current_slot(), attestation_slot);
|
||||
|
||||
// Make the attested-to block without applying it.
|
||||
let pre_state = harness.get_current_state();
|
||||
let (block, post_state) = harness.make_block(pre_state, attestation_slot).await;
|
||||
let block_root = block.0.canonical_root();
|
||||
|
||||
// Make attestations to the block and POST them to the beacon node on a background thread.
|
||||
let attestations = harness
|
||||
.make_unaggregated_attestations(
|
||||
&all_validators,
|
||||
&post_state,
|
||||
block.0.state_root(),
|
||||
block_root.into(),
|
||||
attestation_slot,
|
||||
)
|
||||
.into_iter()
|
||||
.flat_map(|attestations| attestations.into_iter().map(|(att, _subnet)| att))
|
||||
.collect::<Vec<_>>();
|
||||
|
||||
let attestation_future = tokio::spawn(async move {
|
||||
client
|
||||
.post_beacon_pool_attestations(&attestations)
|
||||
.await
|
||||
.expect("attestations should be processed successfully")
|
||||
});
|
||||
|
||||
// In parallel, apply the block. We need to manually notify the reprocess queue, because the
|
||||
// `beacon_chain` does not know about the queue and will not update it for us.
|
||||
let parent_root = block.0.parent_root();
|
||||
harness
|
||||
.process_block(attestation_slot, block_root, block)
|
||||
.await
|
||||
.unwrap();
|
||||
tester
|
||||
.ctx
|
||||
.beacon_processor_reprocess_send
|
||||
.as_ref()
|
||||
.unwrap()
|
||||
.send(ReprocessQueueMessage::BlockImported {
|
||||
block_root,
|
||||
parent_root,
|
||||
})
|
||||
.await
|
||||
.unwrap();
|
||||
|
||||
attestation_future.await.unwrap();
|
||||
}
|
||||
|
||||
@ -248,6 +248,7 @@ impl ApiTester {
|
||||
let log = null_logger().unwrap();
|
||||
|
||||
let ApiServer {
|
||||
ctx: _,
|
||||
server,
|
||||
listening_socket,
|
||||
network_rx,
|
||||
@ -341,6 +342,7 @@ impl ApiTester {
|
||||
let log = null_logger().unwrap();
|
||||
|
||||
let ApiServer {
|
||||
ctx: _,
|
||||
server,
|
||||
listening_socket,
|
||||
network_rx,
|
||||
@ -1731,7 +1733,10 @@ impl ApiTester {
|
||||
Err(e) => panic!("query failed incorrectly: {e:?}"),
|
||||
};
|
||||
|
||||
let expected = self.chain.latest_seen_optimistic_update.lock().clone();
|
||||
let expected = self
|
||||
.chain
|
||||
.light_client_server_cache
|
||||
.get_latest_optimistic_update();
|
||||
assert_eq!(result, expected);
|
||||
|
||||
self
|
||||
@ -1747,7 +1752,10 @@ impl ApiTester {
|
||||
Err(e) => panic!("query failed incorrectly: {e:?}"),
|
||||
};
|
||||
|
||||
let expected = self.chain.latest_seen_finality_update.lock().clone();
|
||||
let expected = self
|
||||
.chain
|
||||
.light_client_server_cache
|
||||
.get_latest_finality_update();
|
||||
assert_eq!(result, expected);
|
||||
|
||||
self
|
||||
@ -2713,6 +2721,31 @@ impl ApiTester {
|
||||
self
|
||||
}
|
||||
|
||||
/// Check that the metadata from the headers & JSON response body are consistent, and that the
|
||||
/// consensus block value is non-zero.
|
||||
fn check_block_v3_metadata(
|
||||
metadata: &ProduceBlockV3Metadata,
|
||||
response: &JsonProduceBlockV3Response<E>,
|
||||
) {
|
||||
// Compare fork name to ForkVersionedResponse rather than metadata consensus_version, which
|
||||
// is deserialized to a dummy value.
|
||||
assert_eq!(Some(metadata.consensus_version), response.version);
|
||||
assert_eq!(ForkName::Base, response.metadata.consensus_version);
|
||||
assert_eq!(
|
||||
metadata.execution_payload_blinded,
|
||||
response.metadata.execution_payload_blinded
|
||||
);
|
||||
assert_eq!(
|
||||
metadata.execution_payload_value,
|
||||
response.metadata.execution_payload_value
|
||||
);
|
||||
assert_eq!(
|
||||
metadata.consensus_block_value,
|
||||
response.metadata.consensus_block_value
|
||||
);
|
||||
assert!(!metadata.consensus_block_value.is_zero());
|
||||
}
|
||||
|
||||
pub async fn test_block_production_v3_ssz(self) -> Self {
|
||||
let fork = self.chain.canonical_head.cached_head().head_fork();
|
||||
let genesis_validators_root = self.chain.genesis_validators_root;
|
||||
@ -3574,11 +3607,12 @@ impl ApiTester {
|
||||
|
||||
let (proposer_index, randao_reveal) = self.get_test_randao(slot, epoch).await;
|
||||
|
||||
let (payload_type, _) = self
|
||||
let (payload_type, metadata) = self
|
||||
.client
|
||||
.get_validator_blocks_v3::<E>(slot, &randao_reveal, None, None)
|
||||
.await
|
||||
.unwrap();
|
||||
Self::check_block_v3_metadata(&metadata, &payload_type);
|
||||
|
||||
let payload: BlindedPayload<E> = match payload_type.data {
|
||||
ProduceBlockV3Response::Blinded(payload) => {
|
||||
@ -3600,11 +3634,12 @@ impl ApiTester {
|
||||
|
||||
let (proposer_index, randao_reveal) = self.get_test_randao(slot, epoch).await;
|
||||
|
||||
let (payload_type, _) = self
|
||||
let (payload_type, metadata) = self
|
||||
.client
|
||||
.get_validator_blocks_v3::<E>(slot, &randao_reveal, None, Some(0))
|
||||
.await
|
||||
.unwrap();
|
||||
Self::check_block_v3_metadata(&metadata, &payload_type);
|
||||
|
||||
let payload: FullPayload<E> = match payload_type.data {
|
||||
ProduceBlockV3Response::Full(payload) => {
|
||||
@ -3626,11 +3661,12 @@ impl ApiTester {
|
||||
|
||||
let (proposer_index, randao_reveal) = self.get_test_randao(slot, epoch).await;
|
||||
|
||||
let (payload_type, _) = self
|
||||
let (payload_type, metadata) = self
|
||||
.client
|
||||
.get_validator_blocks_v3::<E>(slot, &randao_reveal, None, Some(u64::MAX))
|
||||
.await
|
||||
.unwrap();
|
||||
Self::check_block_v3_metadata(&metadata, &payload_type);
|
||||
|
||||
let payload: BlindedPayload<E> = match payload_type.data {
|
||||
ProduceBlockV3Response::Blinded(payload) => {
|
||||
@ -3730,11 +3766,12 @@ impl ApiTester {
|
||||
|
||||
let (proposer_index, randao_reveal) = self.get_test_randao(slot, epoch).await;
|
||||
|
||||
let (payload_type, _) = self
|
||||
let (payload_type, metadata) = self
|
||||
.client
|
||||
.get_validator_blocks_v3::<E>(slot, &randao_reveal, None, None)
|
||||
.await
|
||||
.unwrap();
|
||||
Self::check_block_v3_metadata(&metadata, &payload_type);
|
||||
|
||||
let payload: BlindedPayload<E> = match payload_type.data {
|
||||
ProduceBlockV3Response::Blinded(payload) => {
|
||||
@ -3806,11 +3843,12 @@ impl ApiTester {
|
||||
|
||||
let (_, randao_reveal) = self.get_test_randao(slot, epoch).await;
|
||||
|
||||
let (payload_type, _) = self
|
||||
let (payload_type, metadata) = self
|
||||
.client
|
||||
.get_validator_blocks_v3::<E>(slot, &randao_reveal, None, None)
|
||||
.await
|
||||
.unwrap();
|
||||
Self::check_block_v3_metadata(&metadata, &payload_type);
|
||||
|
||||
let payload: BlindedPayload<E> = match payload_type.data {
|
||||
ProduceBlockV3Response::Blinded(payload) => {
|
||||
@ -3896,11 +3934,12 @@ impl ApiTester {
|
||||
|
||||
let (_, randao_reveal) = self.get_test_randao(slot, epoch).await;
|
||||
|
||||
let (payload_type, _) = self
|
||||
let (payload_type, metadata) = self
|
||||
.client
|
||||
.get_validator_blocks_v3::<E>(slot, &randao_reveal, None, None)
|
||||
.await
|
||||
.unwrap();
|
||||
Self::check_block_v3_metadata(&metadata, &payload_type);
|
||||
|
||||
let payload: FullPayload<E> = match payload_type.data {
|
||||
ProduceBlockV3Response::Full(payload) => {
|
||||
@ -3982,11 +4021,12 @@ impl ApiTester {
|
||||
.unwrap();
|
||||
let (_, randao_reveal) = self.get_test_randao(slot, epoch).await;
|
||||
|
||||
let (payload_type, _) = self
|
||||
let (payload_type, metadata) = self
|
||||
.client
|
||||
.get_validator_blocks_v3::<E>(slot, &randao_reveal, None, None)
|
||||
.await
|
||||
.unwrap();
|
||||
Self::check_block_v3_metadata(&metadata, &payload_type);
|
||||
|
||||
let payload: FullPayload<E> = match payload_type.data {
|
||||
ProduceBlockV3Response::Full(payload) => {
|
||||
@ -4068,11 +4108,12 @@ impl ApiTester {
|
||||
|
||||
let (_, randao_reveal) = self.get_test_randao(slot, epoch).await;
|
||||
|
||||
let (payload_type, _) = self
|
||||
let (payload_type, metadata) = self
|
||||
.client
|
||||
.get_validator_blocks_v3::<E>(slot, &randao_reveal, None, None)
|
||||
.await
|
||||
.unwrap();
|
||||
Self::check_block_v3_metadata(&metadata, &payload_type);
|
||||
|
||||
let payload: FullPayload<E> = match payload_type.data {
|
||||
ProduceBlockV3Response::Full(payload) => {
|
||||
@ -4152,11 +4193,12 @@ impl ApiTester {
|
||||
|
||||
let (_, randao_reveal) = self.get_test_randao(slot, epoch).await;
|
||||
|
||||
let (payload_type, _) = self
|
||||
let (payload_type, metadata) = self
|
||||
.client
|
||||
.get_validator_blocks_v3::<E>(slot, &randao_reveal, None, None)
|
||||
.await
|
||||
.unwrap();
|
||||
Self::check_block_v3_metadata(&metadata, &payload_type);
|
||||
|
||||
let payload: FullPayload<E> = match payload_type.data {
|
||||
ProduceBlockV3Response::Full(payload) => {
|
||||
@ -4208,11 +4250,12 @@ impl ApiTester {
|
||||
|
||||
let (_, randao_reveal) = self.get_test_randao(slot, epoch).await;
|
||||
|
||||
let (payload_type, _) = self
|
||||
let (payload_type, metadata) = self
|
||||
.client
|
||||
.get_validator_blocks_v3::<E>(slot, &randao_reveal, None, None)
|
||||
.await
|
||||
.unwrap();
|
||||
Self::check_block_v3_metadata(&metadata, &payload_type);
|
||||
|
||||
match payload_type.data {
|
||||
ProduceBlockV3Response::Full(_) => (),
|
||||
@ -4274,11 +4317,12 @@ impl ApiTester {
|
||||
|
||||
let (_, randao_reveal) = self.get_test_randao(slot, epoch).await;
|
||||
|
||||
let (payload_type, _) = self
|
||||
let (payload_type, metadata) = self
|
||||
.client
|
||||
.get_validator_blocks_v3::<E>(slot, &randao_reveal, None, None)
|
||||
.await
|
||||
.unwrap();
|
||||
Self::check_block_v3_metadata(&metadata, &payload_type);
|
||||
|
||||
match payload_type.data {
|
||||
ProduceBlockV3Response::Full(_) => (),
|
||||
@ -4382,11 +4426,12 @@ impl ApiTester {
|
||||
.get_test_randao(next_slot, next_slot.epoch(E::slots_per_epoch()))
|
||||
.await;
|
||||
|
||||
let (payload_type, _) = self
|
||||
let (payload_type, metadata) = self
|
||||
.client
|
||||
.get_validator_blocks_v3::<E>(next_slot, &randao_reveal, None, None)
|
||||
.await
|
||||
.unwrap();
|
||||
Self::check_block_v3_metadata(&metadata, &payload_type);
|
||||
|
||||
match payload_type.data {
|
||||
ProduceBlockV3Response::Blinded(_) => (),
|
||||
@ -4402,11 +4447,12 @@ impl ApiTester {
|
||||
.get_test_randao(next_slot, next_slot.epoch(E::slots_per_epoch()))
|
||||
.await;
|
||||
|
||||
let (payload_type, _) = self
|
||||
let (payload_type, metadata) = self
|
||||
.client
|
||||
.get_validator_blocks_v3::<E>(next_slot, &randao_reveal, None, None)
|
||||
.await
|
||||
.unwrap();
|
||||
Self::check_block_v3_metadata(&metadata, &payload_type);
|
||||
|
||||
match payload_type.data {
|
||||
ProduceBlockV3Response::Full(_) => (),
|
||||
@ -4530,11 +4576,12 @@ impl ApiTester {
|
||||
.get_test_randao(next_slot, next_slot.epoch(E::slots_per_epoch()))
|
||||
.await;
|
||||
|
||||
let (payload_type, _) = self
|
||||
let (payload_type, metadata) = self
|
||||
.client
|
||||
.get_validator_blocks_v3::<E>(next_slot, &randao_reveal, None, None)
|
||||
.await
|
||||
.unwrap();
|
||||
Self::check_block_v3_metadata(&metadata, &payload_type);
|
||||
|
||||
match payload_type.data {
|
||||
ProduceBlockV3Response::Full(_) => (),
|
||||
@ -4560,11 +4607,12 @@ impl ApiTester {
|
||||
.get_test_randao(next_slot, next_slot.epoch(E::slots_per_epoch()))
|
||||
.await;
|
||||
|
||||
let (payload_type, _) = self
|
||||
let (payload_type, metadata) = self
|
||||
.client
|
||||
.get_validator_blocks_v3::<E>(next_slot, &randao_reveal, None, None)
|
||||
.await
|
||||
.unwrap();
|
||||
Self::check_block_v3_metadata(&metadata, &payload_type);
|
||||
|
||||
match payload_type.data {
|
||||
ProduceBlockV3Response::Blinded(_) => (),
|
||||
@ -4640,11 +4688,12 @@ impl ApiTester {
|
||||
|
||||
let (proposer_index, randao_reveal) = self.get_test_randao(slot, epoch).await;
|
||||
|
||||
let (payload_type, _) = self
|
||||
let (payload_type, metadata) = self
|
||||
.client
|
||||
.get_validator_blocks_v3::<E>(slot, &randao_reveal, None, None)
|
||||
.await
|
||||
.unwrap();
|
||||
Self::check_block_v3_metadata(&metadata, &payload_type);
|
||||
|
||||
let payload: FullPayload<E> = match payload_type.data {
|
||||
ProduceBlockV3Response::Full(payload) => {
|
||||
@ -4709,11 +4758,12 @@ impl ApiTester {
|
||||
|
||||
let (_, randao_reveal) = self.get_test_randao(slot, epoch).await;
|
||||
|
||||
let (payload_type, _) = self
|
||||
let (payload_type, metadata) = self
|
||||
.client
|
||||
.get_validator_blocks_v3::<E>(slot, &randao_reveal, None, None)
|
||||
.await
|
||||
.unwrap();
|
||||
Self::check_block_v3_metadata(&metadata, &payload_type);
|
||||
|
||||
match payload_type.data {
|
||||
ProduceBlockV3Response::Blinded(_) => (),
|
||||
@ -4773,11 +4823,12 @@ impl ApiTester {
|
||||
|
||||
let (_, randao_reveal) = self.get_test_randao(slot, epoch).await;
|
||||
|
||||
let (payload_type, _) = self
|
||||
let (payload_type, metadata) = self
|
||||
.client
|
||||
.get_validator_blocks_v3::<E>(slot, &randao_reveal, None, None)
|
||||
.await
|
||||
.unwrap();
|
||||
Self::check_block_v3_metadata(&metadata, &payload_type);
|
||||
|
||||
match payload_type.data {
|
||||
ProduceBlockV3Response::Full(_) => (),
|
||||
@ -4837,11 +4888,12 @@ impl ApiTester {
|
||||
|
||||
let (_, randao_reveal) = self.get_test_randao(slot, epoch).await;
|
||||
|
||||
let (payload_type, _) = self
|
||||
let (payload_type, metadata) = self
|
||||
.client
|
||||
.get_validator_blocks_v3::<E>(slot, &randao_reveal, None, None)
|
||||
.await
|
||||
.unwrap();
|
||||
Self::check_block_v3_metadata(&metadata, &payload_type);
|
||||
|
||||
match payload_type.data {
|
||||
ProduceBlockV3Response::Full(_) => (),
|
||||
@ -4899,11 +4951,12 @@ impl ApiTester {
|
||||
let epoch = self.chain.epoch().unwrap();
|
||||
let (_, randao_reveal) = self.get_test_randao(slot, epoch).await;
|
||||
|
||||
let (payload_type, _) = self
|
||||
let (payload_type, metadata) = self
|
||||
.client
|
||||
.get_validator_blocks_v3::<E>(slot, &randao_reveal, None, None)
|
||||
.await
|
||||
.unwrap();
|
||||
Self::check_block_v3_metadata(&metadata, &payload_type);
|
||||
|
||||
let _block_contents = match payload_type.data {
|
||||
ProduceBlockV3Response::Blinded(payload) => payload,
|
||||
@ -4971,11 +5024,12 @@ impl ApiTester {
|
||||
let epoch = self.chain.epoch().unwrap();
|
||||
let (_, randao_reveal) = self.get_test_randao(slot, epoch).await;
|
||||
|
||||
let (payload_type, _) = self
|
||||
let (payload_type, metadata) = self
|
||||
.client
|
||||
.get_validator_blocks_v3::<E>(slot, &randao_reveal, None, None)
|
||||
.await
|
||||
.unwrap();
|
||||
Self::check_block_v3_metadata(&metadata, &payload_type);
|
||||
|
||||
match payload_type.data {
|
||||
ProduceBlockV3Response::Full(_) => (),
|
||||
@ -5057,26 +5111,6 @@ impl ApiTester {
|
||||
self
|
||||
}
|
||||
|
||||
pub async fn test_get_lighthouse_beacon_states_ssz(self) -> Self {
|
||||
for state_id in self.interesting_state_ids() {
|
||||
let result = self
|
||||
.client
|
||||
.get_lighthouse_beacon_states_ssz(&state_id.0, &self.chain.spec)
|
||||
.await
|
||||
.unwrap();
|
||||
|
||||
let mut expected = state_id
|
||||
.state(&self.chain)
|
||||
.ok()
|
||||
.map(|(state, _execution_optimistic, _finalized)| state);
|
||||
expected.as_mut().map(|state| state.drop_all_caches());
|
||||
|
||||
assert_eq!(result, expected, "{:?}", state_id);
|
||||
}
|
||||
|
||||
self
|
||||
}
|
||||
|
||||
pub async fn test_get_lighthouse_staking(self) -> Self {
|
||||
let result = self.client.get_lighthouse_staking().await.unwrap();
|
||||
|
||||
@ -6373,8 +6407,6 @@ async fn lighthouse_endpoints() {
|
||||
.await
|
||||
.test_get_lighthouse_eth1_deposit_cache()
|
||||
.await
|
||||
.test_get_lighthouse_beacon_states_ssz()
|
||||
.await
|
||||
.test_get_lighthouse_staking()
|
||||
.await
|
||||
.test_get_lighthouse_database_info()
|
||||
|
||||
@ -5,6 +5,7 @@ authors = ["Sigma Prime <contact@sigmaprime.io>"]
|
||||
edition = { workspace = true }
|
||||
|
||||
[dependencies]
|
||||
async-channel = { workspace = true }
|
||||
discv5 = { workspace = true }
|
||||
unsigned-varint = { version = "0.6", features = ["codec"] }
|
||||
ssz_types = { workspace = true }
|
||||
@ -42,22 +43,36 @@ superstruct = { workspace = true }
|
||||
prometheus-client = "0.22.0"
|
||||
unused_port = { workspace = true }
|
||||
delay_map = { workspace = true }
|
||||
void = "1"
|
||||
libp2p-mplex = { git = "https://github.com/sigp/rust-libp2p/", rev = "cfa3275ca17e502799ed56e555b6c0611752e369" }
|
||||
tracing = { workspace = true }
|
||||
byteorder = { workspace = true }
|
||||
bytes = { workspace = true }
|
||||
either = { workspace = true }
|
||||
|
||||
# Local dependencies
|
||||
futures-ticker = "0.0.3"
|
||||
futures-timer = "3.0.2"
|
||||
getrandom = "0.2.11"
|
||||
hex_fmt = "0.3.0"
|
||||
instant = "0.1.12"
|
||||
quick-protobuf = "0.8"
|
||||
void = "1.0.2"
|
||||
asynchronous-codec = "0.7.0"
|
||||
base64 = "0.21.5"
|
||||
libp2p-mplex = "0.41"
|
||||
quick-protobuf-codec = "0.3"
|
||||
|
||||
[dependencies.libp2p]
|
||||
git = "https://github.com/sigp/rust-libp2p/"
|
||||
rev = "cfa3275ca17e502799ed56e555b6c0611752e369"
|
||||
version = "0.53"
|
||||
default-features = false
|
||||
features = ["identify", "yamux", "noise", "gossipsub", "dns", "tcp", "tokio", "plaintext", "secp256k1", "macros", "ecdsa", "metrics", "quic"]
|
||||
features = ["identify", "yamux", "noise", "dns", "tcp", "tokio", "plaintext", "secp256k1", "macros", "ecdsa", "metrics", "quic", "upnp"]
|
||||
|
||||
[dev-dependencies]
|
||||
slog-term = { workspace = true }
|
||||
slog-async = { workspace = true }
|
||||
tempfile = { workspace = true }
|
||||
exit-future = { workspace = true }
|
||||
quickcheck = { workspace = true }
|
||||
quickcheck_macros = { workspace = true }
|
||||
async-std = { version = "1.6.3", features = ["unstable"] }
|
||||
|
||||
[features]
|
||||
libp2p-websocket = []
|
||||
|
||||
@ -1,3 +1,4 @@
|
||||
use crate::gossipsub;
|
||||
use crate::listen_addr::{ListenAddr, ListenAddress};
|
||||
use crate::rpc::config::{InboundRateLimiterConfig, OutboundRateLimiterConfig};
|
||||
use crate::types::GossipKind;
|
||||
@ -5,7 +6,6 @@ use crate::{Enr, PeerIdSerialized};
|
||||
use directory::{
|
||||
DEFAULT_BEACON_NODE_DIR, DEFAULT_HARDCODED_NETWORK, DEFAULT_NETWORK_DIR, DEFAULT_ROOT_DIR,
|
||||
};
|
||||
use libp2p::gossipsub;
|
||||
use libp2p::Multiaddr;
|
||||
use serde::{Deserialize, Serialize};
|
||||
use sha2::{Digest, Sha256};
|
||||
@ -101,7 +101,7 @@ pub struct Config {
|
||||
/// List of libp2p nodes to initially connect to.
|
||||
pub libp2p_nodes: Vec<Multiaddr>,
|
||||
|
||||
/// List of trusted libp2p nodes which are not scored.
|
||||
/// List of trusted libp2p nodes which are not scored and marked as explicit.
|
||||
pub trusted_peers: Vec<PeerIdSerialized>,
|
||||
|
||||
/// Disables peer scoring altogether.
|
||||
@ -157,10 +157,6 @@ pub struct Config {
|
||||
|
||||
/// Configuration for the inbound rate limiter (requests received by this node).
|
||||
pub inbound_rate_limiter_config: Option<InboundRateLimiterConfig>,
|
||||
|
||||
/// Whether to disable logging duplicate gossip messages as WARN. If set to true, duplicate
|
||||
/// errors will be logged at DEBUG level.
|
||||
pub disable_duplicate_warn_logs: bool,
|
||||
}
|
||||
|
||||
impl Config {
|
||||
@ -354,7 +350,7 @@ impl Default for Config {
|
||||
enr_udp6_port: None,
|
||||
enr_quic6_port: None,
|
||||
enr_tcp6_port: None,
|
||||
target_peers: 50,
|
||||
target_peers: 100,
|
||||
gs_config,
|
||||
discv5_config,
|
||||
boot_nodes_enr: vec![],
|
||||
@ -378,7 +374,6 @@ impl Default for Config {
|
||||
outbound_rate_limiter_config: None,
|
||||
invalid_block_storage: None,
|
||||
inbound_rate_limiter_config: None,
|
||||
disable_duplicate_warn_logs: false,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@ -59,7 +59,7 @@ const MAX_DISCOVERY_RETRY: usize = 3;
|
||||
/// Note: we always allow a single FindPeers query, so we would be
|
||||
/// running a maximum of `MAX_CONCURRENT_SUBNET_QUERIES + 1`
|
||||
/// discovery queries at a time.
|
||||
const MAX_CONCURRENT_SUBNET_QUERIES: usize = 2;
|
||||
const MAX_CONCURRENT_SUBNET_QUERIES: usize = 4;
|
||||
/// The max number of subnets to search for in a single subnet discovery query.
|
||||
const MAX_SUBNETS_IN_QUERY: usize = 3;
|
||||
/// The number of closest peers to search for when doing a regular peer search.
|
||||
@ -1004,7 +1004,10 @@ impl<TSpec: EthSpec> NetworkBehaviour for Discovery<TSpec> {
|
||||
discv5::Event::SocketUpdated(socket_addr) => {
|
||||
info!(self.log, "Address updated"; "ip" => %socket_addr.ip(), "udp_port" => %socket_addr.port());
|
||||
metrics::inc_counter(&metrics::ADDRESS_UPDATE_COUNT);
|
||||
metrics::check_nat();
|
||||
// We have SOCKET_UPDATED messages. This occurs when discovery has a majority of
|
||||
// users reporting an external port and our ENR gets updated.
|
||||
// Which means we are able to do NAT traversal.
|
||||
metrics::set_gauge_vec(&metrics::NAT_OPEN, &["discv5"], 1);
|
||||
// Discv5 will have updated our local ENR. We save the updated version
|
||||
// to disk.
|
||||
|
||||
|
||||
175
beacon_node/lighthouse_network/src/gossipsub/backoff.rs
Normal file
175
beacon_node/lighthouse_network/src/gossipsub/backoff.rs
Normal file
@ -0,0 +1,175 @@
|
||||
// Copyright 2020 Sigma Prime Pty Ltd.
|
||||
//
|
||||
// Permission is hereby granted, free of charge, to any person obtaining a
|
||||
// copy of this software and associated documentation files (the "Software"),
|
||||
// to deal in the Software without restriction, including without limitation
|
||||
// the rights to use, copy, modify, merge, publish, distribute, sublicense,
|
||||
// and/or sell copies of the Software, and to permit persons to whom the
|
||||
// Software is furnished to do so, subject to the following conditions:
|
||||
//
|
||||
// The above copyright notice and this permission notice shall be included in
|
||||
// all copies or substantial portions of the Software.
|
||||
//
|
||||
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
|
||||
// OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
|
||||
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
||||
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
|
||||
// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
|
||||
// DEALINGS IN THE SOFTWARE.
|
||||
|
||||
//! Data structure for efficiently storing known back-off's when pruning peers.
|
||||
use crate::gossipsub::topic::TopicHash;
|
||||
use instant::Instant;
|
||||
use libp2p::identity::PeerId;
|
||||
use std::collections::{
|
||||
hash_map::{Entry, HashMap},
|
||||
HashSet,
|
||||
};
|
||||
use std::time::Duration;
|
||||
|
||||
#[derive(Copy, Clone)]
|
||||
struct HeartbeatIndex(usize);
|
||||
|
||||
/// Stores backoffs in an efficient manner.
|
||||
pub(crate) struct BackoffStorage {
|
||||
/// Stores backoffs and the index in backoffs_by_heartbeat per peer per topic.
|
||||
backoffs: HashMap<TopicHash, HashMap<PeerId, (Instant, HeartbeatIndex)>>,
|
||||
/// Stores peer topic pairs per heartbeat (this is cyclic the current index is
|
||||
/// heartbeat_index).
|
||||
backoffs_by_heartbeat: Vec<HashSet<(TopicHash, PeerId)>>,
|
||||
/// The index in the backoffs_by_heartbeat vector corresponding to the current heartbeat.
|
||||
heartbeat_index: HeartbeatIndex,
|
||||
/// The heartbeat interval duration from the config.
|
||||
heartbeat_interval: Duration,
|
||||
/// Backoff slack from the config.
|
||||
backoff_slack: u32,
|
||||
}
|
||||
|
||||
impl BackoffStorage {
|
||||
fn heartbeats(d: &Duration, heartbeat_interval: &Duration) -> usize {
|
||||
((d.as_nanos() + heartbeat_interval.as_nanos() - 1) / heartbeat_interval.as_nanos())
|
||||
as usize
|
||||
}
|
||||
|
||||
pub(crate) fn new(
|
||||
prune_backoff: &Duration,
|
||||
heartbeat_interval: Duration,
|
||||
backoff_slack: u32,
|
||||
) -> BackoffStorage {
|
||||
// We add one additional slot for partial heartbeat
|
||||
let max_heartbeats =
|
||||
Self::heartbeats(prune_backoff, &heartbeat_interval) + backoff_slack as usize + 1;
|
||||
BackoffStorage {
|
||||
backoffs: HashMap::new(),
|
||||
backoffs_by_heartbeat: vec![HashSet::new(); max_heartbeats],
|
||||
heartbeat_index: HeartbeatIndex(0),
|
||||
heartbeat_interval,
|
||||
backoff_slack,
|
||||
}
|
||||
}
|
||||
|
||||
/// Updates the backoff for a peer (if there is already a more restrictive backoff then this call
|
||||
/// doesn't change anything).
|
||||
pub(crate) fn update_backoff(&mut self, topic: &TopicHash, peer: &PeerId, time: Duration) {
|
||||
let instant = Instant::now() + time;
|
||||
let insert_into_backoffs_by_heartbeat =
|
||||
|heartbeat_index: HeartbeatIndex,
|
||||
backoffs_by_heartbeat: &mut Vec<HashSet<_>>,
|
||||
heartbeat_interval,
|
||||
backoff_slack| {
|
||||
let pair = (topic.clone(), *peer);
|
||||
let index = (heartbeat_index.0
|
||||
+ Self::heartbeats(&time, heartbeat_interval)
|
||||
+ backoff_slack as usize)
|
||||
% backoffs_by_heartbeat.len();
|
||||
backoffs_by_heartbeat[index].insert(pair);
|
||||
HeartbeatIndex(index)
|
||||
};
|
||||
match self.backoffs.entry(topic.clone()).or_default().entry(*peer) {
|
||||
Entry::Occupied(mut o) => {
|
||||
let (backoff, index) = o.get();
|
||||
if backoff < &instant {
|
||||
let pair = (topic.clone(), *peer);
|
||||
if let Some(s) = self.backoffs_by_heartbeat.get_mut(index.0) {
|
||||
s.remove(&pair);
|
||||
}
|
||||
let index = insert_into_backoffs_by_heartbeat(
|
||||
self.heartbeat_index,
|
||||
&mut self.backoffs_by_heartbeat,
|
||||
&self.heartbeat_interval,
|
||||
self.backoff_slack,
|
||||
);
|
||||
o.insert((instant, index));
|
||||
}
|
||||
}
|
||||
Entry::Vacant(v) => {
|
||||
let index = insert_into_backoffs_by_heartbeat(
|
||||
self.heartbeat_index,
|
||||
&mut self.backoffs_by_heartbeat,
|
||||
&self.heartbeat_interval,
|
||||
self.backoff_slack,
|
||||
);
|
||||
v.insert((instant, index));
|
||||
}
|
||||
};
|
||||
}
|
||||
|
||||
/// Checks if a given peer is backoffed for the given topic. This method respects the
|
||||
/// configured BACKOFF_SLACK and may return true even if the backup is already over.
|
||||
/// It is guaranteed to return false if the backoff is not over and eventually if enough time
|
||||
/// passed true if the backoff is over.
|
||||
///
|
||||
/// This method should be used for deciding if we can already send a GRAFT to a previously
|
||||
/// backoffed peer.
|
||||
pub(crate) fn is_backoff_with_slack(&self, topic: &TopicHash, peer: &PeerId) -> bool {
|
||||
self.backoffs
|
||||
.get(topic)
|
||||
.map_or(false, |m| m.contains_key(peer))
|
||||
}
|
||||
|
||||
pub(crate) fn get_backoff_time(&self, topic: &TopicHash, peer: &PeerId) -> Option<Instant> {
|
||||
Self::get_backoff_time_from_backoffs(&self.backoffs, topic, peer)
|
||||
}
|
||||
|
||||
fn get_backoff_time_from_backoffs(
|
||||
backoffs: &HashMap<TopicHash, HashMap<PeerId, (Instant, HeartbeatIndex)>>,
|
||||
topic: &TopicHash,
|
||||
peer: &PeerId,
|
||||
) -> Option<Instant> {
|
||||
backoffs
|
||||
.get(topic)
|
||||
.and_then(|m| m.get(peer).map(|(i, _)| *i))
|
||||
}
|
||||
|
||||
/// Applies a heartbeat. That should be called regularly in intervals of length
|
||||
/// `heartbeat_interval`.
|
||||
pub(crate) fn heartbeat(&mut self) {
|
||||
// Clean up backoffs_by_heartbeat
|
||||
if let Some(s) = self.backoffs_by_heartbeat.get_mut(self.heartbeat_index.0) {
|
||||
let backoffs = &mut self.backoffs;
|
||||
let slack = self.heartbeat_interval * self.backoff_slack;
|
||||
let now = Instant::now();
|
||||
s.retain(|(topic, peer)| {
|
||||
let keep = match Self::get_backoff_time_from_backoffs(backoffs, topic, peer) {
|
||||
Some(backoff_time) => backoff_time + slack > now,
|
||||
None => false,
|
||||
};
|
||||
if !keep {
|
||||
//remove from backoffs
|
||||
if let Entry::Occupied(mut m) = backoffs.entry(topic.clone()) {
|
||||
if m.get_mut().remove(peer).is_some() && m.get().is_empty() {
|
||||
m.remove();
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
keep
|
||||
});
|
||||
}
|
||||
|
||||
// Increase heartbeat index
|
||||
self.heartbeat_index =
|
||||
HeartbeatIndex((self.heartbeat_index.0 + 1) % self.backoffs_by_heartbeat.len());
|
||||
}
|
||||
}
|
||||
3488
beacon_node/lighthouse_network/src/gossipsub/behaviour.rs
Normal file
3488
beacon_node/lighthouse_network/src/gossipsub/behaviour.rs
Normal file
File diff suppressed because it is too large
Load Diff
5243
beacon_node/lighthouse_network/src/gossipsub/behaviour/tests.rs
Normal file
5243
beacon_node/lighthouse_network/src/gossipsub/behaviour/tests.rs
Normal file
File diff suppressed because it is too large
Load Diff
1026
beacon_node/lighthouse_network/src/gossipsub/config.rs
Normal file
1026
beacon_node/lighthouse_network/src/gossipsub/config.rs
Normal file
File diff suppressed because it is too large
Load Diff
159
beacon_node/lighthouse_network/src/gossipsub/error.rs
Normal file
159
beacon_node/lighthouse_network/src/gossipsub/error.rs
Normal file
@ -0,0 +1,159 @@
|
||||
// Copyright 2020 Sigma Prime Pty Ltd.
|
||||
//
|
||||
// Permission is hereby granted, free of charge, to any person obtaining a
|
||||
// copy of this software and associated documentation files (the "Software"),
|
||||
// to deal in the Software without restriction, including without limitation
|
||||
// the rights to use, copy, modify, merge, publish, distribute, sublicense,
|
||||
// and/or sell copies of the Software, and to permit persons to whom the
|
||||
// Software is furnished to do so, subject to the following conditions:
|
||||
//
|
||||
// The above copyright notice and this permission notice shall be included in
|
||||
// all copies or substantial portions of the Software.
|
||||
//
|
||||
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
|
||||
// OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
|
||||
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
||||
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
|
||||
// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
|
||||
// DEALINGS IN THE SOFTWARE.
|
||||
|
||||
//! Error types that can result from gossipsub.
|
||||
|
||||
use libp2p::identity::SigningError;
|
||||
|
||||
/// Error associated with publishing a gossipsub message.
|
||||
#[derive(Debug)]
|
||||
pub enum PublishError {
|
||||
/// This message has already been published.
|
||||
Duplicate,
|
||||
/// An error occurred whilst signing the message.
|
||||
SigningError(SigningError),
|
||||
/// There were no peers to send this message to.
|
||||
InsufficientPeers,
|
||||
/// The overall message was too large. This could be due to excessive topics or an excessive
|
||||
/// message size.
|
||||
MessageTooLarge,
|
||||
/// The compression algorithm failed.
|
||||
TransformFailed(std::io::Error),
|
||||
/// Messages could not be sent because all queues for peers were full. The usize represents the
|
||||
/// number of peers that have full queues.
|
||||
AllQueuesFull(usize),
|
||||
}
|
||||
|
||||
impl std::fmt::Display for PublishError {
|
||||
fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result {
|
||||
write!(f, "{self:?}")
|
||||
}
|
||||
}
|
||||
|
||||
impl std::error::Error for PublishError {
|
||||
fn source(&self) -> Option<&(dyn std::error::Error + 'static)> {
|
||||
match self {
|
||||
Self::SigningError(err) => Some(err),
|
||||
Self::TransformFailed(err) => Some(err),
|
||||
_ => None,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// Error associated with subscribing to a topic.
|
||||
#[derive(Debug)]
|
||||
pub enum SubscriptionError {
|
||||
/// Couldn't publish our subscription
|
||||
PublishError(PublishError),
|
||||
/// We are not allowed to subscribe to this topic by the subscription filter
|
||||
NotAllowed,
|
||||
}
|
||||
|
||||
impl std::fmt::Display for SubscriptionError {
|
||||
fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result {
|
||||
write!(f, "{self:?}")
|
||||
}
|
||||
}
|
||||
|
||||
impl std::error::Error for SubscriptionError {
|
||||
fn source(&self) -> Option<&(dyn std::error::Error + 'static)> {
|
||||
match self {
|
||||
Self::PublishError(err) => Some(err),
|
||||
_ => None,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl From<SigningError> for PublishError {
|
||||
fn from(error: SigningError) -> Self {
|
||||
PublishError::SigningError(error)
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Debug, Clone, Copy)]
|
||||
pub enum ValidationError {
|
||||
/// The message has an invalid signature,
|
||||
InvalidSignature,
|
||||
/// The sequence number was empty, expected a value.
|
||||
EmptySequenceNumber,
|
||||
/// The sequence number was the incorrect size
|
||||
InvalidSequenceNumber,
|
||||
/// The PeerId was invalid
|
||||
InvalidPeerId,
|
||||
/// Signature existed when validation has been sent to
|
||||
/// [`crate::behaviour::MessageAuthenticity::Anonymous`].
|
||||
SignaturePresent,
|
||||
/// Sequence number existed when validation has been sent to
|
||||
/// [`crate::behaviour::MessageAuthenticity::Anonymous`].
|
||||
SequenceNumberPresent,
|
||||
/// Message source existed when validation has been sent to
|
||||
/// [`crate::behaviour::MessageAuthenticity::Anonymous`].
|
||||
MessageSourcePresent,
|
||||
/// The data transformation failed.
|
||||
TransformFailed,
|
||||
}
|
||||
|
||||
impl std::fmt::Display for ValidationError {
|
||||
fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result {
|
||||
write!(f, "{self:?}")
|
||||
}
|
||||
}
|
||||
|
||||
impl std::error::Error for ValidationError {}
|
||||
|
||||
impl From<std::io::Error> for PublishError {
|
||||
fn from(error: std::io::Error) -> PublishError {
|
||||
PublishError::TransformFailed(error)
|
||||
}
|
||||
}
|
||||
|
||||
/// Error associated with Config building.
|
||||
#[derive(Debug)]
|
||||
pub enum ConfigBuilderError {
|
||||
/// Maximum transmission size is too small.
|
||||
MaxTransmissionSizeTooSmall,
|
||||
/// Histroy length less than history gossip length.
|
||||
HistoryLengthTooSmall,
|
||||
/// The ineauality doesn't hold mesh_outbound_min <= mesh_n_low <= mesh_n <= mesh_n_high
|
||||
MeshParametersInvalid,
|
||||
/// The inequality doesn't hold mesh_outbound_min <= self.config.mesh_n / 2
|
||||
MeshOutboundInvalid,
|
||||
/// unsubscribe_backoff is zero
|
||||
UnsubscribeBackoffIsZero,
|
||||
/// Invalid protocol
|
||||
InvalidProtocol,
|
||||
}
|
||||
|
||||
impl std::error::Error for ConfigBuilderError {}
|
||||
|
||||
impl std::fmt::Display for ConfigBuilderError {
|
||||
fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result {
|
||||
match self {
|
||||
Self::MaxTransmissionSizeTooSmall => {
|
||||
write!(f, "Maximum transmission size is too small")
|
||||
}
|
||||
Self::HistoryLengthTooSmall => write!(f, "Histroy length less than history gossip length"),
|
||||
Self::MeshParametersInvalid => write!(f, "The ineauality doesn't hold mesh_outbound_min <= mesh_n_low <= mesh_n <= mesh_n_high"),
|
||||
Self::MeshOutboundInvalid => write!(f, "The inequality doesn't hold mesh_outbound_min <= self.config.mesh_n / 2"),
|
||||
Self::UnsubscribeBackoffIsZero => write!(f, "unsubscribe_backoff is zero"),
|
||||
Self::InvalidProtocol => write!(f, "Invalid protocol"),
|
||||
}
|
||||
}
|
||||
}
|
||||
@ -0,0 +1,12 @@
|
||||
syntax = "proto2";
|
||||
|
||||
package compat.pb;
|
||||
|
||||
message Message {
|
||||
optional bytes from = 1;
|
||||
optional bytes data = 2;
|
||||
optional bytes seqno = 3;
|
||||
repeated string topic_ids = 4;
|
||||
optional bytes signature = 5;
|
||||
optional bytes key = 6;
|
||||
}
|
||||
@ -0,0 +1,2 @@
|
||||
// Automatically generated mod.rs
|
||||
pub mod pb;
|
||||
@ -0,0 +1,67 @@
|
||||
// Automatically generated rust module for 'compat.proto' file
|
||||
|
||||
#![allow(non_snake_case)]
|
||||
#![allow(non_upper_case_globals)]
|
||||
#![allow(non_camel_case_types)]
|
||||
#![allow(unused_imports)]
|
||||
#![allow(unknown_lints)]
|
||||
#![allow(clippy::all)]
|
||||
#![cfg_attr(rustfmt, rustfmt_skip)]
|
||||
|
||||
|
||||
use quick_protobuf::{MessageInfo, MessageRead, MessageWrite, BytesReader, Writer, WriterBackend, Result};
|
||||
use quick_protobuf::sizeofs::*;
|
||||
use super::super::*;
|
||||
|
||||
#[allow(clippy::derive_partial_eq_without_eq)]
|
||||
#[derive(Debug, Default, PartialEq, Clone)]
|
||||
pub struct Message {
|
||||
pub from: Option<Vec<u8>>,
|
||||
pub data: Option<Vec<u8>>,
|
||||
pub seqno: Option<Vec<u8>>,
|
||||
pub topic_ids: Vec<String>,
|
||||
pub signature: Option<Vec<u8>>,
|
||||
pub key: Option<Vec<u8>>,
|
||||
}
|
||||
|
||||
impl<'a> MessageRead<'a> for Message {
|
||||
fn from_reader(r: &mut BytesReader, bytes: &'a [u8]) -> Result<Self> {
|
||||
let mut msg = Self::default();
|
||||
while !r.is_eof() {
|
||||
match r.next_tag(bytes) {
|
||||
Ok(10) => msg.from = Some(r.read_bytes(bytes)?.to_owned()),
|
||||
Ok(18) => msg.data = Some(r.read_bytes(bytes)?.to_owned()),
|
||||
Ok(26) => msg.seqno = Some(r.read_bytes(bytes)?.to_owned()),
|
||||
Ok(34) => msg.topic_ids.push(r.read_string(bytes)?.to_owned()),
|
||||
Ok(42) => msg.signature = Some(r.read_bytes(bytes)?.to_owned()),
|
||||
Ok(50) => msg.key = Some(r.read_bytes(bytes)?.to_owned()),
|
||||
Ok(t) => { r.read_unknown(bytes, t)?; }
|
||||
Err(e) => return Err(e),
|
||||
}
|
||||
}
|
||||
Ok(msg)
|
||||
}
|
||||
}
|
||||
|
||||
impl MessageWrite for Message {
|
||||
fn get_size(&self) -> usize {
|
||||
0
|
||||
+ self.from.as_ref().map_or(0, |m| 1 + sizeof_len((m).len()))
|
||||
+ self.data.as_ref().map_or(0, |m| 1 + sizeof_len((m).len()))
|
||||
+ self.seqno.as_ref().map_or(0, |m| 1 + sizeof_len((m).len()))
|
||||
+ self.topic_ids.iter().map(|s| 1 + sizeof_len((s).len())).sum::<usize>()
|
||||
+ self.signature.as_ref().map_or(0, |m| 1 + sizeof_len((m).len()))
|
||||
+ self.key.as_ref().map_or(0, |m| 1 + sizeof_len((m).len()))
|
||||
}
|
||||
|
||||
fn write_message<W: WriterBackend>(&self, w: &mut Writer<W>) -> Result<()> {
|
||||
if let Some(ref s) = self.from { w.write_with_tag(10, |w| w.write_bytes(&**s))?; }
|
||||
if let Some(ref s) = self.data { w.write_with_tag(18, |w| w.write_bytes(&**s))?; }
|
||||
if let Some(ref s) = self.seqno { w.write_with_tag(26, |w| w.write_bytes(&**s))?; }
|
||||
for s in &self.topic_ids { w.write_with_tag(34, |w| w.write_string(&**s))?; }
|
||||
if let Some(ref s) = self.signature { w.write_with_tag(42, |w| w.write_bytes(&**s))?; }
|
||||
if let Some(ref s) = self.key { w.write_with_tag(50, |w| w.write_bytes(&**s))?; }
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
|
||||
@ -0,0 +1,2 @@
|
||||
// Automatically generated mod.rs
|
||||
pub mod pb;
|
||||
@ -0,0 +1,567 @@
|
||||
// Automatically generated rust module for 'rpc.proto' file
|
||||
|
||||
#![allow(non_snake_case)]
|
||||
#![allow(non_upper_case_globals)]
|
||||
#![allow(non_camel_case_types)]
|
||||
#![allow(unused_imports)]
|
||||
#![allow(unknown_lints)]
|
||||
#![allow(clippy::all)]
|
||||
#![cfg_attr(rustfmt, rustfmt_skip)]
|
||||
|
||||
|
||||
use quick_protobuf::{MessageInfo, MessageRead, MessageWrite, BytesReader, Writer, WriterBackend, Result};
|
||||
use quick_protobuf::sizeofs::*;
|
||||
use super::super::*;
|
||||
|
||||
#[allow(clippy::derive_partial_eq_without_eq)]
|
||||
#[derive(Debug, Default, PartialEq, Clone)]
|
||||
pub struct RPC {
|
||||
pub subscriptions: Vec<gossipsub::pb::mod_RPC::SubOpts>,
|
||||
pub publish: Vec<gossipsub::pb::Message>,
|
||||
pub control: Option<gossipsub::pb::ControlMessage>,
|
||||
}
|
||||
|
||||
impl<'a> MessageRead<'a> for RPC {
|
||||
fn from_reader(r: &mut BytesReader, bytes: &'a [u8]) -> Result<Self> {
|
||||
let mut msg = Self::default();
|
||||
while !r.is_eof() {
|
||||
match r.next_tag(bytes) {
|
||||
Ok(10) => msg.subscriptions.push(r.read_message::<gossipsub::pb::mod_RPC::SubOpts>(bytes)?),
|
||||
Ok(18) => msg.publish.push(r.read_message::<gossipsub::pb::Message>(bytes)?),
|
||||
Ok(26) => msg.control = Some(r.read_message::<gossipsub::pb::ControlMessage>(bytes)?),
|
||||
Ok(t) => { r.read_unknown(bytes, t)?; }
|
||||
Err(e) => return Err(e),
|
||||
}
|
||||
}
|
||||
Ok(msg)
|
||||
}
|
||||
}
|
||||
|
||||
impl MessageWrite for RPC {
|
||||
fn get_size(&self) -> usize {
|
||||
0
|
||||
+ self.subscriptions.iter().map(|s| 1 + sizeof_len((s).get_size())).sum::<usize>()
|
||||
+ self.publish.iter().map(|s| 1 + sizeof_len((s).get_size())).sum::<usize>()
|
||||
+ self.control.as_ref().map_or(0, |m| 1 + sizeof_len((m).get_size()))
|
||||
}
|
||||
|
||||
fn write_message<W: WriterBackend>(&self, w: &mut Writer<W>) -> Result<()> {
|
||||
for s in &self.subscriptions { w.write_with_tag(10, |w| w.write_message(s))?; }
|
||||
for s in &self.publish { w.write_with_tag(18, |w| w.write_message(s))?; }
|
||||
if let Some(ref s) = self.control { w.write_with_tag(26, |w| w.write_message(s))?; }
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
|
||||
pub mod mod_RPC {
|
||||
|
||||
use super::*;
|
||||
|
||||
#[allow(clippy::derive_partial_eq_without_eq)]
|
||||
#[derive(Debug, Default, PartialEq, Clone)]
|
||||
pub struct SubOpts {
|
||||
pub subscribe: Option<bool>,
|
||||
pub topic_id: Option<String>,
|
||||
}
|
||||
|
||||
impl<'a> MessageRead<'a> for SubOpts {
|
||||
fn from_reader(r: &mut BytesReader, bytes: &'a [u8]) -> Result<Self> {
|
||||
let mut msg = Self::default();
|
||||
while !r.is_eof() {
|
||||
match r.next_tag(bytes) {
|
||||
Ok(8) => msg.subscribe = Some(r.read_bool(bytes)?),
|
||||
Ok(18) => msg.topic_id = Some(r.read_string(bytes)?.to_owned()),
|
||||
Ok(t) => { r.read_unknown(bytes, t)?; }
|
||||
Err(e) => return Err(e),
|
||||
}
|
||||
}
|
||||
Ok(msg)
|
||||
}
|
||||
}
|
||||
|
||||
impl MessageWrite for SubOpts {
|
||||
fn get_size(&self) -> usize {
|
||||
0
|
||||
+ self.subscribe.as_ref().map_or(0, |m| 1 + sizeof_varint(*(m) as u64))
|
||||
+ self.topic_id.as_ref().map_or(0, |m| 1 + sizeof_len((m).len()))
|
||||
}
|
||||
|
||||
fn write_message<W: WriterBackend>(&self, w: &mut Writer<W>) -> Result<()> {
|
||||
if let Some(ref s) = self.subscribe { w.write_with_tag(8, |w| w.write_bool(*s))?; }
|
||||
if let Some(ref s) = self.topic_id { w.write_with_tag(18, |w| w.write_string(&**s))?; }
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
#[allow(clippy::derive_partial_eq_without_eq)]
|
||||
#[derive(Debug, Default, PartialEq, Clone)]
|
||||
pub struct Message {
|
||||
pub from: Option<Vec<u8>>,
|
||||
pub data: Option<Vec<u8>>,
|
||||
pub seqno: Option<Vec<u8>>,
|
||||
pub topic: String,
|
||||
pub signature: Option<Vec<u8>>,
|
||||
pub key: Option<Vec<u8>>,
|
||||
}
|
||||
|
||||
impl<'a> MessageRead<'a> for Message {
|
||||
fn from_reader(r: &mut BytesReader, bytes: &'a [u8]) -> Result<Self> {
|
||||
let mut msg = Self::default();
|
||||
while !r.is_eof() {
|
||||
match r.next_tag(bytes) {
|
||||
Ok(10) => msg.from = Some(r.read_bytes(bytes)?.to_owned()),
|
||||
Ok(18) => msg.data = Some(r.read_bytes(bytes)?.to_owned()),
|
||||
Ok(26) => msg.seqno = Some(r.read_bytes(bytes)?.to_owned()),
|
||||
Ok(34) => msg.topic = r.read_string(bytes)?.to_owned(),
|
||||
Ok(42) => msg.signature = Some(r.read_bytes(bytes)?.to_owned()),
|
||||
Ok(50) => msg.key = Some(r.read_bytes(bytes)?.to_owned()),
|
||||
Ok(t) => { r.read_unknown(bytes, t)?; }
|
||||
Err(e) => return Err(e),
|
||||
}
|
||||
}
|
||||
Ok(msg)
|
||||
}
|
||||
}
|
||||
|
||||
impl MessageWrite for Message {
|
||||
fn get_size(&self) -> usize {
|
||||
0
|
||||
+ self.from.as_ref().map_or(0, |m| 1 + sizeof_len((m).len()))
|
||||
+ self.data.as_ref().map_or(0, |m| 1 + sizeof_len((m).len()))
|
||||
+ self.seqno.as_ref().map_or(0, |m| 1 + sizeof_len((m).len()))
|
||||
+ 1 + sizeof_len((&self.topic).len())
|
||||
+ self.signature.as_ref().map_or(0, |m| 1 + sizeof_len((m).len()))
|
||||
+ self.key.as_ref().map_or(0, |m| 1 + sizeof_len((m).len()))
|
||||
}
|
||||
|
||||
fn write_message<W: WriterBackend>(&self, w: &mut Writer<W>) -> Result<()> {
|
||||
if let Some(ref s) = self.from { w.write_with_tag(10, |w| w.write_bytes(&**s))?; }
|
||||
if let Some(ref s) = self.data { w.write_with_tag(18, |w| w.write_bytes(&**s))?; }
|
||||
if let Some(ref s) = self.seqno { w.write_with_tag(26, |w| w.write_bytes(&**s))?; }
|
||||
w.write_with_tag(34, |w| w.write_string(&**&self.topic))?;
|
||||
if let Some(ref s) = self.signature { w.write_with_tag(42, |w| w.write_bytes(&**s))?; }
|
||||
if let Some(ref s) = self.key { w.write_with_tag(50, |w| w.write_bytes(&**s))?; }
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
|
||||
#[allow(clippy::derive_partial_eq_without_eq)]
|
||||
#[derive(Debug, Default, PartialEq, Clone)]
|
||||
pub struct ControlMessage {
|
||||
pub ihave: Vec<gossipsub::pb::ControlIHave>,
|
||||
pub iwant: Vec<gossipsub::pb::ControlIWant>,
|
||||
pub graft: Vec<gossipsub::pb::ControlGraft>,
|
||||
pub prune: Vec<gossipsub::pb::ControlPrune>,
|
||||
}
|
||||
|
||||
impl<'a> MessageRead<'a> for ControlMessage {
|
||||
fn from_reader(r: &mut BytesReader, bytes: &'a [u8]) -> Result<Self> {
|
||||
let mut msg = Self::default();
|
||||
while !r.is_eof() {
|
||||
match r.next_tag(bytes) {
|
||||
Ok(10) => msg.ihave.push(r.read_message::<gossipsub::pb::ControlIHave>(bytes)?),
|
||||
Ok(18) => msg.iwant.push(r.read_message::<gossipsub::pb::ControlIWant>(bytes)?),
|
||||
Ok(26) => msg.graft.push(r.read_message::<gossipsub::pb::ControlGraft>(bytes)?),
|
||||
Ok(34) => msg.prune.push(r.read_message::<gossipsub::pb::ControlPrune>(bytes)?),
|
||||
Ok(t) => { r.read_unknown(bytes, t)?; }
|
||||
Err(e) => return Err(e),
|
||||
}
|
||||
}
|
||||
Ok(msg)
|
||||
}
|
||||
}
|
||||
|
||||
impl MessageWrite for ControlMessage {
|
||||
fn get_size(&self) -> usize {
|
||||
0
|
||||
+ self.ihave.iter().map(|s| 1 + sizeof_len((s).get_size())).sum::<usize>()
|
||||
+ self.iwant.iter().map(|s| 1 + sizeof_len((s).get_size())).sum::<usize>()
|
||||
+ self.graft.iter().map(|s| 1 + sizeof_len((s).get_size())).sum::<usize>()
|
||||
+ self.prune.iter().map(|s| 1 + sizeof_len((s).get_size())).sum::<usize>()
|
||||
}
|
||||
|
||||
fn write_message<W: WriterBackend>(&self, w: &mut Writer<W>) -> Result<()> {
|
||||
for s in &self.ihave { w.write_with_tag(10, |w| w.write_message(s))?; }
|
||||
for s in &self.iwant { w.write_with_tag(18, |w| w.write_message(s))?; }
|
||||
for s in &self.graft { w.write_with_tag(26, |w| w.write_message(s))?; }
|
||||
for s in &self.prune { w.write_with_tag(34, |w| w.write_message(s))?; }
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
|
||||
#[allow(clippy::derive_partial_eq_without_eq)]
|
||||
#[derive(Debug, Default, PartialEq, Clone)]
|
||||
pub struct ControlIHave {
|
||||
pub topic_id: Option<String>,
|
||||
pub message_ids: Vec<Vec<u8>>,
|
||||
}
|
||||
|
||||
impl<'a> MessageRead<'a> for ControlIHave {
|
||||
fn from_reader(r: &mut BytesReader, bytes: &'a [u8]) -> Result<Self> {
|
||||
let mut msg = Self::default();
|
||||
while !r.is_eof() {
|
||||
match r.next_tag(bytes) {
|
||||
Ok(10) => msg.topic_id = Some(r.read_string(bytes)?.to_owned()),
|
||||
Ok(18) => msg.message_ids.push(r.read_bytes(bytes)?.to_owned()),
|
||||
Ok(t) => { r.read_unknown(bytes, t)?; }
|
||||
Err(e) => return Err(e),
|
||||
}
|
||||
}
|
||||
Ok(msg)
|
||||
}
|
||||
}
|
||||
|
||||
impl MessageWrite for ControlIHave {
|
||||
fn get_size(&self) -> usize {
|
||||
0
|
||||
+ self.topic_id.as_ref().map_or(0, |m| 1 + sizeof_len((m).len()))
|
||||
+ self.message_ids.iter().map(|s| 1 + sizeof_len((s).len())).sum::<usize>()
|
||||
}
|
||||
|
||||
fn write_message<W: WriterBackend>(&self, w: &mut Writer<W>) -> Result<()> {
|
||||
if let Some(ref s) = self.topic_id { w.write_with_tag(10, |w| w.write_string(&**s))?; }
|
||||
for s in &self.message_ids { w.write_with_tag(18, |w| w.write_bytes(&**s))?; }
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
|
||||
#[allow(clippy::derive_partial_eq_without_eq)]
|
||||
#[derive(Debug, Default, PartialEq, Clone)]
|
||||
pub struct ControlIWant {
|
||||
pub message_ids: Vec<Vec<u8>>,
|
||||
}
|
||||
|
||||
impl<'a> MessageRead<'a> for ControlIWant {
|
||||
fn from_reader(r: &mut BytesReader, bytes: &'a [u8]) -> Result<Self> {
|
||||
let mut msg = Self::default();
|
||||
while !r.is_eof() {
|
||||
match r.next_tag(bytes) {
|
||||
Ok(10) => msg.message_ids.push(r.read_bytes(bytes)?.to_owned()),
|
||||
Ok(t) => { r.read_unknown(bytes, t)?; }
|
||||
Err(e) => return Err(e),
|
||||
}
|
||||
}
|
||||
Ok(msg)
|
||||
}
|
||||
}
|
||||
|
||||
impl MessageWrite for ControlIWant {
|
||||
fn get_size(&self) -> usize {
|
||||
0
|
||||
+ self.message_ids.iter().map(|s| 1 + sizeof_len((s).len())).sum::<usize>()
|
||||
}
|
||||
|
||||
fn write_message<W: WriterBackend>(&self, w: &mut Writer<W>) -> Result<()> {
|
||||
for s in &self.message_ids { w.write_with_tag(10, |w| w.write_bytes(&**s))?; }
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
|
||||
#[allow(clippy::derive_partial_eq_without_eq)]
|
||||
#[derive(Debug, Default, PartialEq, Clone)]
|
||||
pub struct ControlGraft {
|
||||
pub topic_id: Option<String>,
|
||||
}
|
||||
|
||||
impl<'a> MessageRead<'a> for ControlGraft {
|
||||
fn from_reader(r: &mut BytesReader, bytes: &'a [u8]) -> Result<Self> {
|
||||
let mut msg = Self::default();
|
||||
while !r.is_eof() {
|
||||
match r.next_tag(bytes) {
|
||||
Ok(10) => msg.topic_id = Some(r.read_string(bytes)?.to_owned()),
|
||||
Ok(t) => { r.read_unknown(bytes, t)?; }
|
||||
Err(e) => return Err(e),
|
||||
}
|
||||
}
|
||||
Ok(msg)
|
||||
}
|
||||
}
|
||||
|
||||
impl MessageWrite for ControlGraft {
|
||||
fn get_size(&self) -> usize {
|
||||
0
|
||||
+ self.topic_id.as_ref().map_or(0, |m| 1 + sizeof_len((m).len()))
|
||||
}
|
||||
|
||||
fn write_message<W: WriterBackend>(&self, w: &mut Writer<W>) -> Result<()> {
|
||||
if let Some(ref s) = self.topic_id { w.write_with_tag(10, |w| w.write_string(&**s))?; }
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
|
||||
#[allow(clippy::derive_partial_eq_without_eq)]
|
||||
#[derive(Debug, Default, PartialEq, Clone)]
|
||||
pub struct ControlPrune {
|
||||
pub topic_id: Option<String>,
|
||||
pub peers: Vec<gossipsub::pb::PeerInfo>,
|
||||
pub backoff: Option<u64>,
|
||||
}
|
||||
|
||||
impl<'a> MessageRead<'a> for ControlPrune {
|
||||
fn from_reader(r: &mut BytesReader, bytes: &'a [u8]) -> Result<Self> {
|
||||
let mut msg = Self::default();
|
||||
while !r.is_eof() {
|
||||
match r.next_tag(bytes) {
|
||||
Ok(10) => msg.topic_id = Some(r.read_string(bytes)?.to_owned()),
|
||||
Ok(18) => msg.peers.push(r.read_message::<gossipsub::pb::PeerInfo>(bytes)?),
|
||||
Ok(24) => msg.backoff = Some(r.read_uint64(bytes)?),
|
||||
Ok(t) => { r.read_unknown(bytes, t)?; }
|
||||
Err(e) => return Err(e),
|
||||
}
|
||||
}
|
||||
Ok(msg)
|
||||
}
|
||||
}
|
||||
|
||||
impl MessageWrite for ControlPrune {
|
||||
fn get_size(&self) -> usize {
|
||||
0
|
||||
+ self.topic_id.as_ref().map_or(0, |m| 1 + sizeof_len((m).len()))
|
||||
+ self.peers.iter().map(|s| 1 + sizeof_len((s).get_size())).sum::<usize>()
|
||||
+ self.backoff.as_ref().map_or(0, |m| 1 + sizeof_varint(*(m) as u64))
|
||||
}
|
||||
|
||||
fn write_message<W: WriterBackend>(&self, w: &mut Writer<W>) -> Result<()> {
|
||||
if let Some(ref s) = self.topic_id { w.write_with_tag(10, |w| w.write_string(&**s))?; }
|
||||
for s in &self.peers { w.write_with_tag(18, |w| w.write_message(s))?; }
|
||||
if let Some(ref s) = self.backoff { w.write_with_tag(24, |w| w.write_uint64(*s))?; }
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
|
||||
#[allow(clippy::derive_partial_eq_without_eq)]
|
||||
#[derive(Debug, Default, PartialEq, Clone)]
|
||||
pub struct PeerInfo {
|
||||
pub peer_id: Option<Vec<u8>>,
|
||||
pub signed_peer_record: Option<Vec<u8>>,
|
||||
}
|
||||
|
||||
impl<'a> MessageRead<'a> for PeerInfo {
|
||||
fn from_reader(r: &mut BytesReader, bytes: &'a [u8]) -> Result<Self> {
|
||||
let mut msg = Self::default();
|
||||
while !r.is_eof() {
|
||||
match r.next_tag(bytes) {
|
||||
Ok(10) => msg.peer_id = Some(r.read_bytes(bytes)?.to_owned()),
|
||||
Ok(18) => msg.signed_peer_record = Some(r.read_bytes(bytes)?.to_owned()),
|
||||
Ok(t) => { r.read_unknown(bytes, t)?; }
|
||||
Err(e) => return Err(e),
|
||||
}
|
||||
}
|
||||
Ok(msg)
|
||||
}
|
||||
}
|
||||
|
||||
impl MessageWrite for PeerInfo {
|
||||
fn get_size(&self) -> usize {
|
||||
0
|
||||
+ self.peer_id.as_ref().map_or(0, |m| 1 + sizeof_len((m).len()))
|
||||
+ self.signed_peer_record.as_ref().map_or(0, |m| 1 + sizeof_len((m).len()))
|
||||
}
|
||||
|
||||
fn write_message<W: WriterBackend>(&self, w: &mut Writer<W>) -> Result<()> {
|
||||
if let Some(ref s) = self.peer_id { w.write_with_tag(10, |w| w.write_bytes(&**s))?; }
|
||||
if let Some(ref s) = self.signed_peer_record { w.write_with_tag(18, |w| w.write_bytes(&**s))?; }
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
|
||||
#[allow(clippy::derive_partial_eq_without_eq)]
|
||||
#[derive(Debug, Default, PartialEq, Clone)]
|
||||
pub struct TopicDescriptor {
|
||||
pub name: Option<String>,
|
||||
pub auth: Option<gossipsub::pb::mod_TopicDescriptor::AuthOpts>,
|
||||
pub enc: Option<gossipsub::pb::mod_TopicDescriptor::EncOpts>,
|
||||
}
|
||||
|
||||
impl<'a> MessageRead<'a> for TopicDescriptor {
|
||||
fn from_reader(r: &mut BytesReader, bytes: &'a [u8]) -> Result<Self> {
|
||||
let mut msg = Self::default();
|
||||
while !r.is_eof() {
|
||||
match r.next_tag(bytes) {
|
||||
Ok(10) => msg.name = Some(r.read_string(bytes)?.to_owned()),
|
||||
Ok(18) => msg.auth = Some(r.read_message::<gossipsub::pb::mod_TopicDescriptor::AuthOpts>(bytes)?),
|
||||
Ok(26) => msg.enc = Some(r.read_message::<gossipsub::pb::mod_TopicDescriptor::EncOpts>(bytes)?),
|
||||
Ok(t) => { r.read_unknown(bytes, t)?; }
|
||||
Err(e) => return Err(e),
|
||||
}
|
||||
}
|
||||
Ok(msg)
|
||||
}
|
||||
}
|
||||
|
||||
impl MessageWrite for TopicDescriptor {
|
||||
fn get_size(&self) -> usize {
|
||||
0
|
||||
+ self.name.as_ref().map_or(0, |m| 1 + sizeof_len((m).len()))
|
||||
+ self.auth.as_ref().map_or(0, |m| 1 + sizeof_len((m).get_size()))
|
||||
+ self.enc.as_ref().map_or(0, |m| 1 + sizeof_len((m).get_size()))
|
||||
}
|
||||
|
||||
fn write_message<W: WriterBackend>(&self, w: &mut Writer<W>) -> Result<()> {
|
||||
if let Some(ref s) = self.name { w.write_with_tag(10, |w| w.write_string(&**s))?; }
|
||||
if let Some(ref s) = self.auth { w.write_with_tag(18, |w| w.write_message(s))?; }
|
||||
if let Some(ref s) = self.enc { w.write_with_tag(26, |w| w.write_message(s))?; }
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
|
||||
pub mod mod_TopicDescriptor {
|
||||
|
||||
use super::*;
|
||||
|
||||
#[allow(clippy::derive_partial_eq_without_eq)]
|
||||
#[derive(Debug, Default, PartialEq, Clone)]
|
||||
pub struct AuthOpts {
|
||||
pub mode: Option<gossipsub::pb::mod_TopicDescriptor::mod_AuthOpts::AuthMode>,
|
||||
pub keys: Vec<Vec<u8>>,
|
||||
}
|
||||
|
||||
impl<'a> MessageRead<'a> for AuthOpts {
|
||||
fn from_reader(r: &mut BytesReader, bytes: &'a [u8]) -> Result<Self> {
|
||||
let mut msg = Self::default();
|
||||
while !r.is_eof() {
|
||||
match r.next_tag(bytes) {
|
||||
Ok(8) => msg.mode = Some(r.read_enum(bytes)?),
|
||||
Ok(18) => msg.keys.push(r.read_bytes(bytes)?.to_owned()),
|
||||
Ok(t) => { r.read_unknown(bytes, t)?; }
|
||||
Err(e) => return Err(e),
|
||||
}
|
||||
}
|
||||
Ok(msg)
|
||||
}
|
||||
}
|
||||
|
||||
impl MessageWrite for AuthOpts {
|
||||
fn get_size(&self) -> usize {
|
||||
0
|
||||
+ self.mode.as_ref().map_or(0, |m| 1 + sizeof_varint(*(m) as u64))
|
||||
+ self.keys.iter().map(|s| 1 + sizeof_len((s).len())).sum::<usize>()
|
||||
}
|
||||
|
||||
fn write_message<W: WriterBackend>(&self, w: &mut Writer<W>) -> Result<()> {
|
||||
if let Some(ref s) = self.mode { w.write_with_tag(8, |w| w.write_enum(*s as i32))?; }
|
||||
for s in &self.keys { w.write_with_tag(18, |w| w.write_bytes(&**s))?; }
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
|
||||
pub mod mod_AuthOpts {
|
||||
|
||||
|
||||
#[derive(Debug, PartialEq, Eq, Clone, Copy)]
|
||||
pub enum AuthMode {
|
||||
NONE = 0,
|
||||
KEY = 1,
|
||||
WOT = 2,
|
||||
}
|
||||
|
||||
impl Default for AuthMode {
|
||||
fn default() -> Self {
|
||||
AuthMode::NONE
|
||||
}
|
||||
}
|
||||
|
||||
impl From<i32> for AuthMode {
|
||||
fn from(i: i32) -> Self {
|
||||
match i {
|
||||
0 => AuthMode::NONE,
|
||||
1 => AuthMode::KEY,
|
||||
2 => AuthMode::WOT,
|
||||
_ => Self::default(),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl<'a> From<&'a str> for AuthMode {
|
||||
fn from(s: &'a str) -> Self {
|
||||
match s {
|
||||
"NONE" => AuthMode::NONE,
|
||||
"KEY" => AuthMode::KEY,
|
||||
"WOT" => AuthMode::WOT,
|
||||
_ => Self::default(),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
#[allow(clippy::derive_partial_eq_without_eq)]
|
||||
#[derive(Debug, Default, PartialEq, Clone)]
|
||||
pub struct EncOpts {
|
||||
pub mode: Option<gossipsub::pb::mod_TopicDescriptor::mod_EncOpts::EncMode>,
|
||||
pub key_hashes: Vec<Vec<u8>>,
|
||||
}
|
||||
|
||||
impl<'a> MessageRead<'a> for EncOpts {
|
||||
fn from_reader(r: &mut BytesReader, bytes: &'a [u8]) -> Result<Self> {
|
||||
let mut msg = Self::default();
|
||||
while !r.is_eof() {
|
||||
match r.next_tag(bytes) {
|
||||
Ok(8) => msg.mode = Some(r.read_enum(bytes)?),
|
||||
Ok(18) => msg.key_hashes.push(r.read_bytes(bytes)?.to_owned()),
|
||||
Ok(t) => { r.read_unknown(bytes, t)?; }
|
||||
Err(e) => return Err(e),
|
||||
}
|
||||
}
|
||||
Ok(msg)
|
||||
}
|
||||
}
|
||||
|
||||
impl MessageWrite for EncOpts {
|
||||
fn get_size(&self) -> usize {
|
||||
0
|
||||
+ self.mode.as_ref().map_or(0, |m| 1 + sizeof_varint(*(m) as u64))
|
||||
+ self.key_hashes.iter().map(|s| 1 + sizeof_len((s).len())).sum::<usize>()
|
||||
}
|
||||
|
||||
fn write_message<W: WriterBackend>(&self, w: &mut Writer<W>) -> Result<()> {
|
||||
if let Some(ref s) = self.mode { w.write_with_tag(8, |w| w.write_enum(*s as i32))?; }
|
||||
for s in &self.key_hashes { w.write_with_tag(18, |w| w.write_bytes(&**s))?; }
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
|
||||
pub mod mod_EncOpts {
|
||||
|
||||
|
||||
#[derive(Debug, PartialEq, Eq, Clone, Copy)]
|
||||
pub enum EncMode {
|
||||
NONE = 0,
|
||||
SHAREDKEY = 1,
|
||||
WOT = 2,
|
||||
}
|
||||
|
||||
impl Default for EncMode {
|
||||
fn default() -> Self {
|
||||
EncMode::NONE
|
||||
}
|
||||
}
|
||||
|
||||
impl From<i32> for EncMode {
|
||||
fn from(i: i32) -> Self {
|
||||
match i {
|
||||
0 => EncMode::NONE,
|
||||
1 => EncMode::SHAREDKEY,
|
||||
2 => EncMode::WOT,
|
||||
_ => Self::default(),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl<'a> From<&'a str> for EncMode {
|
||||
fn from(s: &'a str) -> Self {
|
||||
match s {
|
||||
"NONE" => EncMode::NONE,
|
||||
"SHAREDKEY" => EncMode::SHAREDKEY,
|
||||
"WOT" => EncMode::WOT,
|
||||
_ => Self::default(),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
@ -0,0 +1,3 @@
|
||||
// Automatically generated mod.rs
|
||||
pub mod compat;
|
||||
pub mod gossipsub;
|
||||
@ -0,0 +1,84 @@
|
||||
syntax = "proto2";
|
||||
|
||||
package gossipsub.pb;
|
||||
|
||||
message RPC {
|
||||
repeated SubOpts subscriptions = 1;
|
||||
repeated Message publish = 2;
|
||||
|
||||
message SubOpts {
|
||||
optional bool subscribe = 1; // subscribe or unsubscribe
|
||||
optional string topic_id = 2;
|
||||
}
|
||||
|
||||
optional ControlMessage control = 3;
|
||||
}
|
||||
|
||||
message Message {
|
||||
optional bytes from = 1;
|
||||
optional bytes data = 2;
|
||||
optional bytes seqno = 3;
|
||||
required string topic = 4;
|
||||
optional bytes signature = 5;
|
||||
optional bytes key = 6;
|
||||
}
|
||||
|
||||
message ControlMessage {
|
||||
repeated ControlIHave ihave = 1;
|
||||
repeated ControlIWant iwant = 2;
|
||||
repeated ControlGraft graft = 3;
|
||||
repeated ControlPrune prune = 4;
|
||||
}
|
||||
|
||||
message ControlIHave {
|
||||
optional string topic_id = 1;
|
||||
repeated bytes message_ids = 2;
|
||||
}
|
||||
|
||||
message ControlIWant {
|
||||
repeated bytes message_ids= 1;
|
||||
}
|
||||
|
||||
message ControlGraft {
|
||||
optional string topic_id = 1;
|
||||
}
|
||||
|
||||
message ControlPrune {
|
||||
optional string topic_id = 1;
|
||||
repeated PeerInfo peers = 2; // gossipsub v1.1 PX
|
||||
optional uint64 backoff = 3; // gossipsub v1.1 backoff time (in seconds)
|
||||
}
|
||||
|
||||
message PeerInfo {
|
||||
optional bytes peer_id = 1;
|
||||
optional bytes signed_peer_record = 2;
|
||||
}
|
||||
|
||||
// topicID = hash(topicDescriptor); (not the topic.name)
|
||||
message TopicDescriptor {
|
||||
optional string name = 1;
|
||||
optional AuthOpts auth = 2;
|
||||
optional EncOpts enc = 3;
|
||||
|
||||
message AuthOpts {
|
||||
optional AuthMode mode = 1;
|
||||
repeated bytes keys = 2; // root keys to trust
|
||||
|
||||
enum AuthMode {
|
||||
NONE = 0; // no authentication, anyone can publish
|
||||
KEY = 1; // only messages signed by keys in the topic descriptor are accepted
|
||||
WOT = 2; // web of trust, certificates can allow publisher set to grow
|
||||
}
|
||||
}
|
||||
|
||||
message EncOpts {
|
||||
optional EncMode mode = 1;
|
||||
repeated bytes key_hashes = 2; // the hashes of the shared keys used (salted)
|
||||
|
||||
enum EncMode {
|
||||
NONE = 0; // no encryption, anyone can read
|
||||
SHAREDKEY = 1; // messages are encrypted with shared key
|
||||
WOT = 2; // web of trust, certificates can allow publisher set to grow
|
||||
}
|
||||
}
|
||||
}
|
||||
101
beacon_node/lighthouse_network/src/gossipsub/gossip_promises.rs
Normal file
101
beacon_node/lighthouse_network/src/gossipsub/gossip_promises.rs
Normal file
@ -0,0 +1,101 @@
|
||||
// Copyright 2020 Sigma Prime Pty Ltd.
|
||||
//
|
||||
// Permission is hereby granted, free of charge, to any person obtaining a
|
||||
// copy of this software and associated documentation files (the "Software"),
|
||||
// to deal in the Software without restriction, including without limitation
|
||||
// the rights to use, copy, modify, merge, publish, distribute, sublicense,
|
||||
// and/or sell copies of the Software, and to permit persons to whom the
|
||||
// Software is furnished to do so, subject to the following conditions:
|
||||
//
|
||||
// The above copyright notice and this permission notice shall be included in
|
||||
// all copies or substantial portions of the Software.
|
||||
//
|
||||
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
|
||||
// OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
|
||||
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
||||
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
|
||||
// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
|
||||
// DEALINGS IN THE SOFTWARE.
|
||||
|
||||
use super::peer_score::RejectReason;
|
||||
use super::MessageId;
|
||||
use super::ValidationError;
|
||||
use instant::Instant;
|
||||
use libp2p::identity::PeerId;
|
||||
use std::collections::HashMap;
|
||||
|
||||
/// Tracks recently sent `IWANT` messages and checks if peers respond to them.
|
||||
#[derive(Default)]
|
||||
pub(crate) struct GossipPromises {
|
||||
/// Stores for each tracked message id and peer the instant when this promise expires.
|
||||
///
|
||||
/// If the peer didn't respond until then we consider the promise as broken and penalize the
|
||||
/// peer.
|
||||
promises: HashMap<MessageId, HashMap<PeerId, Instant>>,
|
||||
}
|
||||
|
||||
impl GossipPromises {
|
||||
/// Returns true if the message id exists in the promises.
|
||||
pub(crate) fn contains(&self, message: &MessageId) -> bool {
|
||||
self.promises.contains_key(message)
|
||||
}
|
||||
|
||||
/// Track a promise to deliver a message from a list of [`MessageId`]s we are requesting.
|
||||
pub(crate) fn add_promise(&mut self, peer: PeerId, messages: &[MessageId], expires: Instant) {
|
||||
for message_id in messages {
|
||||
// If a promise for this message id and peer already exists we don't update the expiry!
|
||||
self.promises
|
||||
.entry(message_id.clone())
|
||||
.or_default()
|
||||
.entry(peer)
|
||||
.or_insert(expires);
|
||||
}
|
||||
}
|
||||
|
||||
pub(crate) fn message_delivered(&mut self, message_id: &MessageId) {
|
||||
// Someone delivered a message, we can stop tracking all promises for it.
|
||||
self.promises.remove(message_id);
|
||||
}
|
||||
|
||||
pub(crate) fn reject_message(&mut self, message_id: &MessageId, reason: &RejectReason) {
|
||||
// A message got rejected, so we can stop tracking promises and let the score penalty apply
|
||||
// from invalid message delivery.
|
||||
// We do take exception and apply promise penalty regardless in the following cases, where
|
||||
// the peer delivered an obviously invalid message.
|
||||
match reason {
|
||||
RejectReason::ValidationError(ValidationError::InvalidSignature) => (),
|
||||
RejectReason::SelfOrigin => (),
|
||||
_ => {
|
||||
self.promises.remove(message_id);
|
||||
}
|
||||
};
|
||||
}
|
||||
|
||||
/// Returns the number of broken promises for each peer who didn't follow up on an IWANT
|
||||
/// request.
|
||||
/// This should be called not too often relative to the expire times, since it iterates over
|
||||
/// the whole stored data.
|
||||
pub(crate) fn get_broken_promises(&mut self) -> HashMap<PeerId, usize> {
|
||||
let now = Instant::now();
|
||||
let mut result = HashMap::new();
|
||||
self.promises.retain(|msg, peers| {
|
||||
peers.retain(|peer_id, expires| {
|
||||
if *expires < now {
|
||||
let count = result.entry(*peer_id).or_insert(0);
|
||||
*count += 1;
|
||||
tracing::debug!(
|
||||
peer=%peer_id,
|
||||
message=%msg,
|
||||
"[Penalty] The peer broke the promise to deliver message in time!"
|
||||
);
|
||||
false
|
||||
} else {
|
||||
true
|
||||
}
|
||||
});
|
||||
!peers.is_empty()
|
||||
});
|
||||
result
|
||||
}
|
||||
}
|
||||
562
beacon_node/lighthouse_network/src/gossipsub/handler.rs
Normal file
562
beacon_node/lighthouse_network/src/gossipsub/handler.rs
Normal file
@ -0,0 +1,562 @@
|
||||
// Copyright 2020 Sigma Prime Pty Ltd.
|
||||
//
|
||||
// Permission is hereby granted, free of charge, to any person obtaining a
|
||||
// copy of this software and associated documentation files (the "Software"),
|
||||
// to deal in the Software without restriction, including without limitation
|
||||
// the rights to use, copy, modify, merge, publish, distribute, sublicense,
|
||||
// and/or sell copies of the Software, and to permit persons to whom the
|
||||
// Software is furnished to do so, subject to the following conditions:
|
||||
//
|
||||
// The above copyright notice and this permission notice shall be included in
|
||||
// all copies or substantial portions of the Software.
|
||||
//
|
||||
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
|
||||
// OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
|
||||
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
||||
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
|
||||
// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
|
||||
// DEALINGS IN THE SOFTWARE.
|
||||
|
||||
use super::protocol::{GossipsubCodec, ProtocolConfig};
|
||||
use super::rpc_proto::proto;
|
||||
use super::types::{PeerKind, RawMessage, Rpc, RpcOut, RpcReceiver};
|
||||
use super::ValidationError;
|
||||
use asynchronous_codec::Framed;
|
||||
use futures::future::Either;
|
||||
use futures::prelude::*;
|
||||
use futures::StreamExt;
|
||||
use instant::Instant;
|
||||
use libp2p::core::upgrade::DeniedUpgrade;
|
||||
use libp2p::swarm::handler::{
|
||||
ConnectionEvent, ConnectionHandler, ConnectionHandlerEvent, DialUpgradeError,
|
||||
FullyNegotiatedInbound, FullyNegotiatedOutbound, StreamUpgradeError, SubstreamProtocol,
|
||||
};
|
||||
use libp2p::swarm::Stream;
|
||||
use std::{
|
||||
pin::Pin,
|
||||
task::{Context, Poll},
|
||||
};
|
||||
|
||||
/// The event emitted by the Handler. This informs the behaviour of various events created
|
||||
/// by the handler.
|
||||
#[derive(Debug)]
|
||||
pub enum HandlerEvent {
|
||||
/// A GossipsubRPC message has been received. This also contains a list of invalid messages (if
|
||||
/// any) that were received.
|
||||
Message {
|
||||
/// The GossipsubRPC message excluding any invalid messages.
|
||||
rpc: Rpc,
|
||||
/// Any invalid messages that were received in the RPC, along with the associated
|
||||
/// validation error.
|
||||
invalid_messages: Vec<(RawMessage, ValidationError)>,
|
||||
},
|
||||
/// An inbound or outbound substream has been established with the peer and this informs over
|
||||
/// which protocol. This message only occurs once per connection.
|
||||
PeerKind(PeerKind),
|
||||
/// A message to be published was dropped because it could not be sent in time.
|
||||
MessageDropped(RpcOut),
|
||||
}
|
||||
|
||||
/// A message sent from the behaviour to the handler.
|
||||
#[allow(clippy::large_enum_variant)]
|
||||
#[derive(Debug)]
|
||||
pub enum HandlerIn {
|
||||
/// The peer has joined the mesh.
|
||||
JoinedMesh,
|
||||
/// The peer has left the mesh.
|
||||
LeftMesh,
|
||||
}
|
||||
|
||||
/// The maximum number of inbound or outbound substreams attempts we allow.
|
||||
///
|
||||
/// Gossipsub is supposed to have a single long-lived inbound and outbound substream. On failure we
|
||||
/// attempt to recreate these. This imposes an upper bound of new substreams before we consider the
|
||||
/// connection faulty and disable the handler. This also prevents against potential substream
|
||||
/// creation loops.
|
||||
const MAX_SUBSTREAM_ATTEMPTS: usize = 5;
|
||||
|
||||
#[allow(clippy::large_enum_variant)]
|
||||
pub enum Handler {
|
||||
Enabled(EnabledHandler),
|
||||
Disabled(DisabledHandler),
|
||||
}
|
||||
|
||||
/// Protocol Handler that manages a single long-lived substream with a peer.
|
||||
pub struct EnabledHandler {
|
||||
/// Upgrade configuration for the gossipsub protocol.
|
||||
listen_protocol: ProtocolConfig,
|
||||
|
||||
/// The single long-lived outbound substream.
|
||||
outbound_substream: Option<OutboundSubstreamState>,
|
||||
|
||||
/// The single long-lived inbound substream.
|
||||
inbound_substream: Option<InboundSubstreamState>,
|
||||
|
||||
/// Queue of values that we want to send to the remote
|
||||
send_queue: RpcReceiver,
|
||||
|
||||
/// Flag indicating that an outbound substream is being established to prevent duplicate
|
||||
/// requests.
|
||||
outbound_substream_establishing: bool,
|
||||
|
||||
/// The number of outbound substreams we have requested.
|
||||
outbound_substream_attempts: usize,
|
||||
|
||||
/// The number of inbound substreams that have been created by the peer.
|
||||
inbound_substream_attempts: usize,
|
||||
|
||||
/// The type of peer this handler is associated to.
|
||||
peer_kind: Option<PeerKind>,
|
||||
|
||||
/// Keeps track on whether we have sent the peer kind to the behaviour.
|
||||
//
|
||||
// NOTE: Use this flag rather than checking the substream count each poll.
|
||||
peer_kind_sent: bool,
|
||||
|
||||
last_io_activity: Instant,
|
||||
|
||||
/// Keeps track of whether this connection is for a peer in the mesh. This is used to make
|
||||
/// decisions about the keep alive state for this connection.
|
||||
in_mesh: bool,
|
||||
}
|
||||
|
||||
pub enum DisabledHandler {
|
||||
/// If the peer doesn't support the gossipsub protocol we do not immediately disconnect.
|
||||
/// Rather, we disable the handler and prevent any incoming or outgoing substreams from being
|
||||
/// established.
|
||||
ProtocolUnsupported {
|
||||
/// Keeps track on whether we have sent the peer kind to the behaviour.
|
||||
peer_kind_sent: bool,
|
||||
},
|
||||
/// The maximum number of inbound or outbound substream attempts have happened and thereby the
|
||||
/// handler has been disabled.
|
||||
MaxSubstreamAttempts,
|
||||
}
|
||||
|
||||
/// State of the inbound substream, opened either by us or by the remote.
|
||||
enum InboundSubstreamState {
|
||||
/// Waiting for a message from the remote. The idle state for an inbound substream.
|
||||
WaitingInput(Framed<Stream, GossipsubCodec>),
|
||||
/// The substream is being closed.
|
||||
Closing(Framed<Stream, GossipsubCodec>),
|
||||
/// An error occurred during processing.
|
||||
Poisoned,
|
||||
}
|
||||
|
||||
/// State of the outbound substream, opened either by us or by the remote.
|
||||
enum OutboundSubstreamState {
|
||||
/// Waiting for the user to send a message. The idle state for an outbound substream.
|
||||
WaitingOutput(Framed<Stream, GossipsubCodec>),
|
||||
/// Waiting to send a message to the remote.
|
||||
PendingSend(Framed<Stream, GossipsubCodec>, proto::RPC),
|
||||
/// Waiting to flush the substream so that the data arrives to the remote.
|
||||
PendingFlush(Framed<Stream, GossipsubCodec>),
|
||||
/// An error occurred during processing.
|
||||
Poisoned,
|
||||
}
|
||||
|
||||
impl Handler {
|
||||
/// Builds a new [`Handler`].
|
||||
pub fn new(protocol_config: ProtocolConfig, message_queue: RpcReceiver) -> Self {
|
||||
Handler::Enabled(EnabledHandler {
|
||||
listen_protocol: protocol_config,
|
||||
inbound_substream: None,
|
||||
outbound_substream: None,
|
||||
outbound_substream_establishing: false,
|
||||
outbound_substream_attempts: 0,
|
||||
inbound_substream_attempts: 0,
|
||||
peer_kind: None,
|
||||
peer_kind_sent: false,
|
||||
last_io_activity: Instant::now(),
|
||||
in_mesh: false,
|
||||
send_queue: message_queue,
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
impl EnabledHandler {
|
||||
fn on_fully_negotiated_inbound(
|
||||
&mut self,
|
||||
(substream, peer_kind): (Framed<Stream, GossipsubCodec>, PeerKind),
|
||||
) {
|
||||
// update the known kind of peer
|
||||
if self.peer_kind.is_none() {
|
||||
self.peer_kind = Some(peer_kind);
|
||||
}
|
||||
|
||||
// new inbound substream. Replace the current one, if it exists.
|
||||
tracing::trace!("New inbound substream request");
|
||||
self.inbound_substream = Some(InboundSubstreamState::WaitingInput(substream));
|
||||
}
|
||||
|
||||
fn on_fully_negotiated_outbound(
|
||||
&mut self,
|
||||
FullyNegotiatedOutbound { protocol, .. }: FullyNegotiatedOutbound<
|
||||
<Handler as ConnectionHandler>::OutboundProtocol,
|
||||
<Handler as ConnectionHandler>::OutboundOpenInfo,
|
||||
>,
|
||||
) {
|
||||
let (substream, peer_kind) = protocol;
|
||||
|
||||
// update the known kind of peer
|
||||
if self.peer_kind.is_none() {
|
||||
self.peer_kind = Some(peer_kind);
|
||||
}
|
||||
|
||||
assert!(
|
||||
self.outbound_substream.is_none(),
|
||||
"Established an outbound substream with one already available"
|
||||
);
|
||||
self.outbound_substream = Some(OutboundSubstreamState::WaitingOutput(substream));
|
||||
}
|
||||
|
||||
fn poll(
|
||||
&mut self,
|
||||
cx: &mut Context<'_>,
|
||||
) -> Poll<
|
||||
ConnectionHandlerEvent<
|
||||
<Handler as ConnectionHandler>::OutboundProtocol,
|
||||
<Handler as ConnectionHandler>::OutboundOpenInfo,
|
||||
<Handler as ConnectionHandler>::ToBehaviour,
|
||||
>,
|
||||
> {
|
||||
if !self.peer_kind_sent {
|
||||
if let Some(peer_kind) = self.peer_kind.as_ref() {
|
||||
self.peer_kind_sent = true;
|
||||
return Poll::Ready(ConnectionHandlerEvent::NotifyBehaviour(
|
||||
HandlerEvent::PeerKind(peer_kind.clone()),
|
||||
));
|
||||
}
|
||||
}
|
||||
|
||||
// determine if we need to create the outbound stream
|
||||
if !self.send_queue.poll_is_empty(cx)
|
||||
&& self.outbound_substream.is_none()
|
||||
&& !self.outbound_substream_establishing
|
||||
{
|
||||
self.outbound_substream_establishing = true;
|
||||
return Poll::Ready(ConnectionHandlerEvent::OutboundSubstreamRequest {
|
||||
protocol: SubstreamProtocol::new(self.listen_protocol.clone(), ()),
|
||||
});
|
||||
}
|
||||
|
||||
// process outbound stream
|
||||
loop {
|
||||
match std::mem::replace(
|
||||
&mut self.outbound_substream,
|
||||
Some(OutboundSubstreamState::Poisoned),
|
||||
) {
|
||||
// outbound idle state
|
||||
Some(OutboundSubstreamState::WaitingOutput(substream)) => {
|
||||
if let Poll::Ready(Some(mut message)) = self.send_queue.poll_next_unpin(cx) {
|
||||
match message {
|
||||
RpcOut::Publish {
|
||||
message: _,
|
||||
ref mut timeout,
|
||||
}
|
||||
| RpcOut::Forward {
|
||||
message: _,
|
||||
ref mut timeout,
|
||||
} => {
|
||||
if Pin::new(timeout).poll(cx).is_ready() {
|
||||
// Inform the behaviour and end the poll.
|
||||
self.outbound_substream =
|
||||
Some(OutboundSubstreamState::WaitingOutput(substream));
|
||||
return Poll::Ready(ConnectionHandlerEvent::NotifyBehaviour(
|
||||
HandlerEvent::MessageDropped(message),
|
||||
));
|
||||
}
|
||||
}
|
||||
_ => {} // All other messages are not time-bound.
|
||||
}
|
||||
self.outbound_substream = Some(OutboundSubstreamState::PendingSend(
|
||||
substream,
|
||||
message.into_protobuf(),
|
||||
));
|
||||
continue;
|
||||
}
|
||||
|
||||
self.outbound_substream =
|
||||
Some(OutboundSubstreamState::WaitingOutput(substream));
|
||||
break;
|
||||
}
|
||||
Some(OutboundSubstreamState::PendingSend(mut substream, message)) => {
|
||||
match Sink::poll_ready(Pin::new(&mut substream), cx) {
|
||||
Poll::Ready(Ok(())) => {
|
||||
match Sink::start_send(Pin::new(&mut substream), message) {
|
||||
Ok(()) => {
|
||||
self.outbound_substream =
|
||||
Some(OutboundSubstreamState::PendingFlush(substream))
|
||||
}
|
||||
Err(e) => {
|
||||
tracing::debug!(
|
||||
"Failed to send message on outbound stream: {e}"
|
||||
);
|
||||
self.outbound_substream = None;
|
||||
break;
|
||||
}
|
||||
}
|
||||
}
|
||||
Poll::Ready(Err(e)) => {
|
||||
tracing::debug!("Failed to send message on outbound stream: {e}");
|
||||
self.outbound_substream = None;
|
||||
break;
|
||||
}
|
||||
Poll::Pending => {
|
||||
self.outbound_substream =
|
||||
Some(OutboundSubstreamState::PendingSend(substream, message));
|
||||
break;
|
||||
}
|
||||
}
|
||||
}
|
||||
Some(OutboundSubstreamState::PendingFlush(mut substream)) => {
|
||||
match Sink::poll_flush(Pin::new(&mut substream), cx) {
|
||||
Poll::Ready(Ok(())) => {
|
||||
self.last_io_activity = Instant::now();
|
||||
self.outbound_substream =
|
||||
Some(OutboundSubstreamState::WaitingOutput(substream))
|
||||
}
|
||||
Poll::Ready(Err(e)) => {
|
||||
tracing::debug!("Failed to flush outbound stream: {e}");
|
||||
self.outbound_substream = None;
|
||||
break;
|
||||
}
|
||||
Poll::Pending => {
|
||||
self.outbound_substream =
|
||||
Some(OutboundSubstreamState::PendingFlush(substream));
|
||||
break;
|
||||
}
|
||||
}
|
||||
}
|
||||
None => {
|
||||
self.outbound_substream = None;
|
||||
break;
|
||||
}
|
||||
Some(OutboundSubstreamState::Poisoned) => {
|
||||
unreachable!("Error occurred during outbound stream processing")
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Handle inbound messages.
|
||||
loop {
|
||||
match std::mem::replace(
|
||||
&mut self.inbound_substream,
|
||||
Some(InboundSubstreamState::Poisoned),
|
||||
) {
|
||||
// inbound idle state
|
||||
Some(InboundSubstreamState::WaitingInput(mut substream)) => {
|
||||
match substream.poll_next_unpin(cx) {
|
||||
Poll::Ready(Some(Ok(message))) => {
|
||||
self.last_io_activity = Instant::now();
|
||||
self.inbound_substream =
|
||||
Some(InboundSubstreamState::WaitingInput(substream));
|
||||
return Poll::Ready(ConnectionHandlerEvent::NotifyBehaviour(message));
|
||||
}
|
||||
Poll::Ready(Some(Err(error))) => {
|
||||
tracing::debug!("Failed to read from inbound stream: {error}");
|
||||
// Close this side of the stream. If the
|
||||
// peer is still around, they will re-establish their
|
||||
// outbound stream i.e. our inbound stream.
|
||||
self.inbound_substream =
|
||||
Some(InboundSubstreamState::Closing(substream));
|
||||
}
|
||||
// peer closed the stream
|
||||
Poll::Ready(None) => {
|
||||
tracing::debug!("Inbound stream closed by remote");
|
||||
self.inbound_substream =
|
||||
Some(InboundSubstreamState::Closing(substream));
|
||||
}
|
||||
Poll::Pending => {
|
||||
self.inbound_substream =
|
||||
Some(InboundSubstreamState::WaitingInput(substream));
|
||||
break;
|
||||
}
|
||||
}
|
||||
}
|
||||
Some(InboundSubstreamState::Closing(mut substream)) => {
|
||||
match Sink::poll_close(Pin::new(&mut substream), cx) {
|
||||
Poll::Ready(res) => {
|
||||
if let Err(e) = res {
|
||||
// Don't close the connection but just drop the inbound substream.
|
||||
// In case the remote has more to send, they will open up a new
|
||||
// substream.
|
||||
tracing::debug!("Inbound substream error while closing: {e}");
|
||||
}
|
||||
self.inbound_substream = None;
|
||||
break;
|
||||
}
|
||||
Poll::Pending => {
|
||||
self.inbound_substream =
|
||||
Some(InboundSubstreamState::Closing(substream));
|
||||
break;
|
||||
}
|
||||
}
|
||||
}
|
||||
None => {
|
||||
self.inbound_substream = None;
|
||||
break;
|
||||
}
|
||||
Some(InboundSubstreamState::Poisoned) => {
|
||||
unreachable!("Error occurred during inbound stream processing")
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Drop the next message in queue if it's stale.
|
||||
if let Poll::Ready(Some(rpc)) = self.send_queue.poll_stale(cx) {
|
||||
return Poll::Ready(ConnectionHandlerEvent::NotifyBehaviour(
|
||||
HandlerEvent::MessageDropped(rpc),
|
||||
));
|
||||
}
|
||||
|
||||
Poll::Pending
|
||||
}
|
||||
}
|
||||
|
||||
impl ConnectionHandler for Handler {
|
||||
type FromBehaviour = HandlerIn;
|
||||
type ToBehaviour = HandlerEvent;
|
||||
type InboundOpenInfo = ();
|
||||
type InboundProtocol = either::Either<ProtocolConfig, DeniedUpgrade>;
|
||||
type OutboundOpenInfo = ();
|
||||
type OutboundProtocol = ProtocolConfig;
|
||||
|
||||
fn listen_protocol(&self) -> SubstreamProtocol<Self::InboundProtocol, Self::InboundOpenInfo> {
|
||||
match self {
|
||||
Handler::Enabled(handler) => {
|
||||
SubstreamProtocol::new(either::Either::Left(handler.listen_protocol.clone()), ())
|
||||
}
|
||||
Handler::Disabled(_) => {
|
||||
SubstreamProtocol::new(either::Either::Right(DeniedUpgrade), ())
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
fn on_behaviour_event(&mut self, message: HandlerIn) {
|
||||
match self {
|
||||
Handler::Enabled(handler) => match message {
|
||||
HandlerIn::JoinedMesh => {
|
||||
handler.in_mesh = true;
|
||||
}
|
||||
HandlerIn::LeftMesh => {
|
||||
handler.in_mesh = false;
|
||||
}
|
||||
},
|
||||
Handler::Disabled(_) => {
|
||||
tracing::debug!(?message, "Handler is disabled. Dropping message");
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
fn connection_keep_alive(&self) -> bool {
|
||||
matches!(self, Handler::Enabled(h) if h.in_mesh)
|
||||
}
|
||||
|
||||
#[tracing::instrument(level = "trace", name = "ConnectionHandler::poll", skip(self, cx))]
|
||||
fn poll(
|
||||
&mut self,
|
||||
cx: &mut Context<'_>,
|
||||
) -> Poll<
|
||||
ConnectionHandlerEvent<Self::OutboundProtocol, Self::OutboundOpenInfo, Self::ToBehaviour>,
|
||||
> {
|
||||
match self {
|
||||
Handler::Enabled(handler) => handler.poll(cx),
|
||||
Handler::Disabled(DisabledHandler::ProtocolUnsupported { peer_kind_sent }) => {
|
||||
if !*peer_kind_sent {
|
||||
*peer_kind_sent = true;
|
||||
return Poll::Ready(ConnectionHandlerEvent::NotifyBehaviour(
|
||||
HandlerEvent::PeerKind(PeerKind::NotSupported),
|
||||
));
|
||||
}
|
||||
|
||||
Poll::Pending
|
||||
}
|
||||
Handler::Disabled(DisabledHandler::MaxSubstreamAttempts) => Poll::Pending,
|
||||
}
|
||||
}
|
||||
|
||||
fn on_connection_event(
|
||||
&mut self,
|
||||
event: ConnectionEvent<
|
||||
Self::InboundProtocol,
|
||||
Self::OutboundProtocol,
|
||||
Self::InboundOpenInfo,
|
||||
Self::OutboundOpenInfo,
|
||||
>,
|
||||
) {
|
||||
match self {
|
||||
Handler::Enabled(handler) => {
|
||||
if event.is_inbound() {
|
||||
handler.inbound_substream_attempts += 1;
|
||||
|
||||
if handler.inbound_substream_attempts == MAX_SUBSTREAM_ATTEMPTS {
|
||||
tracing::warn!(
|
||||
"The maximum number of inbound substreams attempts has been exceeded"
|
||||
);
|
||||
*self = Handler::Disabled(DisabledHandler::MaxSubstreamAttempts);
|
||||
return;
|
||||
}
|
||||
}
|
||||
|
||||
if event.is_outbound() {
|
||||
handler.outbound_substream_establishing = false;
|
||||
|
||||
handler.outbound_substream_attempts += 1;
|
||||
|
||||
if handler.outbound_substream_attempts == MAX_SUBSTREAM_ATTEMPTS {
|
||||
tracing::warn!(
|
||||
"The maximum number of outbound substream attempts has been exceeded"
|
||||
);
|
||||
*self = Handler::Disabled(DisabledHandler::MaxSubstreamAttempts);
|
||||
return;
|
||||
}
|
||||
}
|
||||
|
||||
match event {
|
||||
ConnectionEvent::FullyNegotiatedInbound(FullyNegotiatedInbound {
|
||||
protocol,
|
||||
..
|
||||
}) => match protocol {
|
||||
Either::Left(protocol) => handler.on_fully_negotiated_inbound(protocol),
|
||||
Either::Right(v) => void::unreachable(v),
|
||||
},
|
||||
ConnectionEvent::FullyNegotiatedOutbound(fully_negotiated_outbound) => {
|
||||
handler.on_fully_negotiated_outbound(fully_negotiated_outbound)
|
||||
}
|
||||
ConnectionEvent::DialUpgradeError(DialUpgradeError {
|
||||
error: StreamUpgradeError::Timeout,
|
||||
..
|
||||
}) => {
|
||||
tracing::debug!("Dial upgrade error: Protocol negotiation timeout");
|
||||
}
|
||||
ConnectionEvent::DialUpgradeError(DialUpgradeError {
|
||||
error: StreamUpgradeError::Apply(e),
|
||||
..
|
||||
}) => void::unreachable(e),
|
||||
ConnectionEvent::DialUpgradeError(DialUpgradeError {
|
||||
error: StreamUpgradeError::NegotiationFailed,
|
||||
..
|
||||
}) => {
|
||||
// The protocol is not supported
|
||||
tracing::debug!(
|
||||
"The remote peer does not support gossipsub on this connection"
|
||||
);
|
||||
*self = Handler::Disabled(DisabledHandler::ProtocolUnsupported {
|
||||
peer_kind_sent: false,
|
||||
});
|
||||
}
|
||||
ConnectionEvent::DialUpgradeError(DialUpgradeError {
|
||||
error: StreamUpgradeError::Io(e),
|
||||
..
|
||||
}) => {
|
||||
tracing::debug!("Protocol negotiation failed: {e}")
|
||||
}
|
||||
_ => {}
|
||||
}
|
||||
}
|
||||
Handler::Disabled(_) => {}
|
||||
}
|
||||
}
|
||||
}
|
||||
387
beacon_node/lighthouse_network/src/gossipsub/mcache.rs
Normal file
387
beacon_node/lighthouse_network/src/gossipsub/mcache.rs
Normal file
@ -0,0 +1,387 @@
|
||||
// Copyright 2020 Sigma Prime Pty Ltd.
|
||||
//
|
||||
// Permission is hereby granted, free of charge, to any person obtaining a
|
||||
// copy of this software and associated documentation files (the "Software"),
|
||||
// to deal in the Software without restriction, including without limitation
|
||||
// the rights to use, copy, modify, merge, publish, distribute, sublicense,
|
||||
// and/or sell copies of the Software, and to permit persons to whom the
|
||||
// Software is furnished to do so, subject to the following conditions:
|
||||
//
|
||||
// The above copyright notice and this permission notice shall be included in
|
||||
// all copies or substantial portions of the Software.
|
||||
//
|
||||
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
|
||||
// OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
|
||||
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
||||
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
|
||||
// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
|
||||
// DEALINGS IN THE SOFTWARE.
|
||||
|
||||
use super::topic::TopicHash;
|
||||
use super::types::{MessageId, RawMessage};
|
||||
use libp2p::identity::PeerId;
|
||||
use std::collections::hash_map::Entry;
|
||||
use std::fmt::Debug;
|
||||
use std::{
|
||||
collections::{HashMap, HashSet},
|
||||
fmt,
|
||||
};
|
||||
|
||||
/// CacheEntry stored in the history.
|
||||
#[derive(Debug, Clone, PartialEq, Eq, Hash)]
|
||||
pub(crate) struct CacheEntry {
|
||||
mid: MessageId,
|
||||
topic: TopicHash,
|
||||
}
|
||||
|
||||
/// MessageCache struct holding history of messages.
|
||||
#[derive(Clone)]
|
||||
pub(crate) struct MessageCache {
|
||||
msgs: HashMap<MessageId, (RawMessage, HashSet<PeerId>)>,
|
||||
/// For every message and peer the number of times this peer asked for the message
|
||||
iwant_counts: HashMap<MessageId, HashMap<PeerId, u32>>,
|
||||
history: Vec<Vec<CacheEntry>>,
|
||||
/// The number of indices in the cache history used for gossiping. That means that a message
|
||||
/// won't get gossiped anymore when shift got called `gossip` many times after inserting the
|
||||
/// message in the cache.
|
||||
gossip: usize,
|
||||
}
|
||||
|
||||
impl fmt::Debug for MessageCache {
|
||||
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
|
||||
f.debug_struct("MessageCache")
|
||||
.field("msgs", &self.msgs)
|
||||
.field("history", &self.history)
|
||||
.field("gossip", &self.gossip)
|
||||
.finish()
|
||||
}
|
||||
}
|
||||
|
||||
/// Implementation of the MessageCache.
|
||||
impl MessageCache {
|
||||
pub(crate) fn new(gossip: usize, history_capacity: usize) -> Self {
|
||||
MessageCache {
|
||||
gossip,
|
||||
msgs: HashMap::default(),
|
||||
iwant_counts: HashMap::default(),
|
||||
history: vec![Vec::new(); history_capacity],
|
||||
}
|
||||
}
|
||||
|
||||
/// Put a message into the memory cache.
|
||||
///
|
||||
/// Returns true if the message didn't already exist in the cache.
|
||||
pub(crate) fn put(&mut self, message_id: &MessageId, msg: RawMessage) -> bool {
|
||||
match self.msgs.entry(message_id.clone()) {
|
||||
Entry::Occupied(_) => {
|
||||
// Don't add duplicate entries to the cache.
|
||||
false
|
||||
}
|
||||
Entry::Vacant(entry) => {
|
||||
let cache_entry = CacheEntry {
|
||||
mid: message_id.clone(),
|
||||
topic: msg.topic.clone(),
|
||||
};
|
||||
entry.insert((msg, HashSet::default()));
|
||||
self.history[0].push(cache_entry);
|
||||
|
||||
tracing::trace!(message=?message_id, "Put message in mcache");
|
||||
true
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// Keeps track of peers we know have received the message to prevent forwarding to said peers.
|
||||
pub(crate) fn observe_duplicate(&mut self, message_id: &MessageId, source: &PeerId) {
|
||||
if let Some((message, originating_peers)) = self.msgs.get_mut(message_id) {
|
||||
// if the message is already validated, we don't need to store extra peers sending us
|
||||
// duplicates as the message has already been forwarded
|
||||
if message.validated {
|
||||
return;
|
||||
}
|
||||
|
||||
originating_peers.insert(*source);
|
||||
}
|
||||
}
|
||||
|
||||
/// Get a message with `message_id`
|
||||
#[cfg(test)]
|
||||
pub(crate) fn get(&self, message_id: &MessageId) -> Option<&RawMessage> {
|
||||
self.msgs.get(message_id).map(|(message, _)| message)
|
||||
}
|
||||
|
||||
/// Increases the iwant count for the given message by one and returns the message together
|
||||
/// with the iwant if the message exists.
|
||||
pub(crate) fn get_with_iwant_counts(
|
||||
&mut self,
|
||||
message_id: &MessageId,
|
||||
peer: &PeerId,
|
||||
) -> Option<(&RawMessage, u32)> {
|
||||
let iwant_counts = &mut self.iwant_counts;
|
||||
self.msgs.get(message_id).and_then(|(message, _)| {
|
||||
if !message.validated {
|
||||
None
|
||||
} else {
|
||||
Some((message, {
|
||||
let count = iwant_counts
|
||||
.entry(message_id.clone())
|
||||
.or_default()
|
||||
.entry(*peer)
|
||||
.or_default();
|
||||
*count += 1;
|
||||
*count
|
||||
}))
|
||||
}
|
||||
})
|
||||
}
|
||||
|
||||
/// Gets a message with [`MessageId`] and tags it as validated.
|
||||
/// This function also returns the known peers that have sent us this message. This is used to
|
||||
/// prevent us sending redundant messages to peers who have already propagated it.
|
||||
pub(crate) fn validate(
|
||||
&mut self,
|
||||
message_id: &MessageId,
|
||||
) -> Option<(&RawMessage, HashSet<PeerId>)> {
|
||||
self.msgs.get_mut(message_id).map(|(message, known_peers)| {
|
||||
message.validated = true;
|
||||
// Clear the known peers list (after a message is validated, it is forwarded and we no
|
||||
// longer need to store the originating peers).
|
||||
let originating_peers = std::mem::take(known_peers);
|
||||
(&*message, originating_peers)
|
||||
})
|
||||
}
|
||||
|
||||
/// Get a list of [`MessageId`]s for a given topic.
|
||||
pub(crate) fn get_gossip_message_ids(&self, topic: &TopicHash) -> Vec<MessageId> {
|
||||
self.history[..self.gossip]
|
||||
.iter()
|
||||
.fold(vec![], |mut current_entries, entries| {
|
||||
// search for entries with desired topic
|
||||
let mut found_entries: Vec<MessageId> = entries
|
||||
.iter()
|
||||
.filter_map(|entry| {
|
||||
if &entry.topic == topic {
|
||||
let mid = &entry.mid;
|
||||
// Only gossip validated messages
|
||||
if let Some(true) = self.msgs.get(mid).map(|(msg, _)| msg.validated) {
|
||||
Some(mid.clone())
|
||||
} else {
|
||||
None
|
||||
}
|
||||
} else {
|
||||
None
|
||||
}
|
||||
})
|
||||
.collect();
|
||||
|
||||
// generate the list
|
||||
current_entries.append(&mut found_entries);
|
||||
current_entries
|
||||
})
|
||||
}
|
||||
|
||||
/// Shift the history array down one and delete messages associated with the
|
||||
/// last entry.
|
||||
pub(crate) fn shift(&mut self) {
|
||||
for entry in self.history.pop().expect("history is always > 1") {
|
||||
if let Some((msg, _)) = self.msgs.remove(&entry.mid) {
|
||||
if !msg.validated {
|
||||
// If GossipsubConfig::validate_messages is true, the implementing
|
||||
// application has to ensure that Gossipsub::validate_message gets called for
|
||||
// each received message within the cache timeout time."
|
||||
tracing::debug!(
|
||||
message=%&entry.mid,
|
||||
"The message got removed from the cache without being validated."
|
||||
);
|
||||
}
|
||||
}
|
||||
tracing::trace!(message=%&entry.mid, "Remove message from the cache");
|
||||
|
||||
self.iwant_counts.remove(&entry.mid);
|
||||
}
|
||||
|
||||
// Insert an empty vec in position 0
|
||||
self.history.insert(0, Vec::new());
|
||||
}
|
||||
|
||||
/// Removes a message from the cache and returns it if existent
|
||||
pub(crate) fn remove(
|
||||
&mut self,
|
||||
message_id: &MessageId,
|
||||
) -> Option<(RawMessage, HashSet<PeerId>)> {
|
||||
//We only remove the message from msgs and iwant_count and keep the message_id in the
|
||||
// history vector. Zhe id in the history vector will simply be ignored on popping.
|
||||
|
||||
self.iwant_counts.remove(message_id);
|
||||
self.msgs.remove(message_id)
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use super::*;
|
||||
use crate::gossipsub::types::RawMessage;
|
||||
use crate::{IdentTopic as Topic, TopicHash};
|
||||
use libp2p::identity::PeerId;
|
||||
|
||||
fn gen_testm(x: u64, topic: TopicHash) -> (MessageId, RawMessage) {
|
||||
let default_id = |message: &RawMessage| {
|
||||
// default message id is: source + sequence number
|
||||
let mut source_string = message.source.as_ref().unwrap().to_base58();
|
||||
source_string.push_str(&message.sequence_number.unwrap().to_string());
|
||||
MessageId::from(source_string)
|
||||
};
|
||||
let u8x: u8 = x as u8;
|
||||
let source = Some(PeerId::random());
|
||||
let data: Vec<u8> = vec![u8x];
|
||||
let sequence_number = Some(x);
|
||||
|
||||
let m = RawMessage {
|
||||
source,
|
||||
data,
|
||||
sequence_number,
|
||||
topic,
|
||||
signature: None,
|
||||
key: None,
|
||||
validated: false,
|
||||
};
|
||||
|
||||
let id = default_id(&m);
|
||||
(id, m)
|
||||
}
|
||||
|
||||
fn new_cache(gossip_size: usize, history: usize) -> MessageCache {
|
||||
MessageCache::new(gossip_size, history)
|
||||
}
|
||||
|
||||
#[test]
|
||||
/// Test that the message cache can be created.
|
||||
fn test_new_cache() {
|
||||
let x: usize = 3;
|
||||
let mc = new_cache(x, 5);
|
||||
|
||||
assert_eq!(mc.gossip, x);
|
||||
}
|
||||
|
||||
#[test]
|
||||
/// Test you can put one message and get one.
|
||||
fn test_put_get_one() {
|
||||
let mut mc = new_cache(10, 15);
|
||||
|
||||
let topic1_hash = Topic::new("topic1").hash();
|
||||
let (id, m) = gen_testm(10, topic1_hash);
|
||||
|
||||
mc.put(&id, m.clone());
|
||||
|
||||
assert_eq!(mc.history[0].len(), 1);
|
||||
|
||||
let fetched = mc.get(&id);
|
||||
|
||||
assert_eq!(fetched.unwrap(), &m);
|
||||
}
|
||||
|
||||
#[test]
|
||||
/// Test attempting to 'get' with a wrong id.
|
||||
fn test_get_wrong() {
|
||||
let mut mc = new_cache(10, 15);
|
||||
|
||||
let topic1_hash = Topic::new("topic1").hash();
|
||||
let (id, m) = gen_testm(10, topic1_hash);
|
||||
|
||||
mc.put(&id, m);
|
||||
|
||||
// Try to get an incorrect ID
|
||||
let wrong_id = MessageId::new(b"wrongid");
|
||||
let fetched = mc.get(&wrong_id);
|
||||
assert!(fetched.is_none());
|
||||
}
|
||||
|
||||
#[test]
|
||||
/// Test attempting to 'get' empty message cache.
|
||||
fn test_get_empty() {
|
||||
let mc = new_cache(10, 15);
|
||||
|
||||
// Try to get an incorrect ID
|
||||
let wrong_string = MessageId::new(b"imempty");
|
||||
let fetched = mc.get(&wrong_string);
|
||||
assert!(fetched.is_none());
|
||||
}
|
||||
|
||||
#[test]
|
||||
/// Test shift mechanism.
|
||||
fn test_shift() {
|
||||
let mut mc = new_cache(1, 5);
|
||||
|
||||
let topic1_hash = Topic::new("topic1").hash();
|
||||
|
||||
// Build the message
|
||||
for i in 0..10 {
|
||||
let (id, m) = gen_testm(i, topic1_hash.clone());
|
||||
mc.put(&id, m.clone());
|
||||
}
|
||||
|
||||
mc.shift();
|
||||
|
||||
// Ensure the shift occurred
|
||||
assert!(mc.history[0].is_empty());
|
||||
assert!(mc.history[1].len() == 10);
|
||||
|
||||
// Make sure no messages deleted
|
||||
assert!(mc.msgs.len() == 10);
|
||||
}
|
||||
|
||||
#[test]
|
||||
/// Test Shift with no additions.
|
||||
fn test_empty_shift() {
|
||||
let mut mc = new_cache(1, 5);
|
||||
|
||||
let topic1_hash = Topic::new("topic1").hash();
|
||||
|
||||
// Build the message
|
||||
for i in 0..10 {
|
||||
let (id, m) = gen_testm(i, topic1_hash.clone());
|
||||
mc.put(&id, m.clone());
|
||||
}
|
||||
|
||||
mc.shift();
|
||||
|
||||
// Ensure the shift occurred
|
||||
assert!(mc.history[0].is_empty());
|
||||
assert!(mc.history[1].len() == 10);
|
||||
|
||||
mc.shift();
|
||||
|
||||
assert!(mc.history[2].len() == 10);
|
||||
assert!(mc.history[1].is_empty());
|
||||
assert!(mc.history[0].is_empty());
|
||||
}
|
||||
|
||||
#[test]
|
||||
/// Test shift to see if the last history messages are removed.
|
||||
fn test_remove_last_from_shift() {
|
||||
let mut mc = new_cache(4, 5);
|
||||
|
||||
let topic1_hash = Topic::new("topic1").hash();
|
||||
|
||||
// Build the message
|
||||
for i in 0..10 {
|
||||
let (id, m) = gen_testm(i, topic1_hash.clone());
|
||||
mc.put(&id, m.clone());
|
||||
}
|
||||
|
||||
// Shift right until deleting messages
|
||||
mc.shift();
|
||||
mc.shift();
|
||||
mc.shift();
|
||||
mc.shift();
|
||||
|
||||
assert_eq!(mc.history[mc.history.len() - 1].len(), 10);
|
||||
|
||||
// Shift and delete the messages
|
||||
mc.shift();
|
||||
assert_eq!(mc.history[mc.history.len() - 1].len(), 0);
|
||||
assert_eq!(mc.history[0].len(), 0);
|
||||
assert_eq!(mc.msgs.len(), 0);
|
||||
}
|
||||
}
|
||||
680
beacon_node/lighthouse_network/src/gossipsub/metrics.rs
Normal file
680
beacon_node/lighthouse_network/src/gossipsub/metrics.rs
Normal file
@ -0,0 +1,680 @@
|
||||
// Copyright 2020 Sigma Prime Pty Ltd.
|
||||
//
|
||||
// Permission is hereby granted, free of charge, to any person obtaining a
|
||||
// copy of this software and associated documentation files (the "Software"),
|
||||
// to deal in the Software without restriction, including without limitation
|
||||
// the rights to use, copy, modify, merge, publish, distribute, sublicense,
|
||||
// and/or sell copies of the Software, and to permit persons to whom the
|
||||
// Software is furnished to do so, subject to the following conditions:
|
||||
//
|
||||
// The above copyright notice and this permission notice shall be included in
|
||||
// all copies or substantial portions of the Software.
|
||||
//
|
||||
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
|
||||
// OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
|
||||
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
||||
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
|
||||
// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
|
||||
// DEALINGS IN THE SOFTWARE.
|
||||
|
||||
//! A set of metrics used to help track and diagnose the network behaviour of the gossipsub
|
||||
//! protocol.
|
||||
|
||||
use std::collections::HashMap;
|
||||
|
||||
use prometheus_client::encoding::{EncodeLabelSet, EncodeLabelValue};
|
||||
use prometheus_client::metrics::counter::Counter;
|
||||
use prometheus_client::metrics::family::{Family, MetricConstructor};
|
||||
use prometheus_client::metrics::gauge::Gauge;
|
||||
use prometheus_client::metrics::histogram::{linear_buckets, Histogram};
|
||||
use prometheus_client::registry::Registry;
|
||||
|
||||
use super::topic::TopicHash;
|
||||
use super::types::{MessageAcceptance, PeerKind};
|
||||
|
||||
// Default value that limits for how many topics do we store metrics.
|
||||
const DEFAULT_MAX_TOPICS: usize = 300;
|
||||
|
||||
// Default value that limits how many topics for which there has never been a subscription do we
|
||||
// store metrics.
|
||||
const DEFAULT_MAX_NEVER_SUBSCRIBED_TOPICS: usize = 100;
|
||||
|
||||
#[derive(Debug, Clone)]
|
||||
pub struct Config {
|
||||
/// This provides an upper bound to the number of mesh topics we create metrics for. It
|
||||
/// prevents unbounded labels being created in the metrics.
|
||||
pub max_topics: usize,
|
||||
/// Mesh topics are controlled by the user via subscriptions whereas non-mesh topics are
|
||||
/// determined by users on the network. This limit permits a fixed amount of topics to allow,
|
||||
/// in-addition to the mesh topics.
|
||||
pub max_never_subscribed_topics: usize,
|
||||
/// Buckets used for the score histograms.
|
||||
pub score_buckets: Vec<f64>,
|
||||
}
|
||||
|
||||
impl Config {
|
||||
/// Create buckets for the score histograms based on score thresholds.
|
||||
pub fn buckets_using_scoring_thresholds(&mut self, params: &super::PeerScoreThresholds) {
|
||||
self.score_buckets = vec![
|
||||
params.graylist_threshold,
|
||||
params.publish_threshold,
|
||||
params.gossip_threshold,
|
||||
params.gossip_threshold / 2.0,
|
||||
params.gossip_threshold / 4.0,
|
||||
0.0,
|
||||
1.0,
|
||||
10.0,
|
||||
100.0,
|
||||
];
|
||||
}
|
||||
}
|
||||
|
||||
impl Default for Config {
|
||||
fn default() -> Self {
|
||||
// Some sensible defaults
|
||||
let gossip_threshold = -4000.0;
|
||||
let publish_threshold = -8000.0;
|
||||
let graylist_threshold = -16000.0;
|
||||
let score_buckets: Vec<f64> = vec![
|
||||
graylist_threshold,
|
||||
publish_threshold,
|
||||
gossip_threshold,
|
||||
gossip_threshold / 2.0,
|
||||
gossip_threshold / 4.0,
|
||||
0.0,
|
||||
1.0,
|
||||
10.0,
|
||||
100.0,
|
||||
];
|
||||
Config {
|
||||
max_topics: DEFAULT_MAX_TOPICS,
|
||||
max_never_subscribed_topics: DEFAULT_MAX_NEVER_SUBSCRIBED_TOPICS,
|
||||
score_buckets,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// Whether we have ever been subscribed to this topic.
|
||||
type EverSubscribed = bool;
|
||||
|
||||
/// A collection of metrics used throughout the Gossipsub behaviour.
|
||||
pub(crate) struct Metrics {
|
||||
/* Configuration parameters */
|
||||
/// Maximum number of topics for which we store metrics. This helps keep the metrics bounded.
|
||||
max_topics: usize,
|
||||
/// Maximum number of topics for which we store metrics, where the topic in not one to which we
|
||||
/// have subscribed at some point. This helps keep the metrics bounded, since these topics come
|
||||
/// from received messages and not explicit application subscriptions.
|
||||
max_never_subscribed_topics: usize,
|
||||
|
||||
/* Auxiliary variables */
|
||||
/// Information needed to decide if a topic is allowed or not.
|
||||
topic_info: HashMap<TopicHash, EverSubscribed>,
|
||||
|
||||
/* Metrics per known topic */
|
||||
/// Status of our subscription to this topic. This metric allows analyzing other topic metrics
|
||||
/// filtered by our current subscription status.
|
||||
topic_subscription_status: Family<TopicHash, Gauge>,
|
||||
/// Number of peers subscribed to each topic. This allows us to analyze a topic's behaviour
|
||||
/// regardless of our subscription status.
|
||||
topic_peers_count: Family<TopicHash, Gauge>,
|
||||
/// The number of invalid messages received for a given topic.
|
||||
invalid_messages: Family<TopicHash, Counter>,
|
||||
/// The number of messages accepted by the application (validation result).
|
||||
accepted_messages: Family<TopicHash, Counter>,
|
||||
/// The number of messages ignored by the application (validation result).
|
||||
ignored_messages: Family<TopicHash, Counter>,
|
||||
/// The number of messages rejected by the application (validation result).
|
||||
rejected_messages: Family<TopicHash, Counter>,
|
||||
/// The number of publish messages dropped by the sender.
|
||||
publish_messages_dropped: Family<TopicHash, Counter>,
|
||||
/// The number of forward messages dropped by the sender.
|
||||
forward_messages_dropped: Family<TopicHash, Counter>,
|
||||
|
||||
/* Metrics regarding mesh state */
|
||||
/// Number of peers in our mesh. This metric should be updated with the count of peers for a
|
||||
/// topic in the mesh regardless of inclusion and churn events.
|
||||
mesh_peer_counts: Family<TopicHash, Gauge>,
|
||||
/// Number of times we include peers in a topic mesh for different reasons.
|
||||
mesh_peer_inclusion_events: Family<InclusionLabel, Counter>,
|
||||
/// Number of times we remove peers in a topic mesh for different reasons.
|
||||
mesh_peer_churn_events: Family<ChurnLabel, Counter>,
|
||||
|
||||
/* Metrics regarding messages sent/received */
|
||||
/// Number of gossip messages sent to each topic.
|
||||
topic_msg_sent_counts: Family<TopicHash, Counter>,
|
||||
/// Bytes from gossip messages sent to each topic.
|
||||
topic_msg_sent_bytes: Family<TopicHash, Counter>,
|
||||
/// Number of gossipsub messages published to each topic.
|
||||
topic_msg_published: Family<TopicHash, Counter>,
|
||||
|
||||
/// Number of gossipsub messages received on each topic (without filtering duplicates).
|
||||
topic_msg_recv_counts_unfiltered: Family<TopicHash, Counter>,
|
||||
/// Number of gossipsub messages received on each topic (after filtering duplicates).
|
||||
topic_msg_recv_counts: Family<TopicHash, Counter>,
|
||||
/// Bytes received from gossip messages for each topic.
|
||||
topic_msg_recv_bytes: Family<TopicHash, Counter>,
|
||||
|
||||
/* Metrics related to scoring */
|
||||
/// Histogram of the scores for each mesh topic.
|
||||
score_per_mesh: Family<TopicHash, Histogram, HistBuilder>,
|
||||
/// A counter of the kind of penalties being applied to peers.
|
||||
scoring_penalties: Family<PenaltyLabel, Counter>,
|
||||
|
||||
/* General Metrics */
|
||||
/// Gossipsub supports floodsub, gossipsub v1.0 and gossipsub v1.1. Peers are classified based
|
||||
/// on which protocol they support. This metric keeps track of the number of peers that are
|
||||
/// connected of each type.
|
||||
peers_per_protocol: Family<ProtocolLabel, Gauge>,
|
||||
/// The time it takes to complete one iteration of the heartbeat.
|
||||
heartbeat_duration: Histogram,
|
||||
|
||||
/* Performance metrics */
|
||||
/// When the user validates a message, it tries to re propagate it to its mesh peers. If the
|
||||
/// message expires from the memcache before it can be validated, we count this a cache miss
|
||||
/// and it is an indicator that the memcache size should be increased.
|
||||
memcache_misses: Counter,
|
||||
/// The number of times we have decided that an IWANT control message is required for this
|
||||
/// topic. A very high metric might indicate an underperforming network.
|
||||
topic_iwant_msgs: Family<TopicHash, Counter>,
|
||||
|
||||
/// The size of the priority queue.
|
||||
priority_queue_size: Histogram,
|
||||
/// The size of the non-priority queue.
|
||||
non_priority_queue_size: Histogram,
|
||||
}
|
||||
|
||||
impl Metrics {
|
||||
pub(crate) fn new(registry: &mut Registry, config: Config) -> Self {
|
||||
// Destructure the config to be sure everything is used.
|
||||
let Config {
|
||||
max_topics,
|
||||
max_never_subscribed_topics,
|
||||
score_buckets,
|
||||
} = config;
|
||||
|
||||
macro_rules! register_family {
|
||||
($name:expr, $help:expr) => {{
|
||||
let fam = Family::default();
|
||||
registry.register($name, $help, fam.clone());
|
||||
fam
|
||||
}};
|
||||
}
|
||||
|
||||
let topic_subscription_status = register_family!(
|
||||
"topic_subscription_status",
|
||||
"Subscription status per known topic"
|
||||
);
|
||||
let topic_peers_count = register_family!(
|
||||
"topic_peers_counts",
|
||||
"Number of peers subscribed to each topic"
|
||||
);
|
||||
|
||||
let invalid_messages = register_family!(
|
||||
"invalid_messages_per_topic",
|
||||
"Number of invalid messages received for each topic"
|
||||
);
|
||||
|
||||
let accepted_messages = register_family!(
|
||||
"accepted_messages_per_topic",
|
||||
"Number of accepted messages received for each topic"
|
||||
);
|
||||
|
||||
let ignored_messages = register_family!(
|
||||
"ignored_messages_per_topic",
|
||||
"Number of ignored messages received for each topic"
|
||||
);
|
||||
|
||||
let rejected_messages = register_family!(
|
||||
"rejected_messages_per_topic",
|
||||
"Number of rejected messages received for each topic"
|
||||
);
|
||||
|
||||
let publish_messages_dropped = register_family!(
|
||||
"publish_messages_dropped_per_topic",
|
||||
"Number of publish messages dropped per topic"
|
||||
);
|
||||
|
||||
let forward_messages_dropped = register_family!(
|
||||
"forward_messages_dropped_per_topic",
|
||||
"Number of forward messages dropped per topic"
|
||||
);
|
||||
|
||||
let mesh_peer_counts = register_family!(
|
||||
"mesh_peer_counts",
|
||||
"Number of peers in each topic in our mesh"
|
||||
);
|
||||
let mesh_peer_inclusion_events = register_family!(
|
||||
"mesh_peer_inclusion_events",
|
||||
"Number of times a peer gets added to our mesh for different reasons"
|
||||
);
|
||||
let mesh_peer_churn_events = register_family!(
|
||||
"mesh_peer_churn_events",
|
||||
"Number of times a peer gets removed from our mesh for different reasons"
|
||||
);
|
||||
let topic_msg_sent_counts = register_family!(
|
||||
"topic_msg_sent_counts",
|
||||
"Number of gossip messages sent to each topic"
|
||||
);
|
||||
let topic_msg_published = register_family!(
|
||||
"topic_msg_published",
|
||||
"Number of gossip messages published to each topic"
|
||||
);
|
||||
let topic_msg_sent_bytes = register_family!(
|
||||
"topic_msg_sent_bytes",
|
||||
"Bytes from gossip messages sent to each topic"
|
||||
);
|
||||
|
||||
let topic_msg_recv_counts_unfiltered = register_family!(
|
||||
"topic_msg_recv_counts_unfiltered",
|
||||
"Number of gossip messages received on each topic (without duplicates being filtered)"
|
||||
);
|
||||
|
||||
let topic_msg_recv_counts = register_family!(
|
||||
"topic_msg_recv_counts",
|
||||
"Number of gossip messages received on each topic (after duplicates have been filtered)"
|
||||
);
|
||||
let topic_msg_recv_bytes = register_family!(
|
||||
"topic_msg_recv_bytes",
|
||||
"Bytes received from gossip messages for each topic"
|
||||
);
|
||||
|
||||
let hist_builder = HistBuilder {
|
||||
buckets: score_buckets,
|
||||
};
|
||||
|
||||
let score_per_mesh: Family<_, _, HistBuilder> = Family::new_with_constructor(hist_builder);
|
||||
registry.register(
|
||||
"score_per_mesh",
|
||||
"Histogram of scores per mesh topic",
|
||||
score_per_mesh.clone(),
|
||||
);
|
||||
|
||||
let scoring_penalties = register_family!(
|
||||
"scoring_penalties",
|
||||
"Counter of types of scoring penalties given to peers"
|
||||
);
|
||||
let peers_per_protocol = register_family!(
|
||||
"peers_per_protocol",
|
||||
"Number of connected peers by protocol type"
|
||||
);
|
||||
|
||||
let heartbeat_duration = Histogram::new(linear_buckets(0.0, 50.0, 10));
|
||||
registry.register(
|
||||
"heartbeat_duration",
|
||||
"Histogram of observed heartbeat durations",
|
||||
heartbeat_duration.clone(),
|
||||
);
|
||||
|
||||
let topic_iwant_msgs = register_family!(
|
||||
"topic_iwant_msgs",
|
||||
"Number of times we have decided an IWANT is required for this topic"
|
||||
);
|
||||
let memcache_misses = {
|
||||
let metric = Counter::default();
|
||||
registry.register(
|
||||
"memcache_misses",
|
||||
"Number of times a message is not found in the duplicate cache when validating",
|
||||
metric.clone(),
|
||||
);
|
||||
metric
|
||||
};
|
||||
|
||||
let priority_queue_size = Histogram::new(linear_buckets(0.0, 25.0, 100));
|
||||
registry.register(
|
||||
"priority_queue_size",
|
||||
"Histogram of observed priority queue sizes",
|
||||
priority_queue_size.clone(),
|
||||
);
|
||||
|
||||
let non_priority_queue_size = Histogram::new(linear_buckets(0.0, 25.0, 100));
|
||||
registry.register(
|
||||
"non_priority_queue_size",
|
||||
"Histogram of observed non-priority queue sizes",
|
||||
non_priority_queue_size.clone(),
|
||||
);
|
||||
|
||||
Self {
|
||||
max_topics,
|
||||
max_never_subscribed_topics,
|
||||
topic_info: HashMap::default(),
|
||||
topic_subscription_status,
|
||||
topic_peers_count,
|
||||
invalid_messages,
|
||||
accepted_messages,
|
||||
ignored_messages,
|
||||
rejected_messages,
|
||||
publish_messages_dropped,
|
||||
forward_messages_dropped,
|
||||
mesh_peer_counts,
|
||||
mesh_peer_inclusion_events,
|
||||
mesh_peer_churn_events,
|
||||
topic_msg_sent_counts,
|
||||
topic_msg_sent_bytes,
|
||||
topic_msg_published,
|
||||
topic_msg_recv_counts_unfiltered,
|
||||
topic_msg_recv_counts,
|
||||
topic_msg_recv_bytes,
|
||||
score_per_mesh,
|
||||
scoring_penalties,
|
||||
peers_per_protocol,
|
||||
heartbeat_duration,
|
||||
memcache_misses,
|
||||
topic_iwant_msgs,
|
||||
priority_queue_size,
|
||||
non_priority_queue_size,
|
||||
}
|
||||
}
|
||||
|
||||
fn non_subscription_topics_count(&self) -> usize {
|
||||
self.topic_info
|
||||
.values()
|
||||
.filter(|&ever_subscribed| !ever_subscribed)
|
||||
.count()
|
||||
}
|
||||
|
||||
/// Registers a topic if not already known and if the bounds allow it.
|
||||
fn register_topic(&mut self, topic: &TopicHash) -> Result<(), ()> {
|
||||
if self.topic_info.contains_key(topic) {
|
||||
Ok(())
|
||||
} else if self.topic_info.len() < self.max_topics
|
||||
&& self.non_subscription_topics_count() < self.max_never_subscribed_topics
|
||||
{
|
||||
// This is a topic without an explicit subscription and we register it if we are within
|
||||
// the configured bounds.
|
||||
self.topic_info.entry(topic.clone()).or_insert(false);
|
||||
self.topic_subscription_status.get_or_create(topic).set(0);
|
||||
Ok(())
|
||||
} else {
|
||||
// We don't know this topic and there is no space left to store it
|
||||
Err(())
|
||||
}
|
||||
}
|
||||
|
||||
/// Registers a set of topics that we want to store calculate metrics for.
|
||||
pub(crate) fn register_allowed_topics(&mut self, topics: Vec<TopicHash>) {
|
||||
for topic_hash in topics {
|
||||
self.topic_info.insert(topic_hash, true);
|
||||
}
|
||||
}
|
||||
|
||||
/// Increase the number of peers that are subscribed to this topic.
|
||||
pub(crate) fn inc_topic_peers(&mut self, topic: &TopicHash) {
|
||||
if self.register_topic(topic).is_ok() {
|
||||
self.topic_peers_count.get_or_create(topic).inc();
|
||||
}
|
||||
}
|
||||
|
||||
/// Decrease the number of peers that are subscribed to this topic.
|
||||
pub(crate) fn dec_topic_peers(&mut self, topic: &TopicHash) {
|
||||
if self.register_topic(topic).is_ok() {
|
||||
self.topic_peers_count.get_or_create(topic).dec();
|
||||
}
|
||||
}
|
||||
|
||||
/* Mesh related methods */
|
||||
|
||||
/// Registers the subscription to a topic if the configured limits allow it.
|
||||
/// Sets the registered number of peers in the mesh to 0.
|
||||
pub(crate) fn joined(&mut self, topic: &TopicHash) {
|
||||
if self.topic_info.contains_key(topic) || self.topic_info.len() < self.max_topics {
|
||||
self.topic_info.insert(topic.clone(), true);
|
||||
let was_subscribed = self.topic_subscription_status.get_or_create(topic).set(1);
|
||||
debug_assert_eq!(was_subscribed, 0);
|
||||
self.mesh_peer_counts.get_or_create(topic).set(0);
|
||||
}
|
||||
}
|
||||
|
||||
/// Registers the unsubscription to a topic if the topic was previously allowed.
|
||||
/// Sets the registered number of peers in the mesh to 0.
|
||||
pub(crate) fn left(&mut self, topic: &TopicHash) {
|
||||
if self.topic_info.contains_key(topic) {
|
||||
// Depending on the configured topic bounds we could miss a mesh topic.
|
||||
// So, check first if the topic was previously allowed.
|
||||
let was_subscribed = self.topic_subscription_status.get_or_create(topic).set(0);
|
||||
debug_assert_eq!(was_subscribed, 1);
|
||||
self.mesh_peer_counts.get_or_create(topic).set(0);
|
||||
}
|
||||
}
|
||||
|
||||
/// Register the inclusion of peers in our mesh due to some reason.
|
||||
pub(crate) fn peers_included(&mut self, topic: &TopicHash, reason: Inclusion, count: usize) {
|
||||
if self.register_topic(topic).is_ok() {
|
||||
self.mesh_peer_inclusion_events
|
||||
.get_or_create(&InclusionLabel {
|
||||
hash: topic.to_string(),
|
||||
reason,
|
||||
})
|
||||
.inc_by(count as u64);
|
||||
}
|
||||
}
|
||||
|
||||
/// Register the removal of peers in our mesh due to some reason.
|
||||
pub(crate) fn peers_removed(&mut self, topic: &TopicHash, reason: Churn, count: usize) {
|
||||
if self.register_topic(topic).is_ok() {
|
||||
self.mesh_peer_churn_events
|
||||
.get_or_create(&ChurnLabel {
|
||||
hash: topic.to_string(),
|
||||
reason,
|
||||
})
|
||||
.inc_by(count as u64);
|
||||
}
|
||||
}
|
||||
|
||||
/// Register the current number of peers in our mesh for this topic.
|
||||
pub(crate) fn set_mesh_peers(&mut self, topic: &TopicHash, count: usize) {
|
||||
if self.register_topic(topic).is_ok() {
|
||||
// Due to limits, this topic could have not been allowed, so we check.
|
||||
self.mesh_peer_counts.get_or_create(topic).set(count as i64);
|
||||
}
|
||||
}
|
||||
|
||||
/// Register that an invalid message was received on a specific topic.
|
||||
pub(crate) fn register_invalid_message(&mut self, topic: &TopicHash) {
|
||||
if self.register_topic(topic).is_ok() {
|
||||
self.invalid_messages.get_or_create(topic).inc();
|
||||
}
|
||||
}
|
||||
|
||||
/// Register a score penalty.
|
||||
pub(crate) fn register_score_penalty(&mut self, penalty: Penalty) {
|
||||
self.scoring_penalties
|
||||
.get_or_create(&PenaltyLabel { penalty })
|
||||
.inc();
|
||||
}
|
||||
|
||||
/// Registers that a message was published on a specific topic.
|
||||
pub(crate) fn register_published_message(&mut self, topic: &TopicHash) {
|
||||
if self.register_topic(topic).is_ok() {
|
||||
self.topic_msg_published.get_or_create(topic).inc();
|
||||
}
|
||||
}
|
||||
|
||||
/// Register sending a message over a topic.
|
||||
pub(crate) fn msg_sent(&mut self, topic: &TopicHash, bytes: usize) {
|
||||
if self.register_topic(topic).is_ok() {
|
||||
self.topic_msg_sent_counts.get_or_create(topic).inc();
|
||||
self.topic_msg_sent_bytes
|
||||
.get_or_create(topic)
|
||||
.inc_by(bytes as u64);
|
||||
}
|
||||
}
|
||||
|
||||
/// Register sending a message over a topic.
|
||||
pub(crate) fn publish_msg_dropped(&mut self, topic: &TopicHash) {
|
||||
if self.register_topic(topic).is_ok() {
|
||||
self.publish_messages_dropped.get_or_create(topic).inc();
|
||||
}
|
||||
}
|
||||
|
||||
/// Register dropping a message over a topic.
|
||||
pub(crate) fn forward_msg_dropped(&mut self, topic: &TopicHash) {
|
||||
if self.register_topic(topic).is_ok() {
|
||||
self.forward_messages_dropped.get_or_create(topic).inc();
|
||||
}
|
||||
}
|
||||
|
||||
/// Register that a message was received (and was not a duplicate).
|
||||
pub(crate) fn msg_recvd(&mut self, topic: &TopicHash) {
|
||||
if self.register_topic(topic).is_ok() {
|
||||
self.topic_msg_recv_counts.get_or_create(topic).inc();
|
||||
}
|
||||
}
|
||||
|
||||
/// Register that a message was received (could have been a duplicate).
|
||||
pub(crate) fn msg_recvd_unfiltered(&mut self, topic: &TopicHash, bytes: usize) {
|
||||
if self.register_topic(topic).is_ok() {
|
||||
self.topic_msg_recv_counts_unfiltered
|
||||
.get_or_create(topic)
|
||||
.inc();
|
||||
self.topic_msg_recv_bytes
|
||||
.get_or_create(topic)
|
||||
.inc_by(bytes as u64);
|
||||
}
|
||||
}
|
||||
|
||||
pub(crate) fn register_msg_validation(
|
||||
&mut self,
|
||||
topic: &TopicHash,
|
||||
validation: &MessageAcceptance,
|
||||
) {
|
||||
if self.register_topic(topic).is_ok() {
|
||||
match validation {
|
||||
MessageAcceptance::Accept => self.accepted_messages.get_or_create(topic).inc(),
|
||||
MessageAcceptance::Ignore => self.ignored_messages.get_or_create(topic).inc(),
|
||||
MessageAcceptance::Reject => self.rejected_messages.get_or_create(topic).inc(),
|
||||
};
|
||||
}
|
||||
}
|
||||
|
||||
/// Register a memcache miss.
|
||||
pub(crate) fn memcache_miss(&mut self) {
|
||||
self.memcache_misses.inc();
|
||||
}
|
||||
|
||||
/// Register sending an IWANT msg for this topic.
|
||||
pub(crate) fn register_iwant(&mut self, topic: &TopicHash) {
|
||||
if self.register_topic(topic).is_ok() {
|
||||
self.topic_iwant_msgs.get_or_create(topic).inc();
|
||||
}
|
||||
}
|
||||
|
||||
/// Observes a heartbeat duration.
|
||||
pub(crate) fn observe_heartbeat_duration(&mut self, millis: u64) {
|
||||
self.heartbeat_duration.observe(millis as f64);
|
||||
}
|
||||
|
||||
/// Observes a priority queue size.
|
||||
pub(crate) fn observe_priority_queue_size(&mut self, len: usize) {
|
||||
self.priority_queue_size.observe(len as f64);
|
||||
}
|
||||
|
||||
/// Observes a non-priority queue size.
|
||||
pub(crate) fn observe_non_priority_queue_size(&mut self, len: usize) {
|
||||
self.non_priority_queue_size.observe(len as f64);
|
||||
}
|
||||
|
||||
/// Observe a score of a mesh peer.
|
||||
pub(crate) fn observe_mesh_peers_score(&mut self, topic: &TopicHash, score: f64) {
|
||||
if self.register_topic(topic).is_ok() {
|
||||
self.score_per_mesh.get_or_create(topic).observe(score);
|
||||
}
|
||||
}
|
||||
|
||||
/// Register a new peers connection based on its protocol.
|
||||
pub(crate) fn peer_protocol_connected(&mut self, kind: PeerKind) {
|
||||
self.peers_per_protocol
|
||||
.get_or_create(&ProtocolLabel { protocol: kind })
|
||||
.inc();
|
||||
}
|
||||
|
||||
/// Removes a peer from the counter based on its protocol when it disconnects.
|
||||
pub(crate) fn peer_protocol_disconnected(&mut self, kind: PeerKind) {
|
||||
let metric = self
|
||||
.peers_per_protocol
|
||||
.get_or_create(&ProtocolLabel { protocol: kind });
|
||||
if metric.get() != 0 {
|
||||
// decrement the counter
|
||||
metric.set(metric.get() - 1);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// Reasons why a peer was included in the mesh.
|
||||
#[derive(PartialEq, Eq, Hash, EncodeLabelValue, Clone, Debug)]
|
||||
pub(crate) enum Inclusion {
|
||||
/// Peer was a fanaout peer.
|
||||
Fanout,
|
||||
/// Included from random selection.
|
||||
Random,
|
||||
/// Peer subscribed.
|
||||
Subscribed,
|
||||
/// Peer was included to fill the outbound quota.
|
||||
Outbound,
|
||||
}
|
||||
|
||||
/// Reasons why a peer was removed from the mesh.
|
||||
#[derive(PartialEq, Eq, Hash, EncodeLabelValue, Clone, Debug)]
|
||||
pub(crate) enum Churn {
|
||||
/// Peer disconnected.
|
||||
Dc,
|
||||
/// Peer had a bad score.
|
||||
BadScore,
|
||||
/// Peer sent a PRUNE.
|
||||
Prune,
|
||||
/// Peer unsubscribed.
|
||||
Unsub,
|
||||
/// Too many peers.
|
||||
Excess,
|
||||
}
|
||||
|
||||
/// Kinds of reasons a peer's score has been penalized
|
||||
#[derive(PartialEq, Eq, Hash, EncodeLabelValue, Clone, Debug)]
|
||||
pub(crate) enum Penalty {
|
||||
/// A peer grafted before waiting the back-off time.
|
||||
GraftBackoff,
|
||||
/// A Peer did not respond to an IWANT request in time.
|
||||
BrokenPromise,
|
||||
/// A Peer did not send enough messages as expected.
|
||||
MessageDeficit,
|
||||
/// Too many peers under one IP address.
|
||||
IPColocation,
|
||||
}
|
||||
|
||||
/// Label for the mesh inclusion event metrics.
|
||||
#[derive(PartialEq, Eq, Hash, EncodeLabelSet, Clone, Debug)]
|
||||
struct InclusionLabel {
|
||||
hash: String,
|
||||
reason: Inclusion,
|
||||
}
|
||||
|
||||
/// Label for the mesh churn event metrics.
|
||||
#[derive(PartialEq, Eq, Hash, EncodeLabelSet, Clone, Debug)]
|
||||
struct ChurnLabel {
|
||||
hash: String,
|
||||
reason: Churn,
|
||||
}
|
||||
|
||||
/// Label for the kinds of protocols peers can connect as.
|
||||
#[derive(PartialEq, Eq, Hash, EncodeLabelSet, Clone, Debug)]
|
||||
struct ProtocolLabel {
|
||||
protocol: PeerKind,
|
||||
}
|
||||
|
||||
/// Label for the kinds of scoring penalties that can occur
|
||||
#[derive(PartialEq, Eq, Hash, EncodeLabelSet, Clone, Debug)]
|
||||
struct PenaltyLabel {
|
||||
penalty: Penalty,
|
||||
}
|
||||
|
||||
#[derive(Clone)]
|
||||
struct HistBuilder {
|
||||
buckets: Vec<f64>,
|
||||
}
|
||||
|
||||
impl MetricConstructor<Histogram> for HistBuilder {
|
||||
fn new_metric(&self) -> Histogram {
|
||||
Histogram::new(self.buckets.clone().into_iter())
|
||||
}
|
||||
}
|
||||
111
beacon_node/lighthouse_network/src/gossipsub/mod.rs
Normal file
111
beacon_node/lighthouse_network/src/gossipsub/mod.rs
Normal file
@ -0,0 +1,111 @@
|
||||
//! Implementation of the [Gossipsub](https://github.com/libp2p/specs/blob/master/pubsub/gossipsub/README.md) protocol.
|
||||
//!
|
||||
//! Gossipsub is a P2P pubsub (publish/subscription) routing layer designed to extend upon
|
||||
//! floodsub and meshsub routing protocols.
|
||||
//!
|
||||
//! # Overview
|
||||
//!
|
||||
//! *Note: The gossipsub protocol specifications
|
||||
//! (<https://github.com/libp2p/specs/tree/master/pubsub/gossipsub>) provide an outline for the
|
||||
//! routing protocol. They should be consulted for further detail.*
|
||||
//!
|
||||
//! Gossipsub is a blend of meshsub for data and randomsub for mesh metadata. It provides bounded
|
||||
//! degree and amplification factor with the meshsub construction and augments it using gossip
|
||||
//! propagation of metadata with the randomsub technique.
|
||||
//!
|
||||
//! The router maintains an overlay mesh network of peers on which to efficiently send messages and
|
||||
//! metadata. Peers use control messages to broadcast and request known messages and
|
||||
//! subscribe/unsubscribe from topics in the mesh network.
|
||||
//!
|
||||
//! # Important Discrepancies
|
||||
//!
|
||||
//! This section outlines the current implementation's potential discrepancies from that of other
|
||||
//! implementations, due to undefined elements in the current specification.
|
||||
//!
|
||||
//! - **Topics** - In gossipsub, topics configurable by the `hash_topics` configuration parameter.
|
||||
//! Topics are of type [`TopicHash`]. The current go implementation uses raw utf-8 strings, and this
|
||||
//! is default configuration in rust-libp2p. Topics can be hashed (SHA256 hashed then base64
|
||||
//! encoded) by setting the `hash_topics` configuration parameter to true.
|
||||
//!
|
||||
//! - **Sequence Numbers** - A message on the gossipsub network is identified by the source
|
||||
//! [`PeerId`](libp2p_identity::PeerId) and a nonce (sequence number) of the message. The sequence numbers in
|
||||
//! this implementation are sent as raw bytes across the wire. They are 64-bit big-endian unsigned
|
||||
//! integers. When messages are signed, they are monotonically increasing integers starting from a
|
||||
//! random value and wrapping around u64::MAX. When messages are unsigned, they are chosen at random.
|
||||
//! NOTE: These numbers are sequential in the current go implementation.
|
||||
//!
|
||||
//! # Peer Discovery
|
||||
//!
|
||||
//! Gossipsub does not provide peer discovery by itself. Peer discovery is the process by which
|
||||
//! peers in a p2p network exchange information about each other among other reasons to become resistant
|
||||
//! against the failure or replacement of the
|
||||
//! [boot nodes](https://docs.libp2p.io/reference/glossary/#boot-node) of the network.
|
||||
//!
|
||||
//! Peer
|
||||
//! discovery can e.g. be implemented with the help of the [Kademlia](https://github.com/libp2p/specs/blob/master/kad-dht/README.md) protocol
|
||||
//! in combination with the [Identify](https://github.com/libp2p/specs/tree/master/identify) protocol. See the
|
||||
//! Kademlia implementation documentation for more information.
|
||||
//!
|
||||
//! # Using Gossipsub
|
||||
//!
|
||||
//! ## Gossipsub Config
|
||||
//!
|
||||
//! The [`Config`] struct specifies various network performance/tuning configuration
|
||||
//! parameters. Specifically it specifies:
|
||||
//!
|
||||
//! [`Config`]: struct.Config.html
|
||||
//!
|
||||
//! This struct implements the [`Default`] trait and can be initialised via
|
||||
//! [`Config::default()`].
|
||||
//!
|
||||
//!
|
||||
//! ## Behaviour
|
||||
//!
|
||||
//! The [`Behaviour`] struct implements the [`libp2p_swarm::NetworkBehaviour`] trait allowing it to
|
||||
//! act as the routing behaviour in a [`libp2p_swarm::Swarm`]. This struct requires an instance of
|
||||
//! [`PeerId`](libp2p_identity::PeerId) and [`Config`].
|
||||
//!
|
||||
//! [`Behaviour`]: struct.Behaviour.html
|
||||
|
||||
//! ## Example
|
||||
//!
|
||||
//! For an example on how to use gossipsub, see the [chat-example](https://github.com/libp2p/rust-libp2p/tree/master/examples/chat).
|
||||
|
||||
#![cfg_attr(docsrs, feature(doc_cfg, doc_auto_cfg))]
|
||||
|
||||
mod backoff;
|
||||
mod behaviour;
|
||||
mod config;
|
||||
mod error;
|
||||
mod gossip_promises;
|
||||
mod handler;
|
||||
mod mcache;
|
||||
mod metrics;
|
||||
mod peer_score;
|
||||
mod protocol;
|
||||
mod rpc_proto;
|
||||
mod subscription_filter;
|
||||
mod time_cache;
|
||||
mod topic;
|
||||
mod transform;
|
||||
mod types;
|
||||
|
||||
pub use self::behaviour::{Behaviour, Event, MessageAuthenticity};
|
||||
pub use self::config::{Config, ConfigBuilder, ValidationMode, Version};
|
||||
pub use self::error::{ConfigBuilderError, PublishError, SubscriptionError, ValidationError};
|
||||
pub use self::metrics::Config as MetricsConfig;
|
||||
pub use self::peer_score::{
|
||||
score_parameter_decay, score_parameter_decay_with_base, PeerScoreParams, PeerScoreThresholds,
|
||||
TopicScoreParams,
|
||||
};
|
||||
pub use self::subscription_filter::{
|
||||
AllowAllSubscriptionFilter, CallbackSubscriptionFilter, CombinedSubscriptionFilters,
|
||||
MaxCountSubscriptionFilter, RegexSubscriptionFilter, TopicSubscriptionFilter,
|
||||
WhitelistSubscriptionFilter,
|
||||
};
|
||||
pub use self::topic::{Hasher, Topic, TopicHash};
|
||||
pub use self::transform::{DataTransform, IdentityTransform};
|
||||
pub use self::types::{Message, MessageAcceptance, MessageId, RawMessage};
|
||||
pub type IdentTopic = Topic<self::topic::IdentityHash>;
|
||||
pub type Sha256Topic = Topic<self::topic::Sha256Hash>;
|
||||
pub use self::types::FailedMessages;
|
||||
937
beacon_node/lighthouse_network/src/gossipsub/peer_score.rs
Normal file
937
beacon_node/lighthouse_network/src/gossipsub/peer_score.rs
Normal file
@ -0,0 +1,937 @@
|
||||
// Copyright 2020 Sigma Prime Pty Ltd.
|
||||
//
|
||||
// Permission is hereby granted, free of charge, to any person obtaining a
|
||||
// copy of this software and associated documentation files (the "Software"),
|
||||
// to deal in the Software without restriction, including without limitation
|
||||
// the rights to use, copy, modify, merge, publish, distribute, sublicense,
|
||||
// and/or sell copies of the Software, and to permit persons to whom the
|
||||
// Software is furnished to do so, subject to the following conditions:
|
||||
//
|
||||
// The above copyright notice and this permission notice shall be included in
|
||||
// all copies or substantial portions of the Software.
|
||||
//
|
||||
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
|
||||
// OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
|
||||
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
||||
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
|
||||
// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
|
||||
// DEALINGS IN THE SOFTWARE.
|
||||
|
||||
//!
|
||||
//! Manages and stores the Scoring logic of a particular peer on the gossipsub behaviour.
|
||||
|
||||
use super::metrics::{Metrics, Penalty};
|
||||
use super::time_cache::TimeCache;
|
||||
use super::{MessageId, TopicHash};
|
||||
use instant::Instant;
|
||||
use libp2p::identity::PeerId;
|
||||
use std::collections::{hash_map, HashMap, HashSet};
|
||||
use std::net::IpAddr;
|
||||
use std::time::Duration;
|
||||
|
||||
mod params;
|
||||
use super::ValidationError;
|
||||
pub use params::{
|
||||
score_parameter_decay, score_parameter_decay_with_base, PeerScoreParams, PeerScoreThresholds,
|
||||
TopicScoreParams,
|
||||
};
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests;
|
||||
|
||||
/// The number of seconds delivery messages are stored in the cache.
|
||||
const TIME_CACHE_DURATION: u64 = 120;
|
||||
|
||||
pub(crate) struct PeerScore {
|
||||
params: PeerScoreParams,
|
||||
/// The score parameters.
|
||||
peer_stats: HashMap<PeerId, PeerStats>,
|
||||
/// Tracking peers per IP.
|
||||
peer_ips: HashMap<IpAddr, HashSet<PeerId>>,
|
||||
/// Message delivery tracking. This is a time-cache of [`DeliveryRecord`]s.
|
||||
deliveries: TimeCache<MessageId, DeliveryRecord>,
|
||||
/// callback for monitoring message delivery times
|
||||
message_delivery_time_callback: Option<fn(&PeerId, &TopicHash, f64)>,
|
||||
}
|
||||
|
||||
/// General statistics for a given gossipsub peer.
|
||||
struct PeerStats {
|
||||
/// Connection status of the peer.
|
||||
status: ConnectionStatus,
|
||||
/// Stats per topic.
|
||||
topics: HashMap<TopicHash, TopicStats>,
|
||||
/// IP tracking for individual peers.
|
||||
known_ips: HashSet<IpAddr>,
|
||||
/// Behaviour penalty that is applied to the peer, assigned by the behaviour.
|
||||
behaviour_penalty: f64,
|
||||
/// Application specific score. Can be manipulated by calling PeerScore::set_application_score
|
||||
application_score: f64,
|
||||
/// Scoring based on how whether this peer consumes messages fast enough or not.
|
||||
slow_peer_penalty: f64,
|
||||
}
|
||||
|
||||
enum ConnectionStatus {
|
||||
/// The peer is connected.
|
||||
Connected,
|
||||
/// The peer is disconnected
|
||||
Disconnected {
|
||||
/// Expiration time of the score state for disconnected peers.
|
||||
expire: Instant,
|
||||
},
|
||||
}
|
||||
|
||||
impl Default for PeerStats {
|
||||
fn default() -> Self {
|
||||
PeerStats {
|
||||
status: ConnectionStatus::Connected,
|
||||
topics: HashMap::new(),
|
||||
known_ips: HashSet::new(),
|
||||
behaviour_penalty: 0f64,
|
||||
application_score: 0f64,
|
||||
slow_peer_penalty: 0f64,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl PeerStats {
|
||||
/// Returns a mutable reference to topic stats if they exist, otherwise if the supplied parameters score the
|
||||
/// topic, inserts the default stats and returns a reference to those. If neither apply, returns None.
|
||||
pub(crate) fn stats_or_default_mut(
|
||||
&mut self,
|
||||
topic_hash: TopicHash,
|
||||
params: &PeerScoreParams,
|
||||
) -> Option<&mut TopicStats> {
|
||||
if params.topics.get(&topic_hash).is_some() {
|
||||
Some(self.topics.entry(topic_hash).or_default())
|
||||
} else {
|
||||
self.topics.get_mut(&topic_hash)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// Stats assigned to peer for each topic.
|
||||
struct TopicStats {
|
||||
mesh_status: MeshStatus,
|
||||
/// Number of first message deliveries.
|
||||
first_message_deliveries: f64,
|
||||
/// True if the peer has been in the mesh for enough time to activate mesh message deliveries.
|
||||
mesh_message_deliveries_active: bool,
|
||||
/// Number of message deliveries from the mesh.
|
||||
mesh_message_deliveries: f64,
|
||||
/// Mesh rate failure penalty.
|
||||
mesh_failure_penalty: f64,
|
||||
/// Invalid message counter.
|
||||
invalid_message_deliveries: f64,
|
||||
}
|
||||
|
||||
impl TopicStats {
|
||||
/// Returns true if the peer is in the `mesh`.
|
||||
pub(crate) fn in_mesh(&self) -> bool {
|
||||
matches!(self.mesh_status, MeshStatus::Active { .. })
|
||||
}
|
||||
}
|
||||
|
||||
/// Status defining a peer's inclusion in the mesh and associated parameters.
|
||||
enum MeshStatus {
|
||||
Active {
|
||||
/// The time the peer was last GRAFTed;
|
||||
graft_time: Instant,
|
||||
/// The time the peer has been in the mesh.
|
||||
mesh_time: Duration,
|
||||
},
|
||||
InActive,
|
||||
}
|
||||
|
||||
impl MeshStatus {
|
||||
/// Initialises a new [`MeshStatus::Active`] mesh status.
|
||||
pub(crate) fn new_active() -> Self {
|
||||
MeshStatus::Active {
|
||||
graft_time: Instant::now(),
|
||||
mesh_time: Duration::from_secs(0),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl Default for TopicStats {
|
||||
fn default() -> Self {
|
||||
TopicStats {
|
||||
mesh_status: MeshStatus::InActive,
|
||||
first_message_deliveries: Default::default(),
|
||||
mesh_message_deliveries_active: Default::default(),
|
||||
mesh_message_deliveries: Default::default(),
|
||||
mesh_failure_penalty: Default::default(),
|
||||
invalid_message_deliveries: Default::default(),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(PartialEq, Debug)]
|
||||
struct DeliveryRecord {
|
||||
status: DeliveryStatus,
|
||||
first_seen: Instant,
|
||||
peers: HashSet<PeerId>,
|
||||
}
|
||||
|
||||
#[derive(PartialEq, Debug)]
|
||||
enum DeliveryStatus {
|
||||
/// Don't know (yet) if the message is valid.
|
||||
Unknown,
|
||||
/// The message is valid together with the validated time.
|
||||
Valid(Instant),
|
||||
/// The message is invalid.
|
||||
Invalid,
|
||||
/// Instructed by the validator to ignore the message.
|
||||
Ignored,
|
||||
}
|
||||
|
||||
impl Default for DeliveryRecord {
|
||||
fn default() -> Self {
|
||||
DeliveryRecord {
|
||||
status: DeliveryStatus::Unknown,
|
||||
first_seen: Instant::now(),
|
||||
peers: HashSet::new(),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl PeerScore {
|
||||
/// Creates a new [`PeerScore`] using a given set of peer scoring parameters.
|
||||
#[allow(dead_code)]
|
||||
pub(crate) fn new(params: PeerScoreParams) -> Self {
|
||||
Self::new_with_message_delivery_time_callback(params, None)
|
||||
}
|
||||
|
||||
pub(crate) fn new_with_message_delivery_time_callback(
|
||||
params: PeerScoreParams,
|
||||
callback: Option<fn(&PeerId, &TopicHash, f64)>,
|
||||
) -> Self {
|
||||
PeerScore {
|
||||
params,
|
||||
peer_stats: HashMap::new(),
|
||||
peer_ips: HashMap::new(),
|
||||
deliveries: TimeCache::new(Duration::from_secs(TIME_CACHE_DURATION)),
|
||||
message_delivery_time_callback: callback,
|
||||
}
|
||||
}
|
||||
|
||||
/// Returns the score for a peer
|
||||
pub(crate) fn score(&self, peer_id: &PeerId) -> f64 {
|
||||
self.metric_score(peer_id, None)
|
||||
}
|
||||
|
||||
/// Returns the score for a peer, logging metrics. This is called from the heartbeat and
|
||||
/// increments the metric counts for penalties.
|
||||
pub(crate) fn metric_score(&self, peer_id: &PeerId, mut metrics: Option<&mut Metrics>) -> f64 {
|
||||
let Some(peer_stats) = self.peer_stats.get(peer_id) else {
|
||||
return 0.0;
|
||||
};
|
||||
let mut score = 0.0;
|
||||
|
||||
// topic scores
|
||||
for (topic, topic_stats) in peer_stats.topics.iter() {
|
||||
// topic parameters
|
||||
if let Some(topic_params) = self.params.topics.get(topic) {
|
||||
// we are tracking the topic
|
||||
|
||||
// the topic score
|
||||
let mut topic_score = 0.0;
|
||||
|
||||
// P1: time in mesh
|
||||
if let MeshStatus::Active { mesh_time, .. } = topic_stats.mesh_status {
|
||||
let p1 = {
|
||||
let v = mesh_time.as_secs_f64()
|
||||
/ topic_params.time_in_mesh_quantum.as_secs_f64();
|
||||
if v < topic_params.time_in_mesh_cap {
|
||||
v
|
||||
} else {
|
||||
topic_params.time_in_mesh_cap
|
||||
}
|
||||
};
|
||||
topic_score += p1 * topic_params.time_in_mesh_weight;
|
||||
}
|
||||
|
||||
// P2: first message deliveries
|
||||
let p2 = {
|
||||
let v = topic_stats.first_message_deliveries;
|
||||
if v < topic_params.first_message_deliveries_cap {
|
||||
v
|
||||
} else {
|
||||
topic_params.first_message_deliveries_cap
|
||||
}
|
||||
};
|
||||
topic_score += p2 * topic_params.first_message_deliveries_weight;
|
||||
|
||||
// P3: mesh message deliveries
|
||||
if topic_stats.mesh_message_deliveries_active
|
||||
&& topic_stats.mesh_message_deliveries
|
||||
< topic_params.mesh_message_deliveries_threshold
|
||||
{
|
||||
let deficit = topic_params.mesh_message_deliveries_threshold
|
||||
- topic_stats.mesh_message_deliveries;
|
||||
let p3 = deficit * deficit;
|
||||
topic_score += p3 * topic_params.mesh_message_deliveries_weight;
|
||||
if let Some(metrics) = metrics.as_mut() {
|
||||
metrics.register_score_penalty(Penalty::MessageDeficit);
|
||||
}
|
||||
tracing::debug!(
|
||||
peer=%peer_id,
|
||||
%topic,
|
||||
%deficit,
|
||||
penalty=%topic_score,
|
||||
"[Penalty] The peer has a mesh deliveries deficit and will be penalized"
|
||||
);
|
||||
}
|
||||
|
||||
// P3b:
|
||||
// NOTE: the weight of P3b is negative (validated in TopicScoreParams.validate), so this detracts.
|
||||
let p3b = topic_stats.mesh_failure_penalty;
|
||||
topic_score += p3b * topic_params.mesh_failure_penalty_weight;
|
||||
|
||||
// P4: invalid messages
|
||||
// NOTE: the weight of P4 is negative (validated in TopicScoreParams.validate), so this detracts.
|
||||
let p4 =
|
||||
topic_stats.invalid_message_deliveries * topic_stats.invalid_message_deliveries;
|
||||
topic_score += p4 * topic_params.invalid_message_deliveries_weight;
|
||||
|
||||
// update score, mixing with topic weight
|
||||
score += topic_score * topic_params.topic_weight;
|
||||
}
|
||||
}
|
||||
|
||||
// apply the topic score cap, if any
|
||||
if self.params.topic_score_cap > 0f64 && score > self.params.topic_score_cap {
|
||||
score = self.params.topic_score_cap;
|
||||
}
|
||||
|
||||
// P5: application-specific score
|
||||
let p5 = peer_stats.application_score;
|
||||
score += p5 * self.params.app_specific_weight;
|
||||
|
||||
// P6: IP collocation factor
|
||||
for ip in peer_stats.known_ips.iter() {
|
||||
if self.params.ip_colocation_factor_whitelist.get(ip).is_some() {
|
||||
continue;
|
||||
}
|
||||
|
||||
// P6 has a cliff (ip_colocation_factor_threshold); it's only applied iff
|
||||
// at least that many peers are connected to us from that source IP
|
||||
// addr. It is quadratic, and the weight is negative (validated by
|
||||
// peer_score_params.validate()).
|
||||
if let Some(peers_in_ip) = self.peer_ips.get(ip).map(|peers| peers.len()) {
|
||||
if (peers_in_ip as f64) > self.params.ip_colocation_factor_threshold {
|
||||
let surplus = (peers_in_ip as f64) - self.params.ip_colocation_factor_threshold;
|
||||
let p6 = surplus * surplus;
|
||||
if let Some(metrics) = metrics.as_mut() {
|
||||
metrics.register_score_penalty(Penalty::IPColocation);
|
||||
}
|
||||
tracing::debug!(
|
||||
peer=%peer_id,
|
||||
surplus_ip=%ip,
|
||||
surplus=%surplus,
|
||||
"[Penalty] The peer gets penalized because of too many peers with the same ip"
|
||||
);
|
||||
score += p6 * self.params.ip_colocation_factor_weight;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// P7: behavioural pattern penalty
|
||||
if peer_stats.behaviour_penalty > self.params.behaviour_penalty_threshold {
|
||||
let excess = peer_stats.behaviour_penalty - self.params.behaviour_penalty_threshold;
|
||||
let p7 = excess * excess;
|
||||
score += p7 * self.params.behaviour_penalty_weight;
|
||||
}
|
||||
|
||||
// Slow peer weighting
|
||||
if peer_stats.slow_peer_penalty > self.params.slow_peer_threshold {
|
||||
let excess = peer_stats.slow_peer_penalty - self.params.slow_peer_threshold;
|
||||
score += excess * self.params.slow_peer_weight;
|
||||
}
|
||||
|
||||
score
|
||||
}
|
||||
|
||||
pub(crate) fn add_penalty(&mut self, peer_id: &PeerId, count: usize) {
|
||||
if let Some(peer_stats) = self.peer_stats.get_mut(peer_id) {
|
||||
tracing::debug!(
|
||||
peer=%peer_id,
|
||||
%count,
|
||||
"[Penalty] Behavioral penalty for peer"
|
||||
);
|
||||
peer_stats.behaviour_penalty += count as f64;
|
||||
}
|
||||
}
|
||||
|
||||
fn remove_ips_for_peer(
|
||||
peer_stats: &PeerStats,
|
||||
peer_ips: &mut HashMap<IpAddr, HashSet<PeerId>>,
|
||||
peer_id: &PeerId,
|
||||
) {
|
||||
for ip in peer_stats.known_ips.iter() {
|
||||
if let Some(peer_set) = peer_ips.get_mut(ip) {
|
||||
peer_set.remove(peer_id);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
pub(crate) fn refresh_scores(&mut self) {
|
||||
let now = Instant::now();
|
||||
let params_ref = &self.params;
|
||||
let peer_ips_ref = &mut self.peer_ips;
|
||||
self.peer_stats.retain(|peer_id, peer_stats| {
|
||||
if let ConnectionStatus::Disconnected { expire } = peer_stats.status {
|
||||
// has the retention period expired?
|
||||
if now > expire {
|
||||
// yes, throw it away (but clean up the IP tracking first)
|
||||
Self::remove_ips_for_peer(peer_stats, peer_ips_ref, peer_id);
|
||||
// re address this, use retain or entry
|
||||
return false;
|
||||
}
|
||||
|
||||
// we don't decay retained scores, as the peer is not active.
|
||||
// this way the peer cannot reset a negative score by simply disconnecting and reconnecting,
|
||||
// unless the retention period has elapsed.
|
||||
// similarly, a well behaved peer does not lose its score by getting disconnected.
|
||||
return true;
|
||||
}
|
||||
|
||||
for (topic, topic_stats) in peer_stats.topics.iter_mut() {
|
||||
// the topic parameters
|
||||
if let Some(topic_params) = params_ref.topics.get(topic) {
|
||||
// decay counters
|
||||
topic_stats.first_message_deliveries *=
|
||||
topic_params.first_message_deliveries_decay;
|
||||
if topic_stats.first_message_deliveries < params_ref.decay_to_zero {
|
||||
topic_stats.first_message_deliveries = 0.0;
|
||||
}
|
||||
topic_stats.mesh_message_deliveries *=
|
||||
topic_params.mesh_message_deliveries_decay;
|
||||
if topic_stats.mesh_message_deliveries < params_ref.decay_to_zero {
|
||||
topic_stats.mesh_message_deliveries = 0.0;
|
||||
}
|
||||
topic_stats.mesh_failure_penalty *= topic_params.mesh_failure_penalty_decay;
|
||||
if topic_stats.mesh_failure_penalty < params_ref.decay_to_zero {
|
||||
topic_stats.mesh_failure_penalty = 0.0;
|
||||
}
|
||||
topic_stats.invalid_message_deliveries *=
|
||||
topic_params.invalid_message_deliveries_decay;
|
||||
if topic_stats.invalid_message_deliveries < params_ref.decay_to_zero {
|
||||
topic_stats.invalid_message_deliveries = 0.0;
|
||||
}
|
||||
// update mesh time and activate mesh message delivery parameter if need be
|
||||
if let MeshStatus::Active {
|
||||
ref mut mesh_time,
|
||||
ref mut graft_time,
|
||||
} = topic_stats.mesh_status
|
||||
{
|
||||
*mesh_time = now.duration_since(*graft_time);
|
||||
if *mesh_time > topic_params.mesh_message_deliveries_activation {
|
||||
topic_stats.mesh_message_deliveries_active = true;
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// decay P7 counter
|
||||
peer_stats.behaviour_penalty *= params_ref.behaviour_penalty_decay;
|
||||
if peer_stats.behaviour_penalty < params_ref.decay_to_zero {
|
||||
peer_stats.behaviour_penalty = 0.0;
|
||||
}
|
||||
|
||||
// decay slow peer score
|
||||
peer_stats.slow_peer_penalty *= params_ref.slow_peer_decay;
|
||||
if peer_stats.slow_peer_penalty < params_ref.decay_to_zero {
|
||||
peer_stats.slow_peer_penalty = 0.0;
|
||||
}
|
||||
|
||||
true
|
||||
});
|
||||
}
|
||||
|
||||
/// Adds a connected peer to [`PeerScore`], initialising with empty ips (ips get added later
|
||||
/// through add_ip.
|
||||
pub(crate) fn add_peer(&mut self, peer_id: PeerId) {
|
||||
let peer_stats = self.peer_stats.entry(peer_id).or_default();
|
||||
|
||||
// mark the peer as connected
|
||||
peer_stats.status = ConnectionStatus::Connected;
|
||||
}
|
||||
|
||||
/// Adds a new ip to a peer, if the peer is not yet known creates a new peer_stats entry for it
|
||||
pub(crate) fn add_ip(&mut self, peer_id: &PeerId, ip: IpAddr) {
|
||||
tracing::trace!(peer=%peer_id, %ip, "Add ip for peer");
|
||||
let peer_stats = self.peer_stats.entry(*peer_id).or_default();
|
||||
|
||||
// Mark the peer as connected (currently the default is connected, but we don't want to
|
||||
// rely on the default).
|
||||
peer_stats.status = ConnectionStatus::Connected;
|
||||
|
||||
// Insert the ip
|
||||
peer_stats.known_ips.insert(ip);
|
||||
self.peer_ips.entry(ip).or_default().insert(*peer_id);
|
||||
}
|
||||
|
||||
/// Indicate that a peer has been too slow to consume a message.
|
||||
pub(crate) fn failed_message_slow_peer(&mut self, peer_id: &PeerId) {
|
||||
if let Some(peer_stats) = self.peer_stats.get_mut(peer_id) {
|
||||
peer_stats.slow_peer_penalty += 1.0;
|
||||
tracing::debug!(peer=%peer_id, %peer_stats.slow_peer_penalty, "[Penalty] Expired message penalty.");
|
||||
}
|
||||
}
|
||||
|
||||
/// Removes an ip from a peer
|
||||
pub(crate) fn remove_ip(&mut self, peer_id: &PeerId, ip: &IpAddr) {
|
||||
if let Some(peer_stats) = self.peer_stats.get_mut(peer_id) {
|
||||
peer_stats.known_ips.remove(ip);
|
||||
if let Some(peer_ids) = self.peer_ips.get_mut(ip) {
|
||||
tracing::trace!(peer=%peer_id, %ip, "Remove ip for peer");
|
||||
peer_ids.remove(peer_id);
|
||||
} else {
|
||||
tracing::trace!(
|
||||
peer=%peer_id,
|
||||
%ip,
|
||||
"No entry in peer_ips for ip which should get removed for peer"
|
||||
);
|
||||
}
|
||||
} else {
|
||||
tracing::trace!(
|
||||
peer=%peer_id,
|
||||
%ip,
|
||||
"No peer_stats for peer which should remove the ip"
|
||||
);
|
||||
}
|
||||
}
|
||||
|
||||
/// Removes a peer from the score table. This retains peer statistics if their score is
|
||||
/// non-positive.
|
||||
pub(crate) fn remove_peer(&mut self, peer_id: &PeerId) {
|
||||
// we only retain non-positive scores of peers
|
||||
if self.score(peer_id) > 0f64 {
|
||||
if let hash_map::Entry::Occupied(entry) = self.peer_stats.entry(*peer_id) {
|
||||
Self::remove_ips_for_peer(entry.get(), &mut self.peer_ips, peer_id);
|
||||
entry.remove();
|
||||
}
|
||||
return;
|
||||
}
|
||||
|
||||
// if the peer is retained (including it's score) the `first_message_delivery` counters
|
||||
// are reset to 0 and mesh delivery penalties applied.
|
||||
if let Some(peer_stats) = self.peer_stats.get_mut(peer_id) {
|
||||
for (topic, topic_stats) in peer_stats.topics.iter_mut() {
|
||||
topic_stats.first_message_deliveries = 0f64;
|
||||
|
||||
if let Some(threshold) = self
|
||||
.params
|
||||
.topics
|
||||
.get(topic)
|
||||
.map(|param| param.mesh_message_deliveries_threshold)
|
||||
{
|
||||
if topic_stats.in_mesh()
|
||||
&& topic_stats.mesh_message_deliveries_active
|
||||
&& topic_stats.mesh_message_deliveries < threshold
|
||||
{
|
||||
let deficit = threshold - topic_stats.mesh_message_deliveries;
|
||||
topic_stats.mesh_failure_penalty += deficit * deficit;
|
||||
}
|
||||
}
|
||||
|
||||
topic_stats.mesh_status = MeshStatus::InActive;
|
||||
topic_stats.mesh_message_deliveries_active = false;
|
||||
}
|
||||
|
||||
peer_stats.status = ConnectionStatus::Disconnected {
|
||||
expire: Instant::now() + self.params.retain_score,
|
||||
};
|
||||
}
|
||||
}
|
||||
|
||||
/// Handles scoring functionality as a peer GRAFTs to a topic.
|
||||
pub(crate) fn graft(&mut self, peer_id: &PeerId, topic: impl Into<TopicHash>) {
|
||||
let topic = topic.into();
|
||||
if let Some(peer_stats) = self.peer_stats.get_mut(peer_id) {
|
||||
// if we are scoring the topic, update the mesh status.
|
||||
if let Some(topic_stats) = peer_stats.stats_or_default_mut(topic, &self.params) {
|
||||
topic_stats.mesh_status = MeshStatus::new_active();
|
||||
topic_stats.mesh_message_deliveries_active = false;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// Handles scoring functionality as a peer PRUNEs from a topic.
|
||||
pub(crate) fn prune(&mut self, peer_id: &PeerId, topic: TopicHash) {
|
||||
if let Some(peer_stats) = self.peer_stats.get_mut(peer_id) {
|
||||
// if we are scoring the topic, update the mesh status.
|
||||
if let Some(topic_stats) = peer_stats.stats_or_default_mut(topic.clone(), &self.params)
|
||||
{
|
||||
// sticky mesh delivery rate failure penalty
|
||||
let threshold = self
|
||||
.params
|
||||
.topics
|
||||
.get(&topic)
|
||||
.expect("Topic must exist in order for there to be topic stats")
|
||||
.mesh_message_deliveries_threshold;
|
||||
if topic_stats.mesh_message_deliveries_active
|
||||
&& topic_stats.mesh_message_deliveries < threshold
|
||||
{
|
||||
let deficit = threshold - topic_stats.mesh_message_deliveries;
|
||||
topic_stats.mesh_failure_penalty += deficit * deficit;
|
||||
}
|
||||
topic_stats.mesh_message_deliveries_active = false;
|
||||
topic_stats.mesh_status = MeshStatus::InActive;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
pub(crate) fn validate_message(
|
||||
&mut self,
|
||||
from: &PeerId,
|
||||
msg_id: &MessageId,
|
||||
topic_hash: &TopicHash,
|
||||
) {
|
||||
// adds an empty record with the message id
|
||||
self.deliveries.entry(msg_id.clone()).or_default();
|
||||
|
||||
if let Some(callback) = self.message_delivery_time_callback {
|
||||
if self
|
||||
.peer_stats
|
||||
.get(from)
|
||||
.and_then(|s| s.topics.get(topic_hash))
|
||||
.map(|ts| ts.in_mesh())
|
||||
.unwrap_or(false)
|
||||
{
|
||||
callback(from, topic_hash, 0.0);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
pub(crate) fn deliver_message(
|
||||
&mut self,
|
||||
from: &PeerId,
|
||||
msg_id: &MessageId,
|
||||
topic_hash: &TopicHash,
|
||||
) {
|
||||
self.mark_first_message_delivery(from, topic_hash);
|
||||
|
||||
let record = self.deliveries.entry(msg_id.clone()).or_default();
|
||||
|
||||
// this should be the first delivery trace
|
||||
if record.status != DeliveryStatus::Unknown {
|
||||
tracing::warn!(
|
||||
peer=%from,
|
||||
status=?record.status,
|
||||
first_seen=?record.first_seen.elapsed().as_secs(),
|
||||
"Unexpected delivery trace"
|
||||
);
|
||||
return;
|
||||
}
|
||||
|
||||
// mark the message as valid and reward mesh peers that have already forwarded it to us
|
||||
record.status = DeliveryStatus::Valid(Instant::now());
|
||||
for peer in record.peers.iter().cloned().collect::<Vec<_>>() {
|
||||
// this check is to make sure a peer can't send us a message twice and get a double
|
||||
// count if it is a first delivery
|
||||
if &peer != from {
|
||||
self.mark_duplicate_message_delivery(&peer, topic_hash, None);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// Similar to `reject_message` except does not require the message id or reason for an invalid message.
|
||||
pub(crate) fn reject_invalid_message(&mut self, from: &PeerId, topic_hash: &TopicHash) {
|
||||
tracing::debug!(
|
||||
peer=%from,
|
||||
"[Penalty] Message from peer rejected because of ValidationError or SelfOrigin"
|
||||
);
|
||||
|
||||
self.mark_invalid_message_delivery(from, topic_hash);
|
||||
}
|
||||
|
||||
// Reject a message.
|
||||
pub(crate) fn reject_message(
|
||||
&mut self,
|
||||
from: &PeerId,
|
||||
msg_id: &MessageId,
|
||||
topic_hash: &TopicHash,
|
||||
reason: RejectReason,
|
||||
) {
|
||||
match reason {
|
||||
// these messages are not tracked, but the peer is penalized as they are invalid
|
||||
RejectReason::ValidationError(_) | RejectReason::SelfOrigin => {
|
||||
self.reject_invalid_message(from, topic_hash);
|
||||
return;
|
||||
}
|
||||
// we ignore those messages, so do nothing.
|
||||
RejectReason::BlackListedPeer | RejectReason::BlackListedSource => {
|
||||
return;
|
||||
}
|
||||
_ => {} // the rest are handled after record creation
|
||||
}
|
||||
|
||||
let peers: Vec<_> = {
|
||||
let record = self.deliveries.entry(msg_id.clone()).or_default();
|
||||
|
||||
// Multiple peers can now reject the same message as we track which peers send us the
|
||||
// message. If we have already updated the status, return.
|
||||
if record.status != DeliveryStatus::Unknown {
|
||||
return;
|
||||
}
|
||||
|
||||
if let RejectReason::ValidationIgnored = reason {
|
||||
// we were explicitly instructed by the validator to ignore the message but not penalize
|
||||
// the peer
|
||||
record.status = DeliveryStatus::Ignored;
|
||||
record.peers.clear();
|
||||
return;
|
||||
}
|
||||
|
||||
// mark the message as invalid and penalize peers that have already forwarded it.
|
||||
record.status = DeliveryStatus::Invalid;
|
||||
// release the delivery time tracking map to free some memory early
|
||||
record.peers.drain().collect()
|
||||
};
|
||||
|
||||
self.mark_invalid_message_delivery(from, topic_hash);
|
||||
for peer_id in peers.iter() {
|
||||
self.mark_invalid_message_delivery(peer_id, topic_hash)
|
||||
}
|
||||
}
|
||||
|
||||
pub(crate) fn duplicated_message(
|
||||
&mut self,
|
||||
from: &PeerId,
|
||||
msg_id: &MessageId,
|
||||
topic_hash: &TopicHash,
|
||||
) {
|
||||
let record = self.deliveries.entry(msg_id.clone()).or_default();
|
||||
|
||||
if record.peers.get(from).is_some() {
|
||||
// we have already seen this duplicate!
|
||||
return;
|
||||
}
|
||||
|
||||
if let Some(callback) = self.message_delivery_time_callback {
|
||||
let time = if let DeliveryStatus::Valid(validated) = record.status {
|
||||
validated.elapsed().as_secs_f64()
|
||||
} else {
|
||||
0.0
|
||||
};
|
||||
if self
|
||||
.peer_stats
|
||||
.get(from)
|
||||
.and_then(|s| s.topics.get(topic_hash))
|
||||
.map(|ts| ts.in_mesh())
|
||||
.unwrap_or(false)
|
||||
{
|
||||
callback(from, topic_hash, time);
|
||||
}
|
||||
}
|
||||
|
||||
match record.status {
|
||||
DeliveryStatus::Unknown => {
|
||||
// the message is being validated; track the peer delivery and wait for
|
||||
// the Deliver/Reject notification.
|
||||
record.peers.insert(*from);
|
||||
}
|
||||
DeliveryStatus::Valid(validated) => {
|
||||
// mark the peer delivery time to only count a duplicate delivery once.
|
||||
record.peers.insert(*from);
|
||||
self.mark_duplicate_message_delivery(from, topic_hash, Some(validated));
|
||||
}
|
||||
DeliveryStatus::Invalid => {
|
||||
// we no longer track delivery time
|
||||
self.mark_invalid_message_delivery(from, topic_hash);
|
||||
}
|
||||
DeliveryStatus::Ignored => {
|
||||
// the message was ignored; do nothing (we don't know if it was valid)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// Sets the application specific score for a peer. Returns true if the peer is the peer is
|
||||
/// connected or if the score of the peer is not yet expired and false otherwise.
|
||||
pub(crate) fn set_application_score(&mut self, peer_id: &PeerId, new_score: f64) -> bool {
|
||||
if let Some(peer_stats) = self.peer_stats.get_mut(peer_id) {
|
||||
peer_stats.application_score = new_score;
|
||||
true
|
||||
} else {
|
||||
false
|
||||
}
|
||||
}
|
||||
|
||||
/// Sets scoring parameters for a topic.
|
||||
pub(crate) fn set_topic_params(&mut self, topic_hash: TopicHash, params: TopicScoreParams) {
|
||||
use hash_map::Entry::*;
|
||||
match self.params.topics.entry(topic_hash.clone()) {
|
||||
Occupied(mut entry) => {
|
||||
let first_message_deliveries_cap = params.first_message_deliveries_cap;
|
||||
let mesh_message_deliveries_cap = params.mesh_message_deliveries_cap;
|
||||
let old_params = entry.insert(params);
|
||||
|
||||
if old_params.first_message_deliveries_cap > first_message_deliveries_cap {
|
||||
for stats in &mut self.peer_stats.values_mut() {
|
||||
if let Some(tstats) = stats.topics.get_mut(&topic_hash) {
|
||||
if tstats.first_message_deliveries > first_message_deliveries_cap {
|
||||
tstats.first_message_deliveries = first_message_deliveries_cap;
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if old_params.mesh_message_deliveries_cap > mesh_message_deliveries_cap {
|
||||
for stats in self.peer_stats.values_mut() {
|
||||
if let Some(tstats) = stats.topics.get_mut(&topic_hash) {
|
||||
if tstats.mesh_message_deliveries > mesh_message_deliveries_cap {
|
||||
tstats.mesh_message_deliveries = mesh_message_deliveries_cap;
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
Vacant(entry) => {
|
||||
entry.insert(params);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// Returns a scoring parameters for a topic if existent.
|
||||
pub(crate) fn get_topic_params(&self, topic_hash: &TopicHash) -> Option<&TopicScoreParams> {
|
||||
self.params.topics.get(topic_hash)
|
||||
}
|
||||
|
||||
/// Increments the "invalid message deliveries" counter for all scored topics the message
|
||||
/// is published in.
|
||||
fn mark_invalid_message_delivery(&mut self, peer_id: &PeerId, topic_hash: &TopicHash) {
|
||||
if let Some(peer_stats) = self.peer_stats.get_mut(peer_id) {
|
||||
if let Some(topic_stats) =
|
||||
peer_stats.stats_or_default_mut(topic_hash.clone(), &self.params)
|
||||
{
|
||||
tracing::debug!(
|
||||
peer=%peer_id,
|
||||
topic=%topic_hash,
|
||||
"[Penalty] Peer delivered an invalid message in topic and gets penalized \
|
||||
for it",
|
||||
);
|
||||
topic_stats.invalid_message_deliveries += 1f64;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// Increments the "first message deliveries" counter for all scored topics the message is
|
||||
/// published in, as well as the "mesh message deliveries" counter, if the peer is in the
|
||||
/// mesh for the topic.
|
||||
fn mark_first_message_delivery(&mut self, peer_id: &PeerId, topic_hash: &TopicHash) {
|
||||
if let Some(peer_stats) = self.peer_stats.get_mut(peer_id) {
|
||||
if let Some(topic_stats) =
|
||||
peer_stats.stats_or_default_mut(topic_hash.clone(), &self.params)
|
||||
{
|
||||
let cap = self
|
||||
.params
|
||||
.topics
|
||||
.get(topic_hash)
|
||||
.expect("Topic must exist if there are known topic_stats")
|
||||
.first_message_deliveries_cap;
|
||||
topic_stats.first_message_deliveries =
|
||||
if topic_stats.first_message_deliveries + 1f64 > cap {
|
||||
cap
|
||||
} else {
|
||||
topic_stats.first_message_deliveries + 1f64
|
||||
};
|
||||
|
||||
if let MeshStatus::Active { .. } = topic_stats.mesh_status {
|
||||
let cap = self
|
||||
.params
|
||||
.topics
|
||||
.get(topic_hash)
|
||||
.expect("Topic must exist if there are known topic_stats")
|
||||
.mesh_message_deliveries_cap;
|
||||
|
||||
topic_stats.mesh_message_deliveries =
|
||||
if topic_stats.mesh_message_deliveries + 1f64 > cap {
|
||||
cap
|
||||
} else {
|
||||
topic_stats.mesh_message_deliveries + 1f64
|
||||
};
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// Increments the "mesh message deliveries" counter for messages we've seen before, as long the
|
||||
/// message was received within the P3 window.
|
||||
fn mark_duplicate_message_delivery(
|
||||
&mut self,
|
||||
peer_id: &PeerId,
|
||||
topic_hash: &TopicHash,
|
||||
validated_time: Option<Instant>,
|
||||
) {
|
||||
if let Some(peer_stats) = self.peer_stats.get_mut(peer_id) {
|
||||
let now = if validated_time.is_some() {
|
||||
Some(Instant::now())
|
||||
} else {
|
||||
None
|
||||
};
|
||||
if let Some(topic_stats) =
|
||||
peer_stats.stats_or_default_mut(topic_hash.clone(), &self.params)
|
||||
{
|
||||
if let MeshStatus::Active { .. } = topic_stats.mesh_status {
|
||||
let topic_params = self
|
||||
.params
|
||||
.topics
|
||||
.get(topic_hash)
|
||||
.expect("Topic must exist if there are known topic_stats");
|
||||
|
||||
// check against the mesh delivery window -- if the validated time is passed as 0, then
|
||||
// the message was received before we finished validation and thus falls within the mesh
|
||||
// delivery window.
|
||||
let mut falls_in_mesh_deliver_window = true;
|
||||
if let Some(validated_time) = validated_time {
|
||||
if let Some(now) = &now {
|
||||
//should always be true
|
||||
let window_time = validated_time
|
||||
.checked_add(topic_params.mesh_message_deliveries_window)
|
||||
.unwrap_or(*now);
|
||||
if now > &window_time {
|
||||
falls_in_mesh_deliver_window = false;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if falls_in_mesh_deliver_window {
|
||||
let cap = topic_params.mesh_message_deliveries_cap;
|
||||
topic_stats.mesh_message_deliveries =
|
||||
if topic_stats.mesh_message_deliveries + 1f64 > cap {
|
||||
cap
|
||||
} else {
|
||||
topic_stats.mesh_message_deliveries + 1f64
|
||||
};
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
pub(crate) fn mesh_message_deliveries(&self, peer: &PeerId, topic: &TopicHash) -> Option<f64> {
|
||||
self.peer_stats
|
||||
.get(peer)
|
||||
.and_then(|s| s.topics.get(topic))
|
||||
.map(|t| t.mesh_message_deliveries)
|
||||
}
|
||||
}
|
||||
|
||||
/// The reason a Gossipsub message has been rejected.
|
||||
#[derive(Clone, Copy)]
|
||||
pub(crate) enum RejectReason {
|
||||
/// The message failed the configured validation during decoding.
|
||||
ValidationError(ValidationError),
|
||||
/// The message source is us.
|
||||
SelfOrigin,
|
||||
/// The peer that sent the message was blacklisted.
|
||||
BlackListedPeer,
|
||||
/// The source (from field) of the message was blacklisted.
|
||||
BlackListedSource,
|
||||
/// The validation was ignored.
|
||||
ValidationIgnored,
|
||||
/// The validation failed.
|
||||
ValidationFailed,
|
||||
}
|
||||
@ -0,0 +1,404 @@
|
||||
// Copyright 2020 Sigma Prime Pty Ltd.
|
||||
//
|
||||
// Permission is hereby granted, free of charge, to any person obtaining a
|
||||
// copy of this software and associated documentation files (the "Software"),
|
||||
// to deal in the Software without restriction, including without limitation
|
||||
// the rights to use, copy, modify, merge, publish, distribute, sublicense,
|
||||
// and/or sell copies of the Software, and to permit persons to whom the
|
||||
// Software is furnished to do so, subject to the following conditions:
|
||||
//
|
||||
// The above copyright notice and this permission notice shall be included in
|
||||
// all copies or substantial portions of the Software.
|
||||
//
|
||||
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
|
||||
// OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
|
||||
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
||||
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
|
||||
// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
|
||||
// DEALINGS IN THE SOFTWARE.
|
||||
|
||||
use crate::gossipsub::TopicHash;
|
||||
use std::collections::{HashMap, HashSet};
|
||||
use std::net::IpAddr;
|
||||
use std::time::Duration;
|
||||
|
||||
/// The default number of seconds for a decay interval.
|
||||
const DEFAULT_DECAY_INTERVAL: u64 = 1;
|
||||
/// The default rate to decay to 0.
|
||||
const DEFAULT_DECAY_TO_ZERO: f64 = 0.1;
|
||||
|
||||
/// Computes the decay factor for a parameter, assuming the `decay_interval` is 1s
|
||||
/// and that the value decays to zero if it drops below 0.01.
|
||||
pub fn score_parameter_decay(decay: Duration) -> f64 {
|
||||
score_parameter_decay_with_base(
|
||||
decay,
|
||||
Duration::from_secs(DEFAULT_DECAY_INTERVAL),
|
||||
DEFAULT_DECAY_TO_ZERO,
|
||||
)
|
||||
}
|
||||
|
||||
/// Computes the decay factor for a parameter using base as the `decay_interval`.
|
||||
pub fn score_parameter_decay_with_base(decay: Duration, base: Duration, decay_to_zero: f64) -> f64 {
|
||||
// the decay is linear, so after n ticks the value is factor^n
|
||||
// so factor^n = decay_to_zero => factor = decay_to_zero^(1/n)
|
||||
let ticks = decay.as_secs_f64() / base.as_secs_f64();
|
||||
decay_to_zero.powf(1f64 / ticks)
|
||||
}
|
||||
|
||||
#[derive(Debug, Clone)]
|
||||
pub struct PeerScoreThresholds {
|
||||
/// The score threshold below which gossip propagation is suppressed;
|
||||
/// should be negative.
|
||||
pub gossip_threshold: f64,
|
||||
|
||||
/// The score threshold below which we shouldn't publish when using flood
|
||||
/// publishing (also applies to fanout peers); should be negative and <= `gossip_threshold`.
|
||||
pub publish_threshold: f64,
|
||||
|
||||
/// The score threshold below which message processing is suppressed altogether,
|
||||
/// implementing an effective graylist according to peer score; should be negative and
|
||||
/// <= `publish_threshold`.
|
||||
pub graylist_threshold: f64,
|
||||
|
||||
/// The score threshold below which px will be ignored; this should be positive
|
||||
/// and limited to scores attainable by bootstrappers and other trusted nodes.
|
||||
pub accept_px_threshold: f64,
|
||||
|
||||
/// The median mesh score threshold before triggering opportunistic
|
||||
/// grafting; this should have a small positive value.
|
||||
pub opportunistic_graft_threshold: f64,
|
||||
}
|
||||
|
||||
impl Default for PeerScoreThresholds {
|
||||
fn default() -> Self {
|
||||
PeerScoreThresholds {
|
||||
gossip_threshold: -10.0,
|
||||
publish_threshold: -50.0,
|
||||
graylist_threshold: -80.0,
|
||||
accept_px_threshold: 10.0,
|
||||
opportunistic_graft_threshold: 20.0,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl PeerScoreThresholds {
|
||||
pub fn validate(&self) -> Result<(), &'static str> {
|
||||
if self.gossip_threshold > 0f64 {
|
||||
return Err("invalid gossip threshold; it must be <= 0");
|
||||
}
|
||||
if self.publish_threshold > 0f64 || self.publish_threshold > self.gossip_threshold {
|
||||
return Err("Invalid publish threshold; it must be <= 0 and <= gossip threshold");
|
||||
}
|
||||
if self.graylist_threshold > 0f64 || self.graylist_threshold > self.publish_threshold {
|
||||
return Err("Invalid graylist threshold; it must be <= 0 and <= publish threshold");
|
||||
}
|
||||
if self.accept_px_threshold < 0f64 {
|
||||
return Err("Invalid accept px threshold; it must be >= 0");
|
||||
}
|
||||
if self.opportunistic_graft_threshold < 0f64 {
|
||||
return Err("Invalid opportunistic grafting threshold; it must be >= 0");
|
||||
}
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Debug, Clone)]
|
||||
pub struct PeerScoreParams {
|
||||
/// Score parameters per topic.
|
||||
pub topics: HashMap<TopicHash, TopicScoreParams>,
|
||||
|
||||
/// Aggregate topic score cap; this limits the total contribution of topics towards a positive
|
||||
/// score. It must be positive (or 0 for no cap).
|
||||
pub topic_score_cap: f64,
|
||||
|
||||
/// P5: Application-specific peer scoring
|
||||
pub app_specific_weight: f64,
|
||||
|
||||
/// P6: IP-colocation factor.
|
||||
/// The parameter has an associated counter which counts the number of peers with the same IP.
|
||||
/// If the number of peers in the same IP exceeds `ip_colocation_factor_threshold, then the value
|
||||
/// is the square of the difference, ie `(peers_in_same_ip - ip_colocation_threshold)^2`.
|
||||
/// If the number of peers in the same IP is less than the threshold, then the value is 0.
|
||||
/// The weight of the parameter MUST be negative, unless you want to disable for testing.
|
||||
/// Note: In order to simulate many IPs in a manageable manner when testing, you can set the weight to 0
|
||||
/// thus disabling the IP colocation penalty.
|
||||
pub ip_colocation_factor_weight: f64,
|
||||
pub ip_colocation_factor_threshold: f64,
|
||||
pub ip_colocation_factor_whitelist: HashSet<IpAddr>,
|
||||
|
||||
/// P7: behavioural pattern penalties.
|
||||
/// This parameter has an associated counter which tracks misbehaviour as detected by the
|
||||
/// router. The router currently applies penalties for the following behaviors:
|
||||
/// - attempting to re-graft before the prune backoff time has elapsed.
|
||||
/// - not following up in IWANT requests for messages advertised with IHAVE.
|
||||
///
|
||||
/// The value of the parameter is the square of the counter over the threshold, which decays
|
||||
/// with BehaviourPenaltyDecay.
|
||||
/// The weight of the parameter MUST be negative (or zero to disable).
|
||||
pub behaviour_penalty_weight: f64,
|
||||
pub behaviour_penalty_threshold: f64,
|
||||
pub behaviour_penalty_decay: f64,
|
||||
|
||||
/// The decay interval for parameter counters.
|
||||
pub decay_interval: Duration,
|
||||
|
||||
/// Counter value below which it is considered 0.
|
||||
pub decay_to_zero: f64,
|
||||
|
||||
/// Time to remember counters for a disconnected peer.
|
||||
pub retain_score: Duration,
|
||||
|
||||
/// Slow peer penalty conditions
|
||||
pub slow_peer_weight: f64,
|
||||
pub slow_peer_threshold: f64,
|
||||
pub slow_peer_decay: f64,
|
||||
}
|
||||
|
||||
impl Default for PeerScoreParams {
|
||||
fn default() -> Self {
|
||||
PeerScoreParams {
|
||||
topics: HashMap::new(),
|
||||
topic_score_cap: 3600.0,
|
||||
app_specific_weight: 10.0,
|
||||
ip_colocation_factor_weight: -5.0,
|
||||
ip_colocation_factor_threshold: 10.0,
|
||||
ip_colocation_factor_whitelist: HashSet::new(),
|
||||
behaviour_penalty_weight: -10.0,
|
||||
behaviour_penalty_threshold: 0.0,
|
||||
behaviour_penalty_decay: 0.2,
|
||||
decay_interval: Duration::from_secs(DEFAULT_DECAY_INTERVAL),
|
||||
decay_to_zero: DEFAULT_DECAY_TO_ZERO,
|
||||
retain_score: Duration::from_secs(3600),
|
||||
slow_peer_weight: -0.2,
|
||||
slow_peer_threshold: 0.0,
|
||||
slow_peer_decay: 0.2,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// Peer score parameter validation
|
||||
impl PeerScoreParams {
|
||||
pub fn validate(&self) -> Result<(), String> {
|
||||
for (topic, params) in self.topics.iter() {
|
||||
if let Err(e) = params.validate() {
|
||||
return Err(format!("Invalid score parameters for topic {topic}: {e}"));
|
||||
}
|
||||
}
|
||||
|
||||
// check that the topic score is 0 or something positive
|
||||
if self.topic_score_cap < 0f64 {
|
||||
return Err("Invalid topic score cap; must be positive (or 0 for no cap)".into());
|
||||
}
|
||||
|
||||
// check the IP colocation factor
|
||||
if self.ip_colocation_factor_weight > 0f64 {
|
||||
return Err(
|
||||
"Invalid ip_colocation_factor_weight; must be negative (or 0 to disable)".into(),
|
||||
);
|
||||
}
|
||||
if self.ip_colocation_factor_weight != 0f64 && self.ip_colocation_factor_threshold < 1f64 {
|
||||
return Err("Invalid ip_colocation_factor_threshold; must be at least 1".into());
|
||||
}
|
||||
|
||||
// check the behaviour penalty
|
||||
if self.behaviour_penalty_weight > 0f64 {
|
||||
return Err(
|
||||
"Invalid behaviour_penalty_weight; must be negative (or 0 to disable)".into(),
|
||||
);
|
||||
}
|
||||
if self.behaviour_penalty_weight != 0f64
|
||||
&& (self.behaviour_penalty_decay <= 0f64 || self.behaviour_penalty_decay >= 1f64)
|
||||
{
|
||||
return Err("invalid behaviour_penalty_decay; must be between 0 and 1".into());
|
||||
}
|
||||
|
||||
if self.behaviour_penalty_threshold < 0f64 {
|
||||
return Err("invalid behaviour_penalty_threshold; must be >= 0".into());
|
||||
}
|
||||
|
||||
// check the decay parameters
|
||||
if self.decay_interval < Duration::from_secs(1) {
|
||||
return Err("Invalid decay_interval; must be at least 1s".into());
|
||||
}
|
||||
if self.decay_to_zero <= 0f64 || self.decay_to_zero >= 1f64 {
|
||||
return Err("Invalid decay_to_zero; must be between 0 and 1".into());
|
||||
}
|
||||
|
||||
// no need to check the score retention; a value of 0 means that we don't retain scores
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Debug, Clone)]
pub struct TopicScoreParams {
    /// The weight of the topic.
    pub topic_weight: f64,

    /// P1: time in the mesh
    /// This is the time the peer has been grafted in the mesh.
    /// The value of the parameter is the `time/time_in_mesh_quantum`, capped by `time_in_mesh_cap`.
    /// The weight of the parameter must be positive (or zero to disable).
    pub time_in_mesh_weight: f64,
    // Quantum of mesh time; the P1 counter grows by one per quantum spent grafted.
    pub time_in_mesh_quantum: Duration,
    // Upper bound on the P1 counter.
    pub time_in_mesh_cap: f64,

    /// P2: first message deliveries
    /// This is the number of message deliveries in the topic.
    /// The value of the parameter is a counter, decaying with `first_message_deliveries_decay`, and capped
    /// by `first_message_deliveries_cap`.
    /// The weight of the parameter MUST be positive (or zero to disable).
    pub first_message_deliveries_weight: f64,
    // Multiplicative decay applied to the P2 counter each decay interval.
    pub first_message_deliveries_decay: f64,
    // Upper bound on the P2 counter.
    pub first_message_deliveries_cap: f64,

    /// P3: mesh message deliveries
    /// This is the number of message deliveries in the mesh, within the
    /// `mesh_message_deliveries_window` of message validation; deliveries during validation also
    /// count and are retroactively applied when validation succeeds.
    /// This window accounts for the minimum time before a hostile mesh peer trying to game the
    /// score could replay back a valid message we just sent them.
    /// It effectively tracks first and near-first deliveries, ie a message seen from a mesh peer
    /// before we have forwarded it to them.
    /// The parameter has an associated counter, decaying with `mesh_message_deliveries_decay`.
    /// If the counter exceeds the threshold, its value is 0.
    /// If the counter is below the `mesh_message_deliveries_threshold`, the value is the square of
    /// the deficit, ie `(mesh_message_deliveries_threshold - counter)^2`.
    /// The penalty is only activated after `mesh_message_deliveries_activation` time in the mesh.
    /// The weight of the parameter MUST be negative (or zero to disable).
    pub mesh_message_deliveries_weight: f64,
    // Multiplicative decay applied to the P3 counter each decay interval.
    pub mesh_message_deliveries_decay: f64,
    // Upper bound on the P3 counter.
    pub mesh_message_deliveries_cap: f64,
    // Counter value below which the squared-deficit penalty applies.
    pub mesh_message_deliveries_threshold: f64,
    // Window after first delivery in which duplicates still count as near-first.
    pub mesh_message_deliveries_window: Duration,
    // Grace period in the mesh before the P3 penalty activates.
    pub mesh_message_deliveries_activation: Duration,

    /// P3b: sticky mesh propagation failures
    /// This is a sticky penalty that applies when a peer gets pruned from the mesh with an active
    /// mesh message delivery penalty.
    /// The weight of the parameter MUST be negative (or zero to disable)
    pub mesh_failure_penalty_weight: f64,
    // Multiplicative decay applied to the P3b counter each decay interval.
    pub mesh_failure_penalty_decay: f64,

    /// P4: invalid messages
    /// This is the number of invalid messages in the topic.
    /// The value of the parameter is the square of the counter, decaying with
    /// `invalid_message_deliveries_decay`.
    /// The weight of the parameter MUST be negative (or zero to disable).
    pub invalid_message_deliveries_weight: f64,
    // Multiplicative decay applied to the P4 counter each decay interval.
    pub invalid_message_deliveries_decay: f64,
}
|
||||
|
||||
/// NOTE: The topic score parameters are very network specific.
|
||||
/// For any production system, these values should be manually set.
|
||||
impl Default for TopicScoreParams {
|
||||
fn default() -> Self {
|
||||
TopicScoreParams {
|
||||
topic_weight: 0.5,
|
||||
// P1
|
||||
time_in_mesh_weight: 1.0,
|
||||
time_in_mesh_quantum: Duration::from_millis(1),
|
||||
time_in_mesh_cap: 3600.0,
|
||||
// P2
|
||||
first_message_deliveries_weight: 1.0,
|
||||
first_message_deliveries_decay: 0.5,
|
||||
first_message_deliveries_cap: 2000.0,
|
||||
// P3
|
||||
mesh_message_deliveries_weight: -1.0,
|
||||
mesh_message_deliveries_decay: 0.5,
|
||||
mesh_message_deliveries_cap: 100.0,
|
||||
mesh_message_deliveries_threshold: 20.0,
|
||||
mesh_message_deliveries_window: Duration::from_millis(10),
|
||||
mesh_message_deliveries_activation: Duration::from_secs(5),
|
||||
// P3b
|
||||
mesh_failure_penalty_weight: -1.0,
|
||||
mesh_failure_penalty_decay: 0.5,
|
||||
// P4
|
||||
invalid_message_deliveries_weight: -1.0,
|
||||
invalid_message_deliveries_decay: 0.3,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl TopicScoreParams {
    /// Checks that every parameter has the sign and range its scoring
    /// component requires, returning the first violation found.
    ///
    /// Reward components (P1, P2) need non-negative weights; penalty
    /// components (P3, P3b, P4) need non-positive weights. Decay factors must
    /// lie strictly between 0 and 1. Range checks that only matter when a
    /// component is enabled are skipped when its weight is 0.
    ///
    /// NOTE(review): NaN values pass these float comparisons unflagged
    /// (e.g. `NaN < 0.0` is false), so NaN parameters are not rejected here.
    pub fn validate(&self) -> Result<(), &'static str> {
        // make sure we have a sane topic weight
        if self.topic_weight < 0f64 {
            return Err("invalid topic weight; must be >= 0");
        }

        // P1 checks: quantum is a divisor, so it must be non-zero.
        if self.time_in_mesh_quantum == Duration::from_secs(0) {
            return Err("Invalid time_in_mesh_quantum; must be non zero");
        }
        if self.time_in_mesh_weight < 0f64 {
            return Err("Invalid time_in_mesh_weight; must be positive (or 0 to disable)");
        }
        if self.time_in_mesh_weight != 0f64 && self.time_in_mesh_cap <= 0f64 {
            return Err("Invalid time_in_mesh_cap must be positive");
        }

        // P2 checks: decay/cap are only validated when the component is enabled.
        if self.first_message_deliveries_weight < 0f64 {
            return Err(
                "Invalid first_message_deliveries_weight; must be positive (or 0 to disable)",
            );
        }
        if self.first_message_deliveries_weight != 0f64
            && (self.first_message_deliveries_decay <= 0f64
                || self.first_message_deliveries_decay >= 1f64)
        {
            return Err("Invalid first_message_deliveries_decay; must be between 0 and 1");
        }
        if self.first_message_deliveries_weight != 0f64 && self.first_message_deliveries_cap <= 0f64
        {
            return Err("Invalid first_message_deliveries_cap must be positive");
        }

        // P3 checks: penalty component, so the weight must be non-positive.
        if self.mesh_message_deliveries_weight > 0f64 {
            return Err(
                "Invalid mesh_message_deliveries_weight; must be negative (or 0 to disable)",
            );
        }
        if self.mesh_message_deliveries_weight != 0f64
            && (self.mesh_message_deliveries_decay <= 0f64
                || self.mesh_message_deliveries_decay >= 1f64)
        {
            return Err("Invalid mesh_message_deliveries_decay; must be between 0 and 1");
        }
        if self.mesh_message_deliveries_weight != 0f64 && self.mesh_message_deliveries_cap <= 0f64 {
            return Err("Invalid mesh_message_deliveries_cap must be positive");
        }
        if self.mesh_message_deliveries_weight != 0f64
            && self.mesh_message_deliveries_threshold <= 0f64
        {
            return Err("Invalid mesh_message_deliveries_threshold; must be positive");
        }
        if self.mesh_message_deliveries_weight != 0f64
            && self.mesh_message_deliveries_activation < Duration::from_secs(1)
        {
            return Err("Invalid mesh_message_deliveries_activation; must be at least 1s");
        }

        // check P3b
        if self.mesh_failure_penalty_weight > 0f64 {
            return Err("Invalid mesh_failure_penalty_weight; must be negative (or 0 to disable)");
        }
        if self.mesh_failure_penalty_weight != 0f64
            && (self.mesh_failure_penalty_decay <= 0f64 || self.mesh_failure_penalty_decay >= 1f64)
        {
            return Err("Invalid mesh_failure_penalty_decay; must be between 0 and 1");
        }

        // check P4
        // Unlike the other components, the P4 decay is validated even when the
        // weight is 0.
        if self.invalid_message_deliveries_weight > 0f64 {
            return Err(
                "Invalid invalid_message_deliveries_weight; must be negative (or 0 to disable)",
            );
        }
        if self.invalid_message_deliveries_decay <= 0f64
            || self.invalid_message_deliveries_decay >= 1f64
        {
            return Err("Invalid invalid_message_deliveries_decay; must be between 0 and 1");
        }
        Ok(())
    }
}
|
||||
978
beacon_node/lighthouse_network/src/gossipsub/peer_score/tests.rs
Normal file
978
beacon_node/lighthouse_network/src/gossipsub/peer_score/tests.rs
Normal file
@ -0,0 +1,978 @@
|
||||
// Copyright 2020 Sigma Prime Pty Ltd.
|
||||
//
|
||||
// Permission is hereby granted, free of charge, to any person obtaining a
|
||||
// copy of this software and associated documentation files (the "Software"),
|
||||
// to deal in the Software without restriction, including without limitation
|
||||
// the rights to use, copy, modify, merge, publish, distribute, sublicense,
|
||||
// and/or sell copies of the Software, and to permit persons to whom the
|
||||
// Software is furnished to do so, subject to the following conditions:
|
||||
//
|
||||
// The above copyright notice and this permission notice shall be included in
|
||||
// all copies or substantial portions of the Software.
|
||||
//
|
||||
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
|
||||
// OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
|
||||
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
||||
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
|
||||
// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
|
||||
// DEALINGS IN THE SOFTWARE.
|
||||
|
||||
/// A collection of unit tests mostly ported from the go implementation.
|
||||
use super::*;
|
||||
|
||||
use crate::gossipsub::types::RawMessage;
|
||||
use crate::gossipsub::{IdentTopic as Topic, Message};
|
||||
|
||||
// Returns true when `value` lies strictly inside `expected` widened by the
// relative `variance`; the bounds swap sides when `expected` is negative.
fn within_variance(value: f64, expected: f64, variance: f64) -> bool {
    let (lower, upper) = if expected >= 0.0 {
        (expected * (1.0 - variance), expected * (1.0 + variance))
    } else {
        // negative expectation: multiplying flips the interval ends
        (expected * (1.0 + variance), expected * (1.0 - variance))
    };
    value > lower && value < upper
}
|
||||
|
||||
// generates a random gossipsub message with sequence number i
|
||||
fn make_test_message(seq: u64) -> (MessageId, RawMessage) {
|
||||
let raw_message = RawMessage {
|
||||
source: Some(PeerId::random()),
|
||||
data: vec![12, 34, 56],
|
||||
sequence_number: Some(seq),
|
||||
topic: Topic::new("test").hash(),
|
||||
signature: None,
|
||||
key: None,
|
||||
validated: true,
|
||||
};
|
||||
|
||||
let message = Message {
|
||||
source: raw_message.source,
|
||||
data: raw_message.data.clone(),
|
||||
sequence_number: raw_message.sequence_number,
|
||||
topic: raw_message.topic.clone(),
|
||||
};
|
||||
|
||||
let id = default_message_id()(&message);
|
||||
(id, raw_message)
|
||||
}
|
||||
|
||||
fn default_message_id() -> fn(&Message) -> MessageId {
|
||||
|message| {
|
||||
// default message id is: source + sequence number
|
||||
// NOTE: If either the peer_id or source is not provided, we set to 0;
|
||||
let mut source_string = if let Some(peer_id) = message.source.as_ref() {
|
||||
peer_id.to_base58()
|
||||
} else {
|
||||
PeerId::from_bytes(&[0, 1, 0])
|
||||
.expect("Valid peer id")
|
||||
.to_base58()
|
||||
};
|
||||
source_string.push_str(&message.sequence_number.unwrap_or_default().to_string());
|
||||
MessageId::from(source_string)
|
||||
}
|
||||
}
|
||||
|
||||
// P1: a grafted peer accrues positive score proportional to the time spent in
// the mesh. Sleeping may overshoot `elapsed`, so the check is a lower bound.
#[test]
fn test_score_time_in_mesh() {
    // Create parameters with reasonable default values
    let topic = Topic::new("test");
    let topic_hash = topic.hash();
    let mut params = PeerScoreParams {
        topic_score_cap: 1000.0,
        ..Default::default()
    };

    let topic_params = TopicScoreParams {
        topic_weight: 0.5,
        time_in_mesh_weight: 1.0,
        time_in_mesh_quantum: Duration::from_millis(1),
        time_in_mesh_cap: 3600.0,
        ..Default::default()
    };

    params.topics.insert(topic_hash, topic_params.clone());

    let peer_id = PeerId::random();

    let mut peer_score = PeerScore::new(params);
    // Peer score should start at 0
    peer_score.add_peer(peer_id);

    let score = peer_score.score(&peer_id);
    assert!(
        score == 0.0,
        "expected score to start at zero. Score found: {score}"
    );

    // The time in mesh depends on how long the peer has been grafted
    peer_score.graft(&peer_id, topic);
    let elapsed = topic_params.time_in_mesh_quantum * 200;
    std::thread::sleep(elapsed);
    peer_score.refresh_scores();

    let score = peer_score.score(&peer_id);
    // expected = topic_weight * P1 weight * (elapsed quanta)
    let expected = topic_params.topic_weight
        * topic_params.time_in_mesh_weight
        * (elapsed.as_millis() / topic_params.time_in_mesh_quantum.as_millis()) as f64;
    assert!(
        score >= expected,
        "The score: {score} should be greater than or equal to: {expected}"
    );
}
|
||||
|
||||
// P1 cap: with a small `time_in_mesh_cap`, sleeping well past the cap should
// yield a score near weight * cap (checked within a 50% variance band, since
// sleep duration is imprecise).
#[test]
fn test_score_time_in_mesh_cap() {
    // Create parameters with reasonable default values
    let topic = Topic::new("test");
    let topic_hash = topic.hash();
    let mut params = PeerScoreParams::default();

    let topic_params = TopicScoreParams {
        topic_weight: 0.5,
        time_in_mesh_weight: 1.0,
        time_in_mesh_quantum: Duration::from_millis(1),
        time_in_mesh_cap: 10.0,
        ..Default::default()
    };

    params.topics.insert(topic_hash, topic_params.clone());

    let peer_id = PeerId::random();

    let mut peer_score = PeerScore::new(params);
    // Peer score should start at 0
    peer_score.add_peer(peer_id);

    let score = peer_score.score(&peer_id);
    assert!(
        score == 0.0,
        "expected score to start at zero. Score found: {score}"
    );

    // The time in mesh depends on how long the peer has been grafted
    peer_score.graft(&peer_id, topic);
    // sleep 40 quanta — four times the cap, so the cap must dominate
    let elapsed = topic_params.time_in_mesh_quantum * 40;
    std::thread::sleep(elapsed);
    peer_score.refresh_scores();

    let score = peer_score.score(&peer_id);
    let expected = topic_params.topic_weight
        * topic_params.time_in_mesh_weight
        * topic_params.time_in_mesh_cap;
    let variance = 0.5;
    assert!(
        within_variance(score, expected, variance),
        "The score: {} should be within {} of {}",
        score,
        score * variance,
        expected
    );
}
|
||||
|
||||
// P2: each first delivery adds one to the counter; with decay 1.0 (no decay)
// and all other weights zeroed, the score is exactly weight * message count.
#[test]
fn test_score_first_message_deliveries() {
    // Create parameters with reasonable default values
    let topic = Topic::new("test");
    let topic_hash = topic.hash();
    let mut params = PeerScoreParams::default();

    let topic_params = TopicScoreParams {
        topic_weight: 1.0,
        first_message_deliveries_weight: 1.0,
        first_message_deliveries_decay: 1.0,
        first_message_deliveries_cap: 2000.0,
        time_in_mesh_weight: 0.0,
        ..Default::default()
    };

    params.topics.insert(topic_hash, topic_params.clone());

    let peer_id = PeerId::random();

    let mut peer_score = PeerScore::new(params);
    // Peer score should start at 0
    peer_score.add_peer(peer_id);
    peer_score.graft(&peer_id, topic);

    // deliver a bunch of messages from the peer
    let messages = 100;
    for seq in 0..messages {
        let (id, msg) = make_test_message(seq);
        // validate first, then deliver — mirrors the real message pipeline
        peer_score.validate_message(&peer_id, &id, &msg.topic);
        peer_score.deliver_message(&peer_id, &id, &msg.topic);
    }

    peer_score.refresh_scores();

    let score = peer_score.score(&peer_id);
    let expected =
        topic_params.topic_weight * topic_params.first_message_deliveries_weight * messages as f64;
    assert!(score == expected, "The score: {score} should be {expected}");
}
|
||||
|
||||
// P2 cap: delivering twice as many messages as `first_message_deliveries_cap`
// must clamp the counter, so the score equals weight * cap exactly.
#[test]
fn test_score_first_message_deliveries_cap() {
    // Create parameters with reasonable default values
    let topic = Topic::new("test");
    let topic_hash = topic.hash();
    let mut params = PeerScoreParams::default();

    let topic_params = TopicScoreParams {
        topic_weight: 1.0,
        first_message_deliveries_weight: 1.0,
        first_message_deliveries_decay: 1.0, // test without decay
        first_message_deliveries_cap: 50.0,
        time_in_mesh_weight: 0.0,
        ..Default::default()
    };

    params.topics.insert(topic_hash, topic_params.clone());

    let peer_id = PeerId::random();

    let mut peer_score = PeerScore::new(params);
    // Peer score should start at 0
    peer_score.add_peer(peer_id);
    peer_score.graft(&peer_id, topic);

    // deliver a bunch of messages from the peer
    let messages = 100;
    for seq in 0..messages {
        let (id, msg) = make_test_message(seq);
        peer_score.validate_message(&peer_id, &id, &msg.topic);
        peer_score.deliver_message(&peer_id, &id, &msg.topic);
    }

    peer_score.refresh_scores();
    let score = peer_score.score(&peer_id);
    let expected = topic_params.topic_weight
        * topic_params.first_message_deliveries_weight
        * topic_params.first_message_deliveries_cap;
    assert!(score == expected, "The score: {score} should be {expected}");
}
|
||||
|
||||
// P2 decay: every `refresh_scores` multiplies the counter by the decay
// factor; the first refresh already applies one decay step, and each further
// refresh compounds it.
#[test]
fn test_score_first_message_deliveries_decay() {
    // Create parameters with reasonable default values
    let topic = Topic::new("test");
    let topic_hash = topic.hash();
    let mut params = PeerScoreParams::default();

    let topic_params = TopicScoreParams {
        topic_weight: 1.0,
        first_message_deliveries_weight: 1.0,
        first_message_deliveries_decay: 0.9, // decay 10% per decay interval
        first_message_deliveries_cap: 2000.0,
        time_in_mesh_weight: 0.0,
        ..Default::default()
    };

    params.topics.insert(topic_hash, topic_params.clone());
    let peer_id = PeerId::random();
    let mut peer_score = PeerScore::new(params);
    peer_score.add_peer(peer_id);
    peer_score.graft(&peer_id, topic);

    // deliver a bunch of messages from the peer
    let messages = 100;
    for seq in 0..messages {
        let (id, msg) = make_test_message(seq);
        peer_score.validate_message(&peer_id, &id, &msg.topic);
        peer_score.deliver_message(&peer_id, &id, &msg.topic);
    }

    // the first refresh applies one decay step to the delivery counter
    peer_score.refresh_scores();
    let score = peer_score.score(&peer_id);
    let mut expected = topic_params.topic_weight
        * topic_params.first_message_deliveries_weight
        * topic_params.first_message_deliveries_decay
        * messages as f64;
    assert!(score == expected, "The score: {score} should be {expected}");

    // refreshing the scores applies the decay param
    let decay_intervals = 10;
    for _ in 0..decay_intervals {
        peer_score.refresh_scores();
        expected *= topic_params.first_message_deliveries_decay;
    }
    let score = peer_score.score(&peer_id);
    assert!(score == expected, "The score: {score} should be {expected}");
}
|
||||
|
||||
// P3: only deliveries inside the delivery window count toward the mesh
// delivery counter. Peer A (first delivery) and peer B (duplicate inside the
// window) escape the penalty; peer C (duplicate after the window) is
// penalized by the full squared threshold deficit.
#[test]
fn test_score_mesh_message_deliveries() {
    // Create parameters with reasonable default values
    let topic = Topic::new("test");
    let topic_hash = topic.hash();
    let mut params = PeerScoreParams::default();

    let topic_params = TopicScoreParams {
        topic_weight: 1.0,
        mesh_message_deliveries_weight: -1.0,
        mesh_message_deliveries_activation: Duration::from_secs(1),
        mesh_message_deliveries_window: Duration::from_millis(10),
        mesh_message_deliveries_threshold: 20.0,
        mesh_message_deliveries_cap: 100.0,
        mesh_message_deliveries_decay: 1.0,
        first_message_deliveries_weight: 0.0,
        time_in_mesh_weight: 0.0,
        mesh_failure_penalty_weight: 0.0,
        ..Default::default()
    };

    params.topics.insert(topic_hash, topic_params.clone());
    let mut peer_score = PeerScore::new(params);

    // peer A always delivers the message first.
    // peer B delivers next (within the delivery window).
    // peer C delivers outside the delivery window.
    // we expect peers A and B to have a score of zero, since all other parameter weights are zero.
    // Peer C should have a negative score.
    let peer_id_a = PeerId::random();
    let peer_id_b = PeerId::random();
    let peer_id_c = PeerId::random();

    let peers = vec![peer_id_a, peer_id_b, peer_id_c];

    for peer_id in &peers {
        peer_score.add_peer(*peer_id);
        peer_score.graft(peer_id, topic.clone());
    }

    // assert that nobody has been penalized yet for not delivering messages before activation time
    peer_score.refresh_scores();
    for peer_id in &peers {
        let score = peer_score.score(peer_id);
        assert!(
            score >= 0.0,
            "expected no mesh delivery penalty before activation time, got score {score}"
        );
    }

    // wait for the activation time to kick in
    std::thread::sleep(topic_params.mesh_message_deliveries_activation);

    // deliver a bunch of messages from peer A, with duplicates within the window from peer B,
    // and duplicates outside the window from peer C.
    let messages = 100;
    let mut messages_to_send = Vec::new();
    for seq in 0..messages {
        let (id, msg) = make_test_message(seq);
        peer_score.validate_message(&peer_id_a, &id, &msg.topic);
        peer_score.deliver_message(&peer_id_a, &id, &msg.topic);

        // B's duplicate lands immediately, i.e. inside the delivery window
        peer_score.duplicated_message(&peer_id_b, &id, &msg.topic);
        messages_to_send.push((id, msg));
    }

    // sleep past the window (with margin) so C's duplicates do not count
    std::thread::sleep(topic_params.mesh_message_deliveries_window + Duration::from_millis(20));

    for (id, msg) in messages_to_send {
        peer_score.duplicated_message(&peer_id_c, &id, &msg.topic);
    }

    peer_score.refresh_scores();
    let score_a = peer_score.score(&peer_id_a);
    let score_b = peer_score.score(&peer_id_b);
    let score_c = peer_score.score(&peer_id_c);

    assert!(
        score_a >= 0.0,
        "expected non-negative score for Peer A, got score {score_a}"
    );
    assert!(
        score_b >= 0.0,
        "expected non-negative score for Peer B, got score {score_b}"
    );

    // the penalty is the difference between the threshold and the actual mesh deliveries, squared.
    // since we didn't deliver anything, this is just the value of the threshold
    let penalty = topic_params.mesh_message_deliveries_threshold
        * topic_params.mesh_message_deliveries_threshold;
    let expected =
        topic_params.topic_weight * topic_params.mesh_message_deliveries_weight * penalty;

    assert!(score_c == expected, "Score: {score_c}. Expected {expected}");
}
|
||||
|
||||
// P3 decay: the mesh delivery counter decays per refresh; once it drops below
// the threshold, the score becomes the negative squared deficit.
#[test]
fn test_score_mesh_message_deliveries_decay() {
    // Create parameters with reasonable default values
    let topic = Topic::new("test");
    let topic_hash = topic.hash();
    let mut params = PeerScoreParams::default();

    let topic_params = TopicScoreParams {
        topic_weight: 1.0,
        mesh_message_deliveries_weight: -1.0,
        // zero activation so the penalty applies immediately
        mesh_message_deliveries_activation: Duration::from_secs(0),
        mesh_message_deliveries_window: Duration::from_millis(10),
        mesh_message_deliveries_threshold: 20.0,
        mesh_message_deliveries_cap: 100.0,
        mesh_message_deliveries_decay: 0.9,
        first_message_deliveries_weight: 0.0,
        time_in_mesh_weight: 0.0,
        time_in_mesh_quantum: Duration::from_secs(1),
        mesh_failure_penalty_weight: 0.0,
        ..Default::default()
    };

    params.topics.insert(topic_hash, topic_params.clone());
    let mut peer_score = PeerScore::new(params);

    let peer_id_a = PeerId::random();
    peer_score.add_peer(peer_id_a);
    peer_score.graft(&peer_id_a, topic);

    // deliver a bunch of messages from peer A
    let messages = 100;
    for seq in 0..messages {
        let (id, msg) = make_test_message(seq);
        peer_score.validate_message(&peer_id_a, &id, &msg.topic);
        peer_score.deliver_message(&peer_id_a, &id, &msg.topic);
    }

    // we should have a positive score, since we delivered more messages than the threshold
    peer_score.refresh_scores();

    let score_a = peer_score.score(&peer_id_a);
    assert!(
        score_a >= 0.0,
        "expected non-negative score for Peer A, got score {score_a}"
    );

    // track the decayed counter alongside the refreshes: one decay step has
    // already been applied above, then 20 more below
    let mut decayed_delivery_count = (messages as f64) * topic_params.mesh_message_deliveries_decay;
    for _ in 0..20 {
        peer_score.refresh_scores();
        decayed_delivery_count *= topic_params.mesh_message_deliveries_decay;
    }

    let score_a = peer_score.score(&peer_id_a);
    // the penalty is the difference between the threshold and the (decayed) mesh deliveries, squared.
    let deficit = topic_params.mesh_message_deliveries_threshold - decayed_delivery_count;
    let penalty = deficit * deficit;
    let expected =
        topic_params.topic_weight * topic_params.mesh_message_deliveries_weight * penalty;

    assert_eq!(score_a, expected, "Invalid score");
}
|
||||
|
||||
// P3b: pruning a peer whose mesh deliveries are under the threshold moves the
// squared deficit into the sticky failure penalty; peers that delivered
// enough (A) or were not pruned keep a zero score.
#[test]
fn test_score_mesh_failure_penalty() {
    // Create parameters with reasonable default values
    let topic = Topic::new("test");
    let topic_hash = topic.hash();
    let mut params = PeerScoreParams::default();

    let topic_params = TopicScoreParams {
        // the mesh failure penalty is applied when a peer is pruned while their
        // mesh deliveries are under the threshold.
        // for this test, we set the mesh delivery threshold, but set
        // mesh_message_deliveries to zero, so the only effect on the score
        // is from the mesh failure penalty
        topic_weight: 1.0,
        mesh_message_deliveries_weight: 0.0,
        mesh_message_deliveries_activation: Duration::from_secs(0),
        mesh_message_deliveries_window: Duration::from_millis(10),
        mesh_message_deliveries_threshold: 20.0,
        mesh_message_deliveries_cap: 100.0,
        mesh_message_deliveries_decay: 1.0,
        first_message_deliveries_weight: 0.0,
        time_in_mesh_weight: 0.0,
        time_in_mesh_quantum: Duration::from_secs(1),
        mesh_failure_penalty_weight: -1.0,
        mesh_failure_penalty_decay: 1.0,
        ..Default::default()
    };

    params.topics.insert(topic_hash, topic_params.clone());
    let mut peer_score = PeerScore::new(params);

    let peer_id_a = PeerId::random();
    let peer_id_b = PeerId::random();

    let peers = vec![peer_id_a, peer_id_b];

    for peer_id in &peers {
        peer_score.add_peer(*peer_id);
        peer_score.graft(peer_id, topic.clone());
    }

    // deliver a bunch of messages from peer A
    let messages = 100;
    for seq in 0..messages {
        let (id, msg) = make_test_message(seq);

        peer_score.validate_message(&peer_id_a, &id, &msg.topic);
        peer_score.deliver_message(&peer_id_a, &id, &msg.topic);
    }

    // peers A and B should both have zero scores, since the failure penalty hasn't been applied yet
    peer_score.refresh_scores();
    let score_a = peer_score.score(&peer_id_a);
    let score_b = peer_score.score(&peer_id_b);
    assert!(
        score_a >= 0.0,
        "expected non-negative score for Peer A, got score {score_a}"
    );
    assert!(
        score_b >= 0.0,
        "expected non-negative score for Peer B, got score {score_b}"
    );

    // prune peer B to apply the penalty
    peer_score.prune(&peer_id_b, topic.hash());
    peer_score.refresh_scores();
    let score_a = peer_score.score(&peer_id_a);

    assert_eq!(score_a, 0.0, "expected Peer A to have a 0");

    // penalty calculation is the same as for mesh_message_deliveries, but multiplied by
    // mesh_failure_penalty_weight
    // instead of mesh_message_deliveries_weight
    let penalty = topic_params.mesh_message_deliveries_threshold
        * topic_params.mesh_message_deliveries_threshold;
    let expected = topic_params.topic_weight * topic_params.mesh_failure_penalty_weight * penalty;

    let score_b = peer_score.score(&peer_id_b);

    assert_eq!(score_b, expected, "Peer B should have expected score",);
}
|
||||
|
||||
// P4: each rejected message increments the invalid-delivery counter; the
// score contribution is the counter squared times the (negative) weight.
#[test]
fn test_score_invalid_message_deliveries() {
    // Create parameters with reasonable default values
    let topic = Topic::new("test");
    let topic_hash = topic.hash();
    let mut params = PeerScoreParams::default();

    let topic_params = TopicScoreParams {
        topic_weight: 1.0,
        mesh_message_deliveries_weight: 0.0,
        mesh_message_deliveries_activation: Duration::from_secs(1),
        mesh_message_deliveries_window: Duration::from_millis(10),
        mesh_message_deliveries_threshold: 20.0,
        mesh_message_deliveries_cap: 100.0,
        mesh_message_deliveries_decay: 1.0,
        first_message_deliveries_weight: 0.0,
        time_in_mesh_weight: 0.0,
        mesh_failure_penalty_weight: 0.0,
        invalid_message_deliveries_weight: -1.0,
        invalid_message_deliveries_decay: 1.0,
        ..Default::default()
    };

    params.topics.insert(topic_hash, topic_params.clone());
    let mut peer_score = PeerScore::new(params);

    let peer_id_a = PeerId::random();
    peer_score.add_peer(peer_id_a);
    peer_score.graft(&peer_id_a, topic);

    // reject a bunch of messages from peer A
    let messages = 100;
    for seq in 0..messages {
        let (id, msg) = make_test_message(seq);
        peer_score.reject_message(&peer_id_a, &id, &msg.topic, RejectReason::ValidationFailed);
    }

    peer_score.refresh_scores();
    let score_a = peer_score.score(&peer_id_a);

    // P4 value is the square of the counter
    let expected = topic_params.topic_weight
        * topic_params.invalid_message_deliveries_weight
        * (messages * messages) as f64;

    assert_eq!(score_a, expected, "Peer has unexpected score",);
}
|
||||
|
||||
// P4 decay: the invalid-delivery counter decays per refresh, so the squared
// score contribution shrinks by decay^2 per refresh.
// NOTE(review): "deliveris" typo in the test name; renaming would change the
// test identifier, so it is left as-is here.
#[test]
fn test_score_invalid_message_deliveris_decay() {
    // Create parameters with reasonable default values
    let topic = Topic::new("test");
    let topic_hash = topic.hash();
    let mut params = PeerScoreParams::default();

    let topic_params = TopicScoreParams {
        topic_weight: 1.0,
        mesh_message_deliveries_weight: 0.0,
        mesh_message_deliveries_activation: Duration::from_secs(1),
        mesh_message_deliveries_window: Duration::from_millis(10),
        mesh_message_deliveries_threshold: 20.0,
        mesh_message_deliveries_cap: 100.0,
        mesh_message_deliveries_decay: 1.0,
        first_message_deliveries_weight: 0.0,
        time_in_mesh_weight: 0.0,
        mesh_failure_penalty_weight: 0.0,
        invalid_message_deliveries_weight: -1.0,
        invalid_message_deliveries_decay: 0.9,
        ..Default::default()
    };

    params.topics.insert(topic_hash, topic_params.clone());
    let mut peer_score = PeerScore::new(params);

    let peer_id_a = PeerId::random();
    peer_score.add_peer(peer_id_a);
    peer_score.graft(&peer_id_a, topic);

    // reject a bunch of messages from peer A
    let messages = 100;
    for seq in 0..messages {
        let (id, msg) = make_test_message(seq);
        peer_score.reject_message(&peer_id_a, &id, &msg.topic, RejectReason::ValidationFailed);
    }

    // the first refresh applies one decay step to the counter
    peer_score.refresh_scores();

    let decay = topic_params.invalid_message_deliveries_decay * messages as f64;

    // the counter is squared in the score, hence decay * decay
    let mut expected =
        topic_params.topic_weight * topic_params.invalid_message_deliveries_weight * decay * decay;

    let score_a = peer_score.score(&peer_id_a);
    assert_eq!(score_a, expected, "Peer has unexpected score");

    // refresh scores a few times to apply decay
    for _ in 0..10 {
        peer_score.refresh_scores();
        expected *= topic_params.invalid_message_deliveries_decay
            * topic_params.invalid_message_deliveries_decay;
    }

    let score_a = peer_score.score(&peer_id_a);
    assert_eq!(score_a, expected, "Peer has unexpected score");
}
|
||||
|
||||
#[test]
|
||||
fn test_score_reject_message_deliveries() {
|
||||
// This tests adds coverage for the dark corners of rejection tracing
|
||||
|
||||
// Create parameters with reasonable default values
|
||||
let topic = Topic::new("test");
|
||||
let topic_hash = topic.hash();
|
||||
let mut params = PeerScoreParams::default();
|
||||
|
||||
let topic_params = TopicScoreParams {
|
||||
topic_weight: 1.0,
|
||||
mesh_message_deliveries_weight: 0.0,
|
||||
first_message_deliveries_weight: 0.0,
|
||||
mesh_failure_penalty_weight: 0.0,
|
||||
time_in_mesh_weight: 0.0,
|
||||
time_in_mesh_quantum: Duration::from_secs(1),
|
||||
invalid_message_deliveries_weight: -1.0,
|
||||
invalid_message_deliveries_decay: 1.0,
|
||||
..Default::default()
|
||||
};
|
||||
|
||||
params.topics.insert(topic_hash, topic_params);
|
||||
let mut peer_score = PeerScore::new(params);
|
||||
|
||||
let peer_id_a = PeerId::random();
|
||||
let peer_id_b = PeerId::random();
|
||||
|
||||
let peers = vec![peer_id_a, peer_id_b];
|
||||
|
||||
for peer_id in &peers {
|
||||
peer_score.add_peer(*peer_id);
|
||||
}
|
||||
|
||||
let (id, msg) = make_test_message(1);
|
||||
|
||||
// these should have no effect in the score
|
||||
peer_score.reject_message(&peer_id_a, &id, &msg.topic, RejectReason::BlackListedPeer);
|
||||
peer_score.reject_message(&peer_id_a, &id, &msg.topic, RejectReason::BlackListedSource);
|
||||
peer_score.reject_message(&peer_id_a, &id, &msg.topic, RejectReason::ValidationIgnored);
|
||||
|
||||
peer_score.refresh_scores();
|
||||
let score_a = peer_score.score(&peer_id_a);
|
||||
let score_b = peer_score.score(&peer_id_b);
|
||||
|
||||
assert_eq!(score_a, 0.0, "Should have no effect on the score");
|
||||
assert_eq!(score_b, 0.0, "Should have no effect on the score");
|
||||
|
||||
// insert a record in the message deliveries
|
||||
peer_score.validate_message(&peer_id_a, &id, &msg.topic);
|
||||
|
||||
// this should have no effect in the score, and subsequent duplicate messages should have no
|
||||
// effect either
|
||||
peer_score.reject_message(&peer_id_a, &id, &msg.topic, RejectReason::ValidationIgnored);
|
||||
peer_score.duplicated_message(&peer_id_b, &id, &msg.topic);
|
||||
|
||||
peer_score.refresh_scores();
|
||||
let score_a = peer_score.score(&peer_id_a);
|
||||
let score_b = peer_score.score(&peer_id_b);
|
||||
|
||||
assert_eq!(score_a, 0.0, "Should have no effect on the score");
|
||||
assert_eq!(score_b, 0.0, "Should have no effect on the score");
|
||||
|
||||
// now clear the delivery record
|
||||
peer_score.deliveries.clear();
|
||||
|
||||
// insert a record in the message deliveries
|
||||
peer_score.validate_message(&peer_id_a, &id, &msg.topic);
|
||||
|
||||
// this should have no effect in the score, and subsequent duplicate messages should have no
|
||||
// effect either
|
||||
peer_score.reject_message(&peer_id_a, &id, &msg.topic, RejectReason::ValidationIgnored);
|
||||
peer_score.duplicated_message(&peer_id_b, &id, &msg.topic);
|
||||
|
||||
peer_score.refresh_scores();
|
||||
let score_a = peer_score.score(&peer_id_a);
|
||||
let score_b = peer_score.score(&peer_id_b);
|
||||
|
||||
assert_eq!(score_a, 0.0, "Should have no effect on the score");
|
||||
assert_eq!(score_b, 0.0, "Should have no effect on the score");
|
||||
|
||||
// now clear the delivery record
|
||||
peer_score.deliveries.clear();
|
||||
|
||||
// insert a new record in the message deliveries
|
||||
peer_score.validate_message(&peer_id_a, &id, &msg.topic);
|
||||
|
||||
// and reject the message to make sure duplicates are also penalized
|
||||
peer_score.reject_message(&peer_id_a, &id, &msg.topic, RejectReason::ValidationFailed);
|
||||
peer_score.duplicated_message(&peer_id_b, &id, &msg.topic);
|
||||
|
||||
peer_score.refresh_scores();
|
||||
let score_a = peer_score.score(&peer_id_a);
|
||||
let score_b = peer_score.score(&peer_id_b);
|
||||
|
||||
assert_eq!(score_a, -1.0, "Score should be effected");
|
||||
assert_eq!(score_b, -1.0, "Score should be effected");
|
||||
|
||||
// now clear the delivery record again
|
||||
peer_score.deliveries.clear();
|
||||
|
||||
// insert a new record in the message deliveries
|
||||
peer_score.validate_message(&peer_id_a, &id, &msg.topic);
|
||||
|
||||
// and reject the message after a duplicate has arrived
|
||||
peer_score.duplicated_message(&peer_id_b, &id, &msg.topic);
|
||||
peer_score.reject_message(&peer_id_a, &id, &msg.topic, RejectReason::ValidationFailed);
|
||||
|
||||
peer_score.refresh_scores();
|
||||
let score_a = peer_score.score(&peer_id_a);
|
||||
let score_b = peer_score.score(&peer_id_b);
|
||||
|
||||
assert_eq!(score_a, -4.0, "Score should be effected");
|
||||
assert_eq!(score_b, -4.0, "Score should be effected");
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_application_score() {
|
||||
// Create parameters with reasonable default values
|
||||
let app_specific_weight = 0.5;
|
||||
let topic = Topic::new("test");
|
||||
let topic_hash = topic.hash();
|
||||
let mut params = PeerScoreParams {
|
||||
app_specific_weight,
|
||||
..Default::default()
|
||||
};
|
||||
|
||||
let topic_params = TopicScoreParams {
|
||||
topic_weight: 1.0,
|
||||
mesh_message_deliveries_weight: 0.0,
|
||||
first_message_deliveries_weight: 0.0,
|
||||
mesh_failure_penalty_weight: 0.0,
|
||||
time_in_mesh_weight: 0.0,
|
||||
time_in_mesh_quantum: Duration::from_secs(1),
|
||||
invalid_message_deliveries_weight: 0.0,
|
||||
invalid_message_deliveries_decay: 1.0,
|
||||
..Default::default()
|
||||
};
|
||||
|
||||
params.topics.insert(topic_hash, topic_params);
|
||||
let mut peer_score = PeerScore::new(params);
|
||||
|
||||
let peer_id_a = PeerId::random();
|
||||
peer_score.add_peer(peer_id_a);
|
||||
peer_score.graft(&peer_id_a, topic);
|
||||
|
||||
let messages = 100;
|
||||
for i in -100..messages {
|
||||
let app_score_value = i as f64;
|
||||
peer_score.set_application_score(&peer_id_a, app_score_value);
|
||||
peer_score.refresh_scores();
|
||||
let score_a = peer_score.score(&peer_id_a);
|
||||
let expected = (i as f64) * app_specific_weight;
|
||||
assert_eq!(score_a, expected, "Peer has unexpected score");
|
||||
}
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_score_ip_colocation() {
|
||||
// Create parameters with reasonable default values
|
||||
let ip_colocation_factor_weight = -1.0;
|
||||
let ip_colocation_factor_threshold = 1.0;
|
||||
let topic = Topic::new("test");
|
||||
let topic_hash = topic.hash();
|
||||
let mut params = PeerScoreParams {
|
||||
ip_colocation_factor_weight,
|
||||
ip_colocation_factor_threshold,
|
||||
..Default::default()
|
||||
};
|
||||
|
||||
let topic_params = TopicScoreParams {
|
||||
topic_weight: 1.0,
|
||||
mesh_message_deliveries_weight: 0.0,
|
||||
first_message_deliveries_weight: 0.0,
|
||||
mesh_failure_penalty_weight: 0.0,
|
||||
time_in_mesh_weight: 0.0,
|
||||
time_in_mesh_quantum: Duration::from_secs(1),
|
||||
invalid_message_deliveries_weight: 0.0,
|
||||
..Default::default()
|
||||
};
|
||||
|
||||
params.topics.insert(topic_hash, topic_params);
|
||||
let mut peer_score = PeerScore::new(params);
|
||||
|
||||
let peer_id_a = PeerId::random();
|
||||
let peer_id_b = PeerId::random();
|
||||
let peer_id_c = PeerId::random();
|
||||
let peer_id_d = PeerId::random();
|
||||
|
||||
let peers = vec![peer_id_a, peer_id_b, peer_id_c, peer_id_d];
|
||||
for peer_id in &peers {
|
||||
peer_score.add_peer(*peer_id);
|
||||
peer_score.graft(peer_id, topic.clone());
|
||||
}
|
||||
|
||||
// peerA should have no penalty, but B, C, and D should be penalized for sharing an IP
|
||||
peer_score.add_ip(&peer_id_a, "1.2.3.4".parse().unwrap());
|
||||
peer_score.add_ip(&peer_id_b, "2.3.4.5".parse().unwrap());
|
||||
peer_score.add_ip(&peer_id_c, "2.3.4.5".parse().unwrap());
|
||||
peer_score.add_ip(&peer_id_c, "3.4.5.6".parse().unwrap());
|
||||
peer_score.add_ip(&peer_id_d, "2.3.4.5".parse().unwrap());
|
||||
|
||||
peer_score.refresh_scores();
|
||||
let score_a = peer_score.score(&peer_id_a);
|
||||
let score_b = peer_score.score(&peer_id_b);
|
||||
let score_c = peer_score.score(&peer_id_c);
|
||||
let score_d = peer_score.score(&peer_id_d);
|
||||
|
||||
assert_eq!(score_a, 0.0, "Peer A should be unaffected");
|
||||
|
||||
let n_shared = 3.0;
|
||||
let ip_surplus = n_shared - ip_colocation_factor_threshold;
|
||||
let penalty = ip_surplus * ip_surplus;
|
||||
let expected = ip_colocation_factor_weight * penalty;
|
||||
|
||||
assert_eq!(score_b, expected, "Peer B should have expected score");
|
||||
assert_eq!(score_c, expected, "Peer C should have expected score");
|
||||
assert_eq!(score_d, expected, "Peer D should have expected score");
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_score_behaviour_penality() {
|
||||
// Create parameters with reasonable default values
|
||||
let behaviour_penalty_weight = -1.0;
|
||||
let behaviour_penalty_decay = 0.99;
|
||||
|
||||
let topic = Topic::new("test");
|
||||
let topic_hash = topic.hash();
|
||||
let mut params = PeerScoreParams {
|
||||
behaviour_penalty_decay,
|
||||
behaviour_penalty_weight,
|
||||
..Default::default()
|
||||
};
|
||||
|
||||
let topic_params = TopicScoreParams {
|
||||
topic_weight: 1.0,
|
||||
mesh_message_deliveries_weight: 0.0,
|
||||
first_message_deliveries_weight: 0.0,
|
||||
mesh_failure_penalty_weight: 0.0,
|
||||
time_in_mesh_weight: 0.0,
|
||||
time_in_mesh_quantum: Duration::from_secs(1),
|
||||
invalid_message_deliveries_weight: 0.0,
|
||||
..Default::default()
|
||||
};
|
||||
|
||||
params.topics.insert(topic_hash, topic_params);
|
||||
let mut peer_score = PeerScore::new(params);
|
||||
|
||||
let peer_id_a = PeerId::random();
|
||||
|
||||
// add a penalty to a non-existent peer.
|
||||
peer_score.add_penalty(&peer_id_a, 1);
|
||||
|
||||
let score_a = peer_score.score(&peer_id_a);
|
||||
assert_eq!(score_a, 0.0, "Peer A should be unaffected");
|
||||
|
||||
// add the peer and test penalties
|
||||
peer_score.add_peer(peer_id_a);
|
||||
assert_eq!(score_a, 0.0, "Peer A should be unaffected");
|
||||
|
||||
peer_score.add_penalty(&peer_id_a, 1);
|
||||
|
||||
let score_a = peer_score.score(&peer_id_a);
|
||||
assert_eq!(score_a, -1.0, "Peer A should have been penalized");
|
||||
|
||||
peer_score.add_penalty(&peer_id_a, 1);
|
||||
let score_a = peer_score.score(&peer_id_a);
|
||||
assert_eq!(score_a, -4.0, "Peer A should have been penalized");
|
||||
|
||||
peer_score.refresh_scores();
|
||||
|
||||
let score_a = peer_score.score(&peer_id_a);
|
||||
assert_eq!(score_a, -3.9204, "Peer A should have been penalized");
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_score_retention() {
|
||||
// Create parameters with reasonable default values
|
||||
let topic = Topic::new("test");
|
||||
let topic_hash = topic.hash();
|
||||
let app_specific_weight = 1.0;
|
||||
let app_score_value = -1000.0;
|
||||
let retain_score = Duration::from_secs(1);
|
||||
let mut params = PeerScoreParams {
|
||||
app_specific_weight,
|
||||
retain_score,
|
||||
..Default::default()
|
||||
};
|
||||
|
||||
let topic_params = TopicScoreParams {
|
||||
topic_weight: 0.0,
|
||||
mesh_message_deliveries_weight: 0.0,
|
||||
mesh_message_deliveries_activation: Duration::from_secs(0),
|
||||
first_message_deliveries_weight: 0.0,
|
||||
time_in_mesh_weight: 0.0,
|
||||
..Default::default()
|
||||
};
|
||||
|
||||
params.topics.insert(topic_hash, topic_params);
|
||||
let mut peer_score = PeerScore::new(params);
|
||||
|
||||
let peer_id_a = PeerId::random();
|
||||
peer_score.add_peer(peer_id_a);
|
||||
peer_score.graft(&peer_id_a, topic);
|
||||
|
||||
peer_score.set_application_score(&peer_id_a, app_score_value);
|
||||
|
||||
// score should equal -1000 (app specific score)
|
||||
peer_score.refresh_scores();
|
||||
let score_a = peer_score.score(&peer_id_a);
|
||||
assert_eq!(
|
||||
score_a, app_score_value,
|
||||
"Score should be the application specific score"
|
||||
);
|
||||
|
||||
// disconnect & wait half of RetainScore time. Should still have negative score
|
||||
peer_score.remove_peer(&peer_id_a);
|
||||
std::thread::sleep(retain_score / 2);
|
||||
peer_score.refresh_scores();
|
||||
let score_a = peer_score.score(&peer_id_a);
|
||||
assert_eq!(
|
||||
score_a, app_score_value,
|
||||
"Score should be the application specific score"
|
||||
);
|
||||
|
||||
// wait remaining time (plus a little slop) and the score should reset to zero
|
||||
std::thread::sleep(retain_score / 2 + Duration::from_millis(50));
|
||||
peer_score.refresh_scores();
|
||||
let score_a = peer_score.score(&peer_id_a);
|
||||
assert_eq!(
|
||||
score_a, 0.0,
|
||||
"Score should be the application specific score"
|
||||
);
|
||||
}
|
||||
625
beacon_node/lighthouse_network/src/gossipsub/protocol.rs
Normal file
625
beacon_node/lighthouse_network/src/gossipsub/protocol.rs
Normal file
@ -0,0 +1,625 @@
|
||||
// Copyright 2020 Sigma Prime Pty Ltd.
|
||||
//
|
||||
// Permission is hereby granted, free of charge, to any person obtaining a
|
||||
// copy of this software and associated documentation files (the "Software"),
|
||||
// to deal in the Software without restriction, including without limitation
|
||||
// the rights to use, copy, modify, merge, publish, distribute, sublicense,
|
||||
// and/or sell copies of the Software, and to permit persons to whom the
|
||||
// Software is furnished to do so, subject to the following conditions:
|
||||
//
|
||||
// The above copyright notice and this permission notice shall be included in
|
||||
// all copies or substantial portions of the Software.
|
||||
//
|
||||
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
|
||||
// OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
|
||||
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
||||
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
|
||||
// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
|
||||
// DEALINGS IN THE SOFTWARE.
|
||||
|
||||
use super::config::ValidationMode;
|
||||
use super::handler::HandlerEvent;
|
||||
use super::rpc_proto::proto;
|
||||
use super::topic::TopicHash;
|
||||
use super::types::{
|
||||
ControlAction, Graft, IHave, IWant, MessageId, PeerInfo, PeerKind, Prune, RawMessage, Rpc,
|
||||
Subscription, SubscriptionAction,
|
||||
};
|
||||
use super::ValidationError;
|
||||
use asynchronous_codec::{Decoder, Encoder, Framed};
|
||||
use byteorder::{BigEndian, ByteOrder};
|
||||
use bytes::BytesMut;
|
||||
use futures::future;
|
||||
use futures::prelude::*;
|
||||
use libp2p::core::{InboundUpgrade, OutboundUpgrade, UpgradeInfo};
|
||||
use libp2p::identity::{PeerId, PublicKey};
|
||||
use libp2p::swarm::StreamProtocol;
|
||||
use quick_protobuf::Writer;
|
||||
use std::pin::Pin;
|
||||
use void::Void;
|
||||
|
||||
pub(crate) const SIGNING_PREFIX: &[u8] = b"libp2p-pubsub:";
|
||||
|
||||
pub(crate) const GOSSIPSUB_1_1_0_PROTOCOL: ProtocolId = ProtocolId {
|
||||
protocol: StreamProtocol::new("/meshsub/1.1.0"),
|
||||
kind: PeerKind::Gossipsubv1_1,
|
||||
};
|
||||
pub(crate) const GOSSIPSUB_1_0_0_PROTOCOL: ProtocolId = ProtocolId {
|
||||
protocol: StreamProtocol::new("/meshsub/1.0.0"),
|
||||
kind: PeerKind::Gossipsub,
|
||||
};
|
||||
pub(crate) const FLOODSUB_PROTOCOL: ProtocolId = ProtocolId {
|
||||
protocol: StreamProtocol::new("/floodsub/1.0.0"),
|
||||
kind: PeerKind::Floodsub,
|
||||
};
|
||||
|
||||
/// Implementation of [`InboundUpgrade`] and [`OutboundUpgrade`] for the Gossipsub protocol.
|
||||
#[derive(Debug, Clone)]
|
||||
pub struct ProtocolConfig {
|
||||
/// The Gossipsub protocol id to listen on.
|
||||
pub(crate) protocol_ids: Vec<ProtocolId>,
|
||||
/// The maximum transmit size for a packet.
|
||||
pub(crate) max_transmit_size: usize,
|
||||
/// Determines the level of validation to be done on incoming messages.
|
||||
pub(crate) validation_mode: ValidationMode,
|
||||
}
|
||||
|
||||
impl Default for ProtocolConfig {
|
||||
fn default() -> Self {
|
||||
Self {
|
||||
max_transmit_size: 65536,
|
||||
validation_mode: ValidationMode::Strict,
|
||||
protocol_ids: vec![GOSSIPSUB_1_1_0_PROTOCOL, GOSSIPSUB_1_0_0_PROTOCOL],
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// The protocol ID
|
||||
#[derive(Clone, Debug, PartialEq)]
|
||||
pub struct ProtocolId {
|
||||
/// The RPC message type/name.
|
||||
pub protocol: StreamProtocol,
|
||||
/// The type of protocol we support
|
||||
pub kind: PeerKind,
|
||||
}
|
||||
|
||||
impl AsRef<str> for ProtocolId {
|
||||
fn as_ref(&self) -> &str {
|
||||
self.protocol.as_ref()
|
||||
}
|
||||
}
|
||||
|
||||
impl UpgradeInfo for ProtocolConfig {
|
||||
type Info = ProtocolId;
|
||||
type InfoIter = Vec<Self::Info>;
|
||||
|
||||
fn protocol_info(&self) -> Self::InfoIter {
|
||||
self.protocol_ids.clone()
|
||||
}
|
||||
}
|
||||
|
||||
impl<TSocket> InboundUpgrade<TSocket> for ProtocolConfig
|
||||
where
|
||||
TSocket: AsyncRead + AsyncWrite + Unpin + Send + 'static,
|
||||
{
|
||||
type Output = (Framed<TSocket, GossipsubCodec>, PeerKind);
|
||||
type Error = Void;
|
||||
type Future = Pin<Box<dyn Future<Output = Result<Self::Output, Self::Error>> + Send>>;
|
||||
|
||||
fn upgrade_inbound(self, socket: TSocket, protocol_id: Self::Info) -> Self::Future {
|
||||
Box::pin(future::ok((
|
||||
Framed::new(
|
||||
socket,
|
||||
GossipsubCodec::new(self.max_transmit_size, self.validation_mode),
|
||||
),
|
||||
protocol_id.kind,
|
||||
)))
|
||||
}
|
||||
}
|
||||
|
||||
impl<TSocket> OutboundUpgrade<TSocket> for ProtocolConfig
|
||||
where
|
||||
TSocket: AsyncWrite + AsyncRead + Unpin + Send + 'static,
|
||||
{
|
||||
type Output = (Framed<TSocket, GossipsubCodec>, PeerKind);
|
||||
type Error = Void;
|
||||
type Future = Pin<Box<dyn Future<Output = Result<Self::Output, Self::Error>> + Send>>;
|
||||
|
||||
fn upgrade_outbound(self, socket: TSocket, protocol_id: Self::Info) -> Self::Future {
|
||||
Box::pin(future::ok((
|
||||
Framed::new(
|
||||
socket,
|
||||
GossipsubCodec::new(self.max_transmit_size, self.validation_mode),
|
||||
),
|
||||
protocol_id.kind,
|
||||
)))
|
||||
}
|
||||
}
|
||||
|
||||
/* Gossip codec for the framing */
|
||||
|
||||
pub struct GossipsubCodec {
|
||||
/// Determines the level of validation performed on incoming messages.
|
||||
validation_mode: ValidationMode,
|
||||
/// The codec to handle common encoding/decoding of protobuf messages
|
||||
codec: quick_protobuf_codec::Codec<proto::RPC>,
|
||||
}
|
||||
|
||||
impl GossipsubCodec {
|
||||
pub fn new(max_length: usize, validation_mode: ValidationMode) -> GossipsubCodec {
|
||||
let codec = quick_protobuf_codec::Codec::new(max_length);
|
||||
GossipsubCodec {
|
||||
validation_mode,
|
||||
codec,
|
||||
}
|
||||
}
|
||||
|
||||
/// Verifies a gossipsub message. This returns either a success or failure. All errors
|
||||
/// are logged, which prevents error handling in the codec and handler. We simply drop invalid
|
||||
/// messages and log warnings, rather than propagating errors through the codec.
|
||||
fn verify_signature(message: &proto::Message) -> bool {
|
||||
use quick_protobuf::MessageWrite;
|
||||
|
||||
let Some(from) = message.from.as_ref() else {
|
||||
tracing::debug!("Signature verification failed: No source id given");
|
||||
return false;
|
||||
};
|
||||
|
||||
let Ok(source) = PeerId::from_bytes(from) else {
|
||||
tracing::debug!("Signature verification failed: Invalid Peer Id");
|
||||
return false;
|
||||
};
|
||||
|
||||
let Some(signature) = message.signature.as_ref() else {
|
||||
tracing::debug!("Signature verification failed: No signature provided");
|
||||
return false;
|
||||
};
|
||||
|
||||
// If there is a key value in the protobuf, use that key otherwise the key must be
|
||||
// obtained from the inlined source peer_id.
|
||||
let public_key = match message.key.as_deref().map(PublicKey::try_decode_protobuf) {
|
||||
Some(Ok(key)) => key,
|
||||
_ => match PublicKey::try_decode_protobuf(&source.to_bytes()[2..]) {
|
||||
Ok(v) => v,
|
||||
Err(_) => {
|
||||
tracing::warn!("Signature verification failed: No valid public key supplied");
|
||||
return false;
|
||||
}
|
||||
},
|
||||
};
|
||||
|
||||
// The key must match the peer_id
|
||||
if source != public_key.to_peer_id() {
|
||||
tracing::warn!(
|
||||
"Signature verification failed: Public key doesn't match source peer id"
|
||||
);
|
||||
return false;
|
||||
}
|
||||
|
||||
// Construct the signature bytes
|
||||
let mut message_sig = message.clone();
|
||||
message_sig.signature = None;
|
||||
message_sig.key = None;
|
||||
let mut buf = Vec::with_capacity(message_sig.get_size());
|
||||
let mut writer = Writer::new(&mut buf);
|
||||
message_sig
|
||||
.write_message(&mut writer)
|
||||
.expect("Encoding to succeed");
|
||||
let mut signature_bytes = SIGNING_PREFIX.to_vec();
|
||||
signature_bytes.extend_from_slice(&buf);
|
||||
public_key.verify(&signature_bytes, signature)
|
||||
}
|
||||
}
|
||||
|
||||
impl Encoder for GossipsubCodec {
|
||||
type Item<'a> = proto::RPC;
|
||||
type Error = quick_protobuf_codec::Error;
|
||||
|
||||
fn encode(&mut self, item: Self::Item<'_>, dst: &mut BytesMut) -> Result<(), Self::Error> {
|
||||
self.codec.encode(item, dst)
|
||||
}
|
||||
}
|
||||
|
||||
impl Decoder for GossipsubCodec {
|
||||
type Item = HandlerEvent;
|
||||
type Error = quick_protobuf_codec::Error;
|
||||
|
||||
fn decode(&mut self, src: &mut BytesMut) -> Result<Option<Self::Item>, Self::Error> {
|
||||
let Some(rpc) = self.codec.decode(src)? else {
|
||||
return Ok(None);
|
||||
};
|
||||
// Store valid messages.
|
||||
let mut messages = Vec::with_capacity(rpc.publish.len());
|
||||
// Store any invalid messages.
|
||||
let mut invalid_messages = Vec::new();
|
||||
|
||||
for message in rpc.publish.into_iter() {
|
||||
// Keep track of the type of invalid message.
|
||||
let mut invalid_kind = None;
|
||||
let mut verify_signature = false;
|
||||
let mut verify_sequence_no = false;
|
||||
let mut verify_source = false;
|
||||
|
||||
match self.validation_mode {
|
||||
ValidationMode::Strict => {
|
||||
// Validate everything
|
||||
verify_signature = true;
|
||||
verify_sequence_no = true;
|
||||
verify_source = true;
|
||||
}
|
||||
ValidationMode::Permissive => {
|
||||
// If the fields exist, validate them
|
||||
if message.signature.is_some() {
|
||||
verify_signature = true;
|
||||
}
|
||||
if message.seqno.is_some() {
|
||||
verify_sequence_no = true;
|
||||
}
|
||||
if message.from.is_some() {
|
||||
verify_source = true;
|
||||
}
|
||||
}
|
||||
ValidationMode::Anonymous => {
|
||||
if message.signature.is_some() {
|
||||
tracing::warn!(
|
||||
"Signature field was non-empty and anonymous validation mode is set"
|
||||
);
|
||||
invalid_kind = Some(ValidationError::SignaturePresent);
|
||||
} else if message.seqno.is_some() {
|
||||
tracing::warn!(
|
||||
"Sequence number was non-empty and anonymous validation mode is set"
|
||||
);
|
||||
invalid_kind = Some(ValidationError::SequenceNumberPresent);
|
||||
} else if message.from.is_some() {
|
||||
tracing::warn!("Message dropped. Message source was non-empty and anonymous validation mode is set");
|
||||
invalid_kind = Some(ValidationError::MessageSourcePresent);
|
||||
}
|
||||
}
|
||||
ValidationMode::None => {}
|
||||
}
|
||||
|
||||
// If the initial validation logic failed, add the message to invalid messages and
|
||||
// continue processing the others.
|
||||
if let Some(validation_error) = invalid_kind.take() {
|
||||
let message = RawMessage {
|
||||
source: None, // don't bother inform the application
|
||||
data: message.data.unwrap_or_default(),
|
||||
sequence_number: None, // don't inform the application
|
||||
topic: TopicHash::from_raw(message.topic),
|
||||
signature: None, // don't inform the application
|
||||
key: message.key,
|
||||
validated: false,
|
||||
};
|
||||
invalid_messages.push((message, validation_error));
|
||||
// proceed to the next message
|
||||
continue;
|
||||
}
|
||||
|
||||
// verify message signatures if required
|
||||
if verify_signature && !GossipsubCodec::verify_signature(&message) {
|
||||
tracing::warn!("Invalid signature for received message");
|
||||
|
||||
// Build the invalid message (ignoring further validation of sequence number
|
||||
// and source)
|
||||
let message = RawMessage {
|
||||
source: None, // don't bother inform the application
|
||||
data: message.data.unwrap_or_default(),
|
||||
sequence_number: None, // don't inform the application
|
||||
topic: TopicHash::from_raw(message.topic),
|
||||
signature: None, // don't inform the application
|
||||
key: message.key,
|
||||
validated: false,
|
||||
};
|
||||
invalid_messages.push((message, ValidationError::InvalidSignature));
|
||||
// proceed to the next message
|
||||
continue;
|
||||
}
|
||||
|
||||
// ensure the sequence number is a u64
|
||||
let sequence_number = if verify_sequence_no {
|
||||
if let Some(seq_no) = message.seqno {
|
||||
if seq_no.is_empty() {
|
||||
None
|
||||
} else if seq_no.len() != 8 {
|
||||
tracing::debug!(
|
||||
sequence_number=?seq_no,
|
||||
sequence_length=%seq_no.len(),
|
||||
"Invalid sequence number length for received message"
|
||||
);
|
||||
let message = RawMessage {
|
||||
source: None, // don't bother inform the application
|
||||
data: message.data.unwrap_or_default(),
|
||||
sequence_number: None, // don't inform the application
|
||||
topic: TopicHash::from_raw(message.topic),
|
||||
signature: message.signature, // don't inform the application
|
||||
key: message.key,
|
||||
validated: false,
|
||||
};
|
||||
invalid_messages.push((message, ValidationError::InvalidSequenceNumber));
|
||||
// proceed to the next message
|
||||
continue;
|
||||
} else {
|
||||
// valid sequence number
|
||||
Some(BigEndian::read_u64(&seq_no))
|
||||
}
|
||||
} else {
|
||||
// sequence number was not present
|
||||
tracing::debug!("Sequence number not present but expected");
|
||||
let message = RawMessage {
|
||||
source: None, // don't bother inform the application
|
||||
data: message.data.unwrap_or_default(),
|
||||
sequence_number: None, // don't inform the application
|
||||
topic: TopicHash::from_raw(message.topic),
|
||||
signature: message.signature, // don't inform the application
|
||||
key: message.key,
|
||||
validated: false,
|
||||
};
|
||||
invalid_messages.push((message, ValidationError::EmptySequenceNumber));
|
||||
continue;
|
||||
}
|
||||
} else {
|
||||
// Do not verify the sequence number, consider it empty
|
||||
None
|
||||
};
|
||||
|
||||
// Verify the message source if required
|
||||
let source = if verify_source {
|
||||
if let Some(bytes) = message.from {
|
||||
if !bytes.is_empty() {
|
||||
match PeerId::from_bytes(&bytes) {
|
||||
Ok(peer_id) => Some(peer_id), // valid peer id
|
||||
Err(_) => {
|
||||
// invalid peer id, add to invalid messages
|
||||
tracing::debug!("Message source has an invalid PeerId");
|
||||
let message = RawMessage {
|
||||
source: None, // don't bother inform the application
|
||||
data: message.data.unwrap_or_default(),
|
||||
sequence_number,
|
||||
topic: TopicHash::from_raw(message.topic),
|
||||
signature: message.signature, // don't inform the application
|
||||
key: message.key,
|
||||
validated: false,
|
||||
};
|
||||
invalid_messages.push((message, ValidationError::InvalidPeerId));
|
||||
continue;
|
||||
}
|
||||
}
|
||||
} else {
|
||||
None
|
||||
}
|
||||
} else {
|
||||
None
|
||||
}
|
||||
} else {
|
||||
None
|
||||
};
|
||||
|
||||
// This message has passed all validation, add it to the validated messages.
|
||||
messages.push(RawMessage {
|
||||
source,
|
||||
data: message.data.unwrap_or_default(),
|
||||
sequence_number,
|
||||
topic: TopicHash::from_raw(message.topic),
|
||||
signature: message.signature,
|
||||
key: message.key,
|
||||
validated: false,
|
||||
});
|
||||
}
|
||||
|
||||
let mut control_msgs = Vec::new();
|
||||
|
||||
if let Some(rpc_control) = rpc.control {
|
||||
// Collect the gossipsub control messages
|
||||
let ihave_msgs: Vec<ControlAction> = rpc_control
|
||||
.ihave
|
||||
.into_iter()
|
||||
.map(|ihave| {
|
||||
ControlAction::IHave(IHave {
|
||||
topic_hash: TopicHash::from_raw(ihave.topic_id.unwrap_or_default()),
|
||||
message_ids: ihave
|
||||
.message_ids
|
||||
.into_iter()
|
||||
.map(MessageId::from)
|
||||
.collect::<Vec<_>>(),
|
||||
})
|
||||
})
|
||||
.collect();
|
||||
|
||||
let iwant_msgs: Vec<ControlAction> = rpc_control
|
||||
.iwant
|
||||
.into_iter()
|
||||
.map(|iwant| {
|
||||
ControlAction::IWant(IWant {
|
||||
message_ids: iwant
|
||||
.message_ids
|
||||
.into_iter()
|
||||
.map(MessageId::from)
|
||||
.collect::<Vec<_>>(),
|
||||
})
|
||||
})
|
||||
.collect();
|
||||
|
||||
let graft_msgs: Vec<ControlAction> = rpc_control
|
||||
.graft
|
||||
.into_iter()
|
||||
.map(|graft| {
|
||||
ControlAction::Graft(Graft {
|
||||
topic_hash: TopicHash::from_raw(graft.topic_id.unwrap_or_default()),
|
||||
})
|
||||
})
|
||||
.collect();
|
||||
|
||||
let mut prune_msgs = Vec::new();
|
||||
|
||||
for prune in rpc_control.prune {
|
||||
// filter out invalid peers
|
||||
let peers = prune
|
||||
.peers
|
||||
.into_iter()
|
||||
.filter_map(|info| {
|
||||
info.peer_id
|
||||
.as_ref()
|
||||
.and_then(|id| PeerId::from_bytes(id).ok())
|
||||
.map(|peer_id|
|
||||
//TODO signedPeerRecord, see https://github.com/libp2p/specs/pull/217
|
||||
PeerInfo {
|
||||
peer_id: Some(peer_id),
|
||||
})
|
||||
})
|
||||
.collect::<Vec<PeerInfo>>();
|
||||
|
||||
let topic_hash = TopicHash::from_raw(prune.topic_id.unwrap_or_default());
|
||||
prune_msgs.push(ControlAction::Prune(Prune {
|
||||
topic_hash,
|
||||
peers,
|
||||
backoff: prune.backoff,
|
||||
}));
|
||||
}
|
||||
|
||||
control_msgs.extend(ihave_msgs);
|
||||
control_msgs.extend(iwant_msgs);
|
||||
control_msgs.extend(graft_msgs);
|
||||
control_msgs.extend(prune_msgs);
|
||||
}
|
||||
|
||||
Ok(Some(HandlerEvent::Message {
|
||||
rpc: Rpc {
|
||||
messages,
|
||||
subscriptions: rpc
|
||||
.subscriptions
|
||||
.into_iter()
|
||||
.map(|sub| Subscription {
|
||||
action: if Some(true) == sub.subscribe {
|
||||
SubscriptionAction::Subscribe
|
||||
} else {
|
||||
SubscriptionAction::Unsubscribe
|
||||
},
|
||||
topic_hash: TopicHash::from_raw(sub.topic_id.unwrap_or_default()),
|
||||
})
|
||||
.collect(),
|
||||
control_msgs,
|
||||
},
|
||||
invalid_messages,
|
||||
}))
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use super::*;
|
||||
use crate::gossipsub::config::Config;
|
||||
use crate::gossipsub::protocol::{BytesMut, GossipsubCodec, HandlerEvent};
|
||||
use crate::gossipsub::*;
|
||||
use crate::gossipsub::{IdentTopic as Topic, Version};
|
||||
use libp2p::identity::Keypair;
|
||||
use quickcheck::*;
|
||||
|
||||
#[derive(Clone, Debug)]
|
||||
struct Message(RawMessage);
|
||||
|
||||
impl Arbitrary for Message {
|
||||
fn arbitrary(g: &mut Gen) -> Self {
|
||||
let keypair = TestKeypair::arbitrary(g);
|
||||
|
||||
// generate an arbitrary GossipsubMessage using the behaviour signing functionality
|
||||
let config = Config::default();
|
||||
let mut gs: Behaviour =
|
||||
Behaviour::new(MessageAuthenticity::Signed(keypair.0), config).unwrap();
|
||||
let mut data_g = quickcheck::Gen::new(10024);
|
||||
let data = (0..u8::arbitrary(&mut data_g))
|
||||
.map(|_| u8::arbitrary(g))
|
||||
.collect::<Vec<_>>();
|
||||
let topic_id = TopicId::arbitrary(g).0;
|
||||
Message(gs.build_raw_message(topic_id, data).unwrap())
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Clone, Debug)]
|
||||
struct TopicId(TopicHash);
|
||||
|
||||
impl Arbitrary for TopicId {
|
||||
fn arbitrary(g: &mut Gen) -> Self {
|
||||
let mut data_g = quickcheck::Gen::new(1024);
|
||||
let topic_string: String = (0..u8::arbitrary(&mut data_g))
|
||||
.map(|_| char::arbitrary(g))
|
||||
.collect::<String>();
|
||||
TopicId(Topic::new(topic_string).into())
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Clone)]
|
||||
struct TestKeypair(Keypair);
|
||||
|
||||
impl Arbitrary for TestKeypair {
|
||||
#[cfg(feature = "rsa")]
|
||||
fn arbitrary(g: &mut Gen) -> Self {
|
||||
let keypair = if bool::arbitrary(g) {
|
||||
// Small enough to be inlined.
|
||||
Keypair::generate_ed25519()
|
||||
} else {
|
||||
// Too large to be inlined.
|
||||
let mut rsa_key = hex::decode("308204bd020100300d06092a864886f70d0101010500048204a7308204a30201000282010100ef930f41a71288b643c1cbecbf5f72ab53992249e2b00835bf07390b6745419f3848cbcc5b030faa127bc88cdcda1c1d6f3ff699f0524c15ab9d2c9d8015f5d4bd09881069aad4e9f91b8b0d2964d215cdbbae83ddd31a7622a8228acee07079f6e501aea95508fa26c6122816ef7b00ac526d422bd12aed347c37fff6c1c307f3ba57bb28a7f28609e0bdcc839da4eedca39f5d2fa855ba4b0f9c763e9764937db929a1839054642175312a3de2d3405c9d27bdf6505ef471ce85c5e015eee85bf7874b3d512f715de58d0794fd8afe021c197fbd385bb88a930342fac8da31c27166e2edab00fa55dc1c3814448ba38363077f4e8fe2bdea1c081f85f1aa6f02030100010282010028ff427a1aac1a470e7b4879601a6656193d3857ea79f33db74df61e14730e92bf9ffd78200efb0c40937c3356cbe049cd32e5f15be5c96d5febcaa9bd3484d7fded76a25062d282a3856a1b3b7d2c525cdd8434beae147628e21adf241dd64198d5819f310d033743915ba40ea0b6acdbd0533022ad6daa1ff42de51885f9e8bab2306c6ef1181902d1cd7709006eba1ab0587842b724e0519f295c24f6d848907f772ae9a0953fc931f4af16a07df450fb8bfa94572562437056613647818c238a6ff3f606cffa0533e4b8755da33418dfbc64a85110b1a036623c947400a536bb8df65e5ebe46f2dfd0cfc86e7aeeddd7574c253e8fbf755562b3669525d902818100f9fff30c6677b78dd31ec7a634361438457e80be7a7faf390903067ea8355faa78a1204a82b6e99cb7d9058d23c1ecf6cfe4a900137a00cecc0113fd68c5931602980267ea9a95d182d48ba0a6b4d5dd32fdac685cb2e5d8b42509b2eb59c9579ea6a67ccc7547427e2bd1fb1f23b0ccb4dd6ba7d206c8dd93253d70a451701302818100f5530dfef678d73ce6a401ae47043af10a2e3f224c71ae933035ecd68ccbc4df52d72bc6ca2b17e8faf3e548b483a2506c0369ab80df3b137b54d53fac98f95547c2bc245b416e650ce617e0d29db36066f1335a9ba02ad3e0edf9dc3d58fd835835042663edebce81803972696c789012847cb1f854ab2ac0a1bd3867ac7fb502818029c53010d456105f2bf52a9a8482bca2224a5eac74bf3cc1a4d5d291fafcdffd15a6a6448cce8efdd661f6617ca5fc37c8c885cc3374e109ac6049bcbf72b37eabf44602a2da2d4a1237fd145c863e6d75059976de762d9d258c42b0984e2a2befa01c95217c3ee9c736ff209c355466ff99375194eff943bc402ea1d172a1ed02818027175bf493bbbfb8719c12b47d967bf9eac061c90a5b5711172e909
5c38bb8cc493c063abffe4bea110b0a2f22ac9311b3947ba31b7ef6bfecf8209eebd6d86c316a2366bbafda7279b2b47d5bb24b6202254f249205dcad347b574433f6593733b806f84316276c1990a016ce1bbdbe5f650325acc7791aefe515ecc60063bd02818100b6a2077f4adcf15a17092d9c4a346d6022ac48f3861b73cf714f84c440a07419a7ce75a73b9cbff4597c53c128bf81e87b272d70428a272d99f90cd9b9ea1033298e108f919c6477400145a102df3fb5601ffc4588203cf710002517bfa24e6ad32f4d09c6b1a995fa28a3104131bedd9072f3b4fb4a5c2056232643d310453f").unwrap();
|
||||
Keypair::rsa_from_pkcs8(&mut rsa_key).unwrap()
|
||||
};
|
||||
TestKeypair(keypair)
|
||||
}
|
||||
|
||||
#[cfg(not(feature = "rsa"))]
|
||||
fn arbitrary(_g: &mut Gen) -> Self {
|
||||
// Small enough to be inlined.
|
||||
TestKeypair(Keypair::generate_ed25519())
|
||||
}
|
||||
}
|
||||
|
||||
impl std::fmt::Debug for TestKeypair {
|
||||
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
|
||||
f.debug_struct("TestKeypair")
|
||||
.field("public", &self.0.public())
|
||||
.finish()
|
||||
}
|
||||
}
|
||||
|
||||
#[test]
|
||||
/// Test that RPC messages can be encoded and decoded successfully.
|
||||
fn encode_decode() {
|
||||
fn prop(message: Message) {
|
||||
let message = message.0;
|
||||
|
||||
let rpc = crate::gossipsub::types::Rpc {
|
||||
messages: vec![message.clone()],
|
||||
subscriptions: vec![],
|
||||
control_msgs: vec![],
|
||||
};
|
||||
|
||||
let mut codec = GossipsubCodec::new(u32::MAX as usize, ValidationMode::Strict);
|
||||
let mut buf = BytesMut::new();
|
||||
codec.encode(rpc.into_protobuf(), &mut buf).unwrap();
|
||||
let decoded_rpc = codec.decode(&mut buf).unwrap().unwrap();
|
||||
// mark as validated as its a published message
|
||||
match decoded_rpc {
|
||||
HandlerEvent::Message { mut rpc, .. } => {
|
||||
rpc.messages[0].validated = true;
|
||||
|
||||
assert_eq!(vec![message], rpc.messages);
|
||||
}
|
||||
_ => panic!("Must decode a message"),
|
||||
}
|
||||
}
|
||||
|
||||
QuickCheck::new().quickcheck(prop as fn(_) -> _)
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn support_floodsub_with_custom_protocol() {
|
||||
let protocol_config = ConfigBuilder::default()
|
||||
.protocol_id("/foosub", Version::V1_1)
|
||||
.support_floodsub()
|
||||
.build()
|
||||
.unwrap()
|
||||
.protocol_config();
|
||||
|
||||
assert_eq!(protocol_config.protocol_ids[0].protocol, "/foosub");
|
||||
assert_eq!(protocol_config.protocol_ids[1].protocol, "/floodsub/1.0.0");
|
||||
}
|
||||
}
|
||||
92
beacon_node/lighthouse_network/src/gossipsub/rpc_proto.rs
Normal file
92
beacon_node/lighthouse_network/src/gossipsub/rpc_proto.rs
Normal file
@ -0,0 +1,92 @@
|
||||
// Copyright 2020 Sigma Prime Pty Ltd.
|
||||
//
|
||||
// Permission is hereby granted, free of charge, to any person obtaining a
|
||||
// copy of this software and associated documentation files (the "Software"),
|
||||
// to deal in the Software without restriction, including without limitation
|
||||
// the rights to use, copy, modify, merge, publish, distribute, sublicense,
|
||||
// and/or sell copies of the Software, and to permit persons to whom the
|
||||
// Software is furnished to do so, subject to the following conditions:
|
||||
//
|
||||
// The above copyright notice and this permission notice shall be included in
|
||||
// all copies or substantial portions of the Software.
|
||||
//
|
||||
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
|
||||
// OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
|
||||
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
||||
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
|
||||
// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
|
||||
// DEALINGS IN THE SOFTWARE.
|
||||
|
||||
pub(crate) mod proto {
|
||||
#![allow(unreachable_pub)]
|
||||
include!("generated/mod.rs");
|
||||
pub use self::gossipsub::pb::{mod_RPC::SubOpts, *};
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod test {
|
||||
use crate::gossipsub::rpc_proto::proto::compat;
|
||||
use crate::gossipsub::IdentTopic as Topic;
|
||||
use libp2p::identity::PeerId;
|
||||
use quick_protobuf::{BytesReader, MessageRead, MessageWrite, Writer};
|
||||
use rand::Rng;
|
||||
|
||||
#[test]
|
||||
fn test_multi_topic_message_compatibility() {
|
||||
let topic1 = Topic::new("t1").hash();
|
||||
let topic2 = Topic::new("t2").hash();
|
||||
|
||||
let new_message1 = super::proto::Message {
|
||||
from: Some(PeerId::random().to_bytes()),
|
||||
data: Some(rand::thread_rng().gen::<[u8; 32]>().to_vec()),
|
||||
seqno: Some(rand::thread_rng().gen::<[u8; 8]>().to_vec()),
|
||||
topic: topic1.clone().into_string(),
|
||||
signature: Some(rand::thread_rng().gen::<[u8; 32]>().to_vec()),
|
||||
key: Some(rand::thread_rng().gen::<[u8; 32]>().to_vec()),
|
||||
};
|
||||
let old_message1 = compat::pb::Message {
|
||||
from: Some(PeerId::random().to_bytes()),
|
||||
data: Some(rand::thread_rng().gen::<[u8; 32]>().to_vec()),
|
||||
seqno: Some(rand::thread_rng().gen::<[u8; 8]>().to_vec()),
|
||||
topic_ids: vec![topic1.clone().into_string()],
|
||||
signature: Some(rand::thread_rng().gen::<[u8; 32]>().to_vec()),
|
||||
key: Some(rand::thread_rng().gen::<[u8; 32]>().to_vec()),
|
||||
};
|
||||
let old_message2 = compat::pb::Message {
|
||||
from: Some(PeerId::random().to_bytes()),
|
||||
data: Some(rand::thread_rng().gen::<[u8; 32]>().to_vec()),
|
||||
seqno: Some(rand::thread_rng().gen::<[u8; 8]>().to_vec()),
|
||||
topic_ids: vec![topic1.clone().into_string(), topic2.clone().into_string()],
|
||||
signature: Some(rand::thread_rng().gen::<[u8; 32]>().to_vec()),
|
||||
key: Some(rand::thread_rng().gen::<[u8; 32]>().to_vec()),
|
||||
};
|
||||
|
||||
let mut new_message1b = Vec::with_capacity(new_message1.get_size());
|
||||
let mut writer = Writer::new(&mut new_message1b);
|
||||
new_message1.write_message(&mut writer).unwrap();
|
||||
|
||||
let mut old_message1b = Vec::with_capacity(old_message1.get_size());
|
||||
let mut writer = Writer::new(&mut old_message1b);
|
||||
old_message1.write_message(&mut writer).unwrap();
|
||||
|
||||
let mut old_message2b = Vec::with_capacity(old_message2.get_size());
|
||||
let mut writer = Writer::new(&mut old_message2b);
|
||||
old_message2.write_message(&mut writer).unwrap();
|
||||
|
||||
let mut reader = BytesReader::from_bytes(&old_message1b[..]);
|
||||
let new_message =
|
||||
super::proto::Message::from_reader(&mut reader, &old_message1b[..]).unwrap();
|
||||
assert_eq!(new_message.topic, topic1.clone().into_string());
|
||||
|
||||
let mut reader = BytesReader::from_bytes(&old_message2b[..]);
|
||||
let new_message =
|
||||
super::proto::Message::from_reader(&mut reader, &old_message2b[..]).unwrap();
|
||||
assert_eq!(new_message.topic, topic2.into_string());
|
||||
|
||||
let mut reader = BytesReader::from_bytes(&new_message1b[..]);
|
||||
let old_message =
|
||||
compat::pb::Message::from_reader(&mut reader, &new_message1b[..]).unwrap();
|
||||
assert_eq!(old_message.topic_ids, vec![topic1.into_string()]);
|
||||
}
|
||||
}
|
||||
@ -0,0 +1,436 @@
|
||||
// Copyright 2020 Sigma Prime Pty Ltd.
|
||||
//
|
||||
// Permission is hereby granted, free of charge, to any person obtaining a
|
||||
// copy of this software and associated documentation files (the "Software"),
|
||||
// to deal in the Software without restriction, including without limitation
|
||||
// the rights to use, copy, modify, merge, publish, distribute, sublicense,
|
||||
// and/or sell copies of the Software, and to permit persons to whom the
|
||||
// Software is furnished to do so, subject to the following conditions:
|
||||
//
|
||||
// The above copyright notice and this permission notice shall be included in
|
||||
// all copies or substantial portions of the Software.
|
||||
//
|
||||
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
|
||||
// OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
|
||||
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
||||
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
|
||||
// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
|
||||
// DEALINGS IN THE SOFTWARE.
|
||||
|
||||
use crate::gossipsub::types::Subscription;
|
||||
use crate::gossipsub::TopicHash;
|
||||
use std::collections::{BTreeSet, HashMap, HashSet};
|
||||
|
||||
pub trait TopicSubscriptionFilter {
|
||||
/// Returns true iff the topic is of interest and we can subscribe to it.
|
||||
fn can_subscribe(&mut self, topic_hash: &TopicHash) -> bool;
|
||||
|
||||
/// Filters a list of incoming subscriptions and returns a filtered set
|
||||
/// By default this deduplicates the subscriptions and calls
|
||||
/// [`Self::filter_incoming_subscription_set`] on the filtered set.
|
||||
fn filter_incoming_subscriptions<'a>(
|
||||
&mut self,
|
||||
subscriptions: &'a [Subscription],
|
||||
currently_subscribed_topics: &BTreeSet<TopicHash>,
|
||||
) -> Result<HashSet<&'a Subscription>, String> {
|
||||
let mut filtered_subscriptions: HashMap<TopicHash, &Subscription> = HashMap::new();
|
||||
for subscription in subscriptions {
|
||||
use std::collections::hash_map::Entry::*;
|
||||
match filtered_subscriptions.entry(subscription.topic_hash.clone()) {
|
||||
Occupied(entry) => {
|
||||
if entry.get().action != subscription.action {
|
||||
entry.remove();
|
||||
}
|
||||
}
|
||||
Vacant(entry) => {
|
||||
entry.insert(subscription);
|
||||
}
|
||||
}
|
||||
}
|
||||
self.filter_incoming_subscription_set(
|
||||
filtered_subscriptions.into_values().collect(),
|
||||
currently_subscribed_topics,
|
||||
)
|
||||
}
|
||||
|
||||
/// Filters a set of deduplicated subscriptions
|
||||
/// By default this filters the elements based on [`Self::allow_incoming_subscription`].
|
||||
fn filter_incoming_subscription_set<'a>(
|
||||
&mut self,
|
||||
mut subscriptions: HashSet<&'a Subscription>,
|
||||
_currently_subscribed_topics: &BTreeSet<TopicHash>,
|
||||
) -> Result<HashSet<&'a Subscription>, String> {
|
||||
subscriptions.retain(|s| {
|
||||
if self.allow_incoming_subscription(s) {
|
||||
true
|
||||
} else {
|
||||
tracing::debug!(subscription=?s, "Filtered incoming subscription");
|
||||
false
|
||||
}
|
||||
});
|
||||
Ok(subscriptions)
|
||||
}
|
||||
|
||||
/// Returns true iff we allow an incoming subscription.
|
||||
/// This is used by the default implementation of filter_incoming_subscription_set to decide
|
||||
/// whether to filter out a subscription or not.
|
||||
/// By default this uses can_subscribe to decide the same for incoming subscriptions as for
|
||||
/// outgoing ones.
|
||||
fn allow_incoming_subscription(&mut self, subscription: &Subscription) -> bool {
|
||||
self.can_subscribe(&subscription.topic_hash)
|
||||
}
|
||||
}
|
||||
|
||||
//some useful implementers
|
||||
|
||||
/// Allows all subscriptions
|
||||
#[derive(Default, Clone)]
|
||||
pub struct AllowAllSubscriptionFilter {}
|
||||
|
||||
impl TopicSubscriptionFilter for AllowAllSubscriptionFilter {
|
||||
fn can_subscribe(&mut self, _: &TopicHash) -> bool {
|
||||
true
|
||||
}
|
||||
}
|
||||
|
||||
/// Allows only whitelisted subscriptions
|
||||
#[derive(Default, Clone)]
|
||||
pub struct WhitelistSubscriptionFilter(pub HashSet<TopicHash>);
|
||||
|
||||
impl TopicSubscriptionFilter for WhitelistSubscriptionFilter {
|
||||
fn can_subscribe(&mut self, topic_hash: &TopicHash) -> bool {
|
||||
self.0.contains(topic_hash)
|
||||
}
|
||||
}
|
||||
|
||||
/// Adds a max count to a given subscription filter
|
||||
pub struct MaxCountSubscriptionFilter<T: TopicSubscriptionFilter> {
|
||||
pub filter: T,
|
||||
pub max_subscribed_topics: usize,
|
||||
pub max_subscriptions_per_request: usize,
|
||||
}
|
||||
|
||||
impl<T: TopicSubscriptionFilter> TopicSubscriptionFilter for MaxCountSubscriptionFilter<T> {
|
||||
fn can_subscribe(&mut self, topic_hash: &TopicHash) -> bool {
|
||||
self.filter.can_subscribe(topic_hash)
|
||||
}
|
||||
|
||||
fn filter_incoming_subscriptions<'a>(
|
||||
&mut self,
|
||||
subscriptions: &'a [Subscription],
|
||||
currently_subscribed_topics: &BTreeSet<TopicHash>,
|
||||
) -> Result<HashSet<&'a Subscription>, String> {
|
||||
if subscriptions.len() > self.max_subscriptions_per_request {
|
||||
return Err("too many subscriptions per request".into());
|
||||
}
|
||||
let result = self
|
||||
.filter
|
||||
.filter_incoming_subscriptions(subscriptions, currently_subscribed_topics)?;
|
||||
|
||||
use crate::gossipsub::types::SubscriptionAction::*;
|
||||
|
||||
let mut unsubscribed = 0;
|
||||
let mut new_subscribed = 0;
|
||||
for s in &result {
|
||||
let currently_contained = currently_subscribed_topics.contains(&s.topic_hash);
|
||||
match s.action {
|
||||
Unsubscribe => {
|
||||
if currently_contained {
|
||||
unsubscribed += 1;
|
||||
}
|
||||
}
|
||||
Subscribe => {
|
||||
if !currently_contained {
|
||||
new_subscribed += 1;
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if new_subscribed + currently_subscribed_topics.len()
|
||||
> self.max_subscribed_topics + unsubscribed
|
||||
{
|
||||
return Err("too many subscribed topics".into());
|
||||
}
|
||||
|
||||
Ok(result)
|
||||
}
|
||||
}
|
||||
|
||||
/// Combines two subscription filters
|
||||
pub struct CombinedSubscriptionFilters<T: TopicSubscriptionFilter, S: TopicSubscriptionFilter> {
|
||||
pub filter1: T,
|
||||
pub filter2: S,
|
||||
}
|
||||
|
||||
impl<T, S> TopicSubscriptionFilter for CombinedSubscriptionFilters<T, S>
|
||||
where
|
||||
T: TopicSubscriptionFilter,
|
||||
S: TopicSubscriptionFilter,
|
||||
{
|
||||
fn can_subscribe(&mut self, topic_hash: &TopicHash) -> bool {
|
||||
self.filter1.can_subscribe(topic_hash) && self.filter2.can_subscribe(topic_hash)
|
||||
}
|
||||
|
||||
fn filter_incoming_subscription_set<'a>(
|
||||
&mut self,
|
||||
subscriptions: HashSet<&'a Subscription>,
|
||||
currently_subscribed_topics: &BTreeSet<TopicHash>,
|
||||
) -> Result<HashSet<&'a Subscription>, String> {
|
||||
let intermediate = self
|
||||
.filter1
|
||||
.filter_incoming_subscription_set(subscriptions, currently_subscribed_topics)?;
|
||||
self.filter2
|
||||
.filter_incoming_subscription_set(intermediate, currently_subscribed_topics)
|
||||
}
|
||||
}
|
||||
|
||||
pub struct CallbackSubscriptionFilter<T>(pub T)
|
||||
where
|
||||
T: FnMut(&TopicHash) -> bool;
|
||||
|
||||
impl<T> TopicSubscriptionFilter for CallbackSubscriptionFilter<T>
|
||||
where
|
||||
T: FnMut(&TopicHash) -> bool,
|
||||
{
|
||||
fn can_subscribe(&mut self, topic_hash: &TopicHash) -> bool {
|
||||
(self.0)(topic_hash)
|
||||
}
|
||||
}
|
||||
|
||||
///A subscription filter that filters topics based on a regular expression.
|
||||
pub struct RegexSubscriptionFilter(pub regex::Regex);
|
||||
|
||||
impl TopicSubscriptionFilter for RegexSubscriptionFilter {
|
||||
fn can_subscribe(&mut self, topic_hash: &TopicHash) -> bool {
|
||||
self.0.is_match(topic_hash.as_str())
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod test {
|
||||
use super::*;
|
||||
use crate::gossipsub::types::SubscriptionAction::*;
|
||||
use std::iter::FromIterator;
|
||||
|
||||
#[test]
|
||||
fn test_filter_incoming_allow_all_with_duplicates() {
|
||||
let mut filter = AllowAllSubscriptionFilter {};
|
||||
|
||||
let t1 = TopicHash::from_raw("t1");
|
||||
let t2 = TopicHash::from_raw("t2");
|
||||
|
||||
let old = BTreeSet::from_iter(vec![t1.clone()]);
|
||||
let subscriptions = vec![
|
||||
Subscription {
|
||||
action: Unsubscribe,
|
||||
topic_hash: t1.clone(),
|
||||
},
|
||||
Subscription {
|
||||
action: Unsubscribe,
|
||||
topic_hash: t2.clone(),
|
||||
},
|
||||
Subscription {
|
||||
action: Subscribe,
|
||||
topic_hash: t2,
|
||||
},
|
||||
Subscription {
|
||||
action: Subscribe,
|
||||
topic_hash: t1.clone(),
|
||||
},
|
||||
Subscription {
|
||||
action: Unsubscribe,
|
||||
topic_hash: t1,
|
||||
},
|
||||
];
|
||||
|
||||
let result = filter
|
||||
.filter_incoming_subscriptions(&subscriptions, &old)
|
||||
.unwrap();
|
||||
assert_eq!(result, vec![&subscriptions[4]].into_iter().collect());
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_filter_incoming_whitelist() {
|
||||
let t1 = TopicHash::from_raw("t1");
|
||||
let t2 = TopicHash::from_raw("t2");
|
||||
|
||||
let mut filter = WhitelistSubscriptionFilter(HashSet::from_iter(vec![t1.clone()]));
|
||||
|
||||
let old = Default::default();
|
||||
let subscriptions = vec![
|
||||
Subscription {
|
||||
action: Subscribe,
|
||||
topic_hash: t1,
|
||||
},
|
||||
Subscription {
|
||||
action: Subscribe,
|
||||
topic_hash: t2,
|
||||
},
|
||||
];
|
||||
|
||||
let result = filter
|
||||
.filter_incoming_subscriptions(&subscriptions, &old)
|
||||
.unwrap();
|
||||
assert_eq!(result, vec![&subscriptions[0]].into_iter().collect());
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_filter_incoming_too_many_subscriptions_per_request() {
|
||||
let t1 = TopicHash::from_raw("t1");
|
||||
|
||||
let mut filter = MaxCountSubscriptionFilter {
|
||||
filter: AllowAllSubscriptionFilter {},
|
||||
max_subscribed_topics: 100,
|
||||
max_subscriptions_per_request: 2,
|
||||
};
|
||||
|
||||
let old = Default::default();
|
||||
|
||||
let subscriptions = vec![
|
||||
Subscription {
|
||||
action: Subscribe,
|
||||
topic_hash: t1.clone(),
|
||||
},
|
||||
Subscription {
|
||||
action: Unsubscribe,
|
||||
topic_hash: t1.clone(),
|
||||
},
|
||||
Subscription {
|
||||
action: Subscribe,
|
||||
topic_hash: t1,
|
||||
},
|
||||
];
|
||||
|
||||
let result = filter.filter_incoming_subscriptions(&subscriptions, &old);
|
||||
assert_eq!(result, Err("too many subscriptions per request".into()));
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_filter_incoming_too_many_subscriptions() {
|
||||
let t: Vec<_> = (0..4)
|
||||
.map(|i| TopicHash::from_raw(format!("t{i}")))
|
||||
.collect();
|
||||
|
||||
let mut filter = MaxCountSubscriptionFilter {
|
||||
filter: AllowAllSubscriptionFilter {},
|
||||
max_subscribed_topics: 3,
|
||||
max_subscriptions_per_request: 2,
|
||||
};
|
||||
|
||||
let old = t[0..2].iter().cloned().collect();
|
||||
|
||||
let subscriptions = vec![
|
||||
Subscription {
|
||||
action: Subscribe,
|
||||
topic_hash: t[2].clone(),
|
||||
},
|
||||
Subscription {
|
||||
action: Subscribe,
|
||||
topic_hash: t[3].clone(),
|
||||
},
|
||||
];
|
||||
|
||||
let result = filter.filter_incoming_subscriptions(&subscriptions, &old);
|
||||
assert_eq!(result, Err("too many subscribed topics".into()));
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_filter_incoming_max_subscribed_valid() {
|
||||
let t: Vec<_> = (0..5)
|
||||
.map(|i| TopicHash::from_raw(format!("t{i}")))
|
||||
.collect();
|
||||
|
||||
let mut filter = MaxCountSubscriptionFilter {
|
||||
filter: WhitelistSubscriptionFilter(t.iter().take(4).cloned().collect()),
|
||||
max_subscribed_topics: 2,
|
||||
max_subscriptions_per_request: 5,
|
||||
};
|
||||
|
||||
let old = t[0..2].iter().cloned().collect();
|
||||
|
||||
let subscriptions = vec![
|
||||
Subscription {
|
||||
action: Subscribe,
|
||||
topic_hash: t[4].clone(),
|
||||
},
|
||||
Subscription {
|
||||
action: Subscribe,
|
||||
topic_hash: t[2].clone(),
|
||||
},
|
||||
Subscription {
|
||||
action: Subscribe,
|
||||
topic_hash: t[3].clone(),
|
||||
},
|
||||
Subscription {
|
||||
action: Unsubscribe,
|
||||
topic_hash: t[0].clone(),
|
||||
},
|
||||
Subscription {
|
||||
action: Unsubscribe,
|
||||
topic_hash: t[1].clone(),
|
||||
},
|
||||
];
|
||||
|
||||
let result = filter
|
||||
.filter_incoming_subscriptions(&subscriptions, &old)
|
||||
.unwrap();
|
||||
assert_eq!(result, subscriptions[1..].iter().collect());
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_callback_filter() {
|
||||
let t1 = TopicHash::from_raw("t1");
|
||||
let t2 = TopicHash::from_raw("t2");
|
||||
|
||||
let mut filter = CallbackSubscriptionFilter(|h| h.as_str() == "t1");
|
||||
|
||||
let old = Default::default();
|
||||
let subscriptions = vec![
|
||||
Subscription {
|
||||
action: Subscribe,
|
||||
topic_hash: t1,
|
||||
},
|
||||
Subscription {
|
||||
action: Subscribe,
|
||||
topic_hash: t2,
|
||||
},
|
||||
];
|
||||
|
||||
let result = filter
|
||||
.filter_incoming_subscriptions(&subscriptions, &old)
|
||||
.unwrap();
|
||||
assert_eq!(result, vec![&subscriptions[0]].into_iter().collect());
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_regex_subscription_filter() {
|
||||
let t1 = TopicHash::from_raw("tt");
|
||||
let t2 = TopicHash::from_raw("et3t3te");
|
||||
let t3 = TopicHash::from_raw("abcdefghijklmnopqrsuvwxyz");
|
||||
|
||||
let mut filter = RegexSubscriptionFilter(regex::Regex::new("t.*t").unwrap());
|
||||
|
||||
let old = Default::default();
|
||||
let subscriptions = vec![
|
||||
Subscription {
|
||||
action: Subscribe,
|
||||
topic_hash: t1,
|
||||
},
|
||||
Subscription {
|
||||
action: Subscribe,
|
||||
topic_hash: t2,
|
||||
},
|
||||
Subscription {
|
||||
action: Subscribe,
|
||||
topic_hash: t3,
|
||||
},
|
||||
];
|
||||
|
||||
let result = filter
|
||||
.filter_incoming_subscriptions(&subscriptions, &old)
|
||||
.unwrap();
|
||||
assert_eq!(result, subscriptions[..2].iter().collect());
|
||||
}
|
||||
}
|
||||
219
beacon_node/lighthouse_network/src/gossipsub/time_cache.rs
Normal file
219
beacon_node/lighthouse_network/src/gossipsub/time_cache.rs
Normal file
@ -0,0 +1,219 @@
|
||||
// Copyright 2020 Sigma Prime Pty Ltd.
|
||||
//
|
||||
// Permission is hereby granted, free of charge, to any person obtaining a
|
||||
// copy of this software and associated documentation files (the "Software"),
|
||||
// to deal in the Software without restriction, including without limitation
|
||||
// the rights to use, copy, modify, merge, publish, distribute, sublicense,
|
||||
// and/or sell copies of the Software, and to permit persons to whom the
|
||||
// Software is furnished to do so, subject to the following conditions:
|
||||
//
|
||||
// The above copyright notice and this permission notice shall be included in
|
||||
// all copies or substantial portions of the Software.
|
||||
//
|
||||
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
|
||||
// OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
|
||||
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
||||
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
|
||||
// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
|
||||
// DEALINGS IN THE SOFTWARE.
|
||||
|
||||
//! This implements a time-based LRU cache for checking gossipsub message duplicates.
|
||||
|
||||
use fnv::FnvHashMap;
|
||||
use instant::Instant;
|
||||
use std::collections::hash_map::{
|
||||
self,
|
||||
Entry::{Occupied, Vacant},
|
||||
};
|
||||
use std::collections::VecDeque;
|
||||
use std::time::Duration;
|
||||
|
||||
struct ExpiringElement<Element> {
|
||||
/// The element that expires
|
||||
element: Element,
|
||||
/// The expire time.
|
||||
expires: Instant,
|
||||
}
|
||||
|
||||
pub(crate) struct TimeCache<Key, Value> {
|
||||
/// Mapping a key to its value together with its latest expire time (can be updated through
|
||||
/// reinserts).
|
||||
map: FnvHashMap<Key, ExpiringElement<Value>>,
|
||||
/// An ordered list of keys by expires time.
|
||||
list: VecDeque<ExpiringElement<Key>>,
|
||||
/// The time elements remain in the cache.
|
||||
ttl: Duration,
|
||||
}
|
||||
|
||||
pub(crate) struct OccupiedEntry<'a, K, V> {
|
||||
entry: hash_map::OccupiedEntry<'a, K, ExpiringElement<V>>,
|
||||
}
|
||||
|
||||
impl<'a, K, V> OccupiedEntry<'a, K, V>
|
||||
where
|
||||
K: Eq + std::hash::Hash + Clone,
|
||||
{
|
||||
pub(crate) fn into_mut(self) -> &'a mut V {
|
||||
&mut self.entry.into_mut().element
|
||||
}
|
||||
}
|
||||
|
||||
pub(crate) struct VacantEntry<'a, K, V> {
|
||||
expiration: Instant,
|
||||
entry: hash_map::VacantEntry<'a, K, ExpiringElement<V>>,
|
||||
list: &'a mut VecDeque<ExpiringElement<K>>,
|
||||
}
|
||||
|
||||
impl<'a, K, V> VacantEntry<'a, K, V>
|
||||
where
|
||||
K: Eq + std::hash::Hash + Clone,
|
||||
{
|
||||
pub(crate) fn insert(self, value: V) -> &'a mut V {
|
||||
self.list.push_back(ExpiringElement {
|
||||
element: self.entry.key().clone(),
|
||||
expires: self.expiration,
|
||||
});
|
||||
&mut self
|
||||
.entry
|
||||
.insert(ExpiringElement {
|
||||
element: value,
|
||||
expires: self.expiration,
|
||||
})
|
||||
.element
|
||||
}
|
||||
}
|
||||
|
||||
pub(crate) enum Entry<'a, K: 'a, V: 'a> {
|
||||
Occupied(OccupiedEntry<'a, K, V>),
|
||||
Vacant(VacantEntry<'a, K, V>),
|
||||
}
|
||||
|
||||
impl<'a, K: 'a, V: 'a> Entry<'a, K, V>
|
||||
where
|
||||
K: Eq + std::hash::Hash + Clone,
|
||||
{
|
||||
pub(crate) fn or_default(self) -> &'a mut V
|
||||
where
|
||||
V: Default,
|
||||
{
|
||||
match self {
|
||||
Entry::Occupied(entry) => entry.into_mut(),
|
||||
Entry::Vacant(entry) => entry.insert(V::default()),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl<Key, Value> TimeCache<Key, Value>
|
||||
where
|
||||
Key: Eq + std::hash::Hash + Clone,
|
||||
{
|
||||
pub(crate) fn new(ttl: Duration) -> Self {
|
||||
TimeCache {
|
||||
map: FnvHashMap::default(),
|
||||
list: VecDeque::new(),
|
||||
ttl,
|
||||
}
|
||||
}
|
||||
|
||||
fn remove_expired_keys(&mut self, now: Instant) {
|
||||
while let Some(element) = self.list.pop_front() {
|
||||
if element.expires > now {
|
||||
self.list.push_front(element);
|
||||
break;
|
||||
}
|
||||
if let Occupied(entry) = self.map.entry(element.element.clone()) {
|
||||
if entry.get().expires <= now {
|
||||
entry.remove();
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
pub(crate) fn entry(&mut self, key: Key) -> Entry<Key, Value> {
|
||||
let now = Instant::now();
|
||||
self.remove_expired_keys(now);
|
||||
match self.map.entry(key) {
|
||||
Occupied(entry) => Entry::Occupied(OccupiedEntry { entry }),
|
||||
Vacant(entry) => Entry::Vacant(VacantEntry {
|
||||
expiration: now + self.ttl,
|
||||
entry,
|
||||
list: &mut self.list,
|
||||
}),
|
||||
}
|
||||
}
|
||||
|
||||
/// Empties the entire cache.
|
||||
#[cfg(test)]
|
||||
pub(crate) fn clear(&mut self) {
|
||||
self.map.clear();
|
||||
self.list.clear();
|
||||
}
|
||||
|
||||
pub(crate) fn contains_key(&self, key: &Key) -> bool {
|
||||
self.map.contains_key(key)
|
||||
}
|
||||
}
|
||||
|
||||
pub(crate) struct DuplicateCache<Key>(TimeCache<Key, ()>);
|
||||
|
||||
impl<Key> DuplicateCache<Key>
|
||||
where
|
||||
Key: Eq + std::hash::Hash + Clone,
|
||||
{
|
||||
pub(crate) fn new(ttl: Duration) -> Self {
|
||||
Self(TimeCache::new(ttl))
|
||||
}
|
||||
|
||||
// Inserts new elements and removes any expired elements.
|
||||
//
|
||||
// If the key was not present this returns `true`. If the value was already present this
|
||||
// returns `false`.
|
||||
pub(crate) fn insert(&mut self, key: Key) -> bool {
|
||||
if let Entry::Vacant(entry) = self.0.entry(key) {
|
||||
entry.insert(());
|
||||
true
|
||||
} else {
|
||||
false
|
||||
}
|
||||
}
|
||||
|
||||
pub(crate) fn contains(&self, key: &Key) -> bool {
|
||||
self.0.contains_key(key)
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod test {
|
||||
use super::*;
|
||||
|
||||
#[test]
|
||||
fn cache_added_entries_exist() {
|
||||
let mut cache = DuplicateCache::new(Duration::from_secs(10));
|
||||
|
||||
cache.insert("t");
|
||||
cache.insert("e");
|
||||
|
||||
// Should report that 't' and 't' already exists
|
||||
assert!(!cache.insert("t"));
|
||||
assert!(!cache.insert("e"));
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn cache_entries_expire() {
|
||||
let mut cache = DuplicateCache::new(Duration::from_millis(100));
|
||||
|
||||
cache.insert("t");
|
||||
assert!(!cache.insert("t"));
|
||||
cache.insert("e");
|
||||
//assert!(!cache.insert("t"));
|
||||
assert!(!cache.insert("e"));
|
||||
// sleep until cache expiry
|
||||
std::thread::sleep(Duration::from_millis(101));
|
||||
// add another element to clear previous cache
|
||||
cache.insert("s");
|
||||
|
||||
// should be removed from the cache
|
||||
assert!(cache.insert("t"));
|
||||
}
|
||||
}
|
||||
123
beacon_node/lighthouse_network/src/gossipsub/topic.rs
Normal file
123
beacon_node/lighthouse_network/src/gossipsub/topic.rs
Normal file
@ -0,0 +1,123 @@
|
||||
// Copyright 2020 Sigma Prime Pty Ltd.
|
||||
//
|
||||
// Permission is hereby granted, free of charge, to any person obtaining a
|
||||
// copy of this software and associated documentation files (the "Software"),
|
||||
// to deal in the Software without restriction, including without limitation
|
||||
// the rights to use, copy, modify, merge, publish, distribute, sublicense,
|
||||
// and/or sell copies of the Software, and to permit persons to whom the
|
||||
// Software is furnished to do so, subject to the following conditions:
|
||||
//
|
||||
// The above copyright notice and this permission notice shall be included in
|
||||
// all copies or substantial portions of the Software.
|
||||
//
|
||||
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
|
||||
// OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
|
||||
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
||||
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
|
||||
// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
|
||||
// DEALINGS IN THE SOFTWARE.
|
||||
|
||||
use crate::gossipsub::rpc_proto::proto;
|
||||
use base64::prelude::*;
|
||||
use prometheus_client::encoding::EncodeLabelSet;
|
||||
use quick_protobuf::Writer;
|
||||
use sha2::{Digest, Sha256};
|
||||
use std::fmt;
|
||||
|
||||
/// A generic trait that can be extended for various hashing types for a topic.
///
/// Implementors define how a human-readable topic string is mapped to the
/// [`TopicHash`] that is used on the wire.
pub trait Hasher {
    /// The function that takes a topic string and creates a topic hash.
    fn hash(topic_string: String) -> TopicHash;
}
|
||||
|
||||
/// A type for representing topics who use the identity hash.
|
||||
#[derive(Debug, Clone)]
|
||||
pub struct IdentityHash {}
|
||||
impl Hasher for IdentityHash {
|
||||
/// Creates a [`TopicHash`] as a raw string.
|
||||
fn hash(topic_string: String) -> TopicHash {
|
||||
TopicHash { hash: topic_string }
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Debug, Clone)]
|
||||
pub struct Sha256Hash {}
|
||||
impl Hasher for Sha256Hash {
|
||||
/// Creates a [`TopicHash`] by SHA256 hashing the topic then base64 encoding the
|
||||
/// hash.
|
||||
fn hash(topic_string: String) -> TopicHash {
|
||||
use quick_protobuf::MessageWrite;
|
||||
|
||||
let topic_descripter = proto::TopicDescriptor {
|
||||
name: Some(topic_string),
|
||||
auth: None,
|
||||
enc: None,
|
||||
};
|
||||
let mut bytes = Vec::with_capacity(topic_descripter.get_size());
|
||||
let mut writer = Writer::new(&mut bytes);
|
||||
topic_descripter
|
||||
.write_message(&mut writer)
|
||||
.expect("Encoding to succeed");
|
||||
let hash = BASE64_STANDARD.encode(Sha256::digest(&bytes));
|
||||
TopicHash { hash }
|
||||
}
|
||||
}
|
||||
|
||||
/// The wire representation of a topic, produced by a [`Hasher`].
#[derive(Debug, Clone, PartialEq, Eq, Hash, PartialOrd, Ord, EncodeLabelSet)]
pub struct TopicHash {
    /// The topic hash. Stored as a string to align with the protobuf API.
    hash: String,
}

impl TopicHash {
    /// Wraps an already-computed hash string without applying any hasher.
    pub fn from_raw(hash: impl Into<String>) -> TopicHash {
        TopicHash { hash: hash.into() }
    }

    /// Consumes the hash, returning the underlying string.
    pub fn into_string(self) -> String {
        self.hash
    }

    /// Borrows the hash as a string slice.
    pub fn as_str(&self) -> &str {
        &self.hash
    }
}

/// A gossipsub topic.
///
/// Generic over the [`Hasher`] `H` that maps the topic string to its
/// [`TopicHash`]; `H` is only a type-level marker (see `phantom_data`).
#[derive(Debug, Clone, PartialEq, Eq, PartialOrd, Ord)]
pub struct Topic<H: Hasher> {
    topic: String,
    // Zero-sized marker tying this topic to its hasher type H.
    phantom_data: std::marker::PhantomData<H>,
}

impl<H: Hasher> From<Topic<H>> for TopicHash {
    fn from(topic: Topic<H>) -> TopicHash {
        topic.hash()
    }
}

impl<H: Hasher> Topic<H> {
    /// Creates a new topic from any string-like value.
    pub fn new(topic: impl Into<String>) -> Self {
        Topic {
            topic: topic.into(),
            phantom_data: std::marker::PhantomData,
        }
    }

    /// Computes the [`TopicHash`] using the hasher `H`.
    // Clones the topic string because Hasher::hash takes ownership.
    pub fn hash(&self) -> TopicHash {
        H::hash(self.topic.clone())
    }
}
|
||||
|
||||
impl<H: Hasher> fmt::Display for Topic<H> {
    /// Displays the raw (un-hashed) topic string.
    // `Formatter<'_>` uses the elided lifetime explicitly, matching the
    // `Display for TopicHash` impl in this file and the
    // `elided_lifetimes_in_paths` idiom; behavior is unchanged.
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        write!(f, "{}", self.topic)
    }
}
|
||||
|
||||
impl fmt::Display for TopicHash {
|
||||
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
|
||||
write!(f, "{}", self.hash)
|
||||
}
|
||||
}
|
||||
72
beacon_node/lighthouse_network/src/gossipsub/transform.rs
Normal file
72
beacon_node/lighthouse_network/src/gossipsub/transform.rs
Normal file
@ -0,0 +1,72 @@
|
||||
// Copyright 2020 Sigma Prime Pty Ltd.
|
||||
//
|
||||
// Permission is hereby granted, free of charge, to any person obtaining a
|
||||
// copy of this software and associated documentation files (the "Software"),
|
||||
// to deal in the Software without restriction, including without limitation
|
||||
// the rights to use, copy, modify, merge, publish, distribute, sublicense,
|
||||
// and/or sell copies of the Software, and to permit persons to whom the
|
||||
// Software is furnished to do so, subject to the following conditions:
|
||||
//
|
||||
// The above copyright notice and this permission notice shall be included in
|
||||
// all copies or substantial portions of the Software.
|
||||
//
|
||||
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
|
||||
// OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
|
||||
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
||||
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
|
||||
// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
|
||||
// DEALINGS IN THE SOFTWARE.
|
||||
|
||||
//! This trait allows of extended user-level decoding that can apply to message-data before a
|
||||
//! message-id is calculated.
|
||||
//!
|
||||
//! This is primarily designed to allow applications to implement their own custom compression
|
||||
//! algorithms that can be topic-specific. Once the raw data is transformed the message-id is then
|
||||
//! calculated, allowing for applications to employ message-id functions post compression.
|
||||
|
||||
use crate::gossipsub::{Message, RawMessage, TopicHash};
|
||||
|
||||
/// A general trait of transforming a [`RawMessage`] into a [`Message`]. The
/// [`RawMessage`] is obtained from the wire and the [`Message`] is used to
/// calculate the [`crate::gossipsub::MessageId`] of the message and is what is sent to the application.
///
/// The inbound/outbound transforms must be inverses. Applying the inbound transform and then the
/// outbound transform MUST leave the underlying data un-modified.
///
/// By default, this is the identity transform for all fields in [`Message`].
pub trait DataTransform {
    /// Takes a [`RawMessage`] received and converts it to a [`Message`].
    ///
    /// Returns an `std::io::Error` if the raw data cannot be decoded.
    fn inbound_transform(&self, raw_message: RawMessage) -> Result<Message, std::io::Error>;

    /// Takes the data to be published (a topic and associated data) transforms the data. The
    /// transformed data will then be used to create a [`crate::gossipsub::RawMessage`] to be sent to peers.
    ///
    /// The topic is provided so transforms (e.g. compression) can be
    /// topic-specific.
    fn outbound_transform(
        &self,
        topic: &TopicHash,
        data: Vec<u8>,
    ) -> Result<Vec<u8>, std::io::Error>;
}
|
||||
|
||||
/// The default transform, the raw data is propagated as is to the application layer gossipsub.
|
||||
#[derive(Default, Clone)]
|
||||
pub struct IdentityTransform;
|
||||
|
||||
impl DataTransform for IdentityTransform {
|
||||
fn inbound_transform(&self, raw_message: RawMessage) -> Result<Message, std::io::Error> {
|
||||
Ok(Message {
|
||||
source: raw_message.source,
|
||||
data: raw_message.data,
|
||||
sequence_number: raw_message.sequence_number,
|
||||
topic: raw_message.topic,
|
||||
})
|
||||
}
|
||||
|
||||
fn outbound_transform(
|
||||
&self,
|
||||
_topic: &TopicHash,
|
||||
data: Vec<u8>,
|
||||
) -> Result<Vec<u8>, std::io::Error> {
|
||||
Ok(data)
|
||||
}
|
||||
}
|
||||
818
beacon_node/lighthouse_network/src/gossipsub/types.rs
Normal file
818
beacon_node/lighthouse_network/src/gossipsub/types.rs
Normal file
@ -0,0 +1,818 @@
|
||||
// Copyright 2020 Sigma Prime Pty Ltd.
|
||||
//
|
||||
// Permission is hereby granted, free of charge, to any person obtaining a
|
||||
// copy of this software and associated documentation files (the "Software"),
|
||||
// to deal in the Software without restriction, including without limitation
|
||||
// the rights to use, copy, modify, merge, publish, distribute, sublicense,
|
||||
// and/or sell copies of the Software, and to permit persons to whom the
|
||||
// Software is furnished to do so, subject to the following conditions:
|
||||
//
|
||||
// The above copyright notice and this permission notice shall be included in
|
||||
// all copies or substantial portions of the Software.
|
||||
//
|
||||
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
|
||||
// OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
|
||||
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
||||
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
|
||||
// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
|
||||
// DEALINGS IN THE SOFTWARE.
|
||||
|
||||
//! A collection of types using the Gossipsub system.
|
||||
use crate::gossipsub::metrics::Metrics;
|
||||
use crate::gossipsub::TopicHash;
|
||||
use async_channel::{Receiver, Sender};
|
||||
use futures::stream::Peekable;
|
||||
use futures::{Future, Stream, StreamExt};
|
||||
use futures_timer::Delay;
|
||||
use instant::Duration;
|
||||
use libp2p::identity::PeerId;
|
||||
use libp2p::swarm::ConnectionId;
|
||||
use prometheus_client::encoding::EncodeLabelValue;
|
||||
use quick_protobuf::MessageWrite;
|
||||
use std::collections::BTreeSet;
|
||||
use std::fmt::Debug;
|
||||
use std::sync::atomic::{AtomicUsize, Ordering};
|
||||
use std::sync::Arc;
|
||||
use std::task::{Context, Poll};
|
||||
use std::{fmt, pin::Pin};
|
||||
|
||||
use crate::gossipsub::rpc_proto::proto;
|
||||
#[cfg(feature = "serde")]
|
||||
use serde::{Deserialize, Serialize};
|
||||
|
||||
/// Per-heartbeat counters for messages that could not be sent to a peer.
#[derive(Clone, Debug, Default)]
pub struct FailedMessages {
    /// The number of publish messages that failed to be published in a heartbeat.
    pub publish: usize,
    /// The number of forward messages that failed to be published in a heartbeat.
    pub forward: usize,
    /// The number of messages that were failed to be sent to the priority queue as it was full.
    pub priority: usize,
    /// The number of messages that were failed to be sent to the non-priority queue as it was full.
    pub non_priority: usize,
}

impl FailedMessages {
    /// Messages lost to send timeouts (publish + forward).
    pub fn total_timeout(&self) -> usize {
        self.forward + self.publish
    }

    /// Messages lost because a queue was full (priority + non-priority).
    pub fn total_queue_full(&self) -> usize {
        self.non_priority + self.priority
    }

    /// All messages that failed in a heartbeat, for any reason.
    pub fn total(&self) -> usize {
        self.publish + self.forward + self.priority + self.non_priority
    }
}
|
||||
|
||||
#[derive(Debug)]
/// Validation kinds from the application for received messages.
pub enum MessageAcceptance {
    /// The message is considered valid, and it should be delivered and forwarded to the network.
    Accept,
    /// The message is considered invalid, and it should be rejected and trigger the P₄ penalty.
    Reject,
    /// The message is neither delivered nor forwarded to the network, but the router does not
    /// trigger the P₄ penalty.
    Ignore,
}

/// Opaque identifier of a gossipsub message, as raw bytes.
#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))]
#[derive(Clone, PartialEq, Eq, Hash, PartialOrd, Ord)]
pub struct MessageId(pub Vec<u8>);

impl MessageId {
    /// Creates a message id by copying the given bytes.
    pub fn new(value: &[u8]) -> Self {
        Self(value.to_vec())
    }
}

impl<T: Into<Vec<u8>>> From<T> for MessageId {
    fn from(value: T) -> Self {
        Self(value.into())
    }
}

impl std::fmt::Display for MessageId {
    // Renders the id bytes as lowercase hex.
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        write!(f, "{}", hex_fmt::HexFmt(&self.0))
    }
}

impl std::fmt::Debug for MessageId {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        write!(f, "MessageId({})", hex_fmt::HexFmt(&self.0))
    }
}

/// Per-peer connection state tracked by the behaviour.
#[derive(Debug, Clone)]
pub(crate) struct PeerConnections {
    /// The kind of protocol the peer supports.
    pub(crate) kind: PeerKind,
    /// Its current connections.
    pub(crate) connections: Vec<ConnectionId>,
    /// The rpc sender to the peer.
    pub(crate) sender: RpcSender,
    /// Subscribed topics.
    pub(crate) topics: BTreeSet<TopicHash>,
}

/// Describes the types of peers that can exist in the gossipsub context.
#[derive(Debug, Clone, PartialEq, Hash, EncodeLabelValue, Eq)]
pub enum PeerKind {
    /// A gossipsub 1.1 peer.
    Gossipsubv1_1,
    /// A gossipsub 1.0 peer.
    Gossipsub,
    /// A floodsub peer.
    Floodsub,
    /// The peer doesn't support any of the protocols.
    NotSupported,
}

/// A message received by the gossipsub system and stored locally in caches.
#[derive(Clone, PartialEq, Eq, Hash, Debug)]
pub struct RawMessage {
    /// Id of the peer that published this message.
    pub source: Option<PeerId>,

    /// Content of the message. Its meaning is out of scope of this library.
    pub data: Vec<u8>,

    /// A random sequence number.
    pub sequence_number: Option<u64>,

    /// The topic this message belongs to.
    pub topic: TopicHash,

    /// The signature of the message if it's signed.
    pub signature: Option<Vec<u8>>,

    /// The public key of the message if it is signed and the source [`PeerId`] cannot be inlined.
    pub key: Option<Vec<u8>>,

    /// Flag indicating if this message has been validated by the application or not.
    pub validated: bool,
}

impl RawMessage {
    /// Calculates the encoded length of this message (used for calculating metrics).
    // Builds a throwaway protobuf Message (cloning data/signature/key) and
    // asks quick-protobuf for its serialized size.
    pub fn raw_protobuf_len(&self) -> usize {
        let message = proto::Message {
            from: self.source.map(|m| m.to_bytes()),
            data: Some(self.data.clone()),
            seqno: self.sequence_number.map(|s| s.to_be_bytes().to_vec()),
            topic: TopicHash::into_string(self.topic.clone()),
            signature: self.signature.clone(),
            key: self.key.clone(),
        };
        message.get_size()
    }
}
|
||||
|
||||
impl From<RawMessage> for proto::Message {
|
||||
fn from(raw: RawMessage) -> Self {
|
||||
proto::Message {
|
||||
from: raw.source.map(|m| m.to_bytes()),
|
||||
data: Some(raw.data),
|
||||
seqno: raw.sequence_number.map(|s| s.to_be_bytes().to_vec()),
|
||||
topic: TopicHash::into_string(raw.topic),
|
||||
signature: raw.signature,
|
||||
key: raw.key,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// The message sent to the user after a [`RawMessage`] has been transformed by a
/// [`crate::gossipsub::DataTransform`].
///
/// Unlike [`RawMessage`], this carries no signature/key/validation state.
#[derive(Clone, PartialEq, Eq, Hash)]
pub struct Message {
    /// Id of the peer that published this message.
    pub source: Option<PeerId>,

    /// Content of the message.
    pub data: Vec<u8>,

    /// A random sequence number.
    pub sequence_number: Option<u64>,

    /// The topic this message belongs to.
    pub topic: TopicHash,
}

impl fmt::Debug for Message {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        f.debug_struct("Message")
            .field(
                "data",
                // Hex-encode the payload; `{:<20}` left-pads short payloads
                // to 20 columns (it does not truncate long ones).
                &format_args!("{:<20}", &hex_fmt::HexFmt(&self.data)),
            )
            .field("source", &self.source)
            .field("sequence_number", &self.sequence_number)
            .field("topic", &self.topic)
            .finish()
    }
}
|
||||
|
||||
/// A subscription received by the gossipsub system.
#[derive(Debug, Clone, PartialEq, Eq, Hash)]
pub struct Subscription {
    /// Action to perform.
    pub action: SubscriptionAction,
    /// The topic from which to subscribe or unsubscribe.
    pub topic_hash: TopicHash,
}

/// Action that a subscription wants to perform.
#[derive(Debug, Clone, PartialEq, Eq, Hash)]
pub enum SubscriptionAction {
    /// The remote wants to subscribe to the given topic.
    Subscribe,
    /// The remote wants to unsubscribe from the given topic.
    Unsubscribe,
}

/// Peer information attached to PRUNE messages for peer exchange.
#[derive(Debug, Clone, PartialEq, Eq, Hash)]
pub(crate) struct PeerInfo {
    pub(crate) peer_id: Option<PeerId>,
    //TODO add this when RFC: Signed Address Records got added to the spec (see pull request
    // https://github.com/libp2p/specs/pull/217)
    //pub signed_peer_record: ?,
}

/// A Control message received by the gossipsub system.
#[derive(Debug, Clone, PartialEq, Eq, Hash)]
pub enum ControlAction {
    /// Node broadcasts known messages per topic - IHave control message.
    IHave(IHave),
    /// The node requests specific message ids (peer_id + sequence _number) - IWant control message.
    IWant(IWant),
    /// The node has been added to the mesh - Graft control message.
    Graft(Graft),
    /// The node has been removed from the mesh - Prune control message.
    Prune(Prune),
}

/// Node broadcasts known messages per topic - IHave control message.
#[derive(Debug, Clone, PartialEq, Eq, Hash)]
pub struct IHave {
    /// The topic of the messages.
    pub(crate) topic_hash: TopicHash,
    /// A list of known message ids (peer_id + sequence _number) as a string.
    pub(crate) message_ids: Vec<MessageId>,
}

/// The node requests specific message ids (peer_id + sequence _number) - IWant control message.
#[derive(Debug, Clone, PartialEq, Eq, Hash)]
pub struct IWant {
    /// A list of known message ids (peer_id + sequence _number) as a string.
    pub(crate) message_ids: Vec<MessageId>,
}

/// The node has been added to the mesh - Graft control message.
#[derive(Debug, Clone, PartialEq, Eq, Hash)]
pub struct Graft {
    /// The mesh topic the peer should be added to.
    pub(crate) topic_hash: TopicHash,
}

/// The node has been removed from the mesh - Prune control message.
#[derive(Debug, Clone, PartialEq, Eq, Hash)]
pub struct Prune {
    /// The mesh topic the peer should be removed from.
    pub(crate) topic_hash: TopicHash,
    /// A list of peers to be proposed to the removed peer as peer exchange.
    pub(crate) peers: Vec<PeerInfo>,
    /// The backoff time in seconds before we allow to reconnect.
    pub(crate) backoff: Option<u64>,
}
|
||||
|
||||
/// A Gossipsub RPC message sent.
#[derive(Debug)]
pub enum RpcOut {
    /// Publish a Gossipsub message on network. The [`Delay`] tags the time we attempted to
    /// send it.
    Publish { message: RawMessage, timeout: Delay },
    /// Forward a Gossipsub message to the network. The [`Delay`] tags the time we attempted to
    /// send it.
    Forward { message: RawMessage, timeout: Delay },
    /// Subscribe a topic.
    Subscribe(TopicHash),
    /// Unsubscribe a topic.
    Unsubscribe(TopicHash),
    /// Send a GRAFT control message.
    Graft(Graft),
    /// Send a PRUNE control message.
    Prune(Prune),
    /// Send a IHave control message.
    IHave(IHave),
    /// Send a IWant control message.
    IWant(IWant),
}
|
||||
|
||||
impl RpcOut {
|
||||
/// Converts the GossipsubRPC into its protobuf format.
|
||||
// A convenience function to avoid explicitly specifying types.
|
||||
pub fn into_protobuf(self) -> proto::RPC {
|
||||
self.into()
|
||||
}
|
||||
}
|
||||
|
||||
impl From<RpcOut> for proto::RPC {
    /// Converts the RPC into protobuf format.
    ///
    /// Each `RpcOut` variant maps to a `proto::RPC` carrying exactly one of:
    /// a published message, a subscription change, or one control entry.
    /// The `timeout` on Publish/Forward is a local bookkeeping value and is
    /// never serialized.
    fn from(rpc: RpcOut) -> Self {
        match rpc {
            // Published message only; no subscriptions or control entries.
            RpcOut::Publish {
                message,
                timeout: _,
            } => proto::RPC {
                subscriptions: Vec::new(),
                publish: vec![message.into()],
                control: None,
            },
            // Forwarded messages serialize identically to published ones.
            RpcOut::Forward {
                message,
                timeout: _,
            } => proto::RPC {
                publish: vec![message.into()],
                subscriptions: Vec::new(),
                control: None,
            },
            // Subscription change with subscribe = true.
            RpcOut::Subscribe(topic) => proto::RPC {
                publish: Vec::new(),
                subscriptions: vec![proto::SubOpts {
                    subscribe: Some(true),
                    topic_id: Some(topic.into_string()),
                }],
                control: None,
            },
            // Subscription change with subscribe = false.
            RpcOut::Unsubscribe(topic) => proto::RPC {
                publish: Vec::new(),
                subscriptions: vec![proto::SubOpts {
                    subscribe: Some(false),
                    topic_id: Some(topic.into_string()),
                }],
                control: None,
            },
            // Control message with a single IHAVE entry.
            RpcOut::IHave(IHave {
                topic_hash,
                message_ids,
            }) => proto::RPC {
                publish: Vec::new(),
                subscriptions: Vec::new(),
                control: Some(proto::ControlMessage {
                    ihave: vec![proto::ControlIHave {
                        topic_id: Some(topic_hash.into_string()),
                        message_ids: message_ids.into_iter().map(|msg_id| msg_id.0).collect(),
                    }],
                    iwant: vec![],
                    graft: vec![],
                    prune: vec![],
                }),
            },
            // Control message with a single IWANT entry.
            RpcOut::IWant(IWant { message_ids }) => proto::RPC {
                publish: Vec::new(),
                subscriptions: Vec::new(),
                control: Some(proto::ControlMessage {
                    ihave: vec![],
                    iwant: vec![proto::ControlIWant {
                        message_ids: message_ids.into_iter().map(|msg_id| msg_id.0).collect(),
                    }],
                    graft: vec![],
                    prune: vec![],
                }),
            },
            // Control message with a single GRAFT entry.
            RpcOut::Graft(Graft { topic_hash }) => proto::RPC {
                publish: Vec::new(),
                subscriptions: vec![],
                control: Some(proto::ControlMessage {
                    ihave: vec![],
                    iwant: vec![],
                    graft: vec![proto::ControlGraft {
                        topic_id: Some(topic_hash.into_string()),
                    }],
                    prune: vec![],
                }),
            },
            // Control message with a single PRUNE entry, including peer
            // exchange info and backoff.
            RpcOut::Prune(Prune {
                topic_hash,
                peers,
                backoff,
            }) => {
                proto::RPC {
                    publish: Vec::new(),
                    subscriptions: vec![],
                    control: Some(proto::ControlMessage {
                        ihave: vec![],
                        iwant: vec![],
                        graft: vec![],
                        prune: vec![proto::ControlPrune {
                            topic_id: Some(topic_hash.into_string()),
                            peers: peers
                                .into_iter()
                                .map(|info| proto::PeerInfo {
                                    peer_id: info.peer_id.map(|id| id.to_bytes()),
                                    // TODO, see https://github.com/libp2p/specs/pull/217
                                    signed_peer_record: None,
                                })
                                .collect(),
                            backoff,
                        }],
                    }),
                }
            }
        }
    }
}
|
||||
|
||||
/// An RPC received/sent.
///
/// Aggregates all messages, subscription changes and control messages that
/// travel together in a single gossipsub RPC frame.
#[derive(Clone, PartialEq, Eq, Hash)]
pub struct Rpc {
    /// List of messages that were part of this RPC query.
    pub messages: Vec<RawMessage>,
    /// List of subscriptions.
    pub subscriptions: Vec<Subscription>,
    /// List of Gossipsub control messages.
    pub control_msgs: Vec<ControlAction>,
}

impl Rpc {
    /// Converts the GossipsubRPC into its protobuf format.
    // A convenience function to avoid explicitly specifying types.
    pub fn into_protobuf(self) -> proto::RPC {
        self.into()
    }
}
|
||||
|
||||
impl From<Rpc> for proto::RPC {
    /// Converts the RPC into protobuf format.
    ///
    /// Unlike the single-item `RpcOut` conversion, this serializes a full
    /// batch: every message, subscription and control action in the `Rpc`.
    fn from(rpc: Rpc) -> Self {
        // Messages
        let mut publish = Vec::new();

        for message in rpc.messages.into_iter() {
            let message = proto::Message {
                from: message.source.map(|m| m.to_bytes()),
                data: Some(message.data),
                seqno: message.sequence_number.map(|s| s.to_be_bytes().to_vec()),
                topic: TopicHash::into_string(message.topic),
                signature: message.signature,
                key: message.key,
            };

            publish.push(message);
        }

        // subscriptions
        let subscriptions = rpc
            .subscriptions
            .into_iter()
            .map(|sub| proto::SubOpts {
                subscribe: Some(sub.action == SubscriptionAction::Subscribe),
                topic_id: Some(sub.topic_hash.into_string()),
            })
            .collect::<Vec<_>>();

        // control messages: accumulate every action into one ControlMessage.
        let mut control = proto::ControlMessage {
            ihave: Vec::new(),
            iwant: Vec::new(),
            graft: Vec::new(),
            prune: Vec::new(),
        };

        // Captured before the loop below consumes rpc.control_msgs; decides
        // whether `control` is emitted at all.
        let empty_control_msg = rpc.control_msgs.is_empty();

        for action in rpc.control_msgs {
            match action {
                // collect all ihave messages
                ControlAction::IHave(IHave {
                    topic_hash,
                    message_ids,
                }) => {
                    let rpc_ihave = proto::ControlIHave {
                        topic_id: Some(topic_hash.into_string()),
                        message_ids: message_ids.into_iter().map(|msg_id| msg_id.0).collect(),
                    };
                    control.ihave.push(rpc_ihave);
                }
                ControlAction::IWant(IWant { message_ids }) => {
                    let rpc_iwant = proto::ControlIWant {
                        message_ids: message_ids.into_iter().map(|msg_id| msg_id.0).collect(),
                    };
                    control.iwant.push(rpc_iwant);
                }
                ControlAction::Graft(Graft { topic_hash }) => {
                    let rpc_graft = proto::ControlGraft {
                        topic_id: Some(topic_hash.into_string()),
                    };
                    control.graft.push(rpc_graft);
                }
                ControlAction::Prune(Prune {
                    topic_hash,
                    peers,
                    backoff,
                }) => {
                    let rpc_prune = proto::ControlPrune {
                        topic_id: Some(topic_hash.into_string()),
                        peers: peers
                            .into_iter()
                            .map(|info| proto::PeerInfo {
                                peer_id: info.peer_id.map(|id| id.to_bytes()),
                                // TODO, see https://github.com/libp2p/specs/pull/217
                                signed_peer_record: None,
                            })
                            .collect(),
                        backoff,
                    };
                    control.prune.push(rpc_prune);
                }
            }
        }

        proto::RPC {
            subscriptions,
            publish,
            // Omit the control field entirely when there were no actions.
            control: if empty_control_msg {
                None
            } else {
                Some(control)
            },
        }
    }
}

impl fmt::Debug for Rpc {
    // Compact debug output: empty collections are omitted entirely.
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        let mut b = f.debug_struct("GossipsubRpc");
        if !self.messages.is_empty() {
            b.field("messages", &self.messages);
        }
        if !self.subscriptions.is_empty() {
            b.field("subscriptions", &self.subscriptions);
        }
        if !self.control_msgs.is_empty() {
            b.field("control_msgs", &self.control_msgs);
        }
        b.finish()
    }
}
|
||||
|
||||
impl PeerKind {
|
||||
pub fn as_static_ref(&self) -> &'static str {
|
||||
match self {
|
||||
Self::NotSupported => "Not Supported",
|
||||
Self::Floodsub => "Floodsub",
|
||||
Self::Gossipsub => "Gossipsub v1.0",
|
||||
Self::Gossipsubv1_1 => "Gossipsub v1.1",
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl AsRef<str> for PeerKind {
|
||||
fn as_ref(&self) -> &str {
|
||||
self.as_static_ref()
|
||||
}
|
||||
}
|
||||
|
||||
impl fmt::Display for PeerKind {
|
||||
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
|
||||
f.write_str(self.as_ref())
|
||||
}
|
||||
}
|
||||
|
||||
/// `RpcOut` sender that is priority aware.
#[derive(Debug, Clone)]
pub(crate) struct RpcSender {
    // Maximum number of in-flight priority `Publish` messages before
    // `publish` starts returning Err (set to half the configured cap in
    // `new`).
    cap: usize,
    // Count of queued priority `Publish` messages; shared (via Arc) with
    // the paired `RpcReceiver`.
    len: Arc<AtomicUsize>,
    // Unbounded channel for high-priority traffic (graft/prune/sub/unsub
    // and capacity-gated publishes).
    pub(crate) priority_sender: Sender<RpcOut>,
    // Bounded channel (cap / 2) for droppable traffic (forward/ihave/iwant).
    pub(crate) non_priority_sender: Sender<RpcOut>,
    priority_receiver: Receiver<RpcOut>,
    non_priority_receiver: Receiver<RpcOut>,
}
|
||||
|
||||
impl RpcSender {
    /// Create a RpcSender with total capacity `cap`; each of the two queues
    /// effectively gets `cap / 2`.
    pub(crate) fn new(cap: usize) -> RpcSender {
        // Priority channel is unbounded; its length is gated manually via
        // `len`/`cap` in `publish` instead.
        let (priority_sender, priority_receiver) = async_channel::unbounded();
        let (non_priority_sender, non_priority_receiver) = async_channel::bounded(cap / 2);
        let len = Arc::new(AtomicUsize::new(0));
        RpcSender {
            cap: cap / 2,
            len,
            priority_sender,
            non_priority_sender,
            priority_receiver,
            non_priority_receiver,
        }
    }

    /// Create a new Receiver to the sender.
    // Clones the channel receivers; the shared `len` counter links the pair.
    pub(crate) fn new_receiver(&self) -> RpcReceiver {
        RpcReceiver {
            priority_len: self.len.clone(),
            priority: self.priority_receiver.clone().peekable(),
            non_priority: self.non_priority_receiver.clone().peekable(),
        }
    }

    /// Send a `RpcOut::Graft` message to the `RpcReceiver`
    /// this is high priority.
    pub(crate) fn graft(&mut self, graft: Graft) {
        self.priority_sender
            .try_send(RpcOut::Graft(graft))
            .expect("Channel is unbounded and should always be open");
    }

    /// Send a `RpcOut::Prune` message to the `RpcReceiver`
    /// this is high priority.
    pub(crate) fn prune(&mut self, prune: Prune) {
        self.priority_sender
            .try_send(RpcOut::Prune(prune))
            .expect("Channel is unbounded and should always be open");
    }

    /// Send a `RpcOut::IHave` message to the `RpcReceiver`
    /// this is low priority, if the queue is full an Err is returned.
    #[allow(clippy::result_large_err)]
    pub(crate) fn ihave(&mut self, ihave: IHave) -> Result<(), RpcOut> {
        self.non_priority_sender
            .try_send(RpcOut::IHave(ihave))
            .map_err(|err| err.into_inner())
    }

    /// Send a `RpcOut::IWant` message to the `RpcReceiver`
    /// this is low priority, if the queue is full an Err is returned.
    // NOTE: doc previously said "IHave" — copy-paste error; this sends IWant.
    #[allow(clippy::result_large_err)]
    pub(crate) fn iwant(&mut self, iwant: IWant) -> Result<(), RpcOut> {
        self.non_priority_sender
            .try_send(RpcOut::IWant(iwant))
            .map_err(|err| err.into_inner())
    }

    /// Send a `RpcOut::Subscribe` message to the `RpcReceiver`
    /// this is high priority.
    pub(crate) fn subscribe(&mut self, topic: TopicHash) {
        self.priority_sender
            .try_send(RpcOut::Subscribe(topic))
            .expect("Channel is unbounded and should always be open");
    }

    /// Send a `RpcOut::Unsubscribe` message to the `RpcReceiver`
    /// this is high priority.
    pub(crate) fn unsubscribe(&mut self, topic: TopicHash) {
        self.priority_sender
            .try_send(RpcOut::Unsubscribe(topic))
            .expect("Channel is unbounded and should always be open");
    }

    /// Send a `RpcOut::Publish` message to the `RpcReceiver`
    /// this is high priority. Returns `Err(())` if the number of queued
    /// priority publishes has reached `cap`.
    pub(crate) fn publish(
        &mut self,
        message: RawMessage,
        timeout: Duration,
        metrics: Option<&mut Metrics>,
    ) -> Result<(), ()> {
        // Manual capacity gate: the channel itself is unbounded.
        if self.len.load(Ordering::Relaxed) >= self.cap {
            return Err(());
        }
        self.priority_sender
            .try_send(RpcOut::Publish {
                message: message.clone(),
                timeout: Delay::new(timeout),
            })
            .expect("Channel is unbounded and should always be open");
        self.len.fetch_add(1, Ordering::Relaxed);

        if let Some(m) = metrics {
            m.msg_sent(&message.topic, message.raw_protobuf_len());
        }

        Ok(())
    }

    /// Send a `RpcOut::Forward` message to the `RpcReceiver`.
    /// This goes on the non-priority (bounded) queue; if that queue is full
    /// the message is dropped and `Err(())` is returned.
    // NOTE: doc previously said "high priority" — it uses non_priority_sender.
    pub(crate) fn forward(
        &mut self,
        message: RawMessage,
        timeout: Duration,
        metrics: Option<&mut Metrics>,
    ) -> Result<(), ()> {
        self.non_priority_sender
            .try_send(RpcOut::Forward {
                message: message.clone(),
                timeout: Delay::new(timeout),
            })
            .map_err(|_| ())?;

        if let Some(m) = metrics {
            m.msg_sent(&message.topic, message.raw_protobuf_len());
        }

        Ok(())
    }

    /// Returns the current size of the priority queue.
    // Counts only `Publish` entries (the ones tracked by `len`).
    pub(crate) fn priority_len(&self) -> usize {
        self.len.load(Ordering::Relaxed)
    }

    /// Returns the current size of the non-priority queue.
    pub(crate) fn non_priority_len(&self) -> usize {
        self.non_priority_sender.len()
    }
}
|
||||
|
||||
/// `RpcOut` sender that is priority aware.
|
||||
#[derive(Debug)]
|
||||
pub struct RpcReceiver {
|
||||
/// The maximum length of the priority queue.
|
||||
pub(crate) priority_len: Arc<AtomicUsize>,
|
||||
/// The priority queue receiver.
|
||||
pub(crate) priority: Peekable<Receiver<RpcOut>>,
|
||||
/// The non priority queue receiver.
|
||||
pub(crate) non_priority: Peekable<Receiver<RpcOut>>,
|
||||
}
|
||||
|
||||
impl RpcReceiver {
|
||||
// Peek the next message in the queues and return it if its timeout has elapsed.
|
||||
// Returns `None` if there aren't any more messages on the stream or none is stale.
|
||||
pub(crate) fn poll_stale(&mut self, cx: &mut Context<'_>) -> Poll<Option<RpcOut>> {
|
||||
// Peek priority queue.
|
||||
let priority = match Pin::new(&mut self.priority).poll_peek_mut(cx) {
|
||||
Poll::Ready(Some(RpcOut::Publish {
|
||||
message: _,
|
||||
ref mut timeout,
|
||||
})) => {
|
||||
if Pin::new(timeout).poll(cx).is_ready() {
|
||||
// Return the message.
|
||||
let dropped = futures::ready!(self.priority.poll_next_unpin(cx))
|
||||
.expect("There should be a message");
|
||||
return Poll::Ready(Some(dropped));
|
||||
}
|
||||
Poll::Ready(None)
|
||||
}
|
||||
poll => poll,
|
||||
};
|
||||
|
||||
let non_priority = match Pin::new(&mut self.non_priority).poll_peek_mut(cx) {
|
||||
Poll::Ready(Some(RpcOut::Forward {
|
||||
message: _,
|
||||
ref mut timeout,
|
||||
})) => {
|
||||
if Pin::new(timeout).poll(cx).is_ready() {
|
||||
// Return the message.
|
||||
let dropped = futures::ready!(self.non_priority.poll_next_unpin(cx))
|
||||
.expect("There should be a message");
|
||||
return Poll::Ready(Some(dropped));
|
||||
}
|
||||
Poll::Ready(None)
|
||||
}
|
||||
poll => poll,
|
||||
};
|
||||
|
||||
match (priority, non_priority) {
|
||||
(Poll::Ready(None), Poll::Ready(None)) => Poll::Ready(None),
|
||||
_ => Poll::Pending,
|
||||
}
|
||||
}
|
||||
|
||||
/// Poll queues and return true if both are empty.
|
||||
pub(crate) fn poll_is_empty(&mut self, cx: &mut Context<'_>) -> bool {
|
||||
matches!(
|
||||
(
|
||||
Pin::new(&mut self.priority).poll_peek(cx),
|
||||
Pin::new(&mut self.non_priority).poll_peek(cx),
|
||||
),
|
||||
(Poll::Ready(None), Poll::Ready(None))
|
||||
)
|
||||
}
|
||||
}
|
||||
|
||||
impl Stream for RpcReceiver {
|
||||
type Item = RpcOut;
|
||||
|
||||
fn poll_next(
|
||||
mut self: std::pin::Pin<&mut Self>,
|
||||
cx: &mut std::task::Context<'_>,
|
||||
) -> std::task::Poll<Option<Self::Item>> {
|
||||
// The priority queue is first polled.
|
||||
if let Poll::Ready(rpc) = Pin::new(&mut self.priority).poll_next(cx) {
|
||||
if let Some(RpcOut::Publish { .. }) = rpc {
|
||||
self.priority_len.fetch_sub(1, Ordering::Relaxed);
|
||||
}
|
||||
return Poll::Ready(rpc);
|
||||
}
|
||||
// Then we poll the non priority.
|
||||
Pin::new(&mut self.non_priority).poll_next(cx)
|
||||
}
|
||||
}
|
||||
@ -10,6 +10,7 @@ pub mod service;
|
||||
|
||||
#[allow(clippy::mutable_key_type)] // PeerId in hashmaps are no longer permitted by clippy
|
||||
pub mod discovery;
|
||||
pub mod gossipsub;
|
||||
pub mod listen_addr;
|
||||
pub mod metrics;
|
||||
pub mod peer_manager;
|
||||
@ -114,8 +115,8 @@ pub use prometheus_client;
|
||||
pub use config::Config as NetworkConfig;
|
||||
pub use discovery::{CombinedKeyExt, EnrExt, Eth2Enr};
|
||||
pub use discv5;
|
||||
pub use gossipsub::{IdentTopic, MessageAcceptance, MessageId, Topic, TopicHash};
|
||||
pub use libp2p;
|
||||
pub use libp2p::gossipsub::{IdentTopic, MessageAcceptance, MessageId, Topic, TopicHash};
|
||||
pub use libp2p::{core::ConnectedPoint, PeerId, Swarm};
|
||||
pub use libp2p::{multiaddr, Multiaddr};
|
||||
pub use metrics::scrape_discovery_metrics;
|
||||
|
||||
@ -1,9 +1,10 @@
|
||||
pub use lighthouse_metrics::*;
|
||||
|
||||
lazy_static! {
|
||||
pub static ref NAT_OPEN: Result<IntCounter> = try_create_int_counter(
|
||||
pub static ref NAT_OPEN: Result<IntGaugeVec> = try_create_int_gauge_vec(
|
||||
"nat_open",
|
||||
"An estimate indicating if the local node is exposed to the internet."
|
||||
"An estimate indicating if the local node is reachable from external nodes",
|
||||
&["protocol"]
|
||||
);
|
||||
pub static ref ADDRESS_UPDATE_COUNT: Result<IntCounter> = try_create_int_counter(
|
||||
"libp2p_address_update_total",
|
||||
@ -14,6 +15,9 @@ lazy_static! {
|
||||
"Count of libp2p peers currently connected"
|
||||
);
|
||||
|
||||
pub static ref PEERS_CONNECTED_MULTI: Result<IntGaugeVec> =
|
||||
try_create_int_gauge_vec("libp2p_peers_multi", "Count of libp2p peers currently connected", &["direction", "transport"]);
|
||||
|
||||
pub static ref TCP_PEERS_CONNECTED: Result<IntGauge> = try_create_int_gauge(
|
||||
"libp2p_tcp_peers",
|
||||
"Count of libp2p peers currently connected via TCP"
|
||||
@ -32,13 +36,10 @@ lazy_static! {
|
||||
"libp2p_peer_disconnect_event_total",
|
||||
"Count of libp2p peer disconnect events"
|
||||
);
|
||||
pub static ref DISCOVERY_SENT_BYTES: Result<IntGauge> = try_create_int_gauge(
|
||||
"discovery_sent_bytes",
|
||||
"The number of bytes sent in discovery"
|
||||
);
|
||||
pub static ref DISCOVERY_RECV_BYTES: Result<IntGauge> = try_create_int_gauge(
|
||||
"discovery_recv_bytes",
|
||||
"The number of bytes received in discovery"
|
||||
pub static ref DISCOVERY_BYTES: Result<IntGaugeVec> = try_create_int_gauge_vec(
|
||||
"discovery_bytes",
|
||||
"The number of bytes sent and received in discovery",
|
||||
&["direction"]
|
||||
);
|
||||
pub static ref DISCOVERY_QUEUE: Result<IntGauge> = try_create_int_gauge(
|
||||
"discovery_queue_size",
|
||||
@ -135,17 +136,6 @@ lazy_static! {
|
||||
&["type"]
|
||||
);
|
||||
|
||||
/*
|
||||
* Inbound/Outbound peers
|
||||
*/
|
||||
/// The number of peers that dialed us.
|
||||
pub static ref NETWORK_INBOUND_PEERS: Result<IntGauge> =
|
||||
try_create_int_gauge("network_inbound_peers","The number of peers that are currently connected that have dialed us.");
|
||||
|
||||
/// The number of peers that we dialed us.
|
||||
pub static ref NETWORK_OUTBOUND_PEERS: Result<IntGauge> =
|
||||
try_create_int_gauge("network_outbound_peers","The number of peers that are currently connected that we dialed.");
|
||||
|
||||
/*
|
||||
* Peer Reporting
|
||||
*/
|
||||
@ -156,31 +146,11 @@ lazy_static! {
|
||||
);
|
||||
}
|
||||
|
||||
/// Checks if we consider the NAT open.
|
||||
///
|
||||
/// Conditions for an open NAT:
|
||||
/// 1. We have 1 or more SOCKET_UPDATED messages. This occurs when discovery has a majority of
|
||||
/// users reporting an external port and our ENR gets updated.
|
||||
/// 2. We have 0 SOCKET_UPDATED messages (can be true if the port was correct on boot), then we
|
||||
/// rely on whether we have any inbound messages. If we have no socket update messages, but
|
||||
/// manage to get at least one inbound peer, we are exposed correctly.
|
||||
pub fn check_nat() {
|
||||
// NAT is already deemed open.
|
||||
if NAT_OPEN.as_ref().map(|v| v.get()).unwrap_or(0) != 0 {
|
||||
return;
|
||||
}
|
||||
if ADDRESS_UPDATE_COUNT.as_ref().map(|v| v.get()).unwrap_or(0) != 0
|
||||
|| NETWORK_INBOUND_PEERS.as_ref().map(|v| v.get()).unwrap_or(0) != 0_i64
|
||||
{
|
||||
inc_counter(&NAT_OPEN);
|
||||
}
|
||||
}
|
||||
|
||||
pub fn scrape_discovery_metrics() {
|
||||
let metrics =
|
||||
discv5::metrics::Metrics::from(discv5::Discv5::<discv5::DefaultProtocolId>::raw_metrics());
|
||||
set_float_gauge(&DISCOVERY_REQS, metrics.unsolicited_requests_per_second);
|
||||
set_gauge(&DISCOVERY_SESSIONS, metrics.active_sessions as i64);
|
||||
set_gauge(&DISCOVERY_SENT_BYTES, metrics.bytes_sent as i64);
|
||||
set_gauge(&DISCOVERY_RECV_BYTES, metrics.bytes_recv as i64);
|
||||
set_gauge_vec(&DISCOVERY_BYTES, &["inbound"], metrics.bytes_recv as i64);
|
||||
set_gauge_vec(&DISCOVERY_BYTES, &["outbound"], metrics.bytes_sent as i64);
|
||||
}
|
||||
|
||||
@ -18,6 +18,8 @@ pub struct Config {
|
||||
pub discovery_enabled: bool,
|
||||
/// Whether metrics are enabled.
|
||||
pub metrics_enabled: bool,
|
||||
/// Whether quic is enabled.
|
||||
pub quic_enabled: bool,
|
||||
/// Target number of peers to connect to.
|
||||
pub target_peer_count: usize,
|
||||
|
||||
@ -37,6 +39,7 @@ impl Default for Config {
|
||||
Config {
|
||||
discovery_enabled: true,
|
||||
metrics_enabled: false,
|
||||
quic_enabled: true,
|
||||
target_peer_count: DEFAULT_TARGET_PEERS,
|
||||
status_interval: DEFAULT_STATUS_INTERVAL,
|
||||
ping_interval_inbound: DEFAULT_PING_INTERVAL_INBOUND,
|
||||
|
||||
@ -10,7 +10,7 @@ use delay_map::HashSetDelay;
|
||||
use discv5::Enr;
|
||||
use libp2p::identify::Info as IdentifyInfo;
|
||||
use lru_cache::LRUTimeCache;
|
||||
use peerdb::{client::ClientKind, BanOperation, BanResult, ScoreUpdateResult};
|
||||
use peerdb::{BanOperation, BanResult, ScoreUpdateResult};
|
||||
use rand::seq::SliceRandom;
|
||||
use slog::{debug, error, trace, warn};
|
||||
use smallvec::SmallVec;
|
||||
@ -18,7 +18,6 @@ use std::{
|
||||
sync::Arc,
|
||||
time::{Duration, Instant},
|
||||
};
|
||||
use strum::IntoEnumIterator;
|
||||
use types::{EthSpec, SyncSubnetId};
|
||||
|
||||
pub use libp2p::core::Multiaddr;
|
||||
@ -104,6 +103,8 @@ pub struct PeerManager<TSpec: EthSpec> {
|
||||
discovery_enabled: bool,
|
||||
/// Keeps track if the current instance is reporting metrics or not.
|
||||
metrics_enabled: bool,
|
||||
/// Keeps track of whether the QUIC protocol is enabled or not.
|
||||
quic_enabled: bool,
|
||||
/// The logger associated with the `PeerManager`.
|
||||
log: slog::Logger,
|
||||
}
|
||||
@ -149,6 +150,7 @@ impl<TSpec: EthSpec> PeerManager<TSpec> {
|
||||
status_interval,
|
||||
ping_interval_inbound,
|
||||
ping_interval_outbound,
|
||||
quic_enabled,
|
||||
} = cfg;
|
||||
|
||||
// Set up the peer manager heartbeat interval
|
||||
@ -167,6 +169,7 @@ impl<TSpec: EthSpec> PeerManager<TSpec> {
|
||||
heartbeat,
|
||||
discovery_enabled,
|
||||
metrics_enabled,
|
||||
quic_enabled,
|
||||
log: log.clone(),
|
||||
})
|
||||
}
|
||||
@ -715,46 +718,6 @@ impl<TSpec: EthSpec> PeerManager<TSpec> {
|
||||
}
|
||||
}
|
||||
|
||||
// This function updates metrics for all connected peers.
|
||||
fn update_connected_peer_metrics(&self) {
|
||||
// Do nothing if we don't have metrics enabled.
|
||||
if !self.metrics_enabled {
|
||||
return;
|
||||
}
|
||||
|
||||
let mut connected_peer_count = 0;
|
||||
let mut inbound_connected_peers = 0;
|
||||
let mut outbound_connected_peers = 0;
|
||||
let mut clients_per_peer = HashMap::new();
|
||||
|
||||
for (_peer, peer_info) in self.network_globals.peers.read().connected_peers() {
|
||||
connected_peer_count += 1;
|
||||
if let PeerConnectionStatus::Connected { n_in, .. } = peer_info.connection_status() {
|
||||
if *n_in > 0 {
|
||||
inbound_connected_peers += 1;
|
||||
} else {
|
||||
outbound_connected_peers += 1;
|
||||
}
|
||||
}
|
||||
*clients_per_peer
|
||||
.entry(peer_info.client().kind.to_string())
|
||||
.or_default() += 1;
|
||||
}
|
||||
|
||||
metrics::set_gauge(&metrics::PEERS_CONNECTED, connected_peer_count);
|
||||
metrics::set_gauge(&metrics::NETWORK_INBOUND_PEERS, inbound_connected_peers);
|
||||
metrics::set_gauge(&metrics::NETWORK_OUTBOUND_PEERS, outbound_connected_peers);
|
||||
|
||||
for client_kind in ClientKind::iter() {
|
||||
let value = clients_per_peer.get(&client_kind.to_string()).unwrap_or(&0);
|
||||
metrics::set_gauge_vec(
|
||||
&metrics::PEERS_PER_CLIENT,
|
||||
&[client_kind.as_ref()],
|
||||
*value as i64,
|
||||
);
|
||||
}
|
||||
}
|
||||
|
||||
/* Internal functions */
|
||||
|
||||
/// Sets a peer as connected as long as their reputation allows it
|
||||
@ -917,8 +880,7 @@ impl<TSpec: EthSpec> PeerManager<TSpec> {
|
||||
let outbound_only_peer_count = self.network_globals.connected_outbound_only_peers();
|
||||
let wanted_peers = if peer_count < self.target_peers.saturating_sub(dialing_peers) {
|
||||
// We need more peers in general.
|
||||
// Note: The maximum discovery query is bounded by `Discovery`.
|
||||
self.target_peers.saturating_sub(dialing_peers) - peer_count
|
||||
self.max_peers().saturating_sub(dialing_peers) - peer_count
|
||||
} else if outbound_only_peer_count < self.min_outbound_only_peers()
|
||||
&& peer_count < self.max_outbound_dialing_peers()
|
||||
{
|
||||
|
||||
@ -96,10 +96,16 @@ impl<TSpec: EthSpec> NetworkBehaviour for PeerManager<TSpec> {
|
||||
if let Some(enr) = self.peers_to_dial.pop() {
|
||||
let peer_id = enr.peer_id();
|
||||
self.inject_peer_connection(&peer_id, ConnectingType::Dialing, Some(enr.clone()));
|
||||
let quic_multiaddrs = enr.multiaddr_quic();
|
||||
if !quic_multiaddrs.is_empty() {
|
||||
debug!(self.log, "Dialing QUIC supported peer"; "peer_id"=> %peer_id, "quic_multiaddrs" => ?quic_multiaddrs);
|
||||
}
|
||||
|
||||
let quic_multiaddrs = if self.quic_enabled {
|
||||
let quic_multiaddrs = enr.multiaddr_quic();
|
||||
if !quic_multiaddrs.is_empty() {
|
||||
debug!(self.log, "Dialing QUIC supported peer"; "peer_id"=> %peer_id, "quic_multiaddrs" => ?quic_multiaddrs);
|
||||
}
|
||||
quic_multiaddrs
|
||||
} else {
|
||||
Vec::new()
|
||||
};
|
||||
|
||||
// Prioritize Quic connections over Tcp ones.
|
||||
let multiaddrs = quic_multiaddrs
|
||||
@ -148,8 +154,8 @@ impl<TSpec: EthSpec> NetworkBehaviour for PeerManager<TSpec> {
|
||||
self.on_dial_failure(peer_id);
|
||||
}
|
||||
FromSwarm::ExternalAddrConfirmed(_) => {
|
||||
// TODO: we likely want to check this against our assumed external tcp
|
||||
// address
|
||||
// We have an external address confirmed, means we are able to do NAT traversal.
|
||||
metrics::set_gauge_vec(&metrics::NAT_OPEN, &["libp2p"], 1);
|
||||
}
|
||||
_ => {
|
||||
// NOTE: FromSwarm is a non exhaustive enum so updates should be based on release
|
||||
@ -237,14 +243,15 @@ impl<TSpec: EthSpec> PeerManager<TSpec> {
|
||||
self.events.push(PeerManagerEvent::MetaData(peer_id));
|
||||
}
|
||||
|
||||
// Check NAT if metrics are enabled
|
||||
if self.network_globals.local_enr.read().udp4().is_some() {
|
||||
metrics::check_nat();
|
||||
}
|
||||
|
||||
// increment prometheus metrics
|
||||
if self.metrics_enabled {
|
||||
let remote_addr = endpoint.get_remote_address();
|
||||
let direction = if endpoint.is_dialer() {
|
||||
"outbound"
|
||||
} else {
|
||||
"inbound"
|
||||
};
|
||||
|
||||
match remote_addr.iter().find(|proto| {
|
||||
matches!(
|
||||
proto,
|
||||
@ -252,10 +259,10 @@ impl<TSpec: EthSpec> PeerManager<TSpec> {
|
||||
)
|
||||
}) {
|
||||
Some(multiaddr::Protocol::QuicV1) => {
|
||||
metrics::inc_gauge(&metrics::QUIC_PEERS_CONNECTED);
|
||||
metrics::inc_gauge_vec(&metrics::PEERS_CONNECTED_MULTI, &[direction, "quic"]);
|
||||
}
|
||||
Some(multiaddr::Protocol::Tcp(_)) => {
|
||||
metrics::inc_gauge(&metrics::TCP_PEERS_CONNECTED);
|
||||
metrics::inc_gauge_vec(&metrics::PEERS_CONNECTED_MULTI, &[direction, "tcp"]);
|
||||
}
|
||||
Some(_) => unreachable!(),
|
||||
None => {
|
||||
@ -263,7 +270,7 @@ impl<TSpec: EthSpec> PeerManager<TSpec> {
|
||||
}
|
||||
};
|
||||
|
||||
self.update_connected_peer_metrics();
|
||||
metrics::inc_gauge(&metrics::PEERS_CONNECTED);
|
||||
metrics::inc_counter(&metrics::PEER_CONNECT_EVENT_COUNT);
|
||||
}
|
||||
|
||||
@ -333,6 +340,12 @@ impl<TSpec: EthSpec> PeerManager<TSpec> {
|
||||
let remote_addr = endpoint.get_remote_address();
|
||||
// Update the prometheus metrics
|
||||
if self.metrics_enabled {
|
||||
let direction = if endpoint.is_dialer() {
|
||||
"outbound"
|
||||
} else {
|
||||
"inbound"
|
||||
};
|
||||
|
||||
match remote_addr.iter().find(|proto| {
|
||||
matches!(
|
||||
proto,
|
||||
@ -340,15 +353,16 @@ impl<TSpec: EthSpec> PeerManager<TSpec> {
|
||||
)
|
||||
}) {
|
||||
Some(multiaddr::Protocol::QuicV1) => {
|
||||
metrics::dec_gauge(&metrics::QUIC_PEERS_CONNECTED);
|
||||
metrics::dec_gauge_vec(&metrics::PEERS_CONNECTED_MULTI, &[direction, "quic"]);
|
||||
}
|
||||
Some(multiaddr::Protocol::Tcp(_)) => {
|
||||
metrics::dec_gauge(&metrics::TCP_PEERS_CONNECTED);
|
||||
metrics::dec_gauge_vec(&metrics::PEERS_CONNECTED_MULTI, &[direction, "tcp"]);
|
||||
}
|
||||
// If it's an unknown protocol we already logged when connection was established.
|
||||
_ => {}
|
||||
};
|
||||
self.update_connected_peer_metrics();
|
||||
// Legacy standard metrics.
|
||||
metrics::dec_gauge(&metrics::PEERS_CONNECTED);
|
||||
metrics::inc_counter(&metrics::PEER_DISCONNECT_EVENT_COUNT);
|
||||
}
|
||||
}
|
||||
|
||||
Some files were not shown because too many files have changed in this diff Show More
Loading…
Reference in New Issue
Block a user