commit e179be1f0b

.github/workflows/docker.yml (vendored, 4 changes)

@@ -41,7 +41,6 @@ jobs:
         run: |
           echo "VERSION=capella" >> $GITHUB_ENV
           echo "VERSION_SUFFIX=" >> $GITHUB_ENV
-          echo "CROSS_FEATURES=withdrawals-processing" >> $GITHUB_ENV
       - name: Extract version (if eip4844)
         if: github.event.ref == 'refs/heads/eip4844'
         run: |
@@ -55,7 +54,6 @@ jobs:
     outputs:
       VERSION: ${{ env.VERSION }}
       VERSION_SUFFIX: ${{ env.VERSION_SUFFIX }}
-      CROSS_FEATURES: ${{ env.CROSS_FEATURES }}
   build-docker-single-arch:
     name: build-docker-${{ matrix.binary }}
     runs-on: ubuntu-22.04
@@ -74,7 +72,7 @@ jobs:
       DOCKER_CLI_EXPERIMENTAL: enabled
       VERSION: ${{ needs.extract-version.outputs.VERSION }}
       VERSION_SUFFIX: ${{ needs.extract-version.outputs.VERSION_SUFFIX }}
-      CROSS_FEATURES: ${{ needs.extract-version.outputs.CROSS_FEATURES }}
+      CROSS_FEATURES: null
     steps:
       - uses: actions/checkout@v3
       - name: Update Rust
.github/workflows/local-testnet.yml (vendored, 2 changes)

@@ -21,7 +21,7 @@ jobs:
       - name: Get latest version of stable Rust
         run: rustup update stable
       - name: Install Protoc
-        uses: arduino/setup-protoc@v1
+        uses: arduino/setup-protoc@e52d9eb8f7b63115df1ac544a1376fdbf5a39612
         with:
           repo-token: ${{ secrets.GITHUB_TOKEN }}
       - name: Install ganache
.github/workflows/release.yml (vendored, 27 changes)

@@ -8,8 +8,8 @@ on:
 env:
   DOCKER_PASSWORD: ${{ secrets.DOCKER_PASSWORD }}
   DOCKER_USERNAME: ${{ secrets.DOCKER_USERNAME }}
-  REPO_NAME: sigp/lighthouse
-  IMAGE_NAME: sigp/lighthouse
+  REPO_NAME: ${{ github.repository_owner }}/lighthouse
+  IMAGE_NAME: ${{ github.repository_owner }}/lighthouse

 jobs:
   extract-version:
@@ -63,12 +63,8 @@ jobs:
     steps:
       - name: Checkout sources
         uses: actions/checkout@v3
-      - name: Build toolchain
-        uses: actions-rs/toolchain@v1
-        with:
-          toolchain: stable
-          profile: minimal
-          override: true
+      - name: Get latest version of stable Rust
+        run: rustup update stable

       # ==============================
       # Windows dependencies
@@ -88,7 +84,7 @@ jobs:
       # ==============================
       - name: Install Protoc
         if: contains(matrix.arch, 'darwin') || contains(matrix.arch, 'windows')
-        uses: arduino/setup-protoc@v1
+        uses: arduino/setup-protoc@e52d9eb8f7b63115df1ac544a1376fdbf5a39612
         with:
           repo-token: ${{ secrets.GITHUB_TOKEN }}

@@ -179,13 +175,13 @@ jobs:
       # =======================================================================

       - name: Upload artifact
-        uses: actions/upload-artifact@v2
+        uses: actions/upload-artifact@v3
         with:
           name: lighthouse-${{ needs.extract-version.outputs.VERSION }}-${{ matrix.arch }}.tar.gz
           path: lighthouse-${{ needs.extract-version.outputs.VERSION }}-${{ matrix.arch }}.tar.gz

       - name: Upload signature
-        uses: actions/upload-artifact@v2
+        uses: actions/upload-artifact@v3
         with:
           name: lighthouse-${{ needs.extract-version.outputs.VERSION }}-${{ matrix.arch }}.tar.gz.asc
           path: lighthouse-${{ needs.extract-version.outputs.VERSION }}-${{ matrix.arch }}.tar.gz.asc
@@ -208,7 +204,7 @@ jobs:
      # ==============================

       - name: Download artifacts
-        uses: actions/download-artifact@v2
+        uses: actions/download-artifact@v3

       # ==============================
       # Create release draft
@@ -216,11 +212,14 @@ jobs:

       - name: Generate Full Changelog
         id: changelog
-        run: echo "CHANGELOG=$(git log --pretty=format:"- %s" $(git describe --tags --abbrev=0 ${{ env.VERSION }}^)..${{ env.VERSION }})" >> $GITHUB_OUTPUT
+        run: |
+          echo "CHANGELOG<<EOF" >> $GITHUB_OUTPUT
+          echo "$(git log --pretty=format:"- %s" $(git describe --tags --abbrev=0 ${{ env.VERSION }}^)..${{ env.VERSION }})" >> $GITHUB_OUTPUT
+          echo "EOF" >> $GITHUB_OUTPUT

       - name: Create Release Draft
         env:
-          GITHUB_USER: sigp
+          GITHUB_USER: ${{ github.repository_owner }}
           GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}

       # The formatting here is borrowed from OpenEthereum: https://github.com/openethereum/openethereum/blob/main/.github/workflows/build.yml
.github/workflows/test-suite.yml (vendored, 57 changes)

@@ -12,7 +12,9 @@ env:
   # Deny warnings in CI
   RUSTFLAGS: "-D warnings"
   # The Nightly version used for cargo-udeps, might need updating from time to time.
-  PINNED_NIGHTLY: nightly-2022-05-20
+  PINNED_NIGHTLY: nightly-2022-12-15
+  # Prevent Github API rate limiting.
+  LIGHTHOUSE_GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
 jobs:
   target-branch-check:
     name: target-branch-check
@@ -51,7 +53,7 @@ jobs:
       - name: Get latest version of stable Rust
         run: rustup update stable
       - name: Install Protoc
-        uses: arduino/setup-protoc@v1
+        uses: arduino/setup-protoc@e52d9eb8f7b63115df1ac544a1376fdbf5a39612
         with:
           repo-token: ${{ secrets.GITHUB_TOKEN }}
       - name: Install ganache
@@ -95,7 +97,7 @@ jobs:
       - name: Get latest version of stable Rust
         run: rustup update stable
       - name: Install Protoc
-        uses: arduino/setup-protoc@v1
+        uses: arduino/setup-protoc@e52d9eb8f7b63115df1ac544a1376fdbf5a39612
         with:
           repo-token: ${{ secrets.GITHUB_TOKEN }}
       - name: Run beacon_chain tests for all known forks
@@ -109,7 +111,7 @@ jobs:
       - name: Get latest version of stable Rust
         run: rustup update stable
       - name: Install Protoc
-        uses: arduino/setup-protoc@v1
+        uses: arduino/setup-protoc@e52d9eb8f7b63115df1ac544a1376fdbf5a39612
         with:
           repo-token: ${{ secrets.GITHUB_TOKEN }}
       - name: Run operation_pool tests for all known forks
@@ -133,7 +135,7 @@ jobs:
       - name: Get latest version of stable Rust
         run: rustup update stable
       - name: Install Protoc
-        uses: arduino/setup-protoc@v1
+        uses: arduino/setup-protoc@e52d9eb8f7b63115df1ac544a1376fdbf5a39612
         with:
           repo-token: ${{ secrets.GITHUB_TOKEN }}
       - name: Install ganache
@@ -149,7 +151,7 @@ jobs:
       - name: Get latest version of stable Rust
         run: rustup update stable
       - name: Install Protoc
-        uses: arduino/setup-protoc@v1
+        uses: arduino/setup-protoc@e52d9eb8f7b63115df1ac544a1376fdbf5a39612
         with:
           repo-token: ${{ secrets.GITHUB_TOKEN }}
       - name: Run state_transition_vectors in release.
@@ -163,7 +165,7 @@ jobs:
       - name: Get latest version of stable Rust
         run: rustup update stable
       - name: Install Protoc
-        uses: arduino/setup-protoc@v1
+        uses: arduino/setup-protoc@e52d9eb8f7b63115df1ac544a1376fdbf5a39612
         with:
           repo-token: ${{ secrets.GITHUB_TOKEN }}
       - name: Run consensus-spec-tests with blst, milagro and fake_crypto
@@ -189,7 +191,7 @@ jobs:
       - name: Get latest version of stable Rust
         run: rustup update stable
       - name: Install Protoc
-        uses: arduino/setup-protoc@v1
+        uses: arduino/setup-protoc@e52d9eb8f7b63115df1ac544a1376fdbf5a39612
         with:
           repo-token: ${{ secrets.GITHUB_TOKEN }}
       - name: Install ganache
@@ -205,7 +207,7 @@ jobs:
       - name: Get latest version of stable Rust
         run: rustup update stable
       - name: Install Protoc
-        uses: arduino/setup-protoc@v1
+        uses: arduino/setup-protoc@e52d9eb8f7b63115df1ac544a1376fdbf5a39612
         with:
           repo-token: ${{ secrets.GITHUB_TOKEN }}
       - name: Install ganache
@@ -221,7 +223,7 @@ jobs:
       - name: Get latest version of stable Rust
         run: rustup update stable
       - name: Install Protoc
-        uses: arduino/setup-protoc@v1
+        uses: arduino/setup-protoc@e52d9eb8f7b63115df1ac544a1376fdbf5a39612
         with:
           repo-token: ${{ secrets.GITHUB_TOKEN }}
       - name: Install ganache
@@ -237,7 +239,7 @@ jobs:
       - name: Get latest version of stable Rust
         run: rustup update stable
       - name: Install Protoc
-        uses: arduino/setup-protoc@v1
+        uses: arduino/setup-protoc@e52d9eb8f7b63115df1ac544a1376fdbf5a39612
         with:
           repo-token: ${{ secrets.GITHUB_TOKEN }}
       - name: Install ganache
@@ -253,7 +255,7 @@ jobs:
       - name: Get latest version of stable Rust
         run: rustup update stable
       - name: Install Protoc
-        uses: arduino/setup-protoc@v1
+        uses: arduino/setup-protoc@e52d9eb8f7b63115df1ac544a1376fdbf5a39612
         with:
           repo-token: ${{ secrets.GITHUB_TOKEN }}
       - name: Install ganache
@@ -285,7 +287,7 @@ jobs:
       - name: Get latest version of stable Rust
         run: rustup update stable
       - name: Install Protoc
-        uses: arduino/setup-protoc@v1
+        uses: arduino/setup-protoc@e52d9eb8f7b63115df1ac544a1376fdbf5a39612
         with:
           repo-token: ${{ secrets.GITHUB_TOKEN }}
       - name: Run exec engine integration tests in release
@@ -299,7 +301,7 @@ jobs:
       - name: Get latest version of stable Rust
         run: rustup update stable
       - name: Install Protoc
-        uses: arduino/setup-protoc@v1
+        uses: arduino/setup-protoc@e52d9eb8f7b63115df1ac544a1376fdbf5a39612
         with:
           repo-token: ${{ secrets.GITHUB_TOKEN }}
       - name: Typecheck benchmark code without running it
@@ -323,34 +325,13 @@ jobs:
       - name: Get latest version of stable Rust
         run: rustup update stable
       - name: Install Protoc
-        uses: arduino/setup-protoc@v1
+        uses: arduino/setup-protoc@e52d9eb8f7b63115df1ac544a1376fdbf5a39612
         with:
           repo-token: ${{ secrets.GITHUB_TOKEN }}
       - name: Lint code for quality and style with Clippy
         run: make lint
       - name: Certify Cargo.lock freshness
         run: git diff --exit-code Cargo.lock
-  disallowed-from-async-lint:
-    name: disallowed-from-async-lint
-    runs-on: ubuntu-latest
-    needs: cargo-fmt
-    continue-on-error: true
-    steps:
-      - uses: actions/checkout@v3
-      - name: Install SigP Clippy fork
-        run: |
-          cd ..
-          git clone https://github.com/michaelsproul/rust-clippy.git
-          cd rust-clippy
-          git checkout 31a49666ccfcd7963b63345d6ce757c373f22c2a
-          cargo build --release --bin cargo-clippy --bin clippy-driver
-          cargo build --release --bin cargo-clippy --bin clippy-driver -Zunstable-options --out-dir $(rustc --print=sysroot)/bin
-      - name: Install Protoc
-        uses: arduino/setup-protoc@v1
-        with:
-          repo-token: ${{ secrets.GITHUB_TOKEN }}
-      - name: Run Clippy with the disallowed-from-async lint
-        run: make nightly-lint
   check-msrv:
     name: check-msrv
     runs-on: ubuntu-latest
@@ -360,7 +341,7 @@ jobs:
       - name: Install Rust @ MSRV (${{ needs.extract-msrv.outputs.MSRV }})
         run: rustup override set ${{ needs.extract-msrv.outputs.MSRV }}
       - name: Install Protoc
-        uses: arduino/setup-protoc@v1
+        uses: arduino/setup-protoc@e52d9eb8f7b63115df1ac544a1376fdbf5a39612
         with:
           repo-token: ${{ secrets.GITHUB_TOKEN }}
       - name: Run cargo check
@@ -404,7 +385,7 @@ jobs:
       # NOTE: cargo-udeps version is pinned until this issue is resolved:
       # https://github.com/est31/cargo-udeps/issues/135
       - name: Install Protoc
-        uses: arduino/setup-protoc@v1
+        uses: arduino/setup-protoc@e52d9eb8f7b63115df1ac544a1376fdbf5a39612
         with:
           repo-token: ${{ secrets.GITHUB_TOKEN }}
       - name: Install cargo-udeps
Cargo.lock (generated, 2001 changes) — file diff suppressed because it is too large.
@@ -102,6 +102,7 @@ eth2_hashing = { path = "crypto/eth2_hashing" }
 tree_hash = { path = "consensus/tree_hash" }
 tree_hash_derive = { path = "consensus/tree_hash_derive" }
 eth2_serde_utils = { path = "consensus/serde_utils" }
+arbitrary = { git = "https://github.com/michaelsproul/arbitrary", rev="a572fd8743012a4f1ada5ee5968b1b3619c427ba" }

 [profile.maxperf]
 inherits = "release"
Makefile (4 changes)

@@ -21,14 +21,14 @@ CROSS_FEATURES ?= gnosis,slasher-lmdb,slasher-mdbx
 CROSS_PROFILE ?= release

 # List of features to use when running EF tests.
-EF_TEST_FEATURES ?= beacon_chain/withdrawals-processing
+EF_TEST_FEATURES ?=

 # Cargo profile for regular builds.
 PROFILE ?= release

 # List of all hard forks. This list is used to set env variables for several tests so that
 # they run for different forks.
-FORKS=phase0 altair merge
+FORKS=phase0 altair merge capella

 # Builds the Lighthouse binary in release (optimized).
 #
@@ -10,7 +10,7 @@ pub fn cli_app<'a, 'b>() -> App<'a, 'b> {
 }

 pub fn cli_run(wallet_base_dir: PathBuf) -> Result<(), String> {
-    let mgr = WalletManager::open(&wallet_base_dir)
+    let mgr = WalletManager::open(wallet_base_dir)
         .map_err(|e| format!("Unable to open --{}: {:?}", WALLETS_DIR_FLAG, e))?;

     for (name, _uuid) in mgr
@@ -1,6 +1,6 @@
 [package]
 name = "beacon_node"
-version = "3.3.0"
+version = "3.4.0"
 authors = ["Paul Hauner <paul@paulhauner.com>", "Age Manning <Age@AgeManning.com"]
 edition = "2021"

@@ -13,12 +13,6 @@ node_test_rig = { path = "../testing/node_test_rig" }

 [features]
 write_ssz_files = ["beacon_chain/write_ssz_files"] # Writes debugging .ssz files to /tmp during block processing.
-withdrawals-processing = [
-    "beacon_chain/withdrawals-processing",
-    "store/withdrawals-processing",
-    "execution_layer/withdrawals-processing",
-    "http_api/withdrawals-processing",
-]
 spec-minimal = ["beacon_chain/spec-minimal"]

 [dependencies]
@@ -10,12 +10,6 @@ default = ["participation_metrics"]
 write_ssz_files = [] # Writes debugging .ssz files to /tmp during block processing.
 participation_metrics = [] # Exposes validator participation metrics to Prometheus.
 fork_from_env = [] # Initialise the harness chain spec from the FORK_NAME env variable
-withdrawals-processing = [
-    "state_processing/withdrawals-processing",
-    "store/withdrawals-processing",
-    "execution_layer/withdrawals-processing",
-    "operation_pool/withdrawals-processing"
-]
 spec-minimal = ["kzg/minimal-spec"]

 [dev-dependencies]
@@ -24,6 +18,8 @@ environment = { path = "../../lighthouse/environment" }
 serde_json = "1.0.58"

 [dependencies]
+serde_json = "1.0.58"
+eth2_network_config = { path = "../../common/eth2_network_config"}
 merkle_proof = { path = "../../consensus/merkle_proof" }
 store = { path = "../store" }
 parking_lot = "0.12.0"
@@ -366,7 +366,6 @@ pub struct BeaconChain<T: BeaconChainTypes> {
     pub(crate) observed_attester_slashings:
         Mutex<ObservedOperations<AttesterSlashing<T::EthSpec>, T::EthSpec>>,
     /// Maintains a record of which validators we've seen BLS to execution changes for.
-    #[cfg(feature = "withdrawals-processing")]
     pub(crate) observed_bls_to_execution_changes:
         Mutex<ObservedOperations<SignedBlsToExecutionChange, T::EthSpec>>,
     /// The most recently validated light client finality update received on gossip.
@@ -2293,29 +2292,18 @@ impl<T: BeaconChainTypes> BeaconChain<T> {
         &self,
         bls_to_execution_change: SignedBlsToExecutionChange,
     ) -> Result<ObservationOutcome<SignedBlsToExecutionChange, T::EthSpec>, Error> {
-        #[cfg(feature = "withdrawals-processing")]
-        {
-            let current_fork = self.spec.fork_name_at_slot::<T::EthSpec>(self.slot()?);
-            if let ForkName::Base | ForkName::Altair | ForkName::Merge = current_fork {
-                // Disallow BLS to execution changes prior to the Capella fork.
-                return Err(Error::BlsToExecutionChangeBadFork(current_fork));
-            }
-
-            let wall_clock_state = self.wall_clock_state()?;
-
-            Ok(self
-                .observed_bls_to_execution_changes
-                .lock()
-                .verify_and_observe(bls_to_execution_change, &wall_clock_state, &self.spec)?)
+        let current_fork = self.spec.fork_name_at_slot::<T::EthSpec>(self.slot()?);
+        if let ForkName::Base | ForkName::Altair | ForkName::Merge = current_fork {
+            // Disallow BLS to execution changes prior to the Capella fork.
+            return Err(Error::BlsToExecutionChangeBadFork(current_fork));
         }

-        // TODO: remove this whole block once withdrawals-processing is removed
-        #[cfg(not(feature = "withdrawals-processing"))]
-        {
-            #[allow(clippy::drop_non_drop)]
-            drop(bls_to_execution_change);
-            Ok(ObservationOutcome::AlreadyKnown)
-        }
+        let wall_clock_state = self.wall_clock_state()?;
+
+        Ok(self
+            .observed_bls_to_execution_changes
+            .lock()
+            .verify_and_observe(bls_to_execution_change, &wall_clock_state, &self.spec)?)
     }

     /// Import a BLS to execution change to the op pool.
@@ -2324,12 +2312,8 @@ impl<T: BeaconChainTypes> BeaconChain<T> {
         bls_to_execution_change: SigVerifiedOp<SignedBlsToExecutionChange, T::EthSpec>,
     ) {
         if self.eth1_chain.is_some() {
-            #[cfg(feature = "withdrawals-processing")]
             self.op_pool
                 .insert_bls_to_execution_change(bls_to_execution_change);
-
-            #[cfg(not(feature = "withdrawals-processing"))]
-            drop(bls_to_execution_change);
         }
     }

@@ -4879,9 +4863,7 @@ impl<T: BeaconChainTypes> BeaconChain<T> {
                 .ok_or(Error::InvalidSlot(prepare_slot))?
                 .as_secs(),
             pre_payload_attributes.prev_randao,
-            execution_layer
-                .get_suggested_fee_recipient(proposer as u64)
-                .await,
+            execution_layer.get_suggested_fee_recipient(proposer).await,
             withdrawals,
         );

@@ -583,11 +583,13 @@ where
         mut self,
         auto_register: bool,
         validators: Vec<PublicKeyBytes>,
+        individual_metrics_threshold: usize,
         log: Logger,
     ) -> Self {
         self.validator_monitor = Some(ValidatorMonitor::new(
             validators,
             auto_register,
+            individual_metrics_threshold,
             log.clone(),
         ));
         self
@@ -815,7 +817,6 @@ where
             observed_voluntary_exits: <_>::default(),
             observed_proposer_slashings: <_>::default(),
             observed_attester_slashings: <_>::default(),
-            #[cfg(feature = "withdrawals-processing")]
             observed_bls_to_execution_changes: <_>::default(),
             latest_seen_finality_update: <_>::default(),
             latest_seen_optimistic_update: <_>::default(),
@@ -1010,6 +1011,7 @@ fn descriptive_db_error(item: &str, error: &StoreError) -> String {
 #[cfg(test)]
 mod test {
     use super::*;
+    use crate::validator_monitor::DEFAULT_INDIVIDUAL_TRACKING_THRESHOLD;
     use eth2_hashing::hash;
     use genesis::{
         generate_deterministic_keypairs, interop_genesis_state, DEFAULT_ETH1_BLOCK_HASH,
@@ -1066,7 +1068,12 @@ mod test {
             .testing_slot_clock(Duration::from_secs(1))
             .expect("should configure testing slot clock")
             .shutdown_sender(shutdown_tx)
-            .monitor_validators(true, vec![], log.clone())
+            .monitor_validators(
+                true,
+                vec![],
+                DEFAULT_INDIVIDUAL_TRACKING_THRESHOLD,
+                log.clone(),
+            )
             .build()
             .expect("should build");

@@ -65,6 +65,8 @@ pub struct ChainConfig {
     /// Low values are useful for execution engines which don't improve their payload after the
     /// first call, and high values are useful for ensuring the EL is given ample notice.
     pub prepare_payload_lookahead: Duration,
+    /// Use EL-free optimistic sync for the finalized part of the chain.
+    pub optimistic_finalized_sync: bool,
 }

 impl Default for ChainConfig {
@@ -89,6 +91,8 @@ impl Default for ChainConfig {
             count_unrealized_full: CountUnrealizedFull::default(),
             checkpoint_sync_url_timeout: 60,
             prepare_payload_lookahead: Duration::from_secs(4),
+            // This value isn't actually read except in tests.
+            optimistic_finalized_sync: true,
         }
     }
 }
@@ -15,7 +15,7 @@ use crate::{
 use execution_layer::{BlockProposalContents, BuilderParams, PayloadAttributes, PayloadStatus};
 use fork_choice::{InvalidationOperation, PayloadVerificationStatus};
 use proto_array::{Block as ProtoBlock, ExecutionStatus};
-use slog::debug;
+use slog::{debug, warn};
 use slot_clock::SlotClock;
 use state_processing::per_block_processing::{
     compute_timestamp_at_slot, get_expected_withdrawals, is_execution_enabled,
@@ -60,26 +60,51 @@ impl<T: BeaconChainTypes> PayloadNotifier<T> {
         state: &BeaconState<T::EthSpec>,
         notify_execution_layer: NotifyExecutionLayer,
     ) -> Result<Self, BlockError<T::EthSpec>> {
-        let payload_verification_status = match notify_execution_layer {
-            NotifyExecutionLayer::No => Some(PayloadVerificationStatus::Optimistic),
-            NotifyExecutionLayer::Yes => {
-                if is_execution_enabled(state, block.message().body()) {
-                    // Perform the initial stages of payload verification.
-                    //
-                    // We will duplicate these checks again during `per_block_processing`, however these checks
-                    // are cheap and doing them here ensures we protect the execution engine from junk.
-                    partially_verify_execution_payload::<T::EthSpec, FullPayload<T::EthSpec>>(
-                        state,
-                        block.slot(),
-                        block.message().execution_payload()?,
-                        &chain.spec,
-                    )
-                    .map_err(BlockError::PerBlockProcessingError)?;
-                    None
-                } else {
-                    Some(PayloadVerificationStatus::Irrelevant)
+        let payload_verification_status = if is_execution_enabled(state, block.message().body()) {
+            // Perform the initial stages of payload verification.
+            //
+            // We will duplicate these checks again during `per_block_processing`, however these
+            // checks are cheap and doing them here ensures we have verified them before marking
+            // the block as optimistically imported. This is particularly relevant in the case
+            // where we do not send the block to the EL at all.
+            let block_message = block.message();
+            let payload = block_message.execution_payload()?;
+            partially_verify_execution_payload::<_, FullPayload<_>>(
+                state,
+                block.slot(),
+                payload,
+                &chain.spec,
+            )
+            .map_err(BlockError::PerBlockProcessingError)?;
+
+            match notify_execution_layer {
+                NotifyExecutionLayer::No if chain.config.optimistic_finalized_sync => {
+                    // Verify the block hash here in Lighthouse and immediately mark the block as
+                    // optimistically imported. This saves a lot of roundtrips to the EL.
+                    let execution_layer = chain
+                        .execution_layer
+                        .as_ref()
+                        .ok_or(ExecutionPayloadError::NoExecutionConnection)?;
+
+                    if let Err(e) =
+                        execution_layer.verify_payload_block_hash(payload.execution_payload_ref())
+                    {
+                        warn!(
+                            chain.log,
+                            "Falling back to slow block hash verification";
+                            "block_number" => payload.block_number(),
+                            "info" => "you can silence this warning with --disable-optimistic-finalized-sync",
+                            "error" => ?e,
+                        );
+                        None
+                    } else {
+                        Some(PayloadVerificationStatus::Optimistic)
+                    }
                 }
+                _ => None,
             }
+        } else {
+            Some(PayloadVerificationStatus::Irrelevant)
         };

         Ok(Self {
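The hunk above changes when the EL is consulted: pre-merge blocks stay `Irrelevant`, and finalized-range blocks can now be imported optimistically after a local block-hash check instead of a roundtrip to the execution engine. A hedged sketch of that decision, with a plain enum and booleans standing in for the Lighthouse types (the real code threads `BeaconState`, `NotifyExecutionLayer`, and the chain config):

```rust
// Sketch only: mirrors the control flow of the hunk above, not the real API.
#[derive(Debug, PartialEq)]
enum Verdict {
    Irrelevant,   // pre-merge block: no payload to verify
    Optimistic,   // hash checked locally, imported optimistically
    Undetermined, // deferred: full verification happens elsewhere
}

fn decide(
    execution_enabled: bool,
    notify_el: bool,
    optimistic_finalized_sync: bool,
    local_hash_ok: bool,
) -> Verdict {
    if !execution_enabled {
        Verdict::Irrelevant
    } else if !notify_el && optimistic_finalized_sync {
        if local_hash_ok {
            Verdict::Optimistic
        } else {
            // Falls back to the slower path, as the warn! log above notes.
            Verdict::Undetermined
        }
    } else {
        Verdict::Undetermined
    }
}

fn main() {
    assert_eq!(decide(false, true, true, true), Verdict::Irrelevant);
    assert_eq!(decide(true, false, true, true), Verdict::Optimistic);
    assert_eq!(decide(true, true, true, true), Verdict::Undetermined);
}
```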
@@ -460,7 +485,7 @@ where
         if is_terminal_block_hash_set && !is_activation_epoch_reached {
             // Use the "empty" payload if there's a terminal block hash, but we haven't reached the
             // terminal block epoch yet.
-            return Ok(BlockProposalContents::default_at_fork(fork));
+            return BlockProposalContents::default_at_fork(fork).map_err(Into::into);
         }

         let terminal_pow_block_hash = execution_layer
@@ -473,7 +498,7 @@ where
         } else {
             // If the merge transition hasn't occurred yet and the EL hasn't found the terminal
             // block, return an "empty" payload.
-            return Ok(BlockProposalContents::default_at_fork(fork));
+            return BlockProposalContents::default_at_fork(fork).map_err(Into::into);
         }
     } else {
         latest_execution_payload_header_block_hash
@@ -402,7 +402,7 @@ impl<T: AggregateMap> NaiveAggregationPool<T> {

     /// Returns the total number of items stored in `self`.
     pub fn num_items(&self) -> usize {
-        self.maps.iter().map(|(_, map)| map.len()).sum()
+        self.maps.values().map(T::len).sum()
     }

     /// Returns an aggregated `T::Value` with the given `T::Data`, if any.
@@ -448,11 +448,7 @@ impl<T: AggregateMap> NaiveAggregationPool<T> {
         // If we have too many maps, remove the lowest amount to ensure we only have
         // `SLOTS_RETAINED` left.
         if self.maps.len() > SLOTS_RETAINED {
-            let mut slots = self
-                .maps
-                .iter()
-                .map(|(slot, _map)| *slot)
-                .collect::<Vec<_>>();
+            let mut slots = self.maps.keys().copied().collect::<Vec<_>>();
             // Sort is generally pretty slow, however `SLOTS_RETAINED` is quite low so it should be
             // negligible.
             slots.sort_unstable();
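Both hunks above are pure refactors: iterating `values()` (or `keys()`) directly avoids destructuring tuple pairs only to discard one side. A minimal standalone sketch of the same pattern, using `BTreeMap` and `Vec` rather than Lighthouse's `AggregateMap` types:

```rust
use std::collections::BTreeMap;

fn main() {
    let mut maps: BTreeMap<u64, Vec<u8>> = BTreeMap::new();
    maps.insert(1, vec![10, 20]);
    maps.insert(2, vec![30]);

    // Equivalent to the old `.iter().map(|(_, map)| map.len()).sum()`,
    // but without destructuring a key we never use.
    let num_items: usize = maps.values().map(Vec::len).sum();
    assert_eq!(num_items, 3);

    // Likewise, `.keys().copied()` replaces `.iter().map(|(slot, _map)| *slot)`.
    let mut slots: Vec<u64> = maps.keys().copied().collect();
    slots.sort_unstable();
    assert_eq!(slots, vec![1, 2]);
}
```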
@@ -6,12 +6,9 @@ use std::collections::HashSet;
 use std::marker::PhantomData;
 use types::{
     AttesterSlashing, BeaconState, ChainSpec, EthSpec, ForkName, ProposerSlashing,
-    SignedVoluntaryExit, Slot,
+    SignedBlsToExecutionChange, SignedVoluntaryExit, Slot,
 };

-#[cfg(feature = "withdrawals-processing")]
-use types::SignedBlsToExecutionChange;
-
 /// Number of validator indices to store on the stack in `observed_validators`.
 pub const SMALL_VEC_SIZE: usize = 8;

@@ -83,7 +80,6 @@ impl<E: EthSpec> ObservableOperation<E> for AttesterSlashing<E> {
     }
 }

-#[cfg(feature = "withdrawals-processing")]
 impl<E: EthSpec> ObservableOperation<E> for SignedBlsToExecutionChange {
     fn observed_validators(&self) -> SmallVec<[u64; SMALL_VEC_SIZE]> {
         smallvec![self.message.validator_index]
@@ -1,6 +1,7 @@
 //! Utilities for managing database schema changes.
 mod migration_schema_v12;
 mod migration_schema_v13;
+mod migration_schema_v14;

 use crate::beacon_chain::{BeaconChainTypes, ETH1_CACHE_DB_KEY};
 use crate::eth1_chain::SszEth1;
@@ -114,6 +115,14 @@ pub fn migrate_schema<T: BeaconChainTypes>(

             Ok(())
         }
+        (SchemaVersion(13), SchemaVersion(14)) => {
+            let ops = migration_schema_v14::upgrade_to_v14::<T>(db.clone(), log)?;
+            db.store_schema_version_atomically(to, ops)
+        }
+        (SchemaVersion(14), SchemaVersion(13)) => {
+            let ops = migration_schema_v14::downgrade_from_v14::<T>(db.clone(), log)?;
+            db.store_schema_version_atomically(to, ops)
+        }
         // Anything else is an error.
         (_, _) => Err(HotColdDBError::UnsupportedSchemaVersion {
             target_version: to,
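The two new match arms make the v13⇄v14 migration reversible. A minimal sketch of this dispatch shape, with a bare `SchemaVersion` newtype standing in for Lighthouse's store types (the real arms thread a database handle and logger, and commit the returned ops atomically):

```rust
#[derive(Clone, Copy, PartialEq, Eq, Debug)]
struct SchemaVersion(u64);

// Each supported (from, to) pair gets an explicit arm; anything else is an
// error rather than a silent no-op.
fn migrate_schema(from: SchemaVersion, to: SchemaVersion) -> Result<(), String> {
    match (from, to) {
        (SchemaVersion(13), SchemaVersion(14)) => Ok(()), // upgrade_to_v14
        (SchemaVersion(14), SchemaVersion(13)) => Ok(()), // downgrade_from_v14
        (_, _) => Err(format!("unsupported migration: {from:?} -> {to:?}")),
    }
}

fn main() {
    assert!(migrate_schema(SchemaVersion(13), SchemaVersion(14)).is_ok());
    assert!(migrate_schema(SchemaVersion(12), SchemaVersion(14)).is_err());
}
```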
@@ -168,16 +168,14 @@ pub fn downgrade_from_v12<T: BeaconChainTypes>(
     log: Logger,
 ) -> Result<Vec<KeyValueStoreOp>, Error> {
     // Load a V12 op pool and transform it to V5.
-    let PersistedOperationPoolV12 {
+    let PersistedOperationPoolV12::<T::EthSpec> {
         attestations,
         sync_contributions,
         attester_slashings,
         proposer_slashings,
         voluntary_exits,
-    } = if let Some(PersistedOperationPool::<T::EthSpec>::V12(op_pool)) =
-        db.get_item(&OP_POOL_DB_KEY)?
-    {
-        op_pool
+    } = if let Some(op_pool_v12) = db.get_item(&OP_POOL_DB_KEY)? {
+        op_pool_v12
     } else {
         debug!(log, "Nothing to do, no operation pool stored");
         return Ok(vec![]);
@@ -0,0 +1,75 @@
+use crate::beacon_chain::{BeaconChainTypes, OP_POOL_DB_KEY};
+use operation_pool::{
+    PersistedOperationPool, PersistedOperationPoolV12, PersistedOperationPoolV14,
+};
+use slog::{debug, info, Logger};
+use std::sync::Arc;
+use store::{Error, HotColdDB, KeyValueStoreOp, StoreItem};
+
+pub fn upgrade_to_v14<T: BeaconChainTypes>(
+    db: Arc<HotColdDB<T::EthSpec, T::HotStore, T::ColdStore>>,
+    log: Logger,
+) -> Result<Vec<KeyValueStoreOp>, Error> {
+    // Load a V12 op pool and transform it to V14.
+    let PersistedOperationPoolV12::<T::EthSpec> {
+        attestations,
+        sync_contributions,
+        attester_slashings,
+        proposer_slashings,
+        voluntary_exits,
+    } = if let Some(op_pool_v12) = db.get_item(&OP_POOL_DB_KEY)? {
+        op_pool_v12
+    } else {
+        debug!(log, "Nothing to do, no operation pool stored");
+        return Ok(vec![]);
+    };
+
+    // initialize with empty vector
+    let bls_to_execution_changes = vec![];
+    let v14 = PersistedOperationPool::V14(PersistedOperationPoolV14 {
+        attestations,
+        sync_contributions,
+        attester_slashings,
+        proposer_slashings,
+        voluntary_exits,
+        bls_to_execution_changes,
+    });
+    Ok(vec![v14.as_kv_store_op(OP_POOL_DB_KEY)])
+}
+
+pub fn downgrade_from_v14<T: BeaconChainTypes>(
+    db: Arc<HotColdDB<T::EthSpec, T::HotStore, T::ColdStore>>,
+    log: Logger,
+) -> Result<Vec<KeyValueStoreOp>, Error> {
+    // Load a V14 op pool and transform it to V12.
+    let PersistedOperationPoolV14 {
+        attestations,
+        sync_contributions,
+        attester_slashings,
+        proposer_slashings,
+        voluntary_exits,
+        bls_to_execution_changes,
+    } = if let Some(PersistedOperationPool::<T::EthSpec>::V14(op_pool)) =
+        db.get_item(&OP_POOL_DB_KEY)?
+    {
+        op_pool
+    } else {
+        debug!(log, "Nothing to do, no operation pool stored");
+        return Ok(vec![]);
+    };
+
+    info!(
+        log,
+        "Dropping bls_to_execution_changes from pool";
+        "count" => bls_to_execution_changes.len(),
+    );
+
+    let v12 = PersistedOperationPoolV12 {
+        attestations,
+        sync_contributions,
+        attester_slashings,
+        proposer_slashings,
+        voluntary_exits,
+    };
+    Ok(vec![v12.as_kv_store_op(OP_POOL_DB_KEY)])
+}
@@ -2,6 +2,7 @@ pub use crate::persisted_beacon_chain::PersistedBeaconChain;
 pub use crate::{
     beacon_chain::{BEACON_CHAIN_DB_KEY, ETH1_CACHE_DB_KEY, FORK_CHOICE_DB_KEY, OP_POOL_DB_KEY},
     migrate::MigratorConfig,
+    validator_monitor::DEFAULT_INDIVIDUAL_TRACKING_THRESHOLD,
     BeaconChainError, NotifyExecutionLayer, ProduceBlockVerification,
 };
 use crate::{
@@ -11,11 +12,11 @@ use crate::{
     StateSkipConfig,
 };
 use bls::get_withdrawal_credentials;
-use execution_layer::test_utils::DEFAULT_JWT_SECRET;
 use execution_layer::{
     auth::JwtKey,
     test_utils::{
-        ExecutionBlockGenerator, MockExecutionLayer, TestingBuilder, DEFAULT_TERMINAL_BLOCK,
+        ExecutionBlockGenerator, MockExecutionLayer, TestingBuilder, DEFAULT_JWT_SECRET,
+        DEFAULT_TERMINAL_BLOCK,
     },
     ExecutionLayer,
 };
@@ -23,6 +24,7 @@ use fork_choice::CountUnrealized;
 use futures::channel::mpsc::Receiver;
 pub use genesis::{interop_genesis_state, DEFAULT_ETH1_BLOCK_HASH};
 use int_to_bytes::int_to_bytes32;
+use kzg::TrustedSetup;
 use merkle_proof::MerkleTree;
 use parking_lot::Mutex;
 use parking_lot::RwLockWriteGuard;
@@ -366,6 +368,7 @@ where
             .collect::<Result<_, _>>()
             .unwrap();

+        let spec = MainnetEthSpec::default_spec();
         let config = execution_layer::Config {
             execution_endpoints: urls,
             secret_files: vec![],
@@ -376,6 +379,7 @@ where
             config,
             self.runtime.task_executor.clone(),
             self.log.clone(),
+            &spec,
         )
         .unwrap();

@@ -383,15 +387,42 @@ where
         self
     }

+    pub fn recalculate_fork_times_with_genesis(mut self, genesis_time: u64) -> Self {
+        let mock = self
+            .mock_execution_layer
+            .as_mut()
+            .expect("must have mock execution layer to recalculate fork times");
+        let spec = self
+            .spec
+            .clone()
+            .expect("cannot recalculate fork times without spec");
+        mock.server.execution_block_generator().shanghai_time =
+            spec.capella_fork_epoch.map(|epoch| {
+                genesis_time + spec.seconds_per_slot * E::slots_per_epoch() * epoch.as_u64()
+            });
+        mock.server.execution_block_generator().eip4844_time =
+            spec.eip4844_fork_epoch.map(|epoch| {
+                genesis_time + spec.seconds_per_slot * E::slots_per_epoch() * epoch.as_u64()
+            });
+
+        self
+    }
+
     pub fn mock_execution_layer(mut self) -> Self {
         let spec = self.spec.clone().expect("cannot build without spec");
+        let shanghai_time = spec.capella_fork_epoch.map(|epoch| {
+            HARNESS_GENESIS_TIME + spec.seconds_per_slot * E::slots_per_epoch() * epoch.as_u64()
+        });
+        let eip4844_time = spec.eip4844_fork_epoch.map(|epoch| {
+            HARNESS_GENESIS_TIME + spec.seconds_per_slot * E::slots_per_epoch() * epoch.as_u64()
+        });
         let mock = MockExecutionLayer::new(
             self.runtime.task_executor.clone(),
-            spec.terminal_total_difficulty,
             DEFAULT_TERMINAL_BLOCK,
-            spec.terminal_block_hash,
-            spec.terminal_block_hash_activation_epoch,
+            shanghai_time,
+            eip4844_time,
             Some(JwtKey::from_slice(&DEFAULT_JWT_SECRET).unwrap()),
+            spec,
             None,
         );
         self.execution_layer = Some(mock.el.clone());
@@ -405,13 +436,19 @@ where
         let builder_url = SensitiveUrl::parse(format!("http://127.0.0.1:{port}").as_str()).unwrap();

         let spec = self.spec.clone().expect("cannot build without spec");
+        let shanghai_time = spec.capella_fork_epoch.map(|epoch| {
+            HARNESS_GENESIS_TIME + spec.seconds_per_slot * E::slots_per_epoch() * epoch.as_u64()
+        });
+        let eip4844_time = spec.eip4844_fork_epoch.map(|epoch| {
+            HARNESS_GENESIS_TIME + spec.seconds_per_slot * E::slots_per_epoch() * epoch.as_u64()
+        });
         let mock_el = MockExecutionLayer::new(
             self.runtime.task_executor.clone(),
-            spec.terminal_total_difficulty,
             DEFAULT_TERMINAL_BLOCK,
-            spec.terminal_block_hash,
-            spec.terminal_block_hash_activation_epoch,
+            shanghai_time,
+            eip4844_time,
             Some(JwtKey::from_slice(&DEFAULT_JWT_SECRET).unwrap()),
+            spec.clone(),
             Some(builder_url.clone()),
         )
         .move_to_terminal_block();
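The fork timestamps above are all derived the same way: seconds-per-slot × slots-per-epoch × fork-epoch, offset from genesis. A worked example with mainnet-style timing (12-second slots, 32-slot epochs); the genesis time and fork epoch here are illustrative values only, not taken from the commit:

```rust
fn main() {
    // Illustrative inputs; the harness reads these from its ChainSpec.
    let genesis_time: u64 = 1_606_824_023;
    let seconds_per_slot: u64 = 12;
    let slots_per_epoch: u64 = 32;
    let capella_fork_epoch: u64 = 5; // hypothetical fork epoch

    let shanghai_time =
        genesis_time + seconds_per_slot * slots_per_epoch * capella_fork_epoch;

    // 12 s * 32 slots * 5 epochs = 1920 seconds after genesis.
    assert_eq!(shanghai_time - genesis_time, 1920);
    println!("shanghai_time = {shanghai_time}");
}
```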
@@ -456,6 +493,10 @@ where
         let validator_keypairs = self
             .validator_keypairs
             .expect("cannot build without validator keypairs");
+        let trusted_setup: TrustedSetup =
+            serde_json::from_reader(eth2_network_config::TRUSTED_SETUP)
+                .map_err(|e| format!("Unable to read trusted setup file: {}", e))
+                .unwrap();

         let mut builder = BeaconChainBuilder::new(self.eth_spec_instance)
             .logger(log.clone())
@@ -472,7 +513,8 @@ where
                 log.clone(),
                 5,
             )))
-            .monitor_validators(true, vec![], log);
+            .monitor_validators(true, vec![], DEFAULT_INDIVIDUAL_TRACKING_THRESHOLD, log)
+            .trusted_setup(trusted_setup);

         builder = if let Some(mutator) = self.initial_mutator {
             mutator(builder)
@@ -1459,7 +1501,7 @@ where
         let proposer_index = state.get_beacon_proposer_index(slot, &self.spec).unwrap();

         let signed_block = block.sign(
-            &self.validator_keypairs[proposer_index as usize].sk,
+            &self.validator_keypairs[proposer_index].sk,
             &state.fork(),
             state.genesis_validators_root(),
             &self.spec,
@@ -21,10 +21,21 @@ use types::{
     SignedContributionAndProof, Slot, SyncCommitteeMessage, VoluntaryExit,
 };

+/// Used for Prometheus labels.
+///
+/// We've used `total` for this value to align with Nimbus, as per:
+/// https://github.com/sigp/lighthouse/pull/3728#issuecomment-1375173063
+const TOTAL_LABEL: &str = "total";
+
 /// The validator monitor collects per-epoch data about each monitored validator. Historical data
 /// will be kept around for `HISTORIC_EPOCHS` before it is pruned.
 pub const HISTORIC_EPOCHS: usize = 4;

+/// Once the validator monitor reaches this number of validators it will stop
+/// tracking their metrics/logging individually in an effort to reduce
+/// Prometheus cardinality and log volume.
+pub const DEFAULT_INDIVIDUAL_TRACKING_THRESHOLD: usize = 64;
+
 #[derive(Debug)]
 pub enum Error {
     InvalidPubkey(String),
@@ -258,16 +269,27 @@ pub struct ValidatorMonitor<T> {
     indices: HashMap<u64, PublicKeyBytes>,
     /// If true, allow the automatic registration of validators.
     auto_register: bool,
+    /// Once the number of monitored validators goes above this threshold, we
+    /// will stop tracking metrics/logs on a per-validator basis. This prevents
+    /// large validator counts causing infeasibly high cardinailty for
+    /// Prometheus and high log volumes.
+    individual_tracking_threshold: usize,
     log: Logger,
     _phantom: PhantomData<T>,
 }

 impl<T: EthSpec> ValidatorMonitor<T> {
-    pub fn new(pubkeys: Vec<PublicKeyBytes>, auto_register: bool, log: Logger) -> Self {
+    pub fn new(
+        pubkeys: Vec<PublicKeyBytes>,
+        auto_register: bool,
+        individual_tracking_threshold: usize,
+        log: Logger,
+    ) -> Self {
         let mut s = Self {
             validators: <_>::default(),
             indices: <_>::default(),
             auto_register,
+            individual_tracking_threshold,
             log,
             _phantom: PhantomData,
         };
@@ -277,6 +299,13 @@ impl<T: EthSpec> ValidatorMonitor<T> {
         s
     }

+    /// Returns `true` when the validator count is sufficiently low enough to
+    /// emit metrics and logs on a per-validator basis (rather than just an
+    /// aggregated basis).
+    fn individual_tracking(&self) -> bool {
+        self.validators.len() <= self.individual_tracking_threshold
+    }
+
     /// Add some validators to `self` for additional monitoring.
     fn add_validator_pubkey(&mut self, pubkey: PublicKeyBytes) {
         let index_opt = self
@@ -317,6 +346,12 @@ impl<T: EthSpec> ValidatorMonitor<T> {
         for monitored_validator in self.validators.values() {
             if let Some(i) = monitored_validator.index {
                 monitored_validator.touch_epoch_summary(current_epoch);
+
+                // Only log the per-validator metrics if it's enabled.
+                if !self.individual_tracking() {
+                    continue;
+                }
+
                 let i = i as usize;
                 let id = &monitored_validator.id;

@@ -379,6 +414,24 @@ impl<T: EthSpec> ValidatorMonitor<T> {
         }
     }
 
+    /// Run `func` with the `TOTAL_LABEL` and optionally the
+    /// `individual_id`.
+    ///
+    /// This function is used for registering metrics that can be applied to
+    /// both all validators and an individual validator. For example, the count
+    /// of missed head votes can be aggregated across all validators in a single
+    /// metric and also tracked on a per-validator basis.
+    ///
+    /// We allow disabling tracking metrics on an individual validator basis
+    /// since it can result in untenable cardinality with high validator counts.
+    fn aggregatable_metric<F: Fn(&str)>(&self, individual_id: &str, func: F) {
+        func(TOTAL_LABEL);
+
+        if self.individual_tracking() {
+            func(individual_id);
+        }
+    }
+
     pub fn process_validator_statuses(
         &self,
         epoch: Epoch,
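Every metric site below follows the same shape; a minimal sketch (the counter name here is illustrative, not one of the real `VALIDATOR_MONITOR_*` metrics):

    // The closure always runs once with TOTAL_LABEL; it runs a second time
    // with the validator's own id only while individual tracking is enabled.
    self.aggregatable_metric(id, |label| {
        metrics::inc_counter_vec(&metrics::SOME_VALIDATOR_COUNTER, &[label]);
    });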
@@ -431,72 +484,92 @@ impl<T: EthSpec> ValidatorMonitor<T> {
                 // For Base states, this will be *any* attestation whatsoever. For Altair states,
                 // this will be any attestation that matched a "timely" flag.
                 if previous_epoch_matched_any {
-                    metrics::inc_counter_vec(
-                        &metrics::VALIDATOR_MONITOR_PREV_EPOCH_ON_CHAIN_ATTESTER_HIT,
-                        &[id],
-                    );
+                    self.aggregatable_metric(id, |label| {
+                        metrics::inc_counter_vec(
+                            &metrics::VALIDATOR_MONITOR_PREV_EPOCH_ON_CHAIN_ATTESTER_HIT,
+                            &[label],
+                        )
+                    });
                     attestation_success.push(id);
-                    debug!(
-                        self.log,
-                        "Previous epoch attestation success";
-                        "matched_source" => previous_epoch_matched_source,
-                        "matched_target" => previous_epoch_matched_target,
-                        "matched_head" => previous_epoch_matched_head,
-                        "epoch" => prev_epoch,
-                        "validator" => id,
-                    )
+                    if self.individual_tracking() {
+                        debug!(
+                            self.log,
+                            "Previous epoch attestation success";
+                            "matched_source" => previous_epoch_matched_source,
+                            "matched_target" => previous_epoch_matched_target,
+                            "matched_head" => previous_epoch_matched_head,
+                            "epoch" => prev_epoch,
+                            "validator" => id,
+                        )
+                    }
                 } else {
-                    metrics::inc_counter_vec(
-                        &metrics::VALIDATOR_MONITOR_PREV_EPOCH_ON_CHAIN_ATTESTER_MISS,
-                        &[id],
-                    );
+                    self.aggregatable_metric(id, |label| {
+                        metrics::inc_counter_vec(
+                            &metrics::VALIDATOR_MONITOR_PREV_EPOCH_ON_CHAIN_ATTESTER_MISS,
+                            &[label],
+                        );
+                    });
                     attestation_miss.push(id);
-                    debug!(
-                        self.log,
-                        "Previous epoch attestation missing";
-                        "epoch" => prev_epoch,
-                        "validator" => id,
-                    )
+                    if self.individual_tracking() {
+                        debug!(
+                            self.log,
+                            "Previous epoch attestation missing";
+                            "epoch" => prev_epoch,
+                            "validator" => id,
+                        )
+                    }
                 }
 
                 // Indicates if any on-chain attestation hit the head.
                 if previous_epoch_matched_head {
-                    metrics::inc_counter_vec(
-                        &metrics::VALIDATOR_MONITOR_PREV_EPOCH_ON_CHAIN_HEAD_ATTESTER_HIT,
-                        &[id],
-                    );
+                    self.aggregatable_metric(id, |label| {
+                        metrics::inc_counter_vec(
+                            &metrics::VALIDATOR_MONITOR_PREV_EPOCH_ON_CHAIN_HEAD_ATTESTER_HIT,
+                            &[label],
+                        );
+                    });
                 } else {
-                    metrics::inc_counter_vec(
-                        &metrics::VALIDATOR_MONITOR_PREV_EPOCH_ON_CHAIN_HEAD_ATTESTER_MISS,
-                        &[id],
-                    );
+                    self.aggregatable_metric(id, |label| {
+                        metrics::inc_counter_vec(
+                            &metrics::VALIDATOR_MONITOR_PREV_EPOCH_ON_CHAIN_HEAD_ATTESTER_MISS,
+                            &[label],
+                        );
+                    });
                     head_miss.push(id);
-                    debug!(
-                        self.log,
-                        "Attestation failed to match head";
-                        "epoch" => prev_epoch,
-                        "validator" => id,
-                    );
+                    if self.individual_tracking() {
+                        debug!(
+                            self.log,
+                            "Attestation failed to match head";
+                            "epoch" => prev_epoch,
+                            "validator" => id,
+                        );
+                    }
                 }
 
                 // Indicates if any on-chain attestation hit the target.
                 if previous_epoch_matched_target {
-                    metrics::inc_counter_vec(
-                        &metrics::VALIDATOR_MONITOR_PREV_EPOCH_ON_CHAIN_TARGET_ATTESTER_HIT,
-                        &[id],
-                    );
+                    self.aggregatable_metric(id, |label| {
+                        metrics::inc_counter_vec(
+                            &metrics::VALIDATOR_MONITOR_PREV_EPOCH_ON_CHAIN_TARGET_ATTESTER_HIT,
+                            &[label],
+                        );
+                    });
                 } else {
-                    metrics::inc_counter_vec(
-                        &metrics::VALIDATOR_MONITOR_PREV_EPOCH_ON_CHAIN_TARGET_ATTESTER_MISS,
-                        &[id],
-                    );
+                    self.aggregatable_metric(id, |label| {
+                        metrics::inc_counter_vec(
+                            &metrics::VALIDATOR_MONITOR_PREV_EPOCH_ON_CHAIN_TARGET_ATTESTER_MISS,
+                            &[label],
+                        );
+                    });
                     target_miss.push(id);
-                    debug!(
-                        self.log,
-                        "Attestation failed to match target";
-                        "epoch" => prev_epoch,
-                        "validator" => id,
-                    );
+                    if self.individual_tracking() {
+                        debug!(
+                            self.log,
+                            "Attestation failed to match target";
+                            "epoch" => prev_epoch,
+                            "validator" => id,
+                        );
+                    }
                 }
 
                 // Get the minimum value among the validator monitor observed inclusion distance
@@ -511,21 +584,25 @@ impl<T: EthSpec> ValidatorMonitor<T> {
                 if let Some(inclusion_delay) = min_inclusion_distance {
                     if inclusion_delay > spec.min_attestation_inclusion_delay {
                         suboptimal_inclusion.push(id);
-                        debug!(
-                            self.log,
-                            "Potential sub-optimal inclusion delay";
-                            "optimal" => spec.min_attestation_inclusion_delay,
-                            "delay" => inclusion_delay,
-                            "epoch" => prev_epoch,
-                            "validator" => id,
-                        );
+                        if self.individual_tracking() {
+                            debug!(
+                                self.log,
+                                "Potential sub-optimal inclusion delay";
+                                "optimal" => spec.min_attestation_inclusion_delay,
+                                "delay" => inclusion_delay,
+                                "epoch" => prev_epoch,
+                                "validator" => id,
+                            );
+                        }
                     }
 
-                    metrics::set_int_gauge(
-                        &metrics::VALIDATOR_MONITOR_PREV_EPOCH_ON_CHAIN_INCLUSION_DISTANCE,
-                        &[id],
-                        inclusion_delay as i64,
-                    );
+                    if self.individual_tracking() {
+                        metrics::set_int_gauge(
+                            &metrics::VALIDATOR_MONITOR_PREV_EPOCH_ON_CHAIN_INCLUSION_DISTANCE,
+                            &[id],
+                            inclusion_delay as i64,
+                        );
+                    }
                 }
 
                 // Indicates the number of sync committee signatures that made it into
@@ -536,13 +613,19 @@ impl<T: EthSpec> ValidatorMonitor<T> {
                 let current_epoch = epoch - 1;
                 if let Some(sync_committee) = summary.sync_committee() {
                     if sync_committee.contains(pubkey) {
-                        metrics::set_int_gauge(
-                            &metrics::VALIDATOR_MONITOR_VALIDATOR_IN_CURRENT_SYNC_COMMITTEE,
-                            &[id],
-                            1,
-                        );
+                        if self.individual_tracking() {
+                            metrics::set_int_gauge(
+                                &metrics::VALIDATOR_MONITOR_VALIDATOR_IN_CURRENT_SYNC_COMMITTEE,
+                                &[id],
+                                1,
+                            );
+                        }
                         let epoch_summary = monitored_validator.summaries.read();
                         if let Some(summary) = epoch_summary.get(&current_epoch) {
+                            // This log is not gated by
+                            // `self.individual_tracking()` since the number of
+                            // logs that can be generated is capped by the size
+                            // of the sync committee.
                             info!(
                                 self.log,
                                 "Current epoch sync signatures";
@@ -552,7 +635,7 @@ impl<T: EthSpec> ValidatorMonitor<T> {
                                 "validator" => id,
                             );
                         }
-                    } else {
+                    } else if self.individual_tracking() {
                         metrics::set_int_gauge(
                             &metrics::VALIDATOR_MONITOR_VALIDATOR_IN_CURRENT_SYNC_COMMITTEE,
                             &[id],
@@ -631,10 +714,7 @@ impl<T: EthSpec> ValidatorMonitor<T> {
 
     // Return the `id`s of all monitored validators.
     pub fn get_all_monitored_validators(&self) -> Vec<String> {
-        self.validators
-            .iter()
-            .map(|(_, val)| val.id.clone())
-            .collect()
+        self.validators.values().map(|val| val.id.clone()).collect()
     }
 
     /// If `self.auto_register == true`, add the `validator_index` to `self.monitored_validators`.
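The rewrite swaps `.iter().map(|(_, val)| ...)` for `.values()`, which states "keys are unused" directly. The same idiom in isolation:

    use std::collections::HashMap;

    fn ids(map: &HashMap<u64, String>) -> Vec<String> {
        // `values()` avoids destructuring a tuple just to discard the key.
        map.values().cloned().collect()
    }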
@@ -696,12 +776,17 @@ impl<T: EthSpec> ValidatorMonitor<T> {
             let id = &validator.id;
             let delay = get_block_delay_ms(seen_timestamp, block, slot_clock);
 
-            metrics::inc_counter_vec(&metrics::VALIDATOR_MONITOR_BEACON_BLOCK_TOTAL, &[src, id]);
-            metrics::observe_timer_vec(
-                &metrics::VALIDATOR_MONITOR_BEACON_BLOCK_DELAY_SECONDS,
-                &[src, id],
-                delay,
-            );
+            self.aggregatable_metric(id, |label| {
+                metrics::inc_counter_vec(
+                    &metrics::VALIDATOR_MONITOR_BEACON_BLOCK_TOTAL,
+                    &[src, label],
+                );
+                metrics::observe_timer_vec(
+                    &metrics::VALIDATOR_MONITOR_BEACON_BLOCK_DELAY_SECONDS,
+                    &[src, label],
+                    delay,
+                );
+            });
 
             info!(
                 self.log,
@@ -767,27 +852,31 @@ impl<T: EthSpec> ValidatorMonitor<T> {
             if let Some(validator) = self.get_validator(*i) {
                 let id = &validator.id;
 
-                metrics::inc_counter_vec(
-                    &metrics::VALIDATOR_MONITOR_UNAGGREGATED_ATTESTATION_TOTAL,
-                    &[src, id],
-                );
-                metrics::observe_timer_vec(
-                    &metrics::VALIDATOR_MONITOR_UNAGGREGATED_ATTESTATION_DELAY_SECONDS,
-                    &[src, id],
-                    delay,
-                );
+                self.aggregatable_metric(id, |label| {
+                    metrics::inc_counter_vec(
+                        &metrics::VALIDATOR_MONITOR_UNAGGREGATED_ATTESTATION_TOTAL,
+                        &[src, label],
+                    );
+                    metrics::observe_timer_vec(
+                        &metrics::VALIDATOR_MONITOR_UNAGGREGATED_ATTESTATION_DELAY_SECONDS,
+                        &[src, label],
+                        delay,
+                    );
+                });
 
-                info!(
-                    self.log,
-                    "Unaggregated attestation";
-                    "head" => ?data.beacon_block_root,
-                    "index" => %data.index,
-                    "delay_ms" => %delay.as_millis(),
-                    "epoch" => %epoch,
-                    "slot" => %data.slot,
-                    "src" => src,
-                    "validator" => %id,
-                );
+                if self.individual_tracking() {
+                    info!(
+                        self.log,
+                        "Unaggregated attestation";
+                        "head" => ?data.beacon_block_root,
+                        "index" => %data.index,
+                        "delay_ms" => %delay.as_millis(),
+                        "epoch" => %epoch,
+                        "slot" => %data.slot,
+                        "src" => src,
+                        "validator" => %id,
+                    );
+                }
 
                 validator.with_epoch_summary(epoch, |summary| {
                     summary.register_unaggregated_attestation(delay)
@@ -851,27 +940,31 @@ impl<T: EthSpec> ValidatorMonitor<T> {
         if let Some(validator) = self.get_validator(aggregator_index) {
             let id = &validator.id;
 
-            metrics::inc_counter_vec(
-                &metrics::VALIDATOR_MONITOR_AGGREGATED_ATTESTATION_TOTAL,
-                &[src, id],
-            );
-            metrics::observe_timer_vec(
-                &metrics::VALIDATOR_MONITOR_AGGREGATED_ATTESTATION_DELAY_SECONDS,
-                &[src, id],
-                delay,
-            );
+            self.aggregatable_metric(id, |label| {
+                metrics::inc_counter_vec(
+                    &metrics::VALIDATOR_MONITOR_AGGREGATED_ATTESTATION_TOTAL,
+                    &[src, label],
+                );
+                metrics::observe_timer_vec(
+                    &metrics::VALIDATOR_MONITOR_AGGREGATED_ATTESTATION_DELAY_SECONDS,
+                    &[src, label],
+                    delay,
+                );
+            });
 
-            info!(
-                self.log,
-                "Aggregated attestation";
-                "head" => ?data.beacon_block_root,
-                "index" => %data.index,
-                "delay_ms" => %delay.as_millis(),
-                "epoch" => %epoch,
-                "slot" => %data.slot,
-                "src" => src,
-                "validator" => %id,
-            );
+            if self.individual_tracking() {
+                info!(
+                    self.log,
+                    "Aggregated attestation";
+                    "head" => ?data.beacon_block_root,
+                    "index" => %data.index,
+                    "delay_ms" => %delay.as_millis(),
+                    "epoch" => %epoch,
+                    "slot" => %data.slot,
+                    "src" => src,
+                    "validator" => %id,
+                );
+            }
 
             validator.with_epoch_summary(epoch, |summary| {
                 summary.register_aggregated_attestation(delay)
@@ -882,27 +975,31 @@ impl<T: EthSpec> ValidatorMonitor<T> {
             if let Some(validator) = self.get_validator(*i) {
                 let id = &validator.id;
 
-                metrics::inc_counter_vec(
-                    &metrics::VALIDATOR_MONITOR_ATTESTATION_IN_AGGREGATE_TOTAL,
-                    &[src, id],
-                );
-                metrics::observe_timer_vec(
-                    &metrics::VALIDATOR_MONITOR_ATTESTATION_IN_AGGREGATE_DELAY_SECONDS,
-                    &[src, id],
-                    delay,
-                );
+                self.aggregatable_metric(id, |label| {
+                    metrics::inc_counter_vec(
+                        &metrics::VALIDATOR_MONITOR_ATTESTATION_IN_AGGREGATE_TOTAL,
+                        &[src, label],
+                    );
+                    metrics::observe_timer_vec(
+                        &metrics::VALIDATOR_MONITOR_ATTESTATION_IN_AGGREGATE_DELAY_SECONDS,
+                        &[src, label],
+                        delay,
+                    );
+                });
 
-                info!(
-                    self.log,
-                    "Attestation included in aggregate";
-                    "head" => ?data.beacon_block_root,
-                    "index" => %data.index,
-                    "delay_ms" => %delay.as_millis(),
-                    "epoch" => %epoch,
-                    "slot" => %data.slot,
-                    "src" => src,
-                    "validator" => %id,
-                );
+                if self.individual_tracking() {
+                    info!(
+                        self.log,
+                        "Attestation included in aggregate";
+                        "head" => ?data.beacon_block_root,
+                        "index" => %data.index,
+                        "delay_ms" => %delay.as_millis(),
+                        "epoch" => %epoch,
+                        "slot" => %data.slot,
+                        "src" => src,
+                        "validator" => %id,
+                    );
+                }
 
                 validator.with_epoch_summary(epoch, |summary| {
                     summary.register_aggregate_attestation_inclusion()
@@ -936,26 +1033,31 @@ impl<T: EthSpec> ValidatorMonitor<T> {
             if let Some(validator) = self.get_validator(*i) {
                 let id = &validator.id;
 
-                metrics::inc_counter_vec(
-                    &metrics::VALIDATOR_MONITOR_ATTESTATION_IN_BLOCK_TOTAL,
-                    &["block", id],
-                );
-                metrics::set_int_gauge(
-                    &metrics::VALIDATOR_MONITOR_ATTESTATION_IN_BLOCK_DELAY_SLOTS,
-                    &["block", id],
-                    delay.as_u64() as i64,
-                );
+                self.aggregatable_metric(id, |label| {
+                    metrics::inc_counter_vec(
+                        &metrics::VALIDATOR_MONITOR_ATTESTATION_IN_BLOCK_TOTAL,
+                        &["block", label],
+                    );
+                });
 
-                info!(
-                    self.log,
-                    "Attestation included in block";
-                    "head" => ?data.beacon_block_root,
-                    "index" => %data.index,
-                    "inclusion_lag" => format!("{} slot(s)", delay),
-                    "epoch" => %epoch,
-                    "slot" => %data.slot,
-                    "validator" => %id,
-                );
+                if self.individual_tracking() {
+                    metrics::set_int_gauge(
+                        &metrics::VALIDATOR_MONITOR_ATTESTATION_IN_BLOCK_DELAY_SLOTS,
+                        &["block", id],
+                        delay.as_u64() as i64,
+                    );
+
+                    info!(
+                        self.log,
+                        "Attestation included in block";
+                        "head" => ?data.beacon_block_root,
+                        "index" => %data.index,
+                        "inclusion_lag" => format!("{} slot(s)", delay),
+                        "epoch" => %epoch,
+                        "slot" => %data.slot,
+                        "validator" => %id,
+                    );
+                }
 
                 validator.with_epoch_summary(epoch, |summary| {
                     summary.register_attestation_block_inclusion(inclusion_distance)
@@ -1013,26 +1115,30 @@ impl<T: EthSpec> ValidatorMonitor<T> {
                     slot_clock,
                 );
 
-                metrics::inc_counter_vec(
-                    &metrics::VALIDATOR_MONITOR_SYNC_COMMITTEE_MESSAGES_TOTAL,
-                    &[src, id],
-                );
-                metrics::observe_timer_vec(
-                    &metrics::VALIDATOR_MONITOR_SYNC_COMMITTEE_MESSAGES_DELAY_SECONDS,
-                    &[src, id],
-                    delay,
-                );
+                self.aggregatable_metric(id, |label| {
+                    metrics::inc_counter_vec(
+                        &metrics::VALIDATOR_MONITOR_SYNC_COMMITTEE_MESSAGES_TOTAL,
+                        &[src, label],
+                    );
+                    metrics::observe_timer_vec(
+                        &metrics::VALIDATOR_MONITOR_SYNC_COMMITTEE_MESSAGES_DELAY_SECONDS,
+                        &[src, label],
+                        delay,
+                    );
+                });
 
-                info!(
-                    self.log,
-                    "Sync committee message";
-                    "head" => %sync_committee_message.beacon_block_root,
-                    "delay_ms" => %delay.as_millis(),
-                    "epoch" => %epoch,
-                    "slot" => %sync_committee_message.slot,
-                    "src" => src,
-                    "validator" => %id,
-                );
+                if self.individual_tracking() {
+                    info!(
+                        self.log,
+                        "Sync committee message";
+                        "head" => %sync_committee_message.beacon_block_root,
+                        "delay_ms" => %delay.as_millis(),
+                        "epoch" => %epoch,
+                        "slot" => %sync_committee_message.slot,
+                        "src" => src,
+                        "validator" => %id,
+                    );
+                }
 
                 validator.with_epoch_summary(epoch, |summary| {
                     summary.register_sync_committee_message(delay)
@@ -1097,26 +1203,30 @@ impl<T: EthSpec> ValidatorMonitor<T> {
         if let Some(validator) = self.get_validator(aggregator_index) {
             let id = &validator.id;
 
-            metrics::inc_counter_vec(
-                &metrics::VALIDATOR_MONITOR_SYNC_CONTRIBUTIONS_TOTAL,
-                &[src, id],
-            );
-            metrics::observe_timer_vec(
-                &metrics::VALIDATOR_MONITOR_SYNC_CONTRIBUTIONS_DELAY_SECONDS,
-                &[src, id],
-                delay,
-            );
+            self.aggregatable_metric(id, |label| {
+                metrics::inc_counter_vec(
+                    &metrics::VALIDATOR_MONITOR_SYNC_CONTRIBUTIONS_TOTAL,
+                    &[src, label],
+                );
+                metrics::observe_timer_vec(
+                    &metrics::VALIDATOR_MONITOR_SYNC_CONTRIBUTIONS_DELAY_SECONDS,
+                    &[src, label],
+                    delay,
+                );
+            });
 
-            info!(
-                self.log,
-                "Sync contribution";
-                "head" => %beacon_block_root,
-                "delay_ms" => %delay.as_millis(),
-                "epoch" => %epoch,
-                "slot" => %slot,
-                "src" => src,
-                "validator" => %id,
-            );
+            if self.individual_tracking() {
+                info!(
+                    self.log,
+                    "Sync contribution";
+                    "head" => %beacon_block_root,
+                    "delay_ms" => %delay.as_millis(),
+                    "epoch" => %epoch,
+                    "slot" => %slot,
+                    "src" => src,
+                    "validator" => %id,
+                );
+            }
 
             validator.with_epoch_summary(epoch, |summary| {
                 summary.register_sync_committee_contribution(delay)
@@ -1127,21 +1237,25 @@ impl<T: EthSpec> ValidatorMonitor<T> {
         if let Some(validator) = self.validators.get(validator_pubkey) {
             let id = &validator.id;
 
-            metrics::inc_counter_vec(
-                &metrics::VALIDATOR_MONITOR_SYNC_COMMITTEE_MESSAGE_IN_CONTRIBUTION_TOTAL,
-                &[src, id],
-            );
+            self.aggregatable_metric(id, |label| {
+                metrics::inc_counter_vec(
+                    &metrics::VALIDATOR_MONITOR_SYNC_COMMITTEE_MESSAGE_IN_CONTRIBUTION_TOTAL,
+                    &[src, label],
+                );
+            });
 
-            info!(
-                self.log,
-                "Sync signature included in contribution";
-                "head" => %beacon_block_root,
-                "delay_ms" => %delay.as_millis(),
-                "epoch" => %epoch,
-                "slot" => %slot,
-                "src" => src,
-                "validator" => %id,
-            );
+            if self.individual_tracking() {
+                info!(
+                    self.log,
+                    "Sync signature included in contribution";
+                    "head" => %beacon_block_root,
+                    "delay_ms" => %delay.as_millis(),
+                    "epoch" => %epoch,
+                    "slot" => %slot,
+                    "src" => src,
+                    "validator" => %id,
+                );
+            }
 
             validator.with_epoch_summary(epoch, |summary| {
                 summary.register_sync_signature_contribution_inclusion()
@@ -1163,19 +1277,23 @@ impl<T: EthSpec> ValidatorMonitor<T> {
         if let Some(validator) = self.validators.get(validator_pubkey) {
             let id = &validator.id;
 
-            metrics::inc_counter_vec(
-                &metrics::VALIDATOR_MONITOR_SYNC_COMMITTEE_MESSAGE_IN_BLOCK_TOTAL,
-                &["block", id],
-            );
+            self.aggregatable_metric(id, |label| {
+                metrics::inc_counter_vec(
+                    &metrics::VALIDATOR_MONITOR_SYNC_COMMITTEE_MESSAGE_IN_BLOCK_TOTAL,
+                    &["block", label],
+                );
+            });
 
-            info!(
-                self.log,
-                "Sync signature included in block";
-                "head" => %beacon_block_root,
-                "epoch" => %epoch,
-                "slot" => %slot,
-                "validator" => %id,
-            );
+            if self.individual_tracking() {
+                info!(
+                    self.log,
+                    "Sync signature included in block";
+                    "head" => %beacon_block_root,
+                    "epoch" => %epoch,
+                    "slot" => %slot,
+                    "validator" => %id,
+                );
+            }
 
             validator.with_epoch_summary(epoch, |summary| {
                 summary.register_sync_signature_block_inclusions();
@@ -1204,8 +1322,12 @@ impl<T: EthSpec> ValidatorMonitor<T> {
             let id = &validator.id;
             let epoch = exit.epoch;
 
-            metrics::inc_counter_vec(&metrics::VALIDATOR_MONITOR_EXIT_TOTAL, &[src, id]);
+            self.aggregatable_metric(id, |label| {
+                metrics::inc_counter_vec(&metrics::VALIDATOR_MONITOR_EXIT_TOTAL, &[src, label]);
+            });
 
+            // Not gated behind `self.individual_tracking()` since it's an
+            // infrequent and interesting message.
             info!(
                 self.log,
                 "Voluntary exit";
@@ -1243,11 +1365,15 @@ impl<T: EthSpec> ValidatorMonitor<T> {
         if let Some(validator) = self.get_validator(proposer) {
             let id = &validator.id;
 
-            metrics::inc_counter_vec(
-                &metrics::VALIDATOR_MONITOR_PROPOSER_SLASHING_TOTAL,
-                &[src, id],
-            );
+            self.aggregatable_metric(id, |label| {
+                metrics::inc_counter_vec(
+                    &metrics::VALIDATOR_MONITOR_PROPOSER_SLASHING_TOTAL,
+                    &[src, label],
+                );
+            });
 
+            // Not gated behind `self.individual_tracking()` since it's an
+            // infrequent and interesting message.
             crit!(
                 self.log,
                 "Proposer slashing";
@@ -1296,11 +1422,15 @@ impl<T: EthSpec> ValidatorMonitor<T> {
             let id = &validator.id;
             let epoch = data.slot.epoch(T::slots_per_epoch());
 
-            metrics::inc_counter_vec(
-                &metrics::VALIDATOR_MONITOR_ATTESTER_SLASHING_TOTAL,
-                &[src, id],
-            );
+            self.aggregatable_metric(id, |label| {
+                metrics::inc_counter_vec(
+                    &metrics::VALIDATOR_MONITOR_ATTESTER_SLASHING_TOTAL,
+                    &[src, label],
+                );
+            });
 
+            // Not gated behind `self.individual_tracking()` since it's an
+            // infrequent and interesting message.
             crit!(
                 self.log,
                 "Attester slashing";
@@ -1350,69 +1480,80 @@ impl<T: EthSpec> ValidatorMonitor<T> {
                 /*
                  * Attestations
                  */
-                metrics::set_gauge_vec(
-                    &metrics::VALIDATOR_MONITOR_PREV_EPOCH_ATTESTATIONS_TOTAL,
-                    &[id],
-                    summary.attestations as i64,
-                );
                 if let Some(delay) = summary.attestation_min_delay {
-                    metrics::observe_timer_vec(
-                        &metrics::VALIDATOR_MONITOR_PREV_EPOCH_ATTESTATIONS_MIN_DELAY_SECONDS,
-                        &[id],
-                        delay,
-                    );
+                    self.aggregatable_metric(id, |tag| {
+                        metrics::observe_timer_vec(
+                            &metrics::VALIDATOR_MONITOR_PREV_EPOCH_ATTESTATIONS_MIN_DELAY_SECONDS,
+                            &[tag],
+                            delay,
+                        );
+                    });
                 }
-                metrics::set_gauge_vec(
-                    &metrics::VALIDATOR_MONITOR_PREV_EPOCH_ATTESTATION_AGGREGATE_INCLUSIONS,
-                    &[id],
-                    summary.attestation_aggregate_inclusions as i64,
-                );
-                metrics::set_gauge_vec(
-                    &metrics::VALIDATOR_MONITOR_PREV_EPOCH_ATTESTATION_BLOCK_INCLUSIONS,
-                    &[id],
-                    summary.attestation_block_inclusions as i64,
-                );
-                if let Some(distance) = summary.attestation_min_block_inclusion_distance {
+                if self.individual_tracking() {
                     metrics::set_gauge_vec(
-                        &metrics::VALIDATOR_MONITOR_PREV_EPOCH_ATTESTATION_BLOCK_MIN_INCLUSION_DISTANCE,
+                        &metrics::VALIDATOR_MONITOR_PREV_EPOCH_ATTESTATIONS_TOTAL,
                         &[id],
-                        distance.as_u64() as i64,
+                        summary.attestations as i64,
                     );
+                    metrics::set_gauge_vec(
+                        &metrics::VALIDATOR_MONITOR_PREV_EPOCH_ATTESTATION_AGGREGATE_INCLUSIONS,
+                        &[id],
+                        summary.attestation_aggregate_inclusions as i64,
+                    );
+                    metrics::set_gauge_vec(
+                        &metrics::VALIDATOR_MONITOR_PREV_EPOCH_ATTESTATION_BLOCK_INCLUSIONS,
+                        &[id],
+                        summary.attestation_block_inclusions as i64,
+                    );
+
+                    if let Some(distance) = summary.attestation_min_block_inclusion_distance {
+                        metrics::set_gauge_vec(
+                            &metrics::VALIDATOR_MONITOR_PREV_EPOCH_ATTESTATION_BLOCK_MIN_INCLUSION_DISTANCE,
+                            &[id],
+                            distance.as_u64() as i64,
+                        );
+                    }
                 }
                 /*
                  * Sync committee messages
                  */
-                metrics::set_gauge_vec(
-                    &metrics::VALIDATOR_MONITOR_PREV_EPOCH_SYNC_COMMITTEE_MESSAGES_TOTAL,
-                    &[id],
-                    summary.sync_committee_messages as i64,
-                );
                 if let Some(delay) = summary.sync_committee_message_min_delay {
-                    metrics::observe_timer_vec(
-                        &metrics::VALIDATOR_MONITOR_PREV_EPOCH_SYNC_COMMITTEE_MESSAGES_MIN_DELAY_SECONDS,
-                        &[id],
-                        delay,
-                    );
+                    self.aggregatable_metric(id, |tag| {
+                        metrics::observe_timer_vec(
+                            &metrics::VALIDATOR_MONITOR_PREV_EPOCH_SYNC_COMMITTEE_MESSAGES_MIN_DELAY_SECONDS,
+                            &[tag],
+                            delay,
+                        );
+                    });
                 }
-                metrics::set_gauge_vec(
-                    &metrics::VALIDATOR_MONITOR_PREV_EPOCH_SYNC_CONTRIBUTION_INCLUSIONS,
-                    &[id],
-                    summary.sync_signature_contribution_inclusions as i64,
-                );
-                metrics::set_gauge_vec(
-                    &metrics::VALIDATOR_MONITOR_PREV_EPOCH_SYNC_SIGNATURE_BLOCK_INCLUSIONS,
-                    &[id],
-                    summary.sync_signature_block_inclusions as i64,
-                );
+                if self.individual_tracking() {
+                    metrics::set_gauge_vec(
+                        &metrics::VALIDATOR_MONITOR_PREV_EPOCH_SYNC_COMMITTEE_MESSAGES_TOTAL,
+                        &[id],
+                        summary.sync_committee_messages as i64,
+                    );
+                    metrics::set_gauge_vec(
+                        &metrics::VALIDATOR_MONITOR_PREV_EPOCH_SYNC_CONTRIBUTION_INCLUSIONS,
+                        &[id],
+                        summary.sync_signature_contribution_inclusions as i64,
+                    );
+                    metrics::set_gauge_vec(
+                        &metrics::VALIDATOR_MONITOR_PREV_EPOCH_SYNC_SIGNATURE_BLOCK_INCLUSIONS,
+                        &[id],
+                        summary.sync_signature_block_inclusions as i64,
+                    );
+                }
 
                 /*
                  * Sync contributions
                  */
-                metrics::set_gauge_vec(
-                    &metrics::VALIDATOR_MONITOR_PREV_EPOCH_SYNC_CONTRIBUTIONS_TOTAL,
-                    &[id],
-                    summary.sync_contributions as i64,
-                );
+                if self.individual_tracking() {
+                    metrics::set_gauge_vec(
+                        &metrics::VALIDATOR_MONITOR_PREV_EPOCH_SYNC_CONTRIBUTIONS_TOTAL,
+                        &[id],
+                        summary.sync_contributions as i64,
+                    );
+                }
                 if let Some(delay) = summary.sync_contribution_min_delay {
                     metrics::observe_timer_vec(
                         &metrics::VALIDATOR_MONITOR_PREV_EPOCH_SYNC_CONTRIBUTION_MIN_DELAY_SECONDS,
@@ -1424,51 +1565,61 @@ impl<T: EthSpec> ValidatorMonitor<T> {
                 /*
                  * Blocks
                  */
-                metrics::set_gauge_vec(
-                    &metrics::VALIDATOR_MONITOR_PREV_EPOCH_BEACON_BLOCKS_TOTAL,
-                    &[id],
-                    summary.blocks as i64,
-                );
-                if let Some(delay) = summary.block_min_delay {
-                    metrics::observe_timer_vec(
-                        &metrics::VALIDATOR_MONITOR_PREV_EPOCH_BEACON_BLOCKS_MIN_DELAY_SECONDS,
+                if self.individual_tracking() {
+                    metrics::set_gauge_vec(
+                        &metrics::VALIDATOR_MONITOR_PREV_EPOCH_BEACON_BLOCKS_TOTAL,
                         &[id],
-                        delay,
+                        summary.blocks as i64,
                     );
                 }
+                if let Some(delay) = summary.block_min_delay {
+                    self.aggregatable_metric(id, |tag| {
+                        metrics::observe_timer_vec(
+                            &metrics::VALIDATOR_MONITOR_PREV_EPOCH_BEACON_BLOCKS_MIN_DELAY_SECONDS,
+                            &[tag],
+                            delay,
+                        );
+                    });
+                }
                 /*
                  * Aggregates
                  */
-                metrics::set_gauge_vec(
-                    &metrics::VALIDATOR_MONITOR_PREV_EPOCH_AGGREGATES_TOTAL,
-                    &[id],
-                    summary.aggregates as i64,
-                );
-                if let Some(delay) = summary.aggregate_min_delay {
-                    metrics::observe_timer_vec(
-                        &metrics::VALIDATOR_MONITOR_PREV_EPOCH_AGGREGATES_MIN_DELAY_SECONDS,
+                if self.individual_tracking() {
+                    metrics::set_gauge_vec(
+                        &metrics::VALIDATOR_MONITOR_PREV_EPOCH_AGGREGATES_TOTAL,
                         &[id],
-                        delay,
+                        summary.aggregates as i64,
                     );
                 }
+                if let Some(delay) = summary.aggregate_min_delay {
+                    self.aggregatable_metric(id, |tag| {
+                        metrics::observe_timer_vec(
+                            &metrics::VALIDATOR_MONITOR_PREV_EPOCH_AGGREGATES_MIN_DELAY_SECONDS,
+                            &[tag],
+                            delay,
+                        );
+                    });
+                }
                 /*
                  * Other
                  */
-                metrics::set_gauge_vec(
-                    &metrics::VALIDATOR_MONITOR_PREV_EPOCH_EXITS_TOTAL,
-                    &[id],
-                    summary.exits as i64,
-                );
-                metrics::set_gauge_vec(
-                    &metrics::VALIDATOR_MONITOR_PREV_EPOCH_PROPOSER_SLASHINGS_TOTAL,
-                    &[id],
-                    summary.proposer_slashings as i64,
-                );
-                metrics::set_gauge_vec(
-                    &metrics::VALIDATOR_MONITOR_PREV_EPOCH_ATTESTER_SLASHINGS_TOTAL,
-                    &[id],
-                    summary.attester_slashings as i64,
-                );
+                if self.individual_tracking() {
+                    metrics::set_gauge_vec(
+                        &metrics::VALIDATOR_MONITOR_PREV_EPOCH_EXITS_TOTAL,
+                        &[id],
+                        summary.exits as i64,
+                    );
+                    metrics::set_gauge_vec(
+                        &metrics::VALIDATOR_MONITOR_PREV_EPOCH_PROPOSER_SLASHINGS_TOTAL,
+                        &[id],
+                        summary.proposer_slashings as i64,
+                    );
+                    metrics::set_gauge_vec(
+                        &metrics::VALIDATOR_MONITOR_PREV_EPOCH_ATTESTER_SLASHINGS_TOTAL,
+                        &[id],
+                        summary.attester_slashings as i64,
+                    );
+                }
             }
         }
     }
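Taken together, the file now uses three gating modes; a schematic recap (method names from this diff, bodies elided):

    // 1. Counters/timers that roll up: always emitted under TOTAL_LABEL,
    //    with the per-validator label added only below the threshold.
    self.aggregatable_metric(id, |label| { /* inc_counter_vec / observe_timer_vec */ });

    // 2. Per-validator gauges and chatty logs: emitted only below the threshold.
    if self.individual_tracking() { /* set_gauge_vec / info! / debug! */ }

    // 3. Rare, high-signal events (exits, slashings, sync-committee membership
    //    logs): never gated, since their volume is naturally bounded.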
beacon_node/beacon_chain/tests/capella.rs (new file, 167 lines)
@@ -0,0 +1,167 @@
#![cfg(not(debug_assertions))] // Tests run too slow in debug.

use beacon_chain::test_utils::BeaconChainHarness;
use execution_layer::test_utils::Block;
use types::*;

const VALIDATOR_COUNT: usize = 32;
type E = MainnetEthSpec;

fn verify_execution_payload_chain<T: EthSpec>(chain: &[FullPayload<T>]) {
    let mut prev_ep: Option<FullPayload<T>> = None;

    for ep in chain {
        assert!(!ep.is_default_with_empty_roots());
        assert!(ep.block_hash() != ExecutionBlockHash::zero());

        // Check against previous `ExecutionPayload`.
        if let Some(prev_ep) = prev_ep {
            assert_eq!(prev_ep.block_hash(), ep.parent_hash());
            assert_eq!(prev_ep.block_number() + 1, ep.block_number());
            assert!(ep.timestamp() > prev_ep.timestamp());
        }
        prev_ep = Some(ep.clone());
    }
}

#[tokio::test]
async fn base_altair_merge_capella() {
    let altair_fork_epoch = Epoch::new(4);
    let altair_fork_slot = altair_fork_epoch.start_slot(E::slots_per_epoch());
    let bellatrix_fork_epoch = Epoch::new(8);
    let merge_fork_slot = bellatrix_fork_epoch.start_slot(E::slots_per_epoch());
    let capella_fork_epoch = Epoch::new(12);
    let capella_fork_slot = capella_fork_epoch.start_slot(E::slots_per_epoch());

    let mut spec = E::default_spec();
    spec.altair_fork_epoch = Some(altair_fork_epoch);
    spec.bellatrix_fork_epoch = Some(bellatrix_fork_epoch);
    spec.capella_fork_epoch = Some(capella_fork_epoch);

    let harness = BeaconChainHarness::builder(E::default())
        .spec(spec)
        .logger(logging::test_logger())
        .deterministic_keypairs(VALIDATOR_COUNT)
        .fresh_ephemeral_store()
        .mock_execution_layer()
        .build();

    /*
     * Start with the base fork.
     */
    assert!(harness.chain.head_snapshot().beacon_block.as_base().is_ok());

    /*
     * Do the Altair fork.
     */
    harness.extend_to_slot(altair_fork_slot).await;

    let altair_head = &harness.chain.head_snapshot().beacon_block;
    assert!(altair_head.as_altair().is_ok());
    assert_eq!(altair_head.slot(), altair_fork_slot);

    /*
     * Do the merge fork, without a terminal PoW block.
     */
    harness.extend_to_slot(merge_fork_slot).await;

    let merge_head = &harness.chain.head_snapshot().beacon_block;
    assert!(merge_head.as_merge().is_ok());
    assert_eq!(merge_head.slot(), merge_fork_slot);
    assert!(
        merge_head
            .message()
            .body()
            .execution_payload()
            .unwrap()
            .is_default_with_empty_roots(),
        "Merge head is default payload"
    );

    /*
     * Next merge block shouldn't include an exec payload.
     */
    harness.extend_slots(1).await;

    let one_after_merge_head = &harness.chain.head_snapshot().beacon_block;
    assert!(
        one_after_merge_head
            .message()
            .body()
            .execution_payload()
            .unwrap()
            .is_default_with_empty_roots(),
        "One after merge head is default payload"
    );
    assert_eq!(one_after_merge_head.slot(), merge_fork_slot + 1);

    /*
     * Trigger the terminal PoW block.
     */
    harness
        .execution_block_generator()
        .move_to_terminal_block()
        .unwrap();

    // Add a slot duration to get to the next slot
    let timestamp = harness.get_timestamp_at_slot() + harness.spec.seconds_per_slot;
    harness
        .execution_block_generator()
        .modify_last_block(|block| {
            if let Block::PoW(terminal_block) = block {
                terminal_block.timestamp = timestamp;
            }
        });
    harness.extend_slots(1).await;

    let two_after_merge_head = &harness.chain.head_snapshot().beacon_block;
    assert!(
        two_after_merge_head
            .message()
            .body()
            .execution_payload()
            .unwrap()
            .is_default_with_empty_roots(),
        "Two after merge head is default payload"
    );
    assert_eq!(two_after_merge_head.slot(), merge_fork_slot + 2);

    /*
     * Next merge block should include an exec payload.
     */
    let mut execution_payloads = vec![];
    for _ in (merge_fork_slot.as_u64() + 3)..capella_fork_slot.as_u64() {
        harness.extend_slots(1).await;
        let block = &harness.chain.head_snapshot().beacon_block;
        let full_payload: FullPayload<E> = block
            .message()
            .body()
            .execution_payload()
            .unwrap()
            .clone()
            .into();
        // pre-capella shouldn't have withdrawals
        assert!(full_payload.withdrawals_root().is_err());
        execution_payloads.push(full_payload);
    }

    /*
     * Should enter capella fork now.
     */
    for _ in 0..16 {
        harness.extend_slots(1).await;
        let block = &harness.chain.head_snapshot().beacon_block;
        let full_payload: FullPayload<E> = block
            .message()
            .body()
            .execution_payload()
            .unwrap()
            .clone()
            .into();
        // post-capella should have withdrawals
        assert!(full_payload.withdrawals_root().is_ok());
        execution_payloads.push(full_payload);
    }

    verify_execution_payload_chain(execution_payloads.as_slice());
}
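For the fork arithmetic above: `MainnetEthSpec` has 32 slots per epoch, so the configured forks land on slot boundaries as follows (a worked check, not part of the file):

    // Epoch::new(e).start_slot(32) == e * 32
    // altair:    epoch  4 -> slot 128
    // bellatrix: epoch  8 -> slot 256
    // capella:   epoch 12 -> slot 384
    assert_eq!(Epoch::new(4).start_slot(E::slots_per_epoch()), Slot::new(128));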
@@ -1,6 +1,7 @@
 mod attestation_production;
 mod attestation_verification;
 mod block_verification;
+mod capella;
 mod merge;
 mod op_verification;
 mod payload_invalidation;
@@ -17,12 +17,9 @@ fn verify_execution_payload_chain<T: EthSpec>(chain: &[FullPayload<T>]) {
 
         // Check against previous `ExecutionPayload`.
         if let Some(prev_ep) = prev_ep {
-            assert_eq!(prev_ep.block_hash(), ep.execution_payload().parent_hash());
-            assert_eq!(
-                prev_ep.execution_payload().block_number() + 1,
-                ep.execution_payload().block_number()
-            );
-            assert!(ep.execution_payload().timestamp() > prev_ep.execution_payload().timestamp());
+            assert_eq!(prev_ep.block_hash(), ep.parent_hash());
+            assert_eq!(prev_ep.block_number() + 1, ep.block_number());
+            assert!(ep.timestamp() > prev_ep.timestamp());
         }
         prev_ep = Some(ep.clone());
     }
@@ -191,18 +188,17 @@ async fn base_altair_merge_with_terminal_block_after_fork() {
 
     harness.extend_slots(1).await;
 
-    let one_after_merge_head = &harness.chain.head_snapshot().beacon_block;
-    // FIXME: why is this being tested twice?
+    let two_after_merge_head = &harness.chain.head_snapshot().beacon_block;
     assert!(
-        one_after_merge_head
+        two_after_merge_head
            .message()
            .body()
            .execution_payload()
            .unwrap()
            .is_default_with_empty_roots(),
-        "One after merge head is default payload"
+        "Two after merge head is default payload"
    );
-    assert_eq!(one_after_merge_head.slot(), merge_fork_slot + 2);
+    assert_eq!(two_after_merge_head.slot(), merge_fork_slot + 2);
 
     /*
      * Next merge block should include an exec payload.
@@ -5,6 +5,7 @@ use beacon_chain::builder::BeaconChainBuilder;
 use beacon_chain::test_utils::{
     test_spec, AttestationStrategy, BeaconChainHarness, BlockStrategy, DiskHarnessType,
 };
+use beacon_chain::validator_monitor::DEFAULT_INDIVIDUAL_TRACKING_THRESHOLD;
 use beacon_chain::{
     historical_blocks::HistoricalBlockError, migrate::MigratorConfig, BeaconChain,
     BeaconChainError, BeaconChainTypes, BeaconSnapshot, ChainConfig, NotifyExecutionLayer,
@@ -2121,7 +2122,7 @@ async fn weak_subjectivity_sync() {
                 log.clone(),
                 1,
             )))
-            .monitor_validators(true, vec![], log)
+            .monitor_validators(true, vec![], DEFAULT_INDIVIDUAL_TRACKING_THRESHOLD, log)
             .build()
             .expect("should build"),
     );
@@ -154,6 +154,7 @@ where
                 config,
                 context.executor.clone(),
                 context.log().clone(),
+                &spec,
             )
             .map_err(|e| format!("unable to start execution layer endpoints: {:?}", e))?;
             Some(execution_layer)
@@ -173,6 +174,7 @@ where
             .monitor_validators(
                 config.validator_monitor_auto,
                 config.validator_monitor_pubkeys.clone(),
+                config.validator_monitor_individual_tracking_threshold,
                 runtime_context
                     .service_context("val_mon".to_string())
                     .log()
@@ -1,3 +1,4 @@
+use beacon_chain::validator_monitor::DEFAULT_INDIVIDUAL_TRACKING_THRESHOLD;
 use beacon_chain::TrustedSetup;
 use directory::DEFAULT_ROOT_DIR;
 use environment::LoggerConfig;
@@ -60,6 +61,11 @@ pub struct Config {
     pub validator_monitor_auto: bool,
     /// A list of validator pubkeys to monitor.
     pub validator_monitor_pubkeys: Vec<PublicKeyBytes>,
+    /// Once the number of monitored validators goes above this threshold, we
+    /// will stop tracking metrics on a per-validator basis. This prevents large
+    /// validator counts causing infeasibly high cardinality for Prometheus and
+    /// high log volumes.
+    pub validator_monitor_individual_tracking_threshold: usize,
     #[serde(skip)]
     /// The `genesis` field is not serialized or deserialized by `serde` to ensure it is defined
     /// via the CLI at runtime, instead of from a configuration file saved to disk.
@@ -100,6 +106,7 @@ impl Default for Config {
             slasher: None,
             validator_monitor_auto: false,
             validator_monitor_pubkeys: vec![],
+            validator_monitor_individual_tracking_threshold: DEFAULT_INDIVIDUAL_TRACKING_THRESHOLD,
             logger_config: LoggerConfig::default(),
         }
     }
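A sketch of overriding the new default in code (field name from this diff; the exact value is up to the operator):

    let mut config = Config::default();
    // Track at most 16 validators individually; beyond that, aggregate only.
    config.validator_monitor_individual_tracking_threshold = 16;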
@@ -675,7 +675,7 @@ pub mod tests {
     #[test]
     fn test_finalization_boundaries() {
         let n = 8;
-        let half = (n / 2) as usize;
+        let half = n / 2;
 
         let mut deposit_cache = get_cache_with_deposits(n as u64);
 
@@ -828,9 +828,9 @@ pub mod tests {
         // get_log(half+quarter) should return log with index `half+quarter`
         assert_eq!(
             q3_log_before_finalization.index,
-            (half + quarter) as u64,
+            half + quarter,
             "log index should be {}",
-            (half + quarter),
+            half + quarter,
         );
 
         // get lower quarter of deposits with max deposit count
@@ -122,7 +122,7 @@ impl SszEth1Cache {
                 cache: self.deposit_cache.to_deposit_cache()?,
                 last_processed_block: self.last_processed_block,
             }),
-            endpoint: endpoint_from_config(&config)
+            endpoint: endpoint_from_config(&config, &spec)
                 .map_err(|e| format!("Failed to create endpoint: {:?}", e))?,
             to_finalize: RwLock::new(None),
             // Set the remote head_block zero when creating a new instance. We only care about
@@ -363,7 +363,7 @@ impl Default for Config {
     }
 }
 
-pub fn endpoint_from_config(config: &Config) -> Result<HttpJsonRpc, String> {
+pub fn endpoint_from_config(config: &Config, spec: &ChainSpec) -> Result<HttpJsonRpc, String> {
     match config.endpoint.clone() {
         Eth1Endpoint::Auth {
             endpoint,
@@ -373,11 +373,16 @@ pub fn endpoint_from_config(config: &Config) -> Result<HttpJsonRpc, String> {
         } => {
             let auth = Auth::new_with_path(jwt_path, jwt_id, jwt_version)
                 .map_err(|e| format!("Failed to initialize jwt auth: {:?}", e))?;
-            HttpJsonRpc::new_with_auth(endpoint, auth, Some(config.execution_timeout_multiplier))
-                .map_err(|e| format!("Failed to create eth1 json rpc client: {:?}", e))
+            HttpJsonRpc::new_with_auth(
+                endpoint,
+                auth,
+                Some(config.execution_timeout_multiplier),
+                spec,
+            )
+            .map_err(|e| format!("Failed to create eth1 json rpc client: {:?}", e))
         }
         Eth1Endpoint::NoAuth(endpoint) => {
-            HttpJsonRpc::new(endpoint, Some(config.execution_timeout_multiplier))
+            HttpJsonRpc::new(endpoint, Some(config.execution_timeout_multiplier), spec)
                 .map_err(|e| format!("Failed to create eth1 json rpc client: {:?}", e))
         }
     }
@@ -404,7 +409,7 @@ impl Service {
             deposit_cache: RwLock::new(DepositUpdater::new(
                 config.deposit_contract_deploy_block,
             )),
-            endpoint: endpoint_from_config(&config)?,
+            endpoint: endpoint_from_config(&config, &spec)?,
             to_finalize: RwLock::new(None),
             remote_head_block: RwLock::new(None),
             config: RwLock::new(config),
@@ -433,7 +438,7 @@ impl Service {
             inner: Arc::new(Inner {
                 block_cache: <_>::default(),
                 deposit_cache: RwLock::new(deposit_cache),
-                endpoint: endpoint_from_config(&config)
+                endpoint: endpoint_from_config(&config, &spec)
                     .map_err(Error::FailedToInitializeFromSnapshot)?,
                 to_finalize: RwLock::new(None),
                 remote_head_block: RwLock::new(None),
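Callers now thread a `ChainSpec` through to the RPC client; a minimal sketch of the no-auth path under the new signatures (`config`, `spec`, and `url` assumed in scope):

    let endpoint = endpoint_from_config(&config, &spec)?;
    // ...or constructing the client directly:
    let client = HttpJsonRpc::new(url, Some(config.execution_timeout_multiplier), &spec)
        .map_err(|e| format!("Failed to create eth1 json rpc client: {:?}", e))?;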
@@ -494,7 +494,8 @@ mod deposit_tree {
         let mut deposit_counts = vec![];
 
         let client =
-            HttpJsonRpc::new(SensitiveUrl::parse(&eth1.endpoint()).unwrap(), None).unwrap();
+            HttpJsonRpc::new(SensitiveUrl::parse(&eth1.endpoint()).unwrap(), None, spec)
+                .unwrap();
 
         // Perform deposits to the smart contract, recording its state along the way.
         for deposit in &deposits {
@@ -598,8 +599,12 @@ mod http {
             .expect("should start eth1 environment");
         let deposit_contract = &eth1.deposit_contract;
         let web3 = eth1.web3();
-        let client =
-            HttpJsonRpc::new(SensitiveUrl::parse(&eth1.endpoint()).unwrap(), None).unwrap();
+        let client = HttpJsonRpc::new(
+            SensitiveUrl::parse(&eth1.endpoint()).unwrap(),
+            None,
+            &MainnetEthSpec::default_spec(),
+        )
+        .unwrap();
 
         let block_number = get_block_number(&web3).await;
         let logs = blocking_deposit_logs(&client, &eth1, 0..block_number).await;
@@ -697,6 +702,7 @@ mod fast {
         let web3 = eth1.web3();
 
         let now = get_block_number(&web3).await;
+        let spec = MainnetEthSpec::default_spec();
         let service = Service::new(
             Config {
                 endpoint: Eth1Endpoint::NoAuth(
@@ -710,11 +716,12 @@ mod fast {
                 ..Config::default()
             },
             log,
-            MainnetEthSpec::default_spec(),
+            spec.clone(),
         )
         .unwrap();
         let client =
-            HttpJsonRpc::new(SensitiveUrl::parse(&eth1.endpoint()).unwrap(), None).unwrap();
+            HttpJsonRpc::new(SensitiveUrl::parse(&eth1.endpoint()).unwrap(), None, &spec)
+                .unwrap();
         let n = 10;
         let deposits: Vec<_> = (0..n).map(|_| random_deposit_data()).collect();
         for deposit in &deposits {
@@ -4,8 +4,6 @@ version = "0.1.0"
 edition = "2021"
 
 # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html
-[features]
-withdrawals-processing = ["state_processing/withdrawals-processing", "eth2/withdrawals-processing"]
 
 [dependencies]
 types = { path = "../../consensus/types"}
@@ -40,7 +38,7 @@ rand = "0.8.5"
 zeroize = { version = "1.4.2", features = ["zeroize_derive"] }
 lighthouse_metrics = { path = "../../common/lighthouse_metrics" }
 lazy_static = "1.4.0"
-ethers-core = "0.17.0"
+ethers-core = "1.0.2"
 builder_client = { path = "../builder_client" }
 fork_choice = { path = "../../consensus/fork_choice" }
 mev-build-rs = { git = "https://github.com/ralexstokes/mev-rs", rev = "6c99b0fbdc0427b1625469d2e575303ce08de5b8" }
@@ -48,3 +46,7 @@ ethereum-consensus = { git = "https://github.com/ralexstokes/ethereum-consensus" }
 ssz-rs = { git = "https://github.com/ralexstokes/ssz-rs", rev = "cb08f1" }
 tokio-stream = { version = "0.1.9", features = [ "sync" ] }
 strum = "0.24.0"
+keccak-hash = "0.10.0"
+hash256-std-hasher = "0.15.2"
+triehash = "0.8.4"
+hash-db = "0.15.2"
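The four new crates are exactly the stack used by `block_hash.rs` below: `triehash` computes an ordered Merkle-Patricia root over RLP-encoded leaves, hashing through a `hash-db` `Hasher` backed by keccak-256. Schematically (the leaf values are placeholders):

    // ordered_trie_root builds the trie keyed by RLP-encoded indices 0..n,
    // which is how Ethereum derives transactions_root / withdrawals_root.
    let root = triehash::ordered_trie_root::<KeccakHasher, _>(
        vec![rlp_leaf_a, rlp_leaf_b], // each leaf: raw RLP bytes
    );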
beacon_node/execution_layer/src/block_hash.rs (new file, 193 lines)
@@ -0,0 +1,193 @@
use crate::{
    json_structures::JsonWithdrawal,
    keccak::{keccak256, KeccakHasher},
    metrics, Error, ExecutionLayer,
};
use ethers_core::utils::rlp::RlpStream;
use keccak_hash::KECCAK_EMPTY_LIST_RLP;
use triehash::ordered_trie_root;
use types::{
    map_execution_block_header_fields_except_withdrawals, Address, EthSpec, ExecutionBlockHash,
    ExecutionBlockHeader, ExecutionPayloadRef, Hash256, Hash64, Uint256,
};

impl<T: EthSpec> ExecutionLayer<T> {
    /// Verify `payload.block_hash` locally within Lighthouse.
    ///
    /// No remote calls to the execution client will be made, so this is quite a cheap check.
    pub fn verify_payload_block_hash(&self, payload: ExecutionPayloadRef<T>) -> Result<(), Error> {
        let _timer = metrics::start_timer(&metrics::EXECUTION_LAYER_VERIFY_BLOCK_HASH);

        // Calculate the transactions root.
        // We're currently using a deprecated Parity library for this. We should move to a
        // better alternative when one appears, possibly following Reth.
        let rlp_transactions_root = ordered_trie_root::<KeccakHasher, _>(
            payload.transactions().iter().map(|txn_bytes| &**txn_bytes),
        );

        // Calculate withdrawals root (post-Capella).
        let rlp_withdrawals_root = if let Ok(withdrawals) = payload.withdrawals() {
            Some(ordered_trie_root::<KeccakHasher, _>(
                withdrawals.iter().map(|withdrawal| {
                    rlp_encode_withdrawal(&JsonWithdrawal::from(withdrawal.clone()))
                }),
            ))
        } else {
            None
        };

        // Construct the block header.
        let exec_block_header = ExecutionBlockHeader::from_payload(
            payload,
            KECCAK_EMPTY_LIST_RLP.as_fixed_bytes().into(),
            rlp_transactions_root,
            rlp_withdrawals_root,
        );

        // Hash the RLP encoding of the block header.
        let rlp_block_header = rlp_encode_block_header(&exec_block_header);
        let header_hash = ExecutionBlockHash::from_root(keccak256(&rlp_block_header));

        if header_hash != payload.block_hash() {
            return Err(Error::BlockHashMismatch {
                computed: header_hash,
                payload: payload.block_hash(),
                transactions_root: rlp_transactions_root,
            });
        }

        Ok(())
    }
}

/// RLP encode a withdrawal.
pub fn rlp_encode_withdrawal(withdrawal: &JsonWithdrawal) -> Vec<u8> {
    let mut rlp_stream = RlpStream::new();
    rlp_stream.begin_list(4);
    rlp_stream.append(&withdrawal.index);
    rlp_stream.append(&withdrawal.validator_index);
    rlp_stream.append(&withdrawal.address);
    rlp_stream.append(&withdrawal.amount);
    rlp_stream.out().into()
}

/// RLP encode an execution block header.
pub fn rlp_encode_block_header(header: &ExecutionBlockHeader) -> Vec<u8> {
    let mut rlp_header_stream = RlpStream::new();
    rlp_header_stream.begin_unbounded_list();
    map_execution_block_header_fields_except_withdrawals!(&header, |_, field| {
        rlp_header_stream.append(field);
    });
    if let Some(withdrawals_root) = &header.withdrawals_root {
        rlp_header_stream.append(withdrawals_root);
    }
    rlp_header_stream.finalize_unbounded_list();
    rlp_header_stream.out().into()
}

|
#[cfg(test)]
|
||||||
|
mod test {
|
||||||
|
use super::*;
|
||||||
|
use hex::FromHex;
|
||||||
|
use std::str::FromStr;
|
||||||
|
|
||||||
|
fn test_rlp_encoding(
|
||||||
|
header: &ExecutionBlockHeader,
|
||||||
|
expected_rlp: Option<&str>,
|
||||||
|
expected_hash: Hash256,
|
||||||
|
) {
|
||||||
|
let rlp_encoding = rlp_encode_block_header(header);
|
||||||
|
|
||||||
|
if let Some(expected_rlp) = expected_rlp {
|
||||||
|
let computed_rlp = hex::encode(&rlp_encoding);
|
||||||
|
assert_eq!(expected_rlp, computed_rlp);
|
||||||
|
}
|
||||||
|
|
||||||
|
let computed_hash = keccak256(&rlp_encoding);
|
||||||
|
assert_eq!(expected_hash, computed_hash);
|
||||||
|
}
|
||||||
|
|
||||||
|
#[test]
|
||||||
|
fn test_rlp_encode_eip1559_block() {
|
||||||
|
let header = ExecutionBlockHeader {
|
||||||
|
parent_hash: Hash256::from_str("e0a94a7a3c9617401586b1a27025d2d9671332d22d540e0af72b069170380f2a").unwrap(),
|
||||||
|
ommers_hash: Hash256::from_str("1dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347").unwrap(),
|
||||||
|
beneficiary: Address::from_str("ba5e000000000000000000000000000000000000").unwrap(),
|
||||||
|
state_root: Hash256::from_str("ec3c94b18b8a1cff7d60f8d258ec723312932928626b4c9355eb4ab3568ec7f7").unwrap(),
|
||||||
|
transactions_root: Hash256::from_str("50f738580ed699f0469702c7ccc63ed2e51bc034be9479b7bff4e68dee84accf").unwrap(),
|
||||||
|
receipts_root: Hash256::from_str("29b0562f7140574dd0d50dee8a271b22e1a0a7b78fca58f7c60370d8317ba2a9").unwrap(),
|
||||||
|
logs_bloom: <[u8; 256]>::from_hex("00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000").unwrap().into(),
|
||||||
|
difficulty: 0x020000.into(),
|
||||||
|
number: 0x01_u64.into(),
|
||||||
|
gas_limit: 0x016345785d8a0000_u64.into(),
|
||||||
|
gas_used: 0x015534_u64.into(),
|
||||||
|
timestamp: 0x079e,
|
||||||
|
extra_data: vec![0x42],
|
||||||
|
mix_hash: Hash256::from_str("0000000000000000000000000000000000000000000000000000000000000000").unwrap(),
|
||||||
|
nonce: Hash64::zero(),
|
||||||
|
base_fee_per_gas: 0x036b_u64.into(),
|
||||||
|
withdrawals_root: None,
|
||||||
|
};
|
||||||
|
let expected_rlp = "f90200a0e0a94a7a3c9617401586b1a27025d2d9671332d22d540e0af72b069170380f2aa01dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d4934794ba5e000000000000000000000000000000000000a0ec3c94b18b8a1cff7d60f8d258ec723312932928626b4c9355eb4ab3568ec7f7a050f738580ed699f0469702c7ccc63ed2e51bc034be9479b7bff4e68dee84accfa029b0562f7140574dd0d50dee8a271b22e1a0a7b78fca58f7c60370d8317ba2a9b9010000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000830200000188016345785d8a00008301553482079e42a0000000000000000000000000000000000000000000000000000000000000000088000000000000000082036b";
|
||||||
|
let expected_hash =
|
||||||
|
Hash256::from_str("6a251c7c3c5dca7b42407a3752ff48f3bbca1fab7f9868371d9918daf1988d1f")
|
||||||
|
.unwrap();
|
||||||
|
test_rlp_encoding(&header, Some(expected_rlp), expected_hash);
|
||||||
|
}
|
||||||
|
|
||||||
|
#[test]
|
||||||
|
fn test_rlp_encode_merge_block() {
|
||||||
|
let header = ExecutionBlockHeader {
|
||||||
|
parent_hash: Hash256::from_str("927ca537f06c783a3a2635b8805eef1c8c2124f7444ad4a3389898dd832f2dbe").unwrap(),
|
||||||
|
ommers_hash: Hash256::from_str("1dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347").unwrap(),
|
||||||
|
beneficiary: Address::from_str("ba5e000000000000000000000000000000000000").unwrap(),
|
||||||
|
state_root: Hash256::from_str("0xe97859b065bd8dbbb4519c7cb935024de2484c2b7f881181b4360492f0b06b82").unwrap(),
|
||||||
|
transactions_root: Hash256::from_str("50f738580ed699f0469702c7ccc63ed2e51bc034be9479b7bff4e68dee84accf").unwrap(),
|
||||||
|
receipts_root: Hash256::from_str("29b0562f7140574dd0d50dee8a271b22e1a0a7b78fca58f7c60370d8317ba2a9").unwrap(),
|
||||||
|
logs_bloom: <[u8; 256]>::from_hex("00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000").unwrap().into(),
|
||||||
|
difficulty: 0x00.into(),
|
||||||
|
number: 0x01_u64.into(),
|
||||||
|
gas_limit: 0x016345785d8a0000_u64.into(),
|
||||||
|
gas_used: 0x015534_u64.into(),
|
||||||
|
timestamp: 0x079e,
|
||||||
|
extra_data: vec![0x42],
|
||||||
|
mix_hash: Hash256::from_str("0000000000000000000000000000000000000000000000000000000000020000").unwrap(),
|
||||||
|
nonce: Hash64::zero(),
|
||||||
|
base_fee_per_gas: 0x036b_u64.into(),
|
||||||
|
withdrawals_root: None,
|
||||||
|
};
|
||||||
|
let expected_rlp = "f901fda0927ca537f06c783a3a2635b8805eef1c8c2124f7444ad4a3389898dd832f2dbea01dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d4934794ba5e000000000000000000000000000000000000a0e97859b065bd8dbbb4519c7cb935024de2484c2b7f881181b4360492f0b06b82a050f738580ed699f0469702c7ccc63ed2e51bc034be9479b7bff4e68dee84accfa029b0562f7140574dd0d50dee8a271b22e1a0a7b78fca58f7c60370d8317ba2a9b9010000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000800188016345785d8a00008301553482079e42a0000000000000000000000000000000000000000000000000000000000002000088000000000000000082036b";
|
||||||
|
let expected_hash =
|
||||||
|
Hash256::from_str("0x5b1f0f2efdaa19e996b4aea59eeb67620259f09732732a339a10dac311333684")
|
||||||
|
.unwrap();
|
||||||
|
test_rlp_encoding(&header, Some(expected_rlp), expected_hash);
|
||||||
|
}
|
||||||
|
|
||||||
|
// Test a real payload from mainnet.
|
||||||
|
#[test]
|
||||||
|
fn test_rlp_encode_block_16182891() {
|
||||||
|
let header = ExecutionBlockHeader {
|
||||||
|
parent_hash: Hash256::from_str("3e9c7b3f403947f110f68c4564a004b73dd8ebf73b143e46cc637926eec01a6d").unwrap(),
|
||||||
|
ommers_hash: Hash256::from_str("1dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347").unwrap(),
|
||||||
|
beneficiary: Address::from_str("dafea492d9c6733ae3d56b7ed1adb60692c98bc5").unwrap(),
|
||||||
|
state_root: Hash256::from_str("5a8183d230818a167477420ce3a393ca3ef8706a7d596694ab6059894ed6fda9").unwrap(),
|
||||||
|
transactions_root: Hash256::from_str("0223f0cb35f184d2ac409e89dc0768ad738f777bd1c85d3302ca50f307180c94").unwrap(),
|
||||||
|
receipts_root: Hash256::from_str("371c76821b1cc21232574604eac5349d51647eb530e2a45d4f6fe2c501351aa5").unwrap(),
|
||||||
|
logs_bloom: <[u8; 256]>::from_hex("1a2c559955848d2662a0634cb40c7a6192a1524f11061203689bcbcdec901b054084d4f4d688009d24c10918e0089b48e72fe2d7abafb903889d10c3827c6901096612d259801b1b7ba1663a4201f5f88f416a9997c55bcc2c54785280143b057a008764c606182e324216822a2d5913e797a05c16cc1468d001acf3783b18e00e0203033e43106178db554029e83ca46402dc49d929d7882a04a0e7215041bdabf7430bd10ef4bb658a40f064c63c4816660241c2480862f26742fdf9ca41637731350301c344e439428182a03e384484e6d65d0c8a10117c6739ca201b60974519a1ae6b0c3966c0f650b449d10eae065dab2c83ab4edbab5efdea50bbc801").unwrap().into(),
|
||||||
|
difficulty: 0.into(),
|
||||||
|
number: 16182891.into(),
|
||||||
|
gas_limit: 0x1c9c380.into(),
|
||||||
|
gas_used: 0xe9b752.into(),
|
||||||
|
timestamp: 0x6399bf63,
|
||||||
|
extra_data: hex::decode("496c6c756d696e61746520446d6f63726174697a6520447374726962757465").unwrap(),
|
||||||
|
mix_hash: Hash256::from_str("bf5289894b2ceab3549f92f063febbac896b280ddb18129a57cff13113c11b13").unwrap(),
|
||||||
|
nonce: Hash64::zero(),
|
||||||
|
base_fee_per_gas: 0x34187b238_u64.into(),
|
||||||
|
withdrawals_root: None,
|
||||||
|
};
|
||||||
|
let expected_hash =
|
||||||
|
Hash256::from_str("6da69709cd5a34079b6604d29cd78fc01dacd7c6268980057ad92a2bede87351")
|
||||||
|
.unwrap();
|
||||||
|
test_rlp_encoding(&header, None, expected_hash);
|
||||||
|
}
|
||||||
|
}
|
@@ -329,7 +329,7 @@ pub struct ProposeBlindedBlockResponse {
 // This name is work in progress, it could
 // change when this method is actually proposed
 // but I'm writing this as it has been described
-#[derive(Clone, Copy)]
+#[derive(Clone, Copy, Debug)]
 pub struct SupportedApis {
     pub new_payload_v1: bool,
     pub new_payload_v2: bool,
@@ -27,7 +27,7 @@ impl From<jsonwebtoken::errors::Error> for Error {
 /// Provides wrapper around `[u8; JWT_SECRET_LENGTH]` that implements `Zeroize`.
 #[derive(Zeroize, Clone)]
 #[zeroize(drop)]
-pub struct JwtKey([u8; JWT_SECRET_LENGTH as usize]);
+pub struct JwtKey([u8; JWT_SECRET_LENGTH]);
 
 impl JwtKey {
     /// Wrap given slice in `Self`. Returns an error if slice.len() != `JWT_SECRET_LENGTH`.
@@ -10,7 +10,7 @@ use serde_json::json;
 use tokio::sync::RwLock;
 
 use std::time::Duration;
-use types::EthSpec;
+use types::{ChainSpec, EthSpec};
 
 pub use deposit_log::{DepositLog, Log};
 pub use reqwest::Client;
@@ -540,12 +540,27 @@ impl HttpJsonRpc {
     pub fn new(
         url: SensitiveUrl,
         execution_timeout_multiplier: Option<u32>,
+        spec: &ChainSpec,
     ) -> Result<Self, Error> {
+        // FIXME: remove this `cached_supported_apis` spec hack once the `engine_getCapabilities`
+        // method is implemented in all execution clients:
+        // https://github.com/ethereum/execution-apis/issues/321
+        let cached_supported_apis = RwLock::new(Some(SupportedApis {
+            new_payload_v1: true,
+            new_payload_v2: spec.capella_fork_epoch.is_some() || spec.eip4844_fork_epoch.is_some(),
+            forkchoice_updated_v1: true,
+            forkchoice_updated_v2: spec.capella_fork_epoch.is_some()
+                || spec.eip4844_fork_epoch.is_some(),
+            get_payload_v1: true,
+            get_payload_v2: spec.capella_fork_epoch.is_some() || spec.eip4844_fork_epoch.is_some(),
+            exchange_transition_configuration_v1: true,
+        }));
+
         Ok(Self {
             client: Client::builder().build()?,
             url,
             execution_timeout_multiplier: execution_timeout_multiplier.unwrap_or(1),
-            cached_supported_apis: Default::default(),
+            cached_supported_apis,
             auth: None,
         })
     }
@@ -554,12 +569,27 @@ impl HttpJsonRpc {
         url: SensitiveUrl,
         auth: Auth,
         execution_timeout_multiplier: Option<u32>,
+        spec: &ChainSpec,
     ) -> Result<Self, Error> {
+        // FIXME: remove this `cached_supported_apis` spec hack once the `engine_getCapabilities`
+        // method is implemented in all execution clients:
+        // https://github.com/ethereum/execution-apis/issues/321
+        let cached_supported_apis = RwLock::new(Some(SupportedApis {
+            new_payload_v1: true,
+            new_payload_v2: spec.capella_fork_epoch.is_some() || spec.eip4844_fork_epoch.is_some(),
+            forkchoice_updated_v1: true,
+            forkchoice_updated_v2: spec.capella_fork_epoch.is_some()
+                || spec.eip4844_fork_epoch.is_some(),
+            get_payload_v1: true,
+            get_payload_v2: spec.capella_fork_epoch.is_some() || spec.eip4844_fork_epoch.is_some(),
+            exchange_transition_configuration_v1: true,
+        }));
+
         Ok(Self {
             client: Client::builder().build()?,
             url,
             execution_timeout_multiplier: execution_timeout_multiplier.unwrap_or(1),
-            cached_supported_apis: Default::default(),
+            cached_supported_apis,
             auth: Some(auth),
         })
     }
@@ -885,21 +915,25 @@ impl HttpJsonRpc {
         Ok(response)
     }
 
-    // this is a stub as this method hasn't been defined yet
-    pub async fn supported_apis_v1(&self) -> Result<SupportedApis, Error> {
+    // TODO: This is currently a stub for the `engine_getCapabilities`
+    // method. This stub is unused because we set cached_supported_apis
+    // in the constructor based on the `spec`
+    // Implement this once the execution clients support it
+    // https://github.com/ethereum/execution-apis/issues/321
+    pub async fn get_capabilities(&self) -> Result<SupportedApis, Error> {
         Ok(SupportedApis {
             new_payload_v1: true,
-            new_payload_v2: cfg!(feature = "withdrawals-processing"),
+            new_payload_v2: true,
             forkchoice_updated_v1: true,
-            forkchoice_updated_v2: cfg!(feature = "withdrawals-processing"),
+            forkchoice_updated_v2: true,
             get_payload_v1: true,
-            get_payload_v2: cfg!(feature = "withdrawals-processing"),
+            get_payload_v2: true,
             exchange_transition_configuration_v1: true,
         })
     }
 
-    pub async fn set_cached_supported_apis(&self, supported_apis: SupportedApis) {
-        *self.cached_supported_apis.write().await = Some(supported_apis);
+    pub async fn set_cached_supported_apis(&self, supported_apis: Option<SupportedApis>) {
+        *self.cached_supported_apis.write().await = supported_apis;
     }
 
     pub async fn get_cached_supported_apis(&self) -> Result<SupportedApis, Error> {
@@ -907,8 +941,8 @@ impl HttpJsonRpc {
         if let Some(supported_apis) = cached_opt {
             Ok(supported_apis)
         } else {
-            let supported_apis = self.supported_apis_v1().await?;
-            self.set_cached_supported_apis(supported_apis).await;
+            let supported_apis = self.get_capabilities().await?;
+            self.set_cached_supported_apis(Some(supported_apis)).await;
             Ok(supported_apis)
         }
     }
@@ -919,10 +953,13 @@ impl HttpJsonRpc {
         &self,
         execution_payload: ExecutionPayload<T>,
     ) -> Result<PayloadStatusV1, Error> {
-        match execution_payload {
-            ExecutionPayload::Eip4844(_) => self.new_payload_v3(execution_payload).await,
-            ExecutionPayload::Capella(_) => self.new_payload_v2(execution_payload).await,
-            ExecutionPayload::Merge(_) => self.new_payload_v1(execution_payload).await,
+        let supported_apis = self.get_cached_supported_apis().await?;
+        if supported_apis.new_payload_v2 {
+            self.new_payload_v2(execution_payload).await
+        } else if supported_apis.new_payload_v1 {
+            self.new_payload_v1(execution_payload).await
+        } else {
+            Err(Error::RequiredMethodUnsupported("engine_newPayload"))
         }
     }
 
@@ -933,11 +970,13 @@ impl HttpJsonRpc {
         fork_name: ForkName,
         payload_id: PayloadId,
     ) -> Result<ExecutionPayload<T>, Error> {
-        match fork_name {
-            ForkName::Eip4844 => self.get_payload_v3(fork_name, payload_id).await,
-            ForkName::Capella => self.get_payload_v2(fork_name, payload_id).await,
-            ForkName::Merge => self.get_payload_v1(fork_name, payload_id).await,
-            _ => Err(Error::RequiredMethodUnsupported("engine_getPayload")),
+        let supported_apis = self.get_cached_supported_apis().await?;
+        if supported_apis.get_payload_v2 {
+            self.get_payload_v2(fork_name, payload_id).await
+        } else if supported_apis.new_payload_v1 {
+            self.get_payload_v1(fork_name, payload_id).await
+        } else {
+            Err(Error::RequiredMethodUnsupported("engine_getPayload"))
         }
     }
 
@@ -945,25 +984,23 @@ impl HttpJsonRpc {
     // forkchoice_updated that the execution engine supports
     pub async fn forkchoice_updated(
         &self,
-        fork_name: ForkName,
         forkchoice_state: ForkchoiceState,
         payload_attributes: Option<PayloadAttributes>,
     ) -> Result<ForkchoiceUpdatedResponse, Error> {
-        match fork_name {
-            ForkName::Capella | ForkName::Eip4844 => {
-                self.forkchoice_updated_v2(forkchoice_state, payload_attributes)
-                    .await
-            }
-            ForkName::Merge => {
-                self.forkchoice_updated_v1(
-                    forkchoice_state,
-                    payload_attributes
-                        .map(|pa| pa.downgrade_to_v1())
-                        .transpose()?,
-                )
-                .await
-            }
-            _ => Err(Error::RequiredMethodUnsupported("engine_forkchoiceUpdated")),
+        let supported_apis = self.get_cached_supported_apis().await?;
+        if supported_apis.forkchoice_updated_v2 {
+            self.forkchoice_updated_v2(forkchoice_state, payload_attributes)
+                .await
+        } else if supported_apis.forkchoice_updated_v1 {
+            self.forkchoice_updated_v1(
+                forkchoice_state,
+                payload_attributes
+                    .map(|pa| pa.downgrade_to_v1())
+                    .transpose()?,
+            )
+            .await
+        } else {
+            Err(Error::RequiredMethodUnsupported("engine_forkchoiceUpdated"))
         }
     }
 }
@@ -989,6 +1026,7 @@ mod test {
     impl Tester {
         pub fn new(with_auth: bool) -> Self {
             let server = MockServer::unit_testing();
+            let spec = MainnetEthSpec::default_spec();
 
             let rpc_url = SensitiveUrl::parse(&server.url()).unwrap();
             let echo_url = SensitiveUrl::parse(&format!("{}/echo", server.url())).unwrap();
@@ -999,13 +1037,13 @@ mod test {
                 let echo_auth =
                     Auth::new(JwtKey::from_slice(&DEFAULT_JWT_SECRET).unwrap(), None, None);
                 (
-                    Arc::new(HttpJsonRpc::new_with_auth(rpc_url, rpc_auth, None).unwrap()),
-                    Arc::new(HttpJsonRpc::new_with_auth(echo_url, echo_auth, None).unwrap()),
+                    Arc::new(HttpJsonRpc::new_with_auth(rpc_url, rpc_auth, None, &spec).unwrap()),
+                    Arc::new(HttpJsonRpc::new_with_auth(echo_url, echo_auth, None, &spec).unwrap()),
                 )
             } else {
                (
-                    Arc::new(HttpJsonRpc::new(rpc_url, None).unwrap()),
-                    Arc::new(HttpJsonRpc::new(echo_url, None).unwrap()),
+                    Arc::new(HttpJsonRpc::new(rpc_url, None, &spec).unwrap()),
+                    Arc::new(HttpJsonRpc::new(echo_url, None, &spec).unwrap()),
                )
            };
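The constructor changes above replace run-time capability discovery with a spec-derived default: the v2 engine methods are only marked supported when a Capella or EIP-4844 fork is actually scheduled. A self-contained sketch of that rule, with the field names copied from the diff but the epoch value hypothetical:

```rust
// Stand-in for the two `ChainSpec` fields the constructors consult.
struct SpecForks {
    capella_fork_epoch: Option<u64>,
    eip4844_fork_epoch: Option<u64>,
}

// The same boolean feeds `new_payload_v2`, `forkchoice_updated_v2` and
// `get_payload_v2` in the cached `SupportedApis`.
fn v2_supported(spec: &SpecForks) -> bool {
    spec.capella_fork_epoch.is_some() || spec.eip4844_fork_epoch.is_some()
}

fn main() {
    let unscheduled = SpecForks { capella_fork_epoch: None, eip4844_fork_epoch: None };
    let scheduled = SpecForks { capella_fork_epoch: Some(100), eip4844_fork_epoch: None };
    assert!(!v2_supported(&unscheduled)); // no Capella scheduled: stay on v1 methods
    assert!(v2_supported(&scheduled)); // Capella scheduled: assume v2 methods exist
}
```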
@@ -331,8 +331,8 @@ pub struct JsonWithdrawal {
     #[serde(with = "eth2_serde_utils::u64_hex_be")]
     pub validator_index: u64,
     pub address: Address,
-    #[serde(with = "eth2_serde_utils::u256_hex_be")]
-    pub amount: Uint256,
+    #[serde(with = "eth2_serde_utils::u64_hex_be")]
+    pub amount: u64,
 }
 
 impl From<Withdrawal> for JsonWithdrawal {
@@ -341,7 +341,7 @@ impl From<Withdrawal> for JsonWithdrawal {
             index: withdrawal.index,
             validator_index: withdrawal.validator_index,
             address: withdrawal.address,
-            amount: Uint256::from((withdrawal.amount as u128) * 1000000000u128),
+            amount: withdrawal.amount,
         }
     }
 }
@@ -352,8 +352,7 @@ impl From<JsonWithdrawal> for Withdrawal {
             index: jw.index,
             validator_index: jw.validator_index,
             address: jw.address,
-            //FIXME(sean) if EE gives us too large a number this panics
-            amount: (jw.amount / 1000000000).as_u64(),
+            amount: jw.amount,
         }
     }
 }
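For context on the `JsonWithdrawal` change: the old code treated the JSON `amount` as Wei (a 256-bit quantity obtained by multiplying the consensus-layer Gwei value by 10^9, with a panic risk on the reverse conversion), while the new code passes the Gwei `u64` straight through. A small arithmetic sketch of the difference, with a hypothetical 32 ETH withdrawal:

```rust
// Old scheme: convert the consensus-layer Gwei amount to Wei for the JSON.
fn gwei_to_wei(amount_gwei: u64) -> u128 {
    (amount_gwei as u128) * 1_000_000_000
}

fn main() {
    let amount_gwei: u64 = 32_000_000_000; // 32 ETH, expressed in Gwei
    // Previously serialized (via u256_hex_be): the derived Wei value.
    assert_eq!(gwei_to_wei(amount_gwei), 32_000_000_000_000_000_000);
    // Now serialized (via u64_hex_be): the Gwei value itself.
    assert_eq!(format!("{amount_gwei:#x}"), "0x773594000");
}
```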
@@ -11,7 +11,7 @@ use std::sync::Arc;
 use task_executor::TaskExecutor;
 use tokio::sync::{watch, Mutex, RwLock};
 use tokio_stream::wrappers::WatchStream;
-use types::{ExecutionBlockHash, ForkName};
+use types::ExecutionBlockHash;
 
 /// The number of payload IDs that will be stored for each `Engine`.
 ///
@@ -114,7 +114,7 @@ pub struct Engine {
     pub api: HttpJsonRpc,
     payload_id_cache: Mutex<LruCache<PayloadIdCacheKey, PayloadId>>,
     state: RwLock<State>,
-    latest_forkchoice_state: RwLock<Option<(ForkName, ForkchoiceState)>>,
+    latest_forkchoice_state: RwLock<Option<ForkchoiceState>>,
     executor: TaskExecutor,
     log: Logger,
 }
@@ -153,15 +153,13 @@ impl Engine {
 
     pub async fn notify_forkchoice_updated(
         &self,
-        fork_name: ForkName,
         forkchoice_state: ForkchoiceState,
         payload_attributes: Option<PayloadAttributes>,
         log: &Logger,
     ) -> Result<ForkchoiceUpdatedResponse, EngineApiError> {
-        info!(log, "Notifying FCU"; "fork_name" => ?fork_name);
         let response = self
             .api
-            .forkchoice_updated(fork_name, forkchoice_state, payload_attributes.clone())
+            .forkchoice_updated(forkchoice_state, payload_attributes.clone())
             .await?;
 
         if let Some(payload_id) = response.payload_id {
@@ -181,18 +179,18 @@ impl Engine {
         Ok(response)
     }
 
-    async fn get_latest_forkchoice_state(&self) -> Option<(ForkName, ForkchoiceState)> {
+    async fn get_latest_forkchoice_state(&self) -> Option<ForkchoiceState> {
         *self.latest_forkchoice_state.read().await
     }
 
-    pub async fn set_latest_forkchoice_state(&self, fork_name: ForkName, state: ForkchoiceState) {
-        *self.latest_forkchoice_state.write().await = Some((fork_name, state));
+    pub async fn set_latest_forkchoice_state(&self, state: ForkchoiceState) {
+        *self.latest_forkchoice_state.write().await = Some(state);
     }
 
     async fn send_latest_forkchoice_state(&self) {
         let latest_forkchoice_state = self.get_latest_forkchoice_state().await;
 
-        if let Some((fork_name, forkchoice_state)) = latest_forkchoice_state {
+        if let Some(forkchoice_state) = latest_forkchoice_state {
             if forkchoice_state.head_block_hash == ExecutionBlockHash::zero() {
                 debug!(
                     self.log,
@@ -206,16 +204,11 @@ impl Engine {
                 self.log,
                 "Issuing forkchoiceUpdated";
                 "forkchoice_state" => ?forkchoice_state,
-                "fork_name" => ?fork_name,
             );
 
             // For simplicity, payload attributes are never included in this call. It may be
             // reasonable to include them in the future.
-            if let Err(e) = self
-                .api
-                .forkchoice_updated(fork_name, forkchoice_state, None)
-                .await
-            {
+            if let Err(e) = self.api.forkchoice_updated(forkchoice_state, None).await {
                 debug!(
                     self.log,
                     "Failed to issue latest head to engine";
beacon_node/execution_layer/src/keccak.rs (new file, 35 lines)
@@ -0,0 +1,35 @@
+// Copyright 2017, 2018 Parity Technologies
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+use hash256_std_hasher::Hash256StdHasher;
+use hash_db::Hasher;
+use types::Hash256;
+
+pub fn keccak256(bytes: &[u8]) -> Hash256 {
+    Hash256::from(ethers_core::utils::keccak256(bytes))
+}
+
+/// Keccak hasher.
+#[derive(Default, Debug, Clone, PartialEq)]
+pub struct KeccakHasher;
+
+impl Hasher for KeccakHasher {
+    type Out = Hash256;
+    type StdHasher = Hash256StdHasher;
+
+    const LENGTH: usize = 32;
+
+    fn hash(x: &[u8]) -> Self::Out {
+        keccak256(x)
+    }
+}
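`KeccakHasher` exists purely as an adapter: `triehash::ordered_trie_root` is generic over a `hash_db::Hasher`, and this implementation plugs Ethereum's Keccak-256 into it. A small usage sketch under the dependency versions pinned in the Cargo.toml above; the module is private in this commit, so assume `KeccakHasher` is brought into scope, and the printed root is illustrative rather than a known test vector:

```rust
use triehash::ordered_trie_root;
// `KeccakHasher` as defined in `keccak.rs`, assumed to be in scope here.

fn main() {
    // Items are opaque byte strings; `ordered_trie_root` keys them by their
    // RLP-encoded list index, which is how the transactions root is built.
    let items: Vec<Vec<u8>> = vec![b"first tx bytes".to_vec(), b"second tx bytes".to_vec()];
    let root = ordered_trie_root::<KeccakHasher, _>(items);
    println!("ordered trie root: {root:?}");
}
```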
@@ -35,7 +35,7 @@ use tokio::{
     time::sleep,
 };
 use tokio_stream::wrappers::WatchStream;
-use types::{AbstractExecPayload, Blob, ExecPayload, KzgCommitment};
+use types::{AbstractExecPayload, BeaconStateError, Blob, ExecPayload, KzgCommitment};
 use types::{
     BlindedPayload, BlockType, ChainSpec, Epoch, ExecutionBlockHash, ForkName,
     ProposerPreparationData, PublicKeyBytes, Signature, SignedBeaconBlock, Slot, Uint256,
@@ -44,8 +44,10 @@ use types::{
     ExecutionPayload, ExecutionPayloadCapella, ExecutionPayloadEip4844, ExecutionPayloadMerge,
 };
 
+mod block_hash;
 mod engine_api;
 mod engines;
+mod keccak;
 mod metrics;
 pub mod payload_cache;
 mod payload_status;
@@ -94,7 +96,19 @@ pub enum Error {
     ShuttingDown,
     FeeRecipientUnspecified,
     MissingLatestValidHash,
+    BlockHashMismatch {
+        computed: ExecutionBlockHash,
+        payload: ExecutionBlockHash,
+        transactions_root: Hash256,
+    },
     InvalidJWTSecret(String),
+    BeaconStateError(BeaconStateError),
+}
+
+impl From<BeaconStateError> for Error {
+    fn from(e: BeaconStateError) -> Self {
+        Error::BeaconStateError(e)
+    }
 }
 
 impl From<ApiError> for Error {
@@ -150,17 +164,17 @@ impl<T: EthSpec, Payload: AbstractExecPayload<T>> BlockProposalContents<T, Paylo
             } => payload,
         }
     }
-    pub fn default_at_fork(fork_name: ForkName) -> Self {
-        match fork_name {
+    pub fn default_at_fork(fork_name: ForkName) -> Result<Self, BeaconStateError> {
+        Ok(match fork_name {
             ForkName::Base | ForkName::Altair | ForkName::Merge | ForkName::Capella => {
-                BlockProposalContents::Payload(Payload::default_at_fork(fork_name))
+                BlockProposalContents::Payload(Payload::default_at_fork(fork_name)?)
             }
             ForkName::Eip4844 => BlockProposalContents::PayloadAndBlobs {
-                payload: Payload::default_at_fork(fork_name),
+                payload: Payload::default_at_fork(fork_name)?,
                 blobs: VariableList::default(),
                 kzg_commitments: VariableList::default(),
             },
-        }
+        })
     }
 }
 
@@ -214,7 +228,6 @@ struct Inner<E: EthSpec> {
     executor: TaskExecutor,
     payload_cache: PayloadCache<E>,
     builder_profit_threshold: Uint256,
-    spec: ChainSpec,
     log: Logger,
 }
 
@@ -238,8 +251,6 @@ pub struct Config {
     /// The minimum value of an external payload for it to be considered in a proposal.
     pub builder_profit_threshold: u128,
     pub execution_timeout_multiplier: Option<u32>,
-    #[serde(skip)]
-    pub spec: ChainSpec,
 }
 
 /// Provides access to one execution engine and provides a neat interface for consumption by the
@@ -251,7 +262,12 @@ pub struct ExecutionLayer<T: EthSpec> {
 
 impl<T: EthSpec> ExecutionLayer<T> {
     /// Instantiate `Self` with an Execution engine specified in `Config`, using JSON-RPC via HTTP.
-    pub fn from_config(config: Config, executor: TaskExecutor, log: Logger) -> Result<Self, Error> {
+    pub fn from_config(
+        config: Config,
+        executor: TaskExecutor,
+        log: Logger,
+        spec: &ChainSpec,
+    ) -> Result<Self, Error> {
         let Config {
             execution_endpoints: urls,
             builder_url,
@@ -262,7 +278,6 @@ impl<T: EthSpec> ExecutionLayer<T> {
             default_datadir,
             builder_profit_threshold,
             execution_timeout_multiplier,
-            spec,
         } = config;
 
         if urls.len() > 1 {
@@ -307,8 +322,13 @@ impl<T: EthSpec> ExecutionLayer<T> {
         let engine: Engine = {
             let auth = Auth::new(jwt_key, jwt_id, jwt_version);
             debug!(log, "Loaded execution endpoint"; "endpoint" => %execution_url, "jwt_path" => ?secret_file.as_path());
-            let api = HttpJsonRpc::new_with_auth(execution_url, auth, execution_timeout_multiplier)
-                .map_err(Error::ApiError)?;
+            let api = HttpJsonRpc::new_with_auth(
+                execution_url,
+                auth,
+                execution_timeout_multiplier,
+                &spec,
+            )
+            .map_err(Error::ApiError)?;
             Engine::new(api, executor.clone(), &log)
         };
 
@@ -334,7 +354,6 @@ impl<T: EthSpec> ExecutionLayer<T> {
             executor,
             payload_cache: PayloadCache::default(),
             builder_profit_threshold: Uint256::from(builder_profit_threshold),
-            spec,
             log,
         };
 
@@ -805,10 +824,6 @@ impl<T: EthSpec> ExecutionLayer<T> {
                             spec,
                         ) {
                             Ok(()) => Ok(ProvenancedPayload::Builder(
-                                //FIXME(sean) the builder API needs to be updated
-                                // NOTE the comment above was removed in the
-                                // rebase with unstable.. I think it goes
-                                // here now?
                                 BlockProposalContents::Payload(relay.data.message.header),
                             )),
                             Err(reason) if !reason.payload_invalid() => {
@@ -860,19 +875,11 @@ impl<T: EthSpec> ExecutionLayer<T> {
                             spec,
                         ) {
                             Ok(()) => Ok(ProvenancedPayload::Builder(
-                                //FIXME(sean) the builder API needs to be updated
-                                // NOTE the comment above was removed in the
-                                // rebase with unstable.. I think it goes
-                                // here now?
                                 BlockProposalContents::Payload(relay.data.message.header),
                             )),
                             // If the payload is valid then use it. The local EE failed
                             // to produce a payload so we have no alternative.
                             Err(e) if !e.payload_invalid() => Ok(ProvenancedPayload::Builder(
-                                //FIXME(sean) the builder API needs to be updated
-                                // NOTE the comment above was removed in the
-                                // rebase with unstable.. I think it goes
-                                // here now?
                                 BlockProposalContents::Payload(relay.data.message.header),
                             )),
                             Err(reason) => {
@@ -1020,7 +1027,6 @@ impl<T: EthSpec> ExecutionLayer<T> {
 
         let response = engine
             .notify_forkchoice_updated(
-                current_fork,
                 fork_choice_state,
                 Some(payload_attributes.clone()),
                 self.log(),
@@ -1281,13 +1287,8 @@ impl<T: EthSpec> ExecutionLayer<T> {
             finalized_block_hash,
         };
 
-        let fork_name = self
-            .inner
-            .spec
-            .fork_name_at_epoch(next_slot.epoch(T::slots_per_epoch()));
-
         self.engine()
-            .set_latest_forkchoice_state(fork_name, forkchoice_state)
+            .set_latest_forkchoice_state(forkchoice_state)
             .await;
 
         let payload_attributes_ref = &payload_attributes;
@@ -1296,7 +1297,6 @@ impl<T: EthSpec> ExecutionLayer<T> {
             .request(|engine| async move {
                 engine
                     .notify_forkchoice_updated(
-                        fork_name,
                         forkchoice_state,
                         payload_attributes_ref.clone(),
                         self.log(),
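One way to read the `default_at_fork` change above: constructing a default payload can now fail, so the fallibility is surfaced as `Result<_, BeaconStateError>`, and the new `From<BeaconStateError> for Error` impl lets call sites propagate it with `?`. A toy model of that shape, with hypothetical variants standing in for the real payload types:

```rust
#[derive(Debug)]
enum BeaconStateError {
    IncorrectStateVariant,
}

#[derive(Debug)]
enum Error {
    BeaconStateError(BeaconStateError),
}

// Mirrors the new `From` impl in the diff, enabling `?` conversion.
impl From<BeaconStateError> for Error {
    fn from(e: BeaconStateError) -> Self {
        Error::BeaconStateError(e)
    }
}

fn default_payload(has_variant: bool) -> Result<&'static str, BeaconStateError> {
    has_variant
        .then_some("Payload::default()")
        .ok_or(BeaconStateError::IncorrectStateVariant)
}

fn propose(has_variant: bool) -> Result<(), Error> {
    let payload = default_payload(has_variant)?; // was an infallible call before
    println!("proposing with {payload}");
    Ok(())
}

fn main() {
    assert!(propose(true).is_ok());
    assert!(matches!(propose(false), Err(Error::BeaconStateError(_))));
}
```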
@@ -45,6 +45,11 @@ lazy_static::lazy_static! {
         "execution_layer_get_payload_by_block_hash_time",
         "Time to reconstruct a payload from the EE using eth_getBlockByHash"
     );
+    pub static ref EXECUTION_LAYER_VERIFY_BLOCK_HASH: Result<Histogram> = try_create_histogram_with_buckets(
+        "execution_layer_verify_block_hash_time",
+        "Time to verify the execution block hash in Lighthouse, without the EL",
+        Ok(vec![10e-6, 50e-6, 100e-6, 500e-6, 1e-3, 5e-3, 10e-3, 50e-3, 100e-3, 500e-3]),
+    );
     pub static ref EXECUTION_LAYER_PAYLOAD_STATUS: Result<IntCounterVec> = try_create_int_counter_vec(
         "execution_layer_payload_status",
         "Indicates the payload status returned for a particular method",
@@ -13,7 +13,8 @@ use std::collections::HashMap;
 use tree_hash::TreeHash;
 use tree_hash_derive::TreeHash;
 use types::{
-    EthSpec, ExecutionBlockHash, ExecutionPayload, ExecutionPayloadMerge, Hash256, Uint256,
+    EthSpec, ExecutionBlockHash, ExecutionPayload, ExecutionPayloadCapella,
+    ExecutionPayloadEip4844, ExecutionPayloadMerge, ForkName, Hash256, Uint256,
 };
 
 const GAS_LIMIT: u64 = 16384;
@@ -113,6 +114,11 @@ pub struct ExecutionBlockGenerator<T: EthSpec> {
     pub pending_payloads: HashMap<ExecutionBlockHash, ExecutionPayload<T>>,
     pub next_payload_id: u64,
     pub payload_ids: HashMap<PayloadId, ExecutionPayload<T>>,
+    /*
+     * Post-merge fork triggers
+     */
+    pub shanghai_time: Option<u64>, // withdrawals
+    pub eip4844_time: Option<u64>,  // 4844
 }
 
 impl<T: EthSpec> ExecutionBlockGenerator<T> {
@@ -120,6 +126,8 @@ impl<T: EthSpec> ExecutionBlockGenerator<T> {
         terminal_total_difficulty: Uint256,
         terminal_block_number: u64,
         terminal_block_hash: ExecutionBlockHash,
+        shanghai_time: Option<u64>,
+        eip4844_time: Option<u64>,
     ) -> Self {
         let mut gen = Self {
             head_block: <_>::default(),
@@ -132,6 +140,8 @@ impl<T: EthSpec> ExecutionBlockGenerator<T> {
             pending_payloads: <_>::default(),
             next_payload_id: 0,
             payload_ids: <_>::default(),
+            shanghai_time,
+            eip4844_time,
         };
 
         gen.insert_pow_block(0).unwrap();
@@ -163,6 +173,16 @@ impl<T: EthSpec> ExecutionBlockGenerator<T> {
         }
     }
 
+    pub fn get_fork_at_timestamp(&self, timestamp: u64) -> ForkName {
+        match self.eip4844_time {
+            Some(fork_time) if timestamp >= fork_time => ForkName::Eip4844,
+            _ => match self.shanghai_time {
+                Some(fork_time) if timestamp >= fork_time => ForkName::Capella,
+                _ => ForkName::Merge,
+            },
+        }
+    }
+
     pub fn execution_block_by_number(&self, number: u64) -> Option<ExecutionBlock> {
         self.block_by_number(number)
             .map(|block| block.as_execution_block(self.terminal_total_difficulty))
@@ -395,7 +415,9 @@ impl<T: EthSpec> ExecutionBlockGenerator<T> {
         }
     }
 
-    pub fn forkchoice_updated_v1(
+    // This function expects payload_attributes to already be validated with respect to
+    // the current fork [obtained by self.get_fork_at_timestamp(payload_attributes.timestamp)]
+    pub fn forkchoice_updated(
         &mut self,
         forkchoice_state: ForkchoiceState,
         payload_attributes: Option<PayloadAttributes>,
@@ -469,23 +491,65 @@ impl<T: EthSpec> ExecutionBlockGenerator<T> {
                     transactions: vec![].into(),
                 }),
                 PayloadAttributes::V2(pa) => {
-                    // FIXME: think about how to test different forks
-                    ExecutionPayload::Merge(ExecutionPayloadMerge {
-                        parent_hash: forkchoice_state.head_block_hash,
-                        fee_recipient: pa.suggested_fee_recipient,
-                        receipts_root: Hash256::repeat_byte(42),
-                        state_root: Hash256::repeat_byte(43),
-                        logs_bloom: vec![0; 256].into(),
-                        prev_randao: pa.prev_randao,
-                        block_number: parent.block_number() + 1,
-                        gas_limit: GAS_LIMIT,
-                        gas_used: GAS_USED,
-                        timestamp: pa.timestamp,
-                        extra_data: "block gen was here".as_bytes().to_vec().into(),
-                        base_fee_per_gas: Uint256::one(),
-                        block_hash: ExecutionBlockHash::zero(),
-                        transactions: vec![].into(),
-                    })
+                    match self.get_fork_at_timestamp(pa.timestamp) {
+                        ForkName::Merge => ExecutionPayload::Merge(ExecutionPayloadMerge {
+                            parent_hash: forkchoice_state.head_block_hash,
+                            fee_recipient: pa.suggested_fee_recipient,
+                            receipts_root: Hash256::repeat_byte(42),
+                            state_root: Hash256::repeat_byte(43),
+                            logs_bloom: vec![0; 256].into(),
+                            prev_randao: pa.prev_randao,
+                            block_number: parent.block_number() + 1,
+                            gas_limit: GAS_LIMIT,
+                            gas_used: GAS_USED,
+                            timestamp: pa.timestamp,
+                            extra_data: "block gen was here".as_bytes().to_vec().into(),
+                            base_fee_per_gas: Uint256::one(),
+                            block_hash: ExecutionBlockHash::zero(),
+                            transactions: vec![].into(),
+                        }),
+                        ForkName::Capella => {
+                            ExecutionPayload::Capella(ExecutionPayloadCapella {
+                                parent_hash: forkchoice_state.head_block_hash,
+                                fee_recipient: pa.suggested_fee_recipient,
+                                receipts_root: Hash256::repeat_byte(42),
+                                state_root: Hash256::repeat_byte(43),
+                                logs_bloom: vec![0; 256].into(),
+                                prev_randao: pa.prev_randao,
+                                block_number: parent.block_number() + 1,
+                                gas_limit: GAS_LIMIT,
+                                gas_used: GAS_USED,
+                                timestamp: pa.timestamp,
+                                extra_data: "block gen was here".as_bytes().to_vec().into(),
+                                base_fee_per_gas: Uint256::one(),
+                                block_hash: ExecutionBlockHash::zero(),
+                                transactions: vec![].into(),
+                                withdrawals: pa.withdrawals.as_ref().unwrap().clone().into(),
+                            })
+                        }
+                        ForkName::Eip4844 => {
+                            ExecutionPayload::Eip4844(ExecutionPayloadEip4844 {
+                                parent_hash: forkchoice_state.head_block_hash,
+                                fee_recipient: pa.suggested_fee_recipient,
+                                receipts_root: Hash256::repeat_byte(42),
+                                state_root: Hash256::repeat_byte(43),
+                                logs_bloom: vec![0; 256].into(),
+                                prev_randao: pa.prev_randao,
+                                block_number: parent.block_number() + 1,
+                                gas_limit: GAS_LIMIT,
+                                gas_used: GAS_USED,
+                                timestamp: pa.timestamp,
+                                extra_data: "block gen was here".as_bytes().to_vec().into(),
+                                base_fee_per_gas: Uint256::one(),
+                                // FIXME(4844): maybe this should be set to something?
+                                excess_data_gas: Uint256::one(),
+                                block_hash: ExecutionBlockHash::zero(),
+                                transactions: vec![].into(),
+                                withdrawals: pa.withdrawals.as_ref().unwrap().clone().into(),
+                            })
+                        }
+                        _ => unreachable!(),
+                    }
                 }
             };
 
@@ -576,6 +640,8 @@ mod test {
             TERMINAL_DIFFICULTY.into(),
             TERMINAL_BLOCK,
             ExecutionBlockHash::zero(),
+            None,
+            None,
         );
 
        for i in 0..=TERMINAL_BLOCK {
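The mock's `get_fork_at_timestamp` checks the later fork first, so when both trigger times are set the highest activated fork wins once its timestamp is reached. A standalone sketch of that ordering with hypothetical fork times (the real method returns `types::ForkName`):

```rust
#[derive(Debug, PartialEq)]
enum Fork {
    Merge,
    Capella,
    Eip4844,
}

fn fork_at(shanghai_time: Option<u64>, eip4844_time: Option<u64>, timestamp: u64) -> Fork {
    // Same shape as the mock: check EIP-4844 first, then Capella, else Merge.
    match eip4844_time {
        Some(t) if timestamp >= t => Fork::Eip4844,
        _ => match shanghai_time {
            Some(t) if timestamp >= t => Fork::Capella,
            _ => Fork::Merge,
        },
    }
}

fn main() {
    assert_eq!(fork_at(Some(100), Some(200), 50), Fork::Merge);
    assert_eq!(fork_at(Some(100), Some(200), 150), Fork::Capella);
    assert_eq!(fork_at(Some(100), Some(200), 250), Fork::Eip4844);
    // No fork times configured: the generator only ever produces Merge payloads.
    assert_eq!(fork_at(None, None, u64::MAX), Fork::Merge);
}
```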
@ -74,7 +74,7 @@ pub async fn handle_rpc<T: EthSpec>(
|
|||||||
.unwrap())
|
.unwrap())
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
ENGINE_NEW_PAYLOAD_V1 | ENGINE_NEW_PAYLOAD_V2 => {
|
ENGINE_NEW_PAYLOAD_V1 | ENGINE_NEW_PAYLOAD_V2 | ENGINE_NEW_PAYLOAD_V3 => {
|
||||||
let request = match method {
|
let request = match method {
|
||||||
ENGINE_NEW_PAYLOAD_V1 => {
|
ENGINE_NEW_PAYLOAD_V1 => {
|
||||||
JsonExecutionPayload::V1(get_param::<JsonExecutionPayloadV1<T>>(params, 0)?)
|
JsonExecutionPayload::V1(get_param::<JsonExecutionPayloadV1<T>>(params, 0)?)
|
||||||
@ -82,17 +82,65 @@ pub async fn handle_rpc<T: EthSpec>(
|
|||||||
ENGINE_NEW_PAYLOAD_V2 => {
|
ENGINE_NEW_PAYLOAD_V2 => {
|
||||||
JsonExecutionPayload::V2(get_param::<JsonExecutionPayloadV2<T>>(params, 0)?)
|
JsonExecutionPayload::V2(get_param::<JsonExecutionPayloadV2<T>>(params, 0)?)
|
||||||
}
|
}
|
||||||
|
ENGINE_NEW_PAYLOAD_V3 => {
|
||||||
|
JsonExecutionPayload::V2(get_param::<JsonExecutionPayloadV2<T>>(params, 0)?)
|
||||||
|
}
|
||||||
_ => unreachable!(),
|
_ => unreachable!(),
|
||||||
};
|
};
|
||||||
let fork = match request {
|
|
||||||
JsonExecutionPayload::V1(_) => ForkName::Merge,
|
let fork = ctx
|
||||||
JsonExecutionPayload::V2(ref payload) => {
|
.execution_block_generator
|
||||||
if payload.withdrawals.is_none() {
|
.read()
|
||||||
ForkName::Merge
|
.get_fork_at_timestamp(*request.timestamp());
|
||||||
} else {
|
// validate method called correctly according to shanghai fork time
|
||||||
ForkName::Capella
|
match fork {
|
||||||
|
ForkName::Merge => {
|
||||||
|
if request.withdrawals().is_ok() && request.withdrawals().unwrap().is_some() {
|
||||||
|
return Err(format!(
|
||||||
|
"{} called with `withdrawals` before capella fork!",
|
||||||
|
method
|
||||||
|
));
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
ForkName::Capella => {
|
||||||
|
if method == ENGINE_NEW_PAYLOAD_V1 {
|
||||||
|
return Err(format!("{} called after capella fork!", method));
|
||||||
|
}
|
||||||
|
if request.withdrawals().is_err()
|
||||||
|
|| (request.withdrawals().is_ok()
|
||||||
|
&& request.withdrawals().unwrap().is_none())
|
||||||
|
{
|
||||||
|
return Err(format!(
|
||||||
|
"{} called without `withdrawals` after capella fork!",
|
||||||
|
method
|
||||||
|
));
|
||||||
|
}
|
||||||
|
}
|
||||||
|
ForkName::Eip4844 => {
|
||||||
|
//FIXME(sean)
|
||||||
|
if method == ENGINE_NEW_PAYLOAD_V1 {
|
||||||
|
return Err(format!("{} called after capella fork!", method));
|
||||||
|
}
|
||||||
|
if request.withdrawals().is_err()
|
||||||
|
|| (request.withdrawals().is_ok()
|
||||||
|
&& request.withdrawals().unwrap().is_none())
|
||||||
|
{
|
||||||
|
return Err(format!(
|
||||||
|
"{} called without `withdrawals` after eip4844 fork!",
|
||||||
|
method
|
||||||
|
));
|
||||||
|
}
|
||||||
|
if request.excess_data_gas().is_err()
|
||||||
|
|| (request.excess_data_gas().is_ok()
|
||||||
|
&& request.excess_data_gas().unwrap().is_none())
|
||||||
|
{
|
||||||
|
return Err(format!(
|
||||||
|
"{} called without `excess_data_gas` after eip4844 fork!",
|
||||||
|
method
|
||||||
|
));
|
||||||
|
}
|
||||||
|
}
|
||||||
|
_ => unreachable!(),
|
||||||
};
|
};
|
||||||
|
|
||||||
// Canned responses set by block hash take priority.
|
// Canned responses set by block hash take priority.
|
||||||
@ -125,7 +173,7 @@ pub async fn handle_rpc<T: EthSpec>(
|
|||||||
|
|
||||||
Ok(serde_json::to_value(JsonPayloadStatusV1::from(response)).unwrap())
|
Ok(serde_json::to_value(JsonPayloadStatusV1::from(response)).unwrap())
|
||||||
}
|
}
|
||||||
ENGINE_GET_PAYLOAD_V1 => {
|
ENGINE_GET_PAYLOAD_V1 | ENGINE_GET_PAYLOAD_V2 | ENGINE_GET_PAYLOAD_V3 => {
|
||||||
let request: JsonPayloadIdRequest = get_param(params, 0)?;
|
let request: JsonPayloadIdRequest = get_param(params, 0)?;
|
||||||
let id = request.into();
|
let id = request.into();
|
||||||
|
|
||||||
@ -135,12 +183,99 @@ pub async fn handle_rpc<T: EthSpec>(
|
|||||||
.get_payload(&id)
|
.get_payload(&id)
|
||||||
.ok_or_else(|| format!("no payload for id {:?}", id))?;
|
.ok_or_else(|| format!("no payload for id {:?}", id))?;
|
||||||
|
|
||||||
Ok(serde_json::to_value(JsonExecutionPayloadV1::try_from(response).unwrap()).unwrap())
|
// validate method called correctly according to shanghai fork time
|
||||||
|
if ctx
|
||||||
|
.execution_block_generator
|
||||||
|
.read()
|
||||||
|
.get_fork_at_timestamp(response.timestamp())
|
||||||
|
== ForkName::Capella
|
||||||
|
&& method == ENGINE_GET_PAYLOAD_V1
|
||||||
|
{
|
||||||
|
return Err(format!("{} called after capella fork!", method));
|
||||||
|
}
|
||||||
|
// validate method called correctly according to eip4844 fork time
|
||||||
|
if ctx
|
||||||
|
.execution_block_generator
|
||||||
|
.read()
|
||||||
|
.get_fork_at_timestamp(response.timestamp())
|
||||||
|
== ForkName::Eip4844
|
||||||
|
//FIXME(sean)
|
||||||
|
&& method == ENGINE_GET_PAYLOAD_V1
|
||||||
|
{
|
||||||
|
return Err(format!("{} called after capella fork!", method));
|
||||||
|
}
|
||||||
|
|
||||||
|
match method {
|
||||||
|
ENGINE_GET_PAYLOAD_V1 => Ok(serde_json::to_value(
|
||||||
|
JsonExecutionPayloadV1::try_from(response).unwrap(),
|
||||||
|
)
|
||||||
|
.unwrap()),
|
||||||
|
ENGINE_GET_PAYLOAD_V2 => Ok(serde_json::to_value(JsonGetPayloadResponse {
|
||||||
|
execution_payload: JsonExecutionPayloadV2::try_from(response).unwrap(),
|
||||||
|
})
|
||||||
|
.unwrap()),
|
||||||
|
_ => unreachable!(),
|
||||||
|
}
|
||||||
}
|
}
|
-        // FIXME(capella): handle fcu version 2
-        ENGINE_FORKCHOICE_UPDATED_V1 => {
+        ENGINE_FORKCHOICE_UPDATED_V1 | ENGINE_FORKCHOICE_UPDATED_V2 => {
             let forkchoice_state: JsonForkchoiceStateV1 = get_param(params, 0)?;
-            let payload_attributes: Option<JsonPayloadAttributes> = get_param(params, 1)?;
+            let payload_attributes = match method {
+                ENGINE_FORKCHOICE_UPDATED_V1 => {
+                    let jpa1: Option<JsonPayloadAttributesV1> = get_param(params, 1)?;
+                    jpa1.map(JsonPayloadAttributes::V1)
+                }
+                ENGINE_FORKCHOICE_UPDATED_V2 => {
+                    let jpa2: Option<JsonPayloadAttributesV2> = get_param(params, 1)?;
+                    jpa2.map(JsonPayloadAttributes::V2)
+                }
+                _ => unreachable!(),
+            };
+
+            // validate method called correctly according to shanghai fork time
+            if let Some(pa) = payload_attributes.as_ref() {
+                match ctx
+                    .execution_block_generator
+                    .read()
+                    .get_fork_at_timestamp(*pa.timestamp())
+                {
+                    ForkName::Merge => {
+                        if pa.withdrawals().is_ok() && pa.withdrawals().unwrap().is_some() {
+                            return Err(format!(
+                                "{} called with `withdrawals` before capella fork!",
+                                method
+                            ));
+                        }
+                    }
+                    ForkName::Capella => {
+                        if method == ENGINE_FORKCHOICE_UPDATED_V1 {
+                            return Err(format!("{} called after capella fork!", method));
+                        }
+                        if pa.withdrawals().is_err()
+                            || (pa.withdrawals().is_ok() && pa.withdrawals().unwrap().is_none())
+                        {
+                            return Err(format!(
+                                "{} called without `withdrawals` after capella fork!",
+                                method
+                            ));
+                        }
+                    }
+                    ForkName::Eip4844 => {
+                        //FIXME(sean)
+                        if method == ENGINE_FORKCHOICE_UPDATED_V1 {
+                            return Err(format!("{} called after capella fork!", method));
+                        }
+                        if pa.withdrawals().is_err()
+                            || (pa.withdrawals().is_ok() && pa.withdrawals().unwrap().is_none())
+                        {
+                            return Err(format!(
+                                "{} called without `withdrawals` after capella fork!",
+                                method
+                            ));
+                        }
+                    }
+                    _ => unreachable!(),
+                };
+            }
 
             if let Some(hook_response) = ctx
                 .hook
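For forkchoiceUpdated, the shape of the optional second parameter now depends on the method version: V1 attributes have no `withdrawals` field, V2 attributes do. A self-contained sketch of that dispatch, using simplified stand-ins for the `JsonPayloadAttributesV1`/`V2` types in the diff (field names and casing here are illustrative; requires the `serde` and `serde_json` crates):

    use serde::Deserialize;

    #[derive(Deserialize)]
    #[allow(dead_code)]
    struct PayloadAttributesV1 {
        timestamp: u64,
    }

    #[derive(Deserialize)]
    #[allow(dead_code)]
    struct PayloadAttributesV2 {
        timestamp: u64,
        withdrawals: Vec<serde_json::Value>,
    }

    enum PayloadAttributes {
        V1(PayloadAttributesV1),
        V2(PayloadAttributesV2),
    }

    // Parse the optional attributes parameter according to the method version,
    // mirroring the `match method` introduced in the hunk above.
    fn parse_attributes(
        method: &str,
        param: serde_json::Value,
    ) -> Result<Option<PayloadAttributes>, serde_json::Error> {
        Ok(match method {
            "engine_forkchoiceUpdatedV1" => {
                serde_json::from_value::<Option<PayloadAttributesV1>>(param)?
                    .map(PayloadAttributes::V1)
            }
            _ => serde_json::from_value::<Option<PayloadAttributesV2>>(param)?
                .map(PayloadAttributes::V2),
        })
    }

    fn main() {
        let v = serde_json::json!({ "timestamp": 1 });
        let parsed = parse_attributes("engine_forkchoiceUpdatedV1", v).unwrap();
        assert!(matches!(parsed, Some(PayloadAttributes::V1(_))));
    }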
@@ -161,13 +296,10 @@ pub async fn handle_rpc<T: EthSpec>(
                 return Ok(serde_json::to_value(response).unwrap());
             }
 
-            let mut response = ctx
-                .execution_block_generator
-                .write()
-                .forkchoice_updated_v1(
-                    forkchoice_state.into(),
-                    payload_attributes.map(|json| json.into()),
-                )?;
+            let mut response = ctx.execution_block_generator.write().forkchoice_updated(
+                forkchoice_state.into(),
+                payload_attributes.map(|json| json.into()),
+            )?;
 
             if let Some(mut status) = ctx.static_forkchoice_updated_response.lock().clone() {
                 if status.status == PayloadStatusV1Status::Valid {
@@ -84,7 +84,8 @@ impl<E: EthSpec> TestingBuilder<E> {
         };
 
         let el =
-            ExecutionLayer::from_config(config, executor.clone(), executor.log().clone()).unwrap();
+            ExecutionLayer::from_config(config, executor.clone(), executor.log().clone(), &spec)
+                .unwrap();
 
         // This should probably be done for all fields, we only update ones we are testing with so far.
         let mut context = Context::for_mainnet();
@@ -9,7 +9,7 @@ use sensitive_url::SensitiveUrl;
 use task_executor::TaskExecutor;
 use tempfile::NamedTempFile;
 use tree_hash::TreeHash;
-use types::{Address, ChainSpec, Epoch, EthSpec, FullPayload, Hash256, Uint256};
+use types::{Address, ChainSpec, Epoch, EthSpec, FullPayload, Hash256, MainnetEthSpec};
 
 pub struct MockExecutionLayer<T: EthSpec> {
     pub server: MockServer<T>,
@@ -20,40 +20,42 @@ pub struct MockExecutionLayer<T: EthSpec> {
 
 impl<T: EthSpec> MockExecutionLayer<T> {
     pub fn default_params(executor: TaskExecutor) -> Self {
+        let mut spec = MainnetEthSpec::default_spec();
+        spec.terminal_total_difficulty = DEFAULT_TERMINAL_DIFFICULTY.into();
+        spec.terminal_block_hash = ExecutionBlockHash::zero();
+        spec.terminal_block_hash_activation_epoch = Epoch::new(0);
         Self::new(
             executor,
-            DEFAULT_TERMINAL_DIFFICULTY.into(),
             DEFAULT_TERMINAL_BLOCK,
-            ExecutionBlockHash::zero(),
-            Epoch::new(0),
+            None,
+            None,
             Some(JwtKey::from_slice(&DEFAULT_JWT_SECRET).unwrap()),
+            spec,
             None,
         )
     }
 
+    #[allow(clippy::too_many_arguments)]
     pub fn new(
         executor: TaskExecutor,
-        terminal_total_difficulty: Uint256,
         terminal_block: u64,
-        terminal_block_hash: ExecutionBlockHash,
-        terminal_block_hash_activation_epoch: Epoch,
+        shanghai_time: Option<u64>,
+        eip4844_time: Option<u64>,
         jwt_key: Option<JwtKey>,
+        spec: ChainSpec,
         builder_url: Option<SensitiveUrl>,
     ) -> Self {
         let handle = executor.handle().unwrap();
 
-        let mut spec = T::default_spec();
-        spec.terminal_total_difficulty = terminal_total_difficulty;
-        spec.terminal_block_hash = terminal_block_hash;
-        spec.terminal_block_hash_activation_epoch = terminal_block_hash_activation_epoch;
-
         let jwt_key = jwt_key.unwrap_or_else(JwtKey::random);
         let server = MockServer::new(
             &handle,
             jwt_key,
-            terminal_total_difficulty,
+            spec.terminal_total_difficulty,
             terminal_block,
-            terminal_block_hash,
+            spec.terminal_block_hash,
+            shanghai_time,
+            eip4844_time,
         );
 
         let url = SensitiveUrl::parse(&server.url()).unwrap();
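With this change the caller owns the `ChainSpec`, so the fork schedule travels inside it and only the fork times the mock server itself needs are passed separately. A hedged usage sketch of the new signature; it reuses identifiers from the diff, will not compile outside the crate, and `capella_start_time` is a hypothetical value:

    let mut spec = MainnetEthSpec::default_spec();
    spec.terminal_total_difficulty = DEFAULT_TERMINAL_DIFFICULTY.into();
    spec.terminal_block_hash = ExecutionBlockHash::zero();
    spec.terminal_block_hash_activation_epoch = Epoch::new(0);

    // shanghai_time / eip4844_time are Option<u64> timestamps; None disables the fork.
    let mock = MockExecutionLayer::new(
        executor,
        DEFAULT_TERMINAL_BLOCK,
        Some(capella_start_time), // shanghai_time (hypothetical value)
        None,                     // eip4844_time
        Some(JwtKey::from_slice(&DEFAULT_JWT_SECRET).unwrap()),
        spec,
        None, // builder_url
    );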
@@ -71,7 +73,8 @@ impl<T: EthSpec> MockExecutionLayer<T> {
             ..Default::default()
         };
         let el =
-            ExecutionLayer::from_config(config, executor.clone(), executor.log().clone()).unwrap();
+            ExecutionLayer::from_config(config, executor.clone(), executor.log().clone(), &spec)
+                .unwrap();
 
         Self {
             server,
@@ -45,6 +45,8 @@ pub struct MockExecutionConfig {
     pub terminal_difficulty: Uint256,
     pub terminal_block: u64,
     pub terminal_block_hash: ExecutionBlockHash,
+    pub shanghai_time: Option<u64>,
+    pub eip4844_time: Option<u64>,
 }
 
 impl Default for MockExecutionConfig {
@@ -55,6 +57,8 @@ impl Default for MockExecutionConfig {
             terminal_block: DEFAULT_TERMINAL_BLOCK,
             terminal_block_hash: ExecutionBlockHash::zero(),
             server_config: Config::default(),
+            shanghai_time: None,
+            eip4844_time: None,
         }
     }
 }
@@ -74,6 +78,8 @@ impl<T: EthSpec> MockServer<T> {
             DEFAULT_TERMINAL_DIFFICULTY.into(),
             DEFAULT_TERMINAL_BLOCK,
             ExecutionBlockHash::zero(),
+            None, // FIXME(capella): should this be the default?
+            None, // FIXME(eip4844): should this be the default?
         )
     }
 
@@ -84,11 +90,18 @@ impl<T: EthSpec> MockServer<T> {
             terminal_block,
             terminal_block_hash,
             server_config,
+            shanghai_time,
+            eip4844_time,
         } = config;
         let last_echo_request = Arc::new(RwLock::new(None));
         let preloaded_responses = Arc::new(Mutex::new(vec![]));
-        let execution_block_generator =
-            ExecutionBlockGenerator::new(terminal_difficulty, terminal_block, terminal_block_hash);
+        let execution_block_generator = ExecutionBlockGenerator::new(
+            terminal_difficulty,
+            terminal_block,
+            terminal_block_hash,
+            shanghai_time,
+            eip4844_time,
+        );
 
         let ctx: Arc<Context<T>> = Arc::new(Context {
             config: server_config,
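These two optional timestamps are what let the block generator answer "which fork is active at this timestamp", which the RPC validation earlier in the diff relies on via `get_fork_at_timestamp`. A self-contained sketch of the lookup, under the assumption that a later fork's time, when set, takes precedence; the free function is illustrative, not the crate's API:

    #[derive(Clone, Copy, Debug, PartialEq)]
    enum ForkName {
        Merge,
        Capella,
        Eip4844,
    }

    fn fork_at_timestamp(
        timestamp: u64,
        shanghai_time: Option<u64>,
        eip4844_time: Option<u64>,
    ) -> ForkName {
        match eip4844_time {
            Some(t) if timestamp >= t => ForkName::Eip4844,
            _ => match shanghai_time {
                Some(t) if timestamp >= t => ForkName::Capella,
                _ => ForkName::Merge,
            },
        }
    }

    fn main() {
        // With shanghai at t=10 and eip4844 unset, t=12 resolves to Capella.
        assert_eq!(fork_at_timestamp(12, Some(10), None), ForkName::Capella);
        assert_eq!(fork_at_timestamp(5, Some(10), None), ForkName::Merge);
    }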
@@ -140,6 +153,8 @@ impl<T: EthSpec> MockServer<T> {
         terminal_difficulty: Uint256,
         terminal_block: u64,
         terminal_block_hash: ExecutionBlockHash,
+        shanghai_time: Option<u64>,
+        eip4844_time: Option<u64>,
     ) -> Self {
         Self::new_with_config(
             handle,
@@ -149,6 +164,8 @@ impl<T: EthSpec> MockServer<T> {
                 terminal_difficulty,
                 terminal_block,
                 terminal_block_hash,
+                shanghai_time,
+                eip4844_time,
             },
         )
     }
@@ -5,9 +5,6 @@ authors = ["Paul Hauner <paul@paulhauner.com>"]
 edition = "2021"
 autotests = false # using a single test binary compiles faster
 
-[features]
-withdrawals-processing = []
-
 [dependencies]
 warp = { version = "0.3.2", features = ["tls"] }
 serde = { version = "1.0.116", features = ["derive"] }
@@ -1681,16 +1681,12 @@ pub fn serve<T: BeaconChainTypes>(
 
                 match chain.verify_bls_to_execution_change_for_gossip(address_change) {
                     Ok(ObservationOutcome::New(verified_address_change)) => {
-                        #[cfg(feature = "withdrawals-processing")]
-                        {
-                            publish_pubsub_message(
-                                &network_tx,
-                                PubsubMessage::BlsToExecutionChange(Box::new(
-                                    verified_address_change.as_inner().clone(),
-                                )),
-                            )?;
-                        }
+                        publish_pubsub_message(
+                            &network_tx,
+                            PubsubMessage::BlsToExecutionChange(Box::new(
+                                verified_address_change.as_inner().clone(),
+                            )),
+                        )?;
 
                         chain.import_bls_to_execution_change(verified_address_change);
                     }
                     Ok(ObservationOutcome::AlreadyKnown) => {
@@ -2915,7 +2911,7 @@ pub fn serve<T: BeaconChainTypes>(
                             let is_live =
                                 chain.validator_seen_at_epoch(index as usize, request_data.epoch);
                             api_types::LivenessResponseData {
-                                index: index as u64,
+                                index,
                                 epoch: request_data.epoch,
                                 is_live,
                             }
@@ -2951,7 +2947,7 @@ pub fn serve<T: BeaconChainTypes>(
         .and_then(
             |sysinfo, app_start: std::time::Instant, data_dir, network_globals| {
                 blocking_json_task(move || {
-                    let app_uptime = app_start.elapsed().as_secs() as u64;
+                    let app_uptime = app_start.elapsed().as_secs();
                     Ok(api_types::GenericResponse::from(observe_system_health_bn(
                         sysinfo,
                         data_dir,
@@ -194,6 +194,11 @@ async fn reconstruct_block<T: BeaconChainTypes>(
                 .spec
                 .fork_name_at_epoch(block.slot().epoch(T::EthSpec::slots_per_epoch())),
         )
+        .map_err(|e| {
+            warp_utils::reject::custom_server_error(format!(
+                "Default payload construction error: {e:?}"
+            ))
+        })?
         .into()
     // If we already have an execution payload with this transactions root cached, use it.
     } else if let Some(cached_payload) =
@@ -7,7 +7,13 @@ use eth2::{BeaconNodeHttpClient, Timeouts};
 use http_api::{Config, Context};
 use lighthouse_network::{
     discv5::enr::{CombinedKey, EnrBuilder},
-    libp2p::{core::connection::ConnectionId, swarm::NetworkBehaviour},
+    libp2p::{
+        core::connection::ConnectionId,
+        swarm::{
+            behaviour::{ConnectionEstablished, FromSwarm},
+            NetworkBehaviour,
+        },
+    },
     rpc::methods::{MetaData, MetaDataV2},
     types::{EnrAttestationBitfield, EnrSyncCommitteeBitfield, SyncState},
     ConnectedPoint, Enr, NetworkGlobals, PeerId, PeerManager,
@@ -143,12 +149,18 @@ pub async fn create_api_server_on_port<T: BeaconChainTypes>(
     // add a peer
     let peer_id = PeerId::random();
 
-    let connected_point = ConnectedPoint::Listener {
+    let endpoint = &ConnectedPoint::Listener {
         local_addr: EXTERNAL_ADDR.parse().unwrap(),
         send_back_addr: EXTERNAL_ADDR.parse().unwrap(),
     };
-    let con_id = ConnectionId::new(1);
-    pm.inject_connection_established(&peer_id, &con_id, &connected_point, None, 0);
+    let connection_id = ConnectionId::new(1);
+    pm.on_swarm_event(FromSwarm::ConnectionEstablished(ConnectionEstablished {
+        peer_id,
+        connection_id,
+        endpoint,
+        failed_addresses: &[],
+        other_established: 0,
+    }));
     *network_globals.sync_state.write() = SyncState::Synced;
 
     let eth1_service =
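The test change above follows from the libp2p 0.50 API: behaviours no longer expose `inject_*` callbacks, so the test drives the peer manager by constructing a `FromSwarm::ConnectionEstablished` value itself. The same shape in miniature, with simplified self-contained stand-ins for the real types in `libp2p::swarm::behaviour`:

    // Stand-ins for libp2p's event types, just to show the shape of the test.
    struct ConnectionEstablished<'a> {
        peer_id: u32,
        endpoint: &'a str,
        other_established: usize,
    }

    enum FromSwarm<'a> {
        ConnectionEstablished(ConnectionEstablished<'a>),
    }

    #[derive(Default)]
    struct PeerManager {
        connected_peers: usize,
    }

    impl PeerManager {
        fn on_swarm_event(&mut self, event: FromSwarm) {
            match event {
                FromSwarm::ConnectionEstablished(e) => {
                    // Only the first connection to a peer counts as a new peer.
                    if e.other_established == 0 {
                        self.connected_peers += 1;
                    }
                    let _ = (e.peer_id, e.endpoint);
                }
            }
        }
    }

    fn main() {
        let mut pm = PeerManager::default();
        pm.on_swarm_event(FromSwarm::ConnectionEstablished(ConnectionEstablished {
            peer_id: 1,
            endpoint: "listener",
            other_established: 0,
        }));
        assert_eq!(pm.connected_peers, 1);
    }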
@@ -42,11 +42,12 @@ superstruct = "0.5.0"
 prometheus-client = "0.18.0"
 unused_port = { path = "../../common/unused_port" }
 delay_map = "0.1.1"
+void = "1"
 
 [dependencies.libp2p]
-version = "0.48.0"
+version = "0.50.0"
 default-features = false
-features = ["websocket", "identify", "mplex", "yamux", "noise", "gossipsub", "dns-tokio", "tcp-tokio", "plaintext", "secp256k1"]
+features = ["websocket", "identify", "mplex", "yamux", "noise", "gossipsub", "dns", "tcp", "tokio", "plaintext", "secp256k1", "macros", "ecdsa"]
 
 [dev-dependencies]
 slog-term = "2.6.0"
@@ -22,12 +22,13 @@ use enr::{ATTESTATION_BITFIELD_ENR_KEY, ETH2_ENR_KEY, SYNC_COMMITTEE_BITFIELD_EN
 use futures::prelude::*;
 use futures::stream::FuturesUnordered;
 use libp2p::multiaddr::Protocol;
+use libp2p::swarm::behaviour::{DialFailure, FromSwarm};
 use libp2p::swarm::AddressScore;
 pub use libp2p::{
     core::{connection::ConnectionId, ConnectedPoint, Multiaddr, PeerId},
     swarm::{
-        handler::ConnectionHandler, DialError, NetworkBehaviour,
-        NetworkBehaviourAction as NBAction, NotifyHandler, PollParameters, SubstreamProtocol,
+        dummy::ConnectionHandler, DialError, NetworkBehaviour, NetworkBehaviourAction as NBAction,
+        NotifyHandler, PollParameters, SubstreamProtocol,
     },
 };
 use lru::LruCache;
@@ -927,11 +928,11 @@ impl<TSpec: EthSpec> Discovery<TSpec> {
 
 impl<TSpec: EthSpec> NetworkBehaviour for Discovery<TSpec> {
     // Discovery is not a real NetworkBehaviour...
-    type ConnectionHandler = libp2p::swarm::handler::DummyConnectionHandler;
+    type ConnectionHandler = ConnectionHandler;
     type OutEvent = DiscoveredPeers;
 
     fn new_handler(&mut self) -> Self::ConnectionHandler {
-        libp2p::swarm::handler::DummyConnectionHandler::default()
+        ConnectionHandler
     }
 
     // Handles the libp2p request to obtain multiaddrs for peer_id's in order to dial them.
@@ -947,40 +948,6 @@ impl<TSpec: EthSpec> NetworkBehaviour for Discovery<TSpec> {
         }
     }
 
-    fn inject_event(
-        &mut self,
-        _: PeerId,
-        _: ConnectionId,
-        _: <Self::ConnectionHandler as ConnectionHandler>::OutEvent,
-    ) {
-    }
-
-    fn inject_dial_failure(
-        &mut self,
-        peer_id: Option<PeerId>,
-        _handler: Self::ConnectionHandler,
-        error: &DialError,
-    ) {
-        if let Some(peer_id) = peer_id {
-            match error {
-                DialError::Banned
-                | DialError::LocalPeerId
-                | DialError::InvalidPeerId(_)
-                | DialError::ConnectionIo(_)
-                | DialError::NoAddresses
-                | DialError::Transport(_)
-                | DialError::WrongPeerId { .. } => {
-                    // set peer as disconnected in discovery DHT
-                    debug!(self.log, "Marking peer disconnected in DHT"; "peer_id" => %peer_id);
-                    self.disconnect_peer(&peer_id);
-                }
-                DialError::ConnectionLimit(_)
-                | DialError::DialPeerConditionFalse(_)
-                | DialError::Aborted => {}
-            }
-        }
-    }
-
     // Main execution loop to drive the behaviour
     fn poll(
         &mut self,
@@ -1067,6 +1034,50 @@ impl<TSpec: EthSpec> NetworkBehaviour for Discovery<TSpec> {
         }
         Poll::Pending
     }
 
+    fn on_swarm_event(&mut self, event: FromSwarm<Self::ConnectionHandler>) {
+        match event {
+            FromSwarm::DialFailure(DialFailure { peer_id, error, .. }) => {
+                self.on_dial_failure(peer_id, error)
+            }
+            FromSwarm::ConnectionEstablished(_)
+            | FromSwarm::ConnectionClosed(_)
+            | FromSwarm::AddressChange(_)
+            | FromSwarm::ListenFailure(_)
+            | FromSwarm::NewListener(_)
+            | FromSwarm::NewListenAddr(_)
+            | FromSwarm::ExpiredListenAddr(_)
+            | FromSwarm::ListenerError(_)
+            | FromSwarm::ListenerClosed(_)
+            | FromSwarm::NewExternalAddr(_)
+            | FromSwarm::ExpiredExternalAddr(_) => {
+                // Ignore events not relevant to discovery
+            }
+        }
+    }
+}
+
+impl<TSpec: EthSpec> Discovery<TSpec> {
+    fn on_dial_failure(&mut self, peer_id: Option<PeerId>, error: &DialError) {
+        if let Some(peer_id) = peer_id {
+            match error {
+                DialError::Banned
+                | DialError::LocalPeerId
+                | DialError::InvalidPeerId(_)
+                | DialError::ConnectionIo(_)
+                | DialError::NoAddresses
+                | DialError::Transport(_)
+                | DialError::WrongPeerId { .. } => {
+                    // set peer as disconnected in discovery DHT
+                    debug!(self.log, "Marking peer disconnected in DHT"; "peer_id" => %peer_id);
+                    self.disconnect_peer(&peer_id);
+                }
+                DialError::ConnectionLimit(_)
+                | DialError::DialPeerConditionFalse(_)
+                | DialError::Aborted => {}
+            }
+        }
+    }
 }
 
 #[cfg(test)]
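The dial-failure logic itself is unchanged by this migration: it moved from the removed `inject_dial_failure` into an inherent `on_dial_failure` helper dispatched from `on_swarm_event`. Its core decision, which `DialError` variants should mark a peer disconnected in the DHT, can be shown in isolation; the enum here is a simplified stand-in for libp2p's:

    // Simplified stand-in for libp2p::swarm::DialError.
    enum DialError {
        Banned,
        NoAddresses,
        Transport,
        ConnectionLimit,
        Aborted,
    }

    // Fatal errors mean the peer is unreachable or unusable, so its DHT entry
    // is marked disconnected; local or transient conditions leave it untouched.
    fn should_mark_disconnected(error: &DialError) -> bool {
        match error {
            DialError::Banned | DialError::NoAddresses | DialError::Transport => true,
            DialError::ConnectionLimit | DialError::Aborted => false,
        }
    }

    fn main() {
        assert!(should_mark_disconnected(&DialError::NoAddresses));
        assert!(!should_mark_disconnected(&DialError::Aborted));
    }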
@@ -7,7 +7,7 @@ use crate::{NetworkGlobals, PeerId};
 use crate::{Subnet, SubnetDiscovery};
 use delay_map::HashSetDelay;
 use discv5::Enr;
-use libp2p::identify::IdentifyInfo;
+use libp2p::identify::Info as IdentifyInfo;
 use peerdb::{client::ClientKind, BanOperation, BanResult, ScoreUpdateResult};
 use rand::seq::SliceRandom;
 use slog::{debug, error, trace, warn};
@@ -1,14 +1,12 @@
 use std::task::{Context, Poll};
 
 use futures::StreamExt;
-use libp2p::core::connection::ConnectionId;
 use libp2p::core::ConnectedPoint;
+use libp2p::swarm::behaviour::{ConnectionClosed, ConnectionEstablished, DialFailure, FromSwarm};
 use libp2p::swarm::dial_opts::{DialOpts, PeerCondition};
-use libp2p::swarm::handler::DummyConnectionHandler;
-use libp2p::swarm::{
-    ConnectionHandler, DialError, NetworkBehaviour, NetworkBehaviourAction, PollParameters,
-};
-use libp2p::{Multiaddr, PeerId};
+use libp2p::swarm::dummy::ConnectionHandler;
+use libp2p::swarm::{NetworkBehaviour, NetworkBehaviourAction, PollParameters};
+use libp2p::PeerId;
 use slog::{debug, error};
 use types::EthSpec;
 
@@ -20,23 +18,14 @@ use super::peerdb::BanResult;
 use super::{ConnectingType, PeerManager, PeerManagerEvent, ReportSource};
 
 impl<TSpec: EthSpec> NetworkBehaviour for PeerManager<TSpec> {
-    type ConnectionHandler = DummyConnectionHandler;
+    type ConnectionHandler = ConnectionHandler;
 
     type OutEvent = PeerManagerEvent;
 
     /* Required trait members */
 
     fn new_handler(&mut self) -> Self::ConnectionHandler {
-        DummyConnectionHandler::default()
-    }
-
-    fn inject_event(
-        &mut self,
-        _: PeerId,
-        _: ConnectionId,
-        _: <DummyConnectionHandler as ConnectionHandler>::OutEvent,
-    ) {
-        unreachable!("Dummy handler does not emit events")
+        ConnectionHandler
     }
 
     fn poll(
@@ -114,19 +103,46 @@ impl<TSpec: EthSpec> NetworkBehaviour for PeerManager<TSpec> {
         Poll::Pending
     }
 
-    /* Overwritten trait members */
+    fn on_swarm_event(&mut self, event: FromSwarm<Self::ConnectionHandler>) {
+        match event {
+            FromSwarm::ConnectionEstablished(ConnectionEstablished {
+                peer_id,
+                endpoint,
+                other_established,
+                ..
+            }) => self.on_connection_established(peer_id, endpoint, other_established),
+            FromSwarm::ConnectionClosed(ConnectionClosed {
+                peer_id,
+                remaining_established,
+                ..
+            }) => self.on_connection_closed(peer_id, remaining_established),
+            FromSwarm::DialFailure(DialFailure { peer_id, .. }) => self.on_dial_failure(peer_id),
+            FromSwarm::AddressChange(_)
+            | FromSwarm::ListenFailure(_)
+            | FromSwarm::NewListener(_)
+            | FromSwarm::NewListenAddr(_)
+            | FromSwarm::ExpiredListenAddr(_)
+            | FromSwarm::ListenerError(_)
+            | FromSwarm::ListenerClosed(_)
+            | FromSwarm::NewExternalAddr(_)
+            | FromSwarm::ExpiredExternalAddr(_) => {
+                // The rest of the events we ignore since they are handled in their associated
+                // `SwarmEvent`
+            }
+        }
+    }
+}
 
-    fn inject_connection_established(
+impl<TSpec: EthSpec> PeerManager<TSpec> {
+    fn on_connection_established(
         &mut self,
-        peer_id: &PeerId,
-        _connection_id: &ConnectionId,
+        peer_id: PeerId,
         endpoint: &ConnectedPoint,
-        _failed_addresses: Option<&Vec<Multiaddr>>,
         other_established: usize,
     ) {
         debug!(self.log, "Connection established"; "peer_id" => %peer_id, "connection" => ?endpoint.to_endpoint());
         if other_established == 0 {
-            self.events.push(PeerManagerEvent::MetaData(*peer_id));
+            self.events.push(PeerManagerEvent::MetaData(peer_id));
         }
 
         // Check NAT if metrics are enabled
@@ -135,20 +151,20 @@ impl<TSpec: EthSpec> NetworkBehaviour for PeerManager<TSpec> {
         }
 
         // Check to make sure the peer is not supposed to be banned
-        match self.ban_status(peer_id) {
+        match self.ban_status(&peer_id) {
             // TODO: directly emit the ban event?
             BanResult::BadScore => {
                 // This is a faulty state
                 error!(self.log, "Connected to a banned peer. Re-banning"; "peer_id" => %peer_id);
                 // Reban the peer
-                self.goodbye_peer(peer_id, GoodbyeReason::Banned, ReportSource::PeerManager);
+                self.goodbye_peer(&peer_id, GoodbyeReason::Banned, ReportSource::PeerManager);
                 return;
             }
             BanResult::BannedIp(ip_addr) => {
                 // A good peer has connected to us via a banned IP address. We ban the peer and
                 // prevent future connections.
                 debug!(self.log, "Peer connected via banned IP. Banning"; "peer_id" => %peer_id, "banned_ip" => %ip_addr);
-                self.goodbye_peer(peer_id, GoodbyeReason::BannedIP, ReportSource::PeerManager);
+                self.goodbye_peer(&peer_id, GoodbyeReason::BannedIP, ReportSource::PeerManager);
                 return;
             }
             BanResult::NotBanned => {}
@@ -162,11 +178,11 @@ impl<TSpec: EthSpec> NetworkBehaviour for PeerManager<TSpec> {
             .network_globals
             .peers
             .read()
-            .peer_info(peer_id)
+            .peer_info(&peer_id)
             .map_or(true, |peer| !peer.has_future_duty())
         {
             // Gracefully disconnect the peer.
-            self.disconnect_peer(*peer_id, GoodbyeReason::TooManyPeers);
+            self.disconnect_peer(peer_id, GoodbyeReason::TooManyPeers);
             return;
         }
 
@@ -174,14 +190,14 @@ impl<TSpec: EthSpec> NetworkBehaviour for PeerManager<TSpec> {
         // does not need to know about these peers.
         match endpoint {
             ConnectedPoint::Listener { send_back_addr, .. } => {
-                self.inject_connect_ingoing(peer_id, send_back_addr.clone(), None);
+                self.inject_connect_ingoing(&peer_id, send_back_addr.clone(), None);
                 self.events
-                    .push(PeerManagerEvent::PeerConnectedIncoming(*peer_id));
+                    .push(PeerManagerEvent::PeerConnectedIncoming(peer_id));
             }
             ConnectedPoint::Dialer { address, .. } => {
-                self.inject_connect_outgoing(peer_id, address.clone(), None);
+                self.inject_connect_outgoing(&peer_id, address.clone(), None);
                 self.events
-                    .push(PeerManagerEvent::PeerConnectedOutgoing(*peer_id));
+                    .push(PeerManagerEvent::PeerConnectedOutgoing(peer_id));
             }
         }
 
@@ -189,14 +205,8 @@ impl<TSpec: EthSpec> NetworkBehaviour for PeerManager<TSpec> {
         self.update_connected_peer_metrics();
         metrics::inc_counter(&metrics::PEER_CONNECT_EVENT_COUNT);
     }
-    fn inject_connection_closed(
-        &mut self,
-        peer_id: &PeerId,
-        _: &ConnectionId,
-        _: &ConnectedPoint,
-        _: DummyConnectionHandler,
-        remaining_established: usize,
-    ) {
+
+    fn on_connection_closed(&mut self, peer_id: PeerId, remaining_established: usize) {
         if remaining_established > 0 {
             return;
         }
@@ -206,62 +216,33 @@ impl<TSpec: EthSpec> NetworkBehaviour for PeerManager<TSpec> {
             .network_globals
             .peers
             .read()
-            .is_connected_or_disconnecting(peer_id)
+            .is_connected_or_disconnecting(&peer_id)
         {
             // We are disconnecting the peer or the peer has already been connected.
             // Both these cases, the peer has been previously registered by the peer manager and
             // potentially the application layer.
             // Inform the application.
             self.events
-                .push(PeerManagerEvent::PeerDisconnected(*peer_id));
+                .push(PeerManagerEvent::PeerDisconnected(peer_id));
             debug!(self.log, "Peer disconnected"; "peer_id" => %peer_id);
         }
 
         // NOTE: It may be the case that a rejected node, due to too many peers is disconnected
         // here and the peer manager has no knowledge of its connection. We insert it here for
         // reference so that peer manager can track this peer.
-        self.inject_disconnect(peer_id);
+        self.inject_disconnect(&peer_id);
 
         // Update the prometheus metrics
         self.update_connected_peer_metrics();
         metrics::inc_counter(&metrics::PEER_DISCONNECT_EVENT_COUNT);
     }
 
-    fn inject_address_change(
-        &mut self,
-        _peer_id: &PeerId,
-        _connection_id: &ConnectionId,
-        old: &ConnectedPoint,
-        new: &ConnectedPoint,
-    ) {
-        debug_assert!(
-            matches!(
-                (old, new),
-                (
-                    // inbound remains inbound
-                    ConnectedPoint::Listener { .. },
-                    ConnectedPoint::Listener { .. }
-                ) | (
-                    // outbound remains outbound
-                    ConnectedPoint::Dialer { .. },
-                    ConnectedPoint::Dialer { .. }
-                )
-            ),
-            "A peer has changed between inbound and outbound"
-        )
-    }
-
     /// A dial attempt has failed.
     ///
     /// NOTE: It can be the case that we are dialing a peer and during the dialing process the peer
     /// connects and the dial attempt later fails. To handle this, we only update the peer_db if
     /// the peer is not already connected.
-    fn inject_dial_failure(
-        &mut self,
-        peer_id: Option<PeerId>,
-        _handler: DummyConnectionHandler,
-        _error: &DialError,
-    ) {
+    fn on_dial_failure(&mut self, peer_id: Option<PeerId>) {
         if let Some(peer_id) = peer_id {
             if !self.network_globals.peers.read().is_connected(&peer_id) {
                 self.inject_disconnect(&peer_id);
@@ -2,7 +2,7 @@
 //!
 //! Currently using identify to fingerprint.
 
-use libp2p::identify::IdentifyInfo;
+use libp2p::identify::Info as IdentifyInfo;
 use serde::Serialize;
 use strum::{AsRefStr, EnumIter, IntoStaticStr};
 
@@ -186,14 +186,7 @@ impl RealScore {
 
     /// Add an f64 to the score abiding by the limits.
     fn add(&mut self, score: f64) {
-        let mut new_score = self.lighthouse_score + score;
-        if new_score > MAX_SCORE {
-            new_score = MAX_SCORE;
-        }
-        if new_score < MIN_SCORE {
-            new_score = MIN_SCORE;
-        }
-
+        let new_score = (self.lighthouse_score + score).clamp(MIN_SCORE, MAX_SCORE);
         self.set_lighthouse_score(new_score);
     }
 
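The score refactor above is behaviour-preserving: `f64::clamp` applies exactly the two-sided bound the removed if-chain implemented (it panics only when the bounds are inverted or NaN, which fixed constants rule out). A quick self-contained check, with illustrative bounds rather than Lighthouse's actual `MIN_SCORE`/`MAX_SCORE` values:

    const MIN_SCORE: f64 = -100.0;
    const MAX_SCORE: f64 = 100.0;

    fn add(current: f64, delta: f64) -> f64 {
        (current + delta).clamp(MIN_SCORE, MAX_SCORE)
    }

    fn main() {
        assert_eq!(add(95.0, 10.0), MAX_SCORE); // capped at the upper bound
        assert_eq!(add(-95.0, -10.0), MIN_SCORE); // capped at the lower bound
        assert_eq!(add(1.0, 2.0), 3.0); // in-range values pass through
    }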
@@ -465,7 +465,7 @@ fn handle_length(
     // Note: length-prefix of > 10 bytes(uint64) would be a decoding error
     match uvi_codec.decode(bytes).map_err(RPCError::from)? {
         Some(length) => {
-            *len = Some(length as usize);
+            *len = Some(length);
             Ok(Some(length))
         }
         None => Ok(None), // need more bytes to decode length
@@ -327,61 +327,6 @@ where
         self.listen_protocol.clone()
     }
 
-    fn inject_fully_negotiated_inbound(
-        &mut self,
-        substream: <Self::InboundProtocol as InboundUpgrade<NegotiatedSubstream>>::Output,
-        _info: Self::InboundOpenInfo,
-    ) {
-        // only accept new peer requests when active
-        if !matches!(self.state, HandlerState::Active) {
-            return;
-        }
-
-        let (req, substream) = substream;
-        let expected_responses = req.expected_responses();
-
-        // store requests that expect responses
-        if expected_responses > 0 {
-            if self.inbound_substreams.len() < MAX_INBOUND_SUBSTREAMS {
-                // Store the stream and tag the output.
-                let delay_key = self.inbound_substreams_delay.insert(
-                    self.current_inbound_substream_id,
-                    Duration::from_secs(RESPONSE_TIMEOUT),
-                );
-                let awaiting_stream = InboundState::Idle(substream);
-                self.inbound_substreams.insert(
-                    self.current_inbound_substream_id,
-                    InboundInfo {
-                        state: awaiting_stream,
-                        pending_items: VecDeque::with_capacity(expected_responses as usize),
-                        delay_key: Some(delay_key),
-                        protocol: req.protocol(),
-                        request_start_time: Instant::now(),
-                        remaining_chunks: expected_responses,
-                    },
-                );
-            } else {
-                self.events_out.push(Err(HandlerErr::Inbound {
-                    id: self.current_inbound_substream_id,
-                    proto: req.protocol(),
-                    error: RPCError::HandlerRejected,
-                }));
-                return self.shutdown(None);
-            }
-        }
-
-        // If we received a goodbye, shutdown the connection.
-        if let InboundRequest::Goodbye(_) = req {
-            self.shutdown(None);
-        }
-
-        self.events_out.push(Ok(RPCReceived::Request(
-            self.current_inbound_substream_id,
-            req,
-        )));
-        self.current_inbound_substream_id.0 += 1;
-    }
-
     fn inject_fully_negotiated_outbound(
         &mut self,
         out: <Self::OutboundProtocol as OutboundUpgrade<NegotiatedSubstream>>::Output,
@@ -438,6 +383,64 @@ where
         }
     }
 
+    fn inject_fully_negotiated_inbound(
+        &mut self,
+        substream: <Self::InboundProtocol as InboundUpgrade<NegotiatedSubstream>>::Output,
+        _info: Self::InboundOpenInfo,
+    ) {
+        // only accept new peer requests when active
+        if !matches!(self.state, HandlerState::Active) {
+            return;
+        }
+
+        let (req, substream) = substream;
+        let expected_responses = req.expected_responses();
+
+        // store requests that expect responses
+        if expected_responses > 0 {
+            if self.inbound_substreams.len() < MAX_INBOUND_SUBSTREAMS {
+                // Store the stream and tag the output.
+                let delay_key = self.inbound_substreams_delay.insert(
+                    self.current_inbound_substream_id,
+                    Duration::from_secs(RESPONSE_TIMEOUT),
+                );
+                let awaiting_stream = InboundState::Idle(substream);
+                self.inbound_substreams.insert(
+                    self.current_inbound_substream_id,
+                    InboundInfo {
+                        state: awaiting_stream,
+                        pending_items: VecDeque::with_capacity(std::cmp::min(
+                            expected_responses,
+                            128,
+                        ) as usize),
+                        delay_key: Some(delay_key),
+                        protocol: req.protocol(),
+                        request_start_time: Instant::now(),
+                        remaining_chunks: expected_responses,
+                    },
+                );
+            } else {
+                self.events_out.push(Err(HandlerErr::Inbound {
+                    id: self.current_inbound_substream_id,
+                    proto: req.protocol(),
+                    error: RPCError::HandlerRejected,
+                }));
+                return self.shutdown(None);
+            }
+        }
+
+        // If we received a goodbye, shutdown the connection.
+        if let InboundRequest::Goodbye(_) = req {
+            self.shutdown(None);
+        }
+
+        self.events_out.push(Ok(RPCReceived::Request(
+            self.current_inbound_substream_id,
+            req,
+        )));
+        self.current_inbound_substream_id.0 += 1;
+    }
+
     fn inject_event(&mut self, rpc_event: Self::InEvent) {
         match rpc_event {
             RPCSend::Request(id, req) => self.send_request(id, req),
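Note the one functional change hidden in the move of `inject_fully_negotiated_inbound`: `pending_items` used to be pre-allocated with `expected_responses` capacity, a value derived from the remote request, so a peer could force a large up-front allocation. The new code caps the pre-allocation at 128, while the queue can still grow past that if a legitimate request needs it. The bound in isolation:

    use std::collections::VecDeque;

    // Pre-allocate at most 128 slots regardless of what the remote claims; the
    // deque still grows on demand beyond the initial capacity.
    fn bounded_pending_items<T>(expected_responses: u64) -> VecDeque<T> {
        VecDeque::with_capacity(std::cmp::min(expected_responses, 128) as usize)
    }

    fn main() {
        let q: VecDeque<u8> = bounded_pending_items(1_000_000);
        assert!(q.capacity() < 1_000_000); // bounded near 128, not a million
    }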
@@ -119,8 +119,8 @@ lazy_static! {
 pub(crate) const MAX_RPC_SIZE: usize = 1_048_576; // 1M
 /// The maximum bytes that can be sent across the RPC post-merge.
 pub(crate) const MAX_RPC_SIZE_POST_MERGE: usize = 10 * 1_048_576; // 10M
-//FIXME(sean) should these be the same?
 pub(crate) const MAX_RPC_SIZE_POST_CAPELLA: usize = 10 * 1_048_576; // 10M
+// FIXME(sean) should this be increased to account for blobs?
 pub(crate) const MAX_RPC_SIZE_POST_EIP4844: usize = 10 * 1_048_576; // 10M
 /// The protocol prefix the RPC protocol id.
 const PROTOCOL_PREFIX: &str = "/eth2/beacon_chain/req";
@@ -7,8 +7,8 @@ use libp2p::gossipsub::subscription_filter::{
     MaxCountSubscriptionFilter, WhitelistSubscriptionFilter,
 };
 use libp2p::gossipsub::Gossipsub as BaseGossipsub;
-use libp2p::identify::Identify;
-use libp2p::NetworkBehaviour;
+use libp2p::identify::Behaviour as Identify;
+use libp2p::swarm::NetworkBehaviour;
 use types::EthSpec;
 
 use super::api_types::RequestId;
@@ -270,11 +270,11 @@ impl<TSpec: EthSpec> PeerScoreSettings<TSpec> {
 
         let modulo_smaller = max(
             1,
-            smaller_committee_size / self.target_aggregators_per_committee as usize,
+            smaller_committee_size / self.target_aggregators_per_committee,
         );
         let modulo_larger = max(
             1,
-            (smaller_committee_size + 1) / self.target_aggregators_per_committee as usize,
+            (smaller_committee_size + 1) / self.target_aggregators_per_committee,
         );
 
         Ok((
@@ -29,7 +29,7 @@ use libp2p::gossipsub::subscription_filter::MaxCountSubscriptionFilter;
 use libp2p::gossipsub::{
     GossipsubEvent, IdentTopic as Topic, MessageAcceptance, MessageAuthenticity, MessageId,
 };
-use libp2p::identify::{Identify, IdentifyConfig, IdentifyEvent};
+use libp2p::identify::{Behaviour as Identify, Config as IdentifyConfig, Event as IdentifyEvent};
 use libp2p::multiaddr::{Multiaddr, Protocol as MProtocol};
 use libp2p::swarm::{ConnectionLimits, Swarm, SwarmBuilder, SwarmEvent};
 use libp2p::PeerId;
@@ -320,7 +320,7 @@ impl<AppReqId: ReqId, TSpec: EthSpec> Network<AppReqId, TSpec> {
 
         // use the executor for libp2p
         struct Executor(task_executor::TaskExecutor);
-        impl libp2p::core::Executor for Executor {
+        impl libp2p::swarm::Executor for Executor {
             fn exec(&self, f: Pin<Box<dyn futures::Future<Output = ()> + Send>>) {
                 self.0.spawn(f, "libp2p");
             }
@@ -345,12 +345,16 @@ impl<AppReqId: ReqId, TSpec: EthSpec> Network<AppReqId, TSpec> {
             .with_max_established_per_peer(Some(MAX_CONNECTIONS_PER_PEER));
 
         (
-            SwarmBuilder::new(transport, behaviour, local_peer_id)
-                .notify_handler_buffer_size(std::num::NonZeroUsize::new(7).expect("Not zero"))
-                .connection_event_buffer_size(64)
-                .connection_limits(limits)
-                .executor(Box::new(Executor(executor)))
-                .build(),
+            SwarmBuilder::with_executor(
+                transport,
+                behaviour,
+                local_peer_id,
+                Executor(executor),
+            )
+            .notify_handler_buffer_size(std::num::NonZeroUsize::new(7).expect("Not zero"))
+            .connection_event_buffer_size(64)
+            .connection_limits(limits)
+            .build(),
            bandwidth,
         )
     };
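A note on the builder change above: in libp2p 0.50 the executor becomes a constructor argument via `SwarmBuilder::with_executor` rather than a post-hoc `.executor(...)` builder call, so a tokio-driven swarm can no longer be built without one. The call shape as used in the diff; identifiers are assumed to be in scope and this is not compilable standalone:

    let swarm = SwarmBuilder::with_executor(
        transport,
        behaviour,
        local_peer_id,
        Executor(executor), // any type implementing libp2p::swarm::Executor
    )
    .notify_handler_buffer_size(std::num::NonZeroUsize::new(7).expect("Not zero"))
    .connection_event_buffer_size(64)
    .connection_limits(limits)
    .build();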
@@ -44,8 +44,7 @@ type BoxedTransport = Boxed<(PeerId, StreamMuxerBox)>;
 pub fn build_transport(
     local_private_key: Keypair,
 ) -> std::io::Result<(BoxedTransport, Arc<BandwidthSinks>)> {
-    let tcp =
-        libp2p::tcp::TokioTcpTransport::new(libp2p::tcp::GenTcpConfig::default().nodelay(true));
+    let tcp = libp2p::tcp::tokio::Transport::new(libp2p::tcp::Config::default().nodelay(true));
     let transport = libp2p::dns::TokioDnsConfig::system(tcp)?;
     #[cfg(feature = "libp2p-websocket")]
     let transport = {
@@ -88,7 +87,7 @@ fn keypair_from_hex(hex_bytes: &str) -> error::Result<Keypair> {
         hex_bytes.to_string()
     };
 
-    hex::decode(&hex_bytes)
+    hex::decode(hex_bytes)
         .map_err(|e| format!("Failed to parse p2p secret key bytes: {:?}", e).into())
         .and_then(keypair_from_bytes)
 }
@@ -15,13 +15,6 @@ use types::{
 };
 use unused_port::unused_tcp_port;
 
-#[allow(clippy::type_complexity)]
-#[allow(unused)]
-pub mod behaviour;
-#[allow(clippy::type_complexity)]
-#[allow(unused)]
-pub mod swarm;
-
 type E = MinimalEthSpec;
 type ReqId = usize;
@@ -1,395 +0,0 @@
-// NOTE: Taken from libp2p's swarm's testing utils.
-//
-// Copyright 2020 Parity Technologies (UK) Ltd.
-//
-// Permission is hereby granted, free of charge, to any person obtaining a
-// copy of this software and associated documentation files (the "Software"),
-// to deal in the Software without restriction, including without limitation
-// the rights to use, copy, modify, merge, publish, distribute, sublicense,
-// and/or sell copies of the Software, and to permit persons to whom the
-// Software is furnished to do so, subject to the following conditions:
-//
-// The above copyright notice and this permission notice shall be included in
-// all copies or substantial portions of the Software.
-//
-// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
-// OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
-// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
-// DEALINGS IN THE SOFTWARE.
-
-use std::collections::HashMap;
-use std::task::{Context, Poll};
-
-use libp2p::core::connection::{ConnectedPoint, ConnectionId};
-use libp2p::core::transport::ListenerId;
-use libp2p::swarm::handler::{ConnectionHandler, DummyConnectionHandler, IntoConnectionHandler};
-use libp2p::swarm::{DialError, NetworkBehaviour, NetworkBehaviourAction, PollParameters};
-use libp2p::{Multiaddr, PeerId};
-
-/// A `MockBehaviour` is a `NetworkBehaviour` that allows for
-/// the instrumentation of return values, without keeping
-/// any further state.
-pub struct MockBehaviour<
-    THandler = DummyConnectionHandler,
-    TOutEvent = <DummyConnectionHandler as ConnectionHandler>::OutEvent,
-> where
-    THandler: ConnectionHandler,
-{
-    /// The prototype protocols handler that is cloned for every
-    /// invocation of `new_handler`.
-    pub handler_proto: THandler,
-    /// The addresses to return from `addresses_of_peer`.
-    pub addresses: HashMap<PeerId, Vec<Multiaddr>>,
-    /// The next action to return from `poll`.
-    ///
-    /// An action is only returned once.
-    pub next_action: Option<NetworkBehaviourAction<TOutEvent, THandler>>,
-}
-
-impl<THandler, TOutEvent> MockBehaviour<THandler, TOutEvent>
-where
-    THandler: ConnectionHandler,
-{
-    pub fn new(handler_proto: THandler) -> Self {
-        MockBehaviour {
-            handler_proto,
-            addresses: HashMap::new(),
-            next_action: None,
-        }
-    }
-}
-
-impl<THandler, TOutEvent> NetworkBehaviour for MockBehaviour<THandler, TOutEvent>
-where
-    THandler: ConnectionHandler + Clone,
-    THandler::OutEvent: Clone,
-    TOutEvent: Send + 'static,
-{
-    type ConnectionHandler = THandler;
-    type OutEvent = TOutEvent;
-
-    fn new_handler(&mut self) -> Self::ConnectionHandler {
-        self.handler_proto.clone()
-    }
-
-    fn addresses_of_peer(&mut self, p: &PeerId) -> Vec<Multiaddr> {
-        self.addresses.get(p).map_or(Vec::new(), |v| v.clone())
-    }
-
-    fn inject_event(&mut self, _: PeerId, _: ConnectionId, _: THandler::OutEvent) {}
-
-    fn poll(
-        &mut self,
-        _: &mut Context,
-        _: &mut impl PollParameters,
-    ) -> Poll<NetworkBehaviourAction<Self::OutEvent, Self::ConnectionHandler>> {
-        Option::take(&mut self.next_action).map_or(Poll::Pending, Poll::Ready)
-    }
-}
-
-/// A `CallTraceBehaviour` is a `NetworkBehaviour` that tracks invocations of callback methods and
-/// their arguments, wrapping around an inner behaviour. It ensures certain invariants are met.
-pub struct CallTraceBehaviour<TInner>
-where
-    TInner: NetworkBehaviour,
-{
-    inner: TInner,
-
-    pub addresses_of_peer: Vec<PeerId>,
-    pub inject_connection_established: Vec<(PeerId, ConnectionId, ConnectedPoint, usize)>,
-    pub inject_connection_closed: Vec<(PeerId, ConnectionId, ConnectedPoint, usize)>,
-    pub inject_event: Vec<(
-        PeerId,
-        ConnectionId,
-        <<TInner::ConnectionHandler as IntoConnectionHandler>::Handler as ConnectionHandler>::OutEvent,
-    )>,
-    pub inject_dial_failure: Vec<Option<PeerId>>,
-    pub inject_new_listener: Vec<ListenerId>,
-    pub inject_new_listen_addr: Vec<(ListenerId, Multiaddr)>,
-    pub inject_new_external_addr: Vec<Multiaddr>,
-    pub inject_expired_listen_addr: Vec<(ListenerId, Multiaddr)>,
-    pub inject_expired_external_addr: Vec<Multiaddr>,
-    pub inject_listener_error: Vec<ListenerId>,
-    pub inject_listener_closed: Vec<(ListenerId, bool)>,
-    pub poll: usize,
-}
-
-impl<TInner> CallTraceBehaviour<TInner>
-where
-    TInner: NetworkBehaviour,
-{
-    pub fn new(inner: TInner) -> Self {
-        Self {
-            inner,
-            addresses_of_peer: Vec::new(),
-            inject_connection_established: Vec::new(),
-            inject_connection_closed: Vec::new(),
-            inject_event: Vec::new(),
-            inject_dial_failure: Vec::new(),
-            inject_new_listener: Vec::new(),
-            inject_new_listen_addr: Vec::new(),
-            inject_new_external_addr: Vec::new(),
-            inject_expired_listen_addr: Vec::new(),
-            inject_expired_external_addr: Vec::new(),
-            inject_listener_error: Vec::new(),
-            inject_listener_closed: Vec::new(),
-            poll: 0,
-        }
-    }
-
-    #[allow(dead_code)]
-    pub fn reset(&mut self) {
-        self.addresses_of_peer = Vec::new();
-        self.inject_connection_established = Vec::new();
-        self.inject_connection_closed = Vec::new();
-        self.inject_event = Vec::new();
-        self.inject_dial_failure = Vec::new();
-        self.inject_new_listen_addr = Vec::new();
-        self.inject_new_external_addr = Vec::new();
-        self.inject_expired_listen_addr = Vec::new();
-        self.inject_listener_error = Vec::new();
-        self.inject_listener_closed = Vec::new();
-        self.poll = 0;
-    }
-
-    pub fn inner(&mut self) -> &mut TInner {
-        &mut self.inner
-    }
-
-    /// Checks that when the expected number of closed connection notifications are received, a
-    /// given number of expected disconnections have been received as well.
-    ///
-    /// Returns if the first condition is met.
-    pub fn assert_disconnected(
-        &self,
-        expected_closed_connections: usize,
-        expected_disconnections: usize,
-    ) -> bool {
-        if self.inject_connection_closed.len() == expected_closed_connections {
-            assert_eq!(
-                self.inject_connection_closed
-                    .iter()
-                    .filter(|(.., remaining_established)| { *remaining_established == 0 })
-                    .count(),
-                expected_disconnections
-            );
-            return true;
-        }
-
-        false
-    }
-
-    /// Checks that when the expected number of established connection notifications are received,
-    /// a given number of expected connections have been received as well.
-    ///
-    /// Returns if the first condition is met.
-    pub fn assert_connected(
-        &self,
-        expected_established_connections: usize,
-        expected_connections: usize,
-    ) -> bool {
-        if self.inject_connection_established.len() == expected_established_connections {
-            assert_eq!(
-                self.inject_connection_established
-                    .iter()
-                    .filter(|(.., reported_aditional_connections)| {
-                        *reported_aditional_connections == 0
-                    })
-                    .count(),
-                expected_connections
-            );
-            return true;
-        }
-
-        false
-    }
-}
-
-impl<TInner> NetworkBehaviour for CallTraceBehaviour<TInner>
-where
-    TInner: NetworkBehaviour,
-    <<TInner::ConnectionHandler as IntoConnectionHandler>::Handler as ConnectionHandler>::OutEvent:
-        Clone,
-{
-    type ConnectionHandler = TInner::ConnectionHandler;
-    type OutEvent = TInner::OutEvent;
-
-    fn new_handler(&mut self) -> Self::ConnectionHandler {
-        self.inner.new_handler()
-    }
-
-    fn addresses_of_peer(&mut self, p: &PeerId) -> Vec<Multiaddr> {
-        self.addresses_of_peer.push(*p);
-        self.inner.addresses_of_peer(p)
-    }
-
-    fn inject_connection_established(
-        &mut self,
-        p: &PeerId,
-        c: &ConnectionId,
-        e: &ConnectedPoint,
-        errors: Option<&Vec<Multiaddr>>,
-        other_established: usize,
-    ) {
-        let mut other_peer_connections = self
-            .inject_connection_established
-            .iter()
-            .rev() // take last to first
-            .filter_map(|(peer, .., other_established)| {
-                if p == peer {
-                    Some(other_established)
-                } else {
-                    None
-                }
-            })
-            .take(other_established);
-
-        // We are informed that there are `other_established` additional connections. Ensure that the
-        // number of previous connections is consistent with this
-        if let Some(&prev) = other_peer_connections.next() {
-            if prev < other_established {
-                assert_eq!(
-                    prev,
-                    other_established - 1,
-                    "Inconsistent connection reporting"
-                )
-            }
-            assert_eq!(other_peer_connections.count(), other_established - 1);
-        } else {
-            assert_eq!(other_established, 0)
-        }
-        self.inject_connection_established
-            .push((*p, *c, e.clone(), other_established));
-        self.inner
-            .inject_connection_established(p, c, e, errors, other_established);
|
|
||||||
}
|
|
||||||
|
|
||||||
fn inject_connection_closed(
|
|
||||||
&mut self,
|
|
||||||
p: &PeerId,
|
|
||||||
c: &ConnectionId,
|
|
||||||
e: &ConnectedPoint,
|
|
||||||
handler: <Self::ConnectionHandler as IntoConnectionHandler>::Handler,
|
|
||||||
remaining_established: usize,
|
|
||||||
) {
|
|
||||||
let mut other_closed_connections = self
|
|
||||||
.inject_connection_established
|
|
||||||
.iter()
|
|
||||||
.rev() // take last to first
|
|
||||||
.filter_map(|(peer, .., remaining_established)| {
|
|
||||||
if p == peer {
|
|
||||||
Some(remaining_established)
|
|
||||||
} else {
|
|
||||||
None
|
|
||||||
}
|
|
||||||
})
|
|
||||||
.take(remaining_established);
|
|
||||||
|
|
||||||
// We are informed that there are `other_established` additional connections. Ensure that the
|
|
||||||
// number of previous connections is consistent with this
|
|
||||||
if let Some(&prev) = other_closed_connections.next() {
|
|
||||||
if prev < remaining_established {
|
|
||||||
assert_eq!(
|
|
||||||
prev,
|
|
||||||
remaining_established - 1,
|
|
||||||
"Inconsistent closed connection reporting"
|
|
||||||
)
|
|
||||||
}
|
|
||||||
assert_eq!(other_closed_connections.count(), remaining_established - 1);
|
|
||||||
} else {
|
|
||||||
assert_eq!(remaining_established, 0)
|
|
||||||
}
|
|
||||||
assert!(
|
|
||||||
self.inject_connection_established
|
|
||||||
.iter()
|
|
||||||
.any(|(peer, conn_id, endpoint, _)| (peer, conn_id, endpoint) == (p, c, e)),
|
|
||||||
"`inject_connection_closed` is called only for connections for \
|
|
||||||
which `inject_connection_established` was called first."
|
|
||||||
);
|
|
||||||
self.inject_connection_closed
|
|
||||||
.push((*p, *c, e.clone(), remaining_established));
|
|
||||||
self.inner
|
|
||||||
.inject_connection_closed(p, c, e, handler, remaining_established);
|
|
||||||
}
|
|
||||||
|
|
||||||
fn inject_event(
|
|
||||||
&mut self,
|
|
||||||
p: PeerId,
|
|
||||||
c: ConnectionId,
|
|
||||||
e: <<Self::ConnectionHandler as IntoConnectionHandler>::Handler as ConnectionHandler>::OutEvent,
|
|
||||||
) {
|
|
||||||
assert!(
|
|
||||||
self.inject_connection_established
|
|
||||||
.iter()
|
|
||||||
.any(|(peer_id, conn_id, ..)| *peer_id == p && c == *conn_id),
|
|
||||||
"`inject_event` is called for reported connections."
|
|
||||||
);
|
|
||||||
assert!(
|
|
||||||
!self
|
|
||||||
.inject_connection_closed
|
|
||||||
.iter()
|
|
||||||
.any(|(peer_id, conn_id, ..)| *peer_id == p && c == *conn_id),
|
|
||||||
"`inject_event` is never called for closed connections."
|
|
||||||
);
|
|
||||||
|
|
||||||
self.inject_event.push((p, c, e.clone()));
|
|
||||||
self.inner.inject_event(p, c, e);
|
|
||||||
}
|
|
||||||
|
|
||||||
fn inject_dial_failure(
|
|
||||||
&mut self,
|
|
||||||
p: Option<PeerId>,
|
|
||||||
handler: Self::ConnectionHandler,
|
|
||||||
error: &DialError,
|
|
||||||
) {
|
|
||||||
self.inject_dial_failure.push(p);
|
|
||||||
self.inner.inject_dial_failure(p, handler, error);
|
|
||||||
}
|
|
||||||
|
|
||||||
fn inject_new_listener(&mut self, id: ListenerId) {
|
|
||||||
self.inject_new_listener.push(id);
|
|
||||||
self.inner.inject_new_listener(id);
|
|
||||||
}
|
|
||||||
|
|
||||||
fn inject_new_listen_addr(&mut self, id: ListenerId, a: &Multiaddr) {
|
|
||||||
self.inject_new_listen_addr.push((id, a.clone()));
|
|
||||||
self.inner.inject_new_listen_addr(id, a);
|
|
||||||
}
|
|
||||||
|
|
||||||
fn inject_expired_listen_addr(&mut self, id: ListenerId, a: &Multiaddr) {
|
|
||||||
self.inject_expired_listen_addr.push((id, a.clone()));
|
|
||||||
self.inner.inject_expired_listen_addr(id, a);
|
|
||||||
}
|
|
||||||
|
|
||||||
fn inject_new_external_addr(&mut self, a: &Multiaddr) {
|
|
||||||
self.inject_new_external_addr.push(a.clone());
|
|
||||||
self.inner.inject_new_external_addr(a);
|
|
||||||
}
|
|
||||||
|
|
||||||
fn inject_expired_external_addr(&mut self, a: &Multiaddr) {
|
|
||||||
self.inject_expired_external_addr.push(a.clone());
|
|
||||||
self.inner.inject_expired_external_addr(a);
|
|
||||||
}
|
|
||||||
|
|
||||||
fn inject_listener_error(&mut self, l: ListenerId, e: &(dyn std::error::Error + 'static)) {
|
|
||||||
self.inject_listener_error.push(l);
|
|
||||||
self.inner.inject_listener_error(l, e);
|
|
||||||
}
|
|
||||||
|
|
||||||
fn inject_listener_closed(&mut self, l: ListenerId, r: Result<(), &std::io::Error>) {
|
|
||||||
self.inject_listener_closed.push((l, r.is_ok()));
|
|
||||||
self.inner.inject_listener_closed(l, r);
|
|
||||||
}
|
|
||||||
|
|
||||||
fn poll(
|
|
||||||
&mut self,
|
|
||||||
cx: &mut Context,
|
|
||||||
args: &mut impl PollParameters,
|
|
||||||
) -> Poll<NetworkBehaviourAction<Self::OutEvent, Self::ConnectionHandler>> {
|
|
||||||
self.poll += 1;
|
|
||||||
self.inner.poll(cx, args)
|
|
||||||
}
|
|
||||||
}
|
|
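Note: the tracing wrapper above is meant to be driven inside a swarm. A minimal sketch of the intended usage (not part of the commit; `DummyBehaviour` comes from libp2p and the `swarm` helpers are the test utilities that follow):

// Sketch only: wrap a behaviour, drive it, then check the recorded callbacks.
let inner = DummyBehaviour::with_keep_alive(KeepAlive::Yes);
let mut swarm = swarm::new_test_swarm(CallTraceBehaviour::new(inner));
// ... drive `swarm` until one remote peer has dialed us ...
// One established-connection notification, corresponding to one fully connected peer.
assert!(swarm.behaviour().assert_connected(1, 1));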
@ -1,99 +0,0 @@
use std::collections::HashMap;
use std::pin::Pin;

use super::behaviour::{CallTraceBehaviour, MockBehaviour};

use futures::stream::Stream;
use futures::task::{Context, Poll};
use libp2p::swarm::handler::ConnectionHandler;
use libp2p::swarm::{IntoConnectionHandler, NetworkBehaviour, Swarm, SwarmBuilder, SwarmEvent};
use libp2p::{PeerId, Transport};

use futures::StreamExt;

pub fn new_test_swarm<B>(behaviour: B) -> Swarm<B>
where
    B: NetworkBehaviour,
{
    let id_keys = libp2p::identity::Keypair::generate_ed25519();
    let local_public_key = id_keys.public();
    let transport = libp2p::core::transport::MemoryTransport::default()
        .upgrade(libp2p::core::upgrade::Version::V1)
        .authenticate(libp2p::plaintext::PlainText2Config {
            local_public_key: local_public_key.clone(),
        })
        .multiplex(libp2p::yamux::YamuxConfig::default())
        .boxed();
    SwarmBuilder::new(transport, behaviour, local_public_key.into()).build()
}

pub fn random_multiaddr() -> libp2p::multiaddr::Multiaddr {
    libp2p::multiaddr::Protocol::Memory(rand::random::<u64>()).into()
}

/// Bind a memory multiaddr to a compatible swarm.
pub async fn bind_listener<B: NetworkBehaviour>(
    swarm: &mut Swarm<B>,
) -> libp2p::multiaddr::Multiaddr {
    swarm.listen_on(random_multiaddr()).unwrap();
    match swarm.select_next_some().await {
        SwarmEvent::NewListenAddr {
            listener_id: _,
            address,
        } => address,
        _ => panic!("Testing swarm's first event should be a new listener"),
    }
}

#[derive(Default)]
pub struct SwarmPool<B: NetworkBehaviour> {
    swarms: HashMap<PeerId, Swarm<B>>,
}

impl<B: NetworkBehaviour> SwarmPool<B> {
    pub fn with_capacity(capacity: usize) -> Self {
        Self {
            swarms: HashMap::with_capacity(capacity),
        }
    }

    pub fn insert(&mut self, swarm: Swarm<B>) -> PeerId {
        let peer_id = *swarm.local_peer_id();
        self.swarms.insert(peer_id, swarm);
        peer_id
    }

    pub fn remove(&mut self, peer_id: &PeerId) {
        self.swarms.remove(peer_id);
    }

    pub fn get_mut(&mut self, peer_id: &PeerId) -> Option<&mut Swarm<B>> {
        self.swarms.get_mut(peer_id)
    }

    pub fn swarms(&self) -> &HashMap<PeerId, Swarm<B>> {
        &self.swarms
    }

    pub fn swarms_mut(&mut self) -> &mut HashMap<PeerId, Swarm<B>> {
        &mut self.swarms
    }
}

impl<B> Stream for SwarmPool<B>
where
    B: NetworkBehaviour,
    <B as NetworkBehaviour>::ConnectionHandler: ConnectionHandler,
{
    type Item = (
        PeerId,
        SwarmEvent<
            <B as NetworkBehaviour>::OutEvent,
            <<<B as NetworkBehaviour>::ConnectionHandler as IntoConnectionHandler>::Handler as ConnectionHandler>::Error,
        >,
    );

    fn poll_next(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Option<Self::Item>> {
        let mut polls = self
            .get_mut()
            .swarms
            .iter_mut()
            .map(|(&peer_id, swarm)| swarm.map(move |ev| (peer_id, ev)))
            .collect::<futures::stream::SelectAll<_>>();
        polls.poll_next_unpin(cx)
    }
}
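A sketch of how these helpers compose (illustrative only; peer counts and the print are made up). Each pooled swarm is polled through a single `SelectAll` stream, so the caller sees one merged event stream:

// Pool two dummy swarms and treat them as one event stream.
let mut pool = SwarmPool::with_capacity(2);
for _ in 0..2 {
    let mut swarm = new_test_swarm(DummyBehaviour::with_keep_alive(KeepAlive::Yes));
    let _addr = bind_listener(&mut swarm).await;
    pool.insert(swarm);
}
while let Some((peer_id, event)) = pool.next().await {
    // Each event arrives tagged with the local peer id of the swarm that produced it.
    println!("{peer_id}: {event:?}");
}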
@ -1,203 +0,0 @@
#![cfg(not(debug_assertions))]

mod common;
use std::{
    collections::{HashMap, HashSet},
    sync::Arc,
};

use common::{
    behaviour::{CallTraceBehaviour, MockBehaviour},
    swarm,
};
use lighthouse_network::{
    peer_manager::{config::Config, PeerManagerEvent},
    NetworkGlobals, PeerAction, PeerInfo, PeerManager, ReportSource,
};
use types::MinimalEthSpec as E;

use futures::StreamExt;
use libp2p::{
    core::either::EitherError,
    swarm::SwarmEvent,
    swarm::{handler::DummyConnectionHandler, DummyBehaviour, KeepAlive, Swarm},
    NetworkBehaviour,
};

use slog::debug;

/// Struct that mimics the lighthouse_network::Service with respect to handling peer manager
/// events.
// TODO: make this a real struct for more accurate testing.
struct Service {
    swarm: Swarm<Behaviour>,
}

impl Service {
    async fn select_next_some(&mut self) -> SwarmEvent<Ev, EitherError<void::Void, void::Void>> {
        let ev = self.swarm.select_next_some().await;
        match &ev {
            SwarmEvent::Behaviour(Ev(PeerManagerEvent::Banned(peer_id, _addr_vec))) => {
                self.swarm.ban_peer_id(*peer_id);
            }
            SwarmEvent::Behaviour(Ev(PeerManagerEvent::UnBanned(peer_id, _addr_vec))) => {
                self.swarm.unban_peer_id(*peer_id);
            }
            SwarmEvent::Behaviour(Ev(PeerManagerEvent::DisconnectPeer(peer_id, _reason))) => {
                // Disconnect the peer directly here.
                let _ = self.swarm.disconnect_peer_id(*peer_id);
            }
            _ => {}
        }
        ev
    }
}

#[derive(Debug)]
struct Ev(PeerManagerEvent);
impl From<void::Void> for Ev {
    fn from(_: void::Void) -> Self {
        unreachable!("No events are emitted")
    }
}
impl From<PeerManagerEvent> for Ev {
    fn from(ev: PeerManagerEvent) -> Self {
        Ev(ev)
    }
}

#[derive(NetworkBehaviour)]
#[behaviour(out_event = "Ev")]
struct Behaviour {
    pm_call_trace: CallTraceBehaviour<PeerManager<E>>,
    sibling: MockBehaviour,
}

impl Behaviour {
    fn new(pm: PeerManager<E>) -> Self {
        Behaviour {
            pm_call_trace: CallTraceBehaviour::new(pm),
            sibling: MockBehaviour::new(DummyConnectionHandler {
                // The peer manager votes No, so we make sure the combined handler stays alive
                // this way.
                keep_alive: KeepAlive::Yes,
            }),
        }
    }
}

#[tokio::test]
async fn banned_peers_consistency() {
    let log = common::build_log(slog::Level::Debug, false);
    let pm_log = log.new(slog::o!("who" => "[PM]"));
    let globals: Arc<NetworkGlobals<E>> = Arc::new(NetworkGlobals::new_test_globals(&log));

    // Build the peer manager.
    let (mut pm_service, pm_addr) = {
        let pm_config = Config {
            discovery_enabled: false,
            ..Default::default()
        };
        let pm = PeerManager::new(pm_config, globals.clone(), &pm_log).unwrap();
        let mut pm_swarm = swarm::new_test_swarm(Behaviour::new(pm));
        let pm_addr = swarm::bind_listener(&mut pm_swarm).await;
        let service = Service { swarm: pm_swarm };
        (service, pm_addr)
    };

    let excess_banned_peers = 15;
    let peers_to_ban =
        lighthouse_network::peer_manager::peerdb::MAX_BANNED_PEERS + excess_banned_peers;

    // Build all the dummy peers needed.
    let (mut swarm_pool, peers) = {
        let mut pool = swarm::SwarmPool::with_capacity(peers_to_ban);
        let mut peers = HashSet::with_capacity(peers_to_ban);
        for _ in 0..peers_to_ban {
            let mut peer_swarm =
                swarm::new_test_swarm(DummyBehaviour::with_keep_alive(KeepAlive::Yes));
            let _peer_addr = swarm::bind_listener(&mut peer_swarm).await;
            // It is ok to dial all at the same time since the swarm handles an event at a time.
            peer_swarm.dial(pm_addr.clone()).unwrap();
            let peer_id = pool.insert(peer_swarm);
            peers.insert(peer_id);
        }
        (pool, peers)
    };

    // We track banned peers at the swarm level here since there is no access to that info.
    let mut swarm_banned_peers = HashMap::with_capacity(peers_to_ban);
    let mut peers_unbanned = 0;
    let timeout = tokio::time::sleep(tokio::time::Duration::from_secs(30));
    futures::pin_mut!(timeout);

    loop {
        // Poll the peer manager and dummy swarms.
        tokio::select! {
            pm_event = pm_service.select_next_some() => {
                debug!(log, "[PM] {:?}", pm_event);
                match pm_event {
                    SwarmEvent::Behaviour(Ev(ev)) => match ev {
                        PeerManagerEvent::Banned(peer_id, _) => {
                            let has_been_unbanned = false;
                            swarm_banned_peers.insert(peer_id, has_been_unbanned);
                        }
                        PeerManagerEvent::UnBanned(peer_id, _) => {
                            *swarm_banned_peers.get_mut(&peer_id).expect("Unbanned peer must be banned first") = true;
                            peers_unbanned += 1;
                        }
                        _ => {}
                    }
                    SwarmEvent::ConnectionEstablished {
                        peer_id,
                        endpoint: _,
                        num_established: _,
                        concurrent_dial_errors: _,
                    } => {
                        assert!(peers.contains(&peer_id));
                        // Now we report the peer as banned.
                        pm_service
                            .swarm
                            .behaviour_mut()
                            .pm_call_trace
                            .inner()
                            .report_peer(
                                &peer_id,
                                PeerAction::Fatal,
                                ReportSource::Processor,
                                None,
                                "",
                            );
                    },
                    _ => {}
                }
            }
            Some((_peer_id, _peer_ev)) = swarm_pool.next() => {
                // We need to poll the swarms to keep the peers going.
            }
            _ = timeout.as_mut() => {
                panic!("Test timeout.")
            }
        }

        if peers_unbanned == excess_banned_peers {
            let pdb = globals.peers.read();
            let inconsistencies = swarm_banned_peers
                .into_iter()
                .map(|(peer_id, was_unbanned)| {
                    was_unbanned
                        != pdb.peer_info(&peer_id).map_or(
                            false, /* We forgot about a banned peer */
                            PeerInfo::is_banned,
                        )
                });
            assert_eq!(
                inconsistencies
                    .filter(|is_consistent| *is_consistent)
                    .count(),
                peers_to_ban
            );
            return;
        }
    }
}
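The final check above reads a little inverted: the iterator named `inconsistencies` actually yields `true` for peers whose two views agree (`was_unbanned != banned_now` holds exactly when an unbanned peer is no longer banned, or a still-banned peer was never unbanned). An equivalent, more explicit form (illustrative rewrite, not part of the commit):

// A peer is consistent when the swarm-side flag and the peer DB agree.
let consistent = swarm_banned_peers
    .iter()
    .filter(|&(peer_id, &was_unbanned)| {
        let banned_now = pdb.peer_info(peer_id).map_or(false, PeerInfo::is_banned);
        was_unbanned != banned_now
    })
    .count();
assert_eq!(consistent, peers_to_ban);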
@ -44,7 +44,7 @@ strum = "0.24.0"
 tokio-util = { version = "0.6.3", features = ["time"] }
 derivative = "2.2.0"
 delay_map = "0.1.1"
-ethereum-types = { version = "0.12.1", optional = true }
+ethereum-types = { version = "0.14.1", optional = true }

 [features]
 deterministic_long_lived_attnets = [ "ethereum-types" ]
@ -116,7 +116,8 @@ const MAX_AGGREGATED_ATTESTATION_REPROCESS_QUEUE_LEN: usize = 1_024;
 /// before we start dropping them.
 const MAX_GOSSIP_BLOCK_QUEUE_LEN: usize = 1_024;

-//FIXME(sean) verify
+/// The maximum number of queued `SignedBeaconBlockAndBlobsSidecar` objects received on gossip that
+/// will be stored before we start dropping them.
 const MAX_GOSSIP_BLOCK_AND_BLOB_QUEUE_LEN: usize = 1_024;

 /// The maximum number of queued `SignedBeaconBlock` objects received prior to their slot (but
@ -1213,7 +1214,6 @@ impl<T: BeaconChainTypes> BeaconProcessor<T> {
                 // required to verify some attestations.
             } else if let Some(item) = gossip_block_queue.pop() {
                 self.spawn_worker(item, toolbox);
-            //FIXME(sean)
             } else if let Some(item) = gossip_block_and_blobs_sidecar_queue.pop() {
                 self.spawn_worker(item, toolbox);
             // Check the aggregates, *then* the unaggregates since we assume that
@ -1226,13 +1226,26 @@ impl<T: BeaconChainTypes> Worker<T> {
                     "peer" => %peer_id,
                     "error" => ?e
                 );
-                self.propagate_validation_result(message_id, peer_id, MessageAcceptance::Reject);
-                // We penalize the peer slightly to prevent overuse of invalids.
-                self.gossip_penalize_peer(
-                    peer_id,
-                    PeerAction::HighToleranceError,
-                    "invalid_bls_to_execution_change",
-                );
+                // We ignore pre-capella messages without penalizing peers.
+                if matches!(e, BeaconChainError::BlsToExecutionChangeBadFork(_)) {
+                    self.propagate_validation_result(
+                        message_id,
+                        peer_id,
+                        MessageAcceptance::Ignore,
+                    );
+                } else {
+                    // We penalize the peer slightly to prevent overuse of invalids.
+                    self.propagate_validation_result(
+                        message_id,
+                        peer_id,
+                        MessageAcceptance::Reject,
+                    );
+                    self.gossip_penalize_peer(
+                        peer_id,
+                        PeerAction::HighToleranceError,
+                        "invalid_bls_to_execution_change",
+                    );
+                }
                 return;
             }
         };
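In short, the new logic splits one rejection path into two. A condensed view of the dispatch (sketch only, using the same names as the hunk above):

let (acceptance, penalty) = if matches!(e, BeaconChainError::BlsToExecutionChangeBadFork(_)) {
    // Pre-Capella message: drop it quietly and leave the peer's score alone.
    (MessageAcceptance::Ignore, None)
} else {
    // Genuinely invalid change: reject and apply a mild penalty.
    (
        MessageAcceptance::Reject,
        Some((PeerAction::HighToleranceError, "invalid_bls_to_execution_change")),
    )
};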
@ -2,6 +2,7 @@ use super::*;
 use beacon_chain::{
     builder::{BeaconChainBuilder, Witness},
     eth1_chain::CachingEth1Backend,
+    validator_monitor::DEFAULT_INDIVIDUAL_TRACKING_THRESHOLD,
     BeaconChain,
 };
 use futures::prelude::*;
@ -75,7 +76,7 @@ impl TestBeaconChain {
                 Duration::from_millis(SLOT_DURATION_MILLIS),
             ))
             .shutdown_sender(shutdown_tx)
-            .monitor_validators(true, vec![], log)
+            .monitor_validators(true, vec![], DEFAULT_INDIVIDUAL_TRACKING_THRESHOLD, log)
             .build()
             .expect("should build"),
     );
@ -4,9 +4,6 @@ version = "0.2.0"
 authors = ["Michael Sproul <michael@sigmaprime.io>"]
 edition = "2021"

-[features]
-withdrawals-processing = []
-
 [dependencies]
 derivative = "2.1.1"
 itertools = "0.10.0"
@ -49,7 +49,7 @@ impl<'a, T: EthSpec> AttMaxCover<'a, T> {
         let indices = get_attesting_indices::<T>(committee.committee, &fresh_validators).ok()?;
         let fresh_validators_rewards: HashMap<u64, u64> = indices
             .iter()
-            .map(|i| *i as u64)
+            .copied()
             .flat_map(|validator_index| {
                 let reward = base::get_base_reward(
                     state,
@ -12,7 +12,8 @@ pub use attestation::AttMaxCover;
 pub use attestation_storage::{AttestationRef, SplitAttestation};
 pub use max_cover::MaxCover;
 pub use persistence::{
-    PersistedOperationPool, PersistedOperationPoolV12, PersistedOperationPoolV5,
+    PersistedOperationPool, PersistedOperationPoolV12, PersistedOperationPoolV14,
+    PersistedOperationPoolV5,
 };
 pub use reward_cache::RewardCache;
@ -51,7 +52,6 @@ pub struct OperationPool<T: EthSpec + Default> {
     /// Map from exiting validator to their exit data.
     voluntary_exits: RwLock<HashMap<u64, SigVerifiedOp<SignedVoluntaryExit, T>>>,
     /// Map from credential changing validator to their execution change data.
-    #[cfg(feature = "withdrawals-processing")]
     bls_to_execution_changes: RwLock<HashMap<u64, SigVerifiedOp<SignedBlsToExecutionChange, T>>>,
     /// Reward cache for accelerating attestation packing.
     reward_cache: RwLock<RewardCache>,
@ -518,17 +518,10 @@ impl<T: EthSpec> OperationPool<T> {
         &self,
         verified_change: SigVerifiedOp<SignedBlsToExecutionChange, T>,
     ) {
-        #[cfg(feature = "withdrawals-processing")]
-        {
-            self.bls_to_execution_changes.write().insert(
-                verified_change.as_inner().message.validator_index,
-                verified_change,
-            );
-        }
-
-        #[cfg(not(feature = "withdrawals-processing"))]
-        {
-            drop(verified_change);
-        }
+        self.bls_to_execution_changes.write().insert(
+            verified_change.as_inner().message.validator_index,
+            verified_change,
+        );
     }

     /// Get a list of execution changes for inclusion in a block.
@ -539,32 +532,19 @@ impl<T: EthSpec> OperationPool<T> {
         state: &BeaconState<T>,
         spec: &ChainSpec,
     ) -> Vec<SignedBlsToExecutionChange> {
-        #[cfg(feature = "withdrawals-processing")]
-        {
-            filter_limit_operations(
-                self.bls_to_execution_changes.read().values(),
-                |address_change| {
-                    address_change.signature_is_still_valid(&state.fork())
-                        && state
-                            .get_validator(
-                                address_change.as_inner().message.validator_index as usize,
-                            )
-                            .map_or(false, |validator| {
-                                !validator.has_eth1_withdrawal_credential(spec)
-                            })
-                },
-                |address_change| address_change.as_inner().clone(),
-                T::MaxBlsToExecutionChanges::to_usize(),
-            )
-        }
-
-        // TODO: remove this whole block once withdrwals-processing is removed
-        #[cfg(not(feature = "withdrawals-processing"))]
-        {
-            #[allow(clippy::drop_copy)]
-            drop((state, spec));
-            vec![]
-        }
+        filter_limit_operations(
+            self.bls_to_execution_changes.read().values(),
+            |address_change| {
+                address_change.signature_is_still_valid(&state.fork())
+                    && state
+                        .get_validator(address_change.as_inner().message.validator_index as usize)
+                        .map_or(false, |validator| {
+                            !validator.has_eth1_withdrawal_credential(spec)
+                        })
+            },
+            |address_change| address_change.as_inner().clone(),
+            T::MaxBlsToExecutionChanges::to_usize(),
+        )
     }

     /// Prune BLS to execution changes that have been applied to the state more than 1 block ago.
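`filter_limit_operations` is not shown in this diff; from the call site its shape is roughly the following (an inferred sketch, not the actual definition) -- filter the candidate operations, map the survivors, and cap the result count:

fn filter_limit_operations<T, V, I, F, M>(operations: I, filter: F, mapping: M, limit: usize) -> Vec<V>
where
    I: IntoIterator<Item = T>,
    F: Fn(&T) -> bool,
    M: Fn(&T) -> V,
{
    operations
        .into_iter()
        .filter(|op| filter(op))
        .take(limit)
        .map(|op| mapping(&op))
        .collect()
}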
@ -579,32 +559,22 @@ impl<T: EthSpec> OperationPool<T> {
         head_state: &BeaconState<T>,
         spec: &ChainSpec,
     ) {
-        #[cfg(feature = "withdrawals-processing")]
-        {
-            prune_validator_hash_map(
-                &mut self.bls_to_execution_changes.write(),
-                |validator_index, validator| {
-                    validator.has_eth1_withdrawal_credential(spec)
-                        && head_block
-                            .message()
-                            .body()
-                            .bls_to_execution_changes()
-                            .map_or(true, |recent_changes| {
-                                !recent_changes
-                                    .iter()
-                                    .any(|c| c.message.validator_index == validator_index)
-                            })
-                },
-                head_state,
-            );
-        }
-
-        // TODO: remove this whole block once withdrwals-processing is removed
-        #[cfg(not(feature = "withdrawals-processing"))]
-        {
-            #[allow(clippy::drop_copy)]
-            drop((head_block, head_state, spec));
-        }
+        prune_validator_hash_map(
+            &mut self.bls_to_execution_changes.write(),
+            |validator_index, validator| {
+                validator.has_eth1_withdrawal_credential(spec)
+                    && head_block
+                        .message()
+                        .body()
+                        .bls_to_execution_changes()
+                        .map_or(true, |recent_changes| {
+                            !recent_changes
+                                .iter()
+                                .any(|c| c.message.validator_index == validator_index)
+                        })
+            },
+            head_state,
+        );
     }

     /// Prune all types of transactions given the latest head state and head fork.
@ -691,17 +661,11 @@ impl<T: EthSpec> OperationPool<T> {
     ///
     /// This method may return objects that are invalid for block inclusion.
     pub fn get_all_bls_to_execution_changes(&self) -> Vec<SignedBlsToExecutionChange> {
-        #[cfg(feature = "withdrawals-processing")]
-        {
-            self.bls_to_execution_changes
-                .read()
-                .iter()
-                .map(|(_, address_change)| address_change.as_inner().clone())
-                .collect()
-        }
-
-        #[cfg(not(feature = "withdrawals-processing"))]
-        vec![]
+        self.bls_to_execution_changes
+            .read()
+            .iter()
+            .map(|(_, address_change)| address_change.as_inner().clone())
+            .collect()
     }
 }
@ -1787,7 +1751,7 @@ mod release_tests {

     fn cross_fork_harness<E: EthSpec>() -> (BeaconChainHarness<EphemeralHarnessType<E>>, ChainSpec)
     {
-        let mut spec = test_spec::<E>();
+        let mut spec = E::default_spec();

         // Give some room to sign surround slashings.
         spec.altair_fork_epoch = Some(Epoch::new(3));
@ -18,7 +18,7 @@ type PersistedSyncContributions<T> = Vec<(SyncAggregateId, Vec<SyncCommitteeCont
 /// Operations are stored in arbitrary order, so it's not a good idea to compare instances
 /// of this type (or its encoded form) for equality. Convert back to an `OperationPool` first.
 #[superstruct(
-    variants(V5, V12),
+    variants(V5, V12, V14),
     variant_attributes(
         derive(Derivative, PartialEq, Debug, Encode, Decode),
         derivative(Clone),
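For readers unfamiliar with `superstruct`: adding `V14` here generates a new variant struct alongside the existing ones, plus a matching enum variant. Roughly (a simplified sketch of the expansion, not the real codegen; the field list is taken from the hunks that follow):

pub struct PersistedOperationPoolV14<T: EthSpec> {
    pub attestations: Vec<(Attestation<T>, Vec<u64>)>,
    pub sync_contributions: PersistedSyncContributions<T>,
    pub attester_slashings: Vec<SigVerifiedOp<AttesterSlashing<T>, T>>,
    pub proposer_slashings: Vec<SigVerifiedOp<ProposerSlashing, T>>,
    pub voluntary_exits: Vec<SigVerifiedOp<SignedVoluntaryExit, T>>,
    pub bls_to_execution_changes: Vec<SigVerifiedOp<SignedBlsToExecutionChange, T>>,
}

pub enum PersistedOperationPool<T: EthSpec> {
    V5(PersistedOperationPoolV5<T>),
    V12(PersistedOperationPoolV12<T>),
    V14(PersistedOperationPoolV14<T>),
}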
@ -32,7 +32,7 @@ pub struct PersistedOperationPool<T: EthSpec> {
     #[superstruct(only(V5))]
     pub attestations_v5: Vec<(AttestationId, Vec<Attestation<T>>)>,
     /// Attestations and their attesting indices.
-    #[superstruct(only(V12))]
+    #[superstruct(only(V12, V14))]
     pub attestations: Vec<(Attestation<T>, Vec<u64>)>,
     /// Mapping from sync contribution ID to sync contributions and aggregate.
     pub sync_contributions: PersistedSyncContributions<T>,
@ -40,20 +40,23 @@ pub struct PersistedOperationPool<T: EthSpec> {
     #[superstruct(only(V5))]
     pub attester_slashings_v5: Vec<(AttesterSlashing<T>, ForkVersion)>,
     /// Attester slashings.
-    #[superstruct(only(V12))]
+    #[superstruct(only(V12, V14))]
     pub attester_slashings: Vec<SigVerifiedOp<AttesterSlashing<T>, T>>,
     /// [DEPRECATED] Proposer slashings.
     #[superstruct(only(V5))]
     pub proposer_slashings_v5: Vec<ProposerSlashing>,
     /// Proposer slashings with fork information.
-    #[superstruct(only(V12))]
+    #[superstruct(only(V12, V14))]
     pub proposer_slashings: Vec<SigVerifiedOp<ProposerSlashing, T>>,
     /// [DEPRECATED] Voluntary exits.
     #[superstruct(only(V5))]
     pub voluntary_exits_v5: Vec<SignedVoluntaryExit>,
     /// Voluntary exits with fork information.
-    #[superstruct(only(V12))]
+    #[superstruct(only(V12, V14))]
     pub voluntary_exits: Vec<SigVerifiedOp<SignedVoluntaryExit, T>>,
+    /// BLS to Execution Changes
+    #[superstruct(only(V14))]
+    pub bls_to_execution_changes: Vec<SigVerifiedOp<SignedBlsToExecutionChange, T>>,
 }

 impl<T: EthSpec> PersistedOperationPool<T> {
@ -99,12 +102,20 @@ impl<T: EthSpec> PersistedOperationPool<T> {
             .map(|(_, exit)| exit.clone())
             .collect();

-        PersistedOperationPool::V12(PersistedOperationPoolV12 {
+        let bls_to_execution_changes = operation_pool
+            .bls_to_execution_changes
+            .read()
+            .iter()
+            .map(|(_, bls_to_execution_change)| bls_to_execution_change.clone())
+            .collect();
+
+        PersistedOperationPool::V14(PersistedOperationPoolV14 {
             attestations,
             sync_contributions,
             attester_slashings,
             proposer_slashings,
             voluntary_exits,
+            bls_to_execution_changes,
         })
     }
@ -127,24 +138,41 @@ impl<T: EthSpec> PersistedOperationPool<T> {
         );
         let sync_contributions = RwLock::new(self.sync_contributions().iter().cloned().collect());
         let attestations = match self {
-            PersistedOperationPool::V5(_) => return Err(OpPoolError::IncorrectOpPoolVariant),
-            PersistedOperationPool::V12(pool) => {
+            PersistedOperationPool::V5(_) | PersistedOperationPool::V12(_) => {
+                return Err(OpPoolError::IncorrectOpPoolVariant)
+            }
+            PersistedOperationPool::V14(ref pool) => {
                 let mut map = AttestationMap::default();
-                for (att, attesting_indices) in pool.attestations {
+                for (att, attesting_indices) in pool.attestations.clone() {
                     map.insert(att, attesting_indices);
                 }
                 RwLock::new(map)
             }
         };
+        let bls_to_execution_changes = match self {
+            PersistedOperationPool::V5(_) | PersistedOperationPool::V12(_) => {
+                return Err(OpPoolError::IncorrectOpPoolVariant)
+            }
+            PersistedOperationPool::V14(pool) => RwLock::new(
+                pool.bls_to_execution_changes
+                    .iter()
+                    .cloned()
+                    .map(|bls_to_execution_change| {
+                        (
+                            bls_to_execution_change.as_inner().message.validator_index,
+                            bls_to_execution_change,
+                        )
+                    })
+                    .collect(),
+            ),
+        };
         let op_pool = OperationPool {
             attestations,
             sync_contributions,
             attester_slashings,
             proposer_slashings,
             voluntary_exits,
-            // FIXME(capella): implement schema migration for address changes in op pool
-            #[cfg(feature = "withdrawals-processing")]
-            bls_to_execution_changes: Default::default(),
+            bls_to_execution_changes,
             reward_cache: Default::default(),
             _phantom: Default::default(),
         };
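Taken together, this hunk and the previous one give a round trip through the new on-disk format. A sketch (assuming the serializing method is named `from_operation_pool` and the inverse `into_operation_pool`, as the error handling above suggests):

// Serialize the live pool as the latest (V14) variant, then rebuild it.
let persisted = PersistedOperationPool::from_operation_pool(&op_pool);
let bytes = persisted.as_store_bytes();
let restored = PersistedOperationPool::<T>::from_store_bytes(&bytes)?.into_operation_pool()?;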
@ -166,6 +194,20 @@ impl<T: EthSpec> StoreItem for PersistedOperationPoolV5<T> {
     }
 }

+impl<T: EthSpec> StoreItem for PersistedOperationPoolV12<T> {
+    fn db_column() -> DBColumn {
+        DBColumn::OpPool
+    }
+
+    fn as_store_bytes(&self) -> Vec<u8> {
+        self.as_ssz_bytes()
+    }
+
+    fn from_store_bytes(bytes: &[u8]) -> Result<Self, StoreError> {
+        PersistedOperationPoolV12::from_ssz_bytes(bytes).map_err(Into::into)
+    }
+}
+
 /// Deserialization for `PersistedOperationPool` defaults to `PersistedOperationPool::V12`.
 impl<T: EthSpec> StoreItem for PersistedOperationPool<T> {
     fn db_column() -> DBColumn {
@ -178,8 +220,8 @@ impl<T: EthSpec> StoreItem for PersistedOperationPool<T> {

     fn from_store_bytes(bytes: &[u8]) -> Result<Self, StoreError> {
         // Default deserialization to the latest variant.
-        PersistedOperationPoolV12::from_ssz_bytes(bytes)
-            .map(Self::V12)
+        PersistedOperationPoolV14::from_ssz_bytes(bytes)
+            .map(Self::V14)
             .map_err(Into::into)
     }
 }
@ -763,6 +763,17 @@ pub fn cli_app<'a, 'b>() -> App<'a, 'b> {
             .value_name("PATH")
             .takes_value(true)
         )
+        .arg(
+            Arg::with_name("validator-monitor-individual-tracking-threshold")
+                .long("validator-monitor-individual-tracking-threshold")
+                .help("Once the validator monitor reaches this number of local validators \
+                    it will stop collecting per-validator Prometheus metrics and issuing \
+                    per-validator logs. Instead, it will provide aggregate metrics and logs. \
+                    This avoids infeasibly high cardinality in the Prometheus database and \
+                    high log volume when using many validators. Defaults to 64.")
+                .value_name("INTEGER")
+                .takes_value(true)
+        )
         .arg(
             Arg::with_name("disable-lock-timeouts")
                 .long("disable-lock-timeouts")
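For example, a node running many validators could lower the per-validator tracking cap to 32 (hypothetical invocation, flag name as defined above):

    lighthouse bn --validator-monitor-individual-tracking-threshold 32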
@ -910,6 +921,13 @@ pub fn cli_app<'a, 'b>() -> App<'a, 'b> {
             Useful if you intend to run a non-validating beacon node.")
             .takes_value(false)
         )
+        .arg(
+            Arg::with_name("disable-optimistic-finalized-sync")
+                .long("disable-optimistic-finalized-sync")
+                .help("Force Lighthouse to verify every execution block hash with the execution \
+                    client during finalized sync. By default block hashes will be checked in \
+                    Lighthouse and only passed to the EL if initial verification fails.")
+        )
         .arg(
             Arg::with_name("light-client-server")
                 .long("light-client-server")
@ -348,7 +348,6 @@ pub fn get_config<E: EthSpec>(
         let execution_timeout_multiplier =
             clap_utils::parse_required(cli_args, "execution-timeout-multiplier")?;
         el_config.execution_timeout_multiplier = Some(execution_timeout_multiplier);
-        el_config.spec = spec.clone();

         // If `--execution-endpoint` is provided, we should ignore any `--eth1-endpoints` values and
         // use `--execution-endpoint` instead. Also, log a deprecation warning.
@ -693,6 +692,12 @@ pub fn get_config<E: EthSpec>(
             .extend_from_slice(&pubkeys);
     }

+    if let Some(count) =
+        clap_utils::parse_optional(cli_args, "validator-monitor-individual-tracking-threshold")?
+    {
+        client_config.validator_monitor_individual_tracking_threshold = count;
+    }
+
     if cli_args.is_present("disable-lock-timeouts") {
         client_config.chain.enable_lock_timeouts = false;
     }
@ -759,6 +764,10 @@ pub fn get_config<E: EthSpec>(
         client_config.validator_monitor_auto = true;
     }

+    // Optimistic finalized sync.
+    client_config.chain.optimistic_finalized_sync =
+        !cli_args.is_present("disable-optimistic-finalized-sync");
+
     Ok(client_config)
 }
@ -26,6 +26,3 @@ lru = "0.7.1"
 sloggers = { version = "2.1.1", features = ["json"] }
 directory = { path = "../../common/directory" }
 strum = { version = "0.24.0", features = ["derive"] }
-
-[features]
-withdrawals-processing = ["state_processing/withdrawals-processing"]
@ -18,6 +18,7 @@ use self::UpdatePattern::*;
 use crate::*;
 use ssz::{Decode, Encode};
 use typenum::Unsigned;
+use types::historical_summary::HistoricalSummary;

 /// Description of how a `BeaconState` field is updated during state processing.
 ///
@ -26,7 +27,18 @@ use typenum::Unsigned;
 #[derive(Debug, Clone, Copy, PartialEq, Eq)]
 pub enum UpdatePattern {
     /// The value is updated once per `n` slots.
-    OncePerNSlots { n: u64 },
+    OncePerNSlots {
+        n: u64,
+        /// The slot at which the field begins to accumulate values.
+        ///
+        /// The field should not be read or written until `activation_slot` is reached, and the
+        /// activation slot should act as an offset when converting slots to vector indices.
+        activation_slot: Option<Slot>,
+        /// The slot at which the field ceases to accumulate values.
+        ///
+        /// If this is `None` then the field is continually updated.
+        deactivation_slot: Option<Slot>,
+    },
     /// The value is updated once per epoch, for the epoch `current_epoch - lag`.
     OncePerEpoch { lag: u64 },
 }
@ -98,12 +110,30 @@ pub trait Field<E: EthSpec>: Copy {
     fn start_and_end_vindex(current_slot: Slot, spec: &ChainSpec) -> (usize, usize) {
         // We take advantage of saturating subtraction on slots and epochs
         match Self::update_pattern(spec) {
-            OncePerNSlots { n } => {
+            OncePerNSlots {
+                n,
+                activation_slot,
+                deactivation_slot,
+            } => {
                 // Per-slot changes exclude the index for the current slot, because
                 // it won't be set until the slot completes (think of `state_roots`, `block_roots`).
                 // This also works for the `historical_roots` because at the `n`th slot, the 0th
                 // entry of the list is created, and before that the list is empty.
-                let end_vindex = current_slot / n;
+                //
+                // To account for the switch from historical roots to historical summaries at
+                // Capella we also modify the current slot by the activation and deactivation slots.
+                // The activation slot acts as an offset (subtraction) while the deactivation slot
+                // acts as a clamp (min).
+                let slot_with_clamp = deactivation_slot.map_or(current_slot, |deactivation_slot| {
+                    std::cmp::min(current_slot, deactivation_slot)
+                });
+                let slot_with_clamp_and_offset = if let Some(activation_slot) = activation_slot {
+                    slot_with_clamp - activation_slot
+                } else {
+                    // Return (0, 0) to indicate that the field should not be read/written.
+                    return (0, 0);
+                };
+                let end_vindex = slot_with_clamp_and_offset / n;
                 let start_vindex = end_vindex - Self::Length::to_u64();
                 (start_vindex.as_usize(), end_vindex.as_usize())
             }
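A worked example of the clamp-and-offset arithmetic (all numbers are illustrative assumptions, not mainnet parameters):

// Assume n = 8192 slots per historical period, and Capella activating at
// slot 1_024_000 (= 8192 * 125).
let n = 8192u64;
let current_slot = 1_100_000u64;
let capella_slot = 1_024_000u64;

// `historical_roots` deactivates at Capella: clamp to the deactivation slot,
// then offset by its activation slot of 0. The index freezes at 125 forever.
let end_vindex_roots = current_slot.min(capella_slot) / n; // = 125

// `historical_summaries` activates at Capella: subtract the activation slot,
// no clamp. (1_100_000 - 1_024_000) / 8192 = 9.
let end_vindex_summaries = (current_slot - capella_slot) / n; // = 9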
@ -295,7 +325,11 @@ field!(
     Hash256,
     T::SlotsPerHistoricalRoot,
     DBColumn::BeaconBlockRoots,
-    |_| OncePerNSlots { n: 1 },
+    |_| OncePerNSlots {
+        n: 1,
+        activation_slot: Some(Slot::new(0)),
+        deactivation_slot: None
+    },
     |state: &BeaconState<_>, index, _| safe_modulo_index(state.block_roots(), index)
 );
@ -305,7 +339,11 @@ field!(
     Hash256,
     T::SlotsPerHistoricalRoot,
     DBColumn::BeaconStateRoots,
-    |_| OncePerNSlots { n: 1 },
+    |_| OncePerNSlots {
+        n: 1,
+        activation_slot: Some(Slot::new(0)),
+        deactivation_slot: None,
+    },
     |state: &BeaconState<_>, index, _| safe_modulo_index(state.state_roots(), index)
 );
@ -315,8 +353,12 @@ field!(
     Hash256,
     T::HistoricalRootsLimit,
     DBColumn::BeaconHistoricalRoots,
-    |_| OncePerNSlots {
-        n: T::SlotsPerHistoricalRoot::to_u64()
+    |spec: &ChainSpec| OncePerNSlots {
+        n: T::SlotsPerHistoricalRoot::to_u64(),
+        activation_slot: Some(Slot::new(0)),
+        deactivation_slot: spec
+            .capella_fork_epoch
+            .map(|fork_epoch| fork_epoch.start_slot(T::slots_per_epoch())),
     },
     |state: &BeaconState<_>, index, _| safe_modulo_index(state.historical_roots(), index)
 );
@ -331,6 +373,27 @@ field!(
     |state: &BeaconState<_>, index, _| safe_modulo_index(state.randao_mixes(), index)
 );

+field!(
+    HistoricalSummaries,
+    VariableLengthField,
+    HistoricalSummary,
+    T::HistoricalRootsLimit,
+    DBColumn::BeaconHistoricalSummaries,
+    |spec: &ChainSpec| OncePerNSlots {
+        n: T::SlotsPerHistoricalRoot::to_u64(),
+        activation_slot: spec
+            .capella_fork_epoch
+            .map(|fork_epoch| fork_epoch.start_slot(T::slots_per_epoch())),
+        deactivation_slot: None,
+    },
+    |state: &BeaconState<_>, index, _| safe_modulo_index(
+        state
+            .historical_summaries()
+            .map_err(|_| ChunkError::InvalidFork)?,
+        index
+    )
+);
+
 pub fn store_updated_vector<F: Field<E>, E: EthSpec, S: KeyValueStore<E>>(
     field: F,
     store: &S,
@ -679,6 +742,7 @@ pub enum ChunkError {
         end_vindex: usize,
         length: usize,
     },
+    InvalidFork,
 }

 #[cfg(test)]
@ -801,7 +865,7 @@ mod test {

     fn needs_genesis_value_test_randao<F: Field<TestSpec>>(_: F) {
         let spec = &TestSpec::default_spec();
-        let max = TestSpec::slots_per_epoch() as u64 * (F::Length::to_u64() - 1);
+        let max = TestSpec::slots_per_epoch() * (F::Length::to_u64() - 1);
         for i in 0..max {
             assert!(
                 F::slot_needs_genesis_value(Slot::new(i), spec),
@ -1,5 +1,5 @@
 use crate::chunked_vector::{
-    store_updated_vector, BlockRoots, HistoricalRoots, RandaoMixes, StateRoots,
+    store_updated_vector, BlockRoots, HistoricalRoots, HistoricalSummaries, RandaoMixes, StateRoots,
 };
 use crate::config::{
     OnDiskStoreConfig, StoreConfig, DEFAULT_SLOTS_PER_RESTORE_POINT,
@ -952,6 +952,7 @@ impl<E: EthSpec, Hot: ItemStore<E>, Cold: ItemStore<E>> HotColdDB<E, Hot, Cold>
         store_updated_vector(StateRoots, db, state, &self.spec, ops)?;
         store_updated_vector(HistoricalRoots, db, state, &self.spec, ops)?;
         store_updated_vector(RandaoMixes, db, state, &self.spec, ops)?;
+        store_updated_vector(HistoricalSummaries, db, state, &self.spec, ops)?;

         // 3. Store restore point.
         let restore_point_index = state.slot().as_u64() / self.config.slots_per_restore_point;
@ -1006,6 +1007,7 @@ impl<E: EthSpec, Hot: ItemStore<E>, Cold: ItemStore<E>> HotColdDB<E, Hot, Cold>
         partial_state.load_state_roots(&self.cold_db, &self.spec)?;
         partial_state.load_historical_roots(&self.cold_db, &self.spec)?;
         partial_state.load_randao_mixes(&self.cold_db, &self.spec)?;
+        partial_state.load_historical_summaries(&self.cold_db, &self.spec)?;

         partial_state.try_into()
     }
@ -215,6 +215,8 @@ pub enum DBColumn {
     /// For Optimistically Imported Merge Transition Blocks
     #[strum(serialize = "otb")]
     OptimisticTransitionBlock,
+    #[strum(serialize = "bhs")]
+    BeaconHistoricalSummaries,
 }

 /// A block from the database, which might have an execution payload or not.
@ -4,7 +4,7 @@ use ssz::{Decode, Encode};
 use ssz_derive::{Decode, Encode};
 use types::{Checkpoint, Hash256, Slot};

-pub const CURRENT_SCHEMA_VERSION: SchemaVersion = SchemaVersion(13);
+pub const CURRENT_SCHEMA_VERSION: SchemaVersion = SchemaVersion(14);

 // All the keys that get stored under the `BeaconMeta` column.
 //
@ -1,12 +1,13 @@
 use crate::chunked_vector::{
-    load_variable_list_from_db, load_vector_from_db, BlockRoots, HistoricalRoots, RandaoMixes,
-    StateRoots,
+    load_variable_list_from_db, load_vector_from_db, BlockRoots, HistoricalRoots,
+    HistoricalSummaries, RandaoMixes, StateRoots,
 };
 use crate::{get_key_for_col, DBColumn, Error, KeyValueStore, KeyValueStoreOp};
 use ssz::{Decode, DecodeError, Encode};
 use ssz_derive::{Decode, Encode};
 use std::convert::TryInto;
 use std::sync::Arc;
+use types::historical_summary::HistoricalSummary;
 use types::superstruct;
 use types::*;
@@ -104,16 +105,20 @@ where
     )]
     pub latest_execution_payload_header: ExecutionPayloadHeaderEip4844<T>,
 
-    // Withdrawals
+    // Capella
     #[superstruct(only(Capella, Eip4844))]
     pub next_withdrawal_index: u64,
     #[superstruct(only(Capella, Eip4844))]
     pub next_withdrawal_validator_index: u64,
+
+    #[ssz(skip_serializing, skip_deserializing)]
+    #[superstruct(only(Capella, Eip4844))]
+    pub historical_summaries: Option<VariableList<HistoricalSummary, T::HistoricalRootsLimit>>,
 }
 
 /// Implement the conversion function from BeaconState -> PartialBeaconState.
 macro_rules! impl_from_state_forgetful {
-    ($s:ident, $outer:ident, $variant_name:ident, $struct_name:ident, [$($extra_fields:ident),*]) => {
+    ($s:ident, $outer:ident, $variant_name:ident, $struct_name:ident, [$($extra_fields:ident),*], [$($extra_fields_opt:ident),*]) => {
        PartialBeaconState::$variant_name($struct_name {
            // Versioning
            genesis_time: $s.genesis_time,
@@ -154,6 +159,11 @@ macro_rules! impl_from_state_forgetful {
            // Variant-specific fields
            $(
                $extra_fields: $s.$extra_fields.clone()
+           ),*,
+
+           // Variant-specific optional
+           $(
+               $extra_fields_opt: None
            ),*
        })
    }
@@ -168,7 +178,8 @@ impl<T: EthSpec> PartialBeaconState<T> {
                outer,
                Base,
                PartialBeaconStateBase,
-               [previous_epoch_attestations, current_epoch_attestations]
+               [previous_epoch_attestations, current_epoch_attestations],
+               []
            ),
            BeaconState::Altair(s) => impl_from_state_forgetful!(
                s,
@@ -181,7 +192,8 @@ impl<T: EthSpec> PartialBeaconState<T> {
                    current_sync_committee,
                    next_sync_committee,
                    inactivity_scores
-               ]
+               ],
+               []
            ),
            BeaconState::Merge(s) => impl_from_state_forgetful!(
                s,
@@ -195,7 +207,8 @@ impl<T: EthSpec> PartialBeaconState<T> {
                    next_sync_committee,
                    inactivity_scores,
                    latest_execution_payload_header
-               ]
+               ],
+               []
            ),
            BeaconState::Capella(s) => impl_from_state_forgetful!(
                s,
@@ -211,7 +224,8 @@ impl<T: EthSpec> PartialBeaconState<T> {
                    latest_execution_payload_header,
                    next_withdrawal_index,
                    next_withdrawal_validator_index
-               ]
+               ],
+               [historical_summaries]
            ),
            BeaconState::Eip4844(s) => impl_from_state_forgetful!(
                s,
@@ -227,7 +241,8 @@ impl<T: EthSpec> PartialBeaconState<T> {
                    latest_execution_payload_header,
                    next_withdrawal_index,
                    next_withdrawal_validator_index
-               ]
+               ],
+               [historical_summaries]
            ),
        }
    }
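Every invocation of impl_from_state_forgetful! now takes a second bracketed list: the "optional" fields that exist only on some forks (historical_summaries on Capella/Eip4844, empty elsewhere), which the macro initialises to None instead of cloning. A heavily simplified, self-contained macro showing how that two-list expansion works:

    struct Full {
        a: u64,
    }

    struct Partial {
        a: u64,
        b: Option<u64>,
    }

    macro_rules! make_partial {
        ($src:ident, [$($field:ident),*], [$($opt_field:ident),*]) => {
            Partial {
                // Required fields are copied from the source...
                $(
                    $field: $src.$field
                ),*,
                // ...optional fields start out unloaded.
                $(
                    $opt_field: None
                ),*
            }
        };
    }

    fn main() {
        let full = Full { a: 7 };
        let partial = make_partial!(full, [a], [b]);
        assert_eq!(partial.a, 7);
        assert!(partial.b.is_none());
    }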
@@ -303,6 +318,23 @@ impl<T: EthSpec> PartialBeaconState<T> {
        Ok(())
    }
 
+    pub fn load_historical_summaries<S: KeyValueStore<T>>(
+        &mut self,
+        store: &S,
+        spec: &ChainSpec,
+    ) -> Result<(), Error> {
+        let slot = self.slot();
+        if let Ok(historical_summaries) = self.historical_summaries_mut() {
+            if historical_summaries.is_none() {
+                *historical_summaries =
+                    Some(load_variable_list_from_db::<HistoricalSummaries, T, _>(
+                        store, slot, spec,
+                    )?);
+            }
+        }
+        Ok(())
+    }
+
    pub fn load_randao_mixes<S: KeyValueStore<T>>(
        &mut self,
        store: &S,
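load_historical_summaries follows a load-once guard: the field is populated only if it exists on this fork at all (the superstruct accessor returns Ok) and has not already been loaded. The same guard, boiled down to a self-contained sketch (hypothetical helper, not Lighthouse code):

    fn ensure_loaded<T, E>(
        field: Result<&mut Option<T>, E>,          // Err on forks without the field
        load: impl FnOnce() -> Result<T, String>,  // e.g. a read from the cold DB
    ) -> Result<(), String> {
        if let Ok(entry) = field {
            if entry.is_none() {
                *entry = Some(load()?);
            }
        }
        Ok(())
    }

    fn main() {
        let mut summaries: Option<Vec<u64>> = None;
        ensure_loaded::<_, ()>(Ok(&mut summaries), || Ok(vec![1, 2, 3])).unwrap();
        assert_eq!(summaries, Some(vec![1, 2, 3]));
    }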
@@ -326,7 +358,7 @@ impl<T: EthSpec> PartialBeaconState<T> {
 
 /// Implement the conversion from PartialBeaconState -> BeaconState.
 macro_rules! impl_try_into_beacon_state {
-    ($inner:ident, $variant_name:ident, $struct_name:ident, [$($extra_fields:ident),*]) => {
+    ($inner:ident, $variant_name:ident, $struct_name:ident, [$($extra_fields:ident),*], [$($extra_opt_fields:ident),*]) => {
        BeaconState::$variant_name($struct_name {
            // Versioning
            genesis_time: $inner.genesis_time,
@@ -371,6 +403,11 @@ macro_rules! impl_try_into_beacon_state {
            // Variant-specific fields
            $(
                $extra_fields: $inner.$extra_fields
+           ),*,
+
+           // Variant-specific optional fields
+           $(
+               $extra_opt_fields: unpack_field($inner.$extra_opt_fields)?
            ),*
        })
    }
@@ -389,7 +426,8 @@ impl<E: EthSpec> TryInto<BeaconState<E>> for PartialBeaconState<E> {
                inner,
                Base,
                BeaconStateBase,
-               [previous_epoch_attestations, current_epoch_attestations]
+               [previous_epoch_attestations, current_epoch_attestations],
+               []
            ),
            PartialBeaconState::Altair(inner) => impl_try_into_beacon_state!(
                inner,
@@ -401,7 +439,8 @@ impl<E: EthSpec> TryInto<BeaconState<E>> for PartialBeaconState<E> {
                    current_sync_committee,
                    next_sync_committee,
                    inactivity_scores
-               ]
+               ],
+               []
            ),
            PartialBeaconState::Merge(inner) => impl_try_into_beacon_state!(
                inner,
@@ -414,7 +453,8 @@ impl<E: EthSpec> TryInto<BeaconState<E>> for PartialBeaconState<E> {
                    next_sync_committee,
                    inactivity_scores,
                    latest_execution_payload_header
-               ]
+               ],
+               []
            ),
            PartialBeaconState::Capella(inner) => impl_try_into_beacon_state!(
                inner,
@@ -429,7 +469,8 @@ impl<E: EthSpec> TryInto<BeaconState<E>> for PartialBeaconState<E> {
                    latest_execution_payload_header,
                    next_withdrawal_index,
                    next_withdrawal_validator_index
-               ]
+               ],
+               [historical_summaries]
            ),
            PartialBeaconState::Eip4844(inner) => impl_try_into_beacon_state!(
                inner,
@@ -444,7 +485,8 @@ impl<E: EthSpec> TryInto<BeaconState<E>> for PartialBeaconState<E> {
                    latest_execution_payload_header,
                    next_withdrawal_index,
                    next_withdrawal_validator_index
-               ]
+               ],
+               [historical_summaries]
            ),
        };
        Ok(state)
@@ -12,10 +12,10 @@ This number can be much higher depending on how many other validators are queued
 
 ## Withdrawal of exited funds
 
-Even though users can perform a voluntary exit in phase 0, they **cannot withdraw their exited funds at this point in time**.
-This implies that the staked funds are effectively **frozen** until withdrawals are enabled in future phases.
+Even though users can currently perform a voluntary exit, they **cannot withdraw their exited funds at this point in time**.
+This implies that the staked funds are effectively **frozen** until withdrawals are enabled in a future hard fork (Capella).
 
-To understand the phased rollout strategy for Ethereum upgrades, please visit <https://ethereum.org/en/upgrades/#roadmap>.
+To understand the rollout strategy for Ethereum upgrades, please visit <https://ethereum.org/en/upgrades>.
 
 
@@ -1,6 +1,6 @@
 [package]
 name = "boot_node"
-version = "3.3.0"
+version = "3.4.0"
 authors = ["Sigma Prime <contact@sigmaprime.io>"]
 edition = "2021"
 
@@ -23,7 +23,6 @@ status = [
    "check-msrv",
    "slasher-tests",
    "syncing-simulator-ubuntu",
-   "disallowed-from-async-lint",
    "compile-with-beta-compiler"
 ]
 use_squash_merge = true
@@ -189,7 +189,7 @@ impl ValidatorDefinitions {
            .write(true)
            .read(true)
            .create_new(false)
-           .open(&config_path)
+           .open(config_path)
            .map_err(Error::UnableToOpenFile)?;
        serde_yaml::from_reader(file).map_err(Error::UnableToParseFile)
    }
@@ -12,7 +12,7 @@ hex = "0.4.2"
 dirs = "3.0.1"
 eth2_network_config = { path = "../eth2_network_config" }
 eth2_ssz = "0.4.1"
-ethereum-types = "0.12.1"
+ethereum-types = "0.14.1"
 serde = "1.0.116"
 serde_json = "1.0.59"
 serde_yaml = "0.8.13"
@@ -35,4 +35,3 @@ procinfo = { version = "0.4.2", optional = true }
 [features]
 default = ["lighthouse"]
 lighthouse = ["proto_array", "psutil", "procinfo", "store", "slashing_protection"]
-withdrawals-processing = ["store/withdrawals-processing"]
@@ -628,27 +628,6 @@ impl BeaconNodeHttpClient {
        Ok(())
    }
 
-    /// `POST beacon/blobs`
-    ///
-    /// Returns `Ok(None)` on a 404 error.
-    pub async fn post_beacon_blobs<T: EthSpec>(
-        &self,
-        block: &BlobsSidecar<T>,
-    ) -> Result<(), Error> {
-        let mut path = self.eth_path(V1)?;
-
-        path.path_segments_mut()
-            .map_err(|()| Error::InvalidUrl(self.server.clone()))?
-            .push("beacon")
-            .push("blobs");
-
-        //FIXME(sean) should we re-use the proposal timeout? seems reasonable to..
-        self.post_with_timeout(path, block, self.timeouts.proposal)
-            .await?;
-
-        Ok(())
-    }
-
    /// `POST beacon/blinded_blocks`
    ///
    /// Returns `Ok(None)` on a 404 error.
@@ -3,3 +3,7 @@
 - enr:-Ly4QBf76jLiCA_pDXoZjhyRbuwzFOscFY-MIKkPnmHPQbvaKhIDZutfe38G9ibzgQP0RKrTo3vcWOy4hf_8wOZ-U5MBh2F0dG5ldHOIAAAAAAAAAACEZXRoMpCCS-QxAgAAZP__________gmlkgnY0gmlwhBLGgjaJc2VjcDI1NmsxoQLGeo0Q4lDvxIjHjnkAqEuETTaFIjsNrEcSpdDhcHXWFYhzeW5jbmV0cwCDdGNwgiMog3VkcIIjKA
 - enr:-Ly4QLjZUWdqUO_RwyDqCAccIK5-MbLRD6A2c7oBuVbBgBnWDkEf0UKJVAaJqi2pO101WVQQLYSnYgz1Q3pRhYdrlFoBh2F0dG5ldHOIAAAAAAAAAACEZXRoMpCCS-QxAgAAZP__________gmlkgnY0gmlwhANA8sSJc2VjcDI1NmsxoQK4TC_EK1jSs0VVPUpOjIo1rhJmff2SLBPFOWSXMwdLVYhzeW5jbmV0cwCDdGNwgiMog3VkcIIjKA
 - enr:-Ly4QKwX2rTFtKWKQHSGQFhquxsxL1jewO8JB1MG-jgHqAZVFWxnb3yMoQqnYSV1bk25-_jiLuhIulxar3RBWXEDm6EBh2F0dG5ldHOIAAAAAAAAAACEZXRoMpCCS-QxAgAAZP__________gmlkgnY0gmlwhAN-qZeJc2VjcDI1NmsxoQI7EPGMpecl0QofLp4Wy_lYNCCChUFEH6kY7k-oBGkPFIhzeW5jbmV0cwCDdGNwgiMog3VkcIIjKA
+- enr:-Ly4QPoChSQTleJROee1-k-4HOEgKqL9kLksE-tEiVqcY9kwF9V53aBg-MruD7Yx4Aks3LAeJpKXAS4ntMrIdqvQYc8Ch2F0dG5ldHOIAAAAAAAAAACEZXRoMpCCS-QxAgAAZP__________gmlkgnY0gmlwhGsWBHiJc2VjcDI1NmsxoQKwGQrwOSBJB_DtQOkFZVAY4YQfMAbUVxFpL5WgrzEddYhzeW5jbmV0cwCDdGNwgiMog3VkcIIjKA
+- enr:-Ly4QBbaKRSX4SncCOxTTL611Kxlz-zYFrIn-k_63jGIPK_wbvFghVUHJICPCxufgTX5h79jvgfPr-2hEEQEdziGQ5MCh2F0dG5ldHOIAAAAAAAAAACEZXRoMpCCS-QxAgAAZP__________gmlkgnY0gmlwhAMazo6Jc2VjcDI1NmsxoQKt-kbM9isuWp8djhyEq6-4MLv1Sy7dOXeMOMdPgwu9LohzeW5jbmV0cwCDdGNwgiMog3VkcIIjKA
+- enr:-Ly4QKJ5BzgFyJ6BaTlGY0C8ROzl508U3GA6qxdG5Gn2hxdke6nQO187pYlLvhp82Dez4PQn436Fts1F0WAm-_5l2LACh2F0dG5ldHOIAAAAAAAAAACEZXRoMpCCS-QxAgAAZP__________gmlkgnY0gmlwhA-YLVKJc2VjcDI1NmsxoQI8_Lvr6p_TkcAu8KorKacfUEnoOon0tdO0qWhriPdBP4hzeW5jbmV0cwCDdGNwgiMog3VkcIIjKA
+- enr:-Ly4QJMtoiX2bPnVbiQOJCLbtUlqdqZk7kCJQln_W1bp1vOHcxWowE-iMXkKC4_uOb0o73wAW71WYi80Dlsg-7a5wiICh2F0dG5ldHOIAAAAAAAAAACEZXRoMpCCS-QxAgAAZP__________gmlkgnY0gmlwhDbP3KmJc2VjcDI1NmsxoQNvcfKYUqcemLFlpKxl7JcQJwQ3L9unYL44gY2aEiRnI4hzeW5jbmV0cwCDdGNwgiMog3VkcIIjKA
@@ -17,8 +17,8 @@ pub const VERSION: &str = git_version!(
    // NOTE: using --match instead of --exclude for compatibility with old Git
    "--match=thiswillnevermatchlol"
    ],
-   prefix = "Lighthouse/v3.3.0-",
-   fallback = "Lighthouse/v3.3.0"
+   prefix = "Lighthouse/v3.4.0-",
+   fallback = "Lighthouse/v3.4.0"
 );
 
 /// Returns `VERSION`, but with platform information appended to the end.
@@ -196,7 +196,7 @@ impl<'a> Builder<'a> {
        if path.exists() {
            return Err(Error::DepositDataAlreadyExists(path));
        } else {
-           let hex = format!("0x{}", hex::encode(&deposit_data));
+           let hex = format!("0x{}", hex::encode(deposit_data));
            File::options()
                .write(true)
                .read(true)
@@ -5,7 +5,7 @@ authors = ["Michael Sproul <michael@sigmaprime.io>"]
 edition = "2021"
 
 [dependencies]
-ethereum-types = "0.12.1"
+ethereum-types = "0.14.1"
 eth2_ssz_types = "0.2.2"
 eth2_hashing = "0.3.0"
 eth2_ssz_derive = "0.3.1"
@@ -5,7 +5,7 @@ authors = ["Michael Sproul <michael@sigmaprime.io>"]
 edition = "2021"
 
 [dependencies]
-ethereum-types = "0.12.1"
+ethereum-types = "0.14.1"
 eth2_hashing = "0.3.0"
 lazy_static = "1.4.0"
 safe_arith = { path = "../safe_arith" }
@@ -11,4 +11,4 @@ serde = { version = "1.0.116", features = ["derive"] }
 serde_derive = "1.0.116"
 serde_json = "1.0.58"
 hex = "0.4.2"
-ethereum-types = "0.12.1"
+ethereum-types = "0.14.1"
@@ -63,15 +63,15 @@ mod test {
    #[test]
    fn encoding() {
        let bytes = vec![0, 255];
-       let hex = encode(&bytes);
+       let hex = encode(bytes);
        assert_eq!(hex.as_str(), "0x00ff");
 
        let bytes = vec![];
-       let hex = encode(&bytes);
+       let hex = encode(bytes);
        assert_eq!(hex.as_str(), "0x");
 
        let bytes = vec![1, 2, 3];
-       let hex = encode(&bytes);
+       let hex = encode(bytes);
        assert_eq!(hex.as_str(), "0x010203");
    }
 }
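These test changes apply Clippy's needless-borrow lint: assuming encode accepts any impl AsRef<[u8]> (which the unchanged assertions suggest), the Vec can be passed by value and the explicit & adds nothing. A standalone sketch under that assumption about the signature:

    fn encode(bytes: impl AsRef<[u8]>) -> String {
        let hex: String = bytes.as_ref().iter().map(|b| format!("{:02x}", b)).collect();
        format!("0x{}", hex)
    }

    fn main() {
        // Both forms compile; Clippy flags the borrow as redundant.
        assert_eq!(encode(vec![0u8, 255]), "0x00ff");
        assert_eq!(encode(&[1u8, 2, 3]), "0x010203");
    }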
@@ -36,7 +36,7 @@ impl<'de> Visitor<'de> for QuantityVisitor {
        } else if stripped.starts_with('0') {
            Err(de::Error::custom("cannot have leading zero"))
        } else if stripped.len() % 2 != 0 {
-           hex::decode(&format!("0{}", stripped))
+           hex::decode(format!("0{}", stripped))
                .map_err(|e| de::Error::custom(format!("invalid hex ({:?})", e)))
        } else {
            hex::decode(stripped).map_err(|e| de::Error::custom(format!("invalid hex ({:?})", e)))
@@ -13,7 +13,7 @@ name = "ssz"
 eth2_ssz_derive = "0.3.1"
 
 [dependencies]
-ethereum-types = "0.12.1"
+ethereum-types = "0.14.1"
 smallvec = { version = "1.6.1", features = ["const_generics"] }
 itertools = "0.10.3"
 
@@ -660,7 +660,7 @@ impl<N: 'static + Unsigned> arbitrary::Arbitrary<'_> for Bitfield<Fixed<N>> {
        let size = N::to_usize();
        let mut vec = smallvec![0u8; size];
        u.fill_buffer(&mut vec)?;
-       Ok(Self::from_bytes(vec).map_err(|_| arbitrary::Error::IncorrectFormat)?)
+       Self::from_bytes(vec).map_err(|_| arbitrary::Error::IncorrectFormat)
    }
 }
 
@@ -672,7 +672,7 @@ impl<N: 'static + Unsigned> arbitrary::Arbitrary<'_> for Bitfield<Variable<N>> {
        let size = std::cmp::min(rand, max_size);
        let mut vec = smallvec![0u8; size];
        u.fill_buffer(&mut vec)?;
-       Ok(Self::from_bytes(vec).map_err(|_| arbitrary::Error::IncorrectFormat)?)
+       Self::from_bytes(vec).map_err(|_| arbitrary::Error::IncorrectFormat)
    }
 }
 
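Both hunks above are the same Clippy fix (needless_question_mark): wrapping a Result in Ok(..?) just unwraps and re-wraps it, so the expression can be returned directly. A minimal demo of the before/after:

    fn parse(s: &str) -> Result<u32, std::num::ParseIntError> {
        // Before: Ok(s.trim().parse()?)  <- flagged by needless_question_mark
        s.trim().parse() // After: return the Result as-is
    }

    fn main() {
        assert_eq!(parse(" 42 "), Ok(42));
    }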
|
Some files were not shown because too many files have changed in this diff Show More
Loading…
Reference in New Issue
Block a user