Commit 8a04c3428e: Merged with unstable
.github/workflows/docker-antithesis.yml (vendored, 2 lines changed)

@@ -17,7 +17,7 @@ jobs:
   build-docker:
     runs-on: ubuntu-22.04
     steps:
-      - uses: actions/checkout@v2
+      - uses: actions/checkout@v3
       - name: Update Rust
         run: rustup update stable
       - name: Dockerhub login
.github/workflows/docker.yml (vendored, 12 lines changed)

@@ -22,7 +22,7 @@ jobs:
   # `unstable`, but for now we keep the two parts of the version separate for backwards
   # compatibility.
   extract-version:
-    runs-on: ubuntu-18.04
+    runs-on: ubuntu-22.04
     steps:
       - name: Extract version (if stable)
         if: github.event.ref == 'refs/heads/stable'
@@ -44,7 +44,7 @@ jobs:
       VERSION_SUFFIX: ${{ env.VERSION_SUFFIX }}
   build-docker-single-arch:
     name: build-docker-${{ matrix.binary }}
-    runs-on: ubuntu-18.04
+    runs-on: ubuntu-22.04
     strategy:
       matrix:
         binary: [aarch64,
@@ -61,7 +61,7 @@ jobs:
       VERSION: ${{ needs.extract-version.outputs.VERSION }}
       VERSION_SUFFIX: ${{ needs.extract-version.outputs.VERSION_SUFFIX }}
     steps:
-      - uses: actions/checkout@v2
+      - uses: actions/checkout@v3
       - name: Update Rust
         run: rustup update stable
       - name: Dockerhub login
@@ -102,7 +102,7 @@ jobs:
           --push
   build-docker-multiarch:
     name: build-docker-multiarch${{ matrix.modernity }}
-    runs-on: ubuntu-18.04
+    runs-on: ubuntu-22.04
     needs: [build-docker-single-arch, extract-version]
     strategy:
       matrix:
@@ -123,13 +123,13 @@ jobs:
           --amend ${IMAGE_NAME}:${VERSION}-amd64${VERSION_SUFFIX}${{ matrix.modernity }};
           docker manifest push ${IMAGE_NAME}:${VERSION}${VERSION_SUFFIX}${{ matrix.modernity }}
   build-docker-lcli:
-    runs-on: ubuntu-18.04
+    runs-on: ubuntu-22.04
     needs: [extract-version]
     env:
       VERSION: ${{ needs.extract-version.outputs.VERSION }}
       VERSION_SUFFIX: ${{ needs.extract-version.outputs.VERSION_SUFFIX }}
     steps:
-      - uses: actions/checkout@v2
+      - uses: actions/checkout@v3
      - name: Dockerhub login
        run: |
          echo "${DOCKER_PASSWORD}" | docker login --username ${DOCKER_USERNAME} --password-stdin
.github/workflows/linkcheck.yml (vendored, 2 lines changed)

@@ -15,7 +15,7 @@ jobs:
 
     steps:
       - name: Checkout code
-        uses: actions/checkout@v2
+        uses: actions/checkout@v3
 
       - name: Create docker network
         run: docker network create book
.github/workflows/local-testnet.yml (vendored, 8 lines changed)

@@ -12,11 +12,11 @@ jobs:
     strategy:
       matrix:
         os:
-          - ubuntu-18.04
-          - macos-latest
+          - ubuntu-22.04
+          - macos-12
     runs-on: ${{ matrix.os }}
     steps:
-      - uses: actions/checkout@v1
+      - uses: actions/checkout@v3
 
       - name: Get latest version of stable Rust
         run: rustup update stable
@@ -28,7 +28,7 @@ jobs:
         run: npm install ganache@latest --global
 
       # https://github.com/actions/cache/blob/main/examples.md#rust---cargo
-      - uses: actions/cache@v2
+      - uses: actions/cache@v3
         id: cache-cargo
         with:
           path: |
.github/workflows/publish-crate.yml (vendored, 4 lines changed)

@@ -19,7 +19,7 @@ jobs:
     runs-on: ubuntu-latest
     steps:
       - name: Extract tag
-        run: echo "::set-output name=TAG::$(echo ${GITHUB_REF#refs/tags/})"
+        run: echo "TAG=$(echo ${GITHUB_REF#refs/tags/})" >> $GITHUB_OUTPUT
         id: extract_tag
     outputs:
       TAG: ${{ steps.extract_tag.outputs.TAG }}
@@ -30,7 +30,7 @@ jobs:
     env:
       TAG: ${{ needs.extract-tag.outputs.TAG }}
     steps:
-      - uses: actions/checkout@v2
+      - uses: actions/checkout@v3
       - name: Update Rust
         run: rustup update stable
       - name: Cargo login
.github/workflows/release.yml (vendored, 8 lines changed)

@@ -16,7 +16,7 @@ jobs:
     runs-on: ubuntu-latest
     steps:
       - name: Extract version
-        run: echo "::set-output name=VERSION::$(echo ${GITHUB_REF#refs/tags/})"
+        run: echo "VERSION=$(echo ${GITHUB_REF#refs/tags/})" >> $GITHUB_OUTPUT
         id: extract_version
     outputs:
       VERSION: ${{ steps.extract_version.outputs.VERSION }}
@@ -62,7 +62,7 @@ jobs:
     needs: extract-version
     steps:
       - name: Checkout sources
-        uses: actions/checkout@v2
+        uses: actions/checkout@v3
       - name: Build toolchain
         uses: actions-rs/toolchain@v1
         with:
@@ -199,7 +199,7 @@ jobs:
     steps:
       # This is necessary for generating the changelog. It has to come before "Download Artifacts" or else it deletes the artifacts.
       - name: Checkout sources
-        uses: actions/checkout@v2
+        uses: actions/checkout@v3
         with:
           fetch-depth: 0
 
@@ -216,7 +216,7 @@ jobs:
 
       - name: Generate Full Changelog
         id: changelog
-        run: echo "::set-output name=CHANGELOG::$(git log --pretty=format:"- %s" $(git describe --tags --abbrev=0 ${{ env.VERSION }}^)..${{ env.VERSION }})"
+        run: echo "CHANGELOG=$(git log --pretty=format:"- %s" $(git describe --tags --abbrev=0 ${{ env.VERSION }}^)..${{ env.VERSION }})" >> $GITHUB_OUTPUT
 
       - name: Create Release Draft
         env:
.github/workflows/test-suite.yml (vendored, 103 lines changed)

@@ -24,12 +24,12 @@ jobs:
   extract-msrv:
     runs-on: ubuntu-latest
     steps:
-      - uses: actions/checkout@v1
+      - uses: actions/checkout@v3
       - name: Extract Minimum Supported Rust Version (MSRV)
         run: |
           metadata=$(cargo metadata --no-deps --format-version 1)
           msrv=$(echo $metadata | jq -r '.packages | map(select(.name == "lighthouse")) | .[0].rust_version')
-          echo "::set-output name=MSRV::$msrv"
+          echo "MSRV=$msrv" >> $GITHUB_OUTPUT
         id: extract_msrv
     outputs:
       MSRV: ${{ steps.extract_msrv.outputs.MSRV }}
@@ -37,7 +37,7 @@ jobs:
     name: cargo-fmt
     runs-on: ubuntu-latest
     steps:
-      - uses: actions/checkout@v1
+      - uses: actions/checkout@v3
       - name: Get latest version of stable Rust
         run: rustup update stable
       - name: Check formatting with cargo fmt
@@ -47,11 +47,13 @@ jobs:
     runs-on: ubuntu-latest
     needs: cargo-fmt
     steps:
-      - uses: actions/checkout@v1
+      - uses: actions/checkout@v3
       - name: Get latest version of stable Rust
         run: rustup update stable
       - name: Install Protoc
         uses: arduino/setup-protoc@v1
+        with:
+          repo-token: ${{ secrets.GITHUB_TOKEN }}
       - name: Install ganache
         run: sudo npm install -g ganache
       - name: Run tests in release
@@ -61,7 +63,7 @@ jobs:
     runs-on: windows-2019
     needs: cargo-fmt
     steps:
-      - uses: actions/checkout@v1
+      - uses: actions/checkout@v3
       - name: Get latest version of stable Rust
         run: rustup update stable
       - name: Use Node.js
@@ -89,11 +91,13 @@ jobs:
     runs-on: ubuntu-latest
     needs: cargo-fmt
     steps:
-      - uses: actions/checkout@v1
+      - uses: actions/checkout@v3
       - name: Get latest version of stable Rust
         run: rustup update stable
       - name: Install Protoc
         uses: arduino/setup-protoc@v1
+        with:
+          repo-token: ${{ secrets.GITHUB_TOKEN }}
       - name: Run beacon_chain tests for all known forks
         run: make test-beacon-chain
   op-pool-tests:
@@ -101,11 +105,13 @@ jobs:
     runs-on: ubuntu-latest
     needs: cargo-fmt
     steps:
-      - uses: actions/checkout@v1
+      - uses: actions/checkout@v3
       - name: Get latest version of stable Rust
         run: rustup update stable
       - name: Install Protoc
         uses: arduino/setup-protoc@v1
+        with:
+          repo-token: ${{ secrets.GITHUB_TOKEN }}
       - name: Run operation_pool tests for all known forks
         run: make test-op-pool
   slasher-tests:
@@ -113,7 +119,7 @@ jobs:
     runs-on: ubuntu-latest
     needs: cargo-fmt
     steps:
-      - uses: actions/checkout@v1
+      - uses: actions/checkout@v3
       - name: Get latest version of stable Rust
         run: rustup update stable
       - name: Run slasher tests for all supported backends
@@ -123,11 +129,13 @@ jobs:
     runs-on: ubuntu-22.04
     needs: cargo-fmt
     steps:
-      - uses: actions/checkout@v1
+      - uses: actions/checkout@v3
       - name: Get latest version of stable Rust
         run: rustup update stable
       - name: Install Protoc
         uses: arduino/setup-protoc@v1
+        with:
+          repo-token: ${{ secrets.GITHUB_TOKEN }}
       - name: Install ganache
         run: sudo npm install -g ganache
       - name: Run tests in debug
@@ -137,11 +145,13 @@ jobs:
     runs-on: ubuntu-latest
     needs: cargo-fmt
     steps:
-      - uses: actions/checkout@v1
+      - uses: actions/checkout@v3
       - name: Get latest version of stable Rust
         run: rustup update stable
       - name: Install Protoc
         uses: arduino/setup-protoc@v1
+        with:
+          repo-token: ${{ secrets.GITHUB_TOKEN }}
       - name: Run state_transition_vectors in release.
         run: make run-state-transition-tests
   ef-tests-ubuntu:
@@ -149,11 +159,13 @@ jobs:
     runs-on: ubuntu-latest
     needs: cargo-fmt
     steps:
-      - uses: actions/checkout@v1
+      - uses: actions/checkout@v3
       - name: Get latest version of stable Rust
         run: rustup update stable
       - name: Install Protoc
         uses: arduino/setup-protoc@v1
+        with:
+          repo-token: ${{ secrets.GITHUB_TOKEN }}
       - name: Run consensus-spec-tests with blst, milagro and fake_crypto
         run: make test-ef
   dockerfile-ubuntu:
@@ -161,7 +173,7 @@ jobs:
     runs-on: ubuntu-latest
     needs: cargo-fmt
     steps:
-      - uses: actions/checkout@v1
+      - uses: actions/checkout@v3
       - name: Get latest version of stable Rust
         run: rustup update stable
       - name: Build the root Dockerfile
@@ -173,11 +185,13 @@ jobs:
     runs-on: ubuntu-latest
     needs: cargo-fmt
     steps:
-      - uses: actions/checkout@v1
+      - uses: actions/checkout@v3
       - name: Get latest version of stable Rust
         run: rustup update stable
       - name: Install Protoc
         uses: arduino/setup-protoc@v1
+        with:
+          repo-token: ${{ secrets.GITHUB_TOKEN }}
       - name: Install ganache
         run: sudo npm install -g ganache
       - name: Run the beacon chain sim that starts from an eth1 contract
@@ -187,11 +201,13 @@ jobs:
     runs-on: ubuntu-latest
     needs: cargo-fmt
     steps:
-      - uses: actions/checkout@v1
+      - uses: actions/checkout@v3
       - name: Get latest version of stable Rust
         run: rustup update stable
       - name: Install Protoc
         uses: arduino/setup-protoc@v1
+        with:
+          repo-token: ${{ secrets.GITHUB_TOKEN }}
       - name: Install ganache
         run: sudo npm install -g ganache
       - name: Run the beacon chain sim and go through the merge transition
@@ -201,11 +217,13 @@ jobs:
     runs-on: ubuntu-latest
     needs: cargo-fmt
     steps:
-      - uses: actions/checkout@v1
+      - uses: actions/checkout@v3
       - name: Get latest version of stable Rust
         run: rustup update stable
       - name: Install Protoc
         uses: arduino/setup-protoc@v1
+        with:
+          repo-token: ${{ secrets.GITHUB_TOKEN }}
       - name: Install ganache
         run: sudo npm install -g ganache
       - name: Run the beacon chain sim without an eth1 connection
@@ -215,11 +233,13 @@ jobs:
     runs-on: ubuntu-latest
     needs: cargo-fmt
     steps:
-      - uses: actions/checkout@v1
+      - uses: actions/checkout@v3
       - name: Get latest version of stable Rust
         run: rustup update stable
       - name: Install Protoc
         uses: arduino/setup-protoc@v1
+        with:
+          repo-token: ${{ secrets.GITHUB_TOKEN }}
       - name: Install ganache
         run: sudo npm install -g ganache
       - name: Run the syncing simulator
@@ -229,11 +249,13 @@ jobs:
     runs-on: ubuntu-latest
     needs: cargo-fmt
     steps:
-      - uses: actions/checkout@v1
+      - uses: actions/checkout@v3
       - name: Get latest version of stable Rust
         run: rustup update stable
       - name: Install Protoc
         uses: arduino/setup-protoc@v1
+        with:
+          repo-token: ${{ secrets.GITHUB_TOKEN }}
       - name: Install ganache
         run: sudo npm install -g ganache
       - name: Install lighthouse and lcli
@@ -253,17 +275,19 @@ jobs:
     runs-on: ubuntu-latest
     needs: cargo-fmt
     steps:
-      - uses: actions/checkout@v1
-      - uses: actions/setup-go@v2
+      - uses: actions/checkout@v3
+      - uses: actions/setup-go@v3
         with:
           go-version: '1.17'
-      - uses: actions/setup-dotnet@v1
+      - uses: actions/setup-dotnet@v3
         with:
           dotnet-version: '6.0.201'
       - name: Get latest version of stable Rust
         run: rustup update stable
       - name: Install Protoc
         uses: arduino/setup-protoc@v1
+        with:
+          repo-token: ${{ secrets.GITHUB_TOKEN }}
       - name: Run exec engine integration tests in release
         run: make test-exec-engine
   check-benchmarks:
@@ -271,11 +295,13 @@ jobs:
     runs-on: ubuntu-latest
     needs: cargo-fmt
     steps:
-      - uses: actions/checkout@v1
+      - uses: actions/checkout@v3
       - name: Get latest version of stable Rust
         run: rustup update stable
       - name: Install Protoc
         uses: arduino/setup-protoc@v1
+        with:
+          repo-token: ${{ secrets.GITHUB_TOKEN }}
       - name: Typecheck benchmark code without running it
         run: make check-benches
   check-consensus:
@@ -283,7 +309,7 @@ jobs:
     runs-on: ubuntu-latest
     needs: cargo-fmt
     steps:
-      - uses: actions/checkout@v1
+      - uses: actions/checkout@v3
       - name: Get latest version of stable Rust
         run: rustup update stable
       - name: Typecheck consensus code in strict mode
@@ -293,11 +319,13 @@ jobs:
     runs-on: ubuntu-latest
     needs: cargo-fmt
     steps:
-      - uses: actions/checkout@v1
+      - uses: actions/checkout@v3
       - name: Get latest version of stable Rust
         run: rustup update stable
       - name: Install Protoc
         uses: arduino/setup-protoc@v1
+        with:
+          repo-token: ${{ secrets.GITHUB_TOKEN }}
       - name: Lint code for quality and style with Clippy
         run: make lint
       - name: Certify Cargo.lock freshness
@@ -308,7 +336,7 @@ jobs:
     needs: cargo-fmt
     continue-on-error: true
     steps:
-      - uses: actions/checkout@v1
+      - uses: actions/checkout@v3
       - name: Install SigP Clippy fork
         run: |
           cd ..
@@ -319,6 +347,8 @@ jobs:
           cargo build --release --bin cargo-clippy --bin clippy-driver -Zunstable-options --out-dir $(rustc --print=sysroot)/bin
       - name: Install Protoc
         uses: arduino/setup-protoc@v1
+        with:
+          repo-token: ${{ secrets.GITHUB_TOKEN }}
       - name: Run Clippy with the disallowed-from-async lint
         run: make nightly-lint
   check-msrv:
@@ -326,11 +356,13 @@ jobs:
     runs-on: ubuntu-latest
     needs: [cargo-fmt, extract-msrv]
     steps:
-      - uses: actions/checkout@v1
+      - uses: actions/checkout@v3
       - name: Install Rust @ MSRV (${{ needs.extract-msrv.outputs.MSRV }})
         run: rustup override set ${{ needs.extract-msrv.outputs.MSRV }}
       - name: Install Protoc
         uses: arduino/setup-protoc@v1
+        with:
+          repo-token: ${{ secrets.GITHUB_TOKEN }}
       - name: Run cargo check
         run: cargo check --workspace
   arbitrary-check:
@@ -338,7 +370,7 @@ jobs:
     runs-on: ubuntu-latest
     needs: cargo-fmt
     steps:
-      - uses: actions/checkout@v1
+      - uses: actions/checkout@v3
       - name: Get latest version of stable Rust
         run: rustup update stable
       - name: Validate state_processing feature arbitrary-fuzz
@@ -348,7 +380,7 @@ jobs:
     runs-on: ubuntu-latest
     needs: cargo-fmt
     steps:
-      - uses: actions/checkout@v1
+      - uses: actions/checkout@v3
       - name: Get latest version of stable Rust
         run: rustup update stable
       - name: Run cargo audit to identify known security vulnerabilities reported to the RustSec Advisory Database
@@ -358,7 +390,7 @@ jobs:
     runs-on: ubuntu-latest
     needs: cargo-fmt
     steps:
-      - uses: actions/checkout@v1
+      - uses: actions/checkout@v3
       - name: Run cargo vendor to make sure dependencies can be vendored for packaging, reproducibility and archival purpose
         run: CARGO_HOME=$(readlink -f $HOME) make vendor
   cargo-udeps:
@@ -366,13 +398,15 @@ jobs:
     runs-on: ubuntu-latest
     needs: cargo-fmt
     steps:
-      - uses: actions/checkout@v1
+      - uses: actions/checkout@v3
       - name: Install Rust (${{ env.PINNED_NIGHTLY }})
         run: rustup toolchain install $PINNED_NIGHTLY
       # NOTE: cargo-udeps version is pinned until this issue is resolved:
       # https://github.com/est31/cargo-udeps/issues/135
       - name: Install Protoc
         uses: arduino/setup-protoc@v1
+        with:
+          repo-token: ${{ secrets.GITHUB_TOKEN }}
       - name: Install cargo-udeps
         run: cargo install cargo-udeps --locked --force --version 0.1.30
       - name: Create Cargo config dir
@@ -384,3 +418,14 @@ jobs:
         env:
           # Allow warnings on Nightly
           RUSTFLAGS: ""
+  compile-with-beta-compiler:
+    name: compile-with-beta-compiler
+    runs-on: ubuntu-latest
+    steps:
+      - uses: actions/checkout@v3
+      - name: Install dependencies
+        run: sudo apt install -y git gcc g++ make cmake pkg-config llvm-dev libclang-dev clang protobuf-compiler
+      - name: Use Rust beta
+        run: rustup override set beta
+      - name: Run make
+        run: make
Cargo.lock (generated, 957 lines changed): diff suppressed because it is too large.
@@ -37,6 +37,7 @@ members = [
     "common/oneshot_broadcast",
     "common/sensitive_url",
     "common/slot_clock",
+    "common/system_health",
     "common/task_executor",
     "common/target_check",
     "common/test_random_derive",
@@ -114,7 +114,7 @@ pub fn cli_app<'a, 'b>() -> App<'a, 'b> {
 
 pub fn cli_run<T: EthSpec>(
     matches: &ArgMatches,
-    mut env: Environment<T>,
+    env: Environment<T>,
     validator_dir: PathBuf,
 ) -> Result<(), String> {
     let spec = env.core_context().eth2_config.spec;
@@ -1,6 +1,6 @@
 [package]
 name = "beacon_node"
-version = "3.2.1"
+version = "3.3.0"
 authors = ["Paul Hauner <paul@paulhauner.com>", "Age Manning <Age@AgeManning.com"]
 edition = "2021"
 
@@ -20,8 +20,7 @@ use crate::errors::{BeaconChainError as Error, BlockProductionError};
 use crate::eth1_chain::{Eth1Chain, Eth1ChainBackend};
 use crate::eth1_finalization_cache::{Eth1FinalizationCache, Eth1FinalizationData};
 use crate::events::ServerSentEventHandler;
-use crate::execution_payload::get_execution_payload;
-use crate::execution_payload::PreparePayloadHandle;
+use crate::execution_payload::{get_execution_payload, NotifyExecutionLayer, PreparePayloadHandle};
 use crate::fork_choice_signal::{ForkChoiceSignalRx, ForkChoiceSignalTx, ForkChoiceWaitResult};
 use crate::head_tracker::HeadTracker;
 use crate::historical_blocks::HistoricalBlockError;
@@ -80,7 +79,7 @@ use ssz::Encode;
 #[cfg(feature = "withdrawals")]
 use state_processing::per_block_processing::get_expected_withdrawals;
 use state_processing::{
-    common::{get_attesting_indices_from_state, get_indexed_attestation},
+    common::get_attesting_indices_from_state,
     per_block_processing,
     per_block_processing::{
         errors::AttestationValidationError, verify_attestation_for_block_inclusion,
@@ -1010,6 +1009,46 @@ impl<T: BeaconChainTypes> BeaconChain<T> {
         Ok(self.store.get_state(state_root, slot)?)
     }
 
+    /// Run a function with mutable access to a state for `block_root`.
+    ///
+    /// The primary purpose of this function is to borrow a state with its tree hash cache
+    /// from the snapshot cache *without moving it*. This means that calls to this function should
+    /// be kept to an absolute minimum, because holding the snapshot cache lock has the ability
+    /// to delay block import.
+    ///
+    /// If there is no appropriate state in the snapshot cache then one will be loaded from disk.
+    /// If no state is found on disk then `Ok(None)` will be returned.
+    ///
+    /// The 2nd parameter to the closure is a bool indicating whether the snapshot cache was used,
+    /// which can inform logging/metrics.
+    ///
+    /// NOTE: the medium-term plan is to delete this function and the snapshot cache in favour
+    /// of `tree-states`, where all caches are CoW and everything is good in the world.
+    pub fn with_mutable_state_for_block<F, V, Payload: AbstractExecPayload<T::EthSpec>>(
+        &self,
+        block: &SignedBeaconBlock<T::EthSpec, Payload>,
+        block_root: Hash256,
+        f: F,
+    ) -> Result<Option<V>, Error>
+    where
+        F: FnOnce(&mut BeaconState<T::EthSpec>, bool) -> Result<V, Error>,
+    {
+        if let Some(state) = self
+            .snapshot_cache
+            .try_write_for(BLOCK_PROCESSING_CACHE_LOCK_TIMEOUT)
+            .ok_or(Error::SnapshotCacheLockTimeout)?
+            .borrow_unadvanced_state_mut(block_root)
+        {
+            let cache_hit = true;
+            f(state, cache_hit).map(Some)
+        } else if let Some(mut state) = self.get_state(&block.state_root(), Some(block.slot()))? {
+            let cache_hit = false;
+            f(&mut state, cache_hit).map(Some)
+        } else {
+            Ok(None)
+        }
+    }
+
     /// Return the sync committee at `slot + 1` from the canonical chain.
     ///
     /// This is useful when dealing with sync committee messages, because messages are signed
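Note: the doc comment above describes a cache-first, disk-fallback borrow. A minimal, self-contained sketch of that control flow follows; `Chain`, `State`, and `Root` here are illustrative stand-ins, not Lighthouse's real types, and only the lookup ordering is the point.

    // Prefer a mutable borrow from the snapshot cache (no move), fall back to
    // loading from "disk", and report None when neither source has the state.
    use std::collections::HashMap;

    type State = Vec<u64>; // stand-in for BeaconState
    type Root = u64;       // stand-in for Hash256

    struct Chain {
        snapshot_cache: HashMap<Root, State>,
        disk: HashMap<Root, State>,
    }

    impl Chain {
        fn with_mutable_state_for_block<F, V>(&mut self, root: Root, f: F) -> Option<V>
        where
            F: FnOnce(&mut State, bool) -> V,
        {
            if let Some(state) = self.snapshot_cache.get_mut(&root) {
                // Cache hit: borrow in place, without moving the state out.
                Some(f(state, true))
            } else if let Some(mut state) = self.disk.get(&root).cloned() {
                // Cache miss: load a copy from disk and mutate that instead.
                Some(f(&mut state, false))
            } else {
                None
            }
        }
    }

    fn main() {
        let mut chain = Chain {
            snapshot_cache: HashMap::from([(1, vec![10, 20])]),
            disk: HashMap::from([(2, vec![30])]),
        };
        // The closure's second argument mirrors the real API's cache-hit flag.
        let hit = chain.with_mutable_state_for_block(1, |s, cached| { s.push(0); cached });
        assert_eq!(hit, Some(true));
        let miss = chain.with_mutable_state_for_block(2, |_, cached| cached);
        assert_eq!(miss, Some(false));
        assert_eq!(chain.with_mutable_state_for_block(3, |_, c| c), None);
    }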
@@ -2367,6 +2406,7 @@ impl<T: BeaconChainTypes> BeaconChain<T> {
         self: &Arc<Self>,
         chain_segment: Vec<Arc<SignedBeaconBlock<T::EthSpec>>>,
         count_unrealized: CountUnrealized,
+        notify_execution_layer: NotifyExecutionLayer,
     ) -> ChainSegmentResult<T::EthSpec> {
         let mut imported_blocks = 0;
 
@@ -2435,6 +2475,7 @@ impl<T: BeaconChainTypes> BeaconChain<T> {
                     signature_verified_block.block_root(),
                     signature_verified_block,
                     count_unrealized,
+                    notify_execution_layer,
                 )
                 .await
             {
@@ -2523,6 +2564,7 @@ impl<T: BeaconChainTypes> BeaconChain<T> {
         block_root: Hash256,
         unverified_block: B,
         count_unrealized: CountUnrealized,
+        notify_execution_layer: NotifyExecutionLayer,
     ) -> Result<Hash256, BlockError<T::EthSpec>> {
         // Start the Prometheus timer.
         let _full_timer = metrics::start_timer(&metrics::BLOCK_PROCESSING_TIMES);
@@ -2536,8 +2578,11 @@ impl<T: BeaconChainTypes> BeaconChain<T> {
         // A small closure to group the verification and import errors.
         let chain = self.clone();
         let import_block = async move {
-            let execution_pending =
-                unverified_block.into_execution_pending_block(block_root, &chain)?;
+            let execution_pending = unverified_block.into_execution_pending_block(
+                block_root,
+                &chain,
+                notify_execution_layer,
+            )?;
             chain
                 .import_execution_pending_block(execution_pending, count_unrealized)
                 .await
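The hunks above thread a new `NotifyExecutionLayer` argument from `process_chain_segment` down through `process_block` into `into_execution_pending_block`. A rough sketch of that threading, assuming (from the diff alone) that `NotifyExecutionLayer` is a simple yes/no flag; the surrounding free functions are hypothetical stand-ins for the real methods:

    #[derive(Clone, Copy, Debug)]
    enum NotifyExecutionLayer { Yes, No }

    fn process_block(block: &str, notify_execution_layer: NotifyExecutionLayer) {
        // The flag is forwarded rather than decided locally, so callers can
        // choose whether this block should be sent to the execution layer.
        into_execution_pending_block(block, notify_execution_layer);
    }

    fn into_execution_pending_block(block: &str, notify: NotifyExecutionLayer) {
        match notify {
            NotifyExecutionLayer::Yes => println!("{block}: notifying execution layer"),
            NotifyExecutionLayer::No => println!("{block}: skipping execution layer notification"),
        }
    }

    fn main() {
        process_block("block_a", NotifyExecutionLayer::Yes);
        process_block("block_b", NotifyExecutionLayer::No);
    }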
@@ -2607,6 +2652,7 @@ impl<T: BeaconChainTypes> BeaconChain<T> {
             confirmed_state_roots,
             payload_verification_handle,
             parent_eth1_finalization_data,
+            consensus_context,
         } = execution_pending_block;
 
         let PayloadVerificationOutcome {
@@ -2660,6 +2706,7 @@ impl<T: BeaconChainTypes> BeaconChain<T> {
                     count_unrealized,
                     parent_block,
                     parent_eth1_finalization_data,
+                    consensus_context,
                 )
             },
             "payload_verification_handle",
@@ -2685,70 +2732,36 @@ impl<T: BeaconChainTypes> BeaconChain<T> {
         count_unrealized: CountUnrealized,
         parent_block: SignedBlindedBeaconBlock<T::EthSpec>,
         parent_eth1_finalization_data: Eth1FinalizationData,
+        mut consensus_context: ConsensusContext<T::EthSpec>,
     ) -> Result<Hash256, BlockError<T::EthSpec>> {
+        // ----------------------------- BLOCK NOT YET ATTESTABLE ----------------------------------
+        // Everything in this initial section is on the hot path between processing the block and
+        // being able to attest to it. DO NOT add any extra processing in this initial section
+        // unless it must run before fork choice.
+        // -----------------------------------------------------------------------------------------
         let current_slot = self.slot()?;
         let current_epoch = current_slot.epoch(T::EthSpec::slots_per_epoch());
-        let attestation_observation_timer =
-            metrics::start_timer(&metrics::BLOCK_PROCESSING_ATTESTATION_OBSERVATION);
-
-        // Iterate through the attestations in the block and register them as an "observed
-        // attestation". This will stop us from propagating them on the gossip network.
-        for a in signed_block.message().body().attestations() {
-            match self.observed_attestations.write().observe_item(a, None) {
-                // If the observation was successful or if the slot for the attestation was too
-                // low, continue.
-                //
-                // We ignore `SlotTooLow` since this will be very common whilst syncing.
-                Ok(_) | Err(AttestationObservationError::SlotTooLow { .. }) => {}
-                Err(e) => return Err(BlockError::BeaconChainError(e.into())),
-            }
-        }
-
-        metrics::stop_timer(attestation_observation_timer);
-
-        // If a slasher is configured, provide the attestations from the block.
-        if let Some(slasher) = self.slasher.as_ref() {
-            for attestation in signed_block.message().body().attestations() {
-                let committee =
-                    state.get_beacon_committee(attestation.data.slot, attestation.data.index)?;
-                let indexed_attestation = get_indexed_attestation(committee.committee, attestation)
-                    .map_err(|e| BlockError::BeaconChainError(e.into()))?;
-                slasher.accept_attestation(indexed_attestation);
-            }
-        }
+        let block = signed_block.message();
+        let post_exec_timer = metrics::start_timer(&metrics::BLOCK_PROCESSING_POST_EXEC_PROCESSING);
+
+        // Check against weak subjectivity checkpoint.
+        self.check_block_against_weak_subjectivity_checkpoint(block, block_root, &state)?;
 
         // If there are new validators in this block, update our pubkey cache.
         //
-        // We perform this _before_ adding the block to fork choice because the pubkey cache is
-        // used by attestation processing which will only process an attestation if the block is
-        // known to fork choice. This ordering ensure that the pubkey cache is always up-to-date.
-        self.validator_pubkey_cache
+        // The only keys imported here will be ones for validators deposited in this block, because
+        // the cache *must* already have been updated for the parent block when it was imported.
+        // Newly deposited validators are not active and their keys are not required by other parts
+        // of block processing. The reason we do this here and not after making the block attestable
+        // is so we don't have to think about lock ordering with respect to the fork choice lock.
+        // There are a bunch of places where we lock both fork choice and the pubkey cache and it
+        // would be difficult to check that they all lock fork choice first.
+        let mut kv_store_ops = self
+            .validator_pubkey_cache
             .try_write_for(VALIDATOR_PUBKEY_CACHE_LOCK_TIMEOUT)
             .ok_or(Error::ValidatorPubkeyCacheLockTimeout)?
             .import_new_pubkeys(&state)?;
 
-        // For the current and next epoch of this state, ensure we have the shuffling from this
-        // block in our cache.
-        for relative_epoch in &[RelativeEpoch::Current, RelativeEpoch::Next] {
-            let shuffling_id = AttestationShufflingId::new(block_root, &state, *relative_epoch)?;
-
-            let shuffling_is_cached = self
-                .shuffling_cache
-                .try_read_for(ATTESTATION_CACHE_LOCK_TIMEOUT)
-                .ok_or(Error::AttestationCacheLockTimeout)?
-                .contains(&shuffling_id);
-
-            if !shuffling_is_cached {
-                state.build_committee_cache(*relative_epoch, &self.spec)?;
-                let committee_cache = state.committee_cache(*relative_epoch)?;
-                self.shuffling_cache
-                    .try_write_for(ATTESTATION_CACHE_LOCK_TIMEOUT)
-                    .ok_or(Error::AttestationCacheLockTimeout)?
-                    .insert_committee_cache(shuffling_id, committee_cache);
-            }
-        }
-
         // Apply the state to the attester cache, only if it is from the previous epoch or later.
         //
         // In a perfect scenario there should be no need to add previous-epoch states to the cache.
@@ -2760,52 +2773,7 @@ impl<T: BeaconChainTypes> BeaconChain<T> {
                 .map_err(BeaconChainError::from)?;
         }
 
-        // Alias for readability.
-        let block = signed_block.message();
-
-        // Only perform the weak subjectivity check if it was configured.
-        if let Some(wss_checkpoint) = self.config.weak_subjectivity_checkpoint {
-            // Note: we're using the finalized checkpoint from the head state, rather than fork
-            // choice.
-            //
-            // We are doing this to ensure that we detect changes in finalization. It's possible
-            // that fork choice has already been updated to the finalized checkpoint in the block
-            // we're importing.
-            let current_head_finalized_checkpoint =
-                self.canonical_head.cached_head().finalized_checkpoint();
-            // Compare the existing finalized checkpoint with the incoming block's finalized checkpoint.
-            let new_finalized_checkpoint = state.finalized_checkpoint();
-
-            // This ensures we only perform the check once.
-            if (current_head_finalized_checkpoint.epoch < wss_checkpoint.epoch)
-                && (wss_checkpoint.epoch <= new_finalized_checkpoint.epoch)
-            {
-                if let Err(e) =
-                    self.verify_weak_subjectivity_checkpoint(wss_checkpoint, block_root, &state)
-                {
-                    let mut shutdown_sender = self.shutdown_sender();
-                    crit!(
-                        self.log,
-                        "Weak subjectivity checkpoint verification failed while importing block!";
-                        "block_root" => ?block_root,
-                        "parent_root" => ?block.parent_root(),
-                        "old_finalized_epoch" => ?current_head_finalized_checkpoint.epoch,
-                        "new_finalized_epoch" => ?new_finalized_checkpoint.epoch,
-                        "weak_subjectivity_epoch" => ?wss_checkpoint.epoch,
-                        "error" => ?e,
-                    );
-                    crit!(self.log, "You must use the `--purge-db` flag to clear the database and restart sync. You may be on a hostile network.");
-                    shutdown_sender
-                        .try_send(ShutdownReason::Failure(
-                            "Weak subjectivity checkpoint verification failed. Provided block root is not a checkpoint."
-                        ))
-                        .map_err(|err| BlockError::BeaconChainError(BeaconChainError::WeakSubjectivtyShutdownError(err)))?;
-                    return Err(BlockError::WeakSubjectivityConflict);
-                }
-            }
-        }
-
-        // Take an exclusive write-lock on fork choice. It's very important prevent deadlocks by
+        // Take an exclusive write-lock on fork choice. It's very important to prevent deadlocks by
         // avoiding taking other locks whilst holding this lock.
         let mut fork_choice = self.canonical_head.fork_choice_write_lock();
 
@@ -2835,77 +2803,6 @@ impl<T: BeaconChainTypes> BeaconChain<T> {
                 .map_err(|e| BlockError::BeaconChainError(e.into()))?;
         }
 
-        // Allow the validator monitor to learn about a new valid state.
-        self.validator_monitor
-            .write()
-            .process_valid_state(current_slot.epoch(T::EthSpec::slots_per_epoch()), &state);
-        let validator_monitor = self.validator_monitor.read();
-
-        // Register each attester slashing in the block with fork choice.
-        for attester_slashing in block.body().attester_slashings() {
-            fork_choice.on_attester_slashing(attester_slashing);
-        }
-
-        // Register each attestation in the block with the fork choice service.
-        for attestation in block.body().attestations() {
-            let _fork_choice_attestation_timer =
-                metrics::start_timer(&metrics::FORK_CHOICE_PROCESS_ATTESTATION_TIMES);
-            let attestation_target_epoch = attestation.data.target.epoch;
-
-            let committee =
-                state.get_beacon_committee(attestation.data.slot, attestation.data.index)?;
-            let indexed_attestation = get_indexed_attestation(committee.committee, attestation)
-                .map_err(|e| BlockError::BeaconChainError(e.into()))?;
-
-            match fork_choice.on_attestation(
-                current_slot,
-                &indexed_attestation,
-                AttestationFromBlock::True,
-                &self.spec,
-            ) {
-                Ok(()) => Ok(()),
-                // Ignore invalid attestations whilst importing attestations from a block. The
-                // block might be very old and therefore the attestations useless to fork choice.
-                Err(ForkChoiceError::InvalidAttestation(_)) => Ok(()),
-                Err(e) => Err(BlockError::BeaconChainError(e.into())),
-            }?;
-
-            // To avoid slowing down sync, only register attestations for the
-            // `observed_block_attesters` if they are from the previous epoch or later.
-            if attestation_target_epoch + 1 >= current_epoch {
-                let mut observed_block_attesters = self.observed_block_attesters.write();
-                for &validator_index in &indexed_attestation.attesting_indices {
-                    if let Err(e) = observed_block_attesters
-                        .observe_validator(attestation_target_epoch, validator_index as usize)
-                    {
-                        debug!(
-                            self.log,
-                            "Failed to register observed block attester";
-                            "error" => ?e,
-                            "epoch" => attestation_target_epoch,
-                            "validator_index" => validator_index,
-                        )
-                    }
-                }
-            }
-
-            // Only register this with the validator monitor when the block is sufficiently close to
-            // the current slot.
-            if VALIDATOR_MONITOR_HISTORIC_EPOCHS as u64 * T::EthSpec::slots_per_epoch()
-                + block.slot().as_u64()
-                >= current_slot.as_u64()
-            {
-                match fork_choice.get_block(&block.parent_root()) {
-                    Some(parent_block) => validator_monitor.register_attestation_in_block(
-                        &indexed_attestation,
-                        parent_block.slot,
-                        &self.spec,
-                    ),
-                    None => warn!(self.log, "Failed to get parent block"; "slot" => %block.slot()),
-                }
-            }
-        }
-
         // If the block is recent enough and it was not optimistically imported, check to see if it
         // becomes the head block. If so, apply it to the early attester cache. This will allow
         // attestations to the block without waiting for the block and state to be inserted to the
@@ -2954,56 +2851,28 @@ impl<T: BeaconChainTypes> BeaconChain<T> {
                 ),
             }
         }
+        drop(post_exec_timer);
 
-        // Register sync aggregate with validator monitor
-        if let Ok(sync_aggregate) = block.body().sync_aggregate() {
-            // `SyncCommittee` for the sync_aggregate should correspond to the duty slot
-            let duty_epoch = block.slot().epoch(T::EthSpec::slots_per_epoch());
-            let sync_committee = self.sync_committee_at_epoch(duty_epoch)?;
-            let participant_pubkeys = sync_committee
-                .pubkeys
-                .iter()
-                .zip(sync_aggregate.sync_committee_bits.iter())
-                .filter_map(|(pubkey, bit)| bit.then_some(pubkey))
-                .collect::<Vec<_>>();
-
-            validator_monitor.register_sync_aggregate_in_block(
-                block.slot(),
-                block.parent_root(),
-                participant_pubkeys,
-            );
-        }
-
-        for exit in block.body().voluntary_exits() {
-            validator_monitor.register_block_voluntary_exit(&exit.message)
-        }
-
-        for slashing in block.body().attester_slashings() {
-            validator_monitor.register_block_attester_slashing(slashing)
-        }
-
-        for slashing in block.body().proposer_slashings() {
-            validator_monitor.register_block_proposer_slashing(slashing)
-        }
-
-        drop(validator_monitor);
-
-        // Only present some metrics for blocks from the previous epoch or later.
-        //
-        // This helps avoid noise in the metrics during sync.
-        if block.slot().epoch(T::EthSpec::slots_per_epoch()) + 1 >= self.epoch()? {
-            metrics::observe(
-                &metrics::OPERATIONS_PER_BLOCK_ATTESTATION,
-                block.body().attestations().len() as f64,
-            );
-
-            if let Ok(sync_aggregate) = block.body().sync_aggregate() {
-                metrics::set_gauge(
-                    &metrics::BLOCK_SYNC_AGGREGATE_SET_BITS,
-                    sync_aggregate.num_set_bits() as i64,
-                );
-            }
-        }
-
+        // ---------------------------- BLOCK PROBABLY ATTESTABLE ----------------------------------
+        // Most blocks are now capable of being attested to thanks to the `early_attester_cache`
+        // cache above. Resume non-essential processing.
+        // -----------------------------------------------------------------------------------------
+        self.import_block_update_shuffling_cache(block_root, &mut state)?;
+        self.import_block_observe_attestations(
+            block,
+            &state,
+            &mut consensus_context,
+            current_epoch,
+        );
+        self.import_block_update_validator_monitor(
+            block,
+            &state,
+            &mut consensus_context,
+            current_slot,
+            parent_block.slot(),
+        );
+        self.import_block_update_slasher(block, &state, &mut consensus_context);
 
         let db_write_timer = metrics::start_timer(&metrics::BLOCK_PROCESSING_DB_WRITE);
 
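The banner comments above split `import_block` into a hot path (only what must run before fork choice, while the block is not yet attestable) and deferred bookkeeping delegated to `import_block_*` helpers. A schematic sketch of that ordering, with hypothetical method names standing in for the real helpers; only the sequencing is the point:

    struct Chain;

    impl Chain {
        fn import_block(&self) {
            // ---- hot path: nothing here but what must precede fork choice ----
            self.check_weak_subjectivity();
            self.import_new_pubkeys();
            self.apply_fork_choice();
            // ---- block is now (probably) attestable; resume non-essential work ----
            self.observe_attestations();
            self.update_validator_monitor();
            self.update_slasher();
        }
        fn check_weak_subjectivity(&self) { println!("wss check"); }
        fn import_new_pubkeys(&self) { println!("pubkeys"); }
        fn apply_fork_choice(&self) { println!("fork choice"); }
        fn observe_attestations(&self) { println!("observe attestations"); }
        fn update_validator_monitor(&self) { println!("validator monitor"); }
        fn update_slasher(&self) { println!("slasher"); }
    }

    fn main() { Chain.import_block(); }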
@@ -3020,7 +2889,9 @@ impl<T: BeaconChainTypes> BeaconChain<T> {
         ops.push(StoreOp::PutState(block.state_root(), &state));
         let txn_lock = self.store.hot_db.begin_rw_transaction();
 
-        if let Err(e) = self.store.do_atomically(ops) {
+        kv_store_ops.extend(self.store.convert_to_kv_batch(ops)?);
+
+        if let Err(e) = self.store.hot_db.do_atomically(kv_store_ops) {
             error!(
                 self.log,
                 "Database write failed!";
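The change above merges the pubkey-cache ops collected earlier (`kv_store_ops`) with the block and state ops, converted into the same key-value form, so both land in a single atomic write. A minimal sketch of the pattern, using illustrative `KvOp`/`Db` types rather than Lighthouse's store API:

    #[derive(Debug)]
    enum KvOp { Put(&'static str, u64), Delete(&'static str) }

    struct Db;

    impl Db {
        // Apply all ops or none (modeled; a real store would use a write batch).
        fn do_atomically(&self, ops: Vec<KvOp>) -> Result<(), String> {
            for op in &ops {
                println!("applying {op:?}");
            }
            Ok(())
        }
    }

    fn main() -> Result<(), String> {
        // Ops produced while importing new validator pubkeys.
        let mut kv_store_ops = vec![KvOp::Put("pubkey/42", 1)];
        // Ops for the block and state, converted to the same KV form.
        let block_ops = vec![KvOp::Put("block/abc", 2), KvOp::Delete("temp/xyz")];
        kv_store_ops.extend(block_ops);
        // Single atomic commit covering both, so a crash can't leave the two
        // sets of writes inconsistent with each other.
        Db.do_atomically(kv_store_ops)
    }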
@@ -3028,6 +2899,10 @@ impl<T: BeaconChainTypes> BeaconChain<T> {
                 "error" => ?e,
             );
 
+            // Clear the early attester cache to prevent attestations which we would later be unable
+            // to verify due to the failure.
+            self.early_attester_cache.clear();
+
             // Since the write failed, try to revert the canonical head back to what was stored
             // in the database. This attempts to prevent inconsistency between the database and
             // fork choice.
@@ -3070,6 +2945,7 @@ impl<T: BeaconChainTypes> BeaconChain<T> {
             eth1_deposit_index: state.eth1_deposit_index(),
         };
         let current_finalized_checkpoint = state.finalized_checkpoint();
+
         self.snapshot_cache
             .try_write_for(BLOCK_PROCESSING_CACHE_LOCK_TIMEOUT)
             .ok_or(Error::SnapshotCacheLockTimeout)
@@ -3077,7 +2953,7 @@ impl<T: BeaconChainTypes> BeaconChain<T> {
             snapshot_cache.insert(
                 BeaconSnapshot {
                     beacon_state: state,
-                    beacon_block: signed_block,
+                    beacon_block: signed_block.clone(),
                     beacon_block_root: block_root,
                 },
                 None,
@@ -3096,22 +2972,312 @@ impl<T: BeaconChainTypes> BeaconChain<T> {
         self.head_tracker
             .register_block(block_root, parent_root, slot);

-        // Send an event to the `events` endpoint after fully processing the block.
-        if let Some(event_handler) = self.event_handler.as_ref() {
-            if event_handler.has_block_subscribers() {
-                event_handler.register(EventKind::Block(SseBlock {
-                    slot,
-                    block: block_root,
-                    execution_optimistic: payload_verification_status.is_optimistic(),
-                }));
-            }
-        }

         metrics::stop_timer(db_write_timer);

         metrics::inc_counter(&metrics::BLOCK_PROCESSING_SUCCESSES);

-        let block_delay_total = get_slot_delay_ms(block_time_imported, slot, &self.slot_clock);
+        // Update the deposit contract cache.
+        self.import_block_update_deposit_contract_finalization(
+            block,
+            block_root,
+            current_epoch,
+            current_finalized_checkpoint,
+            current_eth1_finalization_data,
+            parent_eth1_finalization_data,
+            parent_block.slot(),
+        );
+
+        // Inform the unknown block cache, in case it was waiting on this block.
+        self.pre_finalization_block_cache
+            .block_processed(block_root);
+
+        self.import_block_update_metrics_and_events(
+            block,
+            block_root,
+            block_time_imported,
+            payload_verification_status,
+            current_slot,
+        );
+
+        Ok(block_root)
+    }
+
+    /// Check the block's consistency with any configured weak subjectivity checkpoint.
+    fn check_block_against_weak_subjectivity_checkpoint(
+        &self,
+        block: BeaconBlockRef<T::EthSpec>,
+        block_root: Hash256,
+        state: &BeaconState<T::EthSpec>,
+    ) -> Result<(), BlockError<T::EthSpec>> {
+        // Only perform the weak subjectivity check if it was configured.
+        let wss_checkpoint = if let Some(checkpoint) = self.config.weak_subjectivity_checkpoint {
+            checkpoint
+        } else {
+            return Ok(());
+        };
+        // Note: we're using the finalized checkpoint from the head state, rather than fork
+        // choice.
+        //
+        // We are doing this to ensure that we detect changes in finalization. It's possible
+        // that fork choice has already been updated to the finalized checkpoint in the block
+        // we're importing.
+        let current_head_finalized_checkpoint =
+            self.canonical_head.cached_head().finalized_checkpoint();
+        // Compare the existing finalized checkpoint with the incoming block's finalized checkpoint.
+        let new_finalized_checkpoint = state.finalized_checkpoint();
+
+        // This ensures we only perform the check once.
+        if current_head_finalized_checkpoint.epoch < wss_checkpoint.epoch
+            && wss_checkpoint.epoch <= new_finalized_checkpoint.epoch
+        {
+            if let Err(e) =
+                self.verify_weak_subjectivity_checkpoint(wss_checkpoint, block_root, state)
+            {
+                let mut shutdown_sender = self.shutdown_sender();
+                crit!(
+                    self.log,
+                    "Weak subjectivity checkpoint verification failed while importing block!";
+                    "block_root" => ?block_root,
+                    "parent_root" => ?block.parent_root(),
+                    "old_finalized_epoch" => ?current_head_finalized_checkpoint.epoch,
+                    "new_finalized_epoch" => ?new_finalized_checkpoint.epoch,
+                    "weak_subjectivity_epoch" => ?wss_checkpoint.epoch,
+                    "error" => ?e
+                );
+                crit!(
+                    self.log,
+                    "You must use the `--purge-db` flag to clear the database and restart sync. \
+                     You may be on a hostile network."
+                );
+                shutdown_sender
+                    .try_send(ShutdownReason::Failure(
+                        "Weak subjectivity checkpoint verification failed. \
+                         Provided block root is not a checkpoint.",
+                    ))
+                    .map_err(|err| {
+                        BlockError::BeaconChainError(
+                            BeaconChainError::WeakSubjectivtyShutdownError(err),
+                        )
+                    })?;
+                return Err(BlockError::WeakSubjectivityConflict);
+            }
+        }
+        Ok(())
+    }
+
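Note: the gating condition above is what the "only perform the check once" comment refers to; it can only be true on the single import where head finalization first crosses the configured weak subjectivity epoch. A minimal standalone sketch of the same predicate, using a hypothetical `Checkpoint` stand-in rather than the real consensus type:

```rust
/// Stand-in for the consensus `Checkpoint` type (epoch only, for brevity).
struct Checkpoint {
    epoch: u64,
}

/// True exactly once: on the import where finalization first reaches the
/// configured weak subjectivity epoch.
fn wss_check_due(head_finalized: &Checkpoint, block_finalized: &Checkpoint, wss_epoch: u64) -> bool {
    head_finalized.epoch < wss_epoch && wss_epoch <= block_finalized.epoch
}

fn main() {
    // Finalization jumps from epoch 90 to 100 with a WSS checkpoint at 95:
    // the check fires on this import, and never again afterwards.
    assert!(wss_check_due(&Checkpoint { epoch: 90 }, &Checkpoint { epoch: 100 }, 95));
    assert!(!wss_check_due(&Checkpoint { epoch: 100 }, &Checkpoint { epoch: 101 }, 95));
}
```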
+    /// Process a block for the validator monitor, including all its constituent messages.
+    fn import_block_update_validator_monitor(
+        &self,
+        block: BeaconBlockRef<T::EthSpec>,
+        state: &BeaconState<T::EthSpec>,
+        ctxt: &mut ConsensusContext<T::EthSpec>,
+        current_slot: Slot,
+        parent_block_slot: Slot,
+    ) {
+        // Only register blocks with the validator monitor when the block is sufficiently close to
+        // the current slot.
+        if VALIDATOR_MONITOR_HISTORIC_EPOCHS as u64 * T::EthSpec::slots_per_epoch()
+            + block.slot().as_u64()
+            < current_slot.as_u64()
+        {
+            return;
+        }
+
+        // Allow the validator monitor to learn about a new valid state.
+        self.validator_monitor
+            .write()
+            .process_valid_state(current_slot.epoch(T::EthSpec::slots_per_epoch()), state);
+
+        let validator_monitor = self.validator_monitor.read();
+
+        // Sync aggregate.
+        if let Ok(sync_aggregate) = block.body().sync_aggregate() {
+            // `SyncCommittee` for the sync_aggregate should correspond to the duty slot
+            let duty_epoch = block.slot().epoch(T::EthSpec::slots_per_epoch());
+
+            match self.sync_committee_at_epoch(duty_epoch) {
+                Ok(sync_committee) => {
+                    let participant_pubkeys = sync_committee
+                        .pubkeys
+                        .iter()
+                        .zip(sync_aggregate.sync_committee_bits.iter())
+                        .filter_map(|(pubkey, bit)| bit.then_some(pubkey))
+                        .collect::<Vec<_>>();
+
+                    validator_monitor.register_sync_aggregate_in_block(
+                        block.slot(),
+                        block.parent_root(),
+                        participant_pubkeys,
+                    );
+                }
+                Err(e) => {
+                    warn!(
+                        self.log,
+                        "Unable to fetch sync committee";
+                        "epoch" => duty_epoch,
+                        "purpose" => "validator monitor",
+                        "error" => ?e,
+                    );
+                }
+            }
+        }
+
+        // Attestations.
+        for attestation in block.body().attestations() {
+            let indexed_attestation = match ctxt.get_indexed_attestation(state, attestation) {
+                Ok(indexed) => indexed,
+                Err(e) => {
+                    debug!(
+                        self.log,
+                        "Failed to get indexed attestation";
+                        "purpose" => "validator monitor",
+                        "attestation_slot" => attestation.data.slot,
+                        "error" => ?e,
+                    );
+                    continue;
+                }
+            };
+            validator_monitor.register_attestation_in_block(
+                indexed_attestation,
+                parent_block_slot,
+                &self.spec,
+            );
+        }
+
+        for exit in block.body().voluntary_exits() {
+            validator_monitor.register_block_voluntary_exit(&exit.message)
+        }
+
+        for slashing in block.body().attester_slashings() {
+            validator_monitor.register_block_attester_slashing(slashing)
+        }
+
+        for slashing in block.body().proposer_slashings() {
+            validator_monitor.register_block_proposer_slashing(slashing)
+        }
+    }
+
+    /// Iterate through the attestations in the block and register them as "observed".
+    ///
+    /// This will stop us from propagating them on the gossip network.
+    fn import_block_observe_attestations(
+        &self,
+        block: BeaconBlockRef<T::EthSpec>,
+        state: &BeaconState<T::EthSpec>,
+        ctxt: &mut ConsensusContext<T::EthSpec>,
+        current_epoch: Epoch,
+    ) {
+        // To avoid slowing down sync, only observe attestations if the block is from the
+        // previous epoch or later.
+        if state.current_epoch() + 1 < current_epoch {
+            return;
+        }
+
+        let _timer = metrics::start_timer(&metrics::BLOCK_PROCESSING_ATTESTATION_OBSERVATION);
+
+        for a in block.body().attestations() {
+            match self.observed_attestations.write().observe_item(a, None) {
+                // If the observation was successful or if the slot for the attestation was too
+                // low, continue.
+                //
+                // We ignore `SlotTooLow` since this will be very common whilst syncing.
+                Ok(_) | Err(AttestationObservationError::SlotTooLow { .. }) => {}
+                Err(e) => {
+                    debug!(
+                        self.log,
+                        "Failed to register observed attestation";
+                        "error" => ?e,
+                        "epoch" => a.data.target.epoch
+                    );
+                }
+            }

+            let indexed_attestation = match ctxt.get_indexed_attestation(state, a) {
+                Ok(indexed) => indexed,
+                Err(e) => {
+                    debug!(
+                        self.log,
+                        "Failed to get indexed attestation";
+                        "purpose" => "observation",
+                        "attestation_slot" => a.data.slot,
+                        "error" => ?e,
+                    );
+                    continue;
+                }
+            };
+
+            let mut observed_block_attesters = self.observed_block_attesters.write();
+
+            for &validator_index in &indexed_attestation.attesting_indices {
+                if let Err(e) = observed_block_attesters
+                    .observe_validator(a.data.target.epoch, validator_index as usize)
+                {
+                    debug!(
+                        self.log,
+                        "Failed to register observed block attester";
+                        "error" => ?e,
+                        "epoch" => a.data.target.epoch,
+                        "validator_index" => validator_index,
+                    )
+                }
+            }
+        }
+    }
+
+    /// If a slasher is configured, provide the attestations from the block.
+    fn import_block_update_slasher(
+        &self,
+        block: BeaconBlockRef<T::EthSpec>,
+        state: &BeaconState<T::EthSpec>,
+        ctxt: &mut ConsensusContext<T::EthSpec>,
+    ) {
+        if let Some(slasher) = self.slasher.as_ref() {
+            for attestation in block.body().attestations() {
+                let indexed_attestation = match ctxt.get_indexed_attestation(state, attestation) {
+                    Ok(indexed) => indexed,
+                    Err(e) => {
+                        debug!(
+                            self.log,
+                            "Failed to get indexed attestation";
+                            "purpose" => "slasher",
+                            "attestation_slot" => attestation.data.slot,
+                            "error" => ?e,
+                        );
+                        continue;
+                    }
+                };
+                slasher.accept_attestation(indexed_attestation.clone());
+            }
+        }
+    }
+
+    fn import_block_update_metrics_and_events(
+        &self,
+        block: BeaconBlockRef<T::EthSpec>,
+        block_root: Hash256,
+        block_time_imported: Duration,
+        payload_verification_status: PayloadVerificationStatus,
+        current_slot: Slot,
+    ) {
+        // Only present some metrics for blocks from the previous epoch or later.
+        //
+        // This helps avoid noise in the metrics during sync.
+        if block.slot() + 2 * T::EthSpec::slots_per_epoch() >= current_slot {
+            metrics::observe(
+                &metrics::OPERATIONS_PER_BLOCK_ATTESTATION,
+                block.body().attestations().len() as f64,
+            );
+
+            if let Ok(sync_aggregate) = block.body().sync_aggregate() {
+                metrics::set_gauge(
+                    &metrics::BLOCK_SYNC_AGGREGATE_SET_BITS,
+                    sync_aggregate.num_set_bits() as i64,
+                );
+            }
+        }
+
+        let block_delay_total =
+            get_slot_delay_ms(block_time_imported, block.slot(), &self.slot_clock);

         // Do not write to the cache for blocks older than 2 epochs, this helps reduce writes to
         // the cache during sync.
@@ -3143,62 +3309,105 @@ impl<T: BeaconChainTypes> BeaconChain<T> {
            );
        }

-        // Do not write to eth1 finalization cache for blocks older than 5 epochs
-        // this helps reduce noise during sync
-        if block_delay_total
-            < self.slot_clock.slot_duration() * 5 * (T::EthSpec::slots_per_epoch() as u32)
-        {
-            let parent_block_epoch = parent_block.slot().epoch(T::EthSpec::slots_per_epoch());
-            if parent_block_epoch < current_epoch {
-                // we've crossed epoch boundary, store Eth1FinalizationData
-                let (checkpoint, eth1_finalization_data) =
-                    if current_slot % T::EthSpec::slots_per_epoch() == 0 {
-                        // current block is the checkpoint
-                        (
-                            Checkpoint {
-                                epoch: current_epoch,
-                                root: block_root,
-                            },
-                            current_eth1_finalization_data,
-                        )
-                    } else {
-                        // parent block is the checkpoint
-                        (
-                            Checkpoint {
-                                epoch: current_epoch,
-                                root: parent_block.canonical_root(),
-                            },
-                            parent_eth1_finalization_data,
-                        )
-                    };
-
-                if let Some(finalized_eth1_data) = self
-                    .eth1_finalization_cache
-                    .try_write_for(ETH1_FINALIZATION_CACHE_LOCK_TIMEOUT)
-                    .and_then(|mut cache| {
-                        cache.insert(checkpoint, eth1_finalization_data);
-                        cache.finalize(&current_finalized_checkpoint)
-                    })
-                {
-                    if let Some(eth1_chain) = self.eth1_chain.as_ref() {
-                        let finalized_deposit_count = finalized_eth1_data.deposit_count;
-                        eth1_chain.finalize_eth1_data(finalized_eth1_data);
-                        debug!(
-                            self.log,
-                            "called eth1_chain.finalize_eth1_data()";
-                            "epoch" => current_finalized_checkpoint.epoch,
-                            "deposit count" => finalized_deposit_count,
-                        );
-                    }
-                }
+        if let Some(event_handler) = self.event_handler.as_ref() {
+            if event_handler.has_block_subscribers() {
+                event_handler.register(EventKind::Block(SseBlock {
+                    slot: block.slot(),
+                    block: block_root,
+                    execution_optimistic: payload_verification_status.is_optimistic(),
+                }));
+            }
+        }
+    }
+
+    fn import_block_update_shuffling_cache(
+        &self,
+        block_root: Hash256,
+        state: &mut BeaconState<T::EthSpec>,
+    ) -> Result<(), BlockError<T::EthSpec>> {
+        // For the current and next epoch of this state, ensure we have the shuffling from this
+        // block in our cache.
+        for relative_epoch in [RelativeEpoch::Current, RelativeEpoch::Next] {
+            let shuffling_id = AttestationShufflingId::new(block_root, state, relative_epoch)?;
+
+            let shuffling_is_cached = self
+                .shuffling_cache
+                .try_read_for(ATTESTATION_CACHE_LOCK_TIMEOUT)
+                .ok_or(Error::AttestationCacheLockTimeout)?
+                .contains(&shuffling_id);
+
+            if !shuffling_is_cached {
+                state.build_committee_cache(relative_epoch, &self.spec)?;
+                let committee_cache = state.committee_cache(relative_epoch)?;
+                self.shuffling_cache
+                    .try_write_for(ATTESTATION_CACHE_LOCK_TIMEOUT)
+                    .ok_or(Error::AttestationCacheLockTimeout)?
+                    .insert_committee_cache(shuffling_id, committee_cache);
+            }
+        }
+        Ok(())
+    }
+
+    #[allow(clippy::too_many_arguments)]
+    fn import_block_update_deposit_contract_finalization(
+        &self,
+        block: BeaconBlockRef<T::EthSpec>,
+        block_root: Hash256,
+        current_epoch: Epoch,
+        current_finalized_checkpoint: Checkpoint,
+        current_eth1_finalization_data: Eth1FinalizationData,
+        parent_eth1_finalization_data: Eth1FinalizationData,
+        parent_block_slot: Slot,
+    ) {
+        // Do not write to eth1 finalization cache for blocks older than 5 epochs.
+        if block.slot().epoch(T::EthSpec::slots_per_epoch()) + 5 < current_epoch {
+            return;
+        }
+
+        let parent_block_epoch = parent_block_slot.epoch(T::EthSpec::slots_per_epoch());
+        if parent_block_epoch < current_epoch {
+            // we've crossed epoch boundary, store Eth1FinalizationData
+            let (checkpoint, eth1_finalization_data) =
+                if block.slot() % T::EthSpec::slots_per_epoch() == 0 {
+                    // current block is the checkpoint
+                    (
+                        Checkpoint {
+                            epoch: current_epoch,
+                            root: block_root,
+                        },
+                        current_eth1_finalization_data,
+                    )
+                } else {
+                    // parent block is the checkpoint
+                    (
+                        Checkpoint {
+                            epoch: current_epoch,
+                            root: block.parent_root(),
+                        },
+                        parent_eth1_finalization_data,
+                    )
+                };
+
+            if let Some(finalized_eth1_data) = self
+                .eth1_finalization_cache
+                .try_write_for(ETH1_FINALIZATION_CACHE_LOCK_TIMEOUT)
+                .and_then(|mut cache| {
+                    cache.insert(checkpoint, eth1_finalization_data);
+                    cache.finalize(&current_finalized_checkpoint)
+                })
+            {
+                if let Some(eth1_chain) = self.eth1_chain.as_ref() {
+                    let finalized_deposit_count = finalized_eth1_data.deposit_count;
+                    eth1_chain.finalize_eth1_data(finalized_eth1_data);
+                    debug!(
+                        self.log,
+                        "called eth1_chain.finalize_eth1_data()";
+                        "epoch" => current_finalized_checkpoint.epoch,
+                        "deposit count" => finalized_deposit_count,
+                    );
+                }
            }
        }
    }
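Note: the checkpoint selection above hinges on whether the imported block sits exactly on an epoch boundary, as the "current block is the checkpoint" / "parent block is the checkpoint" comments describe. A small worked sketch of that rule, assuming mainnet's 32 slots per epoch; `pick_checkpoint_root` is a hypothetical helper, not part of this commit:

```rust
const SLOTS_PER_EPOCH: u64 = 32;

/// If the block occupies the first slot of the new epoch it is itself the
/// epoch-boundary checkpoint; otherwise the boundary slot was skipped and the
/// parent (the last block before the boundary) is the checkpoint.
fn pick_checkpoint_root(block_slot: u64, block_root: u64, parent_root: u64) -> u64 {
    if block_slot % SLOTS_PER_EPOCH == 0 {
        block_root
    } else {
        parent_root
    }
}

fn main() {
    assert_eq!(pick_checkpoint_root(64, 0xb10c, 0xdad), 0xb10c); // slot 64 starts epoch 2
    assert_eq!(pick_checkpoint_root(65, 0xb10c, 0xdad), 0xdad); // slot 64 had no block
}
```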

-        // Inform the unknown block cache, in case it was waiting on this block.
-        self.pre_finalization_block_cache
-            .block_processed(block_root);
-
-        Ok(block_root)
    }

    /// If configured, wait for the fork choice run at the start of the slot to complete.
@@ -3591,10 +3800,12 @@ impl<T: BeaconChainTypes> BeaconChain<T> {
        // This will be a lot slower but guards against bugs in block production and can be
        // quickly rolled out without a release.
        if self.config.paranoid_block_proposal {
+            let mut tmp_ctxt = ConsensusContext::new(state.slot());
            attestations.retain(|att| {
                verify_attestation_for_block_inclusion(
                    &state,
                    att,
+                    &mut tmp_ctxt,
                    VerifySignatures::True,
                    &self.spec,
                )
@@ -45,29 +45,29 @@
 use crate::eth1_finalization_cache::Eth1FinalizationData;
 use crate::execution_payload::{
     is_optimistic_candidate_block, validate_execution_payload_for_gossip, validate_merge_block,
-    AllowOptimisticImport, PayloadNotifier,
+    AllowOptimisticImport, NotifyExecutionLayer, PayloadNotifier,
 };
 use crate::snapshot_cache::PreProcessingSnapshot;
 use crate::validator_monitor::HISTORIC_EPOCHS as VALIDATOR_MONITOR_HISTORIC_EPOCHS;
 use crate::validator_pubkey_cache::ValidatorPubkeyCache;
 use crate::{
     beacon_chain::{
-        BeaconForkChoice, BLOCK_PROCESSING_CACHE_LOCK_TIMEOUT, MAXIMUM_GOSSIP_CLOCK_DISPARITY,
-        VALIDATOR_PUBKEY_CACHE_LOCK_TIMEOUT,
+        BeaconForkChoice, ForkChoiceError, BLOCK_PROCESSING_CACHE_LOCK_TIMEOUT,
+        MAXIMUM_GOSSIP_CLOCK_DISPARITY, VALIDATOR_PUBKEY_CACHE_LOCK_TIMEOUT,
     },
     metrics, BeaconChain, BeaconChainError, BeaconChainTypes,
 };
 use derivative::Derivative;
 use eth2::types::EventKind;
 use execution_layer::PayloadStatus;
-use fork_choice::PayloadVerificationStatus;
+use fork_choice::{AttestationFromBlock, PayloadVerificationStatus};
 use parking_lot::RwLockReadGuard;
 use proto_array::Block as ProtoBlock;
 use safe_arith::ArithError;
 use slog::{debug, error, warn, Logger};
 use slot_clock::SlotClock;
 use ssz::Encode;
-use state_processing::per_block_processing::is_merge_transition_block;
+use state_processing::per_block_processing::{errors::IntoWithIndex, is_merge_transition_block};
 use state_processing::{
     block_signature_verifier::{BlockSignatureVerifier, Error as BlockSignatureVerifierError},
     per_block_processing, per_slot_processing,
@@ -551,8 +551,22 @@ pub fn signature_verify_chain_segment<T: BeaconChainTypes>(
     let pubkey_cache = get_validator_pubkey_cache(chain)?;
     let mut signature_verifier = get_signature_verifier(&state, &pubkey_cache, &chain.spec);

+    let mut signature_verified_blocks = Vec::with_capacity(chain_segment.len());
+
     for (block_root, block) in &chain_segment {
-        signature_verifier.include_all_signatures(block, Some(*block_root), None)?;
+        let mut consensus_context =
+            ConsensusContext::new(block.slot()).set_current_block_root(*block_root);
+
+        signature_verifier.include_all_signatures(block, &mut consensus_context)?;
+
+        // Save the block and its consensus context. The context will have had its proposer index
+        // and attesting indices filled in, which can be used to accelerate later block processing.
+        signature_verified_blocks.push(SignatureVerifiedBlock {
+            block: block.clone(),
+            block_root: *block_root,
+            parent: None,
+            consensus_context,
+        });
     }

     if signature_verifier.verify().is_err() {
@@ -561,22 +575,6 @@ pub fn signature_verify_chain_segment<T: BeaconChainTypes>(

     drop(pubkey_cache);

-    let mut signature_verified_blocks = chain_segment
-        .into_iter()
-        .map(|(block_root, block)| {
-            // Proposer index has already been verified above during signature verification.
-            let consensus_context = ConsensusContext::new(block.slot())
-                .set_current_block_root(block_root)
-                .set_proposer_index(block.message().proposer_index());
-            SignatureVerifiedBlock {
-                block,
-                block_root,
-                parent: None,
-                consensus_context,
-            }
-        })
-        .collect::<Vec<_>>();
-
     if let Some(signature_verified_block) = signature_verified_blocks.first_mut() {
         signature_verified_block.parent = Some(parent);
     }
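Note: the point of threading `ConsensusContext` through signature verification, per the comment in the hunk above, is memoization. The proposer index and attesting indices computed during verification travel with the block so later import stages do not recompute them. A simplified sketch of that idea with stand-in types, not the real `ConsensusContext` API:

```rust
use std::collections::HashMap;

/// Simplified stand-in: caches expensive per-block computations so each is
/// done at most once across verification and import.
#[derive(Default)]
struct Ctx {
    proposer_index: Option<u64>,
    // attestation position in the block -> attesting validator indices
    indexed_attestations: HashMap<usize, Vec<u64>>,
}

impl Ctx {
    fn get_indexed_attestation(&mut self, i: usize) -> &Vec<u64> {
        self.indexed_attestations
            .entry(i)
            .or_insert_with(|| compute_attesting_indices(i)) // runs once per attestation
    }
}

fn compute_attesting_indices(_i: usize) -> Vec<u64> {
    vec![] // placeholder for the expensive committee lookup
}

fn main() {
    let mut ctx = Ctx::default();
    ctx.proposer_index = Some(42); // filled in once during signature verification
    let first = ctx.get_indexed_attestation(0).len();
    let second = ctx.get_indexed_attestation(0).len(); // cache hit, no recompute
    assert_eq!(first, second);
}
```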
@@ -626,6 +624,7 @@ pub struct ExecutionPendingBlock<T: BeaconChainTypes> {
    pub parent_block: SignedBeaconBlock<T::EthSpec, BlindedPayload<T::EthSpec>>,
    pub parent_eth1_finalization_data: Eth1FinalizationData,
    pub confirmed_state_roots: Vec<Hash256>,
+    pub consensus_context: ConsensusContext<T::EthSpec>,
    pub payload_verification_handle: PayloadVerificationHandle<T::EthSpec>,
}

@@ -637,8 +636,9 @@ pub trait IntoExecutionPendingBlock<T: BeaconChainTypes>: Sized {
        self,
        block_root: Hash256,
        chain: &Arc<BeaconChain<T>>,
+        notify_execution_layer: NotifyExecutionLayer,
    ) -> Result<ExecutionPendingBlock<T>, BlockError<T::EthSpec>> {
-        self.into_execution_pending_block_slashable(block_root, chain)
+        self.into_execution_pending_block_slashable(block_root, chain, notify_execution_layer)
            .map(|execution_pending| {
                // Supply valid block to slasher.
                if let Some(slasher) = chain.slasher.as_ref() {
@@ -654,6 +654,7 @@ pub trait IntoExecutionPendingBlock<T: BeaconChainTypes>: Sized {
        self,
        block_root: Hash256,
        chain: &Arc<BeaconChain<T>>,
+        notify_execution_layer: NotifyExecutionLayer,
    ) -> Result<ExecutionPendingBlock<T>, BlockSlashInfo<BlockError<T::EthSpec>>>;

    fn block(&self) -> &SignedBeaconBlock<T::EthSpec>;
@@ -900,10 +901,15 @@ impl<T: BeaconChainTypes> IntoExecutionPendingBlock<T> for GossipVerifiedBlock<T
        self,
        block_root: Hash256,
        chain: &Arc<BeaconChain<T>>,
+        notify_execution_layer: NotifyExecutionLayer,
    ) -> Result<ExecutionPendingBlock<T>, BlockSlashInfo<BlockError<T::EthSpec>>> {
        let execution_pending =
            SignatureVerifiedBlock::from_gossip_verified_block_check_slashable(self, chain)?;
-        execution_pending.into_execution_pending_block_slashable(block_root, chain)
+        execution_pending.into_execution_pending_block_slashable(
+            block_root,
+            chain,
+            notify_execution_layer,
+        )
    }

    fn block(&self) -> &SignedBeaconBlock<T::EthSpec> {
@@ -945,13 +951,14 @@ impl<T: BeaconChainTypes> SignatureVerifiedBlock<T> {

        let mut signature_verifier = get_signature_verifier(&state, &pubkey_cache, &chain.spec);

-        signature_verifier.include_all_signatures(&block, Some(block_root), None)?;
+        let mut consensus_context =
+            ConsensusContext::new(block.slot()).set_current_block_root(block_root);
+
+        signature_verifier.include_all_signatures(&block, &mut consensus_context)?;

        if signature_verifier.verify().is_ok() {
            Ok(Self {
-                consensus_context: ConsensusContext::new(block.slot())
-                    .set_current_block_root(block_root)
-                    .set_proposer_index(block.message().proposer_index()),
+                consensus_context,
                block,
                block_root,
                parent: Some(parent),
@@ -996,16 +1003,16 @@ impl<T: BeaconChainTypes> SignatureVerifiedBlock<T> {

        // Gossip verification has already checked the proposer index. Use it to check the RANDAO
        // signature.
-        let verified_proposer_index = Some(block.message().proposer_index());
+        let mut consensus_context = from.consensus_context;
        signature_verifier
-            .include_all_signatures_except_proposal(&block, verified_proposer_index)?;
+            .include_all_signatures_except_proposal(&block, &mut consensus_context)?;

        if signature_verifier.verify().is_ok() {
            Ok(Self {
                block,
                block_root: from.block_root,
                parent: Some(parent),
-                consensus_context: from.consensus_context,
+                consensus_context,
            })
        } else {
            Err(BlockError::InvalidSignature)
@@ -1033,6 +1040,7 @@ impl<T: BeaconChainTypes> IntoExecutionPendingBlock<T> for SignatureVerifiedBloc
        self,
        block_root: Hash256,
        chain: &Arc<BeaconChain<T>>,
+        notify_execution_layer: NotifyExecutionLayer,
    ) -> Result<ExecutionPendingBlock<T>, BlockSlashInfo<BlockError<T::EthSpec>>> {
        let header = self.block.signed_block_header();
        let (parent, block) = if let Some(parent) = self.parent {
@@ -1048,6 +1056,7 @@ impl<T: BeaconChainTypes> IntoExecutionPendingBlock<T> for SignatureVerifiedBloc
                parent,
                self.consensus_context,
                chain,
+                notify_execution_layer,
            )
            .map_err(|e| BlockSlashInfo::SignatureValid(header, e))
        }
@@ -1064,13 +1073,14 @@ impl<T: BeaconChainTypes> IntoExecutionPendingBlock<T> for Arc<SignedBeaconBlock
        self,
        block_root: Hash256,
        chain: &Arc<BeaconChain<T>>,
+        notify_execution_layer: NotifyExecutionLayer,
    ) -> Result<ExecutionPendingBlock<T>, BlockSlashInfo<BlockError<T::EthSpec>>> {
        // Perform an early check to prevent wasting time on irrelevant blocks.
        let block_root = check_block_relevancy(&self, block_root, chain)
            .map_err(|e| BlockSlashInfo::SignatureNotChecked(self.signed_block_header(), e))?;

        SignatureVerifiedBlock::check_slashable(self, block_root, chain)?
-            .into_execution_pending_block_slashable(block_root, chain)
+            .into_execution_pending_block_slashable(block_root, chain, notify_execution_layer)
    }

    fn block(&self) -> &SignedBeaconBlock<T::EthSpec> {
@@ -1092,6 +1102,7 @@ impl<T: BeaconChainTypes> ExecutionPendingBlock<T> {
        parent: PreProcessingSnapshot<T::EthSpec>,
        mut consensus_context: ConsensusContext<T::EthSpec>,
        chain: &Arc<BeaconChain<T>>,
+        notify_execution_layer: NotifyExecutionLayer,
    ) -> Result<Self, BlockError<T::EthSpec>> {
        if let Some(parent) = chain
            .canonical_head
@@ -1128,6 +1139,79 @@ impl<T: BeaconChainTypes> ExecutionPendingBlock<T> {

        check_block_relevancy(&block, block_root, chain)?;

+        // Define a future that will verify the execution payload with an execution engine.
+        //
+        // We do this as early as possible so that later parts of this function can run in parallel
+        // with the payload verification.
+        let payload_notifier = PayloadNotifier::new(
+            chain.clone(),
+            block.clone(),
+            &parent.pre_state,
+            notify_execution_layer,
+        )?;
+        let is_valid_merge_transition_block =
+            is_merge_transition_block(&parent.pre_state, block.message().body());
+        let payload_verification_future = async move {
+            let chain = payload_notifier.chain.clone();
+            let block = payload_notifier.block.clone();
+
+            // If this block triggers the merge, check to ensure that it references valid execution
+            // blocks.
+            //
+            // The specification defines this check inside `on_block` in the fork-choice specification,
+            // however we perform the check here for two reasons:
+            //
+            // - There's no point in importing a block that will fail fork choice, so it's best to fail
+            //   early.
+            // - Doing the check here means we can keep our fork-choice implementation "pure". I.e., no
+            //   calls to remote servers.
+            if is_valid_merge_transition_block {
+                validate_merge_block(&chain, block.message(), AllowOptimisticImport::Yes).await?;
+            };
+
+            // The specification declares that this should be run *inside* `per_block_processing`,
+            // however we run it here to keep `per_block_processing` pure (i.e., no calls to external
+            // servers).
+            let payload_verification_status = payload_notifier.notify_new_payload().await?;
+
+            // If the payload did not validate or invalidate the block, check to see if this block is
+            // valid for optimistic import.
+            if payload_verification_status.is_optimistic() {
+                let block_hash_opt = block
+                    .message()
+                    .body()
+                    .execution_payload()
+                    .map(|full_payload| full_payload.block_hash());
+
+                // Ensure the block is a candidate for optimistic import.
+                if !is_optimistic_candidate_block(&chain, block.slot(), block.parent_root()).await?
+                {
+                    warn!(
+                        chain.log,
+                        "Rejecting optimistic block";
+                        "block_hash" => ?block_hash_opt,
+                        "msg" => "the execution engine is not synced"
+                    );
+                    return Err(ExecutionPayloadError::UnverifiedNonOptimisticCandidate.into());
+                }
+            }
+
+            Ok(PayloadVerificationOutcome {
+                payload_verification_status,
+                is_valid_merge_transition_block,
+            })
+        };
+        // Spawn the payload verification future as a new task, but don't wait for it to complete.
+        // The `payload_verification_future` will be awaited later to ensure verification completed
+        // successfully.
+        let payload_verification_handle = chain
+            .task_executor
+            .spawn_handle(
+                payload_verification_future,
+                "execution_payload_verification",
+            )
+            .ok_or(BeaconChainError::RuntimeShutdown)?;

        /*
         * Advance the given `parent.beacon_state` to the slot of the given `block`.
         */
@@ -1232,79 +1316,11 @@ impl<T: BeaconChainTypes> ExecutionPendingBlock<T> {
                summaries.push(summary);
            }
        }
+        metrics::stop_timer(catchup_timer);

        let block_slot = block.slot();
        let state_current_epoch = state.current_epoch();

-        // Define a future that will verify the execution payload with an execution engine (but
-        // don't execute it yet).
-        let payload_notifier = PayloadNotifier::new(chain.clone(), block.clone(), &state)?;
-        let is_valid_merge_transition_block =
-            is_merge_transition_block(&state, block.message().body());
-        let payload_verification_future = async move {
-            let chain = payload_notifier.chain.clone();
-            let block = payload_notifier.block.clone();
-
-            // If this block triggers the merge, check to ensure that it references valid execution
-            // blocks.
-            //
-            // The specification defines this check inside `on_block` in the fork-choice specification,
-            // however we perform the check here for two reasons:
-            //
-            // - There's no point in importing a block that will fail fork choice, so it's best to fail
-            //   early.
-            // - Doing the check here means we can keep our fork-choice implementation "pure". I.e., no
-            //   calls to remote servers.
-            if is_valid_merge_transition_block {
-                validate_merge_block(&chain, block.message(), AllowOptimisticImport::Yes).await?;
-            };
-
-            // The specification declares that this should be run *inside* `per_block_processing`,
-            // however we run it here to keep `per_block_processing` pure (i.e., no calls to external
-            // servers).
-            //
-            // It is important that this function is called *after* `per_slot_processing`, since the
-            // `randao` may change.
-            let payload_verification_status = payload_notifier.notify_new_payload().await?;
-
-            // If the payload did not validate or invalidate the block, check to see if this block is
-            // valid for optimistic import.
-            if payload_verification_status.is_optimistic() {
-                let block_hash_opt = block
-                    .message()
-                    .body()
-                    .execution_payload()
-                    .map(|full_payload| full_payload.block_hash());
-
-                // Ensure the block is a candidate for optimistic import.
-                if !is_optimistic_candidate_block(&chain, block.slot(), block.parent_root()).await?
-                {
-                    warn!(
-                        chain.log,
-                        "Rejecting optimistic block";
-                        "block_hash" => ?block_hash_opt,
-                        "msg" => "the execution engine is not synced"
-                    );
-                    return Err(ExecutionPayloadError::UnverifiedNonOptimisticCandidate.into());
-                }
-            }
-
-            Ok(PayloadVerificationOutcome {
-                payload_verification_status,
-                is_valid_merge_transition_block,
-            })
-        };
-        // Spawn the payload verification future as a new task, but don't wait for it to complete.
-        // The `payload_verification_future` will be awaited later to ensure verification completed
-        // successfully.
-        let payload_verification_handle = chain
-            .task_executor
-            .spawn_handle(
-                payload_verification_future,
-                "execution_payload_verification",
-            )
-            .ok_or(BeaconChainError::RuntimeShutdown)?;
-
        // If the block is sufficiently recent, notify the validator monitor.
        if let Some(slot) = chain.slot_clock.now() {
            let epoch = slot.epoch(T::EthSpec::slots_per_epoch());
@@ -1331,8 +1347,6 @@ impl<T: BeaconChainTypes> ExecutionPendingBlock<T> {
            }
        }

-        metrics::stop_timer(catchup_timer);
-
        /*
         * Build the committee caches on the state.
         */
@@ -1422,6 +1436,44 @@ impl<T: BeaconChainTypes> ExecutionPendingBlock<T> {
            });
        }

+        /*
+         * Apply the block's attestations to fork choice.
+         *
+         * We're running in parallel with the payload verification at this point, so this is
+         * free real estate.
+         */
+        let current_slot = chain.slot()?;
+        let mut fork_choice = chain.canonical_head.fork_choice_write_lock();
+
+        // Register each attester slashing in the block with fork choice.
+        for attester_slashing in block.message().body().attester_slashings() {
+            fork_choice.on_attester_slashing(attester_slashing);
+        }
+
+        // Register each attestation in the block with fork choice.
+        for (i, attestation) in block.message().body().attestations().iter().enumerate() {
+            let _fork_choice_attestation_timer =
+                metrics::start_timer(&metrics::FORK_CHOICE_PROCESS_ATTESTATION_TIMES);
+
+            let indexed_attestation = consensus_context
+                .get_indexed_attestation(&state, attestation)
+                .map_err(|e| BlockError::PerBlockProcessingError(e.into_with_index(i)))?;
+
+            match fork_choice.on_attestation(
+                current_slot,
+                indexed_attestation,
+                AttestationFromBlock::True,
+                &chain.spec,
+            ) {
+                Ok(()) => Ok(()),
+                // Ignore invalid attestations whilst importing attestations from a block. The
+                // block might be very old and therefore the attestations useless to fork choice.
+                Err(ForkChoiceError::InvalidAttestation(_)) => Ok(()),
+                Err(e) => Err(BlockError::BeaconChainError(e.into())),
+            }?;
+        }
+        drop(fork_choice);
+
        Ok(Self {
            block,
            block_root,
@@ -1429,6 +1481,7 @@ impl<T: BeaconChainTypes> ExecutionPendingBlock<T> {
            parent_block: parent.beacon_block,
            parent_eth1_finalization_data,
            confirmed_state_roots,
+            consensus_context,
            payload_verification_handle,
        })
    }
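Note: there is a deliberate asymmetry in the `on_attestation` match above, which the inline comment explains: attestations from an old block may legitimately fail fork-choice validation, so they are tolerated rather than aborting the import. A sketch of that filtering pattern with stand-in error types (not the real Lighthouse enums):

```rust
/// Stand-ins for the real fork-choice and block errors.
enum ForkChoiceError {
    InvalidAttestation,
    Internal(String),
}
enum BlockError {
    BeaconChainError(String),
}

/// Tolerate invalid attestations (common for very old blocks), but surface
/// anything else as a block-import failure.
fn fold_attestation_result(r: Result<(), ForkChoiceError>) -> Result<(), BlockError> {
    match r {
        Ok(()) => Ok(()),
        Err(ForkChoiceError::InvalidAttestation) => Ok(()),
        Err(ForkChoiceError::Internal(e)) => Err(BlockError::BeaconChainError(e)),
    }
}

fn main() {
    assert!(fold_attestation_result(Err(ForkChoiceError::InvalidAttestation)).is_ok());
    assert!(fold_attestation_result(Err(ForkChoiceError::Internal("db".into()))).is_err());
}
```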
@@ -45,6 +45,8 @@ pub struct ChainConfig {
    pub paranoid_block_proposal: bool,
    /// Whether to strictly count unrealized justified votes.
    pub count_unrealized_full: CountUnrealizedFull,
+    /// Optionally set timeout for calls to checkpoint sync endpoint.
+    pub checkpoint_sync_url_timeout: u64,
}

impl Default for ChainConfig {
@@ -65,6 +67,7 @@ impl Default for ChainConfig {
            always_reset_payload_statuses: false,
            paranoid_block_proposal: false,
            count_unrealized_full: CountUnrealizedFull::default(),
+            checkpoint_sync_url_timeout: 60,
        }
    }
}
@@ -38,6 +38,16 @@ pub enum AllowOptimisticImport {
    No,
}

+/// Signal whether the execution payloads of new blocks should be
+/// immediately verified with the EL or imported optimistically without
+/// any EL communication.
+#[derive(Default, Clone, Copy)]
+pub enum NotifyExecutionLayer {
+    #[default]
+    Yes,
+    No,
+}
+
/// Used to await the result of executing payload with a remote EE.
pub struct PayloadNotifier<T: BeaconChainTypes> {
    pub chain: Arc<BeaconChain<T>>,
@@ -50,21 +60,28 @@ impl<T: BeaconChainTypes> PayloadNotifier<T> {
        chain: Arc<BeaconChain<T>>,
        block: Arc<SignedBeaconBlock<T::EthSpec>>,
        state: &BeaconState<T::EthSpec>,
+        notify_execution_layer: NotifyExecutionLayer,
    ) -> Result<Self, BlockError<T::EthSpec>> {
-        let payload_verification_status = if is_execution_enabled(state, block.message().body()) {
-            // Perform the initial stages of payload verification.
-            //
-            // We will duplicate these checks again during `per_block_processing`, however these checks
-            // are cheap and doing them here ensures we protect the execution engine from junk.
-            partially_verify_execution_payload::<T::EthSpec, FullPayload<T::EthSpec>>(
-                state,
-                block.message().execution_payload()?,
-                &chain.spec,
-            )
-            .map_err(BlockError::PerBlockProcessingError)?;
-            None
-        } else {
-            Some(PayloadVerificationStatus::Irrelevant)
+        let payload_verification_status = match notify_execution_layer {
+            NotifyExecutionLayer::No => Some(PayloadVerificationStatus::Optimistic),
+            NotifyExecutionLayer::Yes => {
+                if is_execution_enabled(state, block.message().body()) {
+                    // Perform the initial stages of payload verification.
+                    //
+                    // We will duplicate these checks again during `per_block_processing`, however these checks
+                    // are cheap and doing them here ensures we protect the execution engine from junk.
+                    partially_verify_execution_payload::<T::EthSpec, FullPayload<T::EthSpec>>(
+                        state,
+                        block.slot(),
+                        block.message().execution_payload()?,
+                        &chain.spec,
+                    )
+                    .map_err(BlockError::PerBlockProcessingError)?;
+                    None
+                } else {
+                    Some(PayloadVerificationStatus::Irrelevant)
+                }
+            }
        };

        Ok(Self {
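Note: a compact model of the dispatch `PayloadNotifier::new` now performs. `No` short-circuits to an optimistic status with no engine round-trip, while `Yes` defers to the usual execution-enabled logic. Sketch only; statuses are strings here rather than the real `PayloadVerificationStatus`:

```rust
#[derive(Clone, Copy, Default)]
enum NotifyEl {
    #[default]
    Yes,
    No,
}

/// Returns a pre-determined status where possible, or `None` when the real
/// `notify_new_payload` call to the execution layer must decide.
fn initial_status(notify: NotifyEl, execution_enabled: bool) -> Option<&'static str> {
    match notify {
        NotifyEl::No => Some("optimistic"), // skip the EL entirely
        NotifyEl::Yes if !execution_enabled => Some("irrelevant"), // pre-merge block
        NotifyEl::Yes => None, // ask the execution engine
    }
}

fn main() {
    assert_eq!(initial_status(NotifyEl::No, true), Some("optimistic"));
    assert_eq!(initial_status(NotifyEl::Yes, false), Some("irrelevant"));
    assert_eq!(initial_status(NotifyEl::Yes, true), None);
}
```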
@@ -360,7 +377,8 @@ pub fn get_execution_payload<
    let spec = &chain.spec;
    let current_epoch = state.current_epoch();
    let is_merge_transition_complete = is_merge_transition_complete(state);
-    let timestamp = compute_timestamp_at_slot(state, spec).map_err(BeaconStateError::from)?;
+    let timestamp =
+        compute_timestamp_at_slot(state, state.slot(), spec).map_err(BeaconStateError::from)?;
    let random = *state.get_randao_mix(current_epoch)?;
    let latest_execution_payload_header_block_hash =
        state.latest_execution_payload_header()?.block_hash();

@@ -64,6 +64,7 @@ pub use canonical_head::{CachedHead, CanonicalHead, CanonicalHeadRwLock};
pub use eth1_chain::{Eth1Chain, Eth1ChainBackend};
pub use events::ServerSentEventHandler;
pub use execution_layer::EngineState;
+pub use execution_payload::NotifyExecutionLayer;
pub use fork_choice::{ExecutionStatus, ForkchoiceUpdateParameters};
pub use metrics::scrape_for_metrics;
pub use parking_lot;
@@ -64,6 +64,11 @@ lazy_static! {
        "beacon_block_processing_state_root_seconds",
        "Time spent calculating the state root when processing a block."
    );
+    pub static ref BLOCK_PROCESSING_POST_EXEC_PROCESSING: Result<Histogram> = try_create_histogram_with_buckets(
+        "beacon_block_processing_post_exec_pre_attestable_seconds",
+        "Time between finishing execution processing and the block becoming attestable",
+        linear_buckets(5e-3, 5e-3, 10)
+    );
    pub static ref BLOCK_PROCESSING_DB_WRITE: Result<Histogram> = try_create_histogram(
        "beacon_block_processing_db_write_seconds",
        "Time spent writing a newly processed block and state to DB"
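Note: `linear_buckets(5e-3, 5e-3, 10)` asks for ten evenly spaced histogram bounds starting at 5 ms. The real helper comes from the Prometheus client library; this re-implementation is a sketch of the expansion for illustration:

```rust
/// Ten buckets at 5 ms spacing: 0.005, 0.010, ..., 0.050 seconds.
fn linear_buckets(start: f64, width: f64, count: usize) -> Vec<f64> {
    (0..count).map(|i| start + width * i as f64).collect()
}

fn main() {
    let buckets = linear_buckets(5e-3, 5e-3, 10);
    assert_eq!(buckets.len(), 10);
    assert!((buckets[0] - 0.005).abs() < 1e-12);
    assert!((buckets[9] - 0.050).abs() < 1e-12);
}
```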
@@ -298,6 +298,27 @@ impl<T: EthSpec> SnapshotCache<T> {
        })
    }

+    /// Borrow the state corresponding to `block_root` if it exists in the cache *unadvanced*.
+    ///
+    /// Care must be taken not to mutate the state in an invalid way. This function should only
+    /// be used to mutate the *caches* of the state, for example the tree hash cache when
+    /// calculating a light client merkle proof.
+    pub fn borrow_unadvanced_state_mut(
+        &mut self,
+        block_root: Hash256,
+    ) -> Option<&mut BeaconState<T>> {
+        self.snapshots
+            .iter_mut()
+            .find(|snapshot| {
+                // If the pre-state exists then state advance has already taken the state for
+                // `block_root` and mutated its tree hash cache. Rather than re-building it while
+                // holding the snapshot cache lock (>1 second), prefer to return `None` from this
+                // function and force the caller to load it from disk.
+                snapshot.beacon_block_root == block_root && snapshot.pre_state.is_none()
+            })
+            .map(|snapshot| &mut snapshot.beacon_state)
+    }
+
    /// If there is a snapshot with `block_root`, clone it and return the clone.
    pub fn get_cloned(
        &self,
@@ -2,7 +2,7 @@ pub use crate::persisted_beacon_chain::PersistedBeaconChain;
pub use crate::{
    beacon_chain::{BEACON_CHAIN_DB_KEY, ETH1_CACHE_DB_KEY, FORK_CHOICE_DB_KEY, OP_POOL_DB_KEY},
    migrate::MigratorConfig,
-    BeaconChainError, ProduceBlockVerification,
+    BeaconChainError, NotifyExecutionLayer, ProduceBlockVerification,
};
use crate::{
    builder::{BeaconChainBuilder, Witness},
@@ -586,7 +586,7 @@ where

    pub fn get_timestamp_at_slot(&self) -> u64 {
        let state = self.get_current_state();
-        compute_timestamp_at_slot(&state, &self.spec).unwrap()
+        compute_timestamp_at_slot(&state, state.slot(), &self.spec).unwrap()
    }

    pub fn get_current_state_and_root(&self) -> (BeaconState<E>, Hash256) {
@@ -1460,7 +1460,12 @@ where
        self.set_current_slot(slot);
        let block_hash: SignedBeaconBlockHash = self
            .chain
-            .process_block(block_root, Arc::new(block), CountUnrealized::True)
+            .process_block(
+                block_root,
+                Arc::new(block),
+                CountUnrealized::True,
+                NotifyExecutionLayer::Yes,
+            )
            .await?
            .into();
        self.chain.recompute_head_at_current_slot().await;
@@ -1477,6 +1482,7 @@ where
                block.canonical_root(),
                Arc::new(block),
                CountUnrealized::True,
+                NotifyExecutionLayer::Yes,
            )
            .await?
            .into();
@@ -109,6 +109,11 @@ impl EpochSummary {
        }
    }

+    pub fn register_block(&mut self, delay: Duration) {
+        self.blocks += 1;
+        Self::update_if_lt(&mut self.block_min_delay, delay);
+    }
+
    pub fn register_unaggregated_attestation(&mut self, delay: Duration) {
        self.attestations += 1;
        Self::update_if_lt(&mut self.attestation_min_delay, delay);
@@ -613,13 +618,6 @@ impl<T: EthSpec> ValidatorMonitor<T> {
        Ok(())
    }

-    fn get_validator_id(&self, validator_index: u64) -> Option<&str> {
-        self.indices
-            .get(&validator_index)
-            .and_then(|pubkey| self.validators.get(pubkey))
-            .map(|validator| validator.id.as_str())
-    }
-
    fn get_validator(&self, validator_index: u64) -> Option<&MonitoredValidator> {
        self.indices
            .get(&validator_index)
@@ -685,7 +683,9 @@ impl<T: EthSpec> ValidatorMonitor<T> {
        block_root: Hash256,
        slot_clock: &S,
    ) {
-        if let Some(id) = self.get_validator_id(block.proposer_index()) {
+        let epoch = block.slot().epoch(T::slots_per_epoch());
+        if let Some(validator) = self.get_validator(block.proposer_index()) {
+            let id = &validator.id;
            let delay = get_block_delay_ms(seen_timestamp, block, slot_clock);

            metrics::inc_counter_vec(&metrics::VALIDATOR_MONITOR_BEACON_BLOCK_TOTAL, &[src, id]);
@@ -704,6 +704,8 @@ impl<T: EthSpec> ValidatorMonitor<T> {
                "src" => src,
                "validator" => %id,
            );
+
+            validator.with_epoch_summary(epoch, |summary| summary.register_block(delay));
        }
    }

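Note: switching from `get_validator_id` to `get_validator` is what makes the new `with_epoch_summary` call possible, since the monitor needs the whole `MonitoredValidator`, not just its id string. The min-delay bookkeeping inside `register_block` presumably behaves like the sketch below; `update_if_lt` itself is not shown in this diff, so its semantics here are an assumption:

```rust
use std::time::Duration;

/// Assumed semantics of `Self::update_if_lt`: keep the smallest delay seen.
fn update_if_lt(current: &mut Option<Duration>, observed: Duration) {
    match current {
        Some(best) if *best <= observed => {} // existing minimum stands
        _ => *current = Some(observed),
    }
}

fn main() {
    let mut min_delay = None;
    update_if_lt(&mut min_delay, Duration::from_millis(800));
    update_if_lt(&mut min_delay, Duration::from_millis(350));
    update_if_lt(&mut min_delay, Duration::from_millis(900));
    assert_eq!(min_delay, Some(Duration::from_millis(350)));
}
```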
|
@@ -3,7 +3,8 @@ use crate::{BeaconChainTypes, BeaconStore};
 use ssz::{Decode, Encode};
 use std::collections::HashMap;
 use std::convert::TryInto;
-use store::{DBColumn, Error as StoreError, StoreItem};
+use std::marker::PhantomData;
+use store::{DBColumn, Error as StoreError, KeyValueStore, KeyValueStoreOp, StoreItem};
 use types::{BeaconState, Hash256, PublicKey, PublicKeyBytes};

 /// Provides a mapping of `validator_index -> validator_publickey`.
@@ -14,21 +15,17 @@ use types::{BeaconState, Hash256, PublicKey, PublicKeyBytes};
 /// 2. To reduce the amount of public key _decompression_ required. A `BeaconState` stores public
 /// keys in compressed form and they are needed in decompressed form for signature verification.
 /// Decompression is expensive when many keys are involved.
-///
-/// The cache has a `backing` that it uses to maintain a persistent, on-disk
-/// copy of itself. This allows it to be restored between process invocations.
 pub struct ValidatorPubkeyCache<T: BeaconChainTypes> {
     pubkeys: Vec<PublicKey>,
     indices: HashMap<PublicKeyBytes, usize>,
     pubkey_bytes: Vec<PublicKeyBytes>,
-    store: BeaconStore<T>,
+    _phantom: PhantomData<T>,
 }

 impl<T: BeaconChainTypes> ValidatorPubkeyCache<T> {
     /// Create a new public key cache using the keys in `state.validators`.
     ///
-    /// Also creates a new persistence file, returning an error if there is already a file at
-    /// `persistence_path`.
+    /// The new cache will be updated with the keys from `state` and immediately written to disk.
     pub fn new(
         state: &BeaconState<T::EthSpec>,
         store: BeaconStore<T>,
@@ -37,10 +34,11 @@ impl<T: BeaconChainTypes> ValidatorPubkeyCache<T> {
             pubkeys: vec![],
             indices: HashMap::new(),
             pubkey_bytes: vec![],
-            store,
+            _phantom: PhantomData,
         };

-        cache.import_new_pubkeys(state)?;
+        let store_ops = cache.import_new_pubkeys(state)?;
+        store.hot_db.do_atomically(store_ops)?;

         Ok(cache)
     }
@@ -69,17 +67,19 @@ impl<T: BeaconChainTypes> ValidatorPubkeyCache<T> {
             pubkeys,
             indices,
             pubkey_bytes,
-            store,
+            _phantom: PhantomData,
         })
     }

     /// Scan the given `state` and add any new validator public keys.
     ///
     /// Does not delete any keys from `self` if they don't appear in `state`.
+    ///
+    /// NOTE: The caller *must* commit the returned I/O batch as part of the block import process.
     pub fn import_new_pubkeys(
         &mut self,
         state: &BeaconState<T::EthSpec>,
-    ) -> Result<(), BeaconChainError> {
+    ) -> Result<Vec<KeyValueStoreOp>, BeaconChainError> {
         if state.validators().len() > self.pubkeys.len() {
             self.import(
                 state.validators()[self.pubkeys.len()..]
@@ -87,12 +87,12 @@ impl<T: BeaconChainTypes> ValidatorPubkeyCache<T> {
                     .map(|v| v.pubkey),
             )
         } else {
-            Ok(())
+            Ok(vec![])
         }
     }

     /// Adds zero or more validators to `self`.
-    fn import<I>(&mut self, validator_keys: I) -> Result<(), BeaconChainError>
+    fn import<I>(&mut self, validator_keys: I) -> Result<Vec<KeyValueStoreOp>, BeaconChainError>
     where
         I: Iterator<Item = PublicKeyBytes> + ExactSizeIterator,
     {
@@ -100,6 +100,7 @@ impl<T: BeaconChainTypes> ValidatorPubkeyCache<T> {
         self.pubkeys.reserve(validator_keys.len());
         self.indices.reserve(validator_keys.len());

+        let mut store_ops = Vec::with_capacity(validator_keys.len());
         for pubkey in validator_keys {
             let i = self.pubkeys.len();

@@ -107,17 +108,11 @@ impl<T: BeaconChainTypes> ValidatorPubkeyCache<T> {
                 return Err(BeaconChainError::DuplicateValidatorPublicKey);
             }

-            // The item is written to disk _before_ it is written into
-            // the local struct.
-            //
-            // This means that a pubkey cache read from disk will always be equivalent to or
-            // _later than_ the cache that was running in the previous instance of Lighthouse.
-            //
-            // The motivation behind this ordering is that we do not want to have states that
-            // reference a pubkey that is not in our cache. However, it's fine to have pubkeys
-            // that are never referenced in a state.
-            self.store
-                .put_item(&DatabasePubkey::key_for_index(i), &DatabasePubkey(pubkey))?;
+            // Stage the new validator key for writing to disk.
+            // It will be committed atomically when the block that introduced it is written to disk.
+            // Notably it is NOT written while the write lock on the cache is held.
+            // See: https://github.com/sigp/lighthouse/issues/2327
+            store_ops.push(DatabasePubkey(pubkey).as_kv_store_op(DatabasePubkey::key_for_index(i)));

             self.pubkeys.push(
                 (&pubkey)
@@ -129,7 +124,7 @@ impl<T: BeaconChainTypes> ValidatorPubkeyCache<T> {
             self.indices.insert(pubkey, i);
         }

-        Ok(())
+        Ok(store_ops)
     }

     /// Get the public key for a validator with index `i`.
@@ -296,9 +291,10 @@ mod test {

         // Add some more keypairs.
         let (state, keypairs) = get_state(12);
-        cache
+        let ops = cache
             .import_new_pubkeys(&state)
             .expect("should import pubkeys");
+        store.hot_db.do_atomically(ops).unwrap();
         check_cache_get(&cache, &keypairs[..]);
         drop(cache);

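The updated test shows the pubkey cache's new contract: `import_new_pubkeys` stages store operations and the caller commits them, so the keys land on disk in the same transaction as the block that introduced them. A self-contained sketch of that staged-write flow, with plain stand-ins for `KeyValueStoreOp`, the hot DB, and the cache:

use std::collections::HashMap;

// Hypothetical stand-ins for the real store types, to illustrate the
// "stage ops, commit atomically with the block" flow from this diff.
struct StoreOp {
    key: usize,
    value: String,
}

#[derive(Default)]
struct HotDb {
    items: HashMap<usize, String>,
}

impl HotDb {
    // Apply every staged op in one batch, mirroring `hot_db.do_atomically(store_ops)`.
    fn do_atomically(&mut self, ops: Vec<StoreOp>) {
        for op in ops {
            self.items.insert(op.key, op.value);
        }
    }
}

// The cache only *stages* writes; committing is the caller's job.
fn import_new_pubkeys(cache: &mut Vec<String>, new_keys: &[&str]) -> Vec<StoreOp> {
    let mut ops = Vec::with_capacity(new_keys.len());
    for key in new_keys {
        ops.push(StoreOp { key: cache.len(), value: key.to_string() });
        cache.push(key.to_string());
    }
    ops
}

fn main() {
    let mut cache = Vec::new();
    let mut db = HotDb::default();
    let ops = import_new_pubkeys(&mut cache, &["0xabc", "0xdef"]);
    db.do_atomically(ops); // caller commits, e.g. alongside the block write
    assert_eq!(db.items.len(), 2);
}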
@@ -3,7 +3,7 @@
 use beacon_chain::test_utils::{
     AttestationStrategy, BeaconChainHarness, BlockStrategy, EphemeralHarnessType,
 };
-use beacon_chain::{BeaconSnapshot, BlockError, ChainSegmentResult};
+use beacon_chain::{BeaconSnapshot, BlockError, ChainSegmentResult, NotifyExecutionLayer};
 use fork_choice::CountUnrealized;
 use lazy_static::lazy_static;
 use logging::test_logger;
@@ -147,14 +147,18 @@ async fn chain_segment_full_segment() {
     // Sneak in a little check to ensure we can process empty chain segments.
     harness
         .chain
-        .process_chain_segment(vec![], CountUnrealized::True)
+        .process_chain_segment(vec![], CountUnrealized::True, NotifyExecutionLayer::Yes)
         .await
         .into_block_error()
         .expect("should import empty chain segment");

     harness
         .chain
-        .process_chain_segment(blocks.clone(), CountUnrealized::True)
+        .process_chain_segment(
+            blocks.clone(),
+            CountUnrealized::True,
+            NotifyExecutionLayer::Yes,
+        )
         .await
         .into_block_error()
         .expect("should import chain segment");
@@ -183,7 +187,11 @@ async fn chain_segment_varying_chunk_size() {
         for chunk in blocks.chunks(*chunk_size) {
             harness
                 .chain
-                .process_chain_segment(chunk.to_vec(), CountUnrealized::True)
+                .process_chain_segment(
+                    chunk.to_vec(),
+                    CountUnrealized::True,
+                    NotifyExecutionLayer::Yes,
+                )
                 .await
                 .into_block_error()
                 .unwrap_or_else(|_| panic!("should import chain segment of len {}", chunk_size));
@@ -219,7 +227,7 @@ async fn chain_segment_non_linear_parent_roots() {
         matches!(
             harness
                 .chain
-                .process_chain_segment(blocks, CountUnrealized::True)
+                .process_chain_segment(blocks, CountUnrealized::True, NotifyExecutionLayer::Yes)
                 .await
                 .into_block_error(),
             Err(BlockError::NonLinearParentRoots)
@@ -239,7 +247,7 @@ async fn chain_segment_non_linear_parent_roots() {
         matches!(
             harness
                 .chain
-                .process_chain_segment(blocks, CountUnrealized::True)
+                .process_chain_segment(blocks, CountUnrealized::True, NotifyExecutionLayer::Yes)
                 .await
                 .into_block_error(),
             Err(BlockError::NonLinearParentRoots)
@@ -270,7 +278,7 @@ async fn chain_segment_non_linear_slots() {
         matches!(
             harness
                 .chain
-                .process_chain_segment(blocks, CountUnrealized::True)
+                .process_chain_segment(blocks, CountUnrealized::True, NotifyExecutionLayer::Yes)
                 .await
                 .into_block_error(),
             Err(BlockError::NonLinearSlots)
@@ -291,7 +299,7 @@ async fn chain_segment_non_linear_slots() {
         matches!(
            harness
                .chain
-               .process_chain_segment(blocks, CountUnrealized::True)
+               .process_chain_segment(blocks, CountUnrealized::True, NotifyExecutionLayer::Yes)
               .await
               .into_block_error(),
            Err(BlockError::NonLinearSlots)
@@ -317,7 +325,7 @@ async fn assert_invalid_signature(
         matches!(
             harness
                 .chain
-                .process_chain_segment(blocks, CountUnrealized::True)
+                .process_chain_segment(blocks, CountUnrealized::True, NotifyExecutionLayer::Yes)
                 .await
                 .into_block_error(),
             Err(BlockError::InvalidSignature)
@@ -339,7 +347,11 @@ async fn assert_invalid_signature(
     // imported prior to this test.
     let _ = harness
         .chain
-        .process_chain_segment(ancestor_blocks, CountUnrealized::True)
+        .process_chain_segment(
+            ancestor_blocks,
+            CountUnrealized::True,
+            NotifyExecutionLayer::Yes,
+        )
         .await;
     harness.chain.recompute_head_at_current_slot().await;

@@ -349,6 +361,7 @@ async fn assert_invalid_signature(
             snapshots[block_index].beacon_block.canonical_root(),
             snapshots[block_index].beacon_block.clone(),
             CountUnrealized::True,
+            NotifyExecutionLayer::Yes,
         )
         .await;
     assert!(
@@ -400,7 +413,11 @@ async fn invalid_signature_gossip_block() {
         .collect();
     harness
         .chain
-        .process_chain_segment(ancestor_blocks, CountUnrealized::True)
+        .process_chain_segment(
+            ancestor_blocks,
+            CountUnrealized::True,
+            NotifyExecutionLayer::Yes,
+        )
        .await
        .into_block_error()
        .expect("should import all blocks prior to the one being tested");
@@ -412,7 +429,8 @@ async fn invalid_signature_gossip_block() {
            .process_block(
                signed_block.canonical_root(),
                Arc::new(signed_block),
-               CountUnrealized::True
+               CountUnrealized::True,
+               NotifyExecutionLayer::Yes,
            )
            .await,
        Err(BlockError::InvalidSignature)
@@ -446,7 +464,7 @@ async fn invalid_signature_block_proposal() {
         matches!(
             harness
                 .chain
-                .process_chain_segment(blocks, CountUnrealized::True)
+                .process_chain_segment(blocks, CountUnrealized::True, NotifyExecutionLayer::Yes)
                 .await
                 .into_block_error(),
             Err(BlockError::InvalidSignature)
@@ -644,7 +662,7 @@ async fn invalid_signature_deposit() {
         !matches!(
             harness
                 .chain
-                .process_chain_segment(blocks, CountUnrealized::True)
+                .process_chain_segment(blocks, CountUnrealized::True, NotifyExecutionLayer::Yes)
                 .await
                 .into_block_error(),
             Err(BlockError::InvalidSignature)
@@ -725,6 +743,7 @@ async fn block_gossip_verification() {
             gossip_verified.block_root,
             gossip_verified,
             CountUnrealized::True,
+            NotifyExecutionLayer::Yes,
         )
         .await
         .expect("should import valid gossip verified block");
@@ -996,6 +1015,7 @@ async fn verify_block_for_gossip_slashing_detection() {
            verified_block.block_root,
            verified_block,
            CountUnrealized::True,
+           NotifyExecutionLayer::Yes,
        )
        .await
        .unwrap();
@@ -1035,6 +1055,7 @@ async fn verify_block_for_gossip_doppelganger_detection() {
            verified_block.block_root,
            verified_block,
            CountUnrealized::True,
+           NotifyExecutionLayer::Yes,
        )
        .await
        .unwrap();
@@ -1180,7 +1201,8 @@ async fn add_base_block_to_altair_chain() {
         .process_block(
             base_block.canonical_root(),
             Arc::new(base_block.clone()),
-            CountUnrealized::True
+            CountUnrealized::True,
+            NotifyExecutionLayer::Yes,
         )
         .await
         .err()
@@ -1195,7 +1217,11 @@ async fn add_base_block_to_altair_chain() {
     assert!(matches!(
         harness
             .chain
-            .process_chain_segment(vec![Arc::new(base_block)], CountUnrealized::True)
+            .process_chain_segment(
+                vec![Arc::new(base_block)],
+                CountUnrealized::True,
+                NotifyExecutionLayer::Yes,
+            )
             .await,
         ChainSegmentResult::Failed {
             imported_blocks: 0,
@@ -1313,7 +1339,8 @@ async fn add_altair_block_to_base_chain() {
         .process_block(
             altair_block.canonical_root(),
             Arc::new(altair_block.clone()),
-            CountUnrealized::True
+            CountUnrealized::True,
+            NotifyExecutionLayer::Yes,
         )
         .await
         .err()
@@ -1328,7 +1355,11 @@ async fn add_altair_block_to_base_chain() {
     assert!(matches!(
         harness
             .chain
-            .process_chain_segment(vec![Arc::new(altair_block)], CountUnrealized::True)
+            .process_chain_segment(
+                vec![Arc::new(altair_block)],
+                CountUnrealized::True,
+                NotifyExecutionLayer::Yes
+            )
             .await,
         ChainSegmentResult::Failed {
             imported_blocks: 0,
@@ -7,8 +7,8 @@ use beacon_chain::otb_verification_service::{
 use beacon_chain::{
     canonical_head::{CachedHead, CanonicalHead},
     test_utils::{BeaconChainHarness, EphemeralHarnessType},
-    BeaconChainError, BlockError, ExecutionPayloadError, StateSkipConfig, WhenSlotSkipped,
-    INVALID_FINALIZED_MERGE_TRANSITION_BLOCK_SHUTDOWN_REASON,
+    BeaconChainError, BlockError, ExecutionPayloadError, NotifyExecutionLayer, StateSkipConfig,
+    WhenSlotSkipped, INVALID_FINALIZED_MERGE_TRANSITION_BLOCK_SHUTDOWN_REASON,
     INVALID_JUSTIFIED_PAYLOAD_SHUTDOWN_REASON,
 };
 use execution_layer::{
@@ -696,6 +696,7 @@ async fn invalidates_all_descendants() {
             fork_block.canonical_root(),
             Arc::new(fork_block),
             CountUnrealized::True,
+            NotifyExecutionLayer::Yes,
         )
         .await
         .unwrap();
@@ -792,6 +793,7 @@ async fn switches_heads() {
             fork_block.canonical_root(),
             Arc::new(fork_block),
             CountUnrealized::True,
+            NotifyExecutionLayer::Yes,
         )
         .await
         .unwrap();
@@ -1040,7 +1042,7 @@ async fn invalid_parent() {

     // Ensure the block built atop an invalid payload is invalid for import.
     assert!(matches!(
-        rig.harness.chain.process_block(block.canonical_root(), block.clone(), CountUnrealized::True).await,
+        rig.harness.chain.process_block(block.canonical_root(), block.clone(), CountUnrealized::True, NotifyExecutionLayer::Yes).await,
         Err(BlockError::ParentExecutionPayloadInvalid { parent_root: invalid_root })
         if invalid_root == parent_root
     ));
@@ -1322,7 +1324,12 @@ async fn build_optimistic_chain(
     for block in blocks {
         rig.harness
             .chain
-            .process_block(block.canonical_root(), block, CountUnrealized::True)
+            .process_block(
+                block.canonical_root(),
+                block,
+                CountUnrealized::True,
+                NotifyExecutionLayer::Yes,
+            )
             .await
             .unwrap();
     }
@@ -1882,6 +1889,7 @@ async fn recover_from_invalid_head_by_importing_blocks() {
             fork_block.canonical_root(),
             fork_block.clone(),
             CountUnrealized::True,
+            NotifyExecutionLayer::Yes,
         )
         .await
         .unwrap();
@@ -7,8 +7,8 @@ use beacon_chain::test_utils::{
 };
 use beacon_chain::{
     historical_blocks::HistoricalBlockError, migrate::MigratorConfig, BeaconChain,
-    BeaconChainError, BeaconChainTypes, BeaconSnapshot, ChainConfig, ServerSentEventHandler,
-    WhenSlotSkipped,
+    BeaconChainError, BeaconChainTypes, BeaconSnapshot, ChainConfig, NotifyExecutionLayer,
+    ServerSentEventHandler, WhenSlotSkipped,
 };
 use fork_choice::CountUnrealized;
 use lazy_static::lazy_static;
@@ -2148,6 +2148,7 @@ async fn weak_subjectivity_sync() {
             full_block.canonical_root(),
             Arc::new(full_block),
             CountUnrealized::True,
+            NotifyExecutionLayer::Yes,
         )
         .await
         .unwrap();
@@ -6,7 +6,7 @@ use beacon_chain::{
         AttestationStrategy, BeaconChainHarness, BlockStrategy, EphemeralHarnessType,
         OP_POOL_DB_KEY,
     },
-    BeaconChain, StateSkipConfig, WhenSlotSkipped,
+    BeaconChain, NotifyExecutionLayer, StateSkipConfig, WhenSlotSkipped,
 };
 use fork_choice::CountUnrealized;
 use lazy_static::lazy_static;
@@ -687,7 +687,8 @@ async fn run_skip_slot_test(skip_slots: u64) {
         .process_block(
             harness_a.chain.head_snapshot().beacon_block_root,
             harness_a.chain.head_snapshot().beacon_block.clone(),
-            CountUnrealized::True
+            CountUnrealized::True,
+            NotifyExecutionLayer::Yes,
         )
         .await
         .unwrap(),
@@ -40,9 +40,6 @@ use types::{
 /// Interval between polling the eth1 node for genesis information.
 pub const ETH1_GENESIS_UPDATE_INTERVAL_MILLIS: u64 = 7_000;

-/// Timeout for checkpoint sync HTTP requests.
-pub const CHECKPOINT_SYNC_HTTP_TIMEOUT: Duration = Duration::from_secs(60);
-
 /// Builds a `Client` instance.
 ///
 /// ## Notes
@@ -273,8 +270,12 @@ where
                     "remote_url" => %url,
                 );

-                let remote =
-                    BeaconNodeHttpClient::new(url, Timeouts::set_all(CHECKPOINT_SYNC_HTTP_TIMEOUT));
+                let remote = BeaconNodeHttpClient::new(
+                    url,
+                    Timeouts::set_all(Duration::from_secs(
+                        config.chain.checkpoint_sync_url_timeout,
+                    )),
+                );
                 let slots_per_epoch = TEthSpec::slots_per_epoch();

                 let deposit_snapshot = if config.sync_eth1_chain {
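The hard-coded `CHECKPOINT_SYNC_HTTP_TIMEOUT` constant gives way to a value read from `config.chain.checkpoint_sync_url_timeout`. A small self-contained sketch of the config-driven timeout; `ChainConfig` here is a hypothetical stand-in for the real config struct:

use std::time::Duration;

// Sketch of the change's idea: the checkpoint-sync HTTP timeout is now read from
// configuration instead of the old hard-coded constant.
struct ChainConfig {
    checkpoint_sync_url_timeout: u64, // seconds
}

fn checkpoint_sync_timeout(config: &ChainConfig) -> Duration {
    Duration::from_secs(config.checkpoint_sync_url_timeout)
}

fn main() {
    let config = ChainConfig { checkpoint_sync_url_timeout: 60 };
    assert_eq!(checkpoint_sync_timeout(&config), Duration::from_secs(60));
}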
@@ -42,7 +42,7 @@ pub enum ClientGenesis {
 /// The core configuration of a Lighthouse beacon node.
 #[derive(Debug, Clone, Serialize, Deserialize)]
 pub struct Config {
-    pub data_dir: PathBuf,
+    data_dir: PathBuf,
     /// Name of the directory inside the data directory where the main "hot" DB is located.
     pub db_name: String,
     /// Path where the freezer database will be located.
@@ -103,6 +103,17 @@ impl Default for Config {
 }

 impl Config {
+    /// Updates the data directory for the Client.
+    pub fn set_data_dir(&mut self, data_dir: PathBuf) {
+        self.data_dir = data_dir.clone();
+        self.http_api.data_dir = data_dir;
+    }
+
+    /// Gets the config's data_dir.
+    pub fn data_dir(&self) -> &PathBuf {
+        &self.data_dir
+    }
+
     /// Get the database path without initialising it.
     pub fn get_db_path(&self) -> PathBuf {
         self.get_data_dir().join(&self.db_name)
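Making `data_dir` private funnels every write through `set_data_dir`, which keeps the HTTP API's copy of the path in sync. A self-contained sketch of that encapsulation pattern; `HttpApiConfig` here is a stand-in for the real nested config:

use std::path::PathBuf;

// Stand-in for the nested HTTP API config that also carries a data_dir.
#[derive(Default)]
struct HttpApiConfig {
    data_dir: PathBuf,
}

#[derive(Default)]
struct Config {
    data_dir: PathBuf, // private: writes must go through set_data_dir
    http_api: HttpApiConfig,
}

impl Config {
    // A single mutation point keeps both copies of the path consistent.
    fn set_data_dir(&mut self, data_dir: PathBuf) {
        self.data_dir = data_dir.clone();
        self.http_api.data_dir = data_dir;
    }

    fn data_dir(&self) -> &PathBuf {
        &self.data_dir
    }
}

fn main() {
    let mut config = Config::default();
    config.set_data_dir(PathBuf::from("/tmp/lighthouse"));
    assert_eq!(config.data_dir(), &PathBuf::from("/tmp/lighthouse"));
    assert_eq!(config.http_api.data_dir, PathBuf::from("/tmp/lighthouse"));
}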
@@ -751,10 +751,11 @@ impl Service {
         let deposit_count_to_finalize = eth1data_to_finalize.deposit_count;
         if deposit_count_to_finalize > already_finalized {
             match self.finalize_deposits(eth1data_to_finalize) {
-                Err(e) => error!(
+                Err(e) => warn!(
                     self.log,
                     "Failed to finalize deposit cache";
                     "error" => ?e,
+                    "info" => "this should resolve on its own"
                 ),
                 Ok(()) => info!(
                     self.log,
@@ -814,9 +815,10 @@ impl Service {
             .block_by_hash(&eth1_data.block_hash)
             .cloned()
             .ok_or_else(|| {
-                Error::FailedToFinalizeDeposit(
-                    "Finalized block not found in block cache".to_string(),
-                )
+                Error::FailedToFinalizeDeposit(format!(
+                    "Finalized block not found in block cache: {:?}",
+                    eth1_data.block_hash
+                ))
             })?;
         self.inner
             .deposit_cache
@@ -13,6 +13,7 @@ pub use engine_api::*;
 pub use engine_api::{http, http::deposit_methods, http::HttpJsonRpc};
 use engines::{Engine, EngineError};
 pub use engines::{EngineState, ForkchoiceState};
+use eth2::types::{builder_bid::SignedBuilderBid, ForkVersionedResponse};
 use fork_choice::ForkchoiceUpdateParameters;
 use lru::LruCache;
 use payload_status::process_payload_status;
@@ -22,11 +23,13 @@ use serde::{Deserialize, Serialize};
 use slog::{crit, debug, error, info, trace, warn, Logger};
 use slot_clock::SlotClock;
 use std::collections::HashMap;
+use std::fmt;
 use std::future::Future;
 use std::io::Write;
 use std::path::PathBuf;
 use std::sync::Arc;
-use std::time::{Duration, SystemTime, UNIX_EPOCH};
+use std::time::{Duration, Instant, SystemTime, UNIX_EPOCH};
+use strum::AsRefStr;
 use task_executor::TaskExecutor;
 use tokio::{
     sync::{Mutex, MutexGuard, RwLock},
@@ -35,12 +38,14 @@ use tokio::{
 use tokio_stream::wrappers::WatchStream;
 #[cfg(feature = "withdrawals")]
 use types::Withdrawal;
-use types::{AbstractExecPayload, Blob, ExecPayload, ExecutionPayloadEip4844, KzgCommitment};
+use types::{AbstractExecPayload, Blob, ExecPayload, KzgCommitment};
 use types::{
     BlindedPayload, BlockType, ChainSpec, Epoch, ExecutionBlockHash, ForkName,
-    ProposerPreparationData, PublicKeyBytes, SignedBeaconBlock, Slot,
+    ProposerPreparationData, PublicKeyBytes, Signature, SignedBeaconBlock, Slot, Uint256,
+};
+use types::{
+    ExecutionPayload, ExecutionPayloadCapella, ExecutionPayloadEip4844, ExecutionPayloadMerge,
 };
-use types::{ExecutionPayload, ExecutionPayloadCapella, ExecutionPayloadMerge};

 mod engine_api;
 mod engines;
@@ -71,6 +76,14 @@ const DEFAULT_SUGGESTED_FEE_RECIPIENT: [u8; 20] =

 const CONFIG_POLL_INTERVAL: Duration = Duration::from_secs(60);

+/// A payload alongside some information about where it came from.
+enum ProvenancedPayload<P> {
+    /// A good ol' fashioned farm-to-table payload from your local EE.
+    Local(P),
+    /// A payload from a builder (e.g. mev-boost).
+    Builder(P),
+}
+
 #[derive(Debug)]
 pub enum Error {
     NoEngine,
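`ProvenancedPayload` exists solely to carry a payload's origin so callers can label metrics and logs before unwrapping it. A minimal self-contained sketch of that tagging pattern; the `println!` calls stand in for the real counter increments:

// Self-contained sketch of the provenance-tagging pattern used by this diff:
// wrap the value, record its source, then unwrap.
enum ProvenancedPayload<P> {
    Local(P),
    Builder(P),
}

fn record_source<P>(payload: ProvenancedPayload<P>) -> P {
    match payload {
        ProvenancedPayload::Local(p) => {
            println!("get_payload source=local"); // real code bumps a counter vec
            p
        }
        ProvenancedPayload::Builder(p) => {
            println!("get_payload source=builder");
            p
        }
    }
}

fn main() {
    let local = record_source(ProvenancedPayload::Local("0xlocal"));
    let builder = record_source(ProvenancedPayload::Builder("0xdeadbeef"));
    assert_eq!((local, builder), ("0xlocal", "0xdeadbeef"));
}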
@@ -78,6 +91,7 @@ pub enum Error {
     ApiError(ApiError),
     Builder(builder_client::Error),
     NoHeaderFromBuilder,
+    CannotProduceHeader,
     EngineError(Box<EngineError>),
     NotSynced,
     ShuttingDown,
@@ -615,7 +629,7 @@ impl<T: EthSpec> ExecutionLayer<T> {
         current_fork: ForkName,
         spec: &ChainSpec,
     ) -> Result<BlockProposalContents<T, Payload>, Error> {
-        match Payload::block_type() {
+        let payload_result = match Payload::block_type() {
             BlockType::Blinded => {
                 let _timer = metrics::start_timer_vec(
                     &metrics::EXECUTION_LAYER_REQUEST_TIMES,
@@ -643,6 +657,40 @@
                     current_fork,
                 )
                 .await
+                .map(ProvenancedPayload::Local)
+            }
+        };
+
+        // Track some metrics and return the result.
+        match payload_result {
+            Ok(ProvenancedPayload::Local(block_proposal_contents)) => {
+                metrics::inc_counter_vec(
+                    &metrics::EXECUTION_LAYER_GET_PAYLOAD_OUTCOME,
+                    &[metrics::SUCCESS],
+                );
+                metrics::inc_counter_vec(
+                    &metrics::EXECUTION_LAYER_GET_PAYLOAD_SOURCE,
+                    &[metrics::LOCAL],
+                );
+                Ok(block_proposal_contents)
+            }
+            Ok(ProvenancedPayload::Builder(block_proposal_contents)) => {
+                metrics::inc_counter_vec(
+                    &metrics::EXECUTION_LAYER_GET_PAYLOAD_OUTCOME,
+                    &[metrics::SUCCESS],
+                );
+                metrics::inc_counter_vec(
+                    &metrics::EXECUTION_LAYER_GET_PAYLOAD_SOURCE,
+                    &[metrics::BUILDER],
+                );
+                Ok(block_proposal_contents)
+            }
+            Err(e) => {
+                metrics::inc_counter_vec(
+                    &metrics::EXECUTION_LAYER_GET_PAYLOAD_OUTCOME,
+                    &[metrics::FAILURE],
+                );
+                Err(e)
             }
         }
     }
@@ -655,7 +703,7 @@
         builder_params: BuilderParams,
         current_fork: ForkName,
         spec: &ChainSpec,
-    ) -> Result<BlockProposalContents<T, Payload>, Error> {
+    ) -> Result<ProvenancedPayload<BlockProposalContents<T, Payload>>, Error> {
         if let Some(builder) = self.builder() {
             let slot = builder_params.slot;
             let pubkey = builder_params.pubkey;
@@ -669,134 +717,213 @@
                 "pubkey" => ?pubkey,
                 "parent_hash" => ?parent_hash,
             );
-            let (relay_result, local_result) = tokio::join!(
-                builder.get_builder_header::<T, Payload>(slot, parent_hash, &pubkey),
-                self.get_full_payload_caching(
-                    parent_hash,
-                    payload_attributes,
-                    forkchoice_update_params,
-                    current_fork,
-                )
+
+            // Wait for the builder *and* local EL to produce a payload (or return an error).
+            let ((relay_result, relay_duration), (local_result, local_duration)) = tokio::join!(
+                timed_future(metrics::GET_BLINDED_PAYLOAD_BUILDER, async {
+                    builder
+                        .get_builder_header::<T, Payload>(slot, parent_hash, &pubkey)
+                        .await
+                }),
+                timed_future(metrics::GET_BLINDED_PAYLOAD_LOCAL, async {
+                    self.get_full_payload_caching::<Payload>(
+                        parent_hash,
+                        payload_attributes,
+                        forkchoice_update_params,
+                        current_fork,
+                    )
+                    .await
+                })
+            );
+
+            info!(
+                self.log(),
+                "Requested blinded execution payload";
+                "relay_fee_recipient" => match &relay_result {
+                    Ok(Some(r)) => format!("{:?}", r.data.message.header.fee_recipient()),
+                    Ok(None) => "empty response".to_string(),
+                    Err(_) => "request failed".to_string(),
+                },
+                "relay_response_ms" => relay_duration.as_millis(),
+                "local_fee_recipient" => match &local_result {
+                    Ok(proposal_contents) => format!("{:?}", proposal_contents.payload().fee_recipient()),
+                    Err(_) => "request failed".to_string()
+                },
+                "local_response_ms" => local_duration.as_millis(),
+                "parent_hash" => ?parent_hash,
             );

             return match (relay_result, local_result) {
                 (Err(e), Ok(local)) => {
                     warn!(
                         self.log(),
-                        "Unable to retrieve a payload from a connected \
-                        builder, falling back to the local execution client: {e:?}"
+                        "Builder error when requesting payload";
+                        "info" => "falling back to local execution client",
+                        "relay_error" => ?e,
+                        "local_block_hash" => ?local.payload().block_hash(),
+                        "parent_hash" => ?parent_hash,
                     );
-                    Ok(local)
+                    Ok(ProvenancedPayload::Local(local))
                 }
                 (Ok(None), Ok(local)) => {
                     info!(
                         self.log(),
-                        "No payload provided by connected builder. \
-                        Attempting to propose through local execution engine"
+                        "Builder did not return a payload";
+                        "info" => "falling back to local execution client",
+                        "local_block_hash" => ?local.payload().block_hash(),
+                        "parent_hash" => ?parent_hash,
                     );
-                    Ok(local)
+                    Ok(ProvenancedPayload::Local(local))
                 }
                 (Ok(Some(relay)), Ok(local)) => {
-                    let local_payload = local.payload();
-                    let is_signature_valid = relay.data.verify_signature(spec);
-                    let header = relay.data.message.header;
+                    let header = &relay.data.message.header;

                     info!(
                         self.log(),
-                        "Received a payload header from the connected builder";
-                        "block_hash" => ?header.block_hash(),
+                        "Received local and builder payloads";
+                        "relay_block_hash" => ?header.block_hash(),
+                        "local_block_hash" => ?local.payload().block_hash(),
+                        "parent_hash" => ?parent_hash,
                     );

-                    let relay_value = relay.data.message.value;
-                    let configured_value = self.inner.builder_profit_threshold;
-                    if relay_value < configured_value {
-                        info!(
-                            self.log(),
-                            "The value offered by the connected builder does not meet \
-                            the configured profit threshold. Using local payload.";
-                            "configured_value" => ?configured_value, "relay_value" => ?relay_value
-                        );
-                        Ok(local)
-                    } else if header.parent_hash() != parent_hash {
-                        warn!(
-                            self.log(),
-                            "Invalid parent hash from connected builder, \
-                            falling back to local execution engine."
-                        );
-                        Ok(local)
-                    } else if header.prev_randao() != payload_attributes.prev_randao() {
-                        warn!(
-                            self.log(),
-                            "Invalid prev randao from connected builder, \
-                            falling back to local execution engine."
-                        );
-                        Ok(local)
-                    } else if header.timestamp() != local_payload.timestamp() {
-                        warn!(
-                            self.log(),
-                            "Invalid timestamp from connected builder, \
-                            falling back to local execution engine."
-                        );
-                        Ok(local)
-                    } else if header.block_number() != local_payload.block_number() {
-                        warn!(
-                            self.log(),
-                            "Invalid block number from connected builder, \
-                            falling back to local execution engine."
-                        );
-                        Ok(local)
-                    } else if !matches!(relay.version, Some(ForkName::Merge)) {
-                        // Once fork information is added to the payload, we will need to
-                        // check that the local and relay payloads match. At this point, if
-                        // we are requesting a payload at all, we have to assume this is
-                        // the Bellatrix fork.
-                        warn!(
-                            self.log(),
-                            "Invalid fork from connected builder, falling \
-                            back to local execution engine."
-                        );
-                        Ok(local)
-                    } else if !is_signature_valid {
-                        let pubkey_bytes = relay.data.message.pubkey;
-                        warn!(self.log(), "Invalid signature for pubkey {pubkey_bytes} on \
-                            bid from connected builder, falling back to local execution engine.");
-                        Ok(local)
-                    } else {
-                        if header.fee_recipient() != payload_attributes.suggested_fee_recipient() {
+                    match verify_builder_bid(
+                        &relay,
+                        parent_hash,
+                        payload_attributes.prev_randao(),
+                        payload_attributes.timestamp(),
+                        Some(local.payload().block_number()),
+                        self.inner.builder_profit_threshold,
+                        spec,
+                    ) {
+                        Ok(()) => Ok(ProvenancedPayload::Builder(
+                            //FIXME(sean) the builder API needs to be updated
+                            // NOTE the comment above was removed in the
+                            // rebase with unstable.. I think it goes
+                            // here now?
+                            BlockProposalContents::Payload(relay.data.message.header),
+                        )),
+                        Err(reason) if !reason.payload_invalid() => {
                             info!(
                                 self.log(),
-                                "Fee recipient from connected builder does \
-                                not match, using it anyways."
+                                "Builder payload ignored";
+                                "info" => "using local payload",
+                                "reason" => %reason,
+                                "relay_block_hash" => ?header.block_hash(),
+                                "parent_hash" => ?parent_hash,
                             );
+                            Ok(ProvenancedPayload::Local(local))
+                        }
+                        Err(reason) => {
+                            metrics::inc_counter_vec(
+                                &metrics::EXECUTION_LAYER_GET_PAYLOAD_BUILDER_REJECTIONS,
+                                &[reason.as_ref().as_ref()],
+                            );
+                            warn!(
+                                self.log(),
+                                "Builder returned invalid payload";
+                                "info" => "using local payload",
+                                "reason" => %reason,
+                                "relay_block_hash" => ?header.block_hash(),
+                                "parent_hash" => ?parent_hash,
+                            );
+                            Ok(ProvenancedPayload::Local(local))
                         }
-                        //FIXME(sean) the builder API needs to be updated
-                        Ok(BlockProposalContents::Payload(header))
                     }
                 }
-                (relay_result, Err(local_error)) => {
-                    warn!(self.log(), "Failure from local execution engine. Attempting to \
-                        propose through connected builder"; "error" => ?local_error);
-                    relay_result
-                        .map_err(Error::Builder)?
-                        .ok_or(Error::NoHeaderFromBuilder)
-                        .map(|d| {
-                            //FIXME(sean) the builder API needs to be updated
-                            BlockProposalContents::Payload(d.data.message.header)
-                        })
+                (Ok(Some(relay)), Err(local_error)) => {
+                    let header = &relay.data.message.header;
+
+                    info!(
+                        self.log(),
+                        "Received builder payload with local error";
+                        "relay_block_hash" => ?header.block_hash(),
+                        "local_error" => ?local_error,
+                        "parent_hash" => ?parent_hash,
+                    );
+
+                    match verify_builder_bid(
+                        &relay,
+                        parent_hash,
+                        payload_attributes.prev_randao(),
+                        payload_attributes.timestamp(),
+                        None,
+                        self.inner.builder_profit_threshold,
+                        spec,
+                    ) {
+                        Ok(()) => Ok(ProvenancedPayload::Builder(
+                            //FIXME(sean) the builder API needs to be updated
+                            // NOTE the comment above was removed in the
+                            // rebase with unstable.. I think it goes
+                            // here now?
+                            BlockProposalContents::Payload(relay.data.message.header),
+                        )),
+                        // If the payload is valid then use it. The local EE failed
+                        // to produce a payload so we have no alternative.
+                        Err(e) if !e.payload_invalid() => Ok(ProvenancedPayload::Builder(
+                            //FIXME(sean) the builder API needs to be updated
+                            // NOTE the comment above was removed in the
+                            // rebase with unstable.. I think it goes
+                            // here now?
+                            BlockProposalContents::Payload(relay.data.message.header),
+                        )),
+                        Err(reason) => {
+                            metrics::inc_counter_vec(
+                                &metrics::EXECUTION_LAYER_GET_PAYLOAD_BUILDER_REJECTIONS,
+                                &[reason.as_ref().as_ref()],
+                            );
+                            crit!(
+                                self.log(),
+                                "Builder returned invalid payload";
+                                "info" => "no local payload either - unable to propose block",
+                                "reason" => %reason,
+                                "relay_block_hash" => ?header.block_hash(),
+                                "parent_hash" => ?parent_hash,
+                            );
+                            Err(Error::CannotProduceHeader)
+                        }
+                    }
+                }
+                (Err(relay_error), Err(local_error)) => {
+                    crit!(
+                        self.log(),
+                        "Unable to produce execution payload";
+                        "info" => "the local EL and builder both failed - unable to propose block",
+                        "relay_error" => ?relay_error,
+                        "local_error" => ?local_error,
+                        "parent_hash" => ?parent_hash,
+                    );
+
+                    Err(Error::CannotProduceHeader)
+                }
+                (Ok(None), Err(local_error)) => {
+                    crit!(
+                        self.log(),
+                        "Unable to produce execution payload";
+                        "info" => "the local EL failed and the builder returned nothing - \
+                            the block proposal will be missed",
+                        "local_error" => ?local_error,
+                        "parent_hash" => ?parent_hash,
+                    );
+
+                    Err(Error::CannotProduceHeader)
                 }
             };
         }
-        ChainHealth::Unhealthy(condition) => {
-            info!(self.log(), "Due to poor chain health the local execution engine will be used \
-                for payload construction. To adjust chain health conditions \
-                Use `builder-fallback` prefixed flags";
-                "failed_condition" => ?condition)
-        }
+        ChainHealth::Unhealthy(condition) => info!(
+            self.log(),
+            "Chain is unhealthy, using local payload";
+            "info" => "this helps protect the network. the --builder-fallback flags \
+                can adjust the expected health conditions.",
+            "failed_condition" => ?condition
+        ),
         // Intentional no-op, so we never attempt builder API proposals pre-merge.
         ChainHealth::PreMerge => (),
-        ChainHealth::Optimistic => info!(self.log(), "The local execution engine is syncing \
-            so the builder network cannot safely be used. Attempting \
-            to build a block with the local execution engine"),
+        ChainHealth::Optimistic => info!(
+            self.log(),
+            "Chain is optimistic; can't build payload";
+            "info" => "the local execution engine is syncing and the builder network \
+                cannot safely be used - unable to propose block"
+        ),
     }
 }
 self.get_full_payload_caching(
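The rewritten `(relay_result, local_result)` match above encodes a fixed fallback policy. A compact, self-contained sketch of that decision table; `BidCheck` stands in for `verify_builder_bid`'s result and the string outcomes for `ProvenancedPayload`:

// Simplified decision table for blinded-payload selection, mirroring the match
// arms in the diff.
enum BidCheck {
    Valid,
    Suboptimal, // e.g. bid below the profit threshold: usable, but not preferred
    Invalid,    // e.g. wrong parent hash: never propose this
}

fn choose(relay: Option<Result<BidCheck, ()>>, local_ok: bool) -> Result<&'static str, ()> {
    match (relay, local_ok) {
        // Builder errored or returned nothing: fall back to the local payload.
        (None | Some(Err(())), true) => Ok("local"),
        // Both produced something: take the builder bid only if it verifies fully.
        (Some(Ok(BidCheck::Valid)), true) => Ok("builder"),
        (Some(Ok(_)), true) => Ok("local"),
        // Local EL failed: a valid-or-merely-suboptimal bid is the only option left.
        (Some(Ok(BidCheck::Valid | BidCheck::Suboptimal)), false) => Ok("builder"),
        // Everything else: the proposal cannot be produced.
        _ => Err(()),
    }
}

fn main() {
    assert_eq!(choose(Some(Ok(BidCheck::Valid)), true), Ok("builder"));
    assert_eq!(choose(Some(Ok(BidCheck::Suboptimal)), true), Ok("local"));
    assert_eq!(choose(Some(Ok(BidCheck::Suboptimal)), false), Ok("builder"));
    assert_eq!(choose(Some(Ok(BidCheck::Invalid)), false), Err(()));
}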
@@ -806,6 +933,7 @@
             current_fork,
         )
         .await
+        .map(ProvenancedPayload::Local)
     }

     /// Get a full payload without caching its result in the execution layer's payload cache.
@@ -1547,18 +1675,223 @@
             "Sending block to builder";
             "root" => ?block_root,
         );

         if let Some(builder) = self.builder() {
-            builder
-                .post_builder_blinded_blocks(block)
-                .await
-                .map_err(Error::Builder)
-                .map(|d| d.data)
+            let (payload_result, duration) =
+                timed_future(metrics::POST_BLINDED_PAYLOAD_BUILDER, async {
+                    builder
+                        .post_builder_blinded_blocks(block)
+                        .await
+                        .map_err(Error::Builder)
+                        .map(|d| d.data)
+                })
+                .await;
+
+            match &payload_result {
+                Ok(payload) => {
+                    metrics::inc_counter_vec(
+                        &metrics::EXECUTION_LAYER_BUILDER_REVEAL_PAYLOAD_OUTCOME,
+                        &[metrics::SUCCESS],
+                    );
+                    info!(
+                        self.log(),
+                        "Builder successfully revealed payload";
+                        "relay_response_ms" => duration.as_millis(),
+                        "block_root" => ?block_root,
+                        "fee_recipient" => ?payload.fee_recipient(),
+                        "block_hash" => ?payload.block_hash(),
+                        "parent_hash" => ?payload.parent_hash()
+                    )
+                }
+                Err(e) => {
+                    metrics::inc_counter_vec(
+                        &metrics::EXECUTION_LAYER_BUILDER_REVEAL_PAYLOAD_OUTCOME,
+                        &[metrics::FAILURE],
+                    );
+                    crit!(
+                        self.log(),
+                        "Builder failed to reveal payload";
+                        "info" => "this relay failure may cause a missed proposal",
+                        "error" => ?e,
+                        "relay_response_ms" => duration.as_millis(),
+                        "block_root" => ?block_root,
+                        "parent_hash" => ?block
+                            .message()
+                            .execution_payload()
+                            .map(|payload| format!("{}", payload.parent_hash()))
+                            .unwrap_or_else(|_| "unknown".to_string())
+                    )
+                }
+            }
+
+            payload_result
         } else {
             Err(Error::NoPayloadBuilder)
         }
     }
 }

+#[derive(AsRefStr)]
+#[strum(serialize_all = "snake_case")]
+enum InvalidBuilderPayload {
+    LowValue {
+        profit_threshold: Uint256,
+        payload_value: Uint256,
+    },
+    ParentHash {
+        payload: ExecutionBlockHash,
+        expected: ExecutionBlockHash,
+    },
+    PrevRandao {
+        payload: Hash256,
+        expected: Hash256,
+    },
+    Timestamp {
+        payload: u64,
+        expected: u64,
+    },
+    BlockNumber {
+        payload: u64,
+        expected: Option<u64>,
+    },
+    Fork {
+        payload: Option<ForkName>,
+        expected: ForkName,
+    },
+    Signature {
+        signature: Signature,
+        pubkey: PublicKeyBytes,
+    },
+}
+
+impl InvalidBuilderPayload {
+    /// Returns `true` if a payload is objectively invalid and should never be included on chain.
+    fn payload_invalid(&self) -> bool {
+        match self {
+            // A low-value payload isn't invalid, it should just be avoided if possible.
+            InvalidBuilderPayload::LowValue { .. } => false,
+            InvalidBuilderPayload::ParentHash { .. } => true,
+            InvalidBuilderPayload::PrevRandao { .. } => true,
+            InvalidBuilderPayload::Timestamp { .. } => true,
+            InvalidBuilderPayload::BlockNumber { .. } => true,
+            InvalidBuilderPayload::Fork { .. } => true,
+            InvalidBuilderPayload::Signature { .. } => true,
+        }
+    }
+}
+
+impl fmt::Display for InvalidBuilderPayload {
+    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+        match self {
+            InvalidBuilderPayload::LowValue {
+                profit_threshold,
+                payload_value,
+            } => write!(
+                f,
+                "payload value of {} does not meet user-configured profit-threshold of {}",
+                payload_value, profit_threshold
+            ),
+            InvalidBuilderPayload::ParentHash { payload, expected } => {
+                write!(f, "payload block hash was {} not {}", payload, expected)
+            }
+            InvalidBuilderPayload::PrevRandao { payload, expected } => {
+                write!(f, "payload prev randao was {} not {}", payload, expected)
+            }
+            InvalidBuilderPayload::Timestamp { payload, expected } => {
+                write!(f, "payload timestamp was {} not {}", payload, expected)
+            }
+            InvalidBuilderPayload::BlockNumber { payload, expected } => {
+                write!(f, "payload block number was {} not {:?}", payload, expected)
+            }
+            InvalidBuilderPayload::Fork { payload, expected } => {
+                write!(f, "payload fork was {:?} not {}", payload, expected)
+            }
+            InvalidBuilderPayload::Signature { signature, pubkey } => write!(
+                f,
+                "invalid payload signature {} for pubkey {}",
+                signature, pubkey
+            ),
+        }
+    }
+}
+
+/// Perform some cursory, non-exhaustive validation of the bid returned from the builder.
+fn verify_builder_bid<T: EthSpec, Payload: AbstractExecPayload<T>>(
+    bid: &ForkVersionedResponse<SignedBuilderBid<T, Payload>>,
+    parent_hash: ExecutionBlockHash,
+    prev_randao: Hash256,
+    timestamp: u64,
+    block_number: Option<u64>,
+    profit_threshold: Uint256,
+    spec: &ChainSpec,
+) -> Result<(), Box<InvalidBuilderPayload>> {
+    let is_signature_valid = bid.data.verify_signature(spec);
+    let header = &bid.data.message.header;
+    let payload_value = bid.data.message.value;
+
+    // Avoid logging values that we can't represent with our Prometheus library.
+    let payload_value_gwei = bid.data.message.value / 1_000_000_000;
+    if payload_value_gwei <= Uint256::from(i64::max_value()) {
+        metrics::set_gauge_vec(
+            &metrics::EXECUTION_LAYER_PAYLOAD_BIDS,
+            &[metrics::BUILDER],
+            payload_value_gwei.low_u64() as i64,
+        );
+    }
+
+    if payload_value < profit_threshold {
+        Err(Box::new(InvalidBuilderPayload::LowValue {
+            profit_threshold,
+            payload_value,
+        }))
+    } else if header.parent_hash() != parent_hash {
+        Err(Box::new(InvalidBuilderPayload::ParentHash {
+            payload: header.parent_hash(),
+            expected: parent_hash,
+        }))
+    } else if header.prev_randao() != prev_randao {
+        Err(Box::new(InvalidBuilderPayload::PrevRandao {
+            payload: header.prev_randao(),
+            expected: prev_randao,
+        }))
+    } else if header.timestamp() != timestamp {
+        Err(Box::new(InvalidBuilderPayload::Timestamp {
+            payload: header.timestamp(),
+            expected: timestamp,
+        }))
+    } else if block_number.map_or(false, |n| n != header.block_number()) {
+        Err(Box::new(InvalidBuilderPayload::BlockNumber {
+            payload: header.block_number(),
+            expected: block_number,
+        }))
+    } else if !matches!(bid.version, Some(ForkName::Merge)) {
+        // Once fork information is added to the payload, we will need to
+        // check that the local and relay payloads match. At this point, if
+        // we are requesting a payload at all, we have to assume this is
+        // the Bellatrix fork.
+        Err(Box::new(InvalidBuilderPayload::Fork {
+            payload: bid.version,
+            expected: ForkName::Merge,
+        }))
+    } else if !is_signature_valid {
+        Err(Box::new(InvalidBuilderPayload::Signature {
+            signature: bid.data.signature.clone(),
+            pubkey: bid.data.message.pubkey,
+        }))
+    } else {
+        Ok(())
+    }
+}
+
+/// A helper function to record the time it takes to execute a future.
+async fn timed_future<F: Future<Output = T>, T>(metric: &str, future: F) -> (T, Duration) {
+    let start = Instant::now();
+    let result = future.await;
+    let duration = start.elapsed();
+    metrics::observe_timer_vec(&metrics::EXECUTION_LAYER_REQUEST_TIMES, &[metric], duration);
+    (result, duration)
+}
+
 #[cfg(test)]
 mod test {
     use super::*;
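The new `timed_future` helper wraps any future and reports its wall-clock duration alongside the result. A runnable usage sketch, assuming the `tokio` crate for the runtime; the metric-recording line from the real helper is omitted so the example stays self-contained:

use std::future::Future;
use std::time::{Duration, Instant};

// A standalone sketch of the `timed_future` pattern from the diff: run a future,
// measure how long it took, and hand both back to the caller.
async fn timed_future<F: Future<Output = T>, T>(future: F) -> (T, Duration) {
    let start = Instant::now();
    let result = future.await;
    (result, start.elapsed())
}

#[tokio::main]
async fn main() {
    let (value, elapsed) = timed_future(async {
        tokio::time::sleep(Duration::from_millis(25)).await;
        42
    })
    .await;
    println!("got {value} in {} ms", elapsed.as_millis());
}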
@@ -4,10 +4,17 @@ pub const HIT: &str = "hit";
 pub const MISS: &str = "miss";
 pub const GET_PAYLOAD: &str = "get_payload";
 pub const GET_BLINDED_PAYLOAD: &str = "get_blinded_payload";
+pub const GET_BLINDED_PAYLOAD_LOCAL: &str = "get_blinded_payload_local";
+pub const GET_BLINDED_PAYLOAD_BUILDER: &str = "get_blinded_payload_builder";
+pub const POST_BLINDED_PAYLOAD_BUILDER: &str = "post_blinded_payload_builder";
 pub const NEW_PAYLOAD: &str = "new_payload";
 pub const FORKCHOICE_UPDATED: &str = "forkchoice_updated";
 pub const GET_TERMINAL_POW_BLOCK_HASH: &str = "get_terminal_pow_block_hash";
 pub const IS_VALID_TERMINAL_POW_BLOCK_HASH: &str = "is_valid_terminal_pow_block_hash";
+pub const LOCAL: &str = "local";
+pub const BUILDER: &str = "builder";
+pub const SUCCESS: &str = "success";
+pub const FAILURE: &str = "failure";

 lazy_static::lazy_static! {
     pub static ref EXECUTION_LAYER_PROPOSER_INSERTED: Result<IntCounter> = try_create_int_counter(
@ -18,9 +25,11 @@ lazy_static::lazy_static! {
|
|||||||
"execution_layer_proposer_data_updated",
|
"execution_layer_proposer_data_updated",
|
||||||
"Count of times new proposer data is supplied",
|
"Count of times new proposer data is supplied",
|
||||||
);
|
);
|
||||||
pub static ref EXECUTION_LAYER_REQUEST_TIMES: Result<HistogramVec> = try_create_histogram_vec(
|
pub static ref EXECUTION_LAYER_REQUEST_TIMES: Result<HistogramVec> =
|
||||||
|
try_create_histogram_vec_with_buckets(
|
||||||
"execution_layer_request_times",
|
"execution_layer_request_times",
|
||||||
"Duration of calls to ELs",
|
"Duration of calls to ELs",
|
||||||
|
decimal_buckets(-2, 1),
|
||||||
&["method"]
|
&["method"]
|
||||||
);
|
);
|
||||||
pub static ref EXECUTION_LAYER_PAYLOAD_ATTRIBUTES_LOOKAHEAD: Result<Histogram> = try_create_histogram(
|
pub static ref EXECUTION_LAYER_PAYLOAD_ATTRIBUTES_LOOKAHEAD: Result<Histogram> = try_create_histogram(
|
||||||
@ -41,4 +50,29 @@ lazy_static::lazy_static! {
|
|||||||
"Indicates the payload status returned for a particular method",
|
"Indicates the payload status returned for a particular method",
|
||||||
&["method", "status"]
|
&["method", "status"]
|
||||||
);
|
);
|
||||||
|
pub static ref EXECUTION_LAYER_GET_PAYLOAD_OUTCOME: Result<IntCounterVec> = try_create_int_counter_vec(
|
||||||
|
"execution_layer_get_payload_outcome",
|
||||||
|
"The success/failure outcomes from calling get_payload",
|
||||||
|
&["outcome"]
|
||||||
|
);
|
||||||
|
pub static ref EXECUTION_LAYER_BUILDER_REVEAL_PAYLOAD_OUTCOME: Result<IntCounterVec> = try_create_int_counter_vec(
|
||||||
|
"execution_layer_builder_reveal_payload_outcome",
|
||||||
|
"The success/failure outcomes from a builder un-blinding a payload",
|
||||||
|
&["outcome"]
|
||||||
|
);
|
||||||
|
pub static ref EXECUTION_LAYER_GET_PAYLOAD_SOURCE: Result<IntCounterVec> = try_create_int_counter_vec(
|
||||||
|
"execution_layer_get_payload_source",
|
||||||
|
"The source of each payload returned from get_payload",
|
||||||
|
&["source"]
|
||||||
|
);
|
||||||
|
pub static ref EXECUTION_LAYER_GET_PAYLOAD_BUILDER_REJECTIONS: Result<IntCounterVec> = try_create_int_counter_vec(
|
||||||
|
"execution_layer_get_payload_builder_rejections",
|
||||||
|
"The reasons why a payload from a builder was rejected",
|
||||||
|
&["reason"]
|
||||||
|
);
|
||||||
|
pub static ref EXECUTION_LAYER_PAYLOAD_BIDS: Result<IntGaugeVec> = try_create_int_gauge_vec(
|
||||||
|
"execution_layer_payload_bids",
|
||||||
|
"The gwei bid value of payloads received by local EEs or builders. Only shows values up to i64::max_value.",
|
||||||
|
&["source"]
|
||||||
|
);
|
||||||
}
|
}
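The new `execution_layer_payload_bids` gauge records bid values per source, clamped to `i64`. Below is a hedged sketch of the equivalent recording done directly against the upstream `prometheus` crate; Lighthouse actually goes through its own `lighthouse_metrics` wrappers, so the exact calls here are illustrative.

```rust
use prometheus::{IntGaugeVec, Opts};

fn main() {
    // Same metric name and label as the hunk above, built on raw prometheus.
    let bids = IntGaugeVec::new(
        Opts::new("execution_layer_payload_bids", "Gwei bid value of payloads"),
        &["source"],
    )
    .unwrap();

    // Clamp a wide integer into i64 range before recording, mirroring the
    // "only shows values up to i64::max_value" caveat in the description.
    let payload_value_gwei: u128 = 32_000_000_000;
    let clamped = i64::try_from(payload_value_gwei).unwrap_or(i64::MAX);
    bids.with_label_values(&["builder"]).set(clamped);

    assert_eq!(bids.with_label_values(&["builder"]).get(), 32_000_000_000);
}
```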
@@ -24,7 +24,7 @@ pub fn new_env() -> Environment<MinimalEthSpec> {
 
 #[test]
 fn basic() {
-    let mut env = new_env();
+    let env = new_env();
     let log = env.core_context().log().clone();
     let mut spec = env.eth2_config().spec.clone();
 
@@ -36,6 +36,9 @@ safe_arith = {path = "../../consensus/safe_arith"}
 task_executor = { path = "../../common/task_executor" }
 lru = "0.7.7"
 tree_hash = "0.4.1"
+sysinfo = "0.26.5"
+system_health = { path = "../../common/system_health" }
+directory = { path = "../../common/directory" }
 
 [dev-dependencies]
 store = { path = "../store" }
@@ -26,12 +26,14 @@ use beacon_chain::{
     BeaconChainTypes, ProduceBlockVerification, WhenSlotSkipped,
 };
 pub use block_id::BlockId;
+use directory::DEFAULT_ROOT_DIR;
 use eth2::types::{
     self as api_types, EndpointVersion, SkipRandaoVerification, ValidatorId, ValidatorStatus,
 };
 use lighthouse_network::{types::SyncState, EnrExt, NetworkGlobals, PeerId, PubsubMessage};
 use lighthouse_version::version_with_platform;
 use network::{NetworkMessage, NetworkSenders, ValidatorSubscriptionMessage};
+use parking_lot::RwLock;
 use serde::{Deserialize, Serialize};
 use slog::{crit, debug, error, info, warn, Logger};
 use slot_clock::SlotClock;
@@ -43,6 +45,8 @@ use std::net::{IpAddr, Ipv4Addr, SocketAddr};
 use std::path::PathBuf;
 use std::pin::Pin;
 use std::sync::Arc;
+use sysinfo::{System, SystemExt};
+use system_health::observe_system_health_bn;
 use tokio::sync::mpsc::{Sender, UnboundedSender};
 use tokio_stream::{wrappers::BroadcastStream, StreamExt};
 use types::{
@@ -110,6 +114,7 @@ pub struct Config {
     pub tls_config: Option<TlsConfig>,
     pub allow_sync_stalled: bool,
     pub spec_fork_name: Option<ForkName>,
+    pub data_dir: PathBuf,
 }
 
 impl Default for Config {
@@ -122,6 +127,7 @@ impl Default for Config {
             tls_config: None,
             allow_sync_stalled: false,
             spec_fork_name: None,
+            data_dir: PathBuf::from(DEFAULT_ROOT_DIR),
         }
     }
 }
@@ -323,6 +329,10 @@ pub fn serve<T: BeaconChainTypes>(
         }
     });
 
+    // Create a `warp` filter for the data_dir.
+    let inner_data_dir = ctx.config.data_dir.clone();
+    let data_dir_filter = warp::any().map(move || inner_data_dir.clone());
+
     // Create a `warp` filter that provides access to the beacon chain.
     let inner_ctx = ctx.clone();
     let chain_filter =
@@ -431,6 +441,37 @@ pub fn serve<T: BeaconChainTypes>(
     let inner_ctx = ctx.clone();
     let log_filter = warp::any().map(move || inner_ctx.log.clone());
 
+    // Create a `warp` filter that provides access to local system information.
+    let system_info = Arc::new(RwLock::new(sysinfo::System::new()));
+    {
+        // grab write access for initialisation
+        let mut system_info = system_info.write();
+        system_info.refresh_disks_list();
+        system_info.refresh_networks_list();
+        system_info.refresh_cpu_specifics(sysinfo::CpuRefreshKind::everything());
+        system_info.refresh_cpu();
+    } // end lock
+
+    let system_info_filter =
+        warp::any()
+            .map(move || system_info.clone())
+            .map(|sysinfo: Arc<RwLock<System>>| {
+                {
+                    // refresh stats
+                    let mut sysinfo_lock = sysinfo.write();
+                    sysinfo_lock.refresh_memory();
+                    sysinfo_lock.refresh_cpu_specifics(sysinfo::CpuRefreshKind::everything());
+                    sysinfo_lock.refresh_cpu();
+                    sysinfo_lock.refresh_system();
+                    sysinfo_lock.refresh_networks();
+                    sysinfo_lock.refresh_disks();
+                } // end lock
+                sysinfo
+            });
+
+    let app_start = std::time::Instant::now();
+    let app_start_filter = warp::any().map(move || app_start);
+
     /*
      *
      * Start of HTTP method definitions.
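The filter above initialises `sysinfo` once (disk and network lists, CPU specifics) and then refreshes only the cheap, frequently changing stats on each request. A standalone sketch of that initialise-then-refresh split, written against sysinfo 0.26 (the version pinned in the Cargo.toml hunk earlier), where most methods live on the `SystemExt` trait:

```rust
use sysinfo::{CpuRefreshKind, System, SystemExt};

fn main() {
    let mut system = System::new();

    // One-off initialisation: enumerate disks/networks and CPU specifics.
    system.refresh_disks_list();
    system.refresh_networks_list();
    system.refresh_cpu_specifics(CpuRefreshKind::everything());

    // Per-request refresh: only the stats that change between calls.
    system.refresh_memory();
    system.refresh_cpu();

    println!("total memory: {} bytes", system.total_memory());
}
```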
@@ -891,6 +932,37 @@ pub fn serve<T: BeaconChainTypes>(
             },
         );
 
+    // GET beacon/states/{state_id}/randao?epoch
+    let get_beacon_state_randao = beacon_states_path
+        .clone()
+        .and(warp::path("randao"))
+        .and(warp::query::<api_types::RandaoQuery>())
+        .and(warp::path::end())
+        .and_then(
+            |state_id: StateId, chain: Arc<BeaconChain<T>>, query: api_types::RandaoQuery| {
+                blocking_json_task(move || {
+                    let (randao, execution_optimistic) = state_id
+                        .map_state_and_execution_optimistic(
+                            &chain,
+                            |state, execution_optimistic| {
+                                let epoch = query.epoch.unwrap_or_else(|| state.current_epoch());
+                                let randao = *state.get_randao_mix(epoch).map_err(|e| {
+                                    warp_utils::reject::custom_bad_request(format!(
+                                        "epoch out of range: {e:?}"
+                                    ))
+                                })?;
+                                Ok((randao, execution_optimistic))
+                            },
+                        )?;
+
+                    Ok(
+                        api_types::GenericResponse::from(api_types::RandaoMix { randao })
+                            .add_execution_optimistic(execution_optimistic),
+                    )
+                })
+            },
+        );
+
     // GET beacon/headers
     //
     // Note: this endpoint only returns information about blocks in the canonical chain. Given that
|
|||||||
})
|
})
|
||||||
});
|
});
|
||||||
|
|
||||||
|
// GET beacon/blinded_blocks/{block_id}
|
||||||
|
let get_beacon_blinded_block = eth_v1
|
||||||
|
.and(warp::path("beacon"))
|
||||||
|
.and(warp::path("blinded_blocks"))
|
||||||
|
.and(block_id_or_err)
|
||||||
|
.and(chain_filter.clone())
|
||||||
|
.and(warp::path::end())
|
||||||
|
.and(warp::header::optional::<api_types::Accept>("accept"))
|
||||||
|
.and_then(
|
||||||
|
|block_id: BlockId,
|
||||||
|
chain: Arc<BeaconChain<T>>,
|
||||||
|
accept_header: Option<api_types::Accept>| {
|
||||||
|
blocking_task(move || {
|
||||||
|
let (block, execution_optimistic) = block_id.blinded_block(&chain)?;
|
||||||
|
let fork_name = block
|
||||||
|
.fork_name(&chain.spec)
|
||||||
|
.map_err(inconsistent_fork_rejection)?;
|
||||||
|
|
||||||
|
match accept_header {
|
||||||
|
Some(api_types::Accept::Ssz) => Response::builder()
|
||||||
|
.status(200)
|
||||||
|
.header("Content-Type", "application/octet-stream")
|
||||||
|
.body(block.as_ssz_bytes().into())
|
||||||
|
.map_err(|e| {
|
||||||
|
warp_utils::reject::custom_server_error(format!(
|
||||||
|
"failed to create response: {}",
|
||||||
|
e
|
||||||
|
))
|
||||||
|
}),
|
||||||
|
_ => {
|
||||||
|
// Post as a V2 endpoint so we return the fork version.
|
||||||
|
execution_optimistic_fork_versioned_response(
|
||||||
|
V2,
|
||||||
|
fork_name,
|
||||||
|
execution_optimistic,
|
||||||
|
block,
|
||||||
|
)
|
||||||
|
.map(|res| warp::reply::json(&res).into_response())
|
||||||
|
}
|
||||||
|
}
|
||||||
|
.map(|resp| add_consensus_version_header(resp, fork_name))
|
||||||
|
})
|
||||||
|
},
|
||||||
|
);
|
||||||
|
|
||||||
/*
|
/*
|
||||||
* beacon/pool
|
* beacon/pool
|
||||||
*/
|
*/
|
||||||
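The blinded-block handler above negotiates between raw SSZ and versioned JSON based on the `Accept` header. A simplified, dependency-free sketch of that negotiation; the `Accept` enum and `respond` function are stand-ins for illustration, not Lighthouse's types:

```rust
enum Accept {
    Ssz,
    Json,
}

// Return (content type, body) for a response, defaulting to JSON.
fn respond(
    accept: Option<Accept>,
    ssz_bytes: Vec<u8>,
    json_body: String,
) -> (&'static str, Vec<u8>) {
    match accept {
        Some(Accept::Ssz) => ("application/octet-stream", ssz_bytes),
        // JSON is the default and carries the fork version in the body.
        _ => ("application/json", json_body.into_bytes()),
    }
}

fn main() {
    let (content_type, _) = respond(Some(Accept::Ssz), vec![1, 2, 3], "{}".into());
    assert_eq!(content_type, "application/octet-stream");
    let (content_type, _) = respond(None, vec![], "{}".into());
    assert_eq!(content_type, "application/json");
}
```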
@@ -2682,7 +2799,12 @@ pub fn serve<T: BeaconChainTypes>(
                         .await
                         .map(|resp| warp::reply::json(&resp))
                         .map_err(|e| {
-                            error!(log, "Error from connected relay"; "error" => ?e);
+                            error!(
+                                log,
+                                "Relay error when registering validator(s)";
+                                "num_registrations" => filtered_registration_data.len(),
+                                "error" => ?e
+                            );
                             // Forward the HTTP status code if we are able to, otherwise fall back
                             // to a server error.
                             if let eth2::Error::ServerMessage(message) = e {
@@ -2796,6 +2918,29 @@ pub fn serve<T: BeaconChainTypes>(
                 })
             });
 
+    // GET lighthouse/ui/health
+    let get_lighthouse_ui_health = warp::path("lighthouse")
+        .and(warp::path("ui"))
+        .and(warp::path("health"))
+        .and(warp::path::end())
+        .and(system_info_filter)
+        .and(app_start_filter)
+        .and(data_dir_filter)
+        .and(network_globals.clone())
+        .and_then(
+            |sysinfo, app_start: std::time::Instant, data_dir, network_globals| {
+                blocking_json_task(move || {
+                    let app_uptime = app_start.elapsed().as_secs() as u64;
+                    Ok(api_types::GenericResponse::from(observe_system_health_bn(
+                        sysinfo,
+                        data_dir,
+                        app_uptime,
+                        network_globals,
+                    )))
+                })
+            },
+        );
+
     // GET lighthouse/syncing
     let get_lighthouse_syncing = warp::path("lighthouse")
         .and(warp::path("syncing"))
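The `lighthouse/ui/health` route above is assembled from small `warp` filters that each inject a cheaply clonable piece of state into the handler. A minimal runnable sketch of that filter-injection pattern; the route body and port are illustrative, not the real handler:

```rust
use std::sync::Arc;
use warp::Filter;

#[tokio::main]
async fn main() {
    let app_start = std::time::Instant::now();
    let data_dir = Arc::new(std::path::PathBuf::from("/tmp/beacon"));

    // Each `warp::any().map(...)` filter injects a clone of shared state.
    let data_dir_filter = warp::any().map(move || data_dir.clone());
    let uptime_filter = warp::any().map(move || app_start);

    let health = warp::path!("lighthouse" / "ui" / "health")
        .and(uptime_filter)
        .and(data_dir_filter)
        .map(|start: std::time::Instant, dir: Arc<std::path::PathBuf>| {
            format!(
                "uptime_secs={} data_dir={}",
                start.elapsed().as_secs(),
                dir.display()
            )
        });

    warp::serve(health).run(([127, 0, 0, 1], 5052)).await;
}
```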
@@ -3214,10 +3359,12 @@ pub fn serve<T: BeaconChainTypes>(
                 .or(get_beacon_state_validators.boxed())
                 .or(get_beacon_state_committees.boxed())
                 .or(get_beacon_state_sync_committees.boxed())
+                .or(get_beacon_state_randao.boxed())
                 .or(get_beacon_headers.boxed())
                 .or(get_beacon_headers_block_id.boxed())
                 .or(get_beacon_block.boxed())
                 .or(get_beacon_block_attestations.boxed())
+                .or(get_beacon_blinded_block.boxed())
                 .or(get_beacon_block_root.boxed())
                 .or(get_beacon_pool_attestations.boxed())
                 .or(get_beacon_pool_attester_slashings.boxed())
@@ -3244,6 +3391,7 @@ pub fn serve<T: BeaconChainTypes>(
                 .or(get_validator_aggregate_attestation.boxed())
                 .or(get_validator_sync_committee_contribution.boxed())
                 .or(get_lighthouse_health.boxed())
+                .or(get_lighthouse_ui_health.boxed())
                 .or(get_lighthouse_syncing.boxed())
                 .or(get_lighthouse_nat.boxed())
                 .or(get_lighthouse_peers.boxed())
@@ -3263,6 +3411,7 @@ pub fn serve<T: BeaconChainTypes>(
                 .or(get_lighthouse_merge_readiness.boxed())
                 .or(get_events.boxed()),
         )
+        .boxed()
         .or(warp::post().and(
             post_beacon_blocks
                 .boxed()
@@ -1,6 +1,8 @@
 use crate::metrics;
 use beacon_chain::validator_monitor::{get_block_delay_ms, timestamp_now};
-use beacon_chain::{BeaconChain, BeaconChainTypes, BlockError, CountUnrealized};
+use beacon_chain::{
+    BeaconChain, BeaconChainTypes, BlockError, CountUnrealized, NotifyExecutionLayer,
+};
 use lighthouse_network::{PubsubMessage, SignedBeaconBlockAndBlobsSidecar};
 use network::NetworkMessage;
 use slog::{crit, error, info, warn, Logger};
@@ -53,7 +55,12 @@ pub async fn publish_block<T: BeaconChainTypes>(
     let block_root = block_root.unwrap_or_else(|| block.canonical_root());
 
     match chain
-        .process_block(block_root, block.clone(), CountUnrealized::True)
+        .process_block(
+            block_root,
+            block.clone(),
+            CountUnrealized::True,
+            NotifyExecutionLayer::Yes,
+        )
         .await
     {
         Ok(root) => {
@@ -2,6 +2,7 @@ use beacon_chain::{
     test_utils::{BeaconChainHarness, EphemeralHarnessType},
     BeaconChain, BeaconChainTypes,
 };
+use directory::DEFAULT_ROOT_DIR;
 use eth2::{BeaconNodeHttpClient, Timeouts};
 use http_api::{Config, Context};
 use lighthouse_network::{
@@ -142,6 +143,7 @@ pub async fn create_api_server_on_port<T: BeaconChainTypes>(
             allow_origin: None,
             tls_config: None,
             allow_sync_stalled: false,
+            data_dir: std::path::PathBuf::from(DEFAULT_ROOT_DIR),
             spec_fork_name: None,
         },
         chain: Some(chain.clone()),
@@ -745,6 +745,36 @@ impl ApiTester {
         self
     }
 
+    pub async fn test_beacon_states_randao(self) -> Self {
+        for state_id in self.interesting_state_ids() {
+            let mut state_opt = state_id
+                .state(&self.chain)
+                .ok()
+                .map(|(state, _execution_optimistic)| state);
+
+            let epoch_opt = state_opt.as_ref().map(|state| state.current_epoch());
+            let result = self
+                .client
+                .get_beacon_states_randao(state_id.0, epoch_opt)
+                .await
+                .unwrap()
+                .map(|res| res.data);
+
+            if result.is_none() && state_opt.is_none() {
+                continue;
+            }
+
+            let state = state_opt.as_mut().expect("result should be none");
+            let randao_mix = state
+                .get_randao_mix(state.slot().epoch(E::slots_per_epoch()))
+                .unwrap();
+
+            assert_eq!(result.unwrap().randao, *randao_mix);
+        }
+
+        self
+    }
+
     pub async fn test_beacon_headers_all_slots(self) -> Self {
         for slot in 0..CHAIN_LENGTH {
             let slot = Slot::from(slot);
@@ -1016,6 +1046,82 @@ impl ApiTester {
         self
     }
 
+    pub async fn test_beacon_blinded_blocks(self) -> Self {
+        for block_id in self.interesting_block_ids() {
+            let expected = block_id
+                .blinded_block(&self.chain)
+                .ok()
+                .map(|(block, _execution_optimistic)| block);
+
+            if let CoreBlockId::Slot(slot) = block_id.0 {
+                if expected.is_none() {
+                    assert!(SKIPPED_SLOTS.contains(&slot.as_u64()));
+                } else {
+                    assert!(!SKIPPED_SLOTS.contains(&slot.as_u64()));
+                }
+            }
+
+            // Check the JSON endpoint.
+            let json_result = self
+                .client
+                .get_beacon_blinded_blocks(block_id.0)
+                .await
+                .unwrap();
+
+            if let (Some(json), Some(expected)) = (&json_result, &expected) {
+                assert_eq!(&json.data, expected, "{:?}", block_id);
+                assert_eq!(
+                    json.version,
+                    Some(expected.fork_name(&self.chain.spec).unwrap())
+                );
+            } else {
+                assert_eq!(json_result, None);
+                assert_eq!(expected, None);
+            }
+
+            // Check the SSZ endpoint.
+            let ssz_result = self
+                .client
+                .get_beacon_blinded_blocks_ssz(block_id.0, &self.chain.spec)
+                .await
+                .unwrap();
+            assert_eq!(ssz_result.as_ref(), expected.as_ref(), "{:?}", block_id);
+
+            // Check that version headers are provided.
+            let url = self
+                .client
+                .get_beacon_blinded_blocks_path(block_id.0)
+                .unwrap();
+
+            let builders: Vec<fn(RequestBuilder) -> RequestBuilder> = vec![
+                |b| b,
+                |b| b.accept(Accept::Ssz),
+                |b| b.accept(Accept::Json),
+                |b| b.accept(Accept::Any),
+            ];
+
+            for req_builder in builders {
+                let raw_res = self
+                    .client
+                    .get_response(url.clone(), req_builder)
+                    .await
+                    .optional()
+                    .unwrap();
+                if let (Some(raw_res), Some(expected)) = (&raw_res, &expected) {
+                    assert_eq!(
+                        raw_res.fork_name_from_header().unwrap(),
+                        Some(expected.fork_name(&self.chain.spec).unwrap())
+                    );
+                } else {
+                    assert!(raw_res.is_none());
+                    assert_eq!(expected, None);
+                }
+            }
+        }
+
+        self
+    }
+
     pub async fn test_beacon_blocks_attestations(self) -> Self {
         for block_id in self.interesting_block_ids() {
             let result = self
@@ -3696,6 +3802,8 @@ async fn beacon_get() {
         .await
         .test_beacon_states_validator_id()
         .await
+        .test_beacon_states_randao()
+        .await
         .test_beacon_headers_all_slots()
         .await
         .test_beacon_headers_all_parents()
@@ -3704,6 +3812,8 @@ async fn beacon_get() {
         .await
         .test_beacon_blocks()
         .await
+        .test_beacon_blinded_blocks()
+        .await
         .test_beacon_blocks_attestations()
         .await
         .test_beacon_blocks_root()
@@ -130,6 +130,9 @@ pub struct Config {
 
     /// Whether metrics are enabled.
     pub metrics_enabled: bool,
+
+    /// Whether light client protocols should be enabled.
+    pub enable_light_client_server: bool,
 }
 
 impl Default for Config {
@@ -207,6 +210,7 @@ impl Default for Config {
             shutdown_after_sync: false,
             topics: Vec::new(),
             metrics_enabled: false,
+            enable_light_client_server: false,
         }
    }
}
@@ -284,9 +288,11 @@ impl From<u8> for NetworkLoad {
 /// Return a Lighthouse specific `GossipsubConfig` where the `message_id_fn` depends on the current fork.
 pub fn gossipsub_config(network_load: u8, fork_context: Arc<ForkContext>) -> GossipsubConfig {
     // The function used to generate a gossipsub message id
-    // We use the first 8 bytes of SHA256(data) for content addressing
-    let fast_gossip_message_id =
-        |message: &RawGossipsubMessage| FastMessageId::from(&Sha256::digest(&message.data)[..8]);
+    // We use the first 8 bytes of SHA256(topic, data) for content addressing
+    let fast_gossip_message_id = |message: &RawGossipsubMessage| {
+        let data = [message.topic.as_str().as_bytes(), &message.data].concat();
+        FastMessageId::from(&Sha256::digest(data)[..8])
+    };
     fn prefix(
         prefix: [u8; 4],
         message: &GossipsubMessage,
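The change above mixes the gossipsub topic into the fast message-id hash, so identical bytes published on different topics no longer share an id. A self-contained sketch of the new scheme using the `sha2` crate; gossipsub's `FastMessageId` wrapper is elided:

```rust
use sha2::{Digest, Sha256};

// First 8 bytes of SHA256(topic || data), as in the diff above.
fn fast_message_id(topic: &str, data: &[u8]) -> [u8; 8] {
    let preimage = [topic.as_bytes(), data].concat();
    let digest = Sha256::digest(&preimage);
    digest[..8].try_into().expect("digest is 32 bytes")
}

fn main() {
    let id_a = fast_message_id("/eth2/xxx/beacon_block/ssz_snappy", b"payload");
    let id_b = fast_message_id("/eth2/xxx/beacon_attestation_0/ssz_snappy", b"payload");
    // Same payload, different topics => different ids under the new scheme.
    assert_ne!(id_a, id_b);
}
```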
@@ -834,6 +834,17 @@ impl<TSpec: EthSpec> Discovery<TSpec> {
 
         // Map each subnet query's min_ttl to the set of ENR's returned for that subnet.
         queries.iter().for_each(|query| {
+            let query_str = match query.subnet {
+                Subnet::Attestation(_) => "attestation",
+                Subnet::SyncCommittee(_) => "sync_committee",
+            };
+
+            if let Some(v) = metrics::get_int_counter(
+                &metrics::TOTAL_SUBNET_QUERIES,
+                &[query_str],
+            ) {
+                v.inc();
+            }
             // A subnet query has completed. Add back to the queue, incrementing retries.
             self.add_subnet_query(query.subnet, query.min_ttl, query.retries + 1);
 
@@ -845,6 +856,12 @@ impl<TSpec: EthSpec> Discovery<TSpec> {
                 .filter(|enr| subnet_predicate(enr))
                 .map(|enr| enr.peer_id())
                 .for_each(|peer_id| {
+                    if let Some(v) = metrics::get_int_counter(
+                        &metrics::SUBNET_PEERS_FOUND,
+                        &[query_str],
+                    ) {
+                        v.inc();
+                    }
                     let other_min_ttl = mapped_results.get_mut(&peer_id);
 
                     // map peer IDs to the min_ttl furthest in the future
@@ -112,6 +112,19 @@ lazy_static! {
         &["client"]
     );
 
+    pub static ref SUBNET_PEERS_FOUND: Result<IntCounterVec> =
+        try_create_int_counter_vec(
+            "discovery_query_peers_found",
+            "Total number of peers found in attestation subnets and sync subnets",
+            &["type"]
+        );
+    pub static ref TOTAL_SUBNET_QUERIES: Result<IntCounterVec> =
+        try_create_int_counter_vec(
+            "discovery_total_queries",
+            "Total number of discovery subnet queries",
+            &["type"]
+        );
+
     /*
      * Inbound/Outbound peers
      */
@@ -405,7 +405,7 @@ impl<TSpec: EthSpec> PeerManager<TSpec> {
         debug!(self.log, "Identified Peer"; "peer" => %peer_id,
             "protocol_version" => &info.protocol_version,
            "agent_version" => &info.agent_version,
-            "listening_ addresses" => ?info.listen_addrs,
+            "listening_addresses" => ?info.listen_addrs,
             "observed_address" => ?info.observed_addr,
             "protocols" => ?info.protocols
         );
@@ -502,6 +502,7 @@ impl<TSpec: EthSpec> PeerManager<TSpec> {
                 Protocol::BlocksByRange => PeerAction::MidToleranceError,
                 Protocol::BlocksByRoot => PeerAction::MidToleranceError,
                 Protocol::BlobsByRange => PeerAction::MidToleranceError,
+                Protocol::LightClientBootstrap => PeerAction::LowToleranceError,
                 Protocol::Goodbye => PeerAction::LowToleranceError,
                 Protocol::MetaData => PeerAction::LowToleranceError,
                 Protocol::Status => PeerAction::LowToleranceError,
@@ -519,6 +520,7 @@ impl<TSpec: EthSpec> PeerManager<TSpec> {
                 Protocol::BlocksByRoot => return,
                 Protocol::BlobsByRange => return,
                 Protocol::Goodbye => return,
+                Protocol::LightClientBootstrap => return,
                 Protocol::MetaData => PeerAction::LowToleranceError,
                 Protocol::Status => PeerAction::LowToleranceError,
             }
@@ -534,6 +536,7 @@ impl<TSpec: EthSpec> PeerManager<TSpec> {
                 Protocol::BlocksByRange => PeerAction::MidToleranceError,
                 Protocol::BlocksByRoot => PeerAction::MidToleranceError,
                 Protocol::BlobsByRange => PeerAction::MidToleranceError,
+                Protocol::LightClientBootstrap => return,
                 Protocol::Goodbye => return,
                 Protocol::MetaData => return,
                 Protocol::Status => return,
@@ -139,7 +139,7 @@ impl<TSpec: EthSpec> NetworkBehaviour for PeerManager<TSpec> {
             // TODO: directly emit the ban event?
             BanResult::BadScore => {
                 // This is a faulty state
-                error!(self.log, "Connected to a banned peer, re-banning"; "peer_id" => %peer_id);
+                error!(self.log, "Connected to a banned peer. Re-banning"; "peer_id" => %peer_id);
                 // Reban the peer
                 self.goodbye_peer(peer_id, GoodbyeReason::Banned, ReportSource::PeerManager);
                 return;
@@ -15,10 +15,11 @@ use std::io::{Read, Write};
 use std::marker::PhantomData;
 use std::sync::Arc;
 use tokio_util::codec::{Decoder, Encoder};
+use types::light_client_bootstrap::LightClientBootstrap;
 use types::{
-    BlobsSidecar, EthSpec, ForkContext, ForkName, SignedBeaconBlock, SignedBeaconBlockAltair,
-    SignedBeaconBlockBase, SignedBeaconBlockCapella, SignedBeaconBlockEip4844,
-    SignedBeaconBlockMerge,
+    BlobsSidecar, EthSpec, ForkContext, ForkName, Hash256, SignedBeaconBlock,
+    SignedBeaconBlockAltair, SignedBeaconBlockBase, SignedBeaconBlockCapella,
+    SignedBeaconBlockEip4844, SignedBeaconBlockMerge,
 };
 use unsigned_varint::codec::Uvi;
 
@@ -72,6 +73,7 @@ impl<TSpec: EthSpec> Encoder<RPCCodedResponse<TSpec>> for SSZSnappyInboundCodec<
             RPCResponse::BlocksByRange(res) => res.as_ssz_bytes(),
             RPCResponse::BlocksByRoot(res) => res.as_ssz_bytes(),
             RPCResponse::BlobsByRange(res) => res.as_ssz_bytes(),
+            RPCResponse::LightClientBootstrap(res) => res.as_ssz_bytes(),
             RPCResponse::Pong(res) => res.data.as_ssz_bytes(),
             RPCResponse::MetaData(res) =>
             // Encode the correct version of the MetaData response based on the negotiated version.
@@ -233,6 +235,7 @@ impl<TSpec: EthSpec> Encoder<OutboundRequest<TSpec>> for SSZSnappyOutboundCodec<
             OutboundRequest::BlobsByRange(req) => req.as_ssz_bytes(),
             OutboundRequest::Ping(req) => req.as_ssz_bytes(),
             OutboundRequest::MetaData(_) => return Ok(()), // no metadata to encode
+            OutboundRequest::LightClientBootstrap(req) => req.as_ssz_bytes(),
         };
         // SSZ encoded bytes should be within `max_packet_size`
         if bytes.len() > self.max_packet_size {
@@ -486,7 +489,11 @@ fn handle_v1_request<T: EthSpec>(
         Protocol::Ping => Ok(Some(InboundRequest::Ping(Ping {
             data: u64::from_ssz_bytes(decoded_buffer)?,
         }))),
+        Protocol::LightClientBootstrap => Ok(Some(InboundRequest::LightClientBootstrap(
+            LightClientBootstrapRequest {
+                root: Hash256::from_ssz_bytes(decoded_buffer)?,
+            },
+        ))),
         // MetaData requests return early from InboundUpgrade and do not reach the decoder.
         // Handle this case just for completeness.
         Protocol::MetaData => {
@@ -562,6 +569,9 @@ fn handle_v1_response<T: EthSpec>(
         Protocol::MetaData => Ok(Some(RPCResponse::MetaData(MetaData::V1(
             MetaDataV1::from_ssz_bytes(decoded_buffer)?,
         )))),
+        Protocol::LightClientBootstrap => Ok(Some(RPCResponse::LightClientBootstrap(
+            LightClientBootstrap::from_ssz_bytes(decoded_buffer)?,
+        ))),
     }
 }
 
@@ -923,6 +933,9 @@ mod tests {
             OutboundRequest::MetaData(metadata) => {
                 assert_eq!(decoded, InboundRequest::MetaData(metadata))
             }
+            OutboundRequest::LightClientBootstrap(bootstrap) => {
+                assert_eq!(decoded, InboundRequest::LightClientBootstrap(bootstrap))
+            }
         }
     }
 }
@@ -285,7 +285,7 @@ where
         } else {
             if !matches!(response, RPCCodedResponse::StreamTermination(..)) {
                 // the stream is closed after sending the expected number of responses
-                trace!(self.log, "Inbound stream has expired, response not sent";
+                trace!(self.log, "Inbound stream has expired. Response not sent";
                     "response" => %response, "id" => inbound_id);
             }
             return;
@@ -12,8 +12,10 @@ use std::ops::Deref;
 use std::sync::Arc;
 use strum::IntoStaticStr;
 use superstruct::superstruct;
-use types::blobs_sidecar::BlobsSidecar;
-use types::{Epoch, EthSpec, Hash256, SignedBeaconBlock, Slot};
+use types::{
+    blobs_sidecar::BlobsSidecar, light_client_bootstrap::LightClientBootstrap, Epoch, EthSpec,
+    Hash256, SignedBeaconBlock, Slot,
+};
 
 /// Maximum number of blocks in a single request.
 pub type MaxRequestBlocks = U1024;
@@ -260,6 +262,9 @@ pub enum RPCResponse<T: EthSpec> {
     /// A response to a get BLOBS_BY_RANGE request
     BlobsByRange(Arc<BlobsSidecar<T>>),
 
+    /// A response to a get LIGHTCLIENT_BOOTSTRAP request.
+    LightClientBootstrap(LightClientBootstrap<T>),
+
     /// A PONG response to a PING request.
     Pong(Ping),
 
@@ -293,6 +298,12 @@ pub enum RPCCodedResponse<T: EthSpec> {
     StreamTermination(ResponseTermination),
 }
 
+/// Request a light_client_bootstrap for light client peers.
+#[derive(Encode, Decode, Clone, Debug, PartialEq)]
+pub struct LightClientBootstrapRequest {
+    pub root: Hash256,
+}
+
 /// The code assigned to an erroneous `RPCResponse`.
 #[derive(Debug, Clone, Copy, PartialEq, IntoStaticStr)]
 #[strum(serialize_all = "snake_case")]
@@ -342,6 +353,7 @@ impl<T: EthSpec> RPCCodedResponse<T> {
                 RPCResponse::BlobsByRange(_) => true,
                 RPCResponse::Pong(_) => false,
                 RPCResponse::MetaData(_) => false,
+                RPCResponse::LightClientBootstrap(_) => false,
             },
             RPCCodedResponse::Error(_, _) => true,
             // Stream terminations are part of responses that have chunks
@@ -377,6 +389,7 @@ impl<T: EthSpec> RPCResponse<T> {
             RPCResponse::BlobsByRange(_) => Protocol::BlobsByRange,
             RPCResponse::Pong(_) => Protocol::Ping,
             RPCResponse::MetaData(_) => Protocol::MetaData,
+            RPCResponse::LightClientBootstrap(_) => Protocol::LightClientBootstrap,
         }
     }
 }
@@ -415,6 +428,9 @@ impl<T: EthSpec> std::fmt::Display for RPCResponse<T> {
             }
             RPCResponse::Pong(ping) => write!(f, "Pong: {}", ping.data),
             RPCResponse::MetaData(metadata) => write!(f, "Metadata: {}", metadata.seq_number()),
+            RPCResponse::LightClientBootstrap(bootstrap) => {
+                write!(f, "LightClientBootstrap Slot: {}", bootstrap.header.slot)
+            }
         }
     }
 }
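Because the new `LightClientBootstrapRequest` is a single fixed-size field, its SSZ encoding is exactly the raw 32 root bytes, which is what makes its `ssz_fixed_len` usable as both the minimum and maximum RPC limit. A std-only sketch with hand-rolled stand-ins for the SSZ derive:

```rust
#[derive(Debug, PartialEq)]
struct LightClientBootstrapRequest {
    root: [u8; 32],
}

impl LightClientBootstrapRequest {
    // A fixed-size SSZ field serialises to its raw bytes: no offsets needed.
    fn as_ssz_bytes(&self) -> Vec<u8> {
        self.root.to_vec()
    }

    fn from_ssz_bytes(bytes: &[u8]) -> Result<Self, String> {
        let root: [u8; 32] = bytes
            .try_into()
            .map_err(|_| format!("expected 32 bytes, got {}", bytes.len()))?;
        Ok(Self { root })
    }
}

fn main() {
    let req = LightClientBootstrapRequest { root: [7; 32] };
    let bytes = req.as_ssz_bytes();
    assert_eq!(bytes.len(), 32); // fixed length: min and max limits coincide
    assert_eq!(LightClientBootstrapRequest::from_ssz_bytes(&bytes).unwrap(), req);
}
```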
@@ -27,8 +27,8 @@ pub(crate) use protocol::{InboundRequest, RPCProtocol};
 use crate::rpc::methods::MAX_REQUEST_BLOBS_SIDECARS;
 pub use handler::SubstreamId;
 pub use methods::{
-    BlocksByRangeRequest, BlocksByRootRequest, GoodbyeReason, MaxRequestBlocks,
-    RPCResponseErrorCode, ResponseTermination, StatusMessage, MAX_REQUEST_BLOCKS,
+    BlocksByRangeRequest, BlocksByRootRequest, GoodbyeReason, LightClientBootstrapRequest,
+    MaxRequestBlocks, RPCResponseErrorCode, ResponseTermination, StatusMessage, MAX_REQUEST_BLOCKS,
 };
 pub(crate) use outbound::OutboundRequest;
 pub use protocol::{max_rpc_size, Protocol, RPCError};
@@ -109,18 +109,24 @@ pub struct RPC<Id: ReqId, TSpec: EthSpec> {
     /// Queue of events to be processed.
     events: Vec<NetworkBehaviourAction<RPCMessage<Id, TSpec>, RPCHandler<Id, TSpec>>>,
     fork_context: Arc<ForkContext>,
+    enable_light_client_server: bool,
     /// Slog logger for RPC behaviour.
     log: slog::Logger,
 }
 
 impl<Id: ReqId, TSpec: EthSpec> RPC<Id, TSpec> {
-    pub fn new(fork_context: Arc<ForkContext>, log: slog::Logger) -> Self {
+    pub fn new(
+        fork_context: Arc<ForkContext>,
+        enable_light_client_server: bool,
+        log: slog::Logger,
+    ) -> Self {
         let log = log.new(o!("service" => "libp2p_rpc"));
         let limiter = RPCRateLimiterBuilder::new()
             .n_every(Protocol::MetaData, 2, Duration::from_secs(5))
             .n_every(Protocol::Ping, 2, Duration::from_secs(10))
             .n_every(Protocol::Status, 5, Duration::from_secs(15))
             .one_every(Protocol::Goodbye, Duration::from_secs(10))
+            .one_every(Protocol::LightClientBootstrap, Duration::from_secs(10))
             .n_every(
                 Protocol::BlocksByRange,
                 methods::MAX_REQUEST_BLOCKS,
@@ -138,6 +144,7 @@ impl<Id: ReqId, TSpec: EthSpec> RPC<Id, TSpec> {
             limiter,
             events: Vec::new(),
             fork_context,
+            enable_light_client_server,
             log,
         }
     }
@@ -194,6 +201,7 @@ where
             RPCProtocol {
                 fork_context: self.fork_context.clone(),
                 max_rpc_size: max_rpc_size(&self.fork_context),
+                enable_light_client_server: self.enable_light_client_server,
                 phantom: PhantomData,
             },
             (),
@@ -39,6 +39,7 @@ pub enum OutboundRequest<TSpec: EthSpec> {
     BlocksByRange(OldBlocksByRangeRequest),
     BlocksByRoot(BlocksByRootRequest),
     BlobsByRange(BlobsByRangeRequest),
+    LightClientBootstrap(LightClientBootstrapRequest),
     Ping(Ping),
     MetaData(PhantomData<TSpec>),
 }
@@ -90,9 +91,12 @@ impl<TSpec: EthSpec> OutboundRequest<TSpec> {
                 ProtocolId::new(Protocol::MetaData, Version::V2, Encoding::SSZSnappy),
                 ProtocolId::new(Protocol::MetaData, Version::V1, Encoding::SSZSnappy),
             ],
+            // Note: This match arm is technically unreachable as we only respond to light client requests
+            // that we generate from the beacon state.
+            // We do not make light client rpc requests from the beacon node
+            OutboundRequest::LightClientBootstrap(_) => vec![],
         }
     }
 
     /* These functions are used in the handler for stream management */
 
     /// Number of responses expected for this request.
@@ -105,6 +109,7 @@ impl<TSpec: EthSpec> OutboundRequest<TSpec> {
             OutboundRequest::BlobsByRange(req) => req.count,
             OutboundRequest::Ping(_) => 1,
             OutboundRequest::MetaData(_) => 1,
+            OutboundRequest::LightClientBootstrap(_) => 1,
         }
     }
 
@@ -118,6 +123,7 @@ impl<TSpec: EthSpec> OutboundRequest<TSpec> {
             OutboundRequest::BlobsByRange(_) => Protocol::BlobsByRange,
             OutboundRequest::Ping(_) => Protocol::Ping,
             OutboundRequest::MetaData(_) => Protocol::MetaData,
+            OutboundRequest::LightClientBootstrap(_) => Protocol::LightClientBootstrap,
         }
     }
 
@@ -130,6 +136,7 @@ impl<TSpec: EthSpec> OutboundRequest<TSpec> {
             OutboundRequest::BlocksByRange(_) => ResponseTermination::BlocksByRange,
             OutboundRequest::BlocksByRoot(_) => ResponseTermination::BlocksByRoot,
             OutboundRequest::BlobsByRange(_) => ResponseTermination::BlobsByRange,
+            OutboundRequest::LightClientBootstrap(_) => unreachable!(),
             OutboundRequest::Status(_) => unreachable!(),
             OutboundRequest::Goodbye(_) => unreachable!(),
             OutboundRequest::Ping(_) => unreachable!(),
@@ -188,6 +195,9 @@ impl<TSpec: EthSpec> std::fmt::Display for OutboundRequest<TSpec> {
             OutboundRequest::BlobsByRange(req) => write!(f, "Blobs by range: {:?}", req),
             OutboundRequest::Ping(ping) => write!(f, "Ping: {}", ping.data),
             OutboundRequest::MetaData(_) => write!(f, "MetaData request"),
+            OutboundRequest::LightClientBootstrap(bootstrap) => {
+                write!(f, "Lightclient Bootstrap: {}", bootstrap.root)
+            }
         }
     }
 }
@@ -185,6 +185,8 @@ pub enum Protocol {
     Ping,
     /// The `MetaData` protocol name.
     MetaData,
+    /// The `LightClientBootstrap` protocol name.
+    LightClientBootstrap,
 }
 
 /// RPC Versions
@@ -212,6 +214,7 @@ impl std::fmt::Display for Protocol {
             Protocol::BlobsByRange => "blobs_sidecars_by_range",
             Protocol::Ping => "ping",
             Protocol::MetaData => "metadata",
+            Protocol::LightClientBootstrap => "light_client_bootstrap",
         };
         f.write_str(repr)
     }
@@ -240,6 +243,7 @@ impl std::fmt::Display for Version {
 pub struct RPCProtocol<TSpec: EthSpec> {
     pub fork_context: Arc<ForkContext>,
     pub max_rpc_size: usize,
+    pub enable_light_client_server: bool,
     pub phantom: PhantomData<TSpec>,
 }
 
@@ -249,7 +253,7 @@ impl<TSpec: EthSpec> UpgradeInfo for RPCProtocol<TSpec> {
 
     /// The list of supported RPC protocols for Lighthouse.
     fn protocol_info(&self) -> Self::InfoIter {
-        vec![
+        let mut supported_protocols = vec![
             ProtocolId::new(Protocol::Status, Version::V1, Encoding::SSZSnappy),
             ProtocolId::new(Protocol::Goodbye, Version::V1, Encoding::SSZSnappy),
             // V2 variants have higher preference then V1
@@ -260,7 +264,15 @@ impl<TSpec: EthSpec> UpgradeInfo for RPCProtocol<TSpec> {
             ProtocolId::new(Protocol::Ping, Version::V1, Encoding::SSZSnappy),
             ProtocolId::new(Protocol::MetaData, Version::V2, Encoding::SSZSnappy),
             ProtocolId::new(Protocol::MetaData, Version::V1, Encoding::SSZSnappy),
-        ]
+        ];
+        if self.enable_light_client_server {
+            supported_protocols.push(ProtocolId::new(
+                Protocol::LightClientBootstrap,
+                Version::V1,
+                Encoding::SSZSnappy,
+            ));
+        }
+        supported_protocols
     }
 }
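`protocol_info` now builds the advertised protocol list dynamically, so the bootstrap protocol is only offered when the light-client server flag is set. A compact sketch of that conditional advertisement; the protocol-id strings follow the usual `/eth2/beacon_chain/req/...` shape but are written out by hand here as illustrative stand-ins:

```rust
// Plain-string stand-in for libp2p's ProtocolId list.
fn protocol_info(enable_light_client_server: bool) -> Vec<&'static str> {
    let mut supported = vec![
        "/eth2/beacon_chain/req/status/1/ssz_snappy",
        "/eth2/beacon_chain/req/goodbye/1/ssz_snappy",
        "/eth2/beacon_chain/req/metadata/2/ssz_snappy",
    ];
    if enable_light_client_server {
        supported.push("/eth2/beacon_chain/req/light_client_bootstrap/1/ssz_snappy");
    }
    supported
}

fn main() {
    assert_eq!(protocol_info(false).len(), 3);
    assert!(protocol_info(true)
        .contains(&"/eth2/beacon_chain/req/light_client_bootstrap/1/ssz_snappy"));
}
```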
@@ -326,6 +338,10 @@ impl ProtocolId {
                 <Ping as Encode>::ssz_fixed_len(),
                 <Ping as Encode>::ssz_fixed_len(),
             ),
+            Protocol::LightClientBootstrap => RpcLimits::new(
+                <LightClientBootstrapRequest as Encode>::ssz_fixed_len(),
+                <LightClientBootstrapRequest as Encode>::ssz_fixed_len(),
+            ),
             Protocol::MetaData => RpcLimits::new(0, 0), // Metadata requests are empty
         }
     }
@@ -349,6 +365,10 @@ impl ProtocolId {
                 <MetaDataV1<T> as Encode>::ssz_fixed_len(),
                 <MetaDataV2<T> as Encode>::ssz_fixed_len(),
             ),
+            Protocol::LightClientBootstrap => RpcLimits::new(
+                <LightClientBootstrapRequest as Encode>::ssz_fixed_len(),
+                <LightClientBootstrapRequest as Encode>::ssz_fixed_len(),
+            ),
         }
     }
 
@ -455,62 +475,13 @@ pub enum InboundRequest<TSpec: EthSpec> {
|
|||||||
BlocksByRange(OldBlocksByRangeRequest),
|
BlocksByRange(OldBlocksByRangeRequest),
|
||||||
BlocksByRoot(BlocksByRootRequest),
|
BlocksByRoot(BlocksByRootRequest),
|
||||||
BlobsByRange(BlobsByRangeRequest),
|
BlobsByRange(BlobsByRangeRequest),
|
||||||
|
LightClientBootstrap(LightClientBootstrapRequest),
|
||||||
Ping(Ping),
|
Ping(Ping),
|
||||||
MetaData(PhantomData<TSpec>),
|
MetaData(PhantomData<TSpec>),
|
||||||
}
|
}
|
||||||
|
|
||||||
impl<TSpec: EthSpec> UpgradeInfo for InboundRequest<TSpec> {
|
|
||||||
type Info = ProtocolId;
|
|
||||||
type InfoIter = Vec<Self::Info>;
|
|
||||||
|
|
||||||
// add further protocols as we support more encodings/versions
|
|
||||||
fn protocol_info(&self) -> Self::InfoIter {
|
|
||||||
self.supported_protocols()
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
/// Implements the encoding per supported protocol for `RPCRequest`.
|
/// Implements the encoding per supported protocol for `RPCRequest`.
|
||||||
impl<TSpec: EthSpec> InboundRequest<TSpec> {
|
impl<TSpec: EthSpec> InboundRequest<TSpec> {
|
||||||
pub fn supported_protocols(&self) -> Vec<ProtocolId> {
|
|
||||||
match self {
|
|
||||||
// add more protocols when versions/encodings are supported
|
|
||||||
InboundRequest::Status(_) => vec![ProtocolId::new(
|
|
||||||
Protocol::Status,
|
|
||||||
Version::V1,
|
|
||||||
Encoding::SSZSnappy,
|
|
||||||
)],
|
|
||||||
InboundRequest::Goodbye(_) => vec![ProtocolId::new(
|
|
||||||
Protocol::Goodbye,
|
|
||||||
Version::V1,
|
|
||||||
Encoding::SSZSnappy,
|
|
||||||
)],
|
|
||||||
InboundRequest::BlocksByRange(_) => vec![
|
|
||||||
// V2 has higher preference when negotiating a stream
|
|
||||||
ProtocolId::new(Protocol::BlocksByRange, Version::V2, Encoding::SSZSnappy),
|
|
||||||
ProtocolId::new(Protocol::BlocksByRange, Version::V1, Encoding::SSZSnappy),
|
|
||||||
],
|
|
||||||
InboundRequest::BlocksByRoot(_) => vec![
|
|
||||||
// V2 has higher preference when negotiating a stream
|
|
||||||
ProtocolId::new(Protocol::BlocksByRoot, Version::V2, Encoding::SSZSnappy),
|
|
||||||
ProtocolId::new(Protocol::BlocksByRoot, Version::V1, Encoding::SSZSnappy),
|
|
||||||
],
|
|
||||||
InboundRequest::BlobsByRange(_) => vec![ProtocolId::new(
|
|
||||||
Protocol::BlobsByRange,
|
|
||||||
Version::V1,
|
|
||||||
Encoding::SSZSnappy,
|
|
||||||
)],
|
|
||||||
InboundRequest::Ping(_) => vec![ProtocolId::new(
|
|
||||||
Protocol::Ping,
|
|
||||||
Version::V1,
|
|
||||||
Encoding::SSZSnappy,
|
|
||||||
)],
|
|
||||||
InboundRequest::MetaData(_) => vec![
|
|
||||||
ProtocolId::new(Protocol::MetaData, Version::V2, Encoding::SSZSnappy),
|
|
||||||
ProtocolId::new(Protocol::MetaData, Version::V1, Encoding::SSZSnappy),
|
|
||||||
],
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
/* These functions are used in the handler for stream management */
|
/* These functions are used in the handler for stream management */
|
||||||
|
|
||||||
/// Number of responses expected for this request.
|
/// Number of responses expected for this request.
|
||||||
@ -523,6 +494,7 @@ impl<TSpec: EthSpec> InboundRequest<TSpec> {
|
|||||||
InboundRequest::BlobsByRange(req) => req.count,
|
InboundRequest::BlobsByRange(req) => req.count,
|
||||||
InboundRequest::Ping(_) => 1,
|
InboundRequest::Ping(_) => 1,
|
||||||
InboundRequest::MetaData(_) => 1,
|
InboundRequest::MetaData(_) => 1,
|
||||||
|
InboundRequest::LightClientBootstrap(_) => 1,
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -536,6 +508,7 @@ impl<TSpec: EthSpec> InboundRequest<TSpec> {
|
|||||||
InboundRequest::BlobsByRange(_) => Protocol::BlobsByRange,
|
InboundRequest::BlobsByRange(_) => Protocol::BlobsByRange,
|
||||||
InboundRequest::Ping(_) => Protocol::Ping,
|
InboundRequest::Ping(_) => Protocol::Ping,
|
||||||
InboundRequest::MetaData(_) => Protocol::MetaData,
|
InboundRequest::MetaData(_) => Protocol::MetaData,
|
||||||
|
InboundRequest::LightClientBootstrap(_) => Protocol::LightClientBootstrap,
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -552,6 +525,7 @@ impl<TSpec: EthSpec> InboundRequest<TSpec> {
             InboundRequest::Goodbye(_) => unreachable!(),
             InboundRequest::Ping(_) => unreachable!(),
             InboundRequest::MetaData(_) => unreachable!(),
+            InboundRequest::LightClientBootstrap(_) => unreachable!(),
         }
     }
 }
@@ -656,6 +630,9 @@ impl<TSpec: EthSpec> std::fmt::Display for InboundRequest<TSpec> {
             InboundRequest::BlobsByRange(req) => write!(f, "Blobs by range: {:?}", req),
             InboundRequest::Ping(ping) => write!(f, "Ping: {}", ping.data),
             InboundRequest::MetaData(_) => write!(f, "MetaData request"),
+            InboundRequest::LightClientBootstrap(bootstrap) => {
+                write!(f, "LightClientBootstrap: {}", bootstrap.root)
+            }
         }
     }
 }
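One thing these hunks illustrate well: because `InboundRequest` is an exhaustive Rust enum, adding the `LightClientBootstrap` variant turns every `match` over it into a compile error until the new case is handled, which is how all of the call sites above were surfaced. A minimal standalone sketch of that property (toy types, not Lighthouse code):

    // Toy sketch, not Lighthouse code: adding an enum variant makes every
    // exhaustive `match` fail to compile until the new case is handled.
    enum InboundReq {
        Ping,
        MetaData,
        LightClientBootstrap, // newly added variant
    }

    fn expected_responses(req: &InboundReq) -> u64 {
        match req {
            InboundReq::Ping => 1,
            InboundReq::MetaData => 1,
            // Without this arm, rustc rejects the match as non-exhaustive.
            InboundReq::LightClientBootstrap => 1,
        }
    }

    fn main() {
        assert_eq!(expected_responses(&InboundReq::LightClientBootstrap), 1);
    }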
@@ -75,6 +75,8 @@ pub struct RPCRateLimiter {
     bbroots_rl: Limiter<PeerId>,
     /// BlobsByRange rate limiter.
     blbrange_rl: Limiter<PeerId>,
+    /// LightClientBootstrap rate limiter.
+    lcbootstrap_rl: Limiter<PeerId>,
 }

 /// Error type for non conformant requests
@@ -102,6 +104,8 @@ pub struct RPCRateLimiterBuilder {
     bbroots_quota: Option<Quota>,
     /// Quota for the BlobsByRange protocol.
     blbrange_quota: Option<Quota>,
+    /// Quota for the LightClientBootstrap protocol.
+    lcbootstrap_quota: Option<Quota>,
 }

 impl RPCRateLimiterBuilder {
@@ -121,6 +125,7 @@ impl RPCRateLimiterBuilder {
             Protocol::BlocksByRange => self.bbrange_quota = q,
             Protocol::BlocksByRoot => self.bbroots_quota = q,
             Protocol::BlobsByRange => self.blbrange_quota = q,
+            Protocol::LightClientBootstrap => self.lcbootstrap_quota = q,
         }
         self
     }
@@ -160,6 +165,9 @@ impl RPCRateLimiterBuilder {
         let bbrange_quota = self
             .bbrange_quota
             .ok_or("BlocksByRange quota not specified")?;
+        let lcbootstrap_quota = self
+            .lcbootstrap_quota
+            .ok_or("LightClientBootstrap quota not specified")?;

         let blbrange_quota = self
             .blbrange_quota
@@ -173,6 +181,7 @@ impl RPCRateLimiterBuilder {
         let bbroots_rl = Limiter::from_quota(bbroots_quota)?;
         let bbrange_rl = Limiter::from_quota(bbrange_quota)?;
         let blbrange_rl = Limiter::from_quota(blbrange_quota)?;
+        let lcbootstrap_rl = Limiter::from_quota(lcbootstrap_quota)?;

         // check for peers to prune every 30 seconds, starting in 30 seconds
         let prune_every = tokio::time::Duration::from_secs(30);
@@ -187,6 +196,7 @@ impl RPCRateLimiterBuilder {
             bbroots_rl,
             bbrange_rl,
             blbrange_rl,
+            lcbootstrap_rl,
             init_time: Instant::now(),
         })
     }
@@ -211,6 +221,7 @@ impl RPCRateLimiter {
             Protocol::BlocksByRange => &mut self.bbrange_rl,
             Protocol::BlocksByRoot => &mut self.bbroots_rl,
             Protocol::BlobsByRange => &mut self.blbrange_rl,
+            Protocol::LightClientBootstrap => &mut self.lcbootstrap_rl,
         };
         check(limiter)
     }
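The rate-limiter plumbing above is deliberately fail-fast: every `Protocol` variant must be given an explicit `Quota` before the builder succeeds, so a protocol added without a quota becomes a startup error rather than an unlimited endpoint. A self-contained sketch of that builder idea (names are illustrative, not the actual lighthouse_network API):

    // Illustrative names only; not the actual lighthouse_network API.
    #[derive(Clone, Copy)]
    struct Quota {
        max_tokens: u64,
        replenish_all_every_secs: u64,
    }

    #[derive(Default)]
    struct LimiterBuilder {
        ping_quota: Option<Quota>,
        lcbootstrap_quota: Option<Quota>,
    }

    impl LimiterBuilder {
        // Refuse to build unless every protocol has an explicit quota.
        fn build(self) -> Result<(), &'static str> {
            let _ping = self.ping_quota.ok_or("Ping quota not specified")?;
            let _lc = self
                .lcbootstrap_quota
                .ok_or("LightClientBootstrap quota not specified")?;
            Ok(())
        }
    }

    fn main() {
        // Forgetting the new quota is a build error, not a silent hole
        // in rate limiting.
        assert!(LimiterBuilder::default().build().is_err());
    }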
@@ -1,13 +1,14 @@
 use std::sync::Arc;

 use libp2p::core::connection::ConnectionId;
+use types::light_client_bootstrap::LightClientBootstrap;
 use types::{BlobsSidecar, EthSpec, SignedBeaconBlock};

 use crate::rpc::methods::BlobsByRangeRequest;
 use crate::rpc::{
     methods::{
-        BlocksByRangeRequest, BlocksByRootRequest, OldBlocksByRangeRequest, RPCCodedResponse,
-        RPCResponse, ResponseTermination, StatusMessage,
+        BlocksByRangeRequest, BlocksByRootRequest, LightClientBootstrapRequest,
+        OldBlocksByRangeRequest, RPCCodedResponse, RPCResponse, ResponseTermination, StatusMessage,
     },
     OutboundRequest, SubstreamId,
 };
@@ -37,6 +38,8 @@ pub enum Request {
     BlobsByRange(BlobsByRangeRequest),
     /// A request blocks root request.
     BlocksByRoot(BlocksByRootRequest),
+    // light client bootstrap request
+    LightClientBootstrap(LightClientBootstrapRequest),
 }

 impl<TSpec: EthSpec> std::convert::From<Request> for OutboundRequest<TSpec> {
@@ -51,6 +54,7 @@ impl<TSpec: EthSpec> std::convert::From<Request> for OutboundRequest<TSpec> {
                 })
             }
             Request::BlobsByRange(r) => OutboundRequest::BlobsByRange(r),
+            Request::LightClientBootstrap(b) => OutboundRequest::LightClientBootstrap(b),
             Request::Status(s) => OutboundRequest::Status(s),
         }
     }
@@ -72,6 +76,8 @@ pub enum Response<TSpec: EthSpec> {
     BlobsByRange(Option<Arc<BlobsSidecar<TSpec>>>),
     /// A response to a get BLOCKS_BY_ROOT request.
     BlocksByRoot(Option<Arc<SignedBeaconBlock<TSpec>>>),
+    /// A response to a LightClientBootstrap request.
+    LightClientBootstrap(LightClientBootstrap<TSpec>),
 }

 impl<TSpec: EthSpec> std::convert::From<Response<TSpec>> for RPCCodedResponse<TSpec> {
@@ -90,6 +96,9 @@ impl<TSpec: EthSpec> std::convert::From<Response<TSpec>> for RPCCodedResponse<TSpec> {
                 None => RPCCodedResponse::StreamTermination(ResponseTermination::BlobsByRange),
             },
             Response::Status(s) => RPCCodedResponse::Success(RPCResponse::Status(s)),
+            Response::LightClientBootstrap(b) => {
+                RPCCodedResponse::Success(RPCResponse::LightClientBootstrap(b))
+            }
         }
     }
 }
@@ -8,7 +8,6 @@ use libp2p::gossipsub::subscription_filter::{
 };
 use libp2p::gossipsub::Gossipsub as BaseGossipsub;
 use libp2p::identify::Identify;
-use libp2p::swarm::NetworkBehaviour;
 use libp2p::NetworkBehaviour;
 use types::EthSpec;

@@ -262,7 +262,11 @@ impl<AppReqId: ReqId, TSpec: EthSpec> Network<AppReqId, TSpec> {
             (gossipsub, update_gossipsub_scores)
         };

-        let eth2_rpc = RPC::new(ctx.fork_context.clone(), log.clone());
+        let eth2_rpc = RPC::new(
+            ctx.fork_context.clone(),
+            config.enable_light_client_server,
+            log.clone(),
+        );

         let discovery = {
             // Build and start the discovery sub-behaviour
@@ -981,6 +985,9 @@ impl<AppReqId: ReqId, TSpec: EthSpec> Network<AppReqId, TSpec> {
             Request::Status(_) => {
                 metrics::inc_counter_vec(&metrics::TOTAL_RPC_REQUESTS, &["status"])
             }
+            Request::LightClientBootstrap(_) => {
+                metrics::inc_counter_vec(&metrics::TOTAL_RPC_REQUESTS, &["light_client_bootstrap"])
+            }
             Request::BlocksByRange { .. } => {
                 metrics::inc_counter_vec(&metrics::TOTAL_RPC_REQUESTS, &["blocks_by_range"])
             }
@@ -1261,6 +1268,14 @@ impl<AppReqId: ReqId, TSpec: EthSpec> Network<AppReqId, TSpec> {
                         );
                         Some(event)
                     }
+                    InboundRequest::LightClientBootstrap(req) => {
+                        let event = self.build_request(
+                            peer_request_id,
+                            peer_id,
+                            Request::LightClientBootstrap(req),
+                        );
+                        Some(event)
+                    }
                 }
             }
             Ok(RPCReceived::Response(id, resp)) => {
@@ -1291,6 +1306,10 @@ impl<AppReqId: ReqId, TSpec: EthSpec> Network<AppReqId, TSpec> {
                     RPCResponse::BlocksByRoot(resp) => {
                         self.build_response(id, peer_id, Response::BlocksByRoot(Some(resp)))
                     }
+                    // Should never be reached
+                    RPCResponse::LightClientBootstrap(bootstrap) => {
+                        self.build_response(id, peer_id, Response::LightClientBootstrap(bootstrap))
+                    }
                 }
             }
             Ok(RPCReceived::EndOfStream(id, termination)) => {
@@ -74,6 +74,17 @@ impl SyncState {
         }
     }

+    pub fn is_syncing_finalized(&self) -> bool {
+        match self {
+            SyncState::SyncingFinalized { .. } => true,
+            SyncState::SyncingHead { .. } => false,
+            SyncState::SyncTransition => false,
+            SyncState::BackFillSyncing { .. } => false,
+            SyncState::Synced => false,
+            SyncState::Stalled => false,
+        }
+    }
+
     /// Returns true if the node is synced.
     ///
     /// NOTE: We consider the node synced if it is fetching old historical blocks.
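`is_syncing_finalized` gives callers a cheap way to tell whether the node is filling in an already-finalized range; the beacon processor hunk further down uses it to pass `NotifyExecutionLayer::No` for such segments, on the assumption that blocks in a finalized range do not need an immediate execution-layer round-trip. A minimal sketch of that call-site pattern (surrounding types elided):

    // Sketch of the call-site pattern (mirrors the Work::ChainSegment
    // hunk below; all surrounding types elided).
    enum NotifyExecutionLayer {
        Yes,
        No,
    }

    fn notify_for(is_syncing_finalized: bool) -> NotifyExecutionLayer {
        if is_syncing_finalized {
            // Blocks in an already-finalized range can skip the EL round-trip.
            NotifyExecutionLayer::No
        } else {
            NotifyExecutionLayer::Yes
        }
    }

    fn main() {
        assert!(matches!(notify_for(true), NotifyExecutionLayer::No));
        assert!(matches!(notify_for(false), NotifyExecutionLayer::Yes));
    }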
@@ -41,11 +41,12 @@
 use crate::sync::manager::BlockProcessType;
 use crate::{metrics, service::NetworkMessage, sync::SyncMessage};
 use beacon_chain::parking_lot::Mutex;
-use beacon_chain::{BeaconChain, BeaconChainTypes, GossipVerifiedBlock};
+use beacon_chain::{BeaconChain, BeaconChainTypes, GossipVerifiedBlock, NotifyExecutionLayer};
 use derivative::Derivative;
 use futures::stream::{Stream, StreamExt};
 use futures::task::Poll;
 use lighthouse_network::rpc::methods::BlobsByRangeRequest;
+use lighthouse_network::rpc::LightClientBootstrapRequest;
 use lighthouse_network::SignedBeaconBlockAndBlobsSidecar;
 use lighthouse_network::{
     rpc::{BlocksByRangeRequest, BlocksByRootRequest, StatusMessage},
@@ -169,6 +170,10 @@ const MAX_BLOCKS_BY_ROOTS_QUEUE_LEN: usize = 1_024;
 /// is activated.
 const MAX_BLS_TO_EXECUTION_CHANGE_QUEUE_LEN: usize = 16_384;

+/// The maximum number of queued `LightClientBootstrapRequest` objects received from the network RPC that
+/// will be stored before we start dropping them.
+const MAX_LIGHT_CLIENT_BOOTSTRAP_QUEUE_LEN: usize = 1_024;
+
 /// The name of the manager tokio task.
 const MANAGER_TASK_NAME: &str = "beacon_processor_manager";

@@ -210,6 +215,7 @@ pub const STATUS_PROCESSING: &str = "status_processing";
 pub const BLOCKS_BY_RANGE_REQUEST: &str = "blocks_by_range_request";
 pub const BLOCKS_BY_ROOTS_REQUEST: &str = "blocks_by_roots_request";
 pub const BLOBS_BY_RANGE_REQUEST: &str = "blobs_by_range_request";
+pub const LIGHT_CLIENT_BOOTSTRAP_REQUEST: &str = "light_client_bootstrap";
 pub const UNKNOWN_BLOCK_ATTESTATION: &str = "unknown_block_attestation";
 pub const UNKNOWN_BLOCK_AGGREGATE: &str = "unknown_block_aggregate";
 pub const GOSSIP_BLS_TO_EXECUTION_CHANGE: &str = "gossip_bls_to_execution_change";
@@ -624,6 +630,22 @@ impl<T: BeaconChainTypes> WorkEvent<T> {
         }
     }

+    /// Create a new work event to process `LightClientBootstrap`s from the RPC network.
+    pub fn lightclient_bootstrap_request(
+        peer_id: PeerId,
+        request_id: PeerRequestId,
+        request: LightClientBootstrapRequest,
+    ) -> Self {
+        Self {
+            drop_during_sync: true,
+            work: Work::LightClientBootstrapRequest {
+                peer_id,
+                request_id,
+                request,
+            },
+        }
+    }
+
     /// Get a `str` representation of the type of work this `WorkEvent` contains.
     pub fn work_type(&self) -> &'static str {
         self.work.str_id()
@@ -817,6 +839,11 @@ pub enum Work<T: BeaconChainTypes> {
         peer_id: PeerId,
         bls_to_execution_change: Box<SignedBlsToExecutionChange>,
     },
+    LightClientBootstrapRequest {
+        peer_id: PeerId,
+        request_id: PeerRequestId,
+        request: LightClientBootstrapRequest,
+    },
 }

 impl<T: BeaconChainTypes> Work<T> {
@@ -841,6 +868,7 @@ impl<T: BeaconChainTypes> Work<T> {
             Work::BlocksByRangeRequest { .. } => BLOCKS_BY_RANGE_REQUEST,
             Work::BlocksByRootsRequest { .. } => BLOCKS_BY_ROOTS_REQUEST,
             Work::BlobsByRangeRequest { .. } => BLOBS_BY_RANGE_REQUEST,
+            Work::LightClientBootstrapRequest { .. } => LIGHT_CLIENT_BOOTSTRAP_REQUEST,
             Work::UnknownBlockAttestation { .. } => UNKNOWN_BLOCK_ATTESTATION,
             Work::UnknownBlockAggregate { .. } => UNKNOWN_BLOCK_AGGREGATE,
             Work::GossipBlsToExecutionChange { .. } => GOSSIP_BLS_TO_EXECUTION_CHANGE,
@@ -992,6 +1020,7 @@ impl<T: BeaconChainTypes> BeaconProcessor<T> {
         let mut gossip_bls_to_execution_change_queue =
             FifoQueue::new(MAX_BLS_TO_EXECUTION_CHANGE_QUEUE_LEN);

+        let mut lcbootstrap_queue = FifoQueue::new(MAX_LIGHT_CLIENT_BOOTSTRAP_QUEUE_LEN);
         // Channels for sending work to the re-process scheduler (`work_reprocessing_tx`) and to
         // receive them back once they are ready (`ready_work_rx`).
         let (ready_work_tx, ready_work_rx) = mpsc::channel(MAX_SCHEDULED_WORK_QUEUE_LEN);
@@ -1236,6 +1265,8 @@ impl<T: BeaconChainTypes> BeaconProcessor<T> {
                 } else if let Some(item) = backfill_chain_segment.pop() {
                     self.spawn_worker(item, toolbox);
                 // This statement should always be the final else statement.
+                } else if let Some(item) = lcbootstrap_queue.pop() {
+                    self.spawn_worker(item, toolbox);
                 } else {
                     // Let the journal know that a worker is freed and there's nothing else
                     // for it to do.
@@ -1342,6 +1373,9 @@ impl<T: BeaconChainTypes> BeaconProcessor<T> {
                         Work::BlobsByRangeRequest { .. } => {
                             blbrange_queue.push(work, work_id, &self.log)
                         }
+                        Work::LightClientBootstrapRequest { .. } => {
+                            lcbootstrap_queue.push(work, work_id, &self.log)
+                        }
                         Work::UnknownBlockAttestation { .. } => {
                             unknown_block_attestation_queue.push(work)
                         }
@@ -1700,8 +1734,24 @@ impl<T: BeaconChainTypes> BeaconProcessor<T> {
                 /*
                  * Verification for a chain segment (multiple blocks).
                  */
-                Work::ChainSegment { process_id, blocks } => task_spawner
-                    .spawn_async(async move { worker.process_chain_segment(process_id, blocks).await }),
+                Work::ChainSegment { process_id, blocks } => {
+                    let notify_execution_layer = if self
+                        .network_globals
+                        .sync_state
+                        .read()
+                        .is_syncing_finalized()
+                    {
+                        NotifyExecutionLayer::No
+                    } else {
+                        NotifyExecutionLayer::Yes
+                    };
+
+                    task_spawner.spawn_async(async move {
+                        worker
+                            .process_chain_segment(process_id, blocks, notify_execution_layer)
+                            .await
+                    })
+                }
                 /*
                  * Processing of Status Messages.
                  */
@@ -1740,7 +1790,6 @@ impl<T: BeaconChainTypes> BeaconProcessor<T> {
                         request,
                     )
                 }),
-
                 Work::BlobsByRangeRequest {
                     peer_id,
                     request_id,
@@ -1754,7 +1803,16 @@ impl<T: BeaconChainTypes> BeaconProcessor<T> {
                         request,
                     )
                 }),
+                /*
+                 * Processing of lightclient bootstrap requests from other peers.
+                 */
+                Work::LightClientBootstrapRequest {
+                    peer_id,
+                    request_id,
+                    request,
+                } => task_spawner.spawn_blocking(move || {
+                    worker.handle_light_client_bootstrap(peer_id, request_id, request)
+                }),
                 Work::UnknownBlockAttestation {
                     message_id,
                     peer_id,
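Note how the worker loop encodes priority purely by position: queues are drained in the order of the `else if` chain, so placing `lcbootstrap_queue` last makes bootstrap requests the lowest-priority work, and the bounded FIFO (1_024 entries) sheds overflow instead of growing without bound. A self-contained sketch of both ideas (toy queue, not the Lighthouse `FifoQueue`):

    // Sketch of priority-by-position: earlier arms drain first; the
    // bounded queue drops overflow. Toy types, not the Lighthouse code.
    use std::collections::VecDeque;

    struct FifoQueue<T> {
        q: VecDeque<T>,
        cap: usize,
    }

    impl<T> FifoQueue<T> {
        fn new(cap: usize) -> Self {
            Self { q: VecDeque::new(), cap }
        }
        fn push(&mut self, item: T) {
            if self.q.len() < self.cap {
                self.q.push_back(item); // else: silently dropped
            }
        }
        fn pop(&mut self) -> Option<T> {
            self.q.pop_front()
        }
    }

    fn main() {
        let mut high = FifoQueue::new(4);
        let mut low = FifoQueue::new(4); // e.g. light-client bootstrap requests
        high.push("block");
        low.push("bootstrap");
        // The low queue is checked last, so it only runs when the
        // higher-priority queue is empty.
        while let Some(work) = high.pop().or_else(|| low.pop()) {
            println!("processing {work}");
        }
    }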
@@ -8,7 +8,7 @@ use beacon_chain::{
     sync_committee_verification::{self, Error as SyncCommitteeError},
     validator_monitor::get_block_delay_ms,
     BeaconChainError, BeaconChainTypes, BlockError, CountUnrealized, ForkChoiceError,
-    GossipVerifiedBlock,
+    GossipVerifiedBlock, NotifyExecutionLayer,
 };
 use lighthouse_network::{
     Client, MessageAcceptance, MessageId, PeerAction, PeerId, ReportSource,
@@ -812,7 +812,7 @@ impl<T: BeaconChainTypes> Worker<T> {
             | Err(e @ BlockError::BlockIsAlreadyKnown)
             | Err(e @ BlockError::RepeatProposal { .. })
             | Err(e @ BlockError::NotFinalizedDescendant { .. }) => {
-                debug!(self.log, "Could not verify block for gossip, ignoring the block";
+                debug!(self.log, "Could not verify block for gossip. Ignoring the block";
                     "error" => %e);
                 // Prevent recurring behaviour by penalizing the peer slightly.
                 self.gossip_penalize_peer(
@@ -824,7 +824,7 @@ impl<T: BeaconChainTypes> Worker<T> {
                 return None;
             }
             Err(ref e @ BlockError::ExecutionPayloadError(ref epe)) if !epe.penalize_peer() => {
-                debug!(self.log, "Could not verify block for gossip, ignoring the block";
+                debug!(self.log, "Could not verify block for gossip. Ignoring the block";
                     "error" => %e);
                 self.propagate_validation_result(message_id, peer_id, MessageAcceptance::Ignore);
                 return None;
@@ -846,7 +846,7 @@ impl<T: BeaconChainTypes> Worker<T> {
             // TODO(merge): reconsider peer scoring for this event.
             | Err(e @ BlockError::ParentExecutionPayloadInvalid { .. })
             | Err(e @ BlockError::GenesisBlock) => {
-                warn!(self.log, "Could not verify block for gossip, rejecting the block";
+                warn!(self.log, "Could not verify block for gossip. Rejecting the block";
                     "error" => %e);
                 self.propagate_validation_result(message_id, peer_id, MessageAcceptance::Reject);
                 self.gossip_penalize_peer(
@@ -953,7 +953,12 @@ impl<T: BeaconChainTypes> Worker<T> {

         match self
             .chain
-            .process_block(block_root, verified_block, CountUnrealized::True)
+            .process_block(
+                block_root,
+                verified_block,
+                CountUnrealized::True,
+                NotifyExecutionLayer::Yes,
+            )
             .await
         {
             Ok(block_root) => {
@@ -38,7 +38,7 @@ impl<T: BeaconChainTypes> Worker<T> {
     /// Creates a log if there is an internal error.
     fn send_network_message(&self, message: NetworkMessage<T::EthSpec>) {
         self.network_tx.send(message).unwrap_or_else(|e| {
-            debug!(self.log, "Could not send message to the network service, likely shutdown";
+            debug!(self.log, "Could not send message to the network service. Likely shutdown";
                 "error" => %e)
         });
     }
@@ -12,7 +12,7 @@ use slog::{debug, error};
 use slot_clock::SlotClock;
 use std::sync::Arc;
 use task_executor::TaskExecutor;
-use types::{Epoch, EthSpec, Hash256, Slot};
+use types::{light_client_bootstrap::LightClientBootstrap, Epoch, EthSpec, Hash256, Slot};

 use super::Worker;

@@ -205,6 +205,79 @@ impl<T: BeaconChainTypes> Worker<T> {
         )
     }

+    /// Handle a `LightClientBootstrap` request from the peer.
+    pub fn handle_light_client_bootstrap(
+        self,
+        peer_id: PeerId,
+        request_id: PeerRequestId,
+        request: LightClientBootstrapRequest,
+    ) {
+        let block_root = request.root;
+        let state_root = match self.chain.get_blinded_block(&block_root) {
+            Ok(signed_block) => match signed_block {
+                Some(signed_block) => signed_block.state_root(),
+                None => {
+                    self.send_error_response(
+                        peer_id,
+                        RPCResponseErrorCode::ResourceUnavailable,
+                        "Bootstrap not available".into(),
+                        request_id,
+                    );
+                    return;
+                }
+            },
+            Err(_) => {
+                self.send_error_response(
+                    peer_id,
+                    RPCResponseErrorCode::ResourceUnavailable,
+                    "Bootstrap not available".into(),
+                    request_id,
+                );
+                return;
+            }
+        };
+        let mut beacon_state = match self.chain.get_state(&state_root, None) {
+            Ok(beacon_state) => match beacon_state {
+                Some(state) => state,
+                None => {
+                    self.send_error_response(
+                        peer_id,
+                        RPCResponseErrorCode::ResourceUnavailable,
+                        "Bootstrap not available".into(),
+                        request_id,
+                    );
+                    return;
+                }
+            },
+            Err(_) => {
+                self.send_error_response(
+                    peer_id,
+                    RPCResponseErrorCode::ResourceUnavailable,
+                    "Bootstrap not available".into(),
+                    request_id,
+                );
+                return;
+            }
+        };
+        let bootstrap = match LightClientBootstrap::from_beacon_state(&mut beacon_state) {
+            Ok(bootstrap) => bootstrap,
+            Err(_) => {
+                self.send_error_response(
+                    peer_id,
+                    RPCResponseErrorCode::ResourceUnavailable,
+                    "Bootstrap not available".into(),
+                    request_id,
+                );
+                return;
+            }
+        };
+        self.send_response(
+            peer_id,
+            Response::LightClientBootstrap(bootstrap),
+            request_id,
+        )
+    }
+
     /// Handle a `BlocksByRange` request from the peer.
     pub fn handle_blocks_by_range_request(
         self,
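The four error arms in `handle_light_client_bootstrap` are identical; since each lookup is a `Result<Option<_>, _>`, they could plausibly be collapsed into one `Option` chain with a single error path, as this self-contained sketch shows (toy lookups, not the Lighthouse API):

    // Self-contained sketch: collapsing nested `Result<Option<_>>` matches
    // into one chain, so "Bootstrap not available" is sent from one place.
    fn lookup_a(key: u32) -> Result<Option<u32>, ()> {
        Ok(Some(key + 1))
    }
    fn lookup_b(key: u32) -> Result<Option<u32>, ()> {
        Ok(Some(key * 2))
    }

    fn bootstrap(key: u32) -> Option<u32> {
        let a = lookup_a(key).ok().flatten()?; // Err(_) and Ok(None) both become None
        let b = lookup_b(a).ok().flatten()?;
        Some(b)
    }

    fn main() {
        match bootstrap(7) {
            Some(v) => println!("bootstrap ready: {v}"),
            None => println!("Bootstrap not available"), // single error path
        }
    }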
@@ -10,6 +10,7 @@ use crate::sync::{BatchProcessResult, ChainId};
 use beacon_chain::CountUnrealized;
 use beacon_chain::{
     BeaconChainError, BeaconChainTypes, BlockError, ChainSegmentResult, HistoricalBlockError,
+    NotifyExecutionLayer,
 };
 use lighthouse_network::PeerAction;
 use slog::{debug, error, info, warn};
@@ -85,7 +86,12 @@ impl<T: BeaconChainTypes> Worker<T> {
         let slot = block.slot();
         let result = self
             .chain
-            .process_block(block_root, block, CountUnrealized::True)
+            .process_block(
+                block_root,
+                block,
+                CountUnrealized::True,
+                NotifyExecutionLayer::Yes,
+            )
             .await;

         metrics::inc_counter(&metrics::BEACON_PROCESSOR_RPC_BLOCK_IMPORTED_TOTAL);
@@ -127,6 +133,7 @@ impl<T: BeaconChainTypes> Worker<T> {
         &self,
         sync_type: ChainSegmentProcessId,
         downloaded_blocks: Vec<Arc<SignedBeaconBlock<T::EthSpec>>>,
+        notify_execution_layer: NotifyExecutionLayer,
     ) {
         let result = match sync_type {
             // this a request from the range sync
@@ -136,7 +143,11 @@ impl<T: BeaconChainTypes> Worker<T> {
                 let sent_blocks = downloaded_blocks.len();

                 match self
-                    .process_blocks(downloaded_blocks.iter(), count_unrealized)
+                    .process_blocks(
+                        downloaded_blocks.iter(),
+                        count_unrealized,
+                        notify_execution_layer,
+                    )
                     .await
                 {
                     (_, Ok(_)) => {
@@ -215,7 +226,11 @@ impl<T: BeaconChainTypes> Worker<T> {
                 // parent blocks are ordered from highest slot to lowest, so we need to process in
                 // reverse
                 match self
-                    .process_blocks(downloaded_blocks.iter().rev(), CountUnrealized::True)
+                    .process_blocks(
+                        downloaded_blocks.iter().rev(),
+                        CountUnrealized::True,
+                        notify_execution_layer,
+                    )
                     .await
                 {
                     (imported_blocks, Err(e)) => {
@@ -246,11 +261,12 @@ impl<T: BeaconChainTypes> Worker<T> {
         &self,
         downloaded_blocks: impl Iterator<Item = &'a Arc<SignedBeaconBlock<T::EthSpec>>>,
         count_unrealized: CountUnrealized,
+        notify_execution_layer: NotifyExecutionLayer,
     ) -> (usize, Result<(), ChainSegmentFailed>) {
         let blocks: Vec<Arc<_>> = downloaded_blocks.cloned().collect();
         match self
             .chain
-            .process_chain_segment(blocks, count_unrealized)
+            .process_chain_segment(blocks, count_unrealized, notify_execution_layer)
             .await
         {
             ChainSegmentResult::Successful { imported_blocks } => {
@@ -428,7 +444,7 @@ impl<T: BeaconChainTypes> Worker<T> {
         } else {
             // The block is in the future, but not too far.
             debug!(
-                self.log, "Block is slightly ahead of our slot clock, ignoring.";
+                self.log, "Block is slightly ahead of our slot clock. Ignoring.";
                 "present_slot" => present_slot,
                 "block_slot" => block_slot,
                 "FUTURE_SLOT_TOLERANCE" => FUTURE_SLOT_TOLERANCE,
@@ -171,6 +171,9 @@ impl<T: BeaconChainTypes> Router<T> {
             Request::BlobsByRange(request) => self
                 .processor
                 .on_blobs_by_range_request(peer_id, id, request),
+            Request::LightClientBootstrap(request) => self
+                .processor
+                .on_lightclient_bootstrap(peer_id, id, request),
         }
     }

@@ -199,6 +202,7 @@ impl<T: BeaconChainTypes> Router<T> {
                 self.processor
                     .on_blobs_by_range_response(peer_id, request_id, beacon_blob);
             }
+            Response::LightClientBootstrap(_) => unreachable!(),
         }
     }

@@ -172,6 +172,19 @@ impl<T: BeaconChainTypes> Processor<T> {
             peer_id, request_id, request,
         ))
     }

+    /// Handle a `LightClientBootstrap` request from the peer.
+    pub fn on_lightclient_bootstrap(
+        &mut self,
+        peer_id: PeerId,
+        request_id: PeerRequestId,
+        request: LightClientBootstrapRequest,
+    ) {
+        self.send_beacon_processor_work(BeaconWorkEvent::lightclient_bootstrap_request(
+            peer_id, request_id, request,
+        ))
+    }
+
     /// Handle a `BlocksByRange` request from the peer.
     pub fn on_blocks_by_range_request(
         &mut self,
@@ -1,4 +1,5 @@
 use std::collections::hash_map::Entry;
+use std::collections::HashMap;
 use std::time::Duration;

 use beacon_chain::{BeaconChainTypes, BlockError};
@@ -13,6 +14,7 @@ use store::{Hash256, SignedBeaconBlock};
 use crate::beacon_processor::{ChainSegmentProcessId, WorkEvent};
 use crate::metrics;

+use self::parent_lookup::PARENT_FAIL_TOLERANCE;
 use self::{
     parent_lookup::{ParentLookup, VerifyError},
     single_block_lookup::SingleBlockRequest,
@@ -36,8 +38,11 @@ const FAILED_CHAINS_CACHE_EXPIRY_SECONDS: u64 = 60;
 const SINGLE_BLOCK_LOOKUP_MAX_ATTEMPTS: u8 = 3;

 pub(crate) struct BlockLookups<T: BeaconChainTypes> {
-    /// A collection of parent block lookups.
-    parent_queue: SmallVec<[ParentLookup<T>; 3]>,
+    /// Parent chain lookups being downloaded.
+    parent_lookups: SmallVec<[ParentLookup<T>; 3]>,
+
+    processing_parent_lookups:
+        HashMap<Hash256, (Vec<Hash256>, SingleBlockRequest<PARENT_FAIL_TOLERANCE>)>,

     /// A cache of failed chain lookups to prevent duplicate searches.
     failed_chains: LRUTimeCache<Hash256>,
@@ -55,7 +60,8 @@ pub(crate) struct BlockLookups<T: BeaconChainTypes> {
 impl<T: BeaconChainTypes> BlockLookups<T> {
     pub fn new(log: Logger) -> Self {
         Self {
-            parent_queue: Default::default(),
+            parent_lookups: Default::default(),
+            processing_parent_lookups: Default::default(),
             failed_chains: LRUTimeCache::new(Duration::from_secs(
                 FAILED_CHAINS_CACHE_EXPIRY_SECONDS,
             )),
@@ -78,6 +84,23 @@ impl<T: BeaconChainTypes> BlockLookups<T> {
             return;
         }

+        if self.parent_lookups.iter_mut().any(|parent_req| {
+            parent_req.add_peer(&hash, &peer_id) || parent_req.contains_block(&hash)
+        }) {
+            // If the block was already downloaded, or is being downloaded in this moment, do not
+            // request it.
+            return;
+        }
+
+        if self
+            .processing_parent_lookups
+            .values()
+            .any(|(hashes, _last_parent_request)| hashes.contains(&hash))
+        {
+            // we are already processing this block, ignore it.
+            return;
+        }
+
         debug!(
             self.log,
             "Searching for block";
@@ -118,8 +141,8 @@ impl<T: BeaconChainTypes> BlockLookups<T> {

         // Make sure this block is not already downloaded, and that neither it or its parent is
         // being searched for.
-        if self.parent_queue.iter_mut().any(|parent_req| {
-            parent_req.contains_block(&block)
+        if self.parent_lookups.iter_mut().any(|parent_req| {
+            parent_req.contains_block(&block_root)
                 || parent_req.add_peer(&block_root, &peer_id)
                 || parent_req.add_peer(&parent_root, &peer_id)
         }) {
@@ -127,6 +150,15 @@ impl<T: BeaconChainTypes> BlockLookups<T> {
             return;
         }

+        if self
+            .processing_parent_lookups
+            .values()
+            .any(|(hashes, _peers)| hashes.contains(&block_root) || hashes.contains(&parent_root))
+        {
+            // we are already processing this block, ignore it.
+            return;
+        }
+
         let parent_lookup = ParentLookup::new(block_root, block, peer_id);
         self.request_parent(parent_lookup, cx);
     }
@@ -207,11 +239,11 @@ impl<T: BeaconChainTypes> BlockLookups<T> {
         cx: &mut SyncNetworkContext<T>,
     ) {
         let mut parent_lookup = if let Some(pos) = self
-            .parent_queue
+            .parent_lookups
             .iter()
             .position(|request| request.pending_response(id))
         {
-            self.parent_queue.remove(pos)
+            self.parent_lookups.remove(pos)
         } else {
             if block.is_some() {
                 debug!(self.log, "Response for a parent lookup request that was not found"; "peer_id" => %peer_id);
@@ -233,13 +265,13 @@ impl<T: BeaconChainTypes> BlockLookups<T> {
                     )
                     .is_ok()
                 {
-                    self.parent_queue.push(parent_lookup)
+                    self.parent_lookups.push(parent_lookup)
                 }
             }
             Ok(None) => {
                 // Request finished successfully, nothing else to do. It will be removed after the
                 // processing result arrives.
-                self.parent_queue.push(parent_lookup);
+                self.parent_lookups.push(parent_lookup);
             }
             Err(e) => match e {
                 VerifyError::RootMismatch
@@ -276,7 +308,7 @@ impl<T: BeaconChainTypes> BlockLookups<T> {

         metrics::set_gauge(
             &metrics::SYNC_PARENT_BLOCK_LOOKUPS,
-            self.parent_queue.len() as i64,
+            self.parent_lookups.len() as i64,
         );
     }

@@ -324,11 +356,11 @@ impl<T: BeaconChainTypes> BlockLookups<T> {

         /* Check disconnection for parent lookups */
         while let Some(pos) = self
-            .parent_queue
+            .parent_lookups
             .iter_mut()
             .position(|req| req.check_peer_disconnected(peer_id).is_err())
         {
-            let parent_lookup = self.parent_queue.remove(pos);
+            let parent_lookup = self.parent_lookups.remove(pos);
             trace!(self.log, "Parent lookup's peer disconnected"; &parent_lookup);
             self.request_parent(parent_lookup, cx);
         }
@@ -342,11 +374,11 @@ impl<T: BeaconChainTypes> BlockLookups<T> {
         cx: &mut SyncNetworkContext<T>,
     ) {
         if let Some(pos) = self
-            .parent_queue
+            .parent_lookups
             .iter()
             .position(|request| request.pending_response(id))
         {
-            let mut parent_lookup = self.parent_queue.remove(pos);
+            let mut parent_lookup = self.parent_lookups.remove(pos);
             parent_lookup.download_failed();
             trace!(self.log, "Parent lookup request failed"; &parent_lookup);
             self.request_parent(parent_lookup, cx);
@@ -355,7 +387,7 @@ impl<T: BeaconChainTypes> BlockLookups<T> {
         };
         metrics::set_gauge(
             &metrics::SYNC_PARENT_BLOCK_LOOKUPS,
-            self.parent_queue.len() as i64,
+            self.parent_lookups.len() as i64,
         );
     }

@@ -470,7 +502,7 @@ impl<T: BeaconChainTypes> BlockLookups<T> {
         cx: &mut SyncNetworkContext<T>,
     ) {
         let (mut parent_lookup, peer_id) = if let Some((pos, peer)) = self
-            .parent_queue
+            .parent_lookups
             .iter()
             .enumerate()
             .find_map(|(pos, request)| {
@@ -478,7 +510,7 @@ impl<T: BeaconChainTypes> BlockLookups<T> {
                     .get_processing_peer(chain_hash)
                     .map(|peer| (pos, peer))
             }) {
-            (self.parent_queue.remove(pos), peer)
+            (self.parent_lookups.remove(pos), peer)
         } else {
             return debug!(self.log, "Process response for a parent lookup request that was not found"; "chain_hash" => %chain_hash);
         };
@@ -520,13 +552,13 @@ impl<T: BeaconChainTypes> BlockLookups<T> {
                 );
             }
         };
-        let chain_hash = parent_lookup.chain_hash();
-        let blocks = parent_lookup.chain_blocks();
+        let (chain_hash, blocks, hashes, request) = parent_lookup.parts_for_processing();
         let process_id = ChainSegmentProcessId::ParentLookup(chain_hash);

         match beacon_processor_send.try_send(WorkEvent::chain_segment(process_id, blocks)) {
             Ok(_) => {
-                self.parent_queue.push(parent_lookup);
+                self.processing_parent_lookups
+                    .insert(chain_hash, (hashes, request));
             }
             Err(e) => {
                 error!(
@@ -580,7 +612,7 @@ impl<T: BeaconChainTypes> BlockLookups<T> {

         metrics::set_gauge(
             &metrics::SYNC_PARENT_BLOCK_LOOKUPS,
-            self.parent_queue.len() as i64,
+            self.parent_lookups.len() as i64,
         );
     }

@@ -590,14 +622,11 @@ impl<T: BeaconChainTypes> BlockLookups<T> {
         result: BatchProcessResult,
         cx: &mut SyncNetworkContext<T>,
     ) {
-        let parent_lookup = if let Some(pos) = self
-            .parent_queue
-            .iter()
-            .position(|request| request.chain_hash() == chain_hash)
-        {
-            self.parent_queue.remove(pos)
-        } else {
-            return debug!(self.log, "Chain process response for a parent lookup request that was not found"; "chain_hash" => %chain_hash);
-        };
+        let request = match self.processing_parent_lookups.remove(&chain_hash) {
+            Some((_hashes, request)) => request,
+            None => {
+                return debug!(self.log, "Chain process response for a parent lookup request that was not found"; "chain_hash" => %chain_hash, "result" => ?result)
+            }
+        };

         debug!(self.log, "Parent chain processed"; "chain_hash" => %chain_hash, "result" => ?result);
@@ -609,8 +638,8 @@ impl<T: BeaconChainTypes> BlockLookups<T> {
                 imported_blocks: _,
                 penalty,
             } => {
-                self.failed_chains.insert(parent_lookup.chain_hash());
-                for &peer_id in parent_lookup.used_peers() {
+                self.failed_chains.insert(chain_hash);
+                for peer_id in request.used_peers {
                     cx.report_peer(peer_id, penalty, "parent_chain_failure")
                 }
             }
@@ -621,7 +650,7 @@ impl<T: BeaconChainTypes> BlockLookups<T> {

         metrics::set_gauge(
             &metrics::SYNC_PARENT_BLOCK_LOOKUPS,
-            self.parent_queue.len() as i64,
+            self.parent_lookups.len() as i64,
         );
     }

@@ -697,14 +726,14 @@ impl<T: BeaconChainTypes> BlockLookups<T> {
             }
             Ok(_) => {
                 debug!(self.log, "Requesting parent"; &parent_lookup);
-                self.parent_queue.push(parent_lookup)
+                self.parent_lookups.push(parent_lookup)
             }
         }

         // We remove and add back again requests so we want this updated regardless of outcome.
         metrics::set_gauge(
             &metrics::SYNC_PARENT_BLOCK_LOOKUPS,
-            self.parent_queue.len() as i64,
+            self.parent_lookups.len() as i64,
         );
     }

@@ -715,6 +744,6 @@ impl<T: BeaconChainTypes> BlockLookups<T> {

     /// Drops all the parent chain requests and returns how many requests were dropped.
     pub fn drop_parent_chain_requests(&mut self) -> usize {
-        self.parent_queue.drain(..).len()
+        self.parent_lookups.drain(..).len()
     }
 }
@@ -24,7 +24,7 @@ pub(crate) struct ParentLookup<T: BeaconChainTypes> {
     /// The root of the block triggering this parent request.
     chain_hash: Hash256,
     /// The blocks that have currently been downloaded.
-    downloaded_blocks: Vec<Arc<SignedBeaconBlock<T::EthSpec>>>,
+    downloaded_blocks: Vec<RootBlockTuple<T::EthSpec>>,
     /// Request of the last parent.
     current_parent_request: SingleBlockRequest<PARENT_FAIL_TOLERANCE>,
     /// Id of the last parent request.
@@ -53,10 +53,10 @@ pub enum RequestError {
 }

 impl<T: BeaconChainTypes> ParentLookup<T> {
-    pub fn contains_block(&self, block: &SignedBeaconBlock<T::EthSpec>) -> bool {
+    pub fn contains_block(&self, block_root: &Hash256) -> bool {
         self.downloaded_blocks
             .iter()
-            .any(|d_block| d_block.as_ref() == block)
+            .any(|(root, _d_block)| root == block_root)
     }

     pub fn new(
@@ -68,7 +68,7 @@ impl<T: BeaconChainTypes> ParentLookup<T> {

         Self {
             chain_hash: block_root,
-            downloaded_blocks: vec![block],
+            downloaded_blocks: vec![(block_root, block)],
             current_parent_request,
             current_parent_request_id: None,
         }
@@ -100,7 +100,8 @@ impl<T: BeaconChainTypes> ParentLookup<T> {

     pub fn add_block(&mut self, block: Arc<SignedBeaconBlock<T::EthSpec>>) {
         let next_parent = block.parent_root();
-        self.downloaded_blocks.push(block);
+        let current_root = self.current_parent_request.hash;
+        self.downloaded_blocks.push((current_root, block));
         self.current_parent_request.hash = next_parent;
         self.current_parent_request.state = single_block_lookup::State::AwaitingDownload;
         self.current_parent_request_id = None;
@@ -110,6 +111,32 @@ impl<T: BeaconChainTypes> ParentLookup<T> {
         self.current_parent_request_id == Some(req_id)
     }

+    /// Consumes the parent request and destructures it into its parts.
+    #[allow(clippy::type_complexity)]
+    pub fn parts_for_processing(
+        self,
+    ) -> (
+        Hash256,
+        Vec<Arc<SignedBeaconBlock<T::EthSpec>>>,
+        Vec<Hash256>,
+        SingleBlockRequest<PARENT_FAIL_TOLERANCE>,
+    ) {
+        let ParentLookup {
+            chain_hash,
+            downloaded_blocks,
+            current_parent_request,
+            current_parent_request_id: _,
+        } = self;
+        let block_count = downloaded_blocks.len();
+        let mut blocks = Vec::with_capacity(block_count);
+        let mut hashes = Vec::with_capacity(block_count);
+        for (hash, block) in downloaded_blocks {
+            blocks.push(block);
+            hashes.push(hash);
+        }
+        (chain_hash, blocks, hashes, current_parent_request)
+    }
+
     /// Get the parent lookup's chain hash.
     pub fn chain_hash(&self) -> Hash256 {
         self.chain_hash
@@ -125,10 +152,6 @@ impl<T: BeaconChainTypes> ParentLookup<T> {
         self.current_parent_request_id = None;
     }

-    pub fn chain_blocks(&mut self) -> Vec<Arc<SignedBeaconBlock<T::EthSpec>>> {
-        std::mem::take(&mut self.downloaded_blocks)
-    }
-
     /// Verifies that the received block is what we requested. If so, parent lookup now waits for
     /// the processing result of the block.
     pub fn verify_block(
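`parts_for_processing` splits the `(root, block)` tuples into two parallel vectors with an explicit loop; `Iterator::unzip` expresses the same operation in one line, as this self-contained sketch shows (stand-in types):

    // Stand-in types; the real code pairs Hash256 with Arc<SignedBeaconBlock>.
    fn main() {
        let downloaded_blocks: Vec<(u64, &str)> = vec![(1, "a"), (2, "b")];
        let (hashes, blocks): (Vec<u64>, Vec<&str>) =
            downloaded_blocks.into_iter().unzip();
        assert_eq!(hashes, vec![1, 2]);
        assert_eq!(blocks, vec!["a", "b"]);
    }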
@@ -259,7 +259,7 @@ fn test_single_block_lookup_becomes_parent_request() {
     assert_eq!(bl.single_block_lookups.len(), 0);
     rig.expect_parent_request();
     rig.expect_empty_network();
-    assert_eq!(bl.parent_queue.len(), 1);
+    assert_eq!(bl.parent_lookups.len(), 1);
 }

 #[test]
@@ -287,7 +287,7 @@ fn test_parent_lookup_happy_path() {
         was_non_empty: true,
     };
     bl.parent_chain_processed(chain_hash, process_result, &mut cx);
-    assert_eq!(bl.parent_queue.len(), 0);
+    assert_eq!(bl.parent_lookups.len(), 0);
 }

 #[test]
@@ -324,7 +324,7 @@ fn test_parent_lookup_wrong_response() {
         was_non_empty: true,
     };
     bl.parent_chain_processed(chain_hash, process_result, &mut cx);
-    assert_eq!(bl.parent_queue.len(), 0);
+    assert_eq!(bl.parent_lookups.len(), 0);
 }

 #[test]
@@ -356,7 +356,7 @@ fn test_parent_lookup_empty_response() {
         was_non_empty: true,
     };
     bl.parent_chain_processed(chain_hash, process_result, &mut cx);
-    assert_eq!(bl.parent_queue.len(), 0);
+    assert_eq!(bl.parent_lookups.len(), 0);
 }

 #[test]
@@ -387,7 +387,7 @@ fn test_parent_lookup_rpc_failure() {
         was_non_empty: true,
     };
     bl.parent_chain_processed(chain_hash, process_result, &mut cx);
-    assert_eq!(bl.parent_queue.len(), 0);
+    assert_eq!(bl.parent_lookups.len(), 0);
 }

 #[test]
@@ -419,11 +419,11 @@ fn test_parent_lookup_too_many_attempts() {
             }
         }
         if i < parent_lookup::PARENT_FAIL_TOLERANCE {
-            assert_eq!(bl.parent_queue[0].failed_attempts(), dbg!(i));
+            assert_eq!(bl.parent_lookups[0].failed_attempts(), dbg!(i));
         }
     }

-    assert_eq!(bl.parent_queue.len(), 0);
+    assert_eq!(bl.parent_lookups.len(), 0);
 }

 #[test]
@@ -450,11 +450,11 @@ fn test_parent_lookup_too_many_download_attempts_no_blacklist() {
             rig.expect_penalty();
         }
         if i < parent_lookup::PARENT_FAIL_TOLERANCE {
-            assert_eq!(bl.parent_queue[0].failed_attempts(), dbg!(i));
+            assert_eq!(bl.parent_lookups[0].failed_attempts(), dbg!(i));
         }
     }

-    assert_eq!(bl.parent_queue.len(), 0);
+    assert_eq!(bl.parent_lookups.len(), 0);
     assert!(!bl.failed_chains.contains(&block_hash));
     assert!(!bl.failed_chains.contains(&parent.canonical_root()));
 }
@@ -491,7 +491,7 @@ fn test_parent_lookup_too_many_processing_attempts_must_blacklist() {
     }

     assert!(bl.failed_chains.contains(&block_hash));
-    assert_eq!(bl.parent_queue.len(), 0);
+    assert_eq!(bl.parent_lookups.len(), 0);
 }

 #[test]
@@ -545,7 +545,7 @@ fn test_parent_lookup_disconnection() {
         &mut cx,
     );
     bl.peer_disconnected(&peer_id, &mut cx);
-    assert!(bl.parent_queue.is_empty());
+    assert!(bl.parent_lookups.is_empty());
 }

 #[test]
@@ -598,5 +598,78 @@ fn test_parent_lookup_ignored_response() {
     // Return an Ignored result. The request should be dropped
     bl.parent_block_processed(chain_hash, BlockProcessResult::Ignored, &mut cx);
     rig.expect_empty_network();
-    assert_eq!(bl.parent_queue.len(), 0);
+    assert_eq!(bl.parent_lookups.len(), 0);
 }
+
+/// This is a regression test.
+#[test]
+fn test_same_chain_race_condition() {
+    let (mut bl, mut cx, mut rig) = TestRig::test_setup(Some(Level::Debug));
+
+    #[track_caller]
+    fn parent_lookups_consistency(bl: &BlockLookups<T>) {
+        let hashes: Vec<_> = bl
+            .parent_lookups
+            .iter()
+            .map(|req| req.chain_hash())
+            .collect();
+        let expected = hashes.len();
+        assert_eq!(
+            expected,
+            hashes
+                .into_iter()
+                .collect::<std::collections::HashSet<_>>()
+                .len(),
+            "duplicated chain hashes in parent queue"
+        )
+    }
+    // if we use one or two blocks it will match on the hash or the parent hash, so make a longer
+    // chain.
+    let depth = 4;
+    let mut blocks = Vec::<Arc<SignedBeaconBlock<E>>>::with_capacity(depth);
+    while blocks.len() < depth {
+        let parent = blocks
+            .last()
+            .map(|b| b.canonical_root())
+            .unwrap_or_else(Hash256::random);
+        let block = Arc::new(rig.block_with_parent(parent));
+        blocks.push(block);
+    }
+
+    let peer_id = PeerId::random();
+    let trigger_block = blocks.pop().unwrap();
|
let chain_hash = trigger_block.canonical_root();
|
||||||
|
bl.search_parent(chain_hash, trigger_block.clone(), peer_id, &mut cx);
|
||||||
|
|
||||||
|
for (i, block) in blocks.into_iter().rev().enumerate() {
|
||||||
|
let id = rig.expect_parent_request();
|
||||||
|
// the block
|
||||||
|
bl.parent_lookup_response(id, peer_id, Some(block.clone()), D, &mut cx);
|
||||||
|
// the stream termination
|
||||||
|
bl.parent_lookup_response(id, peer_id, None, D, &mut cx);
|
||||||
|
// the processing request
|
||||||
|
rig.expect_block_process();
|
||||||
|
// the processing result
|
||||||
|
if i + 2 == depth {
|
||||||
|
// one block was removed
|
||||||
|
bl.parent_block_processed(chain_hash, BlockError::BlockIsAlreadyKnown.into(), &mut cx)
|
||||||
|
} else {
|
||||||
|
bl.parent_block_processed(chain_hash, BlockError::ParentUnknown(block).into(), &mut cx)
|
||||||
|
}
|
||||||
|
parent_lookups_consistency(&bl)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Processing succeeds, now the rest of the chain should be sent for processing.
|
||||||
|
rig.expect_parent_chain_process();
|
||||||
|
|
||||||
|
// Try to get this block again while the chain is being processed. We should not request it again.
|
||||||
|
let peer_id = PeerId::random();
|
||||||
|
bl.search_parent(chain_hash, trigger_block, peer_id, &mut cx);
|
||||||
|
parent_lookups_consistency(&bl);
|
||||||
|
|
||||||
|
let process_result = BatchProcessResult::Success {
|
||||||
|
was_non_empty: true,
|
||||||
|
};
|
||||||
|
bl.parent_chain_processed(chain_hash, process_result, &mut cx);
|
||||||
|
assert_eq!(bl.parent_lookups.len(), 0);
|
||||||
}
|
}
|
||||||
|
@@ -643,7 +643,7 @@ impl<T: BeaconChainTypes> SyncManager<T> {
 
         // Some logs.
         if dropped_single_blocks_requests > 0 || dropped_parent_chain_requests > 0 {
-            debug!(self.log, "Execution engine not online, dropping active requests.";
+            debug!(self.log, "Execution engine not online. Dropping active requests.";
                 "dropped_single_blocks_requests" => dropped_single_blocks_requests,
                 "dropped_parent_chain_requests" => dropped_parent_chain_requests,
             );
@@ -242,7 +242,7 @@ impl<T: BeaconChainTypes> SyncNetworkContext<T> {
                 source: ReportSource::SyncService,
             })
             .unwrap_or_else(|_| {
-                warn!(self.log, "Could not report peer, channel failed");
+                warn!(self.log, "Could not report peer: channel failed");
             });
     }
 
@@ -257,7 +257,7 @@ impl<T: BeaconChainTypes> SyncNetworkContext<T> {
                 msg,
             })
             .unwrap_or_else(|e| {
-                warn!(self.log, "Could not report peer, channel failed"; "error"=> %e);
+                warn!(self.log, "Could not report peer: channel failed"; "error"=> %e);
             });
     }
 
@@ -714,6 +714,14 @@ pub fn cli_app<'a, 'b>() -> App<'a, 'b> {
                 .takes_value(true)
                 .conflicts_with("checkpoint-state")
         )
+        .arg(
+            Arg::with_name("checkpoint-sync-url-timeout")
+                .long("checkpoint-sync-url-timeout")
+                .help("Set the timeout for checkpoint sync calls to remote beacon node HTTP endpoint.")
+                .value_name("SECONDS")
+                .takes_value(true)
+                .default_value("60")
+        )
         .arg(
             Arg::with_name("reconstruct-historic-states")
                 .long("reconstruct-historic-states")
@@ -860,4 +868,19 @@ pub fn cli_app<'a, 'b>() -> App<'a, 'b> {
                     Useful if you intend to run a non-validating beacon node.")
                 .takes_value(false)
         )
+        .arg(
+            Arg::with_name("light-client-server")
+                .long("light-client-server")
+                .help("Act as a full node supporting light clients on the p2p network \
+                       [experimental]")
+                .takes_value(false)
+        )
+        .arg(
+            Arg::with_name("gui")
+                .long("gui")
+                .hidden(true)
+                .help("Enable the graphical user interface and all its requirements. \
+                      This is equivalent to --http and --validator-monitor-auto.")
+                .takes_value(false)
+        )
 }
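As a usage sketch of the new flag (the endpoint URL is illustrative; the 60-second default comes from the definition above), an operator pointing at a slow checkpoint provider could raise the timeout like so:

```bash
# Sketch: raise the checkpoint sync HTTP timeout from the default 60 s to 120 s.
lighthouse bn \
  --network mainnet \
  --checkpoint-sync-url https://mainnet.checkpoint.sigp.io \
  --checkpoint-sync-url-timeout 120
```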
@@ -14,6 +14,7 @@ use std::cmp::max;
 use std::fmt::Debug;
 use std::fmt::Write;
 use std::fs;
+use std::net::Ipv6Addr;
 use std::net::{IpAddr, Ipv4Addr, ToSocketAddrs};
 use std::path::{Path, PathBuf};
 use std::str::FromStr;
@@ -34,13 +35,13 @@ pub fn get_config<E: EthSpec>(
     let spec = &context.eth2_config.spec;
     let log = context.log();
 
-    let mut client_config = ClientConfig {
-        data_dir: get_data_dir(cli_args),
-        ..Default::default()
-    };
+    let mut client_config = ClientConfig::default();
+
+    // Update the client's data directory
+    client_config.set_data_dir(get_data_dir(cli_args));
 
     // If necessary, remove any existing database and configuration
-    if client_config.data_dir.exists() && cli_args.is_present("purge-db") {
+    if client_config.data_dir().exists() && cli_args.is_present("purge-db") {
         // Remove the chain_db.
         let chain_db = client_config.get_db_path();
         if chain_db.exists() {
@@ -57,11 +58,11 @@ pub fn get_config<E: EthSpec>(
     }
 
     // Create `datadir` and any non-existing parent directories.
-    fs::create_dir_all(&client_config.data_dir)
+    fs::create_dir_all(client_config.data_dir())
         .map_err(|e| format!("Failed to create data dir: {}", e))?;
 
     // logs the chosen data directory
-    let mut log_dir = client_config.data_dir.clone();
+    let mut log_dir = client_config.data_dir().clone();
     // remove /beacon from the end
     log_dir.pop();
     info!(log, "Data directory initialised"; "datadir" => log_dir.into_os_string().into_string().expect("Datadir should be a valid os string"));
@@ -69,10 +70,13 @@ pub fn get_config<E: EthSpec>(
     /*
      * Networking
      */
 
+    let data_dir_ref = client_config.data_dir().clone();
+
     set_network_config(
         &mut client_config.network,
         cli_args,
-        &client_config.data_dir,
+        &data_dir_ref,
         log,
         false,
     )?;
@@ -303,7 +307,7 @@ pub fn get_config<E: EthSpec>(
     } else if let Some(jwt_secret_key) = cli_args.value_of("execution-jwt-secret-key") {
         use std::fs::File;
         use std::io::Write;
-        secret_file = client_config.data_dir.join(DEFAULT_JWT_FILE);
+        secret_file = client_config.data_dir().join(DEFAULT_JWT_FILE);
         let mut jwt_secret_key_file = File::create(secret_file.clone())
             .map_err(|e| format!("Error while creating jwt_secret_key file: {:?}", e))?;
         jwt_secret_key_file
@@ -332,7 +336,7 @@ pub fn get_config<E: EthSpec>(
             clap_utils::parse_optional(cli_args, "suggested-fee-recipient")?;
         el_config.jwt_id = clap_utils::parse_optional(cli_args, "execution-jwt-id")?;
         el_config.jwt_version = clap_utils::parse_optional(cli_args, "execution-jwt-version")?;
-        el_config.default_datadir = client_config.data_dir.clone();
+        el_config.default_datadir = client_config.data_dir().clone();
         el_config.builder_profit_threshold =
             clap_utils::parse_required(cli_args, "builder-profit-threshold")?;
         let execution_timeout_multiplier =
@@ -441,6 +445,8 @@ pub fn get_config<E: EthSpec>(
                 .extend_from_slice(boot_nodes)
         }
     }
+    client_config.chain.checkpoint_sync_url_timeout =
+        clap_utils::parse_required::<u64>(cli_args, "checkpoint-sync-url-timeout")?;
 
     client_config.genesis = if let Some(genesis_state_bytes) =
         eth2_network_config.genesis_state_bytes.clone()
@@ -571,7 +577,7 @@ pub fn get_config<E: EthSpec>(
     let slasher_dir = if let Some(slasher_dir) = cli_args.value_of("slasher-dir") {
         PathBuf::from(slasher_dir)
     } else {
-        client_config.data_dir.join("slasher_db")
+        client_config.data_dir().join("slasher_db")
     };
 
     let mut slasher_config = slasher::Config::new(slasher_dir);
@@ -703,6 +709,12 @@ pub fn get_config<E: EthSpec>(
     client_config.chain.builder_fallback_disable_checks =
         cli_args.is_present("builder-fallback-disable-checks");
 
+    // Graphical user interface config.
+    if cli_args.is_present("gui") {
+        client_config.http_api.enabled = true;
+        client_config.validator_monitor_auto = true;
+    }
+
     Ok(client_config)
 }
 
@@ -832,9 +844,11 @@ pub fn set_network_config(
     }
 
     if cli_args.is_present("enr-match") {
-        // set the enr address to localhost if the address is 0.0.0.0
-        if config.listen_address == "0.0.0.0".parse::<IpAddr>().expect("valid ip addr") {
-            config.enr_address = Some("127.0.0.1".parse::<IpAddr>().expect("valid ip addr"));
+        // set the enr address to localhost if the address is unspecified
+        if config.listen_address == IpAddr::V4(Ipv4Addr::UNSPECIFIED) {
+            config.enr_address = Some(IpAddr::V4(Ipv4Addr::LOCALHOST));
+        } else if config.listen_address == IpAddr::V6(Ipv6Addr::UNSPECIFIED) {
+            config.enr_address = Some(IpAddr::V6(Ipv6Addr::LOCALHOST));
         } else {
             config.enr_address = Some(config.listen_address);
         }
@@ -914,6 +928,9 @@ pub fn set_network_config(
         config.discv5_config.table_filter = |_| true;
     }
 
+    // Light client server config.
+    config.enable_light_client_server = cli_args.is_present("light-client-server");
+
     Ok(())
 }
 
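The `--gui` wiring above simply enables two existing options, so, per the flag's own help text, the following invocations should behave identically (a sketch, not an exhaustive equivalence):

```bash
lighthouse bn --gui
# equivalent, per the help text:
lighthouse bn --http --validator-monitor-auto
```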
@@ -1,9 +1,6 @@
 # Summary
 
 * [Introduction](./intro.md)
-* [Become a Validator](./mainnet-validator.md)
-* [Become a Testnet Validator](./testnet-validator.md)
-* [Merge Migration](./merge-migration.md)
 * [Installation](./installation.md)
     * [System Requirements](./system-requirements.md)
     * [Pre-Built Binaries](./installation-binaries.md)
@@ -13,6 +10,9 @@
     * [Cross-Compiling](./cross-compiling.md)
     * [Homebrew](./homebrew.md)
     * [Update Priorities](./installation-priorities.md)
+* [Run a Node](./run_a_node.md)
+* [Become a Validator](./mainnet-validator.md)
+* [Become a Testnet Validator](./testnet-validator.md)
 * [Key Management](./key-management.md)
     * [Create a wallet](./wallet-create.md)
     * [Create a validator](./validator-create.md)
@@ -46,6 +46,7 @@
     * [Pre-Releases](./advanced-pre-releases.md)
     * [Release Candidates](./advanced-release-candidates.md)
 * [MEV and Lighthouse](./builders.md)
+* [Merge Migration](./merge-migration.md)
 * [Contributing](./contributing.md)
     * [Development Environment](./setup.md)
 * [FAQs](./faq.md)
@@ -62,6 +62,43 @@ curl -X GET "http://localhost:5052/lighthouse/health" -H "accept: application/j
 
 ```
 
+### `/lighthouse/ui/health`
+
+```bash
+curl -X GET "http://localhost:5052/lighthouse/ui/health" -H "accept: application/json" | jq
+```
+
+```json
+{
+  "data": {
+    "total_memory": 16443219968,
+    "free_memory": 1283739648,
+    "used_memory": 5586264064,
+    "sys_loadavg_1": 0.59,
+    "sys_loadavg_5": 1.13,
+    "sys_loadavg_15": 2.41,
+    "cpu_cores": 4,
+    "cpu_threads": 8,
+    "global_cpu_frequency": 3.4,
+    "disk_bytes_total": 502390845440,
+    "disk_bytes_free": 9981386752,
+    "network_name": "wlp0s20f3",
+    "network_bytes_total_received": 14105556611,
+    "network_bytes_total_transmit": 3649489389,
+    "nat_open": true,
+    "connected_peers": 80,
+    "sync_state": "Synced",
+    "system_uptime": 660706,
+    "app_uptime": 105,
+    "system_name": "Arch Linux",
+    "kernel_version": "5.19.13-arch1-1",
+    "os_version": "Linux rolling Arch Linux",
+    "host_name": "Computer1"
+  }
+}
+```
+
 ### `/lighthouse/syncing`
 
 ```bash
@@ -6,6 +6,7 @@ HTTP Path | Description |
 | --- | -- |
 [`GET /lighthouse/version`](#get-lighthouseversion) | Get the Lighthouse software version.
 [`GET /lighthouse/health`](#get-lighthousehealth) | Get information about the host machine.
+[`GET /lighthouse/ui/health`](#get-lighthouseuihealth) | Get information about the host machine. Intended for UI applications.
 [`GET /lighthouse/spec`](#get-lighthousespec) | Get the Ethereum proof-of-stake consensus specification used by the validator.
 [`GET /lighthouse/auth`](#get-lighthouseauth) | Get the location of the authorization token.
 [`GET /lighthouse/validators`](#get-lighthousevalidators) | List all validators.
@@ -77,6 +78,45 @@ Returns information regarding the health of the host machine.
 }
 ```
 
+## `GET /lighthouse/ui/health`
+
+Returns information regarding the health of the host machine.
+
+### HTTP Specification
+
+| Property          | Specification                              |
+|-------------------|--------------------------------------------|
+| Path              | `/lighthouse/ui/health`                    |
+| Method            | GET                                        |
+| Required Headers  | [`Authorization`](./api-vc-auth-header.md) |
+| Typical Responses | 200                                        |
+
+### Example Response Body
+
+```json
+{
+  "data": {
+    "total_memory": 16443219968,
+    "free_memory": 1283739648,
+    "used_memory": 5586264064,
+    "sys_loadavg_1": 0.59,
+    "sys_loadavg_5": 1.13,
+    "sys_loadavg_15": 2.41,
+    "cpu_cores": 4,
+    "cpu_threads": 8,
+    "global_cpu_frequency": 3.4,
+    "disk_bytes_total": 502390845440,
+    "disk_bytes_free": 9981386752,
+    "system_uptime": 660706,
+    "app_uptime": 105,
+    "system_name": "Arch Linux",
+    "kernel_version": "5.19.13-arch1-1",
+    "os_version": "Linux rolling Arch Linux",
+    "host_name": "Computer1"
+  }
+}
+```
+
 ## `GET /lighthouse/spec`
 
 Returns the Ethereum proof-of-stake consensus specification loaded for this validator.
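A hedged usage sketch for this validator client endpoint (the port 5062 and the token path below reflect a typical Lighthouse VC setup and may differ on your host; see [`Authorization`](./api-vc-auth-header.md)):

```bash
# Sketch: query the VC UI health endpoint with the required API token.
# The token path is illustrative; yours may differ.
curl -X GET "http://localhost:5062/lighthouse/ui/health" \
  -H "Authorization: Bearer $(cat ~/.lighthouse/mainnet/validators/api-token.txt)" | jq
```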
@@ -24,6 +24,8 @@ validator client or the slasher**.
 | v2.5.0 | Aug 2022 | v11 | yes |
 | v3.0.0 | Aug 2022 | v11 | yes |
 | v3.1.0 | Sep 2022 | v12 | yes |
+| v3.2.0 | Oct 2022 | v12 | yes |
+| v3.3.0 | TBD | v13 | yes |
 
 > **Note**: All point releases (e.g. v2.3.1) are schema-compatible with the prior minor release
 > (e.g. v2.3.0).
@@ -18,6 +18,7 @@ We implement the specification as defined in the
 You may read this book from start to finish, or jump to some of these topics:
 
 - Follow the [Installation Guide](./installation.md) to install Lighthouse.
+- Run your very [own beacon node](./run_a_node.md).
 - Learn about [becoming a mainnet validator](./mainnet-validator.md).
 - Get hacking with the [Development Environment Guide](./setup.md).
 - Utilize the whole stack by starting a [local testnet](./setup.md#local-testnets).
@@ -1,9 +1,8 @@
 # Merge Migration
 
-This document provides detail for users who want to run a merge-ready Lighthouse node.
+This document provides detail for users who want to run a Lighthouse node on post-merge Ethereum.
 
-> The merge is occurring on mainnet in September. You _must_ have a merge-ready setup by September 6
-> 2022.
+> The merge occurred on mainnet in September 2022.
 
 ## Necessary Configuration
 
@@ -27,12 +26,9 @@ engine to a merge-ready version.
 You must configure your node to be merge-ready before the Bellatrix fork occurs on the network
 on which your node is operating.
 
-* **Mainnet**: the Bellatrix fork is scheduled for epoch 144896, September 6 2022 11:34 UTC.
-  You must ensure your node configuration is updated before then in order to continue following
-  the chain. We recommend updating your configuration now.
-
-* **Goerli (Prater)**, **Ropsten**, **Sepolia**, **Kiln**: the Bellatrix fork has already occurred.
-  You must have a merge-ready configuration right now.
+* **Gnosis**: the Bellatrix fork has not yet been scheduled.
+* **Mainnet**, **Goerli (Prater)**, **Ropsten**, **Sepolia**, **Kiln**: the Bellatrix fork has
+  already occurred. You must have a merge-ready configuration right now.
 
 ## Connecting to an execution engine
 
@@ -65,6 +61,7 @@ the relevant page for your execution engine for the required flags:
 - [Geth: Connecting to Consensus Clients](https://geth.ethereum.org/docs/interface/consensus-clients)
 - [Nethermind: Running Nethermind Post Merge](https://docs.nethermind.io/nethermind/first-steps-with-nethermind/running-nethermind-post-merge)
 - [Besu: Prepare For The Merge](https://besu.hyperledger.org/en/stable/HowTo/Upgrade/Prepare-for-The-Merge/)
+- [Erigon: Beacon Chain (Consensus Layer)](https://github.com/ledgerwatch/erigon#beacon-chain-consensus-layer)
 
 Once you have configured your execution engine to open up the engine API (usually on port 8551) you
 should add the URL to your `lighthouse bn` flags with `--execution-endpoint <URL>`, as well as
@@ -55,42 +55,27 @@ In our previous example, we listed `http://192.168.1.1:5052` as a redundant
 node. Apart from having sufficient resources, the backup node should have the
 following flags:
 
-- `--staking`: starts the HTTP API server and ensures the execution chain is synced.
+- `--http`: starts the HTTP API server.
 - `--http-address 0.0.0.0`: this allows *any* external IP address to access the
   HTTP server (a firewall should be configured to deny unauthorized access to port
   `5052`). This is only required if your backup node is on a different host.
-- `--subscribe-all-subnets`: ensures that the beacon node subscribes to *all*
-  subnets, not just on-demand requests from validators.
-- `--import-all-attestations`: ensures that the beacon node performs
-  aggregation on all seen attestations.
+- `--execution-endpoint`: see [Merge Migration](./merge-migration.md).
+- `--execution-jwt`: see [Merge Migration](./merge-migration.md).
 
-Subsequently, one could use the following command to provide a backup beacon
-node:
+For example, one could use the following command to provide a backup beacon node:
 
 ```bash
 lighthouse bn \
-  --staking \
+  --http \
   --http-address 0.0.0.0 \
-  --subscribe-all-subnets \
-  --import-all-attestations
+  --execution-endpoint http://localhost:8551 \
+  --execution-jwt /secrets/jwt.hex
 ```
 
-### Resource usage of redundant Beacon Nodes
-
-The `--subscribe-all-subnets` and `--import-all-attestations` flags typically
-cause a significant increase in resource consumption. A doubling in CPU
-utilization and RAM consumption is expected.
-
-The increase in resource consumption is due to the fact that the beacon node is
-now processing, validating, aggregating and forwarding *all* attestations,
-whereas previously it was likely only doing a fraction of this work. Without
-these flags, subscription to attestation subnets and aggregation of
-attestations is only performed for validators which [explicitly request
-subscriptions][subscribe-api].
-
-There are 64 subnets and each validator will result in a subscription to *at
-least* one subnet. So, using the two aforementioned flags will result in
-resource consumption akin to running 64+ validators.
+Prior to v3.2.0 fallback beacon nodes also required the `--subscribe-all-subnets` and
+`--import-all-attestations` flags. These flags are no longer required as the validator client will
+now broadcast subscriptions to all connected beacon nodes by default. This broadcast behaviour
+can be disabled using the `--disable-run-on-all` flag for `lighthouse vc`.
 
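To make the broadcast behaviour concrete, a sketch of a validator client pointed at the primary and backup beacon nodes from this page (`--beacon-nodes` takes a comma-separated list):

```bash
# Sketch: duties are broadcast to both nodes by default; add
# --disable-run-on-all to restrict duties to the first available node.
lighthouse vc \
  --beacon-nodes http://localhost:5052,http://192.168.1.1:5052
```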
 ## Redundant execution nodes
 
171 book/src/run_a_node.md Normal file
@@ -0,0 +1,171 @@
+# Run a Node
+
+This document provides detail for users who want to run a Lighthouse beacon node.
+You should have completed one [Installation](./installation.md) method of your choice before continuing with the following steps:
+
+1. Set up an [execution node](#step-1-set-up-an-execution-node);
+1. Enable [checkpoint sync](#step-2-choose-a-checkpoint-sync-provider);
+1. Run [Lighthouse](#step-3-run-lighthouse);
+1. [Check logs](#step-4-check-logs); and
+1. [Further readings](#step-5-further-readings).
+
+Checkpoint sync is *optional*; however, we recommend it since it is substantially faster
+than syncing from genesis while still providing the same functionality.
+
+## Step 1: Set up an execution node
+
+The Lighthouse beacon node *must* connect to an execution engine in order to validate the transactions
+present in blocks. Two flags are used to configure this connection:
+
+- `--execution-endpoint`: the *URL* of the execution engine API. Often this will be
+  `http://localhost:8551`.
+- `--execution-jwt`: the *path* to the file containing the JWT secret shared by Lighthouse and the
+  execution engine. This is a mandatory form of authentication that ensures that Lighthouse
+  has authority to control the execution engine.
+
+Each execution engine has its own flags for configuring the engine API and JWT.
+Please consult the relevant page of your execution engine for the required flags:
+
+- [Geth: Connecting to Consensus Clients](https://geth.ethereum.org/docs/interface/consensus-clients)
+- [Nethermind: Running Nethermind & CL](https://docs.nethermind.io/nethermind/first-steps-with-nethermind/running-nethermind-post-merge)
+- [Besu: Connect to Mainnet](https://besu.hyperledger.org/en/stable/public-networks/get-started/connect/mainnet/)
+- [Erigon: Beacon Chain (Consensus Layer)](https://github.com/ledgerwatch/erigon#beacon-chain-consensus-layer)
+
+The execution engine connection must be *exclusive*, i.e. you must have one execution node
+per beacon node. The reason for this is that the beacon node _controls_ the execution node.
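As a minimal sketch of preparing the shared secret (the output path mirrors the examples later in this file; any path readable by both clients works):

```bash
# Generate a 32-byte hex JWT secret shared by Lighthouse and the execution engine.
openssl rand -hex 32 | tr -d "\n" > /secrets/jwt.hex
```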
+## Step 2: Choose a checkpoint sync provider
+
+Lighthouse supports fast sync from a recent finalized checkpoint.
+The checkpoint sync is done using a [public endpoint](#use-a-community-checkpoint-sync-endpoint)
+provided by the Ethereum community.
+
+In [step 3](#step-3-run-lighthouse), when running Lighthouse,
+we will enable checkpoint sync by providing the URL to the `--checkpoint-sync-url` flag.
+
+### Use a community checkpoint sync endpoint
+
+The Ethereum community provides various [public endpoints](https://eth-clients.github.io/checkpoint-sync-endpoints/) for you to choose from for your initial checkpoint state. Select one for your network and use it as the URL.
+
+For example, the URL for Sigma Prime's checkpoint sync server for mainnet is `https://mainnet.checkpoint.sigp.io`,
+which we will use in [step 3](#step-3-run-lighthouse).
+
+## Step 3: Run Lighthouse
+
+To run Lighthouse, we use the three flags from the steps above:
+- `--execution-endpoint`;
+- `--execution-jwt`; and
+- `--checkpoint-sync-url`.
+
+Additionally, we run Lighthouse with the `--network` flag, which selects a network:
+
+- `lighthouse` (no flag): Mainnet.
+- `lighthouse --network mainnet`: Mainnet.
+- `lighthouse --network goerli`: Goerli (testnet).
+
+Using the correct `--network` flag is very important; using the wrong flag can
+result in penalties, slashings or lost deposits. As a rule of thumb, *always*
+provide a `--network` flag instead of relying on the default.
+
+For the testnets we support [Goerli](https://goerli.net/) (`--network goerli`),
+[Sepolia](https://sepolia.dev/) (`--network sepolia`), and [Gnosis chain](https://www.gnosis.io/) (`--network gnosis`).
+
+Minor modifications depend on whether you want to run your node while [staking](#staking) or [non-staking](#non-staking).
+In the following, we will provide examples of what a Lighthouse setup could look like.
+
+### Staking
+
+```
+lighthouse bn \
+  --network mainnet \
+  --execution-endpoint http://localhost:8551 \
+  --execution-jwt /secrets/jwt.hex \
+  --checkpoint-sync-url https://mainnet.checkpoint.sigp.io \
+  --http
+```
+
+A Lighthouse beacon node can be configured to expose an HTTP server by supplying the `--http` flag.
+The default listen address is `127.0.0.1:5052`.
+The HTTP API is required for the beacon node to accept connections from the *validator client*, which manages keys.
+
+### Non-staking
+
+```
+lighthouse bn \
+  --network mainnet \
+  --execution-endpoint http://localhost:8551 \
+  --execution-jwt /secrets/jwt.hex \
+  --checkpoint-sync-url https://mainnet.checkpoint.sigp.io \
+  --disable-deposit-contract-sync
+```
+
+Since we are not staking, we can use the `--disable-deposit-contract-sync` flag.
+
+---
+
+Once Lighthouse runs, we can monitor the logs to see if it is syncing correctly.
+
+## Step 4: Check logs
+Several logs help you identify if Lighthouse is running correctly.
+
+### Logs - Checkpoint sync
+Lighthouse will print a message to indicate that checkpoint sync is being used:
+
+```
+INFO Starting checkpoint sync                remote_url: http://remote-bn:8000/, service: beacon
+```
+
+After a short time (usually less than a minute), it will log the details of the checkpoint
+loaded from the remote beacon node:
+
+```
+INFO Loaded checkpoint block and state       state_root: 0xe8252c68784a8d5cc7e5429b0e95747032dd1dcee0d1dc9bdaf6380bf90bc8a6, block_root: 0x5508a20147299b1a7fe9dbea1a8b3bf979f74c52e7242039bd77cbff62c0695a, slot: 2034720, service: beacon
+```
+
+Once the checkpoint is loaded Lighthouse will sync forwards to the head of the chain.
+
+If a validator client is connected to the node then it will be able to start completing its duties
+as soon as forwards sync completes.
+
+> **Security Note**: You should cross-reference the `block_root` and `slot` of the loaded checkpoint
+> against a trusted source like another [public endpoint](https://eth-clients.github.io/checkpoint-sync-endpoints/),
+> a friend's node, or a block explorer.
+
+#### Backfilling Blocks
+
+Once forwards sync completes, Lighthouse will commence a "backfill sync" to download the blocks
+from the checkpoint back to genesis.
+
+The beacon node will log messages similar to the following each minute while it completes backfill
+sync:
+
+```
+INFO Downloading historical blocks           est_time: 5 hrs 0 mins, speed: 111.96 slots/sec, distance: 2020451 slots (40 weeks 0 days), service: slot_notifier
+```
+
+Once backfill is complete, an `INFO Historical block download complete` log will be emitted.
+
+Check out the [FAQ](./checkpoint-sync.md#faq) for more information on checkpoint sync.
+
+### Logs - Syncing
+
+You should see that Lighthouse remains in sync and marks blocks
+as `verified` indicating that they have been processed successfully by the execution engine:
+
+```
+INFO Synced, slot: 3690668, block: 0x1244…cb92, epoch: 115333, finalized_epoch: 115331, finalized_root: 0x0764…2a3d, exec_hash: 0x929c…1ff6 (verified), peers: 78
+```
+
+## Step 5: Further readings
+
+Several other resources are the next logical step to explore after running your beacon node:
+
+- Learn how to [become a validator](./mainnet-validator.md);
+- Explore how to [manage your keys](./key-management.md);
+- Read up on [validator management](./validator-management.md);
+- Dig into the [APIs](./api.md) that the beacon node and validator client provide;
+- Learn even more about [checkpoint sync](./checkpoint-sync.md); or
+- Investigate what steps had to be taken in the past to execute a smooth [merge migration](./merge-migration.md).
+
+Finally, if you are struggling with anything, join our [Discord](https://discord.gg/cyAszAh). We are happy to help!
@@ -1,6 +1,6 @@
 [package]
 name = "boot_node"
-version = "3.2.1"
+version = "3.3.0"
 authors = ["Sigma Prime <contact@sigmaprime.io>"]
 edition = "2021"
 
@@ -1,9 +1,11 @@
 use beacon_node::{get_data_dir, set_network_config};
 use clap::ArgMatches;
 use eth2_network_config::Eth2NetworkConfig;
+use lighthouse_network::discv5::enr::EnrBuilder;
+use lighthouse_network::discv5::IpMode;
 use lighthouse_network::discv5::{enr::CombinedKey, Discv5Config, Enr};
 use lighthouse_network::{
-    discovery::{create_enr_builder_from_config, load_enr_from_disk, use_or_load_enr},
+    discovery::{load_enr_from_disk, use_or_load_enr},
     load_private_key, CombinedKeyExt, NetworkConfig,
 };
 use serde_derive::{Deserialize, Serialize};
@@ -70,6 +72,15 @@ impl<T: EthSpec> BootNodeConfig<T> {
         // the address to listen on
         let listen_socket =
             SocketAddr::new(network_config.listen_address, network_config.discovery_port);
+        if listen_socket.is_ipv6() {
+            // create ipv6 sockets and enable ipv4 mapped addresses.
+            network_config.discv5_config.ip_mode = IpMode::Ip6 {
+                enable_mapped_addresses: true,
+            };
+        } else {
+            // Set explicitly as ipv4 otherwise
+            network_config.discv5_config.ip_mode = IpMode::Ip4;
+        }
 
         let private_key = load_private_key(&network_config, &logger);
         let local_key = CombinedKey::from_libp2p(&private_key)?;
@@ -104,7 +115,29 @@ impl<T: EthSpec> BootNodeConfig<T> {
         // Build the local ENR
 
         let mut local_enr = {
-            let mut builder = create_enr_builder_from_config(&network_config, false);
+            let mut builder = EnrBuilder::new("v4");
+            // Set the enr address if specified. Set also the port.
+            // NOTE: if the port is specified but the address is not, the port won't be
+            // set since it can't be known if it's an ipv6 or ipv4 udp port.
+            if let Some(enr_address) = network_config.enr_address {
+                match enr_address {
+                    std::net::IpAddr::V4(ipv4_addr) => {
+                        builder.ip4(ipv4_addr);
+                        if let Some(port) = network_config.enr_udp_port {
+                            builder.udp4(port);
+                        }
+                    }
+                    std::net::IpAddr::V6(ipv6_addr) => {
+                        builder.ip6(ipv6_addr);
+                        if let Some(port) = network_config.enr_udp_port {
+                            builder.udp6(port);
+                            // We are enabling mapped addresses in the boot node in this case,
+                            // so advertise a udp4 port as well.
+                            builder.udp4(port);
+                        }
+                    }
+                }
+            };
 
             // If we know of the ENR field, add it to the initial construction
             if let Some(enr_fork_bytes) = enr_fork {
@@ -9,53 +9,63 @@ use slog::info;
 use types::EthSpec;
 
 pub async fn run<T: EthSpec>(config: BootNodeConfig<T>, log: slog::Logger) {
+    let BootNodeConfig {
+        listen_socket,
+        boot_nodes,
+        local_enr,
+        local_key,
+        discv5_config,
+        ..
+    } = config;
+
     // Print out useful information about the generated ENR
 
-    let enr_socket = config
-        .local_enr
-        .udp4_socket()
-        .expect("Enr has a UDP socket");
-    let eth2_field = config
-        .local_enr
+    let enr_v4_socket = local_enr.udp4_socket();
+    let enr_v6_socket = local_enr.udp6_socket();
+    let eth2_field = local_enr
         .eth2()
         .map(|fork_id| hex::encode(fork_id.fork_digest))
         .unwrap_or_default();
 
-    info!(log, "Configuration parameters"; "listening_address" => format!("{}:{}", config.listen_socket.ip(), config.listen_socket.port()), "broadcast_address" => format!("{}:{}",enr_socket.ip(), enr_socket.port()), "eth2" => eth2_field);
+    let pretty_v4_socket = enr_v4_socket.as_ref().map(|addr| addr.to_string());
+    let pretty_v6_socket = enr_v6_socket.as_ref().map(|addr| addr.to_string());
+    info!(
+        log, "Configuration parameters";
+        "listening_address" => %listen_socket,
+        "advertised_v4_address" => ?pretty_v4_socket,
+        "advertised_v6_address" => ?pretty_v6_socket,
+        "eth2" => eth2_field
+    );
 
-    info!(log, "Identity established"; "peer_id" => config.local_enr.peer_id().to_string(), "node_id" => config.local_enr.node_id().to_string());
+    info!(log, "Identity established"; "peer_id" => %local_enr.peer_id(), "node_id" => %local_enr.node_id());
 
     // build the contactable multiaddr list, adding the p2p protocol
-    info!(log, "Contact information"; "enr" => config.local_enr.to_base64());
-    info!(log, "Contact information"; "multiaddrs" => format!("{:?}", config.local_enr.multiaddr_p2p()));
+    info!(log, "Contact information"; "enr" => local_enr.to_base64());
+    info!(log, "Contact information"; "multiaddrs" => ?local_enr.multiaddr_p2p());
 
     // construct the discv5 server
-    let mut discv5 = Discv5::new(
-        config.local_enr.clone(),
-        config.local_key,
-        config.discv5_config,
-    )
-    .unwrap();
+    let mut discv5 = Discv5::new(local_enr.clone(), local_key, discv5_config).unwrap();
 
     // If there are any bootnodes add them to the routing table
-    for enr in config.boot_nodes {
+    for enr in boot_nodes {
         info!(
             log,
             "Adding bootnode";
-            "address" => ?enr.udp4_socket(),
-            "peer_id" => enr.peer_id().to_string(),
-            "node_id" => enr.node_id().to_string()
+            "ipv4_address" => ?enr.udp4_socket(),
+            "ipv6_address" => ?enr.udp6_socket(),
+            "peer_id" => ?enr.peer_id(),
+            "node_id" => ?enr.node_id()
         );
-        if enr != config.local_enr {
+        if enr != local_enr {
             if let Err(e) = discv5.add_enr(enr) {
-                slog::warn!(log, "Failed adding ENR"; "error" => e.to_string());
+                slog::warn!(log, "Failed adding ENR"; "error" => ?e);
             }
         }
     }
 
     // start the server
-    if let Err(e) = discv5.start(config.listen_socket).await {
-        slog::crit!(log, "Could not start discv5 server"; "error" => e.to_string());
+    if let Err(e) = discv5.start(listen_socket).await {
+        slog::crit!(log, "Could not start discv5 server"; "error" => %e);
         return;
     }
 
@@ -72,7 +82,7 @@ pub async fn run<T: EthSpec>(config: BootNodeConfig<T>, log: slog::Logger) {
     let mut event_stream = match discv5.event_stream().await {
         Ok(stream) => stream,
         Err(e) => {
-            slog::crit!(log, "Failed to obtain event stream"; "error" => e.to_string());
+            slog::crit!(log, "Failed to obtain event stream"; "error" => %e);
             return;
         }
     };
@@ -81,9 +91,35 @@ pub async fn run<T: EthSpec>(config: BootNodeConfig<T>, log: slog::Logger) {
     loop {
        tokio::select! {
            _ = metric_interval.tick() => {
+               // Get some ipv4/ipv6 stats to add in the metrics.
+               let mut ipv4_only_reachable: usize = 0;
+               let mut ipv6_only_reachable: usize = 0;
+               let mut ipv4_ipv6_reachable: usize = 0;
+               let mut unreachable_nodes: usize = 0;
+               for enr in discv5.kbuckets().iter_ref().filter_map(|entry| entry.status.is_connected().then_some(entry.node.value)) {
+                   let declares_ipv4 = enr.udp4_socket().is_some();
+                   let declares_ipv6 = enr.udp6_socket().is_some();
+                   match (declares_ipv4, declares_ipv6) {
+                       (true, true) => ipv4_ipv6_reachable += 1,
+                       (true, false) => ipv4_only_reachable += 1,
+                       (false, true) => ipv6_only_reachable += 1,
+                       (false, false) => unreachable_nodes += 1,
+                   }
+               }
+
                // display server metrics
                let metrics = discv5.metrics();
-               info!(log, "Server metrics"; "connected_peers" => discv5.connected_peers(), "active_sessions" => metrics.active_sessions, "requests/s" => format!("{:.2}", metrics.unsolicited_requests_per_second));
+               info!(
+                   log, "Server metrics";
+                   "connected_peers" => discv5.connected_peers(),
+                   "active_sessions" => metrics.active_sessions,
+                   "requests/s" => format_args!("{:.2}", metrics.unsolicited_requests_per_second),
+                   "ipv4_nodes" => ipv4_only_reachable,
+                   "ipv6_nodes" => ipv6_only_reachable,
+                   "ipv6_and_ipv4_nodes" => ipv4_ipv6_reachable,
+                   "unreachable_nodes" => unreachable_nodes,
+               );
+
            }
            Some(event) = event_stream.recv() => {
                match event {
@@ -95,7 +131,7 @@ pub async fn run<T: EthSpec>(config: BootNodeConfig<T>, log: slog::Logger) {
                    Discv5Event::TalkRequest(_) => {} // Ignore
                    Discv5Event::NodeInserted { .. } => {} // Ignore
                    Discv5Event::SocketUpdated(socket_addr) => {
-                       info!(log, "External socket address updated"; "socket_addr" => format!("{:?}", socket_addr));
+                       info!(log, "Advertised socket address updated"; "socket_addr" => %socket_addr);
                    }
                    Discv5Event::SessionEstablished{ .. } => {} // Ignore
                }
@@ -23,7 +23,8 @@ status = [
     "check-msrv",
     "slasher-tests",
     "syncing-simulator-ubuntu",
-    "disallowed-from-async-lint"
+    "disallowed-from-async-lint",
+    "compile-with-beta-compiler"
 ]
 use_squash_merge = true
 timeout_sec = 10800
@@ -518,6 +518,29 @@ impl BeaconNodeHttpClient {
         self.get(path).await
     }
 
+    /// `GET beacon/states/{state_id}/randao?epoch`
+    pub async fn get_beacon_states_randao(
+        &self,
+        state_id: StateId,
+        epoch: Option<Epoch>,
+    ) -> Result<Option<ExecutionOptimisticResponse<RandaoMix>>, Error> {
+        let mut path = self.eth_path(V1)?;
+
+        path.path_segments_mut()
+            .map_err(|()| Error::InvalidUrl(self.server.clone()))?
+            .push("beacon")
+            .push("states")
+            .push(&state_id.to_string())
+            .push("randao");
+
+        if let Some(epoch) = epoch {
+            path.query_pairs_mut()
+                .append_pair("epoch", &epoch.to_string());
+        }
+
+        self.get_opt(path).await
+    }
 
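A usage sketch of the route this method wraps (port, state ID and epoch are illustrative; the response shape follows the `ExecutionOptimisticResponse<RandaoMix>` return type above):

```bash
curl -X GET "http://localhost:5052/eth/v1/beacon/states/head/randao?epoch=100" \
  -H "accept: application/json" | jq
# Illustrative response:
# {"execution_optimistic": false, "data": {"randao": "0x…"}}
```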
 /// `GET beacon/states/{state_id}/validators/{validator_id}`
 ///
 /// Returns `Ok(None)` on a 404 error.
@@ -657,6 +680,17 @@ impl BeaconNodeHttpClient {
         Ok(path)
     }
 
+    /// Path for `v1/beacon/blinded_blocks/{block_id}`
+    pub fn get_beacon_blinded_blocks_path(&self, block_id: BlockId) -> Result<Url, Error> {
+        let mut path = self.eth_path(V1)?;
+        path.path_segments_mut()
+            .map_err(|()| Error::InvalidUrl(self.server.clone()))?
+            .push("beacon")
+            .push("blinded_blocks")
+            .push(&block_id.to_string());
+        Ok(path)
+    }
+
 /// `GET v2/beacon/blocks`
 ///
 /// Returns `Ok(None)` on a 404 error.
@@ -701,6 +735,51 @@ impl BeaconNodeHttpClient {
         }))
     }
 
+    /// `GET v1/beacon/blinded_blocks/{block_id}`
+    ///
+    /// Returns `Ok(None)` on a 404 error.
+    pub async fn get_beacon_blinded_blocks<T: EthSpec>(
+        &self,
+        block_id: BlockId,
+    ) -> Result<Option<ExecutionOptimisticForkVersionedResponse<SignedBlindedBeaconBlock<T>>>, Error>
+    {
+        let path = self.get_beacon_blinded_blocks_path(block_id)?;
+        let response = match self.get_response(path, |b| b).await.optional()? {
+            Some(res) => res,
+            None => return Ok(None),
+        };
+
+        // If present, use the fork provided in the headers to decode the block. Gracefully handle
+        // missing and malformed fork names by falling back to regular deserialisation.
+        let (block, version, execution_optimistic) = match response.fork_name_from_header() {
+            Ok(Some(fork_name)) => {
+                let (data, (version, execution_optimistic)) =
+                    map_fork_name_with!(fork_name, SignedBlindedBeaconBlock, {
+                        let ExecutionOptimisticForkVersionedResponse {
+                            version,
+                            execution_optimistic,
+                            data,
+                        } = response.json().await?;
+                        (data, (version, execution_optimistic))
+                    });
+                (data, version, execution_optimistic)
+            }
+            Ok(None) | Err(_) => {
+                let ExecutionOptimisticForkVersionedResponse {
+                    version,
+                    execution_optimistic,
+                    data,
+                } = response.json().await?;
+                (data, version, execution_optimistic)
+            }
+        };
+        Ok(Some(ExecutionOptimisticForkVersionedResponse {
+            version,
+            execution_optimistic,
+            data: block,
+        }))
+    }
+
 /// `GET v1/beacon/blocks` (LEGACY)
 ///
 /// Returns `Ok(None)` on a 404 error.
@@ -735,6 +814,24 @@ impl BeaconNodeHttpClient {
         .transpose()
     }
 
+    /// `GET beacon/blinded_blocks/{block_id}` as SSZ
+    ///
+    /// Returns `Ok(None)` on a 404 error.
+    pub async fn get_beacon_blinded_blocks_ssz<T: EthSpec>(
+        &self,
+        block_id: BlockId,
+        spec: &ChainSpec,
+    ) -> Result<Option<SignedBlindedBeaconBlock<T>>, Error> {
+        let path = self.get_beacon_blinded_blocks_path(block_id)?;
+
+        self.get_bytes_opt_accept_header(path, Accept::Ssz, self.timeouts.get_beacon_blocks_ssz)
+            .await?
+            .map(|bytes| {
+                SignedBlindedBeaconBlock::from_ssz_bytes(&bytes, spec).map_err(Error::InvalidSsz)
+            })
+            .transpose()
+    }
+
 /// `GET beacon/blocks/{block_id}/root`
 ///
 /// Returns `Ok(None)` on a 404 error.
@ -455,6 +455,11 @@ pub struct SyncCommitteesQuery {
|
|||||||
pub epoch: Option<Epoch>,
|
pub epoch: Option<Epoch>,
|
||||||
}
|
}
|
||||||

+#[derive(Serialize, Deserialize)]
+pub struct RandaoQuery {
+    pub epoch: Option<Epoch>,
+}
+
#[derive(Serialize, Deserialize)]
pub struct AttestationPoolQuery {
    pub slot: Option<Slot>,
@@ -486,6 +491,11 @@ pub struct SyncCommitteeByValidatorIndices {
    pub validator_aggregates: Vec<SyncSubcommittee>,
}

+#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)]
+pub struct RandaoMix {
+    pub randao: Hash256,
+}
+
#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)]
#[serde(transparent)]
pub struct SyncSubcommittee {
@@ -6,8 +6,8 @@ PRESET_BASE: 'gnosis'

# Transition
# ---------------------------------------------------------------
-# TBD, 2**256-2**10 is a placeholder
-TERMINAL_TOTAL_DIFFICULTY: 115792089237316195423570985008687907853269984665640564039457584007913129638912
+# Estimated on Dec 5, 2022
+TERMINAL_TOTAL_DIFFICULTY: 8626000000000000000000058750000000000000000000
# By default, don't use these params
TERMINAL_BLOCK_HASH: 0x0000000000000000000000000000000000000000000000000000000000000000
TERMINAL_BLOCK_HASH_ACTIVATION_EPOCH: 18446744073709551615
@@ -35,7 +35,7 @@ ALTAIR_FORK_VERSION: 0x01000064
ALTAIR_FORK_EPOCH: 512
# Merge
BELLATRIX_FORK_VERSION: 0x02000064
-BELLATRIX_FORK_EPOCH: 18446744073709551615
+BELLATRIX_FORK_EPOCH: 385536
# Sharding
SHARDING_FORK_VERSION: 0x03000064
SHARDING_FORK_EPOCH: 18446744073709551615
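For context on the new values: assuming the standard Gnosis preset (16-slot epochs, 5-second slots) and the December 2021 beacon chain genesis, epoch 385536 works out to 385536 * 16 * 5 = 30,842,880 seconds (~357 days) after genesis, i.e. a Bellatrix fork around the end of November 2022, with the estimated terminal total difficulty expected to be reached shortly afterwards.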
@@ -226,7 +226,7 @@ mod tests {
    use super::*;
    use ssz::Encode;
    use tempfile::Builder as TempBuilder;
-    use types::{Config, Eth1Data, GnosisEthSpec, Hash256, MainnetEthSpec, GNOSIS};
+    use types::{Config, Eth1Data, GnosisEthSpec, Hash256, MainnetEthSpec};

    type E = MainnetEthSpec;
@@ -250,6 +250,13 @@ mod tests {
        assert_eq!(spec, config.chain_spec::<E>().unwrap());
    }

+    #[test]
+    fn gnosis_config_eq_chain_spec() {
+        let config = Eth2NetworkConfig::from_hardcoded_net(&GNOSIS).unwrap();
+        let spec = ChainSpec::gnosis();
+        assert_eq!(spec, config.chain_spec::<GnosisEthSpec>().unwrap());
+    }
+
    #[test]
    fn mainnet_genesis_state() {
        let config = Eth2NetworkConfig::from_hardcoded_net(&MAINNET).unwrap();
@@ -270,7 +277,7 @@ mod tests {
            .unwrap_or_else(|_| panic!("{:?}", net.name));

        // Ensure we can parse the YAML config to a chain spec.
-        if net.name == GNOSIS {
+        if net.name == types::GNOSIS {
            config.chain_spec::<GnosisEthSpec>().unwrap();
        } else {
            config.chain_spec::<MainnetEthSpec>().unwrap();
@@ -17,8 +17,8 @@ pub const VERSION: &str = git_version!(
        // NOTE: using --match instead of --exclude for compatibility with old Git
        "--match=thiswillnevermatchlol"
    ],
-    prefix = "Lighthouse/v3.2.1-",
-    fallback = "Lighthouse/v3.2.1"
+    prefix = "Lighthouse/v3.3.0-",
+    fallback = "Lighthouse/v3.3.0"
);

/// Returns `VERSION`, but with platform information appended to the end.
common/system_health/Cargo.toml (new file, 13 lines)
@@ -0,0 +1,13 @@
[package]
name = "system_health"
version = "0.1.0"
edition = "2021"

[dependencies]
lighthouse_network = { path = "../../beacon_node/lighthouse_network" }
types = { path = "../../consensus/types" }
sysinfo = "0.26.5"
serde = "1.0.116"
serde_derive = "1.0.116"
serde_json = "1.0.58"
parking_lot = "0.12.0"
common/system_health/src/lib.rs (new file, 241 lines)
@@ -0,0 +1,241 @@
use lighthouse_network::{types::SyncState, NetworkGlobals};
use parking_lot::RwLock;
use serde::{Deserialize, Serialize};
use std::path::{Path, PathBuf};
use std::sync::Arc;
use sysinfo::{CpuExt, DiskExt, NetworkExt, NetworksExt, System, SystemExt};
use types::EthSpec;

#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct SystemHealth {
    /// Total memory of the system.
    pub total_memory: u64,
    /// Total free memory available to the system.
    pub free_memory: u64,
    /// Total used memory.
    pub used_memory: u64,

    /// System load average over 1 minute.
    pub sys_loadavg_1: f64,
    /// System load average over 5 minutes.
    pub sys_loadavg_5: f64,
    /// System load average over 15 minutes.
    pub sys_loadavg_15: f64,

    /// Total cpu cores.
    pub cpu_cores: usize,
    /// Total cpu threads.
    pub cpu_threads: usize,
    /// The global cpu frequency.
    pub global_cpu_frequency: f32,

    /// Total capacity of disk.
    pub disk_bytes_total: u64,
    /// Free space in disk.
    pub disk_bytes_free: u64,

    /// System uptime.
    pub system_uptime: u64,
    /// Application uptime.
    pub app_uptime: u64,
    /// The system name.
    pub system_name: String,
    /// Kernel version.
    pub kernel_version: String,
    /// OS version.
    pub os_version: String,
    /// Hostname.
    pub host_name: String,
}

/// System related health, specific to the UI for the validator client.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct SystemHealthVC {
    #[serde(flatten)]
    pub system_health: SystemHealth,
}

/// System related health, specific to the UI for the Beacon Node.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct SystemHealthBN {
    #[serde(flatten)]
    pub system_health: SystemHealth,
    /// The name of the network that uses the most traffic.
    pub network_name: String,
    /// Total bytes received over the main interface.
    pub network_bytes_total_received: u64,
    /// Total bytes sent over the main interface.
    pub network_bytes_total_transmit: u64,

    /// The current NAT status.
    pub nat_open: bool,
    /// The current number of connected peers.
    pub connected_peers: usize,
    /// The current syncing state of the consensus node.
    pub sync_state: SyncState,
}

/// Populates the system health.
fn observe_system_health(
    sysinfo: Arc<RwLock<System>>,
    data_dir: PathBuf,
    app_uptime: u64,
) -> SystemHealth {
    let sysinfo = sysinfo.read();
    let loadavg = sysinfo.load_average();

    let cpus = sysinfo.cpus();

    let disks = sysinfo.disks();

    let system_uptime = sysinfo.uptime();

    // Find the filesystem associated with the data dir location and report on it.
    let (disk_bytes_total, disk_bytes_free) = {
        // There is no clean way to find this in an OS-agnostic way. We take a simple approach,
        // which is attempt to match the mount_point to the data_dir. If this cannot be done, we
        // just fallback to the root fs.

        let mut root_fs_disk = None;
        let mut other_matching_fs = None;

        for disk in disks.iter() {
            if disk.mount_point() == Path::new("/")
                || disk.mount_point() == Path::new("C:\\")
                || disk.mount_point() == Path::new("/System/Volumes/Data")
            {
                // Found the usual default root_fs.
                root_fs_disk = Some(disk);
                continue;
            }

            // If we have other file systems, compare these to the data_dir of Lighthouse and
            // prioritize these.
            if data_dir
                .to_str()
                .map(|path| {
                    if let Some(mount_str) = disk.mount_point().to_str() {
                        path.contains(mount_str)
                    } else {
                        false
                    }
                })
                .unwrap_or(false)
            {
                other_matching_fs = Some(disk);
                break; // Don't bother finding other competing fs.
            }
        }

        // If we found a file system other than the root, report this, otherwise just report the
        // root fs.
        let fs = other_matching_fs.or(root_fs_disk);

        match fs {
            Some(fs) => (fs.total_space(), fs.available_space()),
            None => {
                // If we can't find a known partition, just add them all up.
                disks.iter().fold((0, 0), |mut current_sizes, disk| {
                    current_sizes.0 += disk.total_space();
                    current_sizes.1 += disk.available_space();
                    current_sizes
                })
            }
        }
    };

    // Attempt to get the clock speed from the name of the CPU.
    let cpu_frequency_from_name = cpus.iter().next().and_then(|cpu| {
        cpu.brand()
            .split_once("GHz")
            .and_then(|(result, _)| result.trim().rsplit_once(' '))
            .and_then(|(_, result)| result.parse::<f32>().ok())
    });

    let global_cpu_frequency = match cpu_frequency_from_name {
        Some(freq) => freq,
        None => {
            // Get the frequency from the average of the measured frequencies.
            let global_cpu_frequency: f32 =
                cpus.iter().map(|cpu| cpu.frequency()).sum::<u64>() as f32 / cpus.len() as f32;
            // Shift to GHz, rounded to 1 decimal place.
            (global_cpu_frequency / 100.0).round() / 10.0
        }
    };

    SystemHealth {
        total_memory: sysinfo.total_memory(),
        free_memory: sysinfo.free_memory(),
        used_memory: sysinfo.used_memory(),
        sys_loadavg_1: loadavg.one,
        sys_loadavg_5: loadavg.five,
        sys_loadavg_15: loadavg.fifteen,
        cpu_cores: sysinfo.physical_core_count().unwrap_or(0),
        cpu_threads: cpus.len(),
        global_cpu_frequency,
        disk_bytes_total,
        disk_bytes_free,
        system_uptime,
        app_uptime,
        system_name: sysinfo.name().unwrap_or_else(|| String::from("")),
        kernel_version: sysinfo.kernel_version().unwrap_or_else(|| "".into()),
        os_version: sysinfo.long_os_version().unwrap_or_else(|| "".into()),
        host_name: sysinfo.host_name().unwrap_or_else(|| "".into()),
    }
}

/// Observes the Validator client system health.
pub fn observe_system_health_vc(
    sysinfo: Arc<RwLock<System>>,
    data_dir: PathBuf,
    app_uptime: u64,
) -> SystemHealthVC {
    SystemHealthVC {
        system_health: observe_system_health(sysinfo, data_dir, app_uptime),
    }
}

/// Observes the Beacon Node system health.
pub fn observe_system_health_bn<TSpec: EthSpec>(
    sysinfo: Arc<RwLock<System>>,
    data_dir: PathBuf,
    app_uptime: u64,
    network_globals: Arc<NetworkGlobals<TSpec>>,
) -> SystemHealthBN {
    let system_health = observe_system_health(sysinfo.clone(), data_dir, app_uptime);

    // Find the network with the most traffic and assume this is the main network.
    let sysinfo = sysinfo.read();
    let networks = sysinfo.networks();
    let (network_name, network_bytes_total_received, network_bytes_total_transmit) = networks
        .iter()
        .max_by_key(|(_name, network)| network.total_received())
        .map(|(name, network)| {
            (
                name.clone(),
                network.total_received(),
                network.total_transmitted(),
            )
        })
        .unwrap_or_else(|| (String::from("None"), 0, 0));

    // Determine if the NAT is open or not.
    let nat_open = lighthouse_network::metrics::NAT_OPEN
        .as_ref()
        .map(|v| v.get())
        .unwrap_or(0)
        != 0;

    SystemHealthBN {
        system_health,
        network_name,
        network_bytes_total_received,
        network_bytes_total_transmit,
        nat_open,
        connected_peers: network_globals.connected_peers(),
        sync_state: network_globals.sync_state(),
    }
}
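A sketch of how a node might wire up the new crate; the data dir, uptime source, and `network_globals` handle are assumptions for illustration, and only the `observe_system_health_bn` call itself comes from this diff:

    // Sketch: sample system health for a beacon node UI endpoint.
    let sysinfo = Arc::new(RwLock::new(System::new()));
    sysinfo.write().refresh_all();
    let health = observe_system_health_bn::<MainnetEthSpec>(
        sysinfo.clone(),
        PathBuf::from("/home/user/.lighthouse/beacon"),
        app_start.elapsed().as_secs(), // hypothetical `Instant` taken at startup
        network_globals.clone(),       // hypothetical handle owned by the node
    );
    println!("{}", serde_json::to_string_pretty(&health)?);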
@@ -22,7 +22,7 @@ pub trait BitfieldBehaviour: Clone {}
/// A marker struct used to declare SSZ `Variable` behaviour on a `Bitfield`.
///
/// See the [`Bitfield`](struct.Bitfield.html) docs for usage.
-#[derive(Clone, PartialEq, Debug)]
+#[derive(Clone, PartialEq, Eq, Debug)]
pub struct Variable<N> {
    _phantom: PhantomData<N>,
}
@@ -30,7 +30,7 @@ pub struct Variable<N> {
/// A marker struct used to declare SSZ `Fixed` behaviour on a `Bitfield`.
///
/// See the [`Bitfield`](struct.Bitfield.html) docs for usage.
-#[derive(Clone, PartialEq, Debug)]
+#[derive(Clone, PartialEq, Eq, Debug)]
pub struct Fixed<N> {
    _phantom: PhantomData<N>,
}
@@ -96,7 +96,7 @@ pub type BitVector<N> = Bitfield<Fixed<N>>;
/// byte (by `Vec` index) stores the lowest bit-indices and the right-most bit stores the lowest
/// bit-index. E.g., `smallvec![0b0000_0001, 0b0000_0010]` has bits `0, 9` set.
#[derive(Clone, Debug, Derivative)]
-#[derivative(PartialEq, Hash(bound = ""))]
+#[derivative(PartialEq, Eq, Hash(bound = ""))]
pub struct Bitfield<T> {
    bytes: SmallVec<[u8; SMALLVEC_LEN]>,
    len: usize,
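These `Eq` derives are not cosmetic: later in this commit, `(AttestationData, BitList<N>)` becomes a `HashMap` key in `ConsensusContext`, and `HashMap` keys must implement `Eq` and `Hash`. A minimal sketch of the requirement (the `U4` length is arbitrary):

    // Sketch: with `Eq` derived, a BitList can participate in a HashMap key.
    use std::collections::HashMap;
    let mut cache: HashMap<BitList<typenum::U4>, u64> = HashMap::new();
    cache.insert(BitList::with_capacity(4).unwrap(), 1);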
@@ -1,8 +1,11 @@
+use crate::common::get_indexed_attestation;
+use crate::per_block_processing::errors::{AttestationInvalid, BlockOperationError};
+use std::collections::{hash_map::Entry, HashMap};
use std::marker::PhantomData;
use tree_hash::TreeHash;
use types::{
-    AbstractExecPayload, BeaconState, BeaconStateError, ChainSpec, EthSpec, Hash256,
-    SignedBeaconBlock, Slot,
+    AbstractExecPayload, Attestation, AttestationData, BeaconState, BeaconStateError, BitList,
+    ChainSpec, Epoch, EthSpec, Hash256, IndexedAttestation, SignedBeaconBlock, Slot,
};

#[derive(Debug)]
@@ -13,6 +16,9 @@ pub struct ConsensusContext<T: EthSpec> {
    proposer_index: Option<u64>,
    /// Block root of the block at `slot`.
    current_block_root: Option<Hash256>,
+    /// Cache of indexed attestations constructed during block processing.
+    indexed_attestations:
+        HashMap<(AttestationData, BitList<T::MaxValidatorsPerCommittee>), IndexedAttestation<T>>,
    _phantom: PhantomData<T>,
}
@@ -20,6 +26,7 @@ pub struct ConsensusContext<T: EthSpec> {
pub enum ContextError {
    BeaconState(BeaconStateError),
    SlotMismatch { slot: Slot, expected: Slot },
+    EpochMismatch { epoch: Epoch, expected: Epoch },
}

impl From<BeaconStateError> for ContextError {
@@ -34,6 +41,7 @@ impl<T: EthSpec> ConsensusContext<T> {
            slot,
            proposer_index: None,
            current_block_root: None,
+            indexed_attestations: HashMap::new(),
            _phantom: PhantomData,
        }
    }
@@ -43,13 +51,39 @@ impl<T: EthSpec> ConsensusContext<T> {
        self
    }

+    /// Strict method for fetching the proposer index.
+    ///
+    /// Gets the proposer index for `self.slot` while ensuring that it matches `state.slot()`. This
+    /// method should be used in block processing and almost everywhere the proposer index is
+    /// required. If the slot check is too restrictive, see `get_proposer_index_from_epoch_state`.
    pub fn get_proposer_index(
        &mut self,
        state: &BeaconState<T>,
        spec: &ChainSpec,
    ) -> Result<u64, ContextError> {
        self.check_slot(state.slot())?;
+        self.get_proposer_index_no_checks(state, spec)
+    }
+
+    /// More liberal method for fetching the proposer index.
+    ///
+    /// Fetches the proposer index for `self.slot` but does not require the state to be from an
+    /// exactly matching slot (merely a matching epoch). This is useful in batch verification where
+    /// we want to extract the proposer index from a single state for every slot in the epoch.
+    pub fn get_proposer_index_from_epoch_state(
+        &mut self,
+        state: &BeaconState<T>,
+        spec: &ChainSpec,
+    ) -> Result<u64, ContextError> {
+        self.check_epoch(state.current_epoch())?;
+        self.get_proposer_index_no_checks(state, spec)
+    }
+
+    fn get_proposer_index_no_checks(
+        &mut self,
+        state: &BeaconState<T>,
+        spec: &ChainSpec,
+    ) -> Result<u64, ContextError> {
        if let Some(proposer_index) = self.proposer_index {
            return Ok(proposer_index);
        }
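The split makes the slot-strict and epoch-lenient paths explicit. A sketch of the difference, assuming `block`, `state`, and `spec` are in scope:

    // Sketch: both methods agree when the state is from the block's own slot.
    let mut ctxt = ConsensusContext::<MainnetEthSpec>::new(block.slot());
    // Strict: requires state.slot() == ctxt's slot.
    let strict = ctxt.get_proposer_index(&state, &spec)?;
    // Lenient: only the epochs must match, useful for batch verification.
    let lenient = ctxt.get_proposer_index_from_epoch_state(&state, &spec)?;
    assert_eq!(strict, lenient);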
@@ -89,4 +123,39 @@ impl<T: EthSpec> ConsensusContext<T> {
            })
        }
    }
+
+    fn check_epoch(&self, epoch: Epoch) -> Result<(), ContextError> {
+        let expected = self.slot.epoch(T::slots_per_epoch());
+        if epoch == expected {
+            Ok(())
+        } else {
+            Err(ContextError::EpochMismatch { epoch, expected })
+        }
+    }
+
+    pub fn get_indexed_attestation(
+        &mut self,
+        state: &BeaconState<T>,
+        attestation: &Attestation<T>,
+    ) -> Result<&IndexedAttestation<T>, BlockOperationError<AttestationInvalid>> {
+        let key = (
+            attestation.data.clone(),
+            attestation.aggregation_bits.clone(),
+        );
+
+        match self.indexed_attestations.entry(key) {
+            Entry::Occupied(occupied) => Ok(occupied.into_mut()),
+            Entry::Vacant(vacant) => {
+                let committee =
+                    state.get_beacon_committee(attestation.data.slot, attestation.data.index)?;
+                let indexed_attestation =
+                    get_indexed_attestation(committee.committee, attestation)?;
+                Ok(vacant.insert(indexed_attestation))
+            }
+        }
+    }
+
+    pub fn num_cached_indexed_attestations(&self) -> usize {
+        self.indexed_attestations.len()
+    }
}
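The cache means a block's attestations are converted to indexed form only once, even though both signature verification and state processing need them. A sketch, with `block`, `state`, and `ctxt` assumed in scope:

    // Sketch: a second lookup for the same attestation is served from the cache.
    let att = &block.message().body().attestations()[0];
    let first = ctxt.get_indexed_attestation(&state, att)?.clone();
    let second = ctxt.get_indexed_attestation(&state, att)?;
    assert_eq!(&first, second);
    assert_eq!(ctxt.num_cached_indexed_attestations(), 1);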
@@ -120,16 +120,13 @@ pub fn per_block_processing<T: EthSpec, Payload: AbstractExecPayload<T>>(
    let verify_signatures = match block_signature_strategy {
        BlockSignatureStrategy::VerifyBulk => {
            // Verify all signatures in the block at once.
-            let block_root = Some(ctxt.get_current_block_root(signed_block)?);
-            let proposer_index = Some(ctxt.get_proposer_index(state, spec)?);
            block_verify!(
                BlockSignatureVerifier::verify_entire_block(
                    state,
                    |i| get_pubkey_from_state(state, i),
                    |pk_bytes| pk_bytes.decompress().ok().map(Cow::Owned),
                    signed_block,
-                    block_root,
-                    proposer_index,
+                    ctxt,
                    spec
                )
                .is_ok(),
@@ -352,6 +349,7 @@ pub fn get_new_eth1_data<T: EthSpec>(
/// https://github.com/ethereum/consensus-specs/blob/v1.1.5/specs/merge/beacon-chain.md#process_execution_payload
pub fn partially_verify_execution_payload<'payload, T: EthSpec, Payload: AbstractExecPayload<T>>(
    state: &BeaconState<T>,
+    block_slot: Slot,
    payload: Payload::Ref<'payload>,
    spec: &ChainSpec,
) -> Result<(), BlockProcessingError> {
@@ -372,7 +370,7 @@ pub fn partially_verify_execution_payload<'payload, T: EthSpec, Payload: Abstrac
    }
    );

-    let timestamp = compute_timestamp_at_slot(state, spec)?;
+    let timestamp = compute_timestamp_at_slot(state, block_slot, spec)?;
    block_verify!(
        payload.timestamp() == timestamp,
        BlockProcessingError::ExecutionInvalidTimestamp {
@@ -396,7 +394,7 @@ pub fn process_execution_payload<'payload, T: EthSpec, Payload: AbstractExecPayl
    payload: Payload::Ref<'payload>,
    spec: &ChainSpec,
) -> Result<(), BlockProcessingError> {
-    partially_verify_execution_payload::<T, Payload>(state, payload, spec)?;
+    partially_verify_execution_payload::<T, Payload>(state, state.slot(), payload, spec)?;

    match state.latest_execution_payload_header_mut()? {
        ExecutionPayloadHeaderRefMut::Merge(header_mut) => {
@@ -459,9 +457,10 @@ pub fn is_execution_enabled<T: EthSpec, Payload: AbstractExecPayload<T>>(
/// https://github.com/ethereum/consensus-specs/blob/dev/specs/bellatrix/beacon-chain.md#compute_timestamp_at_slot
pub fn compute_timestamp_at_slot<T: EthSpec>(
    state: &BeaconState<T>,
+    block_slot: Slot,
    spec: &ChainSpec,
) -> Result<u64, ArithError> {
-    let slots_since_genesis = state.slot().as_u64().safe_sub(spec.genesis_slot.as_u64())?;
+    let slots_since_genesis = block_slot.as_u64().safe_sub(spec.genesis_slot.as_u64())?;
    slots_since_genesis
        .safe_mul(spec.seconds_per_slot)
        .and_then(|since_genesis| state.genesis_time().safe_add(since_genesis))
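Threading `block_slot` through means the timestamp is computed for the block being verified rather than for whatever slot the state happens to be at. As a worked example with mainnet parameters (genesis_time 1606824023, 12-second slots, genesis_slot 0), the merge block at slot 4700013 gets:

    // timestamp = genesis_time + (block_slot - genesis_slot) * seconds_per_slot
    //           = 1606824023 + 4700013 * 12
    //           = 1663224179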
@@ -1,8 +1,8 @@
#![allow(clippy::integer_arithmetic)]

use super::signature_sets::{Error as SignatureSetError, *};
-use crate::common::get_indexed_attestation;
use crate::per_block_processing::errors::{AttestationInvalid, BlockOperationError};
+use crate::{ConsensusContext, ContextError};
use bls::{verify_signature_sets, PublicKey, PublicKeyBytes, SignatureSet};
use rayon::prelude::*;
use std::borrow::Cow;
@@ -28,6 +28,8 @@ pub enum Error {
    IncorrectBlockProposer { block: u64, local_shuffling: u64 },
    /// Failed to load a signature set. The block may be invalid or we failed to process it.
    SignatureSetError(SignatureSetError),
+    /// Error related to the consensus context, likely the proposer index or block root calc.
+    ContextError(ContextError),
}

impl From<BeaconStateError> for Error {
@@ -36,6 +38,12 @@ impl From<BeaconStateError> for Error {
    }
}

+impl From<ContextError> for Error {
+    fn from(e: ContextError) -> Error {
+        Error::ContextError(e)
+    }
+}
+
impl From<SignatureSetError> for Error {
    fn from(e: SignatureSetError) -> Error {
        match e {
@@ -122,12 +130,11 @@ where
        get_pubkey: F,
        decompressor: D,
        block: &'a SignedBeaconBlock<T, Payload>,
-        block_root: Option<Hash256>,
-        verified_proposer_index: Option<u64>,
+        ctxt: &mut ConsensusContext<T>,
        spec: &'a ChainSpec,
    ) -> Result<()> {
        let mut verifier = Self::new(state, get_pubkey, decompressor, spec);
-        verifier.include_all_signatures(block, block_root, verified_proposer_index)?;
+        verifier.include_all_signatures(block, ctxt)?;
        verifier.verify()
    }
@@ -135,11 +142,14 @@ where
    pub fn include_all_signatures<Payload: AbstractExecPayload<T>>(
        &mut self,
        block: &'a SignedBeaconBlock<T, Payload>,
-        block_root: Option<Hash256>,
-        verified_proposer_index: Option<u64>,
+        ctxt: &mut ConsensusContext<T>,
    ) -> Result<()> {
+        let block_root = Some(ctxt.get_current_block_root(block)?);
+        let verified_proposer_index =
+            Some(ctxt.get_proposer_index_from_epoch_state(self.state, self.spec)?);
+
        self.include_block_proposal(block, block_root, verified_proposer_index)?;
-        self.include_all_signatures_except_proposal(block, verified_proposer_index)?;
+        self.include_all_signatures_except_proposal(block, ctxt)?;

        Ok(())
    }
@@ -149,12 +159,14 @@ where
    pub fn include_all_signatures_except_proposal<Payload: AbstractExecPayload<T>>(
        &mut self,
        block: &'a SignedBeaconBlock<T, Payload>,
-        verified_proposer_index: Option<u64>,
+        ctxt: &mut ConsensusContext<T>,
    ) -> Result<()> {
+        let verified_proposer_index =
+            Some(ctxt.get_proposer_index_from_epoch_state(self.state, self.spec)?);
        self.include_randao_reveal(block, verified_proposer_index)?;
        self.include_proposer_slashings(block)?;
        self.include_attester_slashings(block)?;
-        self.include_attestations(block)?;
+        self.include_attestations(block, ctxt)?;
        // Deposits are not included because they can legally have invalid signatures.
        self.include_exits(block)?;
        self.include_sync_aggregate(block)?;
@@ -262,7 +274,8 @@ where
    pub fn include_attestations<Payload: AbstractExecPayload<T>>(
        &mut self,
        block: &'a SignedBeaconBlock<T, Payload>,
-    ) -> Result<Vec<IndexedAttestation<T>>> {
+        ctxt: &mut ConsensusContext<T>,
+    ) -> Result<()> {
        self.sets
            .sets
            .reserve(block.message().body().attestations().len());
@@ -272,28 +285,18 @@ where
            .body()
            .attestations()
            .iter()
-            .try_fold(
-                Vec::with_capacity(block.message().body().attestations().len()),
-                |mut vec, attestation| {
-                    let committee = self
-                        .state
-                        .get_beacon_committee(attestation.data.slot, attestation.data.index)?;
-                    let indexed_attestation =
-                        get_indexed_attestation(committee.committee, attestation)?;
-
-                    self.sets.push(indexed_attestation_signature_set(
-                        self.state,
-                        self.get_pubkey.clone(),
-                        &attestation.signature,
-                        &indexed_attestation,
-                        self.spec,
-                    )?);
-
-                    vec.push(indexed_attestation);
-
-                    Ok(vec)
-                },
-            )
+            .try_for_each(|attestation| {
+                let indexed_attestation = ctxt.get_indexed_attestation(self.state, attestation)?;
+
+                self.sets.push(indexed_attestation_signature_set(
+                    self.state,
+                    self.get_pubkey.clone(),
+                    &attestation.signature,
+                    indexed_attestation,
+                    self.spec,
+                )?);
+                Ok(())
+            })
            .map_err(Error::into)
    }
@@ -63,8 +63,14 @@ pub mod base {

    // Verify and apply each attestation.
    for (i, attestation) in attestations.iter().enumerate() {
-        verify_attestation_for_block_inclusion(state, attestation, verify_signatures, spec)
-            .map_err(|e| e.into_with_index(i))?;
+        verify_attestation_for_block_inclusion(
+            state,
+            attestation,
+            ctxt,
+            verify_signatures,
+            spec,
+        )
+        .map_err(|e| e.into_with_index(i))?;

        let pending_attestation = PendingAttestation {
            aggregation_bits: attestation.aggregation_bits.clone(),
@@ -100,19 +106,11 @@ pub mod altair {
        ctxt: &mut ConsensusContext<T>,
        spec: &ChainSpec,
    ) -> Result<(), BlockProcessingError> {
-        let proposer_index = ctxt.get_proposer_index(state, spec)?;
        attestations
            .iter()
            .enumerate()
            .try_for_each(|(i, attestation)| {
-                process_attestation(
-                    state,
-                    attestation,
-                    i,
-                    proposer_index,
-                    verify_signatures,
-                    spec,
-                )
+                process_attestation(state, attestation, i, ctxt, verify_signatures, spec)
            })
    }
@@ -120,16 +118,24 @@ pub mod altair {
        state: &mut BeaconState<T>,
        attestation: &Attestation<T>,
        att_index: usize,
-        proposer_index: u64,
+        ctxt: &mut ConsensusContext<T>,
        verify_signatures: VerifySignatures,
        spec: &ChainSpec,
    ) -> Result<(), BlockProcessingError> {
        state.build_committee_cache(RelativeEpoch::Previous, spec)?;
        state.build_committee_cache(RelativeEpoch::Current, spec)?;

-        let indexed_attestation =
-            verify_attestation_for_block_inclusion(state, attestation, verify_signatures, spec)
-                .map_err(|e| e.into_with_index(att_index))?;
+        let proposer_index = ctxt.get_proposer_index(state, spec)?;
+
+        let attesting_indices = &verify_attestation_for_block_inclusion(
+            state,
+            attestation,
+            ctxt,
+            verify_signatures,
+            spec,
+        )
+        .map_err(|e| e.into_with_index(att_index))?
+        .attesting_indices;

        // Matching roots, participation flag indices
        let data = &attestation.data;
@@ -141,7 +147,7 @@ pub mod altair {
        let total_active_balance = state.get_total_active_balance()?;
        let base_reward_per_increment = BaseRewardPerIncrement::new(total_active_balance, spec)?;
        let mut proposer_reward_numerator = 0;
-        for index in &indexed_attestation.attesting_indices {
+        for index in attesting_indices {
            let index = *index as usize;

            for (flag_index, &weight) in PARTICIPATION_FLAG_WEIGHTS.iter().enumerate() {
@@ -1,7 +1,7 @@
use super::errors::{AttestationInvalid as Invalid, BlockOperationError};
use super::VerifySignatures;
-use crate::common::get_indexed_attestation;
use crate::per_block_processing::is_valid_indexed_attestation;
+use crate::ConsensusContext;
use safe_arith::SafeArith;
use types::*;
@@ -15,12 +15,13 @@ fn error(reason: Invalid) -> BlockOperationError<Invalid> {
/// to `state`. Otherwise, returns a descriptive `Err`.
///
/// Optionally verifies the aggregate signature, depending on `verify_signatures`.
-pub fn verify_attestation_for_block_inclusion<T: EthSpec>(
+pub fn verify_attestation_for_block_inclusion<'ctxt, T: EthSpec>(
    state: &BeaconState<T>,
    attestation: &Attestation<T>,
+    ctxt: &'ctxt mut ConsensusContext<T>,
    verify_signatures: VerifySignatures,
    spec: &ChainSpec,
-) -> Result<IndexedAttestation<T>> {
+) -> Result<&'ctxt IndexedAttestation<T>> {
    let data = &attestation.data;

    verify!(
@@ -39,7 +40,7 @@ pub fn verify_attestation_for_block_inclusion<T: EthSpec>(
    }
    );

-    verify_attestation_for_state(state, attestation, verify_signatures, spec)
+    verify_attestation_for_state(state, attestation, ctxt, verify_signatures, spec)
}

/// Returns `Ok(())` if `attestation` is a valid attestation to the chain that precedes the given
@@ -49,12 +50,13 @@ pub fn verify_attestation_for_block_inclusion<T: EthSpec>(
/// prior blocks in `state`.
///
/// Spec v0.12.1
-pub fn verify_attestation_for_state<T: EthSpec>(
+pub fn verify_attestation_for_state<'ctxt, T: EthSpec>(
    state: &BeaconState<T>,
    attestation: &Attestation<T>,
+    ctxt: &'ctxt mut ConsensusContext<T>,
    verify_signatures: VerifySignatures,
    spec: &ChainSpec,
-) -> Result<IndexedAttestation<T>> {
+) -> Result<&'ctxt IndexedAttestation<T>> {
    let data = &attestation.data;

    verify!(
@@ -66,9 +68,8 @@ pub fn verify_attestation_for_state<T: EthSpec>(
    verify_casper_ffg_vote(attestation, state)?;

    // Check signature and bitfields
-    let committee = state.get_beacon_committee(attestation.data.slot, attestation.data.index)?;
-    let indexed_attestation = get_indexed_attestation(committee.committee, attestation)?;
-    is_valid_indexed_attestation(state, &indexed_attestation, verify_signatures, spec)?;
+    let indexed_attestation = ctxt.get_indexed_attestation(state, attestation)?;
+    is_valid_indexed_attestation(state, indexed_attestation, verify_signatures, spec)?;

    Ok(indexed_attestation)
}
@@ -10,6 +10,7 @@ harness = false

[dependencies]
serde-big-array = {version = "0.3.2", features = ["const-generics"]}
+merkle_proof = { path = "../../consensus/merkle_proof" }
bls = { path = "../../crypto/bls" }
compare_fields = { path = "../../common/compare_fields" }
compare_fields_derive = { path = "../../common/compare_fields_derive" }
@@ -125,6 +125,8 @@ pub enum Error {
        current_epoch: Epoch,
        epoch: Epoch,
    },
+    IndexNotSupported(usize),
+    MerkleTreeError(merkle_proof::MerkleTreeError),
}

/// Control whether an epoch-indexed field can be indexed at the next epoch or not.
@@ -1735,6 +1737,57 @@ impl<T: EthSpec> BeaconState<T> {
        };
        Ok(sync_committee)
    }

+    pub fn compute_merkle_proof(
+        &mut self,
+        generalized_index: usize,
+    ) -> Result<Vec<Hash256>, Error> {
+        // 1. Convert generalized index to field index.
+        let field_index = match generalized_index {
+            light_client_update::CURRENT_SYNC_COMMITTEE_INDEX
+            | light_client_update::NEXT_SYNC_COMMITTEE_INDEX => {
+                // Sync committees are top-level fields, subtract off the generalized indices
+                // for the internal nodes. Result should be 22 or 23, the field offset of the committee
+                // in the `BeaconState`:
+                // https://github.com/ethereum/consensus-specs/blob/dev/specs/altair/beacon-chain.md#beaconstate
+                generalized_index
+                    .checked_sub(tree_hash_cache::NUM_BEACON_STATE_HASH_TREE_ROOT_LEAVES)
+                    .ok_or(Error::IndexNotSupported(generalized_index))?
+            }
+            light_client_update::FINALIZED_ROOT_INDEX => {
+                // Finalized root is the right child of `finalized_checkpoint`, divide by two to get
+                // the generalized index of `state.finalized_checkpoint`.
+                let finalized_checkpoint_generalized_index = generalized_index / 2;
+                // Subtract off the internal nodes. Result should be 105/2 - 32 = 20 which matches
+                // the position of `finalized_checkpoint` in `BeaconState`.
+                finalized_checkpoint_generalized_index
+                    .checked_sub(tree_hash_cache::NUM_BEACON_STATE_HASH_TREE_ROOT_LEAVES)
+                    .ok_or(Error::IndexNotSupported(generalized_index))?
+            }
+            _ => return Err(Error::IndexNotSupported(generalized_index)),
+        };
+
+        // 2. Get all `BeaconState` leaves.
+        let mut cache = self
+            .tree_hash_cache_mut()
+            .take()
+            .ok_or(Error::TreeHashCacheNotInitialized)?;
+        let leaves = cache.recalculate_tree_hash_leaves(self)?;
+        self.tree_hash_cache_mut().restore(cache);
+
+        // 3. Build the merkle tree over the state leaves.
+        // Use the depth of the `BeaconState` fields (i.e. `log2(32) = 5`).
+        let depth = light_client_update::CURRENT_SYNC_COMMITTEE_PROOF_LEN;
+        let tree = merkle_proof::MerkleTree::create(&leaves, depth);
+        let (_, mut proof) = tree.generate_proof(field_index, depth)?;
+
+        // 4. If we're proving the finalized root, patch in the finalized epoch to complete the proof.
+        if generalized_index == light_client_update::FINALIZED_ROOT_INDEX {
+            proof.insert(0, self.finalized_checkpoint().epoch.tree_hash_root());
+        }
+
+        Ok(proof)
+    }
}

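A sketch of the intended call; the state must have an initialized tree hash cache (e.g. via `update_tree_hash_cache`) for step 2 to succeed:

    // Sketch: produce a branch proving the finalized root against the state root.
    state.update_tree_hash_cache()?;
    let proof = state.compute_merkle_proof(light_client_update::FINALIZED_ROOT_INDEX)?;
    assert_eq!(proof.len(), light_client_update::FINALIZED_ROOT_PROOF_LEN);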
impl From<RelativeEpochError> for Error {
@@ -1767,6 +1820,12 @@ impl From<tree_hash::Error> for Error {
    }
}

+impl From<merkle_proof::MerkleTreeError> for Error {
+    fn from(e: merkle_proof::MerkleTreeError) -> Error {
+        Error::MerkleTreeError(e)
+    }
+}
+
impl From<ArithError> for Error {
    fn from(e: ArithError) -> Error {
        Error::ArithError(e)
@@ -18,7 +18,7 @@ use tree_hash::{mix_in_length, MerkleHasher, TreeHash};
///
/// This constant is set with the assumption that there are `> 16` and `<= 32` fields on the
/// `BeaconState`. **Tree hashing will fail if this value is set incorrectly.**
-const NUM_BEACON_STATE_HASH_TREE_ROOT_LEAVES: usize = 32;
+pub const NUM_BEACON_STATE_HASH_TREE_ROOT_LEAVES: usize = 32;

/// The number of nodes in the Merkle tree of a validator record.
const NODES_PER_VALIDATOR: usize = 15;
@@ -210,6 +210,90 @@ impl<T: EthSpec> BeaconTreeHashCacheInner<T> {
        }
    }

+    pub fn recalculate_tree_hash_leaves(
+        &mut self,
+        state: &BeaconState<T>,
+    ) -> Result<Vec<Hash256>, Error> {
+        let mut leaves = vec![
+            // Genesis data leaves.
+            state.genesis_time().tree_hash_root(),
+            state.genesis_validators_root().tree_hash_root(),
+            // Current fork data leaves.
+            state.slot().tree_hash_root(),
+            state.fork().tree_hash_root(),
+            state.latest_block_header().tree_hash_root(),
+            // Roots leaves.
+            state
+                .block_roots()
+                .recalculate_tree_hash_root(&mut self.fixed_arena, &mut self.block_roots)?,
+            state
+                .state_roots()
+                .recalculate_tree_hash_root(&mut self.fixed_arena, &mut self.state_roots)?,
+            state
+                .historical_roots()
+                .recalculate_tree_hash_root(&mut self.fixed_arena, &mut self.historical_roots)?,
+            // Eth1 Data leaves.
+            state.eth1_data().tree_hash_root(),
+            self.eth1_data_votes.recalculate_tree_hash_root(state)?,
+            state.eth1_deposit_index().tree_hash_root(),
+            // Validator leaves.
+            self.validators
+                .recalculate_tree_hash_root(state.validators())?,
+            state
+                .balances()
+                .recalculate_tree_hash_root(&mut self.balances_arena, &mut self.balances)?,
+            state
+                .randao_mixes()
+                .recalculate_tree_hash_root(&mut self.fixed_arena, &mut self.randao_mixes)?,
+            state
+                .slashings()
+                .recalculate_tree_hash_root(&mut self.slashings_arena, &mut self.slashings)?,
+        ];
+        // Participation
+        if let BeaconState::Base(state) = state {
+            leaves.push(state.previous_epoch_attestations.tree_hash_root());
+            leaves.push(state.current_epoch_attestations.tree_hash_root());
+        } else {
+            leaves.push(
+                self.previous_epoch_participation
+                    .recalculate_tree_hash_root(&ParticipationList::new(
+                        state.previous_epoch_participation()?,
+                    ))?,
+            );
+            leaves.push(
+                self.current_epoch_participation
+                    .recalculate_tree_hash_root(&ParticipationList::new(
+                        state.current_epoch_participation()?,
+                    ))?,
+            );
+        }
+        // Checkpoint leaves
+        leaves.push(state.justification_bits().tree_hash_root());
+        leaves.push(state.previous_justified_checkpoint().tree_hash_root());
+        leaves.push(state.current_justified_checkpoint().tree_hash_root());
+        leaves.push(state.finalized_checkpoint().tree_hash_root());
+        // Inactivity & light-client sync committees (Altair and later).
+        if let Ok(inactivity_scores) = state.inactivity_scores() {
+            leaves.push(
+                self.inactivity_scores
+                    .recalculate_tree_hash_root(inactivity_scores)?,
+            );
+        }
+        if let Ok(current_sync_committee) = state.current_sync_committee() {
+            leaves.push(current_sync_committee.tree_hash_root());
+        }
+
+        if let Ok(next_sync_committee) = state.next_sync_committee() {
+            leaves.push(next_sync_committee.tree_hash_root());
+        }
+
+        // Execution payload (merge and later).
+        if let Ok(payload_header) = state.latest_execution_payload_header() {
+            leaves.push(payload_header.tree_hash_root());
+        }
+        Ok(leaves)
+    }
+
    /// Updates the cache and returns the tree hash root for the given `state`.
    ///
    /// The provided `state` should be a descendant of the last `state` given to this function, or
@@ -246,121 +330,9 @@ impl<T: EthSpec> BeaconTreeHashCacheInner<T> {

        let mut hasher = MerkleHasher::with_leaves(NUM_BEACON_STATE_HASH_TREE_ROOT_LEAVES);

-        hasher.write(state.genesis_time().tree_hash_root().as_bytes())?;
-        hasher.write(state.genesis_validators_root().tree_hash_root().as_bytes())?;
-        hasher.write(state.slot().tree_hash_root().as_bytes())?;
-        hasher.write(state.fork().tree_hash_root().as_bytes())?;
-        hasher.write(state.latest_block_header().tree_hash_root().as_bytes())?;
-        hasher.write(
-            state
-                .block_roots()
-                .recalculate_tree_hash_root(&mut self.fixed_arena, &mut self.block_roots)?
-                .as_bytes(),
-        )?;
-        hasher.write(
-            state
-                .state_roots()
-                .recalculate_tree_hash_root(&mut self.fixed_arena, &mut self.state_roots)?
-                .as_bytes(),
-        )?;
-        hasher.write(
-            state
-                .historical_roots()
-                .recalculate_tree_hash_root(&mut self.fixed_arena, &mut self.historical_roots)?
-                .as_bytes(),
-        )?;
-        hasher.write(state.eth1_data().tree_hash_root().as_bytes())?;
-        hasher.write(
-            self.eth1_data_votes
-                .recalculate_tree_hash_root(state)?
-                .as_bytes(),
-        )?;
-        hasher.write(state.eth1_deposit_index().tree_hash_root().as_bytes())?;
-        hasher.write(
-            self.validators
-                .recalculate_tree_hash_root(state.validators())?
-                .as_bytes(),
-        )?;
-        hasher.write(
-            state
-                .balances()
-                .recalculate_tree_hash_root(&mut self.balances_arena, &mut self.balances)?
-                .as_bytes(),
-        )?;
-        hasher.write(
-            state
-                .randao_mixes()
-                .recalculate_tree_hash_root(&mut self.fixed_arena, &mut self.randao_mixes)?
-                .as_bytes(),
-        )?;
-        hasher.write(
-            state
-                .slashings()
-                .recalculate_tree_hash_root(&mut self.slashings_arena, &mut self.slashings)?
-                .as_bytes(),
-        )?;
-
-        // Participation
-        if let BeaconState::Base(state) = state {
-            hasher.write(
-                state
-                    .previous_epoch_attestations
-                    .tree_hash_root()
-                    .as_bytes(),
-            )?;
-            hasher.write(state.current_epoch_attestations.tree_hash_root().as_bytes())?;
-        } else {
-            hasher.write(
-                self.previous_epoch_participation
-                    .recalculate_tree_hash_root(&ParticipationList::new(
-                        state.previous_epoch_participation()?,
-                    ))?
-                    .as_bytes(),
-            )?;
-            hasher.write(
-                self.current_epoch_participation
-                    .recalculate_tree_hash_root(&ParticipationList::new(
-                        state.current_epoch_participation()?,
-                    ))?
-                    .as_bytes(),
-            )?;
-        }
-
-        hasher.write(state.justification_bits().tree_hash_root().as_bytes())?;
-        hasher.write(
-            state
-                .previous_justified_checkpoint()
-                .tree_hash_root()
-                .as_bytes(),
-        )?;
-        hasher.write(
-            state
-                .current_justified_checkpoint()
-                .tree_hash_root()
-                .as_bytes(),
-        )?;
-        hasher.write(state.finalized_checkpoint().tree_hash_root().as_bytes())?;
-
-        // Inactivity & light-client sync committees (Altair and later).
-        if let Ok(inactivity_scores) = state.inactivity_scores() {
-            hasher.write(
-                self.inactivity_scores
-                    .recalculate_tree_hash_root(inactivity_scores)?
-                    .as_bytes(),
-            )?;
-        }
-
-        if let Ok(current_sync_committee) = state.current_sync_committee() {
-            hasher.write(current_sync_committee.tree_hash_root().as_bytes())?;
-        }
-
-        if let Ok(next_sync_committee) = state.next_sync_committee() {
-            hasher.write(next_sync_committee.tree_hash_root().as_bytes())?;
-        }
-
-        // Execution payload (merge and later).
-        if let Ok(payload_header) = state.latest_execution_payload_header() {
-            hasher.write(payload_header.tree_hash_root().as_bytes())?;
-        }
+        let leaves = self.recalculate_tree_hash_leaves(state)?;
+        for leaf in leaves {
+            hasher.write(leaf.as_bytes())?;
+        }

        // Withdrawal indices (Capella and later).
@@ -844,7 +844,7 @@ impl ChainSpec {
            domain_sync_committee_selection_proof: 8,
            domain_contribution_and_proof: 9,
            altair_fork_version: [0x01, 0x00, 0x00, 0x64],
-            altair_fork_epoch: Some(Epoch::new(256)),
+            altair_fork_epoch: Some(Epoch::new(512)),

            /*
             * Merge hard fork params
@ -855,14 +855,11 @@ impl ChainSpec {
|
|||||||
.expect("pow does not overflow"),
|
.expect("pow does not overflow"),
|
||||||
proportional_slashing_multiplier_bellatrix: 3,
|
proportional_slashing_multiplier_bellatrix: 3,
|
||||||
bellatrix_fork_version: [0x02, 0x00, 0x00, 0x64],
|
bellatrix_fork_version: [0x02, 0x00, 0x00, 0x64],
|
||||||
bellatrix_fork_epoch: None,
|
bellatrix_fork_epoch: Some(Epoch::new(385536)),
|
||||||
terminal_total_difficulty: Uint256::MAX
|
terminal_total_difficulty: Uint256::from_dec_str(
|
||||||
.checked_sub(Uint256::from(2u64.pow(10)))
|
"8626000000000000000000058750000000000000000000",
|
||||||
.expect("subtraction does not overflow")
|
)
|
||||||
// Add 1 since the spec declares `2**256 - 2**10` and we use
|
.expect("terminal_total_difficulty is a valid integer"),
|
||||||
// `Uint256::MAX` which is `2*256- 1`.
|
|
||||||
.checked_add(Uint256::one())
|
|
||||||
.expect("addition does not overflow"),
|
|
||||||
terminal_block_hash: ExecutionBlockHash::zero(),
|
terminal_block_hash: ExecutionBlockHash::zero(),
|
||||||
terminal_block_hash_activation_epoch: Epoch::new(u64::MAX),
|
terminal_block_hash_activation_epoch: Epoch::new(u64::MAX),
|
||||||
safe_slots_to_import_optimistically: 128u64,
|
safe_slots_to_import_optimistically: 128u64,
|
||||||
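This hunk updates what appears to be the Gnosis preset: the `terminal_total_difficulty` placeholder (the spec's 2**256 - 2**10, built from `Uint256::MAX`, i.e. a difficulty no chain would ever reach) is replaced by a concrete merge TTD. A small sketch of the two constructions, assuming Lighthouse's `Uint256` is a re-export of `ethereum_types::U256` (only `from_dec_str`, `MAX`, `one` and the checked arithmetic shown are relied on):

use ethereum_types::U256;

fn main() {
    // Old placeholder: 2**256 - 2**10, plus 1 to compensate for
    // U256::MAX being 2**256 - 1.
    let placeholder = U256::MAX
        .checked_sub(U256::from(1024u64))
        .and_then(|v| v.checked_add(U256::one()))
        .expect("arithmetic does not overflow");

    // New value: the concrete terminal total difficulty from the hunk above.
    let ttd = U256::from_dec_str("8626000000000000000000058750000000000000000000")
        .expect("valid decimal literal");

    // The real TTD is vastly smaller than the "never" placeholder.
    assert!(ttd < placeholder);
    println!("ttd = {ttd}");
}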
@@ -21,17 +21,15 @@ pub struct LightClientBootstrap<T: EthSpec> {
 }

 impl<T: EthSpec> LightClientBootstrap<T> {
-    pub fn from_beacon_state(beacon_state: BeaconState<T>) -> Result<Self, Error> {
+    pub fn from_beacon_state(beacon_state: &mut BeaconState<T>) -> Result<Self, Error> {
         let mut header = beacon_state.latest_block_header().clone();
         header.state_root = beacon_state.tree_hash_root();
+        let current_sync_committee_branch =
+            beacon_state.compute_merkle_proof(CURRENT_SYNC_COMMITTEE_INDEX)?;
         Ok(LightClientBootstrap {
             header,
             current_sync_committee: beacon_state.current_sync_committee()?.clone(),
-            /// TODO(Giulio2002): Generate Merkle Proof, this is just empty hashes
-            current_sync_committee_branch: FixedVector::new(vec![
-                Hash256::zero();
-                CURRENT_SYNC_COMMITTEE_PROOF_LEN
-            ])?,
+            current_sync_committee_branch: FixedVector::new(current_sync_committee_branch)?,
         })
     }
 }
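With `compute_merkle_proof` now producing a real branch instead of zero hashes, a light client can check `current_sync_committee` against the attested state root. A std-only sketch of the verification fold, with a toy combiner standing in for SHA-256 (nothing here is Lighthouse API):

// Toy stand-in for SHA-256 over the concatenation of two nodes; a real
// client must use SHA-256 or the roots will not match.
type Hash256 = [u8; 32];

fn hash_concat(left: &Hash256, right: &Hash256) -> Hash256 {
    let mut out = [0u8; 32];
    for i in 0..32 {
        out[i] = left[i].wrapping_mul(31).wrapping_add(right[i]).rotate_left(3);
    }
    out
}

// Fold a leaf up the tree: at each level, the generalized index's low bit
// says whether the current node is a left (even) or right (odd) child.
fn is_valid_merkle_branch(leaf: Hash256, branch: &[Hash256], gindex: u64, root: Hash256) -> bool {
    let mut node = leaf;
    let mut index = gindex;
    for sibling in branch {
        node = if index % 2 == 0 {
            hash_concat(&node, sibling)
        } else {
            hash_concat(sibling, &node)
        };
        index /= 2;
    }
    index == 1 && node == root
}

fn main() {
    // Depth-1 tree: the leaf sits at gindex 2 (left child of the root).
    let leaf = [1u8; 32];
    let sibling = [2u8; 32];
    let root = hash_concat(&leaf, &sibling);
    assert!(is_valid_merkle_branch(leaf, &[sibling], 2, root));
    assert!(!is_valid_merkle_branch(leaf, &[sibling], 3, root));
}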
@@ -31,7 +31,7 @@ impl<T: EthSpec> LightClientFinalityUpdate<T> {
         chain_spec: ChainSpec,
         beacon_state: BeaconState<T>,
         block: BeaconBlock<T>,
-        attested_state: BeaconState<T>,
+        attested_state: &mut BeaconState<T>,
         finalized_block: BeaconBlock<T>,
     ) -> Result<Self, Error> {
         let altair_fork_epoch = chain_spec
@@ -60,11 +60,12 @@ impl<T: EthSpec> LightClientFinalityUpdate<T> {
         if finalized_header.tree_hash_root() != beacon_state.finalized_checkpoint().root {
             return Err(Error::InvalidFinalizedBlock);
         }
-        // TODO(Giulio2002): compute proper merkle proofs.
+        let finality_branch = attested_state.compute_merkle_proof(FINALIZED_ROOT_INDEX)?;
         Ok(Self {
             attested_header: attested_header,
             finalized_header: finalized_header,
-            finality_branch: FixedVector::new(vec![Hash256::zero(); FINALIZED_ROOT_PROOF_LEN])?,
+            finality_branch: FixedVector::new(finality_branch)?,
             sync_aggregate: sync_aggregate.clone(),
             signature_slot: block.slot(),
         })
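For `FixedVector::new(finality_branch)?` to succeed, the branch returned by `compute_merkle_proof` must be exactly `FINALIZED_ROOT_PROOF_LEN` hashes long, and that length is floor(log2) of the generalized index. To the best of my reading of the Altair light-client spec, `FINALIZED_ROOT_INDEX` is gindex 105 (depth 6) and the two sync-committee gindices are 54 and 55 (depth 5); a one-liner sketch of the relation:

// floor(log2(gindex)) gives the number of sibling hashes in the branch.
fn proof_len(gindex: u64) -> u32 {
    63 - gindex.leading_zeros()
}

fn main() {
    assert_eq!(proof_len(105), 6); // FINALIZED_ROOT_INDEX -> FINALIZED_ROOT_PROOF_LEN
    assert_eq!(proof_len(54), 5); // CURRENT_SYNC_COMMITTEE_INDEX
    assert_eq!(proof_len(55), 5); // NEXT_SYNC_COMMITTEE_INDEX
}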
@@ -77,7 +77,7 @@ impl<T: EthSpec> LightClientUpdate<T> {
         chain_spec: ChainSpec,
         beacon_state: BeaconState<T>,
         block: BeaconBlock<T>,
-        attested_state: BeaconState<T>,
+        attested_state: &mut BeaconState<T>,
         finalized_block: BeaconBlock<T>,
     ) -> Result<Self, Error> {
         let altair_fork_epoch = chain_spec
@@ -114,16 +114,15 @@ impl<T: EthSpec> LightClientUpdate<T> {
         if finalized_header.tree_hash_root() != beacon_state.finalized_checkpoint().root {
             return Err(Error::InvalidFinalizedBlock);
         }
-        // TODO(Giulio2002): compute proper merkle proofs.
+        let next_sync_committee_branch =
+            attested_state.compute_merkle_proof(NEXT_SYNC_COMMITTEE_INDEX)?;
+        let finality_branch = attested_state.compute_merkle_proof(FINALIZED_ROOT_INDEX)?;
         Ok(Self {
             attested_header,
             next_sync_committee: attested_state.next_sync_committee()?.clone(),
-            next_sync_committee_branch: FixedVector::new(vec![
-                Hash256::zero();
-                NEXT_SYNC_COMMITTEE_PROOF_LEN
-            ])?,
+            next_sync_committee_branch: FixedVector::new(next_sync_committee_branch)?,
             finalized_header,
-            finality_branch: FixedVector::new(vec![Hash256::zero(); FINALIZED_ROOT_PROOF_LEN])?,
+            finality_branch: FixedVector::new(finality_branch)?,
             sync_aggregate: sync_aggregate.clone(),
             signature_slot: block.slot(),
         })
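All three constructors now take `attested_state: &mut BeaconState<T>`, presumably because `compute_merkle_proof` must (re)build the state's internal tree-hash cache before it can read off sibling hashes. A toy analogue of that mutate-to-memoize shape, with invented names and nothing taken from Lighthouse's types:

// Illustrative only: a root that is recomputed lazily and cached, so even
// a "read" requires `&mut self`.
struct CachedTree {
    leaves: Vec<u64>,
    cached_root: Option<u64>,
}

impl CachedTree {
    fn new(leaves: Vec<u64>) -> Self {
        Self { leaves, cached_root: None }
    }

    fn root(&mut self) -> u64 {
        if self.cached_root.is_none() {
            // Stand-in for the expensive re-hash of the whole state tree.
            let root = self
                .leaves
                .iter()
                .copied()
                .fold(17u64, |acc, x| acc.rotate_left(5).wrapping_add(x));
            self.cached_root = Some(root);
        }
        self.cached_root.expect("cache was just filled")
    }

    fn push(&mut self, leaf: u64) {
        self.leaves.push(leaf);
        self.cached_root = None; // any mutation invalidates the cache
    }
}

fn main() {
    let mut tree = CachedTree::new(vec![1, 2, 3]);
    let r1 = tree.root(); // computes and caches
    assert_eq!(r1, tree.root()); // served from cache
    tree.push(4);
    assert_ne!(r1, tree.root()); // recomputed after invalidation
}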
@@ -98,10 +98,9 @@ fn parse_client_config<E: EthSpec>(
     cli_args: &ArgMatches,
     _env: &Environment<E>,
 ) -> Result<ClientConfig, String> {
-    let mut client_config = ClientConfig {
-        data_dir: get_data_dir(cli_args),
-        ..Default::default()
-    };
+    let mut client_config = ClientConfig::default();
+    client_config.set_data_dir(get_data_dir(cli_args));

     if let Some(freezer_dir) = clap_utils::parse_optional(cli_args, "freezer-dir")? {
         client_config.freezer_db_path = Some(freezer_dir);
@@ -289,7 +288,7 @@ pub fn prune_payloads<E: EthSpec>(
 }

 /// Run the database manager, returning an error string if the operation did not succeed.
-pub fn run<T: EthSpec>(cli_args: &ArgMatches<'_>, mut env: Environment<T>) -> Result<(), String> {
+pub fn run<T: EthSpec>(cli_args: &ArgMatches<'_>, env: Environment<T>) -> Result<(), String> {
     let client_config = parse_client_config(cli_args, &env)?;
     let context = env.core_context();
     let log = context.log().clone();
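The switch from a struct literal to `ClientConfig::default()` plus `set_data_dir` suggests `data_dir` is no longer assigned directly, so that paths derived from it can be kept in sync. A hypothetical sketch of why a setter beats a bare field assignment here (field names invented for illustration):

use std::path::{Path, PathBuf};

#[derive(Default)]
struct ClientConfig {
    data_dir: PathBuf,
    // Derived from data_dir; invented field for illustration.
    db_path: PathBuf,
}

impl ClientConfig {
    // Keeps the derived path in sync, which `data_dir: ...` inside a
    // struct literal cannot do.
    fn set_data_dir(&mut self, data_dir: PathBuf) {
        self.db_path = data_dir.join("beacon").join("chain_db");
        self.data_dir = data_dir;
    }

    fn data_dir(&self) -> &Path {
        &self.data_dir
    }
}

fn main() {
    let mut config = ClientConfig::default();
    config.set_data_dir(PathBuf::from("/tmp/lighthouse"));
    assert!(config.db_path.starts_with(config.data_dir()));
}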
@@ -1,7 +1,7 @@
 [package]
 name = "lcli"
 description = "Lighthouse CLI (modeled after zcli)"
-version = "3.2.1"
+version = "3.3.0"
 authors = ["Paul Hauner <paul@paulhauner.com>"]
 edition = "2021"
Some files were not shown because too many files have changed in this diff.